Dataset columns:
repo_id: string (length 15 to 89)
file_path: string (length 27 to 180)
content: string (length 1 to 2.23M)
__index_level_0__: int64 (0 to 0)
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/rust-toolchain
stable
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/setup.cfg
[isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = transformers
known_third_party =
    absl
    conllu
    datasets
    elasticsearch
    fairseq
    faiss-cpu
    fastprogress
    fire
    fugashi
    git
    h5py
    matplotlib
    nltk
    numpy
    packaging
    pandas
    PIL
    psutil
    pytest
    pytorch_lightning
    rouge_score
    sacrebleu
    seqeval
    sklearn
    streamlit
    tensorboardX
    tensorflow
    tensorflow_datasets
    timeout_decorator
    torch
    torchaudio
    torchtext
    torchvision
    torch_xla
    tqdm
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True

[flake8]
ignore = E203, E501, E741, W503, W605
max-line-length = 119

[tool:pytest]
doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/pyproject.toml
[project]
name = 'tokenizers'
requires-python = '>=3.7'
authors = [
    {name = 'Nicolas Patry', email = 'patry.nicolas@protonmail.com'},
    {name = 'Anthony Moi', email = 'anthony@huggingface.co'}
]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "Intended Audience :: Education",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]
keywords = ["NLP", "tokenizer", "BPE", "transformer", "deep learning"]
dynamic = [
    'description',
    'license',
    'readme',
]
dependencies = ["huggingface_hub>=0.16.4,<1.0"]

[project.urls]
Homepage = 'https://github.com/huggingface/tokenizers'
Source = 'https://github.com/huggingface/tokenizers'

[project.optional-dependencies]
testing = ["pytest", "requests", "numpy", "datasets", "black==22.3"]
docs = ["sphinx", "sphinx_rtd_theme", "setuptools_rust"]
dev = ["tokenizers[testing]"]

[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"

[tool.maturin]
python-source = "py_src"
module-name = "tokenizers.tokenizers"
bindings = 'pyo3'
features = ["pyo3/extension-module"]

[tool.black]
line-length = 119
target-version = ['py35']
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/Cargo.toml
[package]
name = "tokenizers-python"
version = "0.15.1-dev.0"
authors = ["Anthony MOI <m.anthony.moi@gmail.com>"]
edition = "2021"

[lib]
name = "tokenizers"
crate-type = ["cdylib"]

[dependencies]
rayon = "1.8"
serde = { version = "1.0", features = [ "rc", "derive" ]}
serde_json = "1.0"
libc = "0.2"
env_logger = "0.10.0"
pyo3 = { version = "0.19" }
numpy = "0.19.0"
ndarray = "0.15"
onig = { version = "6.4", default-features = false }
itertools = "0.11"

[dependencies.tokenizers]
version = "0.15.1-dev.0"
path = "../../tokenizers"

[dev-dependencies]
tempfile = "3.8"
pyo3 = { version = "0.19", features = ["auto-initialize"] }

[features]
default = ["pyo3/extension-module"]
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/error.rs
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::type_object::PyTypeInfo;
use std::fmt::{Display, Formatter, Result as FmtResult};
use tokenizers::tokenizer::Result;

#[derive(Debug)]
pub struct PyError(pub String);

impl PyError {
    #[allow(dead_code)]
    pub fn from(s: &str) -> Self {
        PyError(String::from(s))
    }

    pub fn into_pyerr<T: PyTypeInfo>(self) -> PyErr {
        PyErr::new::<T, _>(format!("{}", self))
    }
}

impl Display for PyError {
    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
        write!(fmt, "{}", self.0)
    }
}

impl std::error::Error for PyError {}

pub struct ToPyResult<T>(pub Result<T>);

impl<T> From<ToPyResult<T>> for PyResult<T> {
    fn from(v: ToPyResult<T>) -> Self {
        v.0.map_err(|e| exceptions::PyException::new_err(format!("{}", e)))
    }
}

impl<T> ToPyResult<T> {
    pub fn into_py(self) -> PyResult<T> {
        self.into()
    }
}

pub(crate) fn deprecation_warning(py: Python<'_>, version: &str, message: &str) -> PyResult<()> {
    let deprecation_warning = py.import("builtins")?.getattr("DeprecationWarning")?;
    let full_message = format!("Deprecated in {}: {}", version, message);
    pyo3::PyErr::warn(py, deprecation_warning, &full_message, 0)
}
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/encoding.rs
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use tk::tokenizer::{Offsets, PaddingDirection}; use tk::utils::truncation::TruncationDirection; use tokenizers as tk; use crate::error::{deprecation_warning, PyError}; /// The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. #[pyclass(dict, module = "tokenizers", name = "Encoding")] #[repr(transparent)] pub struct PyEncoding { pub encoding: tk::tokenizer::Encoding, } impl From<tk::tokenizer::Encoding> for PyEncoding { fn from(v: tk::tokenizer::Encoding) -> Self { Self { encoding: v } } } #[pymethods] impl PyEncoding { #[new] #[pyo3(text_signature = None)] fn new() -> Self { Self { encoding: tk::tokenizer::Encoding::default(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.encoding).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Encoding: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.encoding = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Encoding: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } fn __repr__(&self) -> PyResult<String> { Ok(format!( "Encoding(num_tokens={}, attributes=[ids, type_ids, tokens, offsets, \ attention_mask, special_tokens_mask, overflowing])", self.encoding.get_ids().len() )) } fn __len__(&self) -> PyResult<usize> { Ok(self.encoding.len()) } /// Merge the list of encodings into one final :class:`~tokenizers.Encoding` /// /// Args: /// encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): /// The list of encodings that should be merged in one /// /// growing_offsets (:obj:`bool`, defaults to :obj:`True`): /// Whether the offsets should accumulate while merging /// /// Returns: /// :class:`~tokenizers.Encoding`: The resulting Encoding #[staticmethod] #[pyo3(signature = (encodings, growing_offsets = true))] #[pyo3(text_signature = "(encodings, growing_offsets=True)")] fn merge(encodings: Vec<PyRef<PyEncoding>>, growing_offsets: bool) -> PyEncoding { tk::tokenizer::Encoding::merge( encodings.into_iter().map(|e| e.encoding.clone()), growing_offsets, ) .into() } /// The number of sequences represented /// /// Returns: /// :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` #[getter] fn get_n_sequences(&self) -> usize { self.encoding.n_sequences() } /// Set the given sequence index /// /// Set the given sequence index for the whole range of tokens contained in this /// :class:`~tokenizers.Encoding`. #[pyo3(text_signature = "(self, sequence_id)")] fn set_sequence_id(&mut self, sequence_id: usize) { self.encoding.set_sequence_id(sequence_id); } /// The generated IDs /// /// The IDs are the main input to a Language Model. They are the token indices, /// the numerical representations that a LM understands. /// /// Returns: /// :obj:`List[int]`: The list of IDs #[getter] fn get_ids(&self) -> Vec<u32> { self.encoding.get_ids().to_vec() } /// The generated tokens /// /// They are the string representation of the IDs. /// /// Returns: /// :obj:`List[str]`: The list of tokens #[getter] fn get_tokens(&self) -> Vec<String> { self.encoding.get_tokens().to_vec() } /// The generated word indices. /// /// .. warning:: /// This is deprecated and will be removed in a future version. 
/// Please use :obj:`~tokenizers.Encoding.word_ids` instead. /// /// They represent the index of the word associated to each token. /// When the input is pre-tokenized, they correspond to the ID of the given input label, /// otherwise they correspond to the words indices as defined by the /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. /// /// For special tokens and such (any token that was generated from something that was /// not part of the input), the output is :obj:`None` /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. #[getter] fn get_words(&self, py: Python<'_>) -> PyResult<Vec<Option<u32>>> { deprecation_warning( py, "0.9.4", "Encoding.words is deprecated, please use Encoding.word_ids instead.", )?; Ok(self.get_word_ids()) } /// The generated word indices. /// /// They represent the index of the word associated to each token. /// When the input is pre-tokenized, they correspond to the ID of the given input label, /// otherwise they correspond to the words indices as defined by the /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. /// /// For special tokens and such (any token that was generated from something that was /// not part of the input), the output is :obj:`None` /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. #[getter] fn get_word_ids(&self) -> Vec<Option<u32>> { self.encoding.get_word_ids().to_vec() } /// The generated sequence indices. /// /// They represent the index of the input sequence associated to each token. /// The sequence id can be None if the token is not related to any input sequence, /// like for example with special tokens. /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. #[getter] fn get_sequence_ids(&self) -> Vec<Option<usize>> { self.encoding.get_sequence_ids() } /// The generated type IDs /// /// Generally used for tasks like sequence classification or question answering, /// these tokens let the LM know which input sequence corresponds to each tokens. /// /// Returns: /// :obj:`List[int]`: The list of type ids #[getter] fn get_type_ids(&self) -> Vec<u32> { self.encoding.get_type_ids().to_vec() } /// The offsets associated to each token /// /// These offsets let's you slice the input string, and thus retrieve the original /// part that led to producing the corresponding token. /// /// Returns: /// A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets #[getter] fn get_offsets(&self) -> Vec<(usize, usize)> { self.encoding.get_offsets().to_vec() } /// The special token mask /// /// This indicates which tokens are special tokens, and which are not. /// /// Returns: /// :obj:`List[int]`: The special tokens mask #[getter] fn get_special_tokens_mask(&self) -> Vec<u32> { self.encoding.get_special_tokens_mask().to_vec() } /// The attention mask /// /// This indicates to the LM which tokens should be attended to, and which should not. /// This is especially important when batching sequences, where we need to applying /// padding. /// /// Returns: /// :obj:`List[int]`: The attention mask #[getter] fn get_attention_mask(&self) -> Vec<u32> { self.encoding.get_attention_mask().to_vec() } /// A :obj:`List` of overflowing :class:`~tokenizers.Encoding` /// /// When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting /// the output into as many pieces as required to match the specified maximum length. /// This field lets you retrieve all the subsequent pieces. 
/// /// When you use pairs of sequences, the overflowing pieces will contain enough /// variations to cover all the possible combinations, while respecting the provided /// maximum length. #[getter] fn get_overflowing(&self) -> Vec<PyEncoding> { self.encoding .get_overflowing() .clone() .into_iter() .map(|e| e.into()) .collect() } /// Get the encoded tokens corresponding to the word at the given index /// in one of the input sequences. /// /// Args: /// word_index (:obj:`int`): /// The index of a word in one of the input sequences. /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target word /// /// Returns: /// :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` #[pyo3(signature = (word_index, sequence_index = 0))] #[pyo3(text_signature = "(self, word_index, sequence_index=0)")] fn word_to_tokens(&self, word_index: u32, sequence_index: usize) -> Option<(usize, usize)> { self.encoding.word_to_tokens(word_index, sequence_index) } /// Get the offsets of the word at the given index in one of the input sequences. /// /// Args: /// word_index (:obj:`int`): /// The index of a word in one of the input sequences. /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target word /// /// Returns: /// :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` #[pyo3(signature = (word_index, sequence_index = 0))] #[pyo3(text_signature = "(self, word_index, sequence_index=0)")] fn word_to_chars(&self, word_index: u32, sequence_index: usize) -> Option<Offsets> { self.encoding.word_to_chars(word_index, sequence_index) } /// Get the index of the sequence represented by the given token. /// /// In the general use case, this method returns :obj:`0` for a single sequence or /// the first sequence of a pair, and :obj:`1` for the second sequence of a pair /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`int`: The sequence id of the given token #[pyo3(text_signature = "(self, token_index)")] fn token_to_sequence(&self, token_index: usize) -> Option<usize> { self.encoding.token_to_sequence(token_index) } /// Get the offsets of the token at the given index. /// /// The returned offsets are related to the input sequence that contains the /// token. In order to determine in which input sequence it belongs, you /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`. /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` #[pyo3(text_signature = "(self, token_index)")] fn token_to_chars(&self, token_index: usize) -> Option<Offsets> { let (_, offsets) = self.encoding.token_to_chars(token_index)?; Some(offsets) } /// Get the index of the word that contains the token in one of the input sequences. /// /// The returned word index is related to the input sequence that contains /// the token. In order to determine in which input sequence it belongs, you /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`. /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`int`: The index of the word in the relevant input sequence. 
#[pyo3(text_signature = "(self, token_index)")] fn token_to_word(&self, token_index: usize) -> Option<u32> { let (_, word_idx) = self.encoding.token_to_word(token_index)?; Some(word_idx) } /// Get the token that contains the char at the given position in the input sequence. /// /// Args: /// char_pos (:obj:`int`): /// The position of a char in the input string /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target char /// /// Returns: /// :obj:`int`: The index of the token that contains this char in the encoded sequence #[pyo3(signature = (char_pos, sequence_index = 0))] #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")] fn char_to_token(&self, char_pos: usize, sequence_index: usize) -> Option<usize> { self.encoding.char_to_token(char_pos, sequence_index) } /// Get the word that contains the char at the given position in the input sequence. /// /// Args: /// char_pos (:obj:`int`): /// The position of a char in the input string /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target char /// /// Returns: /// :obj:`int`: The index of the word that contains this char in the input sequence #[pyo3(signature = (char_pos, sequence_index = 0))] #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")] fn char_to_word(&self, char_pos: usize, sequence_index: usize) -> Option<u32> { self.encoding.char_to_word(char_pos, sequence_index) } /// Pad the :class:`~tokenizers.Encoding` at the given length /// /// Args: /// length (:obj:`int`): /// The desired length /// /// direction: (:obj:`str`, defaults to :obj:`right`): /// The expected padding direction. Can be either :obj:`right` or :obj:`left` /// /// pad_id (:obj:`int`, defaults to :obj:`0`): /// The ID corresponding to the padding token /// /// pad_type_id (:obj:`int`, defaults to :obj:`0`): /// The type ID corresponding to the padding token /// /// pad_token (:obj:`str`, defaults to `[PAD]`): /// The pad token to use #[pyo3(signature = (length, **kwargs))] #[pyo3( text_signature = "(self, length, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]')" )] fn pad(&mut self, length: usize, kwargs: Option<&PyDict>) -> PyResult<()> { let mut pad_id = 0; let mut pad_type_id = 0; let mut pad_token = "[PAD]"; let mut direction = PaddingDirection::Right; if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "direction" => { let value: &str = value.extract()?; direction = match value { "left" => Ok(PaddingDirection::Left), "right" => Ok(PaddingDirection::Right), other => Err(PyError(format!( "Unknown `direction`: `{}`. Use \ one of `left` or `right`", other )) .into_pyerr::<exceptions::PyValueError>()), }?; } "pad_id" => pad_id = value.extract()?, "pad_type_id" => pad_type_id = value.extract()?, "pad_token" => pad_token = value.extract()?, _ => println!("Ignored unknown kwarg option {}", key), } } } self.encoding .pad(length, pad_id, pad_type_id, pad_token, direction); Ok(()) } /// Truncate the :class:`~tokenizers.Encoding` at the given length /// /// If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating /// this information is lost. It will be considered as representing a single sequence. 
/// /// Args: /// max_length (:obj:`int`): /// The desired length /// /// stride (:obj:`int`, defaults to :obj:`0`): /// The length of previous content to be included in each overflowing piece /// /// direction (:obj:`str`, defaults to :obj:`right`): /// Truncate direction #[pyo3(signature = (max_length, stride = 0, direction = "right"))] #[pyo3(text_signature = "(self, max_length, stride=0, direction='right')")] fn truncate(&mut self, max_length: usize, stride: usize, direction: &str) -> PyResult<()> { let tdir = match direction { "left" => Ok(TruncationDirection::Left), "right" => Ok(TruncationDirection::Right), _ => Err(PyError(format!( "Invalid truncation direction value : {}", direction )) .into_pyerr::<exceptions::PyValueError>()), }?; self.encoding.truncate(max_length, stride, tdir); Ok(()) } }
0
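The doc-comments in encoding.rs above define the Python-facing Encoding API (ids, tokens, offsets, word/char mappings, pad, truncate). The following is a minimal usage sketch, assuming the published tokenizers package; the WordLevel vocabulary and input strings are illustrative, not taken from the dataset rows.

# Minimal sketch of the Encoding API documented in encoding.rs (illustrative values).
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace

vocab = {"[PAD]": 0, "[UNK]": 1, "hello": 2, "world": 3}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

enc = tokenizer.encode("hello world")
print(enc.ids)                # [2, 3]
print(enc.tokens)             # ['hello', 'world']
print(enc.offsets)            # [(0, 5), (6, 11)]
print(enc.word_to_tokens(1))  # (1, 2): token range covering the second word
print(enc.char_to_token(7))   # 1: the char at position 7 belongs to token 1

# pad() and truncate() mutate the Encoding in place, as documented above.
enc.pad(4, pad_id=0, pad_token="[PAD]")
print(enc.attention_mask)     # [1, 1, 0, 0]
enc.truncate(2)
print(enc.tokens)             # ['hello', 'world']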
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/trainers.rs
use std::sync::{Arc, RwLock}; use crate::models::PyModel; use crate::tokenizer::PyAddedToken; use crate::utils::PyChar; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::{Deserialize, Serialize}; use tk::models::TrainerWrapper; use tk::Trainer; use tokenizers as tk; /// Base class for all trainers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// Trainer will return an instance of this class when instantiated. #[pyclass(module = "tokenizers.trainers", name = "Trainer", subclass)] #[derive(Clone, Deserialize, Serialize)] pub struct PyTrainer { #[serde(flatten)] pub trainer: Arc<RwLock<TrainerWrapper>>, } impl PyTrainer { #[cfg(test)] pub(crate) fn new(trainer: Arc<RwLock<TrainerWrapper>>) -> Self { PyTrainer { trainer } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match *self.trainer.as_ref().read().unwrap() { TrainerWrapper::BpeTrainer(_) => Py::new(py, (PyBpeTrainer {}, base))?.into_py(py), TrainerWrapper::WordPieceTrainer(_) => { Py::new(py, (PyWordPieceTrainer {}, base))?.into_py(py) } TrainerWrapper::WordLevelTrainer(_) => { Py::new(py, (PyWordLevelTrainer {}, base))?.into_py(py) } TrainerWrapper::UnigramTrainer(_) => { Py::new(py, (PyUnigramTrainer {}, base))?.into_py(py) } }) } } #[pymethods] impl PyTrainer { fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.trainer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PyTrainer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PyTrainer: {}", e )) })?; self.trainer = unpickled; Ok(()) } Err(e) => Err(e), } } } impl Trainer for PyTrainer { type Model = PyModel; fn should_show_progress(&self) -> bool { self.trainer.read().unwrap().should_show_progress() } fn train(&self, model: &mut PyModel) -> tk::Result<Vec<tk::AddedToken>> { self.trainer .read() .unwrap() .train(&mut model.model.write().unwrap()) } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> tk::Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> tk::Result<Vec<String>> + Sync, { self.trainer.write().unwrap().feed(iterator, process) } } impl<I> From<I> for PyTrainer where I: Into<TrainerWrapper>, { fn from(trainer: I) -> Self { PyTrainer { trainer: Arc::new(RwLock::new(trainer.into())), } } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let TrainerWrapper::$variant(ref trainer) = *super_.trainer.read().unwrap() { trainer.$($name)+ } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() { trainer.$name = $value; } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() { trainer.$name($value); } }}; } /// Trainer capable of training a BPE model /// /// Args: /// vocab_size (:obj:`int`, `optional`): /// The size of the final vocabulary, including all tokens and alphabet. 
/// /// min_frequency (:obj:`int`, `optional`): /// The minimum frequency a pair should have in order to be merged. /// /// show_progress (:obj:`bool`, `optional`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): /// A list of special tokens the model should know of. /// /// limit_alphabet (:obj:`int`, `optional`): /// The maximum different characters to keep in the alphabet. /// /// initial_alphabet (:obj:`List[str]`, `optional`): /// A list of characters to include in the initial alphabet, even /// if not seen in the training dataset. /// If the strings contain more than one character, only the first one /// is kept. /// /// continuing_subword_prefix (:obj:`str`, `optional`): /// A prefix to be used for every subword that is not a beginning-of-word. /// /// end_of_word_suffix (:obj:`str`, `optional`): /// A suffix to be used for every subword that is a end-of-word. /// /// max_token_length (:obj:`int`, `optional`): /// Prevents creating tokens longer than the specified size. /// This can help with reducing polluting your vocabulary with /// highly repetitive tokens like `======` for wikipedia /// #[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "BpeTrainer")] pub struct PyBpeTrainer {} #[pymethods] impl PyBpeTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> usize { getter!(self_, BpeTrainer, vocab_size) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) { setter!(self_, BpeTrainer, vocab_size, vocab_size); } #[getter] fn get_min_frequency(self_: PyRef<Self>) -> u32 { getter!(self_, BpeTrainer, min_frequency) } #[setter] fn set_min_frequency(self_: PyRef<Self>, freq: u32) { setter!(self_, BpeTrainer, min_frequency, freq); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, BpeTrainer, show_progress) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, BpeTrainer, show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, BpeTrainer, special_tokens .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, BpeTrainer, special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? 
); Ok(()) } #[getter] fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> { getter!(self_, BpeTrainer, limit_alphabet) } #[setter] fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) { setter!(self_, BpeTrainer, limit_alphabet, limit); } #[getter] fn get_max_token_length(self_: PyRef<Self>) -> Option<usize> { getter!(self_, BpeTrainer, max_token_length) } #[setter] fn set_max_token_length(self_: PyRef<Self>, limit: Option<usize>) { setter!(self_, BpeTrainer, max_token_length, limit); } #[getter] fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> { getter!( self_, BpeTrainer, initial_alphabet.iter().map(|c| c.to_string()).collect() ) } #[setter] fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) { setter!( self_, BpeTrainer, initial_alphabet, alphabet.into_iter().map(|c| c.0).collect() ); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BpeTrainer, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) { setter!(self_, BpeTrainer, continuing_subword_prefix, prefix); } #[getter] fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BpeTrainer, end_of_word_suffix.clone()) } #[setter] fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) { setter!(self_, BpeTrainer, end_of_word_suffix, suffix); } #[new] #[pyo3(signature = (**kwargs), text_signature = None)] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::bpe::BpeTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => builder = builder.vocab_size(val.extract()?), "min_frequency" => builder = builder.min_frequency(val.extract()?), "show_progress" => builder = builder.show_progress(val.extract()?), "special_tokens" => { builder = builder.special_tokens( val.downcast::<PyList>()? .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ); } "limit_alphabet" => builder = builder.limit_alphabet(val.extract()?), "max_token_length" => builder = builder.max_token_length(val.extract()?), "initial_alphabet" => { let alphabet: Vec<String> = val.extract()?; builder = builder.initial_alphabet( alphabet .into_iter() .filter_map(|s| s.chars().next()) .collect(), ); } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(val.extract()?) } "end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?), _ => println!("Ignored unknown kwargs option {}", key), }; } } Ok((PyBpeTrainer {}, builder.build().into())) } } /// Trainer capable of training a WordPiece model /// /// Args: /// vocab_size (:obj:`int`, `optional`): /// The size of the final vocabulary, including all tokens and alphabet. /// /// min_frequency (:obj:`int`, `optional`): /// The minimum frequency a pair should have in order to be merged. /// /// show_progress (:obj:`bool`, `optional`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): /// A list of special tokens the model should know of. 
/// /// limit_alphabet (:obj:`int`, `optional`): /// The maximum different characters to keep in the alphabet. /// /// initial_alphabet (:obj:`List[str]`, `optional`): /// A list of characters to include in the initial alphabet, even /// if not seen in the training dataset. /// If the strings contain more than one character, only the first one /// is kept. /// /// continuing_subword_prefix (:obj:`str`, `optional`): /// A prefix to be used for every subword that is not a beginning-of-word. /// /// end_of_word_suffix (:obj:`str`, `optional`): /// A suffix to be used for every subword that is a end-of-word. #[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordPieceTrainer")] pub struct PyWordPieceTrainer {} #[pymethods] impl PyWordPieceTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> usize { getter!(self_, WordPieceTrainer, vocab_size()) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) { setter!(self_, WordPieceTrainer, @set_vocab_size, vocab_size); } #[getter] fn get_min_frequency(self_: PyRef<Self>) -> u32 { getter!(self_, WordPieceTrainer, min_frequency()) } #[setter] fn set_min_frequency(self_: PyRef<Self>, freq: u32) { setter!(self_, WordPieceTrainer, @set_min_frequency, freq); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, WordPieceTrainer, show_progress()) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, WordPieceTrainer, @set_show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, WordPieceTrainer, special_tokens() .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, WordPieceTrainer, @set_special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? 
); Ok(()) } #[getter] fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> { getter!(self_, WordPieceTrainer, limit_alphabet()) } #[setter] fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) { setter!(self_, WordPieceTrainer, @set_limit_alphabet, limit); } #[getter] fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> { getter!( self_, WordPieceTrainer, initial_alphabet().iter().map(|c| c.to_string()).collect() ) } #[setter] fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) { setter!( self_, WordPieceTrainer, @set_initial_alphabet, alphabet.into_iter().map(|c| c.0).collect() ); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> { getter!(self_, WordPieceTrainer, continuing_subword_prefix().clone()) } #[setter] fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) { setter!(self_, WordPieceTrainer, @set_continuing_subword_prefix, prefix); } #[getter] fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> { getter!(self_, WordPieceTrainer, end_of_word_suffix().clone()) } #[setter] fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) { setter!(self_, WordPieceTrainer, @set_end_of_word_suffix, suffix); } #[new] #[pyo3( signature = (** kwargs), text_signature = "(self, vocab_size=30000, min_frequency=0, show_progress=True, special_tokens=[], limit_alphabet=None, initial_alphabet= [],continuing_subword_prefix=\"##\", end_of_word_suffix=None)" )] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::wordpiece::WordPieceTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => builder = builder.vocab_size(val.extract()?), "min_frequency" => builder = builder.min_frequency(val.extract()?), "show_progress" => builder = builder.show_progress(val.extract()?), "special_tokens" => { builder = builder.special_tokens( val.downcast::<PyList>()? .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ); } "limit_alphabet" => builder = builder.limit_alphabet(val.extract()?), "initial_alphabet" => { let alphabet: Vec<String> = val.extract()?; builder = builder.initial_alphabet( alphabet .into_iter() .filter_map(|s| s.chars().next()) .collect(), ); } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(val.extract()?) } "end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?), _ => println!("Ignored unknown kwargs option {}", key), }; } } Ok((PyWordPieceTrainer {}, builder.build().into())) } } /// Trainer capable of training a WorldLevel model /// /// Args: /// vocab_size (:obj:`int`, `optional`): /// The size of the final vocabulary, including all tokens and alphabet. /// /// min_frequency (:obj:`int`, `optional`): /// The minimum frequency a pair should have in order to be merged. /// /// show_progress (:obj:`bool`, `optional`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`): /// A list of special tokens the model should know of. 
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordLevelTrainer")] pub struct PyWordLevelTrainer {} #[pymethods] impl PyWordLevelTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> usize { getter!(self_, WordLevelTrainer, vocab_size) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) { setter!(self_, WordLevelTrainer, vocab_size, vocab_size); } #[getter] fn get_min_frequency(self_: PyRef<Self>) -> u32 { getter!(self_, WordLevelTrainer, min_frequency) } #[setter] fn set_min_frequency(self_: PyRef<Self>, freq: u32) { setter!(self_, WordLevelTrainer, min_frequency, freq); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, WordLevelTrainer, show_progress) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, WordLevelTrainer, show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, WordLevelTrainer, special_tokens .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, WordLevelTrainer, special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? ); Ok(()) } #[new] #[pyo3(signature = (**kwargs), text_signature = None)] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::wordlevel::WordLevelTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => { builder.vocab_size(val.extract()?); } "min_frequency" => { builder.min_frequency(val.extract()?); } "show_progress" => { builder.show_progress(val.extract()?); } "special_tokens" => { builder.special_tokens( val.downcast::<PyList>()? .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ); } _ => println!("Ignored unknown kwargs option {}", key), } } } Ok(( PyWordLevelTrainer {}, builder .build() .expect("WordLevelTrainerBuilder cannot fail") .into(), )) } } /// Trainer capable of training a Unigram model /// /// Args: /// vocab_size (:obj:`int`): /// The size of the final vocabulary, including all tokens and alphabet. /// /// show_progress (:obj:`bool`): /// Whether to show progress bars while training. /// /// special_tokens (:obj:`List[Union[str, AddedToken]]`): /// A list of special tokens the model should know of. /// /// initial_alphabet (:obj:`List[str]`): /// A list of characters to include in the initial alphabet, even /// if not seen in the training dataset. /// If the strings contain more than one character, only the first one /// is kept. /// /// shrinking_factor (:obj:`float`): /// The shrinking factor used at each step of the training to prune the /// vocabulary. 
/// /// unk_token (:obj:`str`): /// The token used for out-of-vocabulary tokens. /// /// max_piece_length (:obj:`int`): /// The maximum length of a given token. /// /// n_sub_iterations (:obj:`int`): /// The number of iterations of the EM algorithm to perform before /// pruning the vocabulary. #[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "UnigramTrainer")] pub struct PyUnigramTrainer {} #[pymethods] impl PyUnigramTrainer { #[getter] fn get_vocab_size(self_: PyRef<Self>) -> u32 { getter!(self_, UnigramTrainer, vocab_size) } #[setter] fn set_vocab_size(self_: PyRef<Self>, vocab_size: u32) { setter!(self_, UnigramTrainer, vocab_size, vocab_size); } #[getter] fn get_show_progress(self_: PyRef<Self>) -> bool { getter!(self_, UnigramTrainer, show_progress) } #[setter] fn set_show_progress(self_: PyRef<Self>, show_progress: bool) { setter!(self_, UnigramTrainer, show_progress, show_progress); } #[getter] fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> { getter!( self_, UnigramTrainer, special_tokens .iter() .map(|tok| tok.clone().into()) .collect() ) } #[setter] fn set_special_tokens(self_: PyRef<Self>, special_tokens: &PyList) -> PyResult<()> { setter!( self_, UnigramTrainer, special_tokens, special_tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Special tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()? ); Ok(()) } #[getter] fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> { getter!( self_, UnigramTrainer, initial_alphabet.iter().map(|c| c.to_string()).collect() ) } #[setter] fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<PyChar>) { setter!( self_, UnigramTrainer, initial_alphabet, alphabet.into_iter().map(|c| c.0).collect() ); } #[new] #[pyo3( signature = (**kwargs), text_signature = "(self, vocab_size=8000, show_progress=True, special_tokens=[], shrinking_factor=0.75, unk_token=None, max_piece_length=16, n_sub_iterations=2)" )] pub fn new(kwargs: Option<&PyDict>) -> PyResult<(Self, PyTrainer)> { let mut builder = tk::models::unigram::UnigramTrainer::builder(); if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "vocab_size" => builder.vocab_size(val.extract()?), "show_progress" => builder.show_progress(val.extract()?), "n_sub_iterations" => builder.n_sub_iterations(val.extract()?), "shrinking_factor" => builder.shrinking_factor(val.extract()?), "unk_token" => builder.unk_token(val.extract()?), "max_piece_length" => builder.max_piece_length(val.extract()?), "seed_size" => builder.seed_size(val.extract()?), "initial_alphabet" => { let alphabet: Vec<String> = val.extract()?; builder.initial_alphabet( alphabet .into_iter() .filter_map(|s| s.chars().next()) .collect(), ) } "special_tokens" => builder.special_tokens( val.downcast::<PyList>()? 
.into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(true)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "special_tokens must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?, ), _ => { println!("Ignored unknown kwargs option {}", key); &mut builder } }; } } let trainer: tokenizers::models::unigram::UnigramTrainer = builder.build().map_err(|e| { exceptions::PyException::new_err(format!("Cannot build UnigramTrainer: {}", e)) })?; Ok((PyUnigramTrainer {}, trainer.into())) } } /// Trainers Module #[pymodule] pub fn trainers(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyTrainer>()?; m.add_class::<PyBpeTrainer>()?; m.add_class::<PyWordPieceTrainer>()?; m.add_class::<PyWordLevelTrainer>()?; m.add_class::<PyUnigramTrainer>()?; Ok(()) } #[cfg(test)] mod tests { use super::*; use tk::models::bpe::trainer::BpeTrainer; #[test] fn get_subtype() { Python::with_gil(|py| { let py_trainer = PyTrainer::new(Arc::new(RwLock::new(BpeTrainer::default().into()))); let py_bpe = py_trainer.get_as_subtype(py).unwrap(); assert_eq!("BpeTrainer", py_bpe.as_ref(py).get_type().name().unwrap()); }) } }
0
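The kwargs handled in trainers.rs above (vocab_size, min_frequency, special_tokens, max_token_length, ...) map directly onto the Python trainer constructors. A minimal training sketch, assuming the published tokenizers package; the corpus lines are made up for illustration.

# Minimal BpeTrainer sketch matching the kwargs parsed in trainers.rs (illustrative corpus).
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

trainer = BpeTrainer(
    vocab_size=200,
    min_frequency=2,
    special_tokens=["[UNK]", "[PAD]", "[CLS]", "[SEP]"],
    max_token_length=16,
)
corpus = ["a tiny corpus of text", "more text for the tiny corpus"]
tokenizer.train_from_iterator(corpus, trainer=trainer)

print(tokenizer.get_vocab_size())
print(tokenizer.encode("tiny text").tokens)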
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/processors.rs
use std::convert::TryInto; use std::sync::Arc; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use crate::encoding::PyEncoding; use crate::error::ToPyResult; use serde::{Deserialize, Serialize}; use tk::processors::bert::BertProcessing; use tk::processors::byte_level::ByteLevel; use tk::processors::roberta::RobertaProcessing; use tk::processors::sequence::Sequence; use tk::processors::template::{SpecialToken, Template}; use tk::processors::PostProcessorWrapper; use tk::{Encoding, PostProcessor}; use tokenizers as tk; /// Base class for all post-processors /// /// This class is not supposed to be instantiated directly. Instead, any implementation of /// a PostProcessor will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.processors", name = "PostProcessor", subclass )] #[derive(Clone, Deserialize, Serialize)] pub struct PyPostProcessor { #[serde(flatten)] pub processor: Arc<PostProcessorWrapper>, } impl PyPostProcessor { pub fn new(processor: Arc<PostProcessorWrapper>) -> Self { PyPostProcessor { processor } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match self.processor.as_ref() { PostProcessorWrapper::ByteLevel(_) => Py::new(py, (PyByteLevel {}, base))?.into_py(py), PostProcessorWrapper::Bert(_) => Py::new(py, (PyBertProcessing {}, base))?.into_py(py), PostProcessorWrapper::Roberta(_) => { Py::new(py, (PyRobertaProcessing {}, base))?.into_py(py) } PostProcessorWrapper::Template(_) => { Py::new(py, (PyTemplateProcessing {}, base))?.into_py(py) } PostProcessorWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))?.into_py(py), }) } } impl PostProcessor for PyPostProcessor { fn added_tokens(&self, is_pair: bool) -> usize { self.processor.added_tokens(is_pair) } fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> tk::Result<Vec<Encoding>> { self.processor .process_encodings(encodings, add_special_tokens) } } #[pymethods] impl PyPostProcessor { fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(self.processor.as_ref()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PostProcessor: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.processor = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PostProcessor: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Return the number of special tokens that would be added for single/pair sentences. 
/// /// Args: /// is_pair (:obj:`bool`): /// Whether the input would be a pair of sequences /// /// Returns: /// :obj:`int`: The number of tokens to add #[pyo3(text_signature = "(self, is_pair)")] fn num_special_tokens_to_add(&self, is_pair: bool) -> usize { self.processor.added_tokens(is_pair) } /// Post-process the given encodings, generating the final one /// /// Args: /// encoding (:class:`~tokenizers.Encoding`): /// The encoding for the first sequence /// /// pair (:class:`~tokenizers.Encoding`, `optional`): /// The encoding for the pair sequence /// /// add_special_tokens (:obj:`bool`): /// Whether to add the special tokens /// /// Return: /// :class:`~tokenizers.Encoding`: The final encoding #[pyo3(signature = (encoding, pair = None, add_special_tokens = true))] #[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")] fn process( &self, encoding: &PyEncoding, pair: Option<&PyEncoding>, add_special_tokens: bool, ) -> PyResult<PyEncoding> { let final_encoding = ToPyResult(self.processor.process( encoding.encoding.clone(), pair.map(|e| e.encoding.clone()), add_special_tokens, )) .into_py()?; Ok(final_encoding.into()) } } /// This post-processor takes care of adding the special tokens needed by /// a Bert model: /// /// - a SEP token /// - a CLS token /// /// Args: /// sep (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the SEP token, and its id /// /// cls (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the CLS token, and its id #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "BertProcessing")] pub struct PyBertProcessing {} #[pymethods] impl PyBertProcessing { #[new] #[pyo3(text_signature = "(self, sep, cls)")] fn new(sep: (String, u32), cls: (String, u32)) -> (Self, PyPostProcessor) { ( PyBertProcessing {}, PyPostProcessor::new(Arc::new(BertProcessing::new(sep, cls).into())), ) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [("", 0), ("", 0)]) } } /// This post-processor takes care of adding the special tokens needed by /// a Roberta model: /// /// - a SEP token /// - a CLS token /// /// It also takes care of trimming the offsets. /// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't /// want the offsets to include these whitespaces, then this PostProcessor should be initialized /// with :obj:`trim_offsets=True` /// /// Args: /// sep (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the SEP token, and its id /// /// cls (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the CLS token, and its id /// /// trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to trim the whitespaces from the produced offsets. /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether the add_prefix_space option was enabled during pre-tokenization. This /// is relevant because it defines the way the offsets are trimmed out. 
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "RobertaProcessing")] pub struct PyRobertaProcessing {} #[pymethods] impl PyRobertaProcessing { #[new] #[pyo3(signature = (sep, cls, trim_offsets = true, add_prefix_space = true), text_signature = "(self, sep, cls, trim_offsets=True, add_prefix_space=True)")] fn new( sep: (String, u32), cls: (String, u32), trim_offsets: bool, add_prefix_space: bool, ) -> (Self, PyPostProcessor) { let proc = RobertaProcessing::new(sep, cls) .trim_offsets(trim_offsets) .add_prefix_space(add_prefix_space); ( PyRobertaProcessing {}, PyPostProcessor::new(Arc::new(proc.into())), ) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [("", 0), ("", 0)]) } } /// This post-processor takes care of trimming the offsets. /// /// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't /// want the offsets to include these whitespaces, then this PostProcessor must be used. /// /// Args: /// trim_offsets (:obj:`bool`): /// Whether to trim the whitespaces from the produced offsets. #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "ByteLevel")] pub struct PyByteLevel {} #[pymethods] impl PyByteLevel { #[new] #[pyo3(signature = (trim_offsets = None, **_kwargs), text_signature = "(self, trim_offsets=True)")] fn new(trim_offsets: Option<bool>, _kwargs: Option<&PyDict>) -> (Self, PyPostProcessor) { let mut byte_level = ByteLevel::default(); if let Some(to) = trim_offsets { byte_level = byte_level.trim_offsets(to); } ( PyByteLevel {}, PyPostProcessor::new(Arc::new(byte_level.into())), ) } } #[derive(Clone, Debug)] pub struct PySpecialToken(SpecialToken); impl From<PySpecialToken> for SpecialToken { fn from(v: PySpecialToken) -> Self { v.0 } } impl FromPyObject<'_> for PySpecialToken { fn extract(ob: &PyAny) -> PyResult<Self> { if let Ok(v) = ob.extract::<(String, u32)>() { Ok(Self(v.into())) } else if let Ok(v) = ob.extract::<(u32, String)>() { Ok(Self(v.into())) } else if let Ok(d) = ob.downcast::<PyDict>() { let id = d .get_item("id") .ok_or_else(|| exceptions::PyValueError::new_err("`id` must be specified"))? .extract::<String>()?; let ids = d .get_item("ids") .ok_or_else(|| exceptions::PyValueError::new_err("`ids` must be specified"))? .extract::<Vec<u32>>()?; let tokens = d .get_item("tokens") .ok_or_else(|| exceptions::PyValueError::new_err("`tokens` must be specified"))? .extract::<Vec<String>>()?; Ok(Self( ToPyResult(SpecialToken::new(id, ids, tokens)).into_py()?, )) } else { Err(exceptions::PyTypeError::new_err( "Expected Union[Tuple[str, int], Tuple[int, str], dict]", )) } } } #[derive(Clone, Debug)] pub struct PyTemplate(Template); impl From<PyTemplate> for Template { fn from(v: PyTemplate) -> Self { v.0 } } impl FromPyObject<'_> for PyTemplate { fn extract(ob: &PyAny) -> PyResult<Self> { if let Ok(s) = ob.extract::<&str>() { Ok(Self( s.try_into().map_err(exceptions::PyValueError::new_err)?, )) } else if let Ok(s) = ob.extract::<Vec<&str>>() { Ok(Self( s.try_into().map_err(exceptions::PyValueError::new_err)?, )) } else { Err(exceptions::PyTypeError::new_err( "Expected Union[str, List[str]]", )) } } } /// Provides a way to specify templates in order to add the special tokens to each /// input sequence as relevant. /// /// Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to /// delimitate each sequence. 
:obj:`[CLS]` is always used at the beginning of the first /// sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair /// sequences. The final result looks like this: /// /// - Single sequence: :obj:`[CLS] Hello there [SEP]` /// - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]` /// /// With the type ids as following:: /// /// [CLS] ... [SEP] ... [SEP] /// 0 0 0 1 1 /// /// You can achieve such behavior using a TemplateProcessing:: /// /// TemplateProcessing( /// single="[CLS] $0 [SEP]", /// pair="[CLS] $A [SEP] $B:1 [SEP]:1", /// special_tokens=[("[CLS]", 1), ("[SEP]", 0)], /// ) /// /// In this example, each input sequence is identified using a ``$`` construct. This identifier /// lets us specify each input sequence, and the type_id to use. When nothing is specified, /// it uses the default values. Here are the different ways to specify it: /// /// - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B`` /// - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ... /// - Specifying both: ``$A:0``, ``$B:1``, ... /// /// The same construct is used for special tokens: ``<identifier>(:<type_id>)?``. /// /// **Warning**: You must ensure that you are giving the correct tokens/ids as these /// will be added to the Encoding without any further check. If the given ids correspond /// to something totally different in a `Tokenizer` using this `PostProcessor`, it /// might lead to unexpected results. /// /// Args: /// single (:obj:`Template`): /// The template used for single sequences /// /// pair (:obj:`Template`): /// The template used when both sequences are specified /// /// special_tokens (:obj:`Tokens`): /// The list of special tokens used in each sequences /// /// Types: /// /// Template (:obj:`str` or :obj:`List`): /// - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens /// - If a :obj:`List[str]` is provided, a list of tokens /// /// Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`): /// - A :obj:`Tuple` with both a token and its associated ID, in any order /// - A :obj:`dict` with the following keys: /// - "id": :obj:`str` => The special token id, as specified in the Template /// - "ids": :obj:`List[int]` => The associated IDs /// - "tokens": :obj:`List[str]` => The associated tokens /// /// The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have /// the same length. 
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "TemplateProcessing")] pub struct PyTemplateProcessing {} #[pymethods] impl PyTemplateProcessing { #[new] #[pyo3(signature = (single = None, pair = None, special_tokens = None), text_signature = "(self, single, pair, special_tokens)")] fn new( single: Option<PyTemplate>, pair: Option<PyTemplate>, special_tokens: Option<Vec<PySpecialToken>>, ) -> PyResult<(Self, PyPostProcessor)> { let mut builder = tk::processors::template::TemplateProcessing::builder(); if let Some(seq) = single { builder.single(seq.into()); } if let Some(seq) = pair { builder.pair(seq.into()); } if let Some(sp) = special_tokens { builder.special_tokens(sp); } let processor = builder .build() .map_err(|e| exceptions::PyValueError::new_err(e.to_string()))?; Ok(( PyTemplateProcessing {}, PyPostProcessor::new(Arc::new(processor.into())), )) } } /// Sequence Processor /// /// Args: /// processors (:obj:`List[PostProcessor]`) /// The processors that need to be chained #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(signature = (processors_py), text_signature = "(self, processors)")] fn new(processors_py: &PyList) -> (Self, PyPostProcessor) { let mut processors: Vec<PostProcessorWrapper> = Vec::with_capacity(processors_py.len()); for n in processors_py.iter() { let processor: PyRef<PyPostProcessor> = n.extract().unwrap(); let processor = processor.processor.as_ref(); processors.push(processor.clone()); } let sequence_processor = Sequence::new(processors); ( PySequence {}, PyPostProcessor::new(Arc::new(PostProcessorWrapper::Sequence(sequence_processor))), ) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } /// Processors Module #[pymodule] pub fn processors(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyPostProcessor>()?; m.add_class::<PyBertProcessing>()?; m.add_class::<PyRobertaProcessing>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyTemplateProcessing>()?; m.add_class::<PySequence>()?; Ok(()) } #[cfg(test)] mod test { use std::sync::Arc; use pyo3::prelude::*; use tk::processors::bert::BertProcessing; use tk::processors::PostProcessorWrapper; use crate::processors::PyPostProcessor; #[test] fn get_subtype() { Python::with_gil(|py| { let py_proc = PyPostProcessor::new(Arc::new( BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1)).into(), )); let py_bert = py_proc.get_as_subtype(py).unwrap(); assert_eq!( "BertProcessing", py_bert.as_ref(py).get_type().name().unwrap() ); }) } #[test] fn serialize() { let rs_processing = BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1)); let rs_wrapper: PostProcessorWrapper = rs_processing.clone().into(); let rs_processing_ser = serde_json::to_string(&rs_processing).unwrap(); let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap(); let py_processing = PyPostProcessor::new(Arc::new(rs_wrapper)); let py_ser = serde_json::to_string(&py_processing).unwrap(); assert_eq!(py_ser, rs_processing_ser); assert_eq!(py_ser, rs_wrapper_ser); let py_processing: PyPostProcessor = serde_json::from_str(&rs_processing_ser).unwrap(); match py_processing.processor.as_ref() { PostProcessorWrapper::Bert(_) => (), _ => panic!("Expected Bert postprocessor."), } let py_processing: PyPostProcessor = serde_json::from_str(&rs_wrapper_ser).unwrap(); match py_processing.processor.as_ref() { PostProcessorWrapper::Bert(_) => (), _ => panic!("Expected 
Bert postprocessor."), } } }
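A minimal usage sketch of the post-processors bound above, as seen from Python; the template strings and the special-token ids are illustrative placeholders, not values taken from this repository:

from tokenizers.processors import TemplateProcessing, Sequence, ByteLevel

# Illustrative template and ids; in practice the ids must match the tokenizer's vocabulary.
post = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)

# Several processors can be chained, mirroring the PySequence binding above.
chained = Sequence([ByteLevel(trim_offsets=True), post])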
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/models.rs
use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::sync::{Arc, RwLock}; use crate::token::PyToken; use crate::trainers::PyTrainer; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::{Deserialize, Serialize}; use tk::models::bpe::{BpeBuilder, Merges, Vocab, BPE}; use tk::models::unigram::Unigram; use tk::models::wordlevel::WordLevel; use tk::models::wordpiece::{WordPiece, WordPieceBuilder}; use tk::models::ModelWrapper; use tk::{Model, Token}; use tokenizers as tk; use super::error::{deprecation_warning, ToPyResult}; /// Base class for all models /// /// The model represents the actual tokenization algorithm. This is the part that /// will contain and manage the learned vocabulary. /// /// This class cannot be constructed directly. Please use one of the concrete models. #[pyclass(module = "tokenizers.models", name = "Model", subclass)] #[derive(Clone, Serialize, Deserialize)] pub struct PyModel { #[serde(flatten)] pub model: Arc<RwLock<ModelWrapper>>, } impl PyModel { pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match *self.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => Py::new(py, (PyBPE {}, base))?.into_py(py), ModelWrapper::WordPiece(_) => Py::new(py, (PyWordPiece {}, base))?.into_py(py), ModelWrapper::WordLevel(_) => Py::new(py, (PyWordLevel {}, base))?.into_py(py), ModelWrapper::Unigram(_) => Py::new(py, (PyUnigram {}, base))?.into_py(py), }) } } impl Model for PyModel { type Trainer = PyTrainer; fn tokenize(&self, tokens: &str) -> tk::Result<Vec<Token>> { self.model.read().unwrap().tokenize(tokens) } fn token_to_id(&self, token: &str) -> Option<u32> { self.model.read().unwrap().token_to_id(token) } fn id_to_token(&self, id: u32) -> Option<String> { self.model.read().unwrap().id_to_token(id) } fn get_vocab(&self) -> HashMap<String, u32> { self.model.read().unwrap().get_vocab() } fn get_vocab_size(&self) -> usize { self.model.read().unwrap().get_vocab_size() } fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> { self.model.read().unwrap().save(folder, name) } fn get_trainer(&self) -> Self::Trainer { self.model.read().unwrap().get_trainer().into() } } impl<I> From<I> for PyModel where I: Into<ModelWrapper>, { fn from(model: I) -> Self { Self { model: Arc::new(RwLock::new(model.into())), } } } #[pymethods] impl PyModel { #[new] #[pyo3(text_signature = None)] fn __new__() -> Self { // Instantiate a default empty model. This doesn't really make sense, but we need // to be able to instantiate an empty model for pickle capabilities. 
PyModel { model: Arc::new(RwLock::new(BPE::default().into())), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.model).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Model: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.model = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Model: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Tokenize a sequence /// /// Args: /// sequence (:obj:`str`): /// A sequence to tokenize /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens #[pyo3(text_signature = "(self, sequence)")] fn tokenize(&self, sequence: &str) -> PyResult<Vec<PyToken>> { Ok(ToPyResult(self.model.read().unwrap().tokenize(sequence)) .into_py()? .into_iter() .map(|t| t.into()) .collect()) } /// Get the ID associated to a token /// /// Args: /// token (:obj:`str`): /// A token to convert to an ID /// /// Returns: /// :obj:`int`: The ID associated to the token #[pyo3(text_signature = "(self, tokens)")] fn token_to_id(&self, token: &str) -> Option<u32> { self.model.read().unwrap().token_to_id(token) } /// Get the token associated to an ID /// /// Args: /// id (:obj:`int`): /// An ID to convert to a token /// /// Returns: /// :obj:`str`: The token associated to the ID #[pyo3(text_signature = "(self, id)")] fn id_to_token(&self, id: u32) -> Option<String> { self.model.read().unwrap().id_to_token(id) } /// Save the current model /// /// Save the current model in the given folder, using the given prefix for the various /// files that will get created. /// Any file with the same name that already exists in this folder will be overwritten. /// /// Args: /// folder (:obj:`str`): /// The path to the target folder in which to save the various files /// /// prefix (:obj:`str`, `optional`): /// An optional prefix, used to prefix each file name /// /// Returns: /// :obj:`List[str]`: The list of saved files #[pyo3(text_signature = "(self, folder, prefix)")] fn save<'a>( &self, py: Python<'_>, folder: &str, mut prefix: Option<&'a str>, name: Option<&'a str>, ) -> PyResult<Vec<String>> { if name.is_some() { deprecation_warning( py, "0.10.0", "Parameter `name` of Model.save has been renamed `prefix`", )?; if prefix.is_none() { prefix = name; } } let saved: PyResult<Vec<_>> = ToPyResult(self.model.read().unwrap().save(Path::new(folder), prefix)).into(); Ok(saved? .into_iter() .map(|path| path.to_string_lossy().into_owned()) .collect()) } /// Get the associated :class:`~tokenizers.trainers.Trainer` /// /// Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this /// :class:`~tokenizers.models.Model`. 
/// /// Returns: /// :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model #[pyo3(text_signature = "(self)")] fn get_trainer(&self, py: Python<'_>) -> PyResult<PyObject> { PyTrainer::from(self.model.read().unwrap().get_trainer()).get_as_subtype(py) } } /// An implementation of the BPE (Byte-Pair Encoding) algorithm /// /// Args: /// vocab (:obj:`Dict[str, int]`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// merges (:obj:`List[Tuple[str, str]]`, `optional`): /// A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]` /// /// cache_capacity (:obj:`int`, `optional`): /// The number of words that the BPE cache can contain. The cache allows /// to speed-up the process by keeping the result of the merge operations /// for a number of words. /// /// dropout (:obj:`float`, `optional`): /// A float between 0 and 1 that represents the BPE dropout to use. /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. /// /// continuing_subword_prefix (:obj:`str`, `optional`): /// The prefix to attach to subword units that don't represent a beginning of word. /// /// end_of_word_suffix (:obj:`str`, `optional`): /// The suffix to attach to subword units that represent an end of word. /// /// fuse_unk (:obj:`bool`, `optional`): /// Whether to fuse any subsequent unknown tokens into a single one /// /// byte_fallback (:obj:`bool`, `optional`): /// Whether to use spm byte-fallback trick (defaults to False) #[pyclass(extends=PyModel, module = "tokenizers.models", name = "BPE")] pub struct PyBPE {} impl PyBPE { fn with_builder(mut builder: BpeBuilder, kwargs: Option<&PyDict>) -> PyResult<(Self, PyModel)> { if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "cache_capacity" => builder = builder.cache_capacity(value.extract()?), "dropout" => { if let Some(dropout) = value.extract()? { builder = builder.dropout(dropout); } } "unk_token" => { if let Some(unk) = value.extract()? { builder = builder.unk_token(unk); } } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(value.extract()?) } "end_of_word_suffix" => builder = builder.end_of_word_suffix(value.extract()?), "fuse_unk" => builder = builder.fuse_unk(value.extract()?), "byte_fallback" => builder = builder.byte_fallback(value.extract()?), _ => println!("Ignored unknown kwarg option {}", key), }; } } match builder.build() { Err(e) => Err(exceptions::PyException::new_err(format!( "Error while initializing BPE: {}", e ))), Ok(bpe) => Ok((PyBPE {}, bpe.into())), } } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); let model = super_.model.read().unwrap(); if let ModelWrapper::$variant(ref mo) = *model { mo.$($name)+ } else { unreachable!() } }}; } macro_rules! 
setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); let mut model = super_.model.write().unwrap(); if let ModelWrapper::$variant(ref mut mo) = *model { mo.$name = $value; } }}; } #[derive(FromPyObject)] enum PyVocab<'a> { Vocab(Vocab), Filename(&'a str), } #[derive(FromPyObject)] enum PyMerges<'a> { Merges(Merges), Filename(&'a str), } #[pymethods] impl PyBPE { #[getter] fn get_dropout(self_: PyRef<Self>) -> Option<f32> { getter!(self_, BPE, dropout) } #[setter] fn set_dropout(self_: PyRef<Self>, dropout: Option<f32>) { setter!(self_, BPE, dropout, dropout); } #[getter] fn get_unk_token(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: Option<String>) { setter!(self_, BPE, unk_token, unk_token); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix( self_: PyRef<Self>, continuing_subword_prefix: Option<String>, ) { setter!( self_, BPE, continuing_subword_prefix, continuing_subword_prefix ); } #[getter] fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, end_of_word_suffix.clone()) } #[setter] fn set_end_of_word_suffix(self_: PyRef<Self>, end_of_word_suffix: Option<String>) { setter!(self_, BPE, end_of_word_suffix, end_of_word_suffix); } #[getter] fn get_fuse_unk(self_: PyRef<Self>) -> bool { getter!(self_, BPE, fuse_unk) } #[setter] fn set_fuse_unk(self_: PyRef<Self>, fuse_unk: bool) { setter!(self_, BPE, fuse_unk, fuse_unk); } #[getter] fn get_byte_fallback(self_: PyRef<Self>) -> bool { getter!(self_, BPE, byte_fallback) } #[setter] fn set_byte_fallback(self_: PyRef<Self>, byte_fallback: bool) { setter!(self_, BPE, byte_fallback, byte_fallback); } #[new] #[pyo3( signature = (vocab=None, merges=None, **kwargs), text_signature = "(self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False)")] fn new( py: Python<'_>, vocab: Option<PyVocab>, merges: Option<PyMerges>, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { if (vocab.is_some() && merges.is_none()) || (vocab.is_none() && merges.is_some()) { return Err(exceptions::PyValueError::new_err( "`vocab` and `merges` must be both specified", )); } let mut builder = BPE::builder(); if let (Some(vocab), Some(merges)) = (vocab, merges) { match (vocab, merges) { (PyVocab::Vocab(vocab), PyMerges::Merges(merges)) => { builder = builder.vocab_and_merges(vocab, merges); } (PyVocab::Filename(vocab_filename), PyMerges::Filename(merges_filename)) => { deprecation_warning( py, "0.9.0", "BPE.__init__ will not create from files anymore, try `BPE.from_file` instead", )?; builder = builder.files(vocab_filename.to_string(), merges_filename.to_string()); } _ => { return Err(exceptions::PyValueError::new_err( "`vocab` and `merges` must be both be from memory or both filenames", )); } } } PyBPE::with_builder(builder, kwargs) } /// Read a :obj:`vocab.json` and a :obj:`merges.txt` files /// /// This method provides a way to read and parse the content of these files, /// returning the relevant data structures. If you want to instantiate some BPE models /// from memory, this method gives you the expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// merges (:obj:`str`): /// The path to a :obj:`merges.txt` file /// /// Returns: /// A :obj:`Tuple` with the vocab and the merges: /// The vocabulary and merges loaded into memory #[staticmethod] #[pyo3(text_signature = "(self, vocab, merges)")] fn read_file(vocab: &str, merges: &str) -> PyResult<(Vocab, Merges)> { BPE::read_file(vocab, merges).map_err(|e| { exceptions::PyException::new_err(format!( "Error while reading vocab & merges files: {}", e )) }) } /// Instantiate a BPE model from the given files. /// /// This method is roughly equivalent to doing:: /// /// vocab, merges = BPE.read_file(vocab_filename, merges_filename) /// bpe = BPE(vocab, merges) /// /// If you don't need to keep the :obj:`vocab, merges` values lying around, /// this method is more optimized than manually calling /// :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// merges (:obj:`str`): /// The path to a :obj:`merges.txt` file /// /// Returns: /// :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files #[classmethod] #[pyo3(signature = (vocab, merges, **kwargs))] #[pyo3(text_signature = "(cls, vocab, merge, **kwargs)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, merges: &str, kwargs: Option<&PyDict>, ) -> PyResult<Py<Self>> { let (vocab, merges) = BPE::read_file(vocab, merges).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading BPE files: {}", e)) })?; Py::new( py, PyBPE::new( py, Some(PyVocab::Vocab(vocab)), Some(PyMerges::Merges(merges)), kwargs, )?, ) } } /// An implementation of the WordPiece algorithm /// /// Args: /// vocab (:obj:`Dict[str, int]`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. /// /// max_input_chars_per_word (:obj:`int`, `optional`): /// The maximum number of characters to authorize in a single word. 
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordPiece")] pub struct PyWordPiece {} impl PyWordPiece { fn with_builder( mut builder: WordPieceBuilder, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "unk_token" => { builder = builder.unk_token(val.extract()?); } "max_input_chars_per_word" => { builder = builder.max_input_chars_per_word(val.extract()?); } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(val.extract()?); } _ => println!("Ignored unknown kwargs option {}", key), } } } match builder.build() { Err(e) => Err(exceptions::PyException::new_err(format!( "Error while initializing WordPiece: {}", e ))), Ok(wordpiece) => Ok((PyWordPiece {}, wordpiece.into())), } } } #[pymethods] impl PyWordPiece { #[getter] fn get_unk_token(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: String) { setter!(self_, WordPiece, unk_token, unk_token); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix(self_: PyRef<Self>, continuing_subword_prefix: String) { setter!( self_, WordPiece, continuing_subword_prefix, continuing_subword_prefix ); } #[getter] fn get_max_input_chars_per_word(self_: PyRef<Self>) -> usize { getter!(self_, WordPiece, max_input_chars_per_word) } #[setter] fn set_max_input_chars_per_word(self_: PyRef<Self>, max: usize) { setter!(self_, WordPiece, max_input_chars_per_word, max); } #[new] #[pyo3(signature = (vocab=None, **kwargs), text_signature = "(self, vocab, unk_token, max_input_chars_per_word)")] fn new( py: Python<'_>, vocab: Option<PyVocab>, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { let mut builder = WordPiece::builder(); if let Some(vocab) = vocab { match vocab { PyVocab::Vocab(vocab) => { builder = builder.vocab(vocab); } PyVocab::Filename(vocab_filename) => { deprecation_warning( py, "0.9.0", "WordPiece.__init__ will not create from files anymore, try `WordPiece.from_file` instead", )?; builder = builder.files(vocab_filename.to_string()); } } } PyWordPiece::with_builder(builder, kwargs) } /// Read a :obj:`vocab.txt` file /// /// This method provides a way to read and parse the content of a standard `vocab.txt` /// file as used by the WordPiece Model, returning the relevant data structures. If you /// want to instantiate some WordPiece models from memory, this method gives you the /// expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.txt` file /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` #[staticmethod] #[pyo3(text_signature = "(vocab)")] fn read_file(vocab: &str) -> PyResult<Vocab> { WordPiece::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e)) }) } /// Instantiate a WordPiece model from the given file /// /// This method is roughly equivalent to doing:: /// /// vocab = WordPiece.read_file(vocab_filename) /// wordpiece = WordPiece(vocab) /// /// If you don't need to keep the :obj:`vocab` values lying around, this method is /// more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to /// initialize a :class:`~tokenizers.models.WordPiece` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.txt` file /// /// Returns: /// :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file #[classmethod] #[pyo3(signature = (vocab, **kwargs))] #[pyo3(text_signature = "(vocab, **kwargs)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, kwargs: Option<&PyDict>, ) -> PyResult<Py<Self>> { let vocab = WordPiece::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e)) })?; Py::new( py, PyWordPiece::new(py, Some(PyVocab::Vocab(vocab)), kwargs)?, ) } } /// An implementation of the WordLevel algorithm /// /// Most simple tokenizer model based on mapping tokens to their corresponding id. /// /// Args: /// vocab (:obj:`str`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. #[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordLevel")] pub struct PyWordLevel {} #[pymethods] impl PyWordLevel { #[getter] fn get_unk_token(self_: PyRef<Self>) -> String { getter!(self_, WordLevel, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: String) { setter!(self_, WordLevel, unk_token, unk_token); } #[new] #[pyo3(signature = (vocab=None, unk_token = None), text_signature = "(self, vocab, unk_token)")] fn new( py: Python<'_>, vocab: Option<PyVocab>, unk_token: Option<String>, ) -> PyResult<(Self, PyModel)> { let mut builder = WordLevel::builder(); if let Some(vocab) = vocab { match vocab { PyVocab::Vocab(vocab) => { builder = builder.vocab(vocab); } PyVocab::Filename(vocab_filename) => { deprecation_warning( py, "0.9.0", "WordLevel.__init__ will not create from files anymore, \ try `WordLevel.from_file` instead", )?; builder = builder.files(vocab_filename.to_string()); } }; } if let Some(unk_token) = unk_token { builder = builder.unk_token(unk_token); } Ok(( PyWordLevel {}, builder .build() .map_err(|e| exceptions::PyException::new_err(e.to_string()))? .into(), )) } /// Read a :obj:`vocab.json` /// /// This method provides a way to read and parse the content of a vocabulary file, /// returning the relevant data structures. If you want to instantiate some WordLevel models /// from memory, this method gives you the expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` #[staticmethod] #[pyo3(text_signature = "(vocab)")] fn read_file(vocab: &str) -> PyResult<Vocab> { WordLevel::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e)) }) } /// Instantiate a WordLevel model from the given file /// /// This method is roughly equivalent to doing:: /// /// vocab = WordLevel.read_file(vocab_filename) /// wordlevel = WordLevel(vocab) /// /// If you don't need to keep the :obj:`vocab` values lying around, this method is /// more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to /// initialize a :class:`~tokenizers.models.WordLevel` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// Returns: /// :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file #[classmethod] #[pyo3(signature = (vocab, unk_token = None))] #[pyo3(text_signature = "(vocab, unk_token)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, unk_token: Option<String>, ) -> PyResult<Py<Self>> { let vocab = WordLevel::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e)) })?; Py::new( py, PyWordLevel::new(py, Some(PyVocab::Vocab(vocab)), unk_token)?, ) } } /// An implementation of the Unigram algorithm /// /// Args: /// vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`): /// A list of vocabulary items and their relative score [("am", -0.2442),...] #[pyclass(extends=PyModel, module = "tokenizers.models", name = "Unigram")] pub struct PyUnigram {} #[pymethods] impl PyUnigram { #[new] #[pyo3(text_signature = "(self, vocab, unk_id, byte_fallback)")] fn new( vocab: Option<Vec<(String, f64)>>, unk_id: Option<usize>, byte_fallback: Option<bool>, ) -> PyResult<(Self, PyModel)> { match (vocab, unk_id, byte_fallback) { (Some(vocab), unk_id, byte_fallback) => { let model = Unigram::from(vocab, unk_id, byte_fallback.unwrap_or(false)).map_err(|e| { exceptions::PyException::new_err(format!( "Error while loading Unigram: {}", e )) })?; Ok((PyUnigram {}, model.into())) } (None, None, _) => Ok((PyUnigram {}, Unigram::default().into())), _ => Err(exceptions::PyValueError::new_err( "`vocab` and `unk_id` must be both specified", )), } } } /// Models Module #[pymodule] pub fn models(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyModel>()?; m.add_class::<PyBPE>()?; m.add_class::<PyWordPiece>()?; m.add_class::<PyWordLevel>()?; m.add_class::<PyUnigram>()?; Ok(()) } #[cfg(test)] mod test { use crate::models::PyModel; use pyo3::prelude::*; use tk::models::bpe::BPE; use tk::models::ModelWrapper; #[test] fn get_subtype() { Python::with_gil(|py| { let py_model = PyModel::from(BPE::default()); let py_bpe = py_model.get_as_subtype(py).unwrap(); assert_eq!("BPE", py_bpe.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let rs_bpe = BPE::default(); let rs_bpe_ser = serde_json::to_string(&rs_bpe).unwrap(); let rs_wrapper: ModelWrapper = rs_bpe.into(); let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap(); let py_model = PyModel::from(rs_wrapper); let py_ser = serde_json::to_string(&py_model).unwrap(); assert_eq!(py_ser, rs_bpe_ser); assert_eq!(py_ser, rs_wrapper_ser); let py_model: PyModel = serde_json::from_str(&rs_bpe_ser).unwrap(); match *py_model.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => 
(), _ => panic!("Expected Bert postprocessor."), }; let py_model: PyModel = serde_json::from_str(&rs_wrapper_ser).unwrap(); match *py_model.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => (), _ => panic!("Expected Bert postprocessor."), }; } }
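A hedged usage sketch of the model bindings above (file paths and unknown-token values are placeholders); the read_file/from_file helpers described in the docstrings are typically driven from Python like this:

from tokenizers.models import BPE, WordPiece, WordLevel, Unigram

# Roughly equivalent to BPE.from_file("vocab.json", "merges.txt", unk_token="[UNK]"),
# as the docstring above explains; the paths are placeholders.
vocab, merges = BPE.read_file("vocab.json", "merges.txt")
bpe = BPE(vocab, merges, unk_token="[UNK]")

wordpiece = WordPiece(WordPiece.read_file("vocab.txt"), unk_token="[UNK]")
wordlevel = WordLevel(WordLevel.read_file("vocab.json"), unk_token="[UNK]")
unigram = Unigram([("<unk>", 0.0), ("am", -0.2442)], unk_id=0, byte_fallback=False)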
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/normalizers.rs
use std::sync::{Arc, RwLock}; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use crate::error::ToPyResult; use crate::utils::{PyNormalizedString, PyNormalizedStringRefMut, PyPattern}; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::normalizers::{ BertNormalizer, Lowercase, Nmt, NormalizerWrapper, Precompiled, Prepend, Replace, Strip, StripAccents, NFC, NFD, NFKC, NFKD, }; use tk::{NormalizedString, Normalizer}; use tokenizers as tk; /// Represents the different kind of NormalizedString we can receive from Python: /// - Owned: Created in Python and owned by Python /// - RefMut: A mutable reference to a NormalizedString owned by Rust #[derive(FromPyObject)] enum PyNormalizedStringMut<'p> { Owned(PyRefMut<'p, PyNormalizedString>), RefMut(PyNormalizedStringRefMut), } impl PyNormalizedStringMut<'_> { /// Normalized the underlying `NormalizedString` using the provided normalizer pub fn normalize_with<N>(&mut self, normalizer: &N) -> PyResult<()> where N: Normalizer, { match self { PyNormalizedStringMut::Owned(ref mut n) => normalizer.normalize(&mut n.normalized), PyNormalizedStringMut::RefMut(n) => n.map_as_mut(|n| normalizer.normalize(n))?, } .map_err(|e| exceptions::PyException::new_err(format!("{}", e))) } } /// Base class for all normalizers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// Normalizer will return an instance of this class when instantiated. #[pyclass(dict, module = "tokenizers.normalizers", name = "Normalizer", subclass)] #[derive(Clone, Serialize, Deserialize)] pub struct PyNormalizer { #[serde(flatten)] pub(crate) normalizer: PyNormalizerTypeWrapper, } impl PyNormalizer { pub(crate) fn new(normalizer: PyNormalizerTypeWrapper) -> Self { PyNormalizer { normalizer } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match self.normalizer { PyNormalizerTypeWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))?.into_py(py), PyNormalizerTypeWrapper::Single(ref inner) => match &*inner.as_ref().read().unwrap() { PyNormalizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyNormalizerWrapper::Wrapped(ref inner) => match inner { NormalizerWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } NormalizerWrapper::BertNormalizer(_) => { Py::new(py, (PyBertNormalizer {}, base))?.into_py(py) } NormalizerWrapper::StripNormalizer(_) => { Py::new(py, (PyBertNormalizer {}, base))?.into_py(py) } NormalizerWrapper::Prepend(_) => Py::new(py, (PyPrepend {}, base))?.into_py(py), NormalizerWrapper::StripAccents(_) => { Py::new(py, (PyStripAccents {}, base))?.into_py(py) } NormalizerWrapper::NFC(_) => Py::new(py, (PyNFC {}, base))?.into_py(py), NormalizerWrapper::NFD(_) => Py::new(py, (PyNFD {}, base))?.into_py(py), NormalizerWrapper::NFKC(_) => Py::new(py, (PyNFKC {}, base))?.into_py(py), NormalizerWrapper::NFKD(_) => Py::new(py, (PyNFKD {}, base))?.into_py(py), NormalizerWrapper::Lowercase(_) => { Py::new(py, (PyLowercase {}, base))?.into_py(py) } NormalizerWrapper::Precompiled(_) => { Py::new(py, (PyPrecompiled {}, base))?.into_py(py) } NormalizerWrapper::Replace(_) => Py::new(py, (PyReplace {}, base))?.into_py(py), NormalizerWrapper::Nmt(_) => Py::new(py, (PyNmt {}, base))?.into_py(py), }, }, }) } } impl Normalizer for PyNormalizer { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { self.normalizer.normalize(normalized) } } #[pymethods] impl PyNormalizer { 
#[staticmethod] fn custom(obj: PyObject) -> Self { Self { normalizer: PyNormalizerWrapper::Custom(CustomNormalizer::new(obj)).into(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.normalizer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Normalizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.normalizer = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Normalizer: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Normalize a :class:`~tokenizers.NormalizedString` in-place /// /// This method allows to modify a :class:`~tokenizers.NormalizedString` to /// keep track of the alignment information. If you just want to see the result /// of the normalization on a raw string, you can use /// :meth:`~tokenizers.normalizers.Normalizer.normalize_str` /// /// Args: /// normalized (:class:`~tokenizers.NormalizedString`): /// The normalized string on which to apply this /// :class:`~tokenizers.normalizers.Normalizer` #[pyo3(text_signature = "(self, normalized)")] fn normalize(&self, mut normalized: PyNormalizedStringMut) -> PyResult<()> { normalized.normalize_with(&self.normalizer) } /// Normalize the given string /// /// This method provides a way to visualize the effect of a /// :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment /// information. If you need to get/convert offsets, you can use /// :meth:`~tokenizers.normalizers.Normalizer.normalize` /// /// Args: /// sequence (:obj:`str`): /// A string to normalize /// /// Returns: /// :obj:`str`: A string after normalization #[pyo3(text_signature = "(self, sequence)")] fn normalize_str(&self, sequence: &str) -> PyResult<String> { let mut normalized = NormalizedString::from(sequence); ToPyResult(self.normalizer.normalize(&mut normalized)).into_py()?; Ok(normalized.get().to_owned()) } } macro_rules! getter { ($self: ident, $variant: ident, $name: ident) => {{ let super_ = $self.as_ref(); if let PyNormalizerTypeWrapper::Single(ref norm) = super_.normalizer { let wrapper = norm.read().unwrap(); if let PyNormalizerWrapper::Wrapped(NormalizerWrapper::$variant(o)) = (*wrapper).clone() { o.$name } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyNormalizerTypeWrapper::Single(ref norm) = super_.normalizer { let mut wrapper = norm.write().unwrap(); if let PyNormalizerWrapper::Wrapped(NormalizerWrapper::$variant(ref mut o)) = *wrapper { o.$name = $value; } } }}; } /// BertNormalizer /// /// Takes care of normalizing raw text before giving it to a Bert model. /// This includes cleaning the text, handling accents, chinese chars and lowercasing /// /// Args: /// clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to clean the text, by removing any control characters /// and replacing all whitespaces by the classic one. /// /// handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to handle chinese chars by putting spaces around them. /// /// strip_accents (:obj:`bool`, `optional`): /// Whether to strip all accents. 
If this option is not specified (ie == None), /// then it will be determined by the value for `lowercase` (as in the original Bert). /// /// lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to lowercase. #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "BertNormalizer")] pub struct PyBertNormalizer {} #[pymethods] impl PyBertNormalizer { #[getter] fn get_clean_text(self_: PyRef<Self>) -> bool { getter!(self_, BertNormalizer, clean_text) } #[setter] fn set_clean_text(self_: PyRef<Self>, clean_text: bool) { setter!(self_, BertNormalizer, clean_text, clean_text); } #[getter] fn get_handle_chinese_chars(self_: PyRef<Self>) -> bool { getter!(self_, BertNormalizer, handle_chinese_chars) } #[setter] fn set_handle_chinese_chars(self_: PyRef<Self>, handle_chinese_chars: bool) { setter!( self_, BertNormalizer, handle_chinese_chars, handle_chinese_chars ); } #[getter] fn get_strip_accents(self_: PyRef<Self>) -> Option<bool> { getter!(self_, BertNormalizer, strip_accents) } #[setter] fn set_strip_accents(self_: PyRef<Self>, strip_accents: Option<bool>) { setter!(self_, BertNormalizer, strip_accents, strip_accents); } #[getter] fn get_lowercase(self_: PyRef<Self>) -> bool { getter!(self_, BertNormalizer, lowercase) } #[setter] fn set_lowercase(self_: PyRef<Self>, lowercase: bool) { setter!(self_, BertNormalizer, lowercase, lowercase) } #[new] #[pyo3(signature = ( clean_text = true, handle_chinese_chars = true, strip_accents = None, lowercase = true ), text_signature = "(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True)")] fn new( clean_text: bool, handle_chinese_chars: bool, strip_accents: Option<bool>, lowercase: bool, ) -> (Self, PyNormalizer) { let normalizer = BertNormalizer::new(clean_text, handle_chinese_chars, strip_accents, lowercase); (PyBertNormalizer {}, normalizer.into()) } } /// NFD Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFD")] pub struct PyNFD {} #[pymethods] impl PyNFD { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNFD {}, PyNormalizer::new(NFD.into())) } } /// NFKD Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFKD")] pub struct PyNFKD {} #[pymethods] impl PyNFKD { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNFKD {}, NFKD.into()) } } /// NFC Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFC")] pub struct PyNFC {} #[pymethods] impl PyNFC { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNFC {}, NFC.into()) } } /// NFKC Unicode Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "NFKC")] pub struct PyNFKC {} #[pymethods] impl PyNFKC { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNFKC {}, NFKC.into()) } } /// Allows concatenating multiple other Normalizer as a Sequence. 
/// All the normalizers run in sequence in the given order /// /// Args: /// normalizers (:obj:`List[Normalizer]`): /// A list of Normalizer to be run as a sequence #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(text_signature = None)] fn new(normalizers: &PyList) -> PyResult<(Self, PyNormalizer)> { let mut sequence = Vec::with_capacity(normalizers.len()); for n in normalizers.iter() { let normalizer: PyRef<PyNormalizer> = n.extract()?; match &normalizer.normalizer { PyNormalizerTypeWrapper::Sequence(inner) => sequence.extend(inner.iter().cloned()), PyNormalizerTypeWrapper::Single(inner) => sequence.push(inner.clone()), } } Ok(( PySequence {}, PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(sequence)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } fn __len__(&self) -> usize { 0 } } /// Lowercase Normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Lowercase")] pub struct PyLowercase {} #[pymethods] impl PyLowercase { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyLowercase {}, Lowercase.into()) } } /// Strip normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Strip")] pub struct PyStrip {} #[pymethods] impl PyStrip { #[getter] fn get_left(self_: PyRef<Self>) -> bool { getter!(self_, StripNormalizer, strip_left) } #[setter] fn set_left(self_: PyRef<Self>, left: bool) { setter!(self_, StripNormalizer, strip_left, left) } #[getter] fn get_right(self_: PyRef<Self>) -> bool { getter!(self_, StripNormalizer, strip_right) } #[setter] fn set_right(self_: PyRef<Self>, right: bool) { setter!(self_, StripNormalizer, strip_right, right) } #[new] #[pyo3(signature = (left = true, right = true), text_signature = "(self, left=True, right=True)")] fn new(left: bool, right: bool) -> (Self, PyNormalizer) { (PyStrip {}, Strip::new(left, right).into()) } } /// Prepend normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Prepend")] pub struct PyPrepend {} #[pymethods] impl PyPrepend { #[getter] fn get_prepend(self_: PyRef<Self>) -> String { getter!(self_, Prepend, prepend) } #[setter] fn set_prepend(self_: PyRef<Self>, prepend: String) { setter!(self_, Prepend, prepend, prepend) } #[new] #[pyo3(signature = (prepend="▁".to_string()), text_signature = "(self, prepend)")] fn new(prepend: String) -> (Self, PyNormalizer) { (PyPrepend {}, Prepend::new(prepend).into()) } } /// StripAccents normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "StripAccents")] pub struct PyStripAccents {} #[pymethods] impl PyStripAccents { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyStripAccents {}, StripAccents.into()) } } /// Nmt normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Nmt")] pub struct PyNmt {} #[pymethods] impl PyNmt { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyNormalizer) { (PyNmt {}, Nmt.into()) } } /// Precompiled normalizer /// Don't use manually it is used for compatiblity for SentencePiece. 
#[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Precompiled")] pub struct PyPrecompiled {} #[pymethods] impl PyPrecompiled { #[new] #[pyo3(text_signature = "(self, precompiled_charsmap)")] fn new(py_precompiled_charsmap: &PyBytes) -> PyResult<(Self, PyNormalizer)> { let precompiled_charsmap: &[u8] = FromPyObject::extract(py_precompiled_charsmap)?; Ok(( PyPrecompiled {}, Precompiled::from(precompiled_charsmap) .map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to build Precompiled normalizer: {}", e )) })? .into(), )) } } /// Replace normalizer #[pyclass(extends=PyNormalizer, module = "tokenizers.normalizers", name = "Replace")] pub struct PyReplace {} #[pymethods] impl PyReplace { #[new] #[pyo3(text_signature = "(self, pattern, content)")] fn new(pattern: PyPattern, content: String) -> PyResult<(Self, PyNormalizer)> { Ok(( PyReplace {}, ToPyResult(Replace::new(pattern, content)).into_py()?.into(), )) } } #[derive(Debug, Clone)] pub(crate) struct CustomNormalizer { inner: PyObject, } impl CustomNormalizer { pub fn new(inner: PyObject) -> Self { Self { inner } } } impl tk::tokenizer::Normalizer for CustomNormalizer { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { Python::with_gil(|py| { let normalized = PyNormalizedStringRefMut::new(normalized); let py_normalized = self.inner.as_ref(py); py_normalized.call_method("normalize", (normalized.get(),), None)?; Ok(()) }) } } impl Serialize for CustomNormalizer { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom Normalizer cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomNormalizer { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Err(serde::de::Error::custom( "Custom Normalizer cannot be deserialized", )) } } #[derive(Debug, Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyNormalizerWrapper { Custom(CustomNormalizer), Wrapped(NormalizerWrapper), } impl Serialize for PyNormalizerWrapper { fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where S: Serializer, { match self { PyNormalizerWrapper::Wrapped(inner) => inner.serialize(serializer), PyNormalizerWrapper::Custom(inner) => inner.serialize(serializer), } } } #[derive(Debug, Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyNormalizerTypeWrapper { Sequence(Vec<Arc<RwLock<PyNormalizerWrapper>>>), Single(Arc<RwLock<PyNormalizerWrapper>>), } impl Serialize for PyNormalizerTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyNormalizerTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("normalizers", seq)?; ser.end() } PyNormalizerTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyNormalizerWrapper where I: Into<NormalizerWrapper>, { fn from(norm: I) -> Self { PyNormalizerWrapper::Wrapped(norm.into()) } } impl<I> From<I> for PyNormalizerTypeWrapper where I: Into<PyNormalizerWrapper>, { fn from(norm: I) -> Self { PyNormalizerTypeWrapper::Single(Arc::new(RwLock::new(norm.into()))) } } impl<I> From<I> for PyNormalizer where I: Into<NormalizerWrapper>, { fn from(norm: I) -> Self { PyNormalizer { normalizer: norm.into().into(), } } } impl Normalizer for PyNormalizerTypeWrapper { fn normalize(&self, normalized: &mut 
NormalizedString) -> tk::Result<()> { match self { PyNormalizerTypeWrapper::Single(inner) => inner.read().unwrap().normalize(normalized), PyNormalizerTypeWrapper::Sequence(inner) => inner .iter() .try_for_each(|n| n.read().unwrap().normalize(normalized)), } } } impl Normalizer for PyNormalizerWrapper { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { match self { PyNormalizerWrapper::Wrapped(inner) => inner.normalize(normalized), PyNormalizerWrapper::Custom(inner) => inner.normalize(normalized), } } } /// Normalizers Module #[pymodule] pub fn normalizers(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyNormalizer>()?; m.add_class::<PyBertNormalizer>()?; m.add_class::<PyNFD>()?; m.add_class::<PyNFKD>()?; m.add_class::<PyNFC>()?; m.add_class::<PyNFKC>()?; m.add_class::<PySequence>()?; m.add_class::<PyLowercase>()?; m.add_class::<PyStrip>()?; m.add_class::<PyStripAccents>()?; m.add_class::<PyPrepend>()?; m.add_class::<PyNmt>()?; m.add_class::<PyPrecompiled>()?; m.add_class::<PyReplace>()?; Ok(()) } #[cfg(test)] mod test { use pyo3::prelude::*; use tk::normalizers::unicode::{NFC, NFKC}; use tk::normalizers::utils::Sequence; use tk::normalizers::NormalizerWrapper; use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper, PyNormalizerWrapper}; #[test] fn get_subtype() { Python::with_gil(|py| { let py_norm = PyNormalizer::new(NFC.into()); let py_nfc = py_norm.get_as_subtype(py).unwrap(); assert_eq!("NFC", py_nfc.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyNormalizerWrapper = NFKC.into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = NormalizerWrapper::NFKC(NFKC); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_norm: PyNormalizer = serde_json::from_str(&rs_ser).unwrap(); match py_norm.normalizer { PyNormalizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyNormalizerWrapper::Wrapped(NormalizerWrapper::NFKC(_)) => {} _ => panic!("Expected NFKC"), }, _ => panic!("Expected wrapped, not sequence."), } let py_seq: PyNormalizerWrapper = Sequence::new(vec![NFC.into(), NFKC.into()]).into(); let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap(); let rs_wrapped = NormalizerWrapper::Sequence(Sequence::new(vec![NFC.into(), NFKC.into()])); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); let py_seq = PyNormalizer::new(py_seq.into()); let py_ser = serde_json::to_string(&py_seq).unwrap(); assert_eq!(py_wrapper_ser, py_ser); let rs_seq = Sequence::new(vec![NFC.into(), NFKC.into()]); let rs_ser = serde_json::to_string(&rs_seq).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); } #[test] fn deserialize_sequence() { let string = r#"{"type": "NFKC"}"#; let normalizer: PyNormalizer = serde_json::from_str(string).unwrap(); match normalizer.normalizer { PyNormalizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyNormalizerWrapper::Wrapped(NormalizerWrapper::NFKC(_)) => {} _ => panic!("Expected NFKC"), }, _ => panic!("Expected wrapped, not sequence."), } let sequence_string = format!(r#"{{"type": "Sequence", "normalizers": [{}]}}"#, string); let normalizer: PyNormalizer = serde_json::from_str(&sequence_string).unwrap(); match normalizer.normalizer { PyNormalizerTypeWrapper::Single(inner) => match &*inner.as_ref().read().unwrap() { PyNormalizerWrapper::Wrapped(NormalizerWrapper::Sequence(sequence)) => { let normalizers = sequence.get_normalizers(); 
assert_eq!(normalizers.len(), 1); match normalizers[0] { NormalizerWrapper::NFKC(_) => {} _ => panic!("Expected NFKC"), } } _ => panic!("Expected sequence"), }, _ => panic!("Expected single"), }; } }
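A short sketch of how the normalizers registered above compose from Python; the input string and the expected output are only illustrative:

from tokenizers.normalizers import Sequence, NFD, Lowercase, StripAccents

# Chain normalizers, mirroring the PySequence binding above.
normalizer = Sequence([NFD(), Lowercase(), StripAccents()])

# normalize_str shows the effect without alignment tracking (see the docstring above).
print(normalizer.normalize_str("Héllò hôw are ü?"))  # -> "hello how are u?"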
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/lib.rs
#![warn(clippy::all)]
#![allow(clippy::upper_case_acronyms)]

// Many false positives with pyo3; it seems &str and &PyAny get flagged
extern crate tokenizers as tk;

mod decoders;
mod encoding;
mod error;
mod models;
mod normalizers;
mod pre_tokenizers;
mod processors;
mod token;
mod tokenizer;
mod trainers;
mod utils;

use pyo3::prelude::*;
use pyo3::wrap_pymodule;

pub const VERSION: &str = env!("CARGO_PKG_VERSION");

// For users using multiprocessing in python, it is quite easy to fork the process running
// tokenizers, ending up with a deadlock because we internally make use of multithreading. So
// we register a callback to be called in the event of a fork so that we can warn the user.
#[cfg(target_family = "unix")]
static mut REGISTERED_FORK_CALLBACK: bool = false;
#[cfg(target_family = "unix")]
extern "C" fn child_after_fork() {
    use tk::parallelism::*;
    if has_parallelism_been_used() && !is_parallelism_configured() {
        eprintln!(
            "huggingface/tokenizers: The current process just got forked, after parallelism has \
            already been used. Disabling parallelism to avoid deadlocks..."
        );
        eprintln!("To disable this warning, you can either:");
        eprintln!(
            "\t- Avoid using `tokenizers` before the fork if possible\n\
            \t- Explicitly set the environment variable {}=(true | false)",
            ENV_VARIABLE
        );
        set_parallelism(false);
    }
}

/// Tokenizers Module
#[pymodule]
pub fn tokenizers(_py: Python, m: &PyModule) -> PyResult<()> {
    let _ = env_logger::try_init_from_env("TOKENIZERS_LOG");

    // Register the fork callback
    #[cfg(target_family = "unix")]
    unsafe {
        if !REGISTERED_FORK_CALLBACK {
            libc::pthread_atfork(None, None, Some(child_after_fork));
            REGISTERED_FORK_CALLBACK = true;
        }
    }

    m.add_class::<tokenizer::PyTokenizer>()?;
    m.add_class::<tokenizer::PyAddedToken>()?;
    m.add_class::<token::PyToken>()?;
    m.add_class::<encoding::PyEncoding>()?;
    m.add_class::<utils::PyRegex>()?;
    m.add_class::<utils::PyNormalizedString>()?;
    m.add_class::<utils::PyPreTokenizedString>()?;
    m.add_wrapped(wrap_pymodule!(models::models))?;
    m.add_wrapped(wrap_pymodule!(pre_tokenizers::pre_tokenizers))?;
    m.add_wrapped(wrap_pymodule!(decoders::decoders))?;
    m.add_wrapped(wrap_pymodule!(processors::processors))?;
    m.add_wrapped(wrap_pymodule!(normalizers::normalizers))?;
    m.add_wrapped(wrap_pymodule!(trainers::trainers))?;
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;

    Ok(())
}
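The module above registers a fork callback that warns when parallelism has already been used, and initialises Rust-side logging from the TOKENIZERS_LOG environment variable. A sketch of how a user would act on that from Python; the parallelism variable name comes from tk::parallelism::ENV_VARIABLE and is assumed here to be TOKENIZERS_PARALLELISM, as in published builds:

import os

# Configure parallelism explicitly before any fork so child_after_fork() stays silent.
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # assumed value of ENV_VARIABLE
# env_logger reads TOKENIZERS_LOG when the extension module is first imported.
os.environ["TOKENIZERS_LOG"] = "info"

import tokenizers  # imported after setting the variables so they take effect

print(tokenizers.__version__)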
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/token.rs
use pyo3::prelude::*;
use tk::Token;

#[pyclass(module = "tokenizers", name = "Token")]
#[derive(Clone)]
pub struct PyToken {
    token: Token,
}

impl From<Token> for PyToken {
    fn from(token: Token) -> Self {
        Self { token }
    }
}

impl From<PyToken> for Token {
    fn from(token: PyToken) -> Self {
        token.token
    }
}

#[pymethods]
impl PyToken {
    #[new]
    #[pyo3(text_signature = None)]
    fn new(id: u32, value: String, offsets: (usize, usize)) -> PyToken {
        Token::new(id, value, offsets).into()
    }

    #[getter]
    fn get_id(&self) -> u32 {
        self.token.id
    }

    #[getter]
    fn get_value(&self) -> &str {
        &self.token.value
    }

    #[getter]
    fn get_offsets(&self) -> (usize, usize) {
        self.token.offsets
    }

    fn as_tuple(&self) -> (u32, &str, (usize, usize)) {
        (self.token.id, &self.token.value, self.token.offsets)
    }
}
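The Token binding above is what Model.tokenize returns on the Python side; a minimal sketch, with a made-up two-merge BPE vocabulary used purely for illustration:

from tokenizers.models import BPE

model = BPE({"a": 0, "b": 1, "ab": 2}, [("a", "b")])
for token in model.tokenize("ab"):
    # Each Token exposes the id, value and offsets getters defined above.
    print(token.id, token.value, token.offsets)  # 2 ab (0, 2)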
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/tokenizer.rs
use std::collections::{hash_map::DefaultHasher, HashMap}; use std::hash::{Hash, Hasher}; use numpy::{npyffi, PyArray1}; use pyo3::class::basic::CompareOp; use pyo3::exceptions; use pyo3::intern; use pyo3::prelude::*; use pyo3::types::*; use pyo3::AsPyPointer; use tk::models::bpe::BPE; use tk::tokenizer::{ Model, PaddingDirection, PaddingParams, PaddingStrategy, PostProcessor, TokenizerImpl, TruncationDirection, TruncationParams, TruncationStrategy, }; use tk::utils::iter::ResultShunt; use tokenizers as tk; use super::decoders::PyDecoder; use super::encoding::PyEncoding; use super::error::{PyError, ToPyResult}; use super::models::PyModel; use super::normalizers::PyNormalizer; use super::pre_tokenizers::PyPreTokenizer; use super::trainers::PyTrainer; use crate::processors::PyPostProcessor; use crate::utils::{MaybeSizedIterator, PyBufferedIterator}; use std::collections::BTreeMap; /// Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`. /// It can have special options that defines the way it should behave. /// /// Args: /// content (:obj:`str`): The content of the token /// /// single_word (:obj:`bool`, defaults to :obj:`False`): /// Defines whether this token should only match single words. If :obj:`True`, this /// token will never match inside of a word. For example the token ``ing`` would match /// on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`. /// The notion of "`inside of a word`" is defined by the word boundaries pattern in /// regular expressions (ie. the token should start and end with word boundaries). /// /// lstrip (:obj:`bool`, defaults to :obj:`False`): /// Defines whether this token should strip all potential whitespaces on its left side. /// If :obj:`True`, this token will greedily match any whitespace on its left. For /// example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text /// ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left). /// /// rstrip (:obj:`bool`, defaults to :obj:`False`): /// Defines whether this token should strip all potential whitespaces on its right /// side. If :obj:`True`, this token will greedily match any whitespace on its right. /// It works just like :obj:`lstrip` but on the right. /// /// normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): /// Defines whether this token should match against the normalized version of the input /// text. For example, with the added token ``"yesterday"``, and a normalizer in charge of /// lowercasing the text, the token could be extract from the input ``"I saw a lion /// Yesterday"``. /// special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): /// Defines whether this token should be skipped when decoding. 
/// #[pyclass(dict, module = "tokenizers", name = "AddedToken")] pub struct PyAddedToken { pub content: String, pub special: bool, pub single_word: Option<bool>, pub lstrip: Option<bool>, pub rstrip: Option<bool>, pub normalized: Option<bool>, } impl PyAddedToken { pub fn from<S: Into<String>>(content: S, special: Option<bool>) -> Self { Self { content: content.into(), special: special.unwrap_or(false), single_word: None, lstrip: None, rstrip: None, normalized: None, } } pub fn get_token(&self) -> tk::tokenizer::AddedToken { let mut token = tk::AddedToken::from(&self.content, self.special); if let Some(sw) = self.single_word { token = token.single_word(sw); } if let Some(ls) = self.lstrip { token = token.lstrip(ls); } if let Some(rs) = self.rstrip { token = token.rstrip(rs); } if let Some(n) = self.normalized { token = token.normalized(n); } token } pub fn as_pydict<'py>(&self, py: Python<'py>) -> PyResult<&'py PyDict> { let dict = PyDict::new(py); let token = self.get_token(); dict.set_item("content", token.content)?; dict.set_item("single_word", token.single_word)?; dict.set_item("lstrip", token.lstrip)?; dict.set_item("rstrip", token.rstrip)?; dict.set_item("normalized", token.normalized)?; dict.set_item("special", token.special)?; Ok(dict) } } impl From<tk::AddedToken> for PyAddedToken { fn from(token: tk::AddedToken) -> Self { Self { content: token.content, single_word: Some(token.single_word), lstrip: Some(token.lstrip), rstrip: Some(token.rstrip), normalized: Some(token.normalized), special: token.special, } } } #[pymethods] impl PyAddedToken { #[new] #[pyo3(signature = (content=None, **kwargs), text_signature = "(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False)")] fn __new__(content: Option<&str>, kwargs: Option<&PyDict>) -> PyResult<Self> { let mut token = PyAddedToken::from(content.unwrap_or(""), None); if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "single_word" => token.single_word = Some(value.extract()?), "lstrip" => token.lstrip = Some(value.extract()?), "rstrip" => token.rstrip = Some(value.extract()?), "normalized" => token.normalized = Some(value.extract()?), "special" => token.special = value.extract()?, _ => println!("Ignored unknown kwarg option {}", key), } } } Ok(token) } fn __getstate__<'py>(&self, py: Python<'py>) -> PyResult<&'py PyDict> { self.as_pydict(py) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyDict>(py) { Ok(state) => { for (key, value) in state { let key: &str = key.extract()?; match key { "content" => self.content = value.extract()?, "single_word" => self.single_word = Some(value.extract()?), "lstrip" => self.lstrip = Some(value.extract()?), "rstrip" => self.rstrip = Some(value.extract()?), "normalized" => self.normalized = Some(value.extract()?), "special" => self.special = value.extract()?, _ => {} } } Ok(()) } Err(e) => Err(e), } } /// Get the content of this :obj:`AddedToken` #[getter] fn get_content(&self) -> &str { &self.content } /// Set the content of this :obj:`AddedToken` #[setter] fn set_content(&mut self, content: String) { self.content = content; } /// Get the value of the :obj:`rstrip` option #[getter] fn get_rstrip(&self) -> bool { self.get_token().rstrip } /// Get the value of the :obj:`lstrip` option #[getter] fn get_lstrip(&self) -> bool { self.get_token().lstrip } /// Get the value of the :obj:`single_word` option #[getter] fn get_single_word(&self) -> bool { 
self.get_token().single_word } /// Get the value of the :obj:`normalized` option #[getter] fn get_normalized(&self) -> bool { self.get_token().normalized } /// Get the value of the :obj:`special` option #[getter] fn get_special(&self) -> bool { self.get_token().special } /// Set the value of the :obj:`special` option #[setter] fn set_special(&mut self, special: bool) { self.special = special; } fn __str__(&self) -> PyResult<&str> { Ok(&self.content) } fn __repr__(&self) -> PyResult<String> { let bool_to_python = |p| match p { true => "True", false => "False", }; let token = self.get_token(); Ok(format!( "AddedToken(\"{}\", rstrip={}, lstrip={}, single_word={}, normalized={}, special={})", self.content, bool_to_python(token.rstrip), bool_to_python(token.lstrip), bool_to_python(token.single_word), bool_to_python(token.normalized), bool_to_python(token.special) )) } fn __richcmp__(&self, other: Py<PyAddedToken>, op: CompareOp) -> bool { use CompareOp::*; Python::with_gil(|py| match op { Lt | Le | Gt | Ge => false, Eq => self.get_token() == other.borrow(py).get_token(), Ne => self.get_token() != other.borrow(py).get_token(), }) } fn __hash__(&self) -> u64 { let mut hasher = DefaultHasher::new(); self.get_token().hash(&mut hasher); hasher.finish() } } struct TextInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for TextInputSequence<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { let err = exceptions::PyTypeError::new_err("TextInputSequence must be str"); if let Ok(s) = ob.downcast::<PyString>() { Ok(Self(s.to_string_lossy().into())) } else { Err(err) } } } impl<'s> From<TextInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: TextInputSequence<'s>) -> Self { s.0 } } struct PyArrayUnicode(Vec<String>); impl FromPyObject<'_> for PyArrayUnicode { fn extract(ob: &PyAny) -> PyResult<Self> { // SAFETY Making sure the pointer is a valid numpy array requires calling numpy C code if unsafe { npyffi::PyArray_Check(ob.py(), ob.as_ptr()) } == 0 { return Err(exceptions::PyTypeError::new_err("Expected an np.array")); } let arr = ob.as_ptr() as *mut npyffi::PyArrayObject; // SAFETY Getting all the metadata about the numpy array to check its sanity let (type_num, elsize, alignment, data, nd, flags) = unsafe { let desc = (*arr).descr; ( (*desc).type_num, (*desc).elsize as usize, (*desc).alignment as usize, (*arr).data, (*arr).nd, (*arr).flags, ) }; if nd != 1 { return Err(exceptions::PyTypeError::new_err( "Expected a 1 dimensional np.array", )); } if flags & (npyffi::NPY_ARRAY_C_CONTIGUOUS | npyffi::NPY_ARRAY_F_CONTIGUOUS) == 0 { return Err(exceptions::PyTypeError::new_err( "Expected a contiguous np.array", )); } if type_num != npyffi::types::NPY_TYPES::NPY_UNICODE as i32 { return Err(exceptions::PyTypeError::new_err( "Expected a np.array[dtype='U']", )); } // SAFETY Looking at the raw numpy data to create new owned Rust strings via copies (so it's safe afterwards). 
unsafe { let n_elem = *(*arr).dimensions as usize; let all_bytes = std::slice::from_raw_parts(data as *const u8, elsize * n_elem); let seq = (0..n_elem) .map(|i| { let bytes = &all_bytes[i * elsize..(i + 1) * elsize]; let unicode = pyo3::ffi::PyUnicode_FromKindAndData( pyo3::ffi::PyUnicode_4BYTE_KIND as _, bytes.as_ptr() as *const _, elsize as isize / alignment as isize, ); let py = ob.py(); let obj = PyObject::from_owned_ptr(py, unicode); let s = obj.downcast::<PyString>(py)?; Ok(s.to_string_lossy().trim_matches(char::from(0)).to_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } } impl From<PyArrayUnicode> for tk::InputSequence<'_> { fn from(s: PyArrayUnicode) -> Self { s.0.into() } } struct PyArrayStr(Vec<String>); impl FromPyObject<'_> for PyArrayStr { fn extract(ob: &PyAny) -> PyResult<Self> { let array = ob.downcast::<PyArray1<PyObject>>()?; let seq = array .readonly() .as_array() .iter() .map(|obj| { let s = obj.downcast::<PyString>(ob.py())?; Ok(s.to_string_lossy().into_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } impl From<PyArrayStr> for tk::InputSequence<'_> { fn from(s: PyArrayStr) -> Self { s.0.into() } } struct PreTokenizedInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for PreTokenizedInputSequence<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(seq) = ob.extract::<PyArrayUnicode>() { return Ok(Self(seq.into())); } if let Ok(seq) = ob.extract::<PyArrayStr>() { return Ok(Self(seq.into())); } if let Ok(s) = ob.downcast::<PyList>() { if let Ok(seq) = s.extract::<Vec<&str>>() { return Ok(Self(seq.into())); } } if let Ok(s) = ob.downcast::<PyTuple>() { if let Ok(seq) = s.extract::<Vec<&str>>() { return Ok(Self(seq.into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedInputSequence must be Union[List[str], Tuple[str]]", )) } } impl<'s> From<PreTokenizedInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: PreTokenizedInputSequence<'s>) -> Self { s.0 } } struct TextEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for TextEncodeInput<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(i) = ob.extract::<TextInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(TextInputSequence, TextInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<&PyAny>>() { if arr.len() == 2 { let first = arr[0].extract::<TextInputSequence>()?; let second = arr[1].extract::<TextInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "TextEncodeInput must be Union[TextInputSequence, Tuple[InputSequence, InputSequence]]", )) } } impl<'s> From<TextEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: TextEncodeInput<'s>) -> Self { i.0 } } struct PreTokenizedEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for PreTokenizedEncodeInput<'s> { fn extract(ob: &'s PyAny) -> PyResult<Self> { if let Ok(i) = ob.extract::<PreTokenizedInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(PreTokenizedInputSequence, PreTokenizedInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<&PyAny>>() { if arr.len() == 2 { let first = arr[0].extract::<PreTokenizedInputSequence>()?; let second = arr[1].extract::<PreTokenizedInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedEncodeInput must be Union[PreTokenizedInputSequence, \ Tuple[PreTokenizedInputSequence, 
PreTokenizedInputSequence]]", )) } } impl<'s> From<PreTokenizedEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: PreTokenizedEncodeInput<'s>) -> Self { i.0 } } type Tokenizer = TokenizerImpl<PyModel, PyNormalizer, PyPreTokenizer, PyPostProcessor, PyDecoder>; /// A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input /// and outputs an :class:`~tokenizers.Encoding`. /// /// Args: /// model (:class:`~tokenizers.models.Model`): /// The core algorithm that this :obj:`Tokenizer` should be using. /// #[pyclass(dict, module = "tokenizers", name = "Tokenizer")] #[derive(Clone)] pub struct PyTokenizer { tokenizer: Tokenizer, } impl PyTokenizer { fn new(tokenizer: Tokenizer) -> Self { PyTokenizer { tokenizer } } fn from_model(model: PyModel) -> Self { PyTokenizer::new(TokenizerImpl::new(model)) } } #[pymethods] impl PyTokenizer { #[new] #[pyo3(text_signature = "(self, model)")] fn __new__(model: PyRef<PyModel>) -> Self { PyTokenizer::from_model(model.clone()) } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.tokenizer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Tokenizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.tokenizer = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Tokenizer: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { let model = PyModel::from(BPE::default()).into_py(py); PyTuple::new(py, vec![model]) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. /// /// Args: /// json (:obj:`str`): /// A valid JSON string representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(json)")] fn from_str(json: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(json.parse()).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a local JSON file representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(path)")] fn from_file(path: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. /// /// Args: /// buffer (:obj:`bytes`): /// A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(buffer)")] fn from_buffer(buffer: &PyBytes) -> PyResult<Self> { let tokenizer = serde_json::from_slice(buffer.as_bytes()).map_err(|e| { exceptions::PyValueError::new_err(format!( "Cannot instantiate Tokenizer from buffer: {}", e )) })?; Ok(Self { tokenizer }) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the /// Hugging Face Hub. 
/// /// Args: /// identifier (:obj:`str`): /// The identifier of a Model on the Hugging Face Hub, that contains /// a tokenizer.json file /// revision (:obj:`str`, defaults to `main`): /// A branch or commit id /// auth_token (:obj:`str`, `optional`, defaults to `None`): /// An optional auth token used to access private repositories on the /// Hugging Face Hub /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(signature = (identifier, revision = String::from("main"), auth_token = None))] #[pyo3(text_signature = "(identifier, revision=\"main\", auth_token=None)")] fn from_pretrained( identifier: &str, revision: String, auth_token: Option<String>, ) -> PyResult<Self> { let path = Python::with_gil(|py| -> PyResult<String> { let huggingface_hub = PyModule::import(py, intern!(py, "huggingface_hub"))?; let hf_hub_download = huggingface_hub.getattr(intern!(py, "hf_hub_download"))?; let kwargs = [ (intern!(py, "repo_id"), identifier), (intern!(py, "filename"), "tokenizer.json"), (intern!(py, "revision"), &revision), ] .into_py_dict(py); if let Some(auth_token) = auth_token { kwargs.set_item(intern!(py, "token"), auth_token)?; } let path: String = hf_hub_download.call((), Some(kwargs))?.extract()?; Ok(path) })?; let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into(); Ok(Self::new(tokenizer?)) } /// Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. /// /// Args: /// pretty (:obj:`bool`, defaults to :obj:`False`): /// Whether the JSON string should be pretty formatted. /// /// Returns: /// :obj:`str`: A string representing the serialized Tokenizer #[pyo3(signature = (pretty = false))] #[pyo3(text_signature = "(self, pretty=False)")] fn to_str(&self, pretty: bool) -> PyResult<String> { ToPyResult(self.tokenizer.to_string(pretty)).into() } /// Save the :class:`~tokenizers.Tokenizer` to the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a file in which to save the serialized tokenizer. /// /// pretty (:obj:`bool`, defaults to :obj:`True`): /// Whether the JSON file should be pretty formatted. #[pyo3(signature = (path, pretty = true))] #[pyo3(text_signature = "(self, path, pretty=True)")] fn save(&self, path: &str, pretty: bool) -> PyResult<()> { ToPyResult(self.tokenizer.save(path, pretty)).into() } /// Return the number of special tokens that would be added for single/pair sentences. 
/// :param is_pair: Boolean indicating if the input would be a single sentence or a pair /// :return: #[pyo3(text_signature = "(self, is_pair)")] fn num_special_tokens_to_add(&self, is_pair: bool) -> usize { self.tokenizer .get_post_processor() .map_or(0, |p| p.added_tokens(is_pair)) } /// Get the underlying vocabulary /// /// Args: /// with_added_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to include the added tokens /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary #[pyo3(signature = (with_added_tokens = true))] #[pyo3(text_signature = "(self, with_added_tokens=True)")] fn get_vocab(&self, with_added_tokens: bool) -> HashMap<String, u32> { self.tokenizer.get_vocab(with_added_tokens) } /// Get the underlying vocabulary /// /// Returns: /// :obj:`Dict[int, AddedToken]`: The vocabulary #[pyo3(signature = ())] #[pyo3(text_signature = "(self)")] fn get_added_tokens_decoder(&self) -> BTreeMap<u32, PyAddedToken> { let mut sorted_map = BTreeMap::new(); for (key, value) in self.tokenizer.get_added_tokens_decoder() { sorted_map.insert(key, value.into()); } sorted_map } /// Get the size of the underlying vocabulary /// /// Args: /// with_added_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to include the added tokens /// /// Returns: /// :obj:`int`: The size of the vocabulary #[pyo3(signature = (with_added_tokens = true))] #[pyo3(text_signature = "(self, with_added_tokens=True)")] fn get_vocab_size(&self, with_added_tokens: bool) -> usize { self.tokenizer.get_vocab_size(with_added_tokens) } /// Enable truncation /// /// Args: /// max_length (:obj:`int`): /// The max length at which to truncate /// /// stride (:obj:`int`, `optional`): /// The length of the previous first sequence to be included in the overflowing /// sequence /// /// strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`): /// The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or /// ``only_second``. /// /// direction (:obj:`str`, defaults to :obj:`right`): /// Truncate direction #[pyo3(signature = (max_length, **kwargs))] #[pyo3( text_signature = "(self, max_length, stride=0, strategy='longest_first', direction='right')" )] fn enable_truncation(&mut self, max_length: usize, kwargs: Option<&PyDict>) -> PyResult<()> { let mut params = TruncationParams { max_length, ..Default::default() }; if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "stride" => params.stride = value.extract()?, "strategy" => { let value: &str = value.extract()?; params.strategy = match value { "longest_first" => Ok(TruncationStrategy::LongestFirst), "only_first" => Ok(TruncationStrategy::OnlyFirst), "only_second" => Ok(TruncationStrategy::OnlySecond), _ => Err(PyError(format!( "Unknown `strategy`: `{}`. Use \ one of `longest_first`, `only_first`, or `only_second`", value )) .into_pyerr::<exceptions::PyValueError>()), }? } "direction" => { let value: &str = value.extract()?; params.direction = match value { "left" => Ok(TruncationDirection::Left), "right" => Ok(TruncationDirection::Right), _ => Err(PyError(format!( "Unknown `direction`: `{}`. Use \ one of `left` or `right`.", value )) .into_pyerr::<exceptions::PyValueError>()), }? 
} _ => println!("Ignored unknown kwarg option {}", key), } } } if let Err(error_message) = self.tokenizer.with_truncation(Some(params)) { return Err(PyError(error_message.to_string()).into_pyerr::<exceptions::PyValueError>()); } Ok(()) } /// Disable truncation #[pyo3(text_signature = "(self)")] fn no_truncation(&mut self) { self.tokenizer .with_truncation(None) .expect("Failed to set truncation to `None`! This should never happen"); } /// Get the currently set truncation parameters /// /// `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` /// /// Returns: /// (:obj:`dict`, `optional`): /// A dict with the current truncation parameters if truncation is enabled #[getter] fn get_truncation<'py>(&self, py: Python<'py>) -> PyResult<Option<&'py PyDict>> { self.tokenizer.get_truncation().map_or(Ok(None), |params| { let dict = PyDict::new(py); dict.set_item("max_length", params.max_length)?; dict.set_item("stride", params.stride)?; dict.set_item("strategy", params.strategy.as_ref())?; dict.set_item("direction", params.direction.as_ref())?; Ok(Some(dict)) }) } /// Enable the padding /// /// Args: /// direction (:obj:`str`, `optional`, defaults to :obj:`right`): /// The direction in which to pad. Can be either ``right`` or ``left`` /// /// pad_to_multiple_of (:obj:`int`, `optional`): /// If specified, the padding length should always snap to the next multiple of the /// given value. For example if we were going to pad witha length of 250 but /// ``pad_to_multiple_of=8`` then we will pad to 256. /// /// pad_id (:obj:`int`, defaults to 0): /// The id to be used when padding /// /// pad_type_id (:obj:`int`, defaults to 0): /// The type id to be used when padding /// /// pad_token (:obj:`str`, defaults to :obj:`[PAD]`): /// The pad token to be used when padding /// /// length (:obj:`int`, `optional`): /// If specified, the length at which to pad. If not specified we pad using the size of /// the longest sequence in a batch. #[pyo3(signature = (**kwargs))] #[pyo3( text_signature = "(self, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]', length=None, pad_to_multiple_of=None)" )] fn enable_padding(&mut self, kwargs: Option<&PyDict>) -> PyResult<()> { let mut params = PaddingParams::default(); if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "direction" => { let value: &str = value.extract()?; params.direction = match value { "left" => Ok(PaddingDirection::Left), "right" => Ok(PaddingDirection::Right), other => Err(PyError(format!( "Unknown `direction`: `{}`. Use \ one of `left` or `right`", other )) .into_pyerr::<exceptions::PyValueError>()), }?; } "pad_to_multiple_of" => { if let Some(multiple) = value.extract()? { params.pad_to_multiple_of = multiple; } } "pad_id" => params.pad_id = value.extract()?, "pad_type_id" => params.pad_type_id = value.extract()?, "pad_token" => params.pad_token = value.extract()?, "max_length" => { println!( "enable_padding(max_length=X) is deprecated, \ use enable_padding(length=X) instead" ); if let Some(l) = value.extract()? { params.strategy = PaddingStrategy::Fixed(l); } else { params.strategy = PaddingStrategy::BatchLongest; } } "length" => { if let Some(l) = value.extract()? 
{ params.strategy = PaddingStrategy::Fixed(l); } else { params.strategy = PaddingStrategy::BatchLongest; } } _ => println!("Ignored unknown kwarg option {}", key), } } } self.tokenizer.with_padding(Some(params)); Ok(()) } /// Disable padding #[pyo3(text_signature = "(self)")] fn no_padding(&mut self) { self.tokenizer.with_padding(None); } /// Get the current padding parameters /// /// `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` /// /// Returns: /// (:obj:`dict`, `optional`): /// A dict with the current padding parameters if padding is enabled #[getter] fn get_padding<'py>(&self, py: Python<'py>) -> PyResult<Option<&'py PyDict>> { self.tokenizer.get_padding().map_or(Ok(None), |params| { let dict = PyDict::new(py); dict.set_item( "length", match params.strategy { tk::PaddingStrategy::BatchLongest => None, tk::PaddingStrategy::Fixed(size) => Some(size), }, )?; dict.set_item("pad_to_multiple_of", params.pad_to_multiple_of)?; dict.set_item("pad_id", params.pad_id)?; dict.set_item("pad_token", &params.pad_token)?; dict.set_item("pad_type_id", params.pad_type_id)?; dict.set_item("direction", params.direction.as_ref())?; Ok(Some(dict)) }) } /// Encode the given sequence and pair. This method can process raw text sequences /// as well as already pre-tokenized sequences. /// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode("A single sequence")` /// encode("A sequence", "And its pair")` /// encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` /// encode( /// [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], /// is_pretokenized=True /// ) /// /// Args: /// sequence (:obj:`~tokenizers.InputSequence`): /// The main input sequence we want to encode. This sequence can be either raw /// text or pre-tokenized, according to the ``is_pretokenized`` argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` /// /// pair (:obj:`~tokenizers.InputSequence`, `optional`): /// An optional input sequence. The expected format is the same that for ``sequence``. /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The encoded result /// #[pyo3(signature = (sequence, pair = None, is_pretokenized = false, add_special_tokens = true))] #[pyo3( text_signature = "(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True)" )] fn encode( &self, sequence: &PyAny, pair: Option<&PyAny>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<PyEncoding> { let sequence: tk::InputSequence = if is_pretokenized { sequence.extract::<PreTokenizedInputSequence>()?.into() } else { sequence.extract::<TextInputSequence>()?.into() }; let input = match pair { Some(pair) => { let pair: tk::InputSequence = if is_pretokenized { pair.extract::<PreTokenizedInputSequence>()?.into() } else { pair.extract::<TextInputSequence>()?.into() }; tk::EncodeInput::Dual(sequence, pair) } None => tk::EncodeInput::Single(sequence), }; ToPyResult( self.tokenizer .encode_char_offsets(input, add_special_tokens) .map(|e| e.into()), ) .into() } /// Encode the given batch of inputs. This method accept both raw text sequences /// as well as already pre-tokenized sequences. 
/// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode_batch([ /// "A single sequence", /// ("A tuple with a sequence", "And its pair"), /// [ "A", "pre", "tokenized", "sequence" ], /// ([ "A", "pre", "tokenized", "sequence" ], "And its pair") /// ]) /// /// Args: /// input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): /// A list of single sequences or pair sequences to encode. Each sequence /// can be either raw text or pre-tokenized, according to the ``is_pretokenized`` /// argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch /// #[pyo3(signature = (input, is_pretokenized = false, add_special_tokens = true))] #[pyo3(text_signature = "(self, input, is_pretokenized=False, add_special_tokens=True)")] fn encode_batch( &self, py: Python<'_>, input: Vec<&PyAny>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<Vec<PyEncoding>> { let input: Vec<tk::EncodeInput> = input .into_iter() .map(|o| { let input: tk::EncodeInput = if is_pretokenized { o.extract::<PreTokenizedEncodeInput>()?.into() } else { o.extract::<TextEncodeInput>()?.into() }; Ok(input) }) .collect::<PyResult<Vec<tk::EncodeInput>>>()?; py.allow_threads(|| { ToPyResult( self.tokenizer .encode_batch_char_offsets(input, add_special_tokens) .map(|encodings| encodings.into_iter().map(|e| e.into()).collect()), ) .into() }) } /// Decode the given list of ids back to a string /// /// This is used to decode anything coming back from a Language Model /// /// Args: /// ids (A :obj:`List/Tuple` of :obj:`int`): /// The list of ids that we want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded string /// /// Returns: /// :obj:`str`: The decoded string #[pyo3(signature = (ids, skip_special_tokens = true))] #[pyo3(text_signature = "(self, ids, skip_special_tokens=True)")] fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> PyResult<String> { ToPyResult(self.tokenizer.decode(&ids, skip_special_tokens)).into() } /// Decode a batch of ids back to their corresponding string /// /// Args: /// sequences (:obj:`List` of :obj:`List[int]`): /// The batch of sequences we want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded strings /// /// Returns: /// :obj:`List[str]`: A list of decoded strings #[pyo3(signature = (sequences, skip_special_tokens = true))] #[pyo3(text_signature = "(self, sequences, skip_special_tokens=True)")] fn decode_batch( &self, py: Python<'_>, sequences: Vec<Vec<u32>>, skip_special_tokens: bool, ) -> PyResult<Vec<String>> { py.allow_threads(|| { let slices = sequences.iter().map(|v| &v[..]).collect::<Vec<&[u32]>>(); ToPyResult(self.tokenizer.decode_batch(&slices, skip_special_tokens)).into() }) } /// Convert the given token to its corresponding id if it exists /// /// Args: /// token (:obj:`str`): /// The token to convert /// /// Returns: /// :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, token)")] fn 
token_to_id(&self, token: &str) -> Option<u32> { self.tokenizer.token_to_id(token) } /// Convert the given id to its corresponding token if it exists /// /// Args: /// id (:obj:`int`): /// The id to convert /// /// Returns: /// :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, id)")] fn id_to_token(&self, id: u32) -> Option<String> { self.tokenizer.id_to_token(id) } /// Add the given tokens to the vocabulary /// /// The given tokens are added only if they don't already exist in the vocabulary. /// Each token then gets a new attributed id. /// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of tokens we want to add to the vocabulary. Each token can be either a /// string or an instance of :class:`~tokenizers.AddedToken` for more customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_tokens(&mut self, tokens: &PyList) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(false)).get_token()) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = false; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_tokens(&tokens)) } /// Add the given special tokens to the Tokenizer. /// /// If these tokens are already part of the vocabulary, it just let the Tokenizer know about /// them. If they don't exist, the Tokenizer creates them, giving them a new id. /// /// These special tokens will never be processed by the model (ie won't be split into /// multiple tokens), and they can be removed from the output when decoding. /// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of special tokens we want to add to the vocabulary. Each token can either /// be a string or an instance of :class:`~tokenizers.AddedToken` for more /// customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_special_tokens(&mut self, tokens: &PyList) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_special_tokens(&tokens)) } /// Train the Tokenizer using the given files. /// /// Reads the files line by line, while keeping all the whitespace, even new lines. 
/// If you want to train from data store in-memory, you can check /// :meth:`~tokenizers.Tokenizer.train_from_iterator` /// /// Args: /// files (:obj:`List[str]`): /// A list of path to the files that we should use for training /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model #[pyo3(signature = (files, trainer = None))] #[pyo3(text_signature = "(self, files, trainer = None)")] fn train(&mut self, files: Vec<String>, trainer: Option<&mut PyTrainer>) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); Python::with_gil(|py| { py.allow_threads(|| { ToPyResult( self.tokenizer .train_from_files(&mut trainer, files) .map(|_| {}), ) .into() }) }) } /// Train the Tokenizer using the provided iterator. /// /// You can provide anything that is a Python Iterator /// /// * A list of sequences :obj:`List[str]` /// * A generator that yields :obj:`str` or :obj:`List[str]` /// * A Numpy array of strings /// * ... /// /// Args: /// iterator (:obj:`Iterator`): /// Any iterator over strings or list of strings /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model /// /// length (:obj:`int`, `optional`): /// The total number of sequences in the iterator. This is used to /// provide meaningful progress tracking #[pyo3(signature = (iterator, trainer = None, length = None))] #[pyo3(text_signature = "(self, iterator, trainer=None, length=None)")] fn train_from_iterator( &mut self, py: Python, iterator: &PyAny, trainer: Option<&mut PyTrainer>, length: Option<usize>, ) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); let buffered_iter = PyBufferedIterator::new( iterator, |element| { // Each element of the iterator can either be: // - An iterator, to allow batching // - A string if let Ok(s) = element.downcast::<PyString>() { itertools::Either::Right(std::iter::once(s.to_str().map(|s| s.to_owned()))) } else { match element.iter() { Ok(iter) => itertools::Either::Left( iter.map(|i| i?.extract::<String>()) .collect::<Vec<_>>() .into_iter(), ), Err(e) => itertools::Either::Right(std::iter::once(Err(e))), } } }, 256, )?; py.allow_threads(|| { ResultShunt::process(buffered_iter, |iter| { self.tokenizer .train(&mut trainer, MaybeSizedIterator::new(iter, length)) .map(|_| {}) .map_err(|e| exceptions::PyException::new_err(e.to_string())) })? }) } /// Apply all the post-processing steps to the given encodings. /// /// The various steps are: /// /// 1. Truncate according to the set truncation params (provided with /// :meth:`~tokenizers.Tokenizer.enable_truncation`) /// 2. Apply the :class:`~tokenizers.processors.PostProcessor` /// 3. Pad according to the set padding params (provided with /// :meth:`~tokenizers.Tokenizer.enable_padding`) /// /// Args: /// encoding (:class:`~tokenizers.Encoding`): /// The :class:`~tokenizers.Encoding` corresponding to the main sequence. /// /// pair (:class:`~tokenizers.Encoding`, `optional`): /// An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. 
/// /// add_special_tokens (:obj:`bool`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The final post-processed encoding #[pyo3(signature = (encoding, pair = None, add_special_tokens = true))] #[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")] fn post_process( &self, encoding: &PyEncoding, pair: Option<&PyEncoding>, add_special_tokens: bool, ) -> PyResult<PyEncoding> { ToPyResult( self.tokenizer .post_process( encoding.encoding.clone(), pair.map(|p| p.encoding.clone()), add_special_tokens, ) .map(|e| e.into()), ) .into() } /// The :class:`~tokenizers.models.Model` in use by the Tokenizer #[getter] fn get_model(&self, py: Python<'_>) -> PyResult<PyObject> { self.tokenizer.get_model().get_as_subtype(py) } /// Set the :class:`~tokenizers.models.Model` #[setter] fn set_model(&mut self, model: PyRef<PyModel>) { self.tokenizer.with_model(model.clone()); } /// The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer #[getter] fn get_normalizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_normalizer() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.normalizers.Normalizer` #[setter] fn set_normalizer(&mut self, normalizer: PyRef<PyNormalizer>) { self.tokenizer.with_normalizer(normalizer.clone()); } /// The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer #[getter] fn get_pre_tokenizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(pt) = self.tokenizer.get_pre_tokenizer() { pt.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.normalizers.Normalizer` #[setter] fn set_pre_tokenizer(&mut self, pretok: PyRef<PyPreTokenizer>) { self.tokenizer.with_pre_tokenizer(pretok.clone()); } /// The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer #[getter] fn get_post_processor(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_post_processor() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.processors.PostProcessor` #[setter] fn set_post_processor(&mut self, processor: PyRef<PyPostProcessor>) { self.tokenizer.with_post_processor(processor.clone()); } /// The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer #[getter] fn get_decoder(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(dec) = self.tokenizer.get_decoder() { dec.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.decoders.Decoder` #[setter] fn set_decoder(&mut self, decoder: PyRef<PyDecoder>) { self.tokenizer.with_decoder(decoder.clone()); } } #[cfg(test)] mod test { use super::*; use crate::models::PyModel; use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper}; use std::sync::{Arc, RwLock}; use tempfile::NamedTempFile; use tk::normalizers::{Lowercase, NFKC}; #[test] fn serialize() { let mut tokenizer = Tokenizer::new(PyModel::from(BPE::default())); tokenizer.with_normalizer(PyNormalizer::new(PyNormalizerTypeWrapper::Sequence(vec![ Arc::new(RwLock::new(NFKC.into())), Arc::new(RwLock::new(Lowercase.into())), ]))); let tmp = NamedTempFile::new().unwrap().into_temp_path(); tokenizer.save(&tmp, false).unwrap(); Tokenizer::from_file(&tmp).unwrap(); } }
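The bindings in this file map directly onto the public Python `Tokenizer` class. As a quick orientation, here is a minimal Python sketch (not part of the bound source) exercising the methods defined above: construction from a model, added tokens, truncation/padding configuration, encode/decode, and JSON round-tripping. It assumes the published `tokenizers` package is installed; the printed ids simply follow the insertion order of the added tokens, and the empty BPE model stands in for a trained or loaded vocabulary.

from tokenizers import Tokenizer, AddedToken
from tokenizers.models import BPE

# An empty BPE model is enough to exercise the plumbing bound above; a real
# vocabulary would come from training, from_file(), or from_pretrained().
tokenizer = Tokenizer(BPE())

# add_tokens()/add_special_tokens() accept plain strings or AddedToken objects.
tokenizer.add_tokens(["hello", AddedToken("world", lstrip=True)])
tokenizer.add_special_tokens(["[PAD]", "[CLS]"])
print(tokenizer.token_to_id("hello"), tokenizer.id_to_token(0))  # ids follow insertion order
print(tokenizer.get_vocab_size(with_added_tokens=True))

# enable_truncation()/enable_padding() configure the pipeline; the current
# settings are readable back through the `truncation` and `padding` getters.
tokenizer.enable_truncation(max_length=8, strategy="longest_first", direction="right")
tokenizer.enable_padding(pad_id=2, pad_token="[PAD]", length=8)
print(tokenizer.truncation, tokenizer.padding)

# encode() takes a raw sequence plus an optional pair; decode() reverses ids.
encoding = tokenizer.encode("hello world", "hello")
print(encoding.ids)
print(tokenizer.decode(encoding.ids, skip_special_tokens=True))

# The whole tokenizer serializes to a JSON string and back.
json_str = tokenizer.to_str(pretty=True)
restored = Tokenizer.from_str(json_str)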
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/pre_tokenizers.rs
use std::sync::{Arc, RwLock}; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::normalizer::SplitDelimiterBehavior; use tk::pre_tokenizers::bert::BertPreTokenizer; use tk::pre_tokenizers::byte_level::ByteLevel; use tk::pre_tokenizers::delimiter::CharDelimiterSplit; use tk::pre_tokenizers::digits::Digits; use tk::pre_tokenizers::metaspace::{Metaspace, PrependScheme}; use tk::pre_tokenizers::punctuation::Punctuation; use tk::pre_tokenizers::split::Split; use tk::pre_tokenizers::unicode_scripts::UnicodeScripts; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use tk::tokenizer::Offsets; use tk::{PreTokenizedString, PreTokenizer}; use tokenizers as tk; use super::error::ToPyResult; use super::utils::*; /// Base class for all pre-tokenizers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// PreTokenizer will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.pre_tokenizers", name = "PreTokenizer", subclass )] #[derive(Clone, Serialize, Deserialize)] pub struct PyPreTokenizer { #[serde(flatten)] pub(crate) pretok: PyPreTokenizerTypeWrapper, } impl PyPreTokenizer { #[allow(dead_code)] pub(crate) fn new(pretok: PyPreTokenizerTypeWrapper) -> Self { PyPreTokenizer { pretok } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match &self.pretok { PyPreTokenizerTypeWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PyPreTokenizerTypeWrapper::Single(ref inner) => { match &*inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyPreTokenizerWrapper::Wrapped(inner) => match inner { PreTokenizerWrapper::Whitespace(_) => { Py::new(py, (PyWhitespace {}, base))?.into_py(py) } PreTokenizerWrapper::Split(_) => { Py::new(py, (PySplit {}, base))?.into_py(py) } PreTokenizerWrapper::Punctuation(_) => { Py::new(py, (PyPunctuation {}, base))?.into_py(py) } PreTokenizerWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PreTokenizerWrapper::Metaspace(_) => { Py::new(py, (PyMetaspace {}, base))?.into_py(py) } PreTokenizerWrapper::Delimiter(_) => { Py::new(py, (PyCharDelimiterSplit {}, base))?.into_py(py) } PreTokenizerWrapper::WhitespaceSplit(_) => { Py::new(py, (PyWhitespaceSplit {}, base))?.into_py(py) } PreTokenizerWrapper::ByteLevel(_) => { Py::new(py, (PyByteLevel {}, base))?.into_py(py) } PreTokenizerWrapper::BertPreTokenizer(_) => { Py::new(py, (PyBertPreTokenizer {}, base))?.into_py(py) } PreTokenizerWrapper::Digits(_) => { Py::new(py, (PyDigits {}, base))?.into_py(py) } PreTokenizerWrapper::UnicodeScripts(_) => { Py::new(py, (PyUnicodeScripts {}, base))?.into_py(py) } }, } } }) } } impl PreTokenizer for PyPreTokenizer { fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> tk::Result<()> { self.pretok.pre_tokenize(normalized) } } #[pymethods] impl PyPreTokenizer { #[staticmethod] fn custom(pretok: PyObject) -> Self { PyPreTokenizer { pretok: PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(pretok)).into(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.pretok).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PreTokenizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) 
} fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PreTokenizer: {}", e )) })?; self.pretok = unpickled; Ok(()) } Err(e) => Err(e), } } /// Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place /// /// This method allows to modify a :class:`~tokenizers.PreTokenizedString` to /// keep track of the pre-tokenization, and leverage the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of /// the pre-tokenization of a raw string, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` /// /// Args: /// pretok (:class:`~tokenizers.PreTokenizedString): /// The pre-tokenized string on which to apply this /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` #[pyo3(text_signature = "(self, pretok)")] fn pre_tokenize(&self, pretok: &mut PyPreTokenizedString) -> PyResult<()> { ToPyResult(self.pretok.pre_tokenize(&mut pretok.pretok)).into() } /// Pre tokenize the given string /// /// This method provides a way to visualize the effect of a /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the /// alignment, nor does it provide all the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` /// /// Args: /// sequence (:obj:`str`): /// A string to pre-tokeize /// /// Returns: /// :obj:`List[Tuple[str, Offsets]]`: /// A list of tuple with the pre-tokenized parts and their offsets #[pyo3(text_signature = "(self, sequence)")] fn pre_tokenize_str(&self, s: &str) -> PyResult<Vec<(String, Offsets)>> { let mut pretokenized = tk::tokenizer::PreTokenizedString::from(s); ToPyResult(self.pretok.pre_tokenize(&mut pretokenized)).into_py()?; Ok(pretokenized .get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char) .into_iter() .map(|(s, o, _)| (s.to_owned(), o)) .collect()) } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref pretok)) = *single.read().unwrap() { pretok.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().unwrap() { pretok.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().unwrap() { pretok.$name($value); } } }}; } /// ByteLevel PreTokenizer /// /// This pre-tokenizer takes care of replacing all bytes of the given string /// with a corresponding representation, as well as splitting into words. /// /// Args: /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. 
/// use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Set this to :obj:`False` to prevent this `pre_tokenizer` from using /// the GPT2 specific regexp for spliting on whitespace. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "ByteLevel")] pub struct PyByteLevel {} #[pymethods] impl PyByteLevel { #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, ByteLevel, add_prefix_space, add_prefix_space); } #[getter] fn get_use_regex(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, use_regex) } #[setter] fn set_use_regex(self_: PyRef<Self>, use_regex: bool) { setter!(self_, ByteLevel, use_regex, use_regex); } #[new] #[pyo3(signature = (add_prefix_space = true, use_regex = true, **_kwargs), text_signature = "(self, add_prefix_space=True, use_regex=True)")] fn new( add_prefix_space: bool, use_regex: bool, _kwargs: Option<&PyDict>, ) -> (Self, PyPreTokenizer) { ( PyByteLevel {}, ByteLevel::default() .add_prefix_space(add_prefix_space) .use_regex(use_regex) .into(), ) } /// Returns the alphabet used by this PreTokenizer. /// /// Since the ByteLevel works as its name suggests, at the byte level, it /// encodes each byte value to a unique visible character. This means that there is a /// total of 256 different characters composing this alphabet. /// /// Returns: /// :obj:`List[str]`: A list of characters that compose the alphabet #[staticmethod] #[pyo3(text_signature = "()")] fn alphabet() -> Vec<String> { ByteLevel::alphabet() .into_iter() .map(|c| c.to_string()) .collect() } } /// This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Whitespace")] pub struct PyWhitespace {} #[pymethods] impl PyWhitespace { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyWhitespace {}, Whitespace {}.into()) } } /// This pre-tokenizer simply splits on the whitespace. Works like `.split()` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "WhitespaceSplit")] pub struct PyWhitespaceSplit {} #[pymethods] impl PyWhitespaceSplit { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyWhitespaceSplit {}, WhitespaceSplit.into()) } } /// Split PreTokenizer /// /// This versatile pre-tokenizer splits using the provided pattern and /// according to the provided behavior. The pattern can be inverted by /// making use of the invert flag. /// /// Args: /// pattern (:obj:`str` or :class:`~tokenizers.Regex`): /// A pattern used to split the string. Usually a string or a a regex built with `tokenizers.Regex` /// /// behavior (:class:`~tokenizers.SplitDelimiterBehavior`): /// The behavior to use when splitting. /// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", /// "contiguous" /// /// invert (:obj:`bool`, `optional`, defaults to :obj:`False`): /// Whether to invert the pattern. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Split")] pub struct PySplit {} #[pymethods] impl PySplit { #[new] #[pyo3(signature = (pattern, behavior, invert = false), text_signature = "(self, pattern, behavior, invert=False)")] fn new( pattern: PyPattern, behavior: PySplitDelimiterBehavior, invert: bool, ) -> PyResult<(Self, PyPreTokenizer)> { Ok(( PySplit {}, ToPyResult(Split::new(pattern, behavior.into(), invert)) .into_py()? .into(), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [" ", "removed"]) } } /// This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)` /// /// Args: /// delimiter: str: /// The delimiter char that will be used to split input #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "CharDelimiterSplit")] pub struct PyCharDelimiterSplit {} #[pymethods] impl PyCharDelimiterSplit { #[getter] fn get_delimiter(self_: PyRef<Self>) -> String { getter!(self_, Delimiter, delimiter.to_string()) } #[setter] fn set_delimiter(self_: PyRef<Self>, delimiter: PyChar) { setter!(self_, Delimiter, delimiter, delimiter.0); } #[new] #[pyo3(text_signature = None)] pub fn new(delimiter: PyChar) -> PyResult<(Self, PyPreTokenizer)> { Ok(( PyCharDelimiterSplit {}, CharDelimiterSplit::new(delimiter.0).into(), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [" "]) } } /// BertPreTokenizer /// /// This pre-tokenizer splits tokens on spaces, and also on punctuation. /// Each occurence of a punctuation character will be treated separately. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "BertPreTokenizer")] pub struct PyBertPreTokenizer {} #[pymethods] impl PyBertPreTokenizer { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyBertPreTokenizer {}, BertPreTokenizer.into()) } } /// This pre-tokenizer simply splits on punctuation as individual characters. /// /// Args: /// behavior (:class:`~tokenizers.SplitDelimiterBehavior`): /// The behavior to use when splitting. 
/// Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", /// "contiguous" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Punctuation")] pub struct PyPunctuation {} #[pymethods] impl PyPunctuation { #[new] #[pyo3( signature = (behavior = PySplitDelimiterBehavior(SplitDelimiterBehavior::Isolated)), text_signature = "(self, behavior=\"isolated\")")] fn new(behavior: PySplitDelimiterBehavior) -> (Self, PyPreTokenizer) { (PyPunctuation {}, Punctuation::new(behavior.into()).into()) } } /// This pre-tokenizer composes other pre_tokenizers and applies them in sequence #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(text_signature = "(self, pretokenizers)")] fn new(pre_tokenizers: &PyList) -> PyResult<(Self, PyPreTokenizer)> { let mut sequence = Vec::with_capacity(pre_tokenizers.len()); for n in pre_tokenizers.iter() { let pretokenizer: PyRef<PyPreTokenizer> = n.extract()?; match &pretokenizer.pretok { PyPreTokenizerTypeWrapper::Sequence(inner) => { sequence.extend(inner.iter().cloned()) } PyPreTokenizerTypeWrapper::Single(inner) => sequence.push(inner.clone()), } } Ok(( PySequence {}, PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Sequence(sequence)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } fn from_string(string: String) -> Result<PrependScheme, PyErr> { let scheme = match string.as_str() { "first" => PrependScheme::First, "never" => PrependScheme::Never, "always" => PrependScheme::Always, _ => { return Err(exceptions::PyValueError::new_err(format!( "{} is an unknown variant, should be one of ['first', 'never', 'always']", string ))); } }; Ok(scheme) } /// Metaspace pre-tokenizer /// /// This pre-tokenizer replaces any whitespace by the provided replacement character. /// It then tries to split on these spaces. /// /// Args: /// replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): /// The replacement character. Must be exactly one character. By default we /// use the `▁` (U+2581) meta symbol (Same as in SentencePiece). /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Metaspace")] pub struct PyMetaspace {} #[pymethods] impl PyMetaspace { #[getter] fn get_replacement(self_: PyRef<Self>) -> String { getter!(self_, Metaspace, get_replacement().to_string()) } #[setter] fn set_replacement(self_: PyRef<Self>, replacement: PyChar) { setter!(self_, Metaspace, @set_replacement, replacement.0); } #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, Metaspace, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, Metaspace, add_prefix_space, add_prefix_space); } #[getter] fn get_prepend_scheme(self_: PyRef<Self>) -> String { // Assuming Metaspace has a method to get the prepend_scheme as a string let scheme: PrependScheme = getter!(self_, Metaspace, get_prepend_scheme()); match scheme { PrependScheme::First => "first", PrependScheme::Never => "never", PrependScheme::Always => "always", } .to_string() } #[setter] fn set_prepend_scheme(self_: PyRef<Self>, prepend_scheme: String) -> PyResult<()> { let scheme = from_string(prepend_scheme)?; setter!(self_, Metaspace, @set_prepend_scheme, scheme); Ok(()) } #[new] #[pyo3(signature = (replacement = PyChar('▁'), add_prefix_space = true, prepend_scheme=None, **_kwargs), text_signature = "(self, replacement=\"_\", add_prefix_space=True)")] fn new( replacement: PyChar, add_prefix_space: bool, prepend_scheme: Option<String>, _kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyPreTokenizer)> { // Create a new Metaspace instance let mut new_instance: Metaspace = Metaspace::new(replacement.0, add_prefix_space); // If a prepend scheme is provided, set it if let Some(prepend_scheme) = prepend_scheme { match from_string(prepend_scheme) { Ok(prepend_scheme_enum) => new_instance.set_prepend_scheme(prepend_scheme_enum), Err(err) => return Err(err), } } Ok((PyMetaspace {}, new_instance.into())) } } /// This pre-tokenizer simply splits using the digits in separate tokens /// /// Args: /// individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`): /// If set to True, digits will each be separated as follows:: /// /// "Call 123 please" -> "Call ", "1", "2", "3", " please" /// /// If set to False, digits will grouped as follows:: /// /// "Call 123 please" -> "Call ", "123", " please" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Digits")] pub struct PyDigits {} #[pymethods] impl PyDigits { #[getter] fn get_individual_digits(self_: PyRef<Self>) -> bool { getter!(self_, Digits, individual_digits) } #[setter] fn set_individual_digits(self_: PyRef<Self>, individual_digits: bool) { setter!(self_, Digits, individual_digits, individual_digits); } #[new] #[pyo3(signature = (individual_digits = false), text_signature = "(self, individual_digits=False)")] fn new(individual_digits: bool) -> (Self, PyPreTokenizer) { (PyDigits {}, Digits::new(individual_digits).into()) } } /// This pre-tokenizer splits on characters that belong to different language family /// It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt /// Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too. /// This mimicks SentencePiece Unigram implementation. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "UnicodeScripts")] pub struct PyUnicodeScripts {} #[pymethods] impl PyUnicodeScripts { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyUnicodeScripts {}, UnicodeScripts::new().into()) } } #[derive(Clone)] pub(crate) struct CustomPreTokenizer { inner: PyObject, } impl CustomPreTokenizer { pub fn new(inner: PyObject) -> Self { Self { inner } } } impl tk::tokenizer::PreTokenizer for CustomPreTokenizer { fn pre_tokenize(&self, sentence: &mut PreTokenizedString) -> tk::Result<()> { Python::with_gil(|py| { let pretok = PyPreTokenizedStringRefMut::new(sentence); let py_pretok = self.inner.as_ref(py); py_pretok.call_method("pre_tokenize", (pretok.get(),), None)?; Ok(()) }) } } impl Serialize for CustomPreTokenizer { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PreTokenizer cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomPreTokenizer { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Err(serde::de::Error::custom( "Custom PreTokenizer cannot be deserialized", )) } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerWrapper { Custom(CustomPreTokenizer), Wrapped(PreTokenizerWrapper), } impl Serialize for PyPreTokenizerWrapper { fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where S: Serializer, { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.serialize(serializer), PyPreTokenizerWrapper::Custom(inner) => inner.serialize(serializer), } } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerTypeWrapper { Sequence(Vec<Arc<RwLock<PyPreTokenizerWrapper>>>), Single(Arc<RwLock<PyPreTokenizerWrapper>>), } impl Serialize for PyPreTokenizerTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyPreTokenizerTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("pretokenizers", seq)?; ser.end() } PyPreTokenizerTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyPreTokenizerWrapper where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerWrapper::Wrapped(pretok.into()) } } impl<I> From<I> for PyPreTokenizerTypeWrapper where I: Into<PyPreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerTypeWrapper::Single(Arc::new(RwLock::new(pretok.into()))) } } impl<I> From<I> for PyPreTokenizer where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizer { pretok: pretok.into().into(), } } } impl PreTokenizer for PyPreTokenizerTypeWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerTypeWrapper::Single(inner) => inner.read().unwrap().pre_tokenize(pretok), PyPreTokenizerTypeWrapper::Sequence(inner) => inner .iter() .try_for_each(|n| n.read().unwrap().pre_tokenize(pretok)), } } } impl PreTokenizer for PyPreTokenizerWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.pre_tokenize(pretok), PyPreTokenizerWrapper::Custom(inner) => inner.pre_tokenize(pretok), } } } /// PreTokenizers Module #[pymodule] pub fn pre_tokenizers(_py: Python, m: &PyModule) -> 
PyResult<()> { m.add_class::<PyPreTokenizer>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyWhitespace>()?; m.add_class::<PyWhitespaceSplit>()?; m.add_class::<PySplit>()?; m.add_class::<PyBertPreTokenizer>()?; m.add_class::<PyMetaspace>()?; m.add_class::<PyCharDelimiterSplit>()?; m.add_class::<PyPunctuation>()?; m.add_class::<PySequence>()?; m.add_class::<PyDigits>()?; m.add_class::<PyUnicodeScripts>()?; Ok(()) } #[cfg(test)] mod test { use pyo3::prelude::*; use tk::pre_tokenizers::sequence::Sequence; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use crate::pre_tokenizers::{ CustomPreTokenizer, PyPreTokenizer, PyPreTokenizerTypeWrapper, PyPreTokenizerWrapper, }; #[test] fn get_subtype() { Python::with_gil(|py| { let py_norm = PyPreTokenizer::new(Whitespace {}.into()); let py_wsp = py_norm.get_as_subtype(py).unwrap(); assert_eq!("Whitespace", py_wsp.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyPreTokenizerWrapper = Whitespace {}.into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = PreTokenizerWrapper::Whitespace(Whitespace {}); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_pretok: PyPreTokenizer = serde_json::from_str(&rs_ser).unwrap(); match py_pretok.pretok { PyPreTokenizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::Whitespace(_)) => {} _ => panic!("Expected Whitespace"), }, _ => panic!("Expected wrapped, not custom."), } let py_seq: PyPreTokenizerWrapper = Sequence::new(vec![Whitespace {}.into(), WhitespaceSplit.into()]).into(); let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap(); let rs_wrapped = PreTokenizerWrapper::Sequence(Sequence::new(vec![ Whitespace {}.into(), WhitespaceSplit.into(), ])); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); let py_seq = PyPreTokenizer::new(py_seq.into()); let py_ser = serde_json::to_string(&py_seq).unwrap(); assert_eq!(py_wrapper_ser, py_ser); let obj = Python::with_gil(|py| { let py_wsp = PyPreTokenizer::new(Whitespace {}.into()); let obj: PyObject = Py::new(py, py_wsp).unwrap().into_py(py); obj }); let py_seq: PyPreTokenizerWrapper = PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(obj)); assert!(serde_json::to_string(&py_seq).is_err()); } }
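Mirroring the classes registered in this module, the following short Python sketch (illustrative, not part of the bound source) shows the two entry points described in the docstrings above: `pre_tokenize_str` for inspecting a pre-tokenizer's effect, and attribute access backed by the getter/setter macros. The output shown in the comment is a character-offset example, hence the "e.g."; exact values depend on the library version.

from tokenizers.pre_tokenizers import ByteLevel, Digits, Metaspace, Sequence, Whitespace

# pre_tokenize_str() returns (piece, (start, end)) tuples with char offsets.
pre = Sequence([Whitespace(), Digits(individual_digits=True)])
print(pre.pre_tokenize_str("Call 123 please"))
# e.g. [('Call', (0, 4)), ('1', (5, 6)), ('2', (6, 7)), ('3', (7, 8)), ('please', (9, 15))]

# Options declared on the Rust side are exposed as plain Python attributes.
bl = ByteLevel(add_prefix_space=False, use_regex=True)
bl.add_prefix_space = True
print(bl.pre_tokenize_str("say hello"))

# Metaspace also accepts the prepend_scheme option handled above.
ms = Metaspace(replacement="▁", add_prefix_space=True, prepend_scheme="always")
print(ms.pre_tokenize_str("say hello"))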
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/src/decoders.rs
use std::sync::{Arc, RwLock}; use crate::utils::PyChar; use crate::utils::PyPattern; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::decoders::bpe::BPEDecoder; use tk::decoders::byte_fallback::ByteFallback; use tk::decoders::byte_level::ByteLevel; use tk::decoders::ctc::CTC; use tk::decoders::fuse::Fuse; use tk::decoders::metaspace::Metaspace; use tk::decoders::sequence::Sequence; use tk::decoders::strip::Strip; use tk::decoders::wordpiece::WordPiece; use tk::decoders::DecoderWrapper; use tk::normalizers::replace::Replace; use tk::Decoder; use tokenizers as tk; use super::error::ToPyResult; /// Base class for all decoders /// /// This class is not supposed to be instantiated directly. Instead, any implementation of /// a Decoder will return an instance of this class when instantiated. #[pyclass(dict, module = "tokenizers.decoders", name = "Decoder", subclass)] #[derive(Clone, Deserialize, Serialize)] pub struct PyDecoder { #[serde(flatten)] pub(crate) decoder: PyDecoderWrapper, } impl PyDecoder { pub(crate) fn new(decoder: PyDecoderWrapper) -> Self { PyDecoder { decoder } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match &self.decoder { PyDecoderWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyDecoderWrapper::Wrapped(inner) => match &*inner.as_ref().read().unwrap() { DecoderWrapper::Metaspace(_) => Py::new(py, (PyMetaspaceDec {}, base))?.into_py(py), DecoderWrapper::WordPiece(_) => Py::new(py, (PyWordPieceDec {}, base))?.into_py(py), DecoderWrapper::ByteFallback(_) => { Py::new(py, (PyByteFallbackDec {}, base))?.into_py(py) } DecoderWrapper::Strip(_) => Py::new(py, (PyStrip {}, base))?.into_py(py), DecoderWrapper::Fuse(_) => Py::new(py, (PyFuseDec {}, base))?.into_py(py), DecoderWrapper::ByteLevel(_) => Py::new(py, (PyByteLevelDec {}, base))?.into_py(py), DecoderWrapper::Replace(_) => Py::new(py, (PyReplaceDec {}, base))?.into_py(py), DecoderWrapper::BPE(_) => Py::new(py, (PyBPEDecoder {}, base))?.into_py(py), DecoderWrapper::CTC(_) => Py::new(py, (PyCTCDecoder {}, base))?.into_py(py), DecoderWrapper::Sequence(_) => { Py::new(py, (PySequenceDecoder {}, base))?.into_py(py) } }, }) } } impl Decoder for PyDecoder { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { self.decoder.decode_chain(tokens) } } #[pymethods] impl PyDecoder { #[staticmethod] fn custom(decoder: PyObject) -> Self { let decoder = PyDecoderWrapper::Custom(Arc::new(RwLock::new(CustomDecoder::new(decoder)))); PyDecoder::new(decoder) } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.decoder).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Decoder: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.decoder = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Decoder: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Decode the given list of tokens to a final string /// /// Args: /// tokens (:obj:`List[str]`): /// The list of tokens to decode /// /// Returns: /// :obj:`str`: The decoded string #[pyo3(text_signature = "(self, tokens)")] fn decode(&self, tokens: Vec<String>) -> PyResult<String> { 
ToPyResult(self.decoder.decode(tokens)).into() } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref dec) = *wrap.read().unwrap() { dec.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref mut dec) = *wrap.write().unwrap() { dec.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyDecoderWrapper::Wrapped(ref wrap) = super_.decoder { if let DecoderWrapper::$variant(ref mut dec) = *wrap.write().unwrap() { dec.$name($value); } } }}; } /// ByteLevel Decoder /// /// This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel` /// :class:`~tokenizers.pre_tokenizers.PreTokenizer`. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "ByteLevel")] pub struct PyByteLevelDec {} #[pymethods] impl PyByteLevelDec { #[new] #[pyo3(signature = (**_kwargs), text_signature = "(self)")] fn new(_kwargs: Option<&PyDict>) -> (Self, PyDecoder) { (PyByteLevelDec {}, ByteLevel::default().into()) } } /// Replace Decoder /// /// This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace` /// :class:`~tokenizers.pre_tokenizers.PreTokenizer`. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Replace")] pub struct PyReplaceDec {} #[pymethods] impl PyReplaceDec { #[new] #[pyo3(text_signature = "(self, pattern, content)")] fn new(pattern: PyPattern, content: String) -> PyResult<(Self, PyDecoder)> { Ok(( PyReplaceDec {}, ToPyResult(Replace::new(pattern, content)).into_py()?.into(), )) } } /// WordPiece Decoder /// /// Args: /// prefix (:obj:`str`, `optional`, defaults to :obj:`##`): /// The prefix to use for subwords that are not a beginning-of-word /// /// cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation, /// and some abbreviated english forms. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "WordPiece")] pub struct PyWordPieceDec {} #[pymethods] impl PyWordPieceDec { #[getter] fn get_prefix(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, prefix.clone()) } #[setter] fn set_prefix(self_: PyRef<Self>, prefix: String) { setter!(self_, WordPiece, prefix, prefix); } #[getter] fn get_cleanup(self_: PyRef<Self>) -> bool { getter!(self_, WordPiece, cleanup) } #[setter] fn set_cleanup(self_: PyRef<Self>, cleanup: bool) { setter!(self_, WordPiece, cleanup, cleanup); } #[new] #[pyo3(signature = (prefix = String::from("##"), cleanup = true), text_signature = "(self, prefix=\"##\", cleanup=True)")] fn new(prefix: String, cleanup: bool) -> (Self, PyDecoder) { (PyWordPieceDec {}, WordPiece::new(prefix, cleanup).into()) } } /// ByteFallback Decoder /// ByteFallback is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. 
If the tokens /// cannot be decoded you will get � instead for each inconvertable byte token /// #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "ByteFallback")] pub struct PyByteFallbackDec {} #[pymethods] impl PyByteFallbackDec { #[new] #[pyo3(signature = (), text_signature = "(self)")] fn new() -> (Self, PyDecoder) { (PyByteFallbackDec {}, ByteFallback::new().into()) } } /// Fuse Decoder /// Fuse simply fuses every token into a single string. /// This is the last step of decoding, this decoder exists only if /// there is need to add other decoders *after* the fusion #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Fuse")] pub struct PyFuseDec {} #[pymethods] impl PyFuseDec { #[new] #[pyo3(signature = (), text_signature = "(self)")] fn new() -> (Self, PyDecoder) { (PyFuseDec {}, Fuse::new().into()) } } /// Strip normalizer /// Strips n left characters of each token, or n right characters of each token #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Strip")] pub struct PyStrip {} #[pymethods] impl PyStrip { #[getter] fn get_start(self_: PyRef<Self>) -> usize { getter!(self_, Strip, start) } #[setter] fn set_start(self_: PyRef<Self>, start: usize) { setter!(self_, Strip, start, start) } #[getter] fn get_stop(self_: PyRef<Self>) -> usize { getter!(self_, Strip, stop) } #[setter] fn set_stop(self_: PyRef<Self>, stop: usize) { setter!(self_, Strip, stop, stop) } #[getter] fn get_content(self_: PyRef<Self>) -> char { getter!(self_, Strip, content) } #[setter] fn set_content(self_: PyRef<Self>, content: char) { setter!(self_, Strip, content, content) } #[new] #[pyo3(signature = (content=' ', left=0, right=0), text_signature = "(self, content, left=0, right=0)")] fn new(content: char, left: usize, right: usize) -> (Self, PyDecoder) { (PyStrip {}, Strip::new(content, left, right).into()) } } /// Metaspace Decoder /// /// Args: /// replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): /// The replacement character. Must be exactly one character. By default we /// use the `▁` (U+2581) meta symbol (Same as in SentencePiece). /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "Metaspace")] pub struct PyMetaspaceDec {} #[pymethods] impl PyMetaspaceDec { #[getter] fn get_replacement(self_: PyRef<Self>) -> String { getter!(self_, Metaspace, get_replacement().to_string()) } #[setter] fn set_replacement(self_: PyRef<Self>, replacement: PyChar) { setter!(self_, Metaspace, @set_replacement, replacement.0); } #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, Metaspace, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, Metaspace, add_prefix_space, add_prefix_space); } #[new] #[pyo3(signature = (replacement = PyChar('▁'), add_prefix_space = true), text_signature = "(self, replacement = \"▁\", add_prefix_space = True)")] fn new(replacement: PyChar, add_prefix_space: bool) -> (Self, PyDecoder) { ( PyMetaspaceDec {}, Metaspace::new(replacement.0, add_prefix_space).into(), ) } } /// BPEDecoder Decoder /// /// Args: /// suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`): /// The suffix that was used to caracterize an end-of-word. 
This suffix will /// be replaced by whitespaces during the decoding #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "BPEDecoder")] pub struct PyBPEDecoder {} #[pymethods] impl PyBPEDecoder { #[getter] fn get_suffix(self_: PyRef<Self>) -> String { getter!(self_, BPE, suffix.clone()) } #[setter] fn set_suffix(self_: PyRef<Self>, suffix: String) { setter!(self_, BPE, suffix, suffix); } #[new] #[pyo3(signature = (suffix = String::from("</w>")), text_signature = "(self, suffix=\"</w>\")")] fn new(suffix: String) -> (Self, PyDecoder) { (PyBPEDecoder {}, BPEDecoder::new(suffix).into()) } } /// CTC Decoder /// /// Args: /// pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`): /// The pad token used by CTC to delimit a new token. /// word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`): /// The word delimiter token. It will be replaced by a <space> /// cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to cleanup some tokenization artifacts. /// Mainly spaces before punctuation, and some abbreviated english forms. #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name = "CTC")] pub struct PyCTCDecoder {} #[pymethods] impl PyCTCDecoder { #[getter] fn get_pad_token(self_: PyRef<Self>) -> String { getter!(self_, CTC, pad_token.clone()) } #[setter] fn set_pad_token(self_: PyRef<Self>, pad_token: String) { setter!(self_, CTC, pad_token, pad_token); } #[getter] fn get_word_delimiter_token(self_: PyRef<Self>) -> String { getter!(self_, CTC, word_delimiter_token.clone()) } #[setter] fn set_word_delimiter_token(self_: PyRef<Self>, word_delimiter_token: String) { setter!(self_, CTC, word_delimiter_token, word_delimiter_token); } #[getter] fn get_cleanup(self_: PyRef<Self>) -> bool { getter!(self_, CTC, cleanup) } #[setter] fn set_cleanup(self_: PyRef<Self>, cleanup: bool) { setter!(self_, CTC, cleanup, cleanup); } #[new] #[pyo3(signature = ( pad_token = String::from("<pad>"), word_delimiter_token = String::from("|"), cleanup = true ), text_signature = "(self, pad_token=\"<pad>\", word_delimiter_token=\"|\", cleanup=True)")] fn new(pad_token: String, word_delimiter_token: String, cleanup: bool) -> (Self, PyDecoder) { ( PyCTCDecoder {}, CTC::new(pad_token, word_delimiter_token, cleanup).into(), ) } } /// Sequence Decoder /// /// Args: /// decoders (:obj:`List[Decoder]`) /// The decoders that need to be chained #[pyclass(extends=PyDecoder, module = "tokenizers.decoders", name="Sequence")] pub struct PySequenceDecoder {} #[pymethods] impl PySequenceDecoder { #[new] #[pyo3(signature = (decoders_py), text_signature = "(self, decoders)")] fn new(decoders_py: &PyList) -> PyResult<(Self, PyDecoder)> { let mut decoders: Vec<DecoderWrapper> = Vec::with_capacity(decoders_py.len()); for decoder_py in decoders_py.iter() { let decoder: PyRef<PyDecoder> = decoder_py.extract()?; let decoder = match &decoder.decoder { PyDecoderWrapper::Wrapped(inner) => inner, PyDecoderWrapper::Custom(_) => unimplemented!(), }; decoders.push(decoder.read().unwrap().clone()); } Ok((PySequenceDecoder {}, Sequence::new(decoders).into())) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } #[derive(Clone)] pub(crate) struct CustomDecoder { inner: PyObject, } impl CustomDecoder { pub(crate) fn new(inner: PyObject) -> Self { CustomDecoder { inner } } } impl Decoder for CustomDecoder { fn decode(&self, tokens: Vec<String>) -> tk::Result<String> { Python::with_gil(|py| { let decoded = self .inner .call_method(py, "decode", 
(tokens,), None)? .extract(py)?; Ok(decoded) }) } fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { Python::with_gil(|py| { let decoded = self .inner .call_method(py, "decode_chain", (tokens,), None)? .extract(py)?; Ok(decoded) }) } } impl Serialize for CustomDecoder { fn serialize<S>(&self, _serializer: S) -> std::result::Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PyDecoder cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomDecoder { fn deserialize<D>(_deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { Err(D::Error::custom("PyDecoder cannot be deserialized")) } } #[derive(Clone, Deserialize, Serialize)] #[serde(untagged)] pub(crate) enum PyDecoderWrapper { Custom(Arc<RwLock<CustomDecoder>>), Wrapped(Arc<RwLock<DecoderWrapper>>), } impl<I> From<I> for PyDecoderWrapper where I: Into<DecoderWrapper>, { fn from(norm: I) -> Self { PyDecoderWrapper::Wrapped(Arc::new(RwLock::new(norm.into()))) } } impl<I> From<I> for PyDecoder where I: Into<DecoderWrapper>, { fn from(dec: I) -> Self { PyDecoder { decoder: dec.into().into(), } } } impl Decoder for PyDecoderWrapper { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { match self { PyDecoderWrapper::Wrapped(inner) => inner.read().unwrap().decode_chain(tokens), PyDecoderWrapper::Custom(inner) => inner.read().unwrap().decode_chain(tokens), } } } /// Decoders Module #[pymodule] pub fn decoders(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyDecoder>()?; m.add_class::<PyByteLevelDec>()?; m.add_class::<PyReplaceDec>()?; m.add_class::<PyWordPieceDec>()?; m.add_class::<PyByteFallbackDec>()?; m.add_class::<PyFuseDec>()?; m.add_class::<PyStrip>()?; m.add_class::<PyMetaspaceDec>()?; m.add_class::<PyBPEDecoder>()?; m.add_class::<PyCTCDecoder>()?; m.add_class::<PySequenceDecoder>()?; Ok(()) } #[cfg(test)] mod test { use std::sync::{Arc, RwLock}; use pyo3::prelude::*; use tk::decoders::metaspace::Metaspace; use tk::decoders::DecoderWrapper; use crate::decoders::{CustomDecoder, PyDecoder, PyDecoderWrapper}; #[test] fn get_subtype() { Python::with_gil(|py| { let py_dec = PyDecoder::new(Metaspace::default().into()); let py_meta = py_dec.get_as_subtype(py).unwrap(); assert_eq!("Metaspace", py_meta.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyDecoderWrapper = Metaspace::default().into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = DecoderWrapper::Metaspace(Metaspace::default()); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_dec: PyDecoder = serde_json::from_str(&rs_ser).unwrap(); match py_dec.decoder { PyDecoderWrapper::Wrapped(msp) => match *msp.as_ref().read().unwrap() { DecoderWrapper::Metaspace(_) => {} _ => panic!("Expected Metaspace"), }, _ => panic!("Expected wrapped, not custom."), } let obj = Python::with_gil(|py| { let py_msp = PyDecoder::new(Metaspace::default().into()); let obj: PyObject = Py::new(py, py_msp).unwrap().into_py(py); obj }); let py_seq = PyDecoderWrapper::Custom(Arc::new(RwLock::new(CustomDecoder::new(obj)))); assert!(serde_json::to_string(&py_seq).is_err()); } }
0
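A quick illustration of how the decoders defined in the file above are used from Python. This is a minimal sketch based only on the constructors and the `decode` method exposed in `decoders.rs`; the expected outputs shown in comments are inferred from the WordPiece and ByteFallback/Fuse decoding rules, not taken from the repository.

from tokenizers import decoders

# WordPiece merges "##"-prefixed continuation subwords back into words
wp = decoders.WordPiece(prefix="##", cleanup=True)
print(wp.decode(["Hug", "##ging", "Face"]))  # -> "Hugging Face"

# Decoders can be chained with Sequence; ByteFallback turns <0xNN> tokens into
# raw bytes, and Fuse then glues every remaining piece into a single string
chained = decoders.Sequence([decoders.ByteFallback(), decoders.Fuse()])
print(chained.decode(["<0x61>", "<0x62>", "c"]))  # -> "abc"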
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/iterators.rs
use pyo3::prelude::*; use pyo3::AsPyPointer; use std::collections::VecDeque; /// An simple iterator that can be instantiated with a specified length. /// We use this with iterators that don't have a size_hint but we might /// know its size. This is useful with progress bars for example. pub struct MaybeSizedIterator<I> { length: Option<usize>, iter: I, } impl<I> MaybeSizedIterator<I> where I: Iterator, { pub fn new(iter: I, length: Option<usize>) -> Self { Self { length, iter } } } impl<I> Iterator for MaybeSizedIterator<I> where I: Iterator, { type Item = I::Item; fn next(&mut self) -> Option<Self::Item> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { (self.length.unwrap_or(0), None) } } /// A buffered iterator that takes care of locking the GIL only when needed. /// The `PyIterator` provided by PyO3 keeps a Python GIL token all along /// and thus doesn't allow us to release the GIL to allow having other threads. /// /// This iterator serves two purposes: /// - First, as opposed to the `pyo3::PyIterator`, it is Send and can easily be parallelized /// - Second, this let us release the GIL between two refills of the buffer, allowing other /// Python threads to work pub struct PyBufferedIterator<T, F> { iter: Option<Py<PyAny>>, converter: F, buffer: VecDeque<PyResult<T>>, size: usize, } impl<T, F, I> PyBufferedIterator<T, F> where F: Fn(&PyAny) -> I, I: IntoIterator<Item = PyResult<T>>, { /// Create a new PyBufferedIterator using the provided Python object. /// This object must implement the Python Iterator Protocol, and an error will /// be return if the contract is not respected. /// /// The `converter` provides a way to convert each item in the iterator into /// something that doesn't embed a 'py token and thus allows the GIL to be released /// /// The `buffer_size` represents the number of items that we buffer before we /// need to acquire the GIL again. pub fn new(iter: &PyAny, converter: F, buffer_size: usize) -> PyResult<Self> { let py = iter.py(); let iter: Py<PyAny> = unsafe { py.from_borrowed_ptr_or_err::<PyAny>(pyo3::ffi::PyObject_GetIter(iter.as_ptr()))? .to_object(py) }; Ok(Self { iter: Some(iter), converter, buffer: VecDeque::with_capacity(buffer_size), size: buffer_size, }) } /// Refill the buffer, and set `self.iter` as `None` if nothing more to get fn refill(&mut self) -> PyResult<()> { if self.iter.is_none() { return Ok(()); } Python::with_gil(|py| loop { if self.buffer.len() >= self.size { return Ok(()); } match unsafe { py.from_owned_ptr_or_opt::<PyAny>(pyo3::ffi::PyIter_Next( self.iter.as_ref().unwrap().as_ref(py).as_ptr(), )) } { Some(obj) => self.buffer.extend((self.converter)(obj)), None => { if PyErr::occurred(py) { return Err(PyErr::fetch(py)); } else { self.iter = None; } } }; if self.iter.is_none() { return Ok(()); } }) } } impl<T, F, I> Iterator for PyBufferedIterator<T, F> where F: Fn(&PyAny) -> I, I: IntoIterator<Item = PyResult<T>>, { type Item = PyResult<T>; fn next(&mut self) -> Option<Self::Item> { if !self.buffer.is_empty() { self.buffer.pop_front() } else if self.iter.is_some() { if let Err(e) = self.refill() { return Some(Err(e)); } self.next() } else { None } } }
0
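The buffered iterator above is internal plumbing, but its effect is visible from Python: entry points that consume Python iterators, such as `Tokenizer.train_from_iterator` defined elsewhere in these bindings, can accept a plain generator, and an explicit `length` can be supplied for progress reporting since a generator has no size hint (this is what `MaybeSizedIterator` carries). A hedged sketch; the trainer settings and corpus below are placeholders.

from tokenizers import Tokenizer, models, trainers

corpus = ["first line of text", "second line of text"]  # placeholder data

tokenizer = Tokenizer(models.BPE())
trainer = trainers.BpeTrainer(vocab_size=100)

def line_iterator():
    for line in corpus:
        yield line

# `length` is optional and only feeds the progress bar
tokenizer.train_from_iterator(line_iterator(), trainer=trainer, length=len(corpus))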
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/regex.rs
use onig::Regex; use pyo3::exceptions; use pyo3::prelude::*; /// Instantiate a new Regex with the given pattern #[pyclass(module = "tokenizers", name = "Regex")] pub struct PyRegex { pub inner: Regex, pub pattern: String, } #[pymethods] impl PyRegex { #[new] #[pyo3(text_signature = "(self, pattern)")] fn new(s: &str) -> PyResult<Self> { Ok(Self { inner: Regex::new(s) .map_err(|e| exceptions::PyException::new_err(e.description().to_owned()))?, pattern: s.to_owned(), }) } }
0
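`Regex` is mostly useful as the pattern argument of other components. A small sketch, assuming the `normalizers.Replace` component exported elsewhere in these bindings; the pattern-to-component wiring itself is visible in `normalization.rs` below, where `PyPattern` accepts either a `str` or a `tokenizers.Regex`.

from tokenizers import Regex, normalizers

# Wrapping the pattern in Regex makes it a real oniguruma regular expression on
# the Rust side; a bare str is treated as a plain string pattern instead.
collapse_whitespace = normalizers.Replace(Regex(r"\s+"), " ")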
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/normalization.rs
use super::regex::PyRegex; use super::{DestroyPtr, RefMutContainer, RefMutGuard}; use crate::error::ToPyResult; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use tk::normalizer::{char_to_bytes, NormalizedString, Range, SplitDelimiterBehavior}; use tk::pattern::Pattern; /// Represents a Pattern as used by `NormalizedString` #[derive(Clone, FromPyObject)] pub enum PyPattern<'p> { #[pyo3(annotation = "str")] Str(&'p str), #[pyo3(annotation = "tokenizers.Regex")] Regex(Py<PyRegex>), // TODO: Add the compatibility for Fn(char) -> bool } impl Pattern for PyPattern<'_> { fn find_matches(&self, inside: &str) -> tk::Result<Vec<(tk::Offsets, bool)>> { match self { PyPattern::Str(s) => { let mut chars = s.chars(); if let (Some(c), None) = (chars.next(), chars.next()) { c.find_matches(inside) } else { s.find_matches(inside) } } PyPattern::Regex(r) => { Python::with_gil(|py| (&r.borrow(py).inner).find_matches(inside)) } } } } impl From<PyPattern<'_>> for tk::normalizers::replace::ReplacePattern { fn from(pattern: PyPattern<'_>) -> Self { match pattern { PyPattern::Str(s) => Self::String(s.to_owned()), PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())), } } } impl From<PyPattern<'_>> for tk::pre_tokenizers::split::SplitPattern { fn from(pattern: PyPattern<'_>) -> Self { match pattern { PyPattern::Str(s) => Self::String(s.to_owned()), PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())), } } } #[derive(Debug, Clone, FromPyObject)] pub enum PyRange<'s> { #[pyo3(annotation = "int")] Single(isize), #[pyo3(annotation = "Tuple[uint, uint]")] Range(usize, usize), #[pyo3(annotation = "slice")] Slice(&'s PySlice), } impl PyRange<'_> { pub fn to_range(&self, max_len: usize) -> PyResult<std::ops::Range<usize>> { match self { PyRange::Single(i) => { if i.is_negative() { let i = -i as usize; if i > max_len { Err(exceptions::PyValueError::new_err(format!( "{} is bigger than max len", i ))) } else { Ok(max_len - i..max_len - i + 1) } } else { let i = *i as usize; Ok(i..i + 1) } } PyRange::Range(s, e) => Ok(*s..*e), PyRange::Slice(s) => { let r = s.indices(max_len as std::os::raw::c_long)?; Ok(r.start as usize..r.stop as usize) } } } } #[derive(Clone)] pub struct PySplitDelimiterBehavior(pub SplitDelimiterBehavior); impl FromPyObject<'_> for PySplitDelimiterBehavior { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "removed" => Ok(SplitDelimiterBehavior::Removed), "isolated" => Ok(SplitDelimiterBehavior::Isolated), "merged_with_previous" => Ok(SplitDelimiterBehavior::MergedWithPrevious), "merged_with_next" => Ok(SplitDelimiterBehavior::MergedWithNext), "contiguous" => Ok(SplitDelimiterBehavior::Contiguous), _ => Err(exceptions::PyValueError::new_err( "Wrong value for SplitDelimiterBehavior, expected one of: \ `removed, isolated, merged_with_previous, merged_with_next, contiguous`", )), }?)) } } impl From<PySplitDelimiterBehavior> for SplitDelimiterBehavior { fn from(v: PySplitDelimiterBehavior) -> Self { v.0 } } fn filter(normalized: &mut NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`filter` expect a callable with the signature: `fn(char) -> bool`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.filter(|c| { func.call1((c.to_string(),)) .expect(err) .extract() .expect(err) }); Ok(()) } } fn for_each(normalized: &NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`for_each` expect a callable with the signature: 
`fn(char)`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.for_each(|c| { func.call1((c.to_string(),)).expect(err); }); Ok(()) } } fn map(normalized: &mut NormalizedString, func: &PyAny) -> PyResult<()> { let err = "`map` expect a callable with the signature: `fn(char) -> char`"; if !func.is_callable() { Err(exceptions::PyTypeError::new_err(err)) } else { normalized.map(|c| { let c: &str = func .call1((c.to_string(),)) .expect(err) .extract() .expect(err); c.chars().next().expect(err) }); Ok(()) } } fn slice( normalized: &NormalizedString, range: &PyRange<'_>, ) -> PyResult<Option<PyNormalizedString>> { let n_char = normalized.len(); let char_range = range.to_range(n_char)?; Ok( char_to_bytes(normalized.get(), char_range).and_then(|bytes_range| { normalized .slice(Range::Normalized(bytes_range)) .map(|n| n.into()) }), ) } /// NormalizedString /// /// A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. /// While making all the requested modifications, it keeps track of the alignment information /// between the two versions of the string. /// /// Args: /// sequence: str: /// The string sequence used to initialize this NormalizedString #[pyclass(module = "tokenizers", name = "NormalizedString")] #[derive(Clone)] pub struct PyNormalizedString { pub(crate) normalized: NormalizedString, } #[pymethods] impl PyNormalizedString { #[new] #[pyo3(text_signature = None)] fn new(s: &str) -> Self { NormalizedString::from(s).into() } /// The normalized part of the string #[getter] fn get_normalized(&self) -> &str { self.normalized.get() } #[getter] fn get_original(&self) -> &str { self.normalized.get_original() } /// Runs the NFD normalization #[pyo3(text_signature = "(self)")] fn nfd(&mut self) { self.normalized.nfd(); } /// Runs the NFKD normalization #[pyo3(text_signature = "(self)")] fn nfkd(&mut self) { self.normalized.nfkd(); } /// Runs the NFC normalization #[pyo3(text_signature = "(self)")] fn nfc(&mut self) { self.normalized.nfc(); } /// Runs the NFKC normalization #[pyo3(text_signature = "(self)")] fn nfkc(&mut self) { self.normalized.nfkc(); } /// Lowercase the string #[pyo3(text_signature = "(self)")] fn lowercase(&mut self) { self.normalized.lowercase(); } /// Uppercase the string #[pyo3(text_signature = "(self)")] fn uppercase(&mut self) { self.normalized.uppercase(); } /// Prepend the given sequence to the string #[pyo3(text_signature = "(self, s)")] fn prepend(&mut self, s: &str) { self.normalized.prepend(s); } /// Append the given sequence to the string #[pyo3(text_signature = "(self, s)")] fn append(&mut self, s: &str) { self.normalized.append(s); } /// Strip the left of the string #[pyo3(text_signature = "(self)")] fn lstrip(&mut self) { self.normalized.lstrip(); } /// Strip the right of the string #[pyo3(text_signature = "(self)")] fn rstrip(&mut self) { self.normalized.rstrip(); } /// Strip both ends of the string #[pyo3(text_signature = "(self)")] fn strip(&mut self) { self.normalized.strip(); } /// Clears the string #[pyo3(text_signature = "(self)")] fn clear(&mut self) { self.normalized.clear(); } /// Slice the string using the given range #[pyo3(text_signature = "(self, range)")] fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> { slice(&self.normalized, &range) } /// Filter each character of the string using the given func #[pyo3(text_signature = "(self, func)")] fn filter(&mut self, func: &PyAny) -> PyResult<()> { filter(&mut self.normalized, func) } /// Calls the given 
function for each character of the string #[pyo3(text_signature = "(self, func)")] fn for_each(&self, func: &PyAny) -> PyResult<()> { for_each(&self.normalized, func) } /// Calls the given function for each character of the string /// /// Replaces each character of the string using the returned value. Each /// returned value **must** be a str of length 1 (ie a character). #[pyo3(text_signature = "(self, func)")] fn map(&mut self, func: &PyAny) -> PyResult<()> { map(&mut self.normalized, func) } /// Split the NormalizedString using the given pattern and the specified behavior /// /// Args: /// pattern: Pattern: /// A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` /// /// behavior: SplitDelimiterBehavior: /// The behavior to use when splitting. /// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", /// "contiguous" /// /// Returns: /// A list of NormalizedString, representing each split #[pyo3(text_signature = "(self, pattern, behavior)")] fn split( &mut self, pattern: PyPattern, behavior: PySplitDelimiterBehavior, ) -> PyResult<Vec<PyNormalizedString>> { Ok(ToPyResult(self.normalized.split(pattern, behavior.into())) .into_py()? .into_iter() .map(|n| n.into()) .collect()) } /// Replace the content of the given pattern with the provided content /// /// Args: /// pattern: Pattern: /// A pattern used to match the string. Usually a string or a Regex /// /// content: str: /// The content to be used as replacement #[pyo3(text_signature = "(self, pattern, content)")] fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> { ToPyResult(self.normalized.replace(pattern, content)).into() } fn __repr__(&self) -> String { format!( r#"NormalizedString(original="{}", normalized="{}")"#, self.normalized.get_original(), self.normalized.get() ) } fn __str__(&self) -> &str { self.normalized.get() } fn __getitem__(&self, range: PyRange<'_>) -> PyResult<Option<PyNormalizedString>> { slice(&self.normalized, &range) } } impl From<NormalizedString> for PyNormalizedString { fn from(normalized: NormalizedString) -> Self { Self { normalized } } } impl From<PyNormalizedString> for NormalizedString { fn from(normalized: PyNormalizedString) -> Self { normalized.normalized } } #[pyclass(module = "tokenizers", name = "NormalizedStringRefMut")] #[derive(Clone)] pub struct PyNormalizedStringRefMut { inner: RefMutContainer<NormalizedString>, } impl DestroyPtr for PyNormalizedStringRefMut { fn destroy(&mut self) { self.inner.destroy(); } } impl PyNormalizedStringRefMut { pub fn new(normalized: &mut NormalizedString) -> RefMutGuard<Self> { RefMutGuard::new(Self { inner: RefMutContainer::new(normalized), }) } pub fn destroyed_error() -> PyErr { exceptions::PyException::new_err("Cannot use a NormalizedStringRefMut outside `normalize`") } /// Provides a way to access a reference to the underlying NormalizedString pub fn map_as_ref<F: FnOnce(&NormalizedString) -> U, U>(&self, f: F) -> PyResult<U> { self.inner .map(f) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } /// Provides a way to access a mutable reference to the underlying NormalizedString pub fn map_as_mut<F: FnOnce(&mut NormalizedString) -> U, U>(&mut self, f: F) -> PyResult<U> { self.inner .map_mut(f) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } } #[pymethods] impl PyNormalizedStringRefMut { #[getter] fn get_normalized(&self) -> PyResult<String> { self.inner .map(|n| n.get().to_owned()) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } #[getter] fn 
get_original(&self) -> PyResult<String> { self.inner .map(|n| n.get_original().to_owned()) .ok_or_else(PyNormalizedStringRefMut::destroyed_error) } fn nfd(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfd(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfkd(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfkd(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfc(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfc(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn nfkc(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.nfkc(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn lowercase(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.lowercase(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn uppercase(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.uppercase(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn prepend(&mut self, s: &str) -> PyResult<()> { self.inner .map_mut(|n| { n.prepend(s); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn append(&mut self, s: &str) -> PyResult<()> { self.inner .map_mut(|n| { n.append(s); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn lstrip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.lstrip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn rstrip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.rstrip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn strip(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.strip(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn clear(&mut self) -> PyResult<()> { self.inner .map_mut(|n| { n.clear(); }) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?; Ok(()) } fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> { self.inner .map(|n| slice(n, &range)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)? } fn filter(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|n| filter(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn for_each(&self, func: &PyAny) -> PyResult<()> { self.inner .map(|n| for_each(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn map(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|n| map(n, func)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)??; Ok(()) } fn split( &mut self, pattern: PyPattern, behavior: PySplitDelimiterBehavior, ) -> PyResult<Vec<PyNormalizedString>> { Ok(ToPyResult( self.inner .map_mut(|n| n.split(pattern, behavior.into())) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?, ) .into_py()? .into_iter() .map(|n| n.into()) .collect()) } fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> { ToPyResult( self.inner .map_mut(|n| n.replace(pattern, content)) .ok_or_else(PyNormalizedStringRefMut::destroyed_error)?, ) .into() } }
0
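A short Python sketch of the `NormalizedString` API defined above: normalize, filter with a Python callable, replace with a pattern, and keep the original/normalized alignment. The printed values in the comments are what this NFD + filter pipeline should produce.

from tokenizers import NormalizedString, Regex

n = NormalizedString("Héllo   World")
n.nfd()                                               # decompose "é" into "e" + U+0301
n.filter(lambda c: not (0x0300 <= ord(c) <= 0x036F))  # drop combining marks
n.replace(Regex(r"\s+"), " ")                         # patterns can be str or Regex
n.lowercase()

print(n.normalized)  # "hello world"
print(n.original)    # "Héllo   World" (alignment to the original is kept)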
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/pretokenization.rs
use tokenizers as tk; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use super::{ DestroyPtr, PyNormalizedString, PyNormalizedStringRefMut, RefMutContainer, RefMutGuard, }; use crate::encoding::PyEncoding; use crate::error::ToPyResult; use crate::token::PyToken; use tk::{OffsetReferential, OffsetType, Offsets, PreTokenizedString, Token}; fn split(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`split` expect a callable with the signature: \ `fn(index: int, normalized: NormalizedString) -> List[NormalizedString]`", )) } else { ToPyResult(pretok.split(|i, normalized| { let output = func.call((i, PyNormalizedString::from(normalized)), None)?; Ok(output .extract::<Vec<PyNormalizedString>>()? .into_iter() .map(tk::NormalizedString::from)) })) .into() } } fn normalize(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`normalize` expect a callable with the signature: \ `fn(normalized: NormalizedString)`", )) } else { ToPyResult(pretok.normalize(|normalized| { let norm = PyNormalizedStringRefMut::new(normalized); func.call((norm.get(),), None)?; Ok(()) })) .into() } } fn tokenize(pretok: &mut PreTokenizedString, func: &PyAny) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`tokenize` expect a callable with the signature: \ `fn(str) -> List[Token]`", )) } else { ToPyResult(pretok.tokenize(|normalized| { let output = func.call((normalized.get(),), None)?; Ok(output .extract::<&PyList>()? .into_iter() .map(|obj| Ok(Token::from(obj.extract::<PyToken>()?))) .collect::<PyResult<Vec<_>>>()?) })) .into() } } /// This is an enum #[derive(Clone)] pub struct PyOffsetReferential(OffsetReferential); impl FromPyObject<'_> for PyOffsetReferential { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "original" => Ok(OffsetReferential::Original), "normalized" => Ok(OffsetReferential::Normalized), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetReferential, expected one of `original, normalized`", )), }?)) } } #[derive(Clone)] pub struct PyOffsetType(OffsetType); impl FromPyObject<'_> for PyOffsetType { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "byte" => Ok(OffsetType::Byte), "char" => Ok(OffsetType::Char), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetType, expected one of `byte, char`", )), }?)) } } type PySplit = (String, Offsets, Option<Vec<PyToken>>); fn get_splits( pretok: &PreTokenizedString, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { pretok .get_splits(offset_referential.0, offset_type.0) .into_iter() .map(|(s, o, t)| { ( s.to_owned(), o, t.as_ref() .map(|tokens| tokens.iter().map(|t| t.clone().into()).collect()), ) }) .collect() } fn to_encoding( pretok: &PreTokenizedString, type_id: u32, word_idx: Option<u32>, ) -> PyResult<PyEncoding> { Ok(ToPyResult( pretok .clone() .into_encoding(word_idx, type_id, tk::OffsetType::Char), ) .into_py()? .into()) } /// PreTokenizedString /// /// Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the /// underlying string, while keeping track of the alignment information (offsets). /// /// The PreTokenizedString manages what we call `splits`. Each split represents a substring /// which is a subpart of the original string, with the relevant offsets and tokens. 
/// /// When calling one of the methods used to modify the PreTokenizedString (namely one of /// `split`, `normalize` or `tokenize), only the `splits` that don't have any associated /// tokens will get modified. /// /// Args: /// sequence: str: /// The string sequence used to initialize this PreTokenizedString #[pyclass(module = "tokenizers", name = "PreTokenizedString")] pub struct PyPreTokenizedString { pub(crate) pretok: tk::PreTokenizedString, } impl From<PreTokenizedString> for PyPreTokenizedString { fn from(pretok: PreTokenizedString) -> Self { Self { pretok } } } impl From<PyPreTokenizedString> for PreTokenizedString { fn from(pretok: PyPreTokenizedString) -> Self { pretok.pretok } } #[pymethods] impl PyPreTokenizedString { #[new] #[pyo3(text_signature = "(self, sequence)")] fn new(s: &str) -> Self { PreTokenizedString::from(s).into() } /// Split the PreTokenizedString using the given `func` /// /// Args: /// func: Callable[[index, NormalizedString], List[NormalizedString]]: /// The function used to split each underlying split. /// It is expected to return a list of `NormalizedString`, that represent the new /// splits. If the given `NormalizedString` does not need any splitting, we can /// just return it directly. /// In order for the offsets to be tracked accurately, any returned `NormalizedString` /// should come from calling either `.split` or `.slice` on the received one. #[pyo3(text_signature = "(self, func)")] fn split(&mut self, func: &PyAny) -> PyResult<()> { split(&mut self.pretok, func) } /// Normalize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[NormalizedString], None]: /// The function used to normalize each underlying split. This function /// does not need to return anything, just calling the methods on the provided /// NormalizedString allow its modification. #[pyo3(text_signature = "(self, func)")] fn normalize(&mut self, func: &PyAny) -> PyResult<()> { normalize(&mut self.pretok, func) } /// Tokenize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[str], List[Token]]: /// The function used to tokenize each underlying split. This function must return /// a list of Token generated from the input str. #[pyo3(text_signature = "(self, func)")] fn tokenize(&mut self, func: &PyAny) -> PyResult<()> { tokenize(&mut self.pretok, func) } /// Return an Encoding generated from this PreTokenizedString /// /// Args: /// type_id: int = 0: /// The type_id to be used on the generated Encoding. /// /// word_idx: Optional[int] = None: /// An optional word index to be used for each token of this Encoding. If provided, /// all the word indices in the generated Encoding will use this value, instead /// of the one automatically tracked during pre-tokenization. /// /// Returns: /// An Encoding #[pyo3(signature = (type_id = 0, word_idx = None))] #[pyo3(text_signature = "(self, type_id=0, word_idx=None)")] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { to_encoding(&self.pretok, type_id, word_idx) } /// Get the splits currently managed by the PreTokenizedString /// /// Args: /// offset_referential: :obj:`str` /// Whether the returned splits should have offsets expressed relative /// to the original string, or the normalized one. choices: "original", "normalized". /// /// offset_type: :obj:`str` /// Whether the returned splits should have offsets expressed in bytes or chars. /// When slicing an str, we usually want to use chars, which is the default value. 
/// Now in some cases it might be interesting to get these offsets expressed in bytes, /// so it is possible to change this here. /// choices: "char", "bytes" /// /// Returns /// A list of splits #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] #[pyo3(text_signature = "(self, offset_referential=\"original\", offset_type=\"char\")")] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { get_splits(&self.pretok, offset_referential, offset_type) } } #[pyclass(module = "tokenizers", name = "PreTokenizedString")] #[derive(Clone)] pub struct PyPreTokenizedStringRefMut { inner: RefMutContainer<PreTokenizedString>, } impl DestroyPtr for PyPreTokenizedStringRefMut { fn destroy(&mut self) { self.inner.destroy(); } } impl PyPreTokenizedStringRefMut { pub fn new(pretok: &mut tk::PreTokenizedString) -> RefMutGuard<Self> { // SAFETY: This is safe because we return a RefMutGuard here. // The compiler will make sure the &mut stays valid as necessary. RefMutGuard::new(Self { inner: RefMutContainer::new(pretok), }) } pub fn destroyed_error() -> PyErr { exceptions::PyException::new_err( "Cannot use a PreTokenizedStringRefMut outside `pre_tokenize`", ) } } #[pymethods] impl PyPreTokenizedStringRefMut { fn split(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|pretok| split(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn normalize(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|pretok| normalize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn tokenize(&mut self, func: &PyAny) -> PyResult<()> { self.inner .map_mut(|pretok| tokenize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = (type_id = 0, word_idx = None))] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { self.inner .map(|pretok| to_encoding(pretok, type_id, word_idx)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> PyResult<Vec<PySplit>> { self.inner .map(|pretok| get_splits(pretok, offset_referential, offset_type)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error) } }
0
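A minimal sketch of driving `PreTokenizedString` from Python with the callables described above; the printed splits in the comment are what whitespace splitting with the "removed" behavior should yield.

from tokenizers import PreTokenizedString

pretok = PreTokenizedString("Hello world")

# `split` receives (index, NormalizedString) and must return a list of NormalizedString
pretok.split(lambda i, ns: ns.split(" ", "removed"))

# offsets are expressed in the original string, in chars, by default
print(pretok.get_splits())  # [("Hello", (0, 5), None), ("world", (6, 11), None)]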
hf_public_repos/tokenizers/bindings/python/src
hf_public_repos/tokenizers/bindings/python/src/utils/mod.rs
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; mod iterators; mod normalization; mod pretokenization; mod regex; pub use iterators::*; pub use normalization::*; pub use pretokenization::*; pub use regex::*; // PyChar // This type is a temporary hack to accept `char` as argument // To be removed once https://github.com/PyO3/pyo3/pull/1282 has been released pub struct PyChar(pub char); impl FromPyObject<'_> for PyChar { fn extract(obj: &PyAny) -> PyResult<Self> { let s = <PyString as PyTryFrom<'_>>::try_from(obj)?.to_str()?; let mut iter = s.chars(); if let (Some(ch), None) = (iter.next(), iter.next()) { Ok(Self(ch)) } else { Err(exceptions::PyValueError::new_err( "expected a string of length 1", )) } } } // RefMut utils pub trait DestroyPtr { fn destroy(&mut self); } pub struct RefMutGuard<'r, T: DestroyPtr + Clone> { content: T, r: PhantomData<&'r mut T>, } impl<T: DestroyPtr + Clone> RefMutGuard<'_, T> { pub fn new(content: T) -> Self { Self { content, r: PhantomData, } } pub fn get(&self) -> T { self.content.clone() } } impl<T: DestroyPtr + Clone> Drop for RefMutGuard<'_, T> { fn drop(&mut self) { self.content.destroy() } } #[derive(Clone)] pub struct RefMutContainer<T> { inner: Arc<Mutex<Option<*mut T>>>, } impl<T> RefMutContainer<T> { pub fn new(content: &mut T) -> Self { Self { inner: Arc::new(Mutex::new(Some(content))), } } pub fn map<F: FnOnce(&T) -> U, U>(&self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_ref().unwrap() })) } pub fn map_mut<F: FnOnce(&mut T) -> U, U>(&mut self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_mut().unwrap() })) } } impl<T> DestroyPtr for RefMutContainer<T> { fn destroy(&mut self) { self.inner.lock().unwrap().take(); } } unsafe impl<T: Send> Send for RefMutContainer<T> {} unsafe impl<T: Sync> Sync for RefMutContainer<T> {}
0
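`PyChar` above is what enforces single-character arguments on the Python side; the effect is easy to see with any component that takes a char, such as the Metaspace decoder from `decoders.rs`. A small sketch; the error message comes from the `PyValueError` raised in this file, and the exact exception surfaced to Python is an assumption here.

from tokenizers import decoders

decoders.Metaspace(replacement="▁")  # fine: exactly one character

try:
    decoders.Metaspace(replacement="__")
except ValueError as e:
    print(e)  # "expected a string of length 1"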
hf_public_repos/tokenizers/bindings/python/py_src
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.py
from enum import Enum from typing import List, Tuple, Union Offsets = Tuple[int, int] TextInputSequence = str """A :obj:`str` that represents an input sequence """ PreTokenizedInputSequence = Union[List[str], Tuple[str]] """A pre-tokenized input sequence. Can be one of: - A :obj:`List` of :obj:`str` - A :obj:`Tuple` of :obj:`str` """ TextEncodeInput = Union[ TextInputSequence, Tuple[TextInputSequence, TextInputSequence], List[TextInputSequence], ] """Represents a textual input for encoding. Can be either: - A single sequence: :data:`~tokenizers.TextInputSequence` - A pair of sequences: - A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence` - Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2 """ PreTokenizedEncodeInput = Union[ PreTokenizedInputSequence, Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence], List[PreTokenizedInputSequence], ] """Represents a pre-tokenized input for encoding. Can be either: - A single sequence: :data:`~tokenizers.PreTokenizedInputSequence` - A pair of sequences: - A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence` - Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2 """ InputSequence = Union[TextInputSequence, PreTokenizedInputSequence] """Represents all the possible types of input sequences for encoding. Can be: - When ``is_pretokenized=False``: :data:`~TextInputSequence` - When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence` """ EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput] """Represents all the possible types of input for encoding. Can be: - When ``is_pretokenized=False``: :data:`~TextEncodeInput` - When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput` """ class OffsetReferential(Enum): ORIGINAL = "original" NORMALIZED = "normalized" class OffsetType(Enum): BYTE = "byte" CHAR = "char" class SplitDelimiterBehavior(Enum): REMOVED = "removed" ISOLATED = "isolated" MERGED_WITH_PREVIOUS = "merged_with_previous" MERGED_WITH_NEXT = "merged_with_next" CONTIGUOUS = "contiguous" from .tokenizers import ( AddedToken, Encoding, NormalizedString, PreTokenizedString, Regex, Token, Tokenizer, decoders, models, normalizers, pre_tokenizers, processors, trainers, __version__, ) from .implementations import ( BertWordPieceTokenizer, ByteLevelBPETokenizer, CharBPETokenizer, SentencePieceBPETokenizer, SentencePieceUnigramTokenizer, )
0
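The enums above mirror the plain strings that the Rust layer accepts, so their values can be handed straight to the lower-level helpers; a short sketch using `NormalizedString.split` from `normalization.rs`.

from tokenizers import NormalizedString, SplitDelimiterBehavior

ns = NormalizedString("state-of-the-art")

# the binding expects the string value carried by the enum
parts = ns.split("-", SplitDelimiterBehavior.ISOLATED.value)
print([str(p) for p in parts])  # ['state', '-', 'of', '-', 'the', '-', 'art']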
hf_public_repos/tokenizers/bindings/python/py_src
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.pyi
# Generated content DO NOT EDIT class AddedToken: """ Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`. It can have special options that defines the way it should behave. Args: content (:obj:`str`): The content of the token single_word (:obj:`bool`, defaults to :obj:`False`): Defines whether this token should only match single words. If :obj:`True`, this token will never match inside of a word. For example the token ``ing`` would match on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`. The notion of "`inside of a word`" is defined by the word boundaries pattern in regular expressions (ie. the token should start and end with word boundaries). lstrip (:obj:`bool`, defaults to :obj:`False`): Defines whether this token should strip all potential whitespaces on its left side. If :obj:`True`, this token will greedily match any whitespace on its left. For example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left). rstrip (:obj:`bool`, defaults to :obj:`False`): Defines whether this token should strip all potential whitespaces on its right side. If :obj:`True`, this token will greedily match any whitespace on its right. It works just like :obj:`lstrip` but on the right. normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): Defines whether this token should match against the normalized version of the input text. For example, with the added token ``"yesterday"``, and a normalizer in charge of lowercasing the text, the token could be extract from the input ``"I saw a lion Yesterday"``. special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): Defines whether this token should be skipped when decoding. """ def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False): pass @property def content(self): """ Get the content of this :obj:`AddedToken` """ pass @property def lstrip(self): """ Get the value of the :obj:`lstrip` option """ pass @property def normalized(self): """ Get the value of the :obj:`normalized` option """ pass @property def rstrip(self): """ Get the value of the :obj:`rstrip` option """ pass @property def single_word(self): """ Get the value of the :obj:`single_word` option """ pass @property def special(self): """ Get the value of the :obj:`special` option """ pass class Encoding: """ The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. """ @property def attention_mask(self): """ The attention mask This indicates to the LM which tokens should be attended to, and which should not. This is especially important when batching sequences, where we need to applying padding. Returns: :obj:`List[int]`: The attention mask """ pass def char_to_token(self, char_pos, sequence_index=0): """ Get the token that contains the char at the given position in the input sequence. 
Args: char_pos (:obj:`int`): The position of a char in the input string sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target char Returns: :obj:`int`: The index of the token that contains this char in the encoded sequence """ pass def char_to_word(self, char_pos, sequence_index=0): """ Get the word that contains the char at the given position in the input sequence. Args: char_pos (:obj:`int`): The position of a char in the input string sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target char Returns: :obj:`int`: The index of the word that contains this char in the input sequence """ pass @property def ids(self): """ The generated IDs The IDs are the main input to a Language Model. They are the token indices, the numerical representations that a LM understands. Returns: :obj:`List[int]`: The list of IDs """ pass @staticmethod def merge(encodings, growing_offsets=True): """ Merge the list of encodings into one final :class:`~tokenizers.Encoding` Args: encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): The list of encodings that should be merged in one growing_offsets (:obj:`bool`, defaults to :obj:`True`): Whether the offsets should accumulate while merging Returns: :class:`~tokenizers.Encoding`: The resulting Encoding """ pass @property def n_sequences(self): """ The number of sequences represented Returns: :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` """ pass @property def offsets(self): """ The offsets associated to each token These offsets let's you slice the input string, and thus retrieve the original part that led to producing the corresponding token. Returns: A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets """ pass @property def overflowing(self): """ A :obj:`List` of overflowing :class:`~tokenizers.Encoding` When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting the output into as many pieces as required to match the specified maximum length. This field lets you retrieve all the subsequent pieces. When you use pairs of sequences, the overflowing pieces will contain enough variations to cover all the possible combinations, while respecting the provided maximum length. """ pass def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"): """ Pad the :class:`~tokenizers.Encoding` at the given length Args: length (:obj:`int`): The desired length direction: (:obj:`str`, defaults to :obj:`right`): The expected padding direction. Can be either :obj:`right` or :obj:`left` pad_id (:obj:`int`, defaults to :obj:`0`): The ID corresponding to the padding token pad_type_id (:obj:`int`, defaults to :obj:`0`): The type ID corresponding to the padding token pad_token (:obj:`str`, defaults to `[PAD]`): The pad token to use """ pass @property def sequence_ids(self): """ The generated sequence indices. They represent the index of the input sequence associated to each token. The sequence id can be None if the token is not related to any input sequence, like for example with special tokens. Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. """ pass def set_sequence_id(self, sequence_id): """ Set the given sequence index Set the given sequence index for the whole range of tokens contained in this :class:`~tokenizers.Encoding`. """ pass @property def special_tokens_mask(self): """ The special token mask This indicates which tokens are special tokens, and which are not. 
Returns: :obj:`List[int]`: The special tokens mask """ pass def token_to_chars(self, token_index): """ Get the offsets of the token at the given index. The returned offsets are related to the input sequence that contains the token. In order to determine in which input sequence it belongs, you must call :meth:`~tokenizers.Encoding.token_to_sequence()`. Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` """ pass def token_to_sequence(self, token_index): """ Get the index of the sequence represented by the given token. In the general use case, this method returns :obj:`0` for a single sequence or the first sequence of a pair, and :obj:`1` for the second sequence of a pair Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`int`: The sequence id of the given token """ pass def token_to_word(self, token_index): """ Get the index of the word that contains the token in one of the input sequences. The returned word index is related to the input sequence that contains the token. In order to determine in which input sequence it belongs, you must call :meth:`~tokenizers.Encoding.token_to_sequence()`. Args: token_index (:obj:`int`): The index of a token in the encoded sequence. Returns: :obj:`int`: The index of the word in the relevant input sequence. """ pass @property def tokens(self): """ The generated tokens They are the string representation of the IDs. Returns: :obj:`List[str]`: The list of tokens """ pass def truncate(self, max_length, stride=0, direction="right"): """ Truncate the :class:`~tokenizers.Encoding` at the given length If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating this information is lost. It will be considered as representing a single sequence. Args: max_length (:obj:`int`): The desired length stride (:obj:`int`, defaults to :obj:`0`): The length of previous content to be included in each overflowing piece direction (:obj:`str`, defaults to :obj:`right`): Truncate direction """ pass @property def type_ids(self): """ The generated type IDs Generally used for tasks like sequence classification or question answering, these tokens let the LM know which input sequence corresponds to each tokens. Returns: :obj:`List[int]`: The list of type ids """ pass @property def word_ids(self): """ The generated word indices. They represent the index of the word associated to each token. When the input is pre-tokenized, they correspond to the ID of the given input label, otherwise they correspond to the words indices as defined by the :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. For special tokens and such (any token that was generated from something that was not part of the input), the output is :obj:`None` Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. """ pass def word_to_chars(self, word_index, sequence_index=0): """ Get the offsets of the word at the given index in one of the input sequences. Args: word_index (:obj:`int`): The index of a word in one of the input sequences. sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target word Returns: :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` """ pass def word_to_tokens(self, word_index, sequence_index=0): """ Get the encoded tokens corresponding to the word at the given index in one of the input sequences. 
Args: word_index (:obj:`int`): The index of a word in one of the input sequences. sequence_index (:obj:`int`, defaults to :obj:`0`): The index of the sequence that contains the target word Returns: :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` """ pass @property def words(self): """ The generated word indices. .. warning:: This is deprecated and will be removed in a future version. Please use :obj:`~tokenizers.Encoding.word_ids` instead. They represent the index of the word associated to each token. When the input is pre-tokenized, they correspond to the ID of the given input label, otherwise they correspond to the words indices as defined by the :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. For special tokens and such (any token that was generated from something that was not part of the input), the output is :obj:`None` Returns: A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. """ pass class NormalizedString: """ NormalizedString A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one. While making all the requested modifications, it keeps track of the alignment information between the two versions of the string. Args: sequence: str: The string sequence used to initialize this NormalizedString """ def append(self, s): """ Append the given sequence to the string """ pass def clear(self): """ Clears the string """ pass def filter(self, func): """ Filter each character of the string using the given func """ pass def for_each(self, func): """ Calls the given function for each character of the string """ pass def lowercase(self): """ Lowercase the string """ pass def lstrip(self): """ Strip the left of the string """ pass def map(self, func): """ Calls the given function for each character of the string Replaces each character of the string using the returned value. Each returned value **must** be a str of length 1 (ie a character). """ pass def nfc(self): """ Runs the NFC normalization """ pass def nfd(self): """ Runs the NFD normalization """ pass def nfkc(self): """ Runs the NFKC normalization """ pass def nfkd(self): """ Runs the NFKD normalization """ pass @property def normalized(self): """ The normalized part of the string """ pass def prepend(self, s): """ Prepend the given sequence to the string """ pass def replace(self, pattern, content): """ Replace the content of the given pattern with the provided content Args: pattern: Pattern: A pattern used to match the string. Usually a string or a Regex content: str: The content to be used as replacement """ pass def rstrip(self): """ Strip the right of the string """ pass def slice(self, range): """ Slice the string using the given range """ pass def split(self, pattern, behavior): """ Split the NormalizedString using the given pattern and the specified behavior Args: pattern: Pattern: A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex` behavior: SplitDelimiterBehavior: The behavior to use when splitting. Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", "contiguous" Returns: A list of NormalizedString, representing each split """ pass def strip(self): """ Strip both ends of the string """ pass def uppercase(self): """ Uppercase the string """ pass class PreTokenizedString: """ PreTokenizedString Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the underlying string, while keeping track of the alignment information (offsets). 
The PreTokenizedString manages what we call `splits`. Each split represents a substring which is a subpart of the original string, with the relevant offsets and tokens. When calling one of the methods used to modify the PreTokenizedString (namely one of `split`, `normalize` or `tokenize), only the `splits` that don't have any associated tokens will get modified. Args: sequence: str: The string sequence used to initialize this PreTokenizedString """ def __init__(self, sequence): pass def get_splits(self, offset_referential="original", offset_type="char"): """ Get the splits currently managed by the PreTokenizedString Args: offset_referential: :obj:`str` Whether the returned splits should have offsets expressed relative to the original string, or the normalized one. choices: "original", "normalized". offset_type: :obj:`str` Whether the returned splits should have offsets expressed in bytes or chars. When slicing an str, we usually want to use chars, which is the default value. Now in some cases it might be interesting to get these offsets expressed in bytes, so it is possible to change this here. choices: "char", "bytes" Returns A list of splits """ pass def normalize(self, func): """ Normalize each split of the `PreTokenizedString` using the given `func` Args: func: Callable[[NormalizedString], None]: The function used to normalize each underlying split. This function does not need to return anything, just calling the methods on the provided NormalizedString allow its modification. """ pass def split(self, func): """ Split the PreTokenizedString using the given `func` Args: func: Callable[[index, NormalizedString], List[NormalizedString]]: The function used to split each underlying split. It is expected to return a list of `NormalizedString`, that represent the new splits. If the given `NormalizedString` does not need any splitting, we can just return it directly. In order for the offsets to be tracked accurately, any returned `NormalizedString` should come from calling either `.split` or `.slice` on the received one. """ pass def to_encoding(self, type_id=0, word_idx=None): """ Return an Encoding generated from this PreTokenizedString Args: type_id: int = 0: The type_id to be used on the generated Encoding. word_idx: Optional[int] = None: An optional word index to be used for each token of this Encoding. If provided, all the word indices in the generated Encoding will use this value, instead of the one automatically tracked during pre-tokenization. Returns: An Encoding """ pass def tokenize(self, func): """ Tokenize each split of the `PreTokenizedString` using the given `func` Args: func: Callable[[str], List[Token]]: The function used to tokenize each underlying split. This function must return a list of Token generated from the input str. """ pass class Regex: """ Instantiate a new Regex with the given pattern """ def __init__(self, pattern): pass class Token: pass class Tokenizer: """ A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input and outputs an :class:`~tokenizers.Encoding`. Args: model (:class:`~tokenizers.models.Model`): The core algorithm that this :obj:`Tokenizer` should be using. """ def __init__(self, model): pass def add_special_tokens(self, tokens): """ Add the given special tokens to the Tokenizer. If these tokens are already part of the vocabulary, it just let the Tokenizer know about them. If they don't exist, the Tokenizer creates them, giving them a new id. 
These special tokens will never be processed by the model (ie won't be split into multiple tokens), and they can be removed from the output when decoding. Args: tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): The list of special tokens we want to add to the vocabulary. Each token can either be a string or an instance of :class:`~tokenizers.AddedToken` for more customization. Returns: :obj:`int`: The number of tokens that were created in the vocabulary """ pass def add_tokens(self, tokens): """ Add the given tokens to the vocabulary The given tokens are added only if they don't already exist in the vocabulary. Each token then gets a new attributed id. Args: tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): The list of tokens we want to add to the vocabulary. Each token can be either a string or an instance of :class:`~tokenizers.AddedToken` for more customization. Returns: :obj:`int`: The number of tokens that were created in the vocabulary """ pass def decode(self, ids, skip_special_tokens=True): """ Decode the given list of ids back to a string This is used to decode anything coming back from a Language Model Args: ids (A :obj:`List/Tuple` of :obj:`int`): The list of ids that we want to decode skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether the special tokens should be removed from the decoded string Returns: :obj:`str`: The decoded string """ pass def decode_batch(self, sequences, skip_special_tokens=True): """ Decode a batch of ids back to their corresponding string Args: sequences (:obj:`List` of :obj:`List[int]`): The batch of sequences we want to decode skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether the special tokens should be removed from the decoded strings Returns: :obj:`List[str]`: A list of decoded strings """ pass @property def decoder(self): """ The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer """ pass def enable_padding( self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None ): """ Enable the padding Args: direction (:obj:`str`, `optional`, defaults to :obj:`right`): The direction in which to pad. Can be either ``right`` or ``left`` pad_to_multiple_of (:obj:`int`, `optional`): If specified, the padding length should always snap to the next multiple of the given value. For example if we were going to pad witha length of 250 but ``pad_to_multiple_of=8`` then we will pad to 256. pad_id (:obj:`int`, defaults to 0): The id to be used when padding pad_type_id (:obj:`int`, defaults to 0): The type id to be used when padding pad_token (:obj:`str`, defaults to :obj:`[PAD]`): The pad token to be used when padding length (:obj:`int`, `optional`): If specified, the length at which to pad. If not specified we pad using the size of the longest sequence in a batch. """ pass def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"): """ Enable truncation Args: max_length (:obj:`int`): The max length at which to truncate stride (:obj:`int`, `optional`): The length of the previous first sequence to be included in the overflowing sequence strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`): The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or ``only_second``. 
direction (:obj:`str`, defaults to :obj:`right`): Truncate direction """ pass def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True): """ Encode the given sequence and pair. This method can process raw text sequences as well as already pre-tokenized sequences. Example: Here are some examples of the inputs that are accepted:: encode("A single sequence")` encode("A sequence", "And its pair")` encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` encode( [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], is_pretokenized=True ) Args: sequence (:obj:`~tokenizers.InputSequence`): The main input sequence we want to encode. This sequence can be either raw text or pre-tokenized, according to the ``is_pretokenized`` argument: - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` pair (:obj:`~tokenizers.InputSequence`, `optional`): An optional input sequence. The expected format is the same that for ``sequence``. is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Whether the input is already pre-tokenized add_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to add the special tokens Returns: :class:`~tokenizers.Encoding`: The encoded result """ pass def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True): """ Encode the given batch of inputs. This method accept both raw text sequences as well as already pre-tokenized sequences. Example: Here are some examples of the inputs that are accepted:: encode_batch([ "A single sequence", ("A tuple with a sequence", "And its pair"), [ "A", "pre", "tokenized", "sequence" ], ([ "A", "pre", "tokenized", "sequence" ], "And its pair") ]) Args: input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): A list of single sequences or pair sequences to encode. Each sequence can be either raw text or pre-tokenized, according to the ``is_pretokenized`` argument: - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Whether the input is already pre-tokenized add_special_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to add the special tokens Returns: A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch """ pass @staticmethod def from_buffer(buffer): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. Args: buffer (:obj:`bytes`): A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_file(path): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. Args: path (:obj:`str`): A path to a local JSON file representing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_pretrained(identifier, revision="main", auth_token=None): """ Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the Hugging Face Hub. 
Args: identifier (:obj:`str`): The identifier of a Model on the Hugging Face Hub, that contains a tokenizer.json file revision (:obj:`str`, defaults to `main`): A branch or commit id auth_token (:obj:`str`, `optional`, defaults to `None`): An optional auth token used to access private repositories on the Hugging Face Hub Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass @staticmethod def from_str(json): """ Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. Args: json (:obj:`str`): A valid JSON string representing a previously serialized :class:`~tokenizers.Tokenizer` Returns: :class:`~tokenizers.Tokenizer`: The new tokenizer """ pass def get_added_tokens_decoder(self): """ Get the underlying vocabulary Returns: :obj:`Dict[int, AddedToken]`: The vocabulary """ pass def get_vocab(self, with_added_tokens=True): """ Get the underlying vocabulary Args: with_added_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to include the added tokens Returns: :obj:`Dict[str, int]`: The vocabulary """ pass def get_vocab_size(self, with_added_tokens=True): """ Get the size of the underlying vocabulary Args: with_added_tokens (:obj:`bool`, defaults to :obj:`True`): Whether to include the added tokens Returns: :obj:`int`: The size of the vocabulary """ pass def id_to_token(self, id): """ Convert the given id to its corresponding token if it exists Args: id (:obj:`int`): The id to convert Returns: :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary """ pass @property def model(self): """ The :class:`~tokenizers.models.Model` in use by the Tokenizer """ pass def no_padding(self): """ Disable padding """ pass def no_truncation(self): """ Disable truncation """ pass @property def normalizer(self): """ The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer """ pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. :param is_pair: Boolean indicating if the input would be a single sentence or a pair :return: """ pass @property def padding(self): """ Get the current padding parameters `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` Returns: (:obj:`dict`, `optional`): A dict with the current padding parameters if padding is enabled """ pass def post_process(self, encoding, pair=None, add_special_tokens=True): """ Apply all the post-processing steps to the given encodings. The various steps are: 1. Truncate according to the set truncation params (provided with :meth:`~tokenizers.Tokenizer.enable_truncation`) 2. Apply the :class:`~tokenizers.processors.PostProcessor` 3. Pad according to the set padding params (provided with :meth:`~tokenizers.Tokenizer.enable_padding`) Args: encoding (:class:`~tokenizers.Encoding`): The :class:`~tokenizers.Encoding` corresponding to the main sequence. pair (:class:`~tokenizers.Encoding`, `optional`): An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. 
add_special_tokens (:obj:`bool`): Whether to add the special tokens Returns: :class:`~tokenizers.Encoding`: The final post-processed encoding """ pass @property def post_processor(self): """ The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer """ pass @property def pre_tokenizer(self): """ The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer """ pass def save(self, path, pretty=True): """ Save the :class:`~tokenizers.Tokenizer` to the file at the given path. Args: path (:obj:`str`): A path to a file in which to save the serialized tokenizer. pretty (:obj:`bool`, defaults to :obj:`True`): Whether the JSON file should be pretty formatted. """ pass def to_str(self, pretty=False): """ Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. Args: pretty (:obj:`bool`, defaults to :obj:`False`): Whether the JSON string should be pretty formatted. Returns: :obj:`str`: A string representing the serialized Tokenizer """ pass def token_to_id(self, token): """ Convert the given token to its corresponding id if it exists Args: token (:obj:`str`): The token to convert Returns: :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary """ pass def train(self, files, trainer=None): """ Train the Tokenizer using the given files. Reads the files line by line, while keeping all the whitespace, even new lines. If you want to train from data store in-memory, you can check :meth:`~tokenizers.Tokenizer.train_from_iterator` Args: files (:obj:`List[str]`): A list of path to the files that we should use for training trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): An optional trainer that should be used to train our Model """ pass def train_from_iterator(self, iterator, trainer=None, length=None): """ Train the Tokenizer using the provided iterator. You can provide anything that is a Python Iterator * A list of sequences :obj:`List[str]` * A generator that yields :obj:`str` or :obj:`List[str]` * A Numpy array of strings * ... Args: iterator (:obj:`Iterator`): Any iterator over strings or list of strings trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): An optional trainer that should be used to train our Model length (:obj:`int`, `optional`): The total number of sequences in the iterator. This is used to provide meaningful progress tracking """ pass @property def truncation(self): """ Get the currently set truncation parameters `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` Returns: (:obj:`dict`, `optional`): A dict with the current truncation parameters if truncation is enabled """ pass
0
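The stub file above documents the full `Tokenizer` and `Encoding` surface. As a quick illustration of how those pieces fit together, here is a minimal sketch; the `tokenizer.json` path and the example sentences are hypothetical placeholders, not part of the repository.

```python
# Minimal sketch of the Tokenizer / Encoding API documented in the stub above.
# "tokenizer.json" and the example sentences are hypothetical placeholders.
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("tokenizer.json")       # a previously serialized tokenizer
tokenizer.enable_padding(pad_token="[PAD]", pad_id=0)   # pad to the longest sequence in a batch
tokenizer.enable_truncation(max_length=128)

encoding = tokenizer.encode("A sequence", "And its pair")  # raw text plus an optional pair
print(encoding.tokens)                 # string representation of the ids
print(encoding.type_ids)               # 0 for the first sequence, 1 for the pair
print(encoding.token_to_chars(1))      # (first, last + 1) offsets of token 1
print(encoding.token_to_sequence(1))   # which input sequence token 1 belongs to

text = tokenizer.decode(encoding.ids, skip_special_tokens=True)
```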
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.py
# Generated content DO NOT EDIT
from .. import models

Model = models.Model
BPE = models.BPE
Unigram = models.Unigram
WordLevel = models.WordLevel
WordPiece = models.WordPiece
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi
# Generated content DO NOT EDIT class Model: """ Base class for all models The model represents the actual tokenization algorithm. This is the part that will contain and manage the learned vocabulary. This class cannot be constructed directly. Please use one of the concrete models. """ def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class BPE(Model): """ An implementation of the BPE (Byte-Pair Encoding) algorithm Args: vocab (:obj:`Dict[str, int]`, `optional`): A dictionnary of string keys and their ids :obj:`{"am": 0,...}` merges (:obj:`List[Tuple[str, str]]`, `optional`): A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]` cache_capacity (:obj:`int`, `optional`): The number of words that the BPE cache can contain. The cache allows to speed-up the process by keeping the result of the merge operations for a number of words. dropout (:obj:`float`, `optional`): A float between 0 and 1 that represents the BPE dropout to use. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. continuing_subword_prefix (:obj:`str`, `optional`): The prefix to attach to subword units that don't represent a beginning of word. end_of_word_suffix (:obj:`str`, `optional`): The suffix to attach to subword units that represent an end of word. fuse_unk (:obj:`bool`, `optional`): Whether to fuse any subsequent unknown tokens into a single one byte_fallback (:obj:`bool`, `optional`): Whether to use spm byte-fallback trick (defaults to False) """ def __init__( self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False, ): pass @staticmethod def from_file(cls, vocab, merge, **kwargs): """ Instantiate a BPE model from the given files. 
This method is roughly equivalent to doing:: vocab, merges = BPE.read_file(vocab_filename, merges_filename) bpe = BPE(vocab, merges) If you don't need to keep the :obj:`vocab, merges` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE` Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file merges (:obj:`str`): The path to a :obj:`merges.txt` file Returns: :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(self, vocab, merges): """ Read a :obj:`vocab.json` and a :obj:`merges.txt` files This method provides a way to read and parse the content of these files, returning the relevant data structures. If you want to instantiate some BPE models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file merges (:obj:`str`): The path to a :obj:`merges.txt` file Returns: A :obj:`Tuple` with the vocab and the merges: The vocabulary and merges loaded into memory """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class Unigram(Model): """ An implementation of the Unigram algorithm Args: vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`): A list of vocabulary items and their relative score [("am", -0.2442),...] """ def __init__(self, vocab, unk_id, byte_fallback): pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. 
Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class WordLevel(Model): """ An implementation of the WordLevel algorithm Most simple tokenizer model based on mapping tokens to their corresponding id. Args: vocab (:obj:`str`, `optional`): A dictionnary of string keys and their ids :obj:`{"am": 0,...}` unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. """ def __init__(self, vocab, unk_token): pass @staticmethod def from_file(vocab, unk_token): """ Instantiate a WordLevel model from the given file This method is roughly equivalent to doing:: vocab = WordLevel.read_file(vocab_filename) wordlevel = WordLevel(vocab) If you don't need to keep the :obj:`vocab` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to initialize a :class:`~tokenizers.models.WordLevel` Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file Returns: :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(vocab): """ Read a :obj:`vocab.json` This method provides a way to read and parse the content of a vocabulary file, returning the relevant data structures. If you want to instantiate some WordLevel models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file Returns: :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. 
Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class WordPiece(Model): """ An implementation of the WordPiece algorithm Args: vocab (:obj:`Dict[str, int]`, `optional`): A dictionnary of string keys and their ids :obj:`{"am": 0,...}` unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. max_input_chars_per_word (:obj:`int`, `optional`): The maximum number of characters to authorize in a single word. """ def __init__(self, vocab, unk_token, max_input_chars_per_word): pass @staticmethod def from_file(vocab, **kwargs): """ Instantiate a WordPiece model from the given file This method is roughly equivalent to doing:: vocab = WordPiece.read_file(vocab_filename) wordpiece = WordPiece(vocab) If you don't need to keep the :obj:`vocab` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to initialize a :class:`~tokenizers.models.WordPiece` Args: vocab (:obj:`str`): The path to a :obj:`vocab.txt` file Returns: :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(vocab): """ Read a :obj:`vocab.txt` file This method provides a way to read and parse the content of a standard `vocab.txt` file as used by the WordPiece Model, returning the relevant data structures. If you want to instantiate some WordPiece models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.txt` file Returns: :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass
0
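As a sketch of how the models documented above are constructed and attached to a `Tokenizer`: the tiny in-memory vocabulary and the `vocab.txt` path below are hypothetical placeholders.

```python
# Sketch of instantiating the models documented above; the tiny vocabulary and
# the "vocab.txt" path are hypothetical placeholders.
from tokenizers import Tokenizer
from tokenizers.models import BPE, WordPiece

# From in-memory structures
bpe = BPE(vocab={"a": 0, "b": 1, "ab": 2}, merges=[("a", "b")])

# Or from the standard files, which avoids keeping the parsed vocab around
wordpiece = WordPiece.from_file("vocab.txt", unk_token="[UNK]")

tokenizer = Tokenizer(bpe)
print(bpe.tokenize("ab"))  # a list of Token objects produced by the model
```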
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py
# Generated content DO NOT EDIT
from .. import processors

PostProcessor = processors.PostProcessor
BertProcessing = processors.BertProcessing
ByteLevel = processors.ByteLevel
RobertaProcessing = processors.RobertaProcessing
Sequence = processors.Sequence
TemplateProcessing = processors.TemplateProcessing
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi
# Generated content DO NOT EDIT class PostProcessor: """ Base class for all post-processors This class is not supposed to be instantiated directly. Instead, any implementation of a PostProcessor will return an instance of this class when instantiated. """ def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class BertProcessing(PostProcessor): """ This post-processor takes care of adding the special tokens needed by a Bert model: - a SEP token - a CLS token Args: sep (:obj:`Tuple[str, int]`): A tuple with the string representation of the SEP token, and its id cls (:obj:`Tuple[str, int]`): A tuple with the string representation of the CLS token, and its id """ def __init__(self, sep, cls): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class ByteLevel(PostProcessor): """ This post-processor takes care of trimming the offsets. By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't want the offsets to include these whitespaces, then this PostProcessor must be used. Args: trim_offsets (:obj:`bool`): Whether to trim the whitespaces from the produced offsets. """ def __init__(self, trim_offsets=True): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class RobertaProcessing(PostProcessor): """ This post-processor takes care of adding the special tokens needed by a Roberta model: - a SEP token - a CLS token It also takes care of trimming the offsets. By default, the ByteLevel BPE might include whitespaces in the produced tokens. 
If you don't want the offsets to include these whitespaces, then this PostProcessor should be initialized with :obj:`trim_offsets=True` Args: sep (:obj:`Tuple[str, int]`): A tuple with the string representation of the SEP token, and its id cls (:obj:`Tuple[str, int]`): A tuple with the string representation of the CLS token, and its id trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to trim the whitespaces from the produced offsets. add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether the add_prefix_space option was enabled during pre-tokenization. This is relevant because it defines the way the offsets are trimmed out. """ def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class Sequence(PostProcessor): """ Sequence Processor Args: processors (:obj:`List[PostProcessor]`) The processors that need to be chained """ def __init__(self, processors): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class TemplateProcessing(PostProcessor): """ Provides a way to specify templates in order to add the special tokens to each input sequence as relevant. Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair sequences. The final result looks like this: - Single sequence: :obj:`[CLS] Hello there [SEP]` - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]` With the type ids as following:: [CLS] ... [SEP] ... [SEP] 0 0 0 1 1 You can achieve such behavior using a TemplateProcessing:: TemplateProcessing( single="[CLS] $0 [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) In this example, each input sequence is identified using a ``$`` construct. This identifier lets us specify each input sequence, and the type_id to use. When nothing is specified, it uses the default values. 
Here are the different ways to specify it: - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B`` - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ... - Specifying both: ``$A:0``, ``$B:1``, ... The same construct is used for special tokens: ``<identifier>(:<type_id>)?``. **Warning**: You must ensure that you are giving the correct tokens/ids as these will be added to the Encoding without any further check. If the given ids correspond to something totally different in a `Tokenizer` using this `PostProcessor`, it might lead to unexpected results. Args: single (:obj:`Template`): The template used for single sequences pair (:obj:`Template`): The template used when both sequences are specified special_tokens (:obj:`Tokens`): The list of special tokens used in each sequences Types: Template (:obj:`str` or :obj:`List`): - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens - If a :obj:`List[str]` is provided, a list of tokens Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`): - A :obj:`Tuple` with both a token and its associated ID, in any order - A :obj:`dict` with the following keys: - "id": :obj:`str` => The special token id, as specified in the Template - "ids": :obj:`List[int]` => The associated IDs - "tokens": :obj:`List[str]` => The associated tokens The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have the same length. """ def __init__(self, single, pair, special_tokens): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass
0
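The `TemplateProcessing` docstring above translates directly into running code; a short sketch follows. The special-token ids (1 for `[CLS]`, 2 for `[SEP]`) are assumptions and must match the ids in the actual vocabulary.

```python
# Sketch of the BERT-style template described in the TemplateProcessing docstring above.
# The ids 1 ([CLS]) and 2 ([SEP]) are assumptions; they must match the tokenizer's vocabulary.
from tokenizers.processors import TemplateProcessing

post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)

# Attached to a tokenizer, it adds the special tokens and type ids after encoding:
# tokenizer.post_processor = post_processor
```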
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py
from .. import decoders

Decoder = decoders.Decoder
ByteLevel = decoders.ByteLevel
Replace = decoders.Replace
WordPiece = decoders.WordPiece
ByteFallback = decoders.ByteFallback
Fuse = decoders.Fuse
Strip = decoders.Strip
Metaspace = decoders.Metaspace
BPEDecoder = decoders.BPEDecoder
CTC = decoders.CTC
Sequence = decoders.Sequence
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.pyi
# Generated content DO NOT EDIT class Decoder: """ Base class for all decoders This class is not supposed to be instantiated directly. Instead, any implementation of a Decoder will return an instance of this class when instantiated. """ def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class BPEDecoder(Decoder): """ BPEDecoder Decoder Args: suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`): The suffix that was used to caracterize an end-of-word. This suffix will be replaced by whitespaces during the decoding """ def __init__(self, suffix="</w>"): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class ByteFallback(Decoder): """ ByteFallback Decoder ByteFallback is a simple trick which converts tokens looking like `<0x61>` to pure bytes, and attempts to make them into a string. If the tokens cannot be decoded you will get � instead for each inconvertable byte token """ def __init__(self): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class ByteLevel(Decoder): """ ByteLevel Decoder This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel` :class:`~tokenizers.pre_tokenizers.PreTokenizer`. """ def __init__(self): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class CTC(Decoder): """ CTC Decoder Args: pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`): The pad token used by CTC to delimit a new token. word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`): The word delimiter token. It will be replaced by a <space> cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation, and some abbreviated english forms. """ def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Fuse(Decoder): """ Fuse Decoder Fuse simply fuses every token into a single string. This is the last step of decoding, this decoder exists only if there is need to add other decoders *after* the fusion """ def __init__(self): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Metaspace(Decoder): """ Metaspace Decoder Args: replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): The replacement character. Must be exactly one character. By default we use the `▁` (U+2581) meta symbol (Same as in SentencePiece). add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to add a space to the first word if there isn't already one. This lets us treat `hello` exactly like `say hello`. 
""" def __init__(self, replacement="▁", add_prefix_space=True): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Replace(Decoder): """ Replace Decoder This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace` :class:`~tokenizers.pre_tokenizers.PreTokenizer`. """ def __init__(self, pattern, content): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Sequence(Decoder): """ Sequence Decoder Args: decoders (:obj:`List[Decoder]`) The decoders that need to be chained """ def __init__(self, decoders): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class Strip(Decoder): """ Strip normalizer Strips n left characters of each token, or n right characters of each token """ def __init__(self, content, left=0, right=0): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass class WordPiece(Decoder): """ WordPiece Decoder Args: prefix (:obj:`str`, `optional`, defaults to :obj:`##`): The prefix to use for subwords that are not a beginning-of-word cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation, and some abbreviated english forms. """ def __init__(self, prefix="##", cleanup=True): pass def decode(self, tokens): """ Decode the given list of tokens to a final string Args: tokens (:obj:`List[str]`): The list of tokens to decode Returns: :obj:`str`: The decoded string """ pass
0
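A short sketch of using and chaining the decoders documented above; the token list is a hypothetical WordPiece output.

```python
# Sketch of using and chaining the decoders documented above; the token list is
# a hypothetical WordPiece output.
from tokenizers import decoders

decoder = decoders.WordPiece(prefix="##", cleanup=True)
print(decoder.decode(["un", "##want", "##ed"]))  # -> "unwanted"

# Decoders can be chained with Sequence, e.g. byte-fallback followed by fusing tokens:
chained = decoders.Sequence([decoders.ByteFallback(), decoders.Fuse()])
```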
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py
import itertools import os import re from string import Template from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple from tokenizers import Encoding, Tokenizer dirname = os.path.dirname(__file__) css_filename = os.path.join(dirname, "visualizer-styles.css") with open(css_filename) as f: css = f.read() class Annotation: start: int end: int label: int def __init__(self, start: int, end: int, label: str): self.start = start self.end = end self.label = label AnnotationList = List[Annotation] PartialIntList = List[Optional[int]] class CharStateKey(NamedTuple): token_ix: Optional[int] anno_ix: Optional[int] class CharState: char_ix: Optional[int] def __init__(self, char_ix): self.char_ix = char_ix self.anno_ix: Optional[int] = None self.tokens: List[int] = [] @property def token_ix(self): return self.tokens[0] if len(self.tokens) > 0 else None @property def is_multitoken(self): """ BPE tokenizers can output more than one token for a char """ return len(self.tokens) > 1 def partition_key(self) -> CharStateKey: return CharStateKey( token_ix=self.token_ix, anno_ix=self.anno_ix, ) class Aligned: pass class EncodingVisualizer: """ Build an EncodingVisualizer Args: tokenizer (:class:`~tokenizers.Tokenizer`): A tokenizer instance default_to_notebook (:obj:`bool`): Whether to render html output in a notebook by default annotation_converter (:obj:`Callable`, `optional`): An optional (lambda) function that takes an annotation in any format and returns an Annotation object """ unk_token_regex = re.compile("(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE) def __init__( self, tokenizer: Tokenizer, default_to_notebook: bool = True, annotation_converter: Optional[Callable[[Any], Annotation]] = None, ): if default_to_notebook: try: from IPython.core.display import HTML, display except ImportError as e: raise Exception( """We couldn't import IPython utils for html display. Are you running in a notebook? You can also pass `default_to_notebook=False` to get back raw HTML """ ) self.tokenizer = tokenizer self.default_to_notebook = default_to_notebook self.annotation_coverter = annotation_converter pass def __call__( self, text: str, annotations: AnnotationList = [], default_to_notebook: Optional[bool] = None, ) -> Optional[str]: """ Build a visualization of the given text Args: text (:obj:`str`): The text to tokenize annotations (:obj:`List[Annotation]`, `optional`): An optional list of annotations of the text. The can either be an annotation class or anything else if you instantiated the visualizer with a converter function default_to_notebook (:obj:`bool`, `optional`, defaults to `False`): If True, will render the html in a notebook. Otherwise returns an html string. Returns: The HTML string if default_to_notebook is False, otherwise (default) returns None and renders the HTML in the notebook """ final_default_to_notebook = self.default_to_notebook if default_to_notebook is not None: final_default_to_notebook = default_to_notebook if final_default_to_notebook: try: from IPython.core.display import HTML, display except ImportError as e: raise Exception( """We couldn't import IPython utils for html display. 
Are you running in a notebook?""" ) if self.annotation_coverter is not None: annotations = list(map(self.annotation_coverter, annotations)) encoding = self.tokenizer.encode(text) html = EncodingVisualizer.__make_html(text, encoding, annotations) if final_default_to_notebook: display(HTML(html)) else: return html @staticmethod def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]: """ Generates a color palette for all the labels in a given set of annotations Args: annotations (:obj:`Annotation`): A list of annotations Returns: :obj:`dict`: A dictionary mapping labels to colors in HSL format """ if len(annotations) == 0: return {} labels = set(map(lambda x: x.label, annotations)) num_labels = len(labels) h_step = int(255 / num_labels) if h_step < 20: h_step = 20 s = 32 l = 64 h = 10 colors = {} for label in sorted(labels): # sort so we always get the same colors for a given set of labels colors[label] = f"hsl({h},{s}%,{l}%" h += h_step return colors @staticmethod def consecutive_chars_to_html( consecutive_chars_list: List[CharState], text: str, encoding: Encoding, ): """ Converts a list of "consecutive chars" into a single HTML element. Chars are consecutive if they fall under the same word, token and annotation. The CharState class is a named tuple with a "partition_key" method that makes it easy to compare if two chars are consecutive. Args: consecutive_chars_list (:obj:`List[CharState]`): A list of CharStates that have been grouped together text (:obj:`str`): The original text being processed encoding (:class:`~tokenizers.Encoding`): The encoding returned from the tokenizer Returns: :obj:`str`: The HTML span for a set of consecutive chars """ first = consecutive_chars_list[0] if first.char_ix is None: # its a special token stoken = encoding.tokens[first.token_ix] # special tokens are represented as empty spans. We use the data attribute and css # magic to display it return f'<span class="special-token" data-stoken={stoken}></span>' # We're not in a special token so this group has a start and end. last = consecutive_chars_list[-1] start = first.char_ix end = last.char_ix + 1 span_text = text[start:end] css_classes = [] # What css classes will we apply on the resulting span data_items = {} # What data attributes will we apply on the result span if first.token_ix is not None: # We can either be in a token or not (e.g. in white space) css_classes.append("token") if first.is_multitoken: css_classes.append("multi-token") if first.token_ix % 2: # We use this to color alternating tokens. # A token might be split by an annotation that ends in the middle of it, so this # lets us visually indicate a consecutive token despite its possible splitting in # the html markup css_classes.append("odd-token") else: # Like above, but a different color so we can see the tokens alternate css_classes.append("even-token") if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None: # This is a special token that is in the text. probably UNK css_classes.append("special-token") # TODO is this the right name for the data attribute ? data_items["stok"] = encoding.tokens[first.token_ix] else: # In this case we are looking at a group/single char that is not tokenized. # e.g. 
white space css_classes.append("non-token") css = f'''class="{' '.join(css_classes)}"''' data = "" for key, val in data_items.items(): data += f' data-{key}="{val}"' return f"<span {css} {data} >{span_text}</span>" @staticmethod def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str: char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations) current_consecutive_chars = [char_states[0]] prev_anno_ix = char_states[0].anno_ix spans = [] label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations) cur_anno_ix = char_states[0].anno_ix if cur_anno_ix is not None: # If we started in an annotation make a span for it anno = annotations[cur_anno_ix] label = anno.label color = label_colors_dict[label] spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">') for cs in char_states[1:]: cur_anno_ix = cs.anno_ix if cur_anno_ix != prev_anno_ix: # If we've transitioned in or out of an annotation spans.append( # Create a span from the current consecutive characters EncodingVisualizer.consecutive_chars_to_html( current_consecutive_chars, text=text, encoding=encoding, ) ) current_consecutive_chars = [cs] if prev_anno_ix is not None: # if we transitioned out of an annotation close it's span spans.append("</span>") if cur_anno_ix is not None: # If we entered a new annotation make a span for it anno = annotations[cur_anno_ix] label = anno.label color = label_colors_dict[label] spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">') prev_anno_ix = cur_anno_ix if cs.partition_key() == current_consecutive_chars[0].partition_key(): # If the current charchter is in the same "group" as the previous one current_consecutive_chars.append(cs) else: # Otherwise we make a span for the previous group spans.append( EncodingVisualizer.consecutive_chars_to_html( current_consecutive_chars, text=text, encoding=encoding, ) ) # An reset the consecutive_char_list to form a new group current_consecutive_chars = [cs] # All that's left is to fill out the final span # TODO I think there is an edge case here where an annotation's span might not close spans.append( EncodingVisualizer.consecutive_chars_to_html( current_consecutive_chars, text=text, encoding=encoding, ) ) res = HTMLBody(spans) # Send the list of spans to the body of our html return res @staticmethod def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList: """ Args: text (:obj:`str`): The raw text we want to align to annotations (:obj:`AnnotationList`): A (possibly empty) list of annotations Returns: A list of length len(text) whose entry at index i is None if there is no annotation on charachter i or k, the index of the annotation that covers index i where k is with respect to the list of annotations """ annotation_map = [None] * len(text) for anno_ix, a in enumerate(annotations): for i in range(a.start, a.end): annotation_map[i] = anno_ix return annotation_map @staticmethod def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]: """ For each character in the original text, we emit a tuple representing it's "state": * which token_ix it corresponds to * which word_ix it corresponds to * which annotation_ix it corresponds to Args: text (:obj:`str`): The raw text we want to align to annotations (:obj:`List[Annotation]`): A (possibly empty) list of annotations encoding: (:class:`~tokenizers.Encoding`): The encoding returned from the tokenizer Returns: :obj:`List[CharState]`: A list of 
CharStates, indicating for each char in the text what it's state is """ annotation_map = EncodingVisualizer.__make_anno_map(text, annotations) # Todo make this a dataclass or named tuple char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))] for token_ix, token in enumerate(encoding.tokens): offsets = encoding.token_to_chars(token_ix) if offsets is not None: start, end = offsets for i in range(start, end): char_states[i].tokens.append(token_ix) for char_ix, anno_ix in enumerate(annotation_map): char_states[char_ix].anno_ix = anno_ix return char_states def HTMLBody(children: List[str], css_styles=css) -> str: """ Generates the full html with css from a list of html spans Args: children (:obj:`List[str]`): A list of strings, assumed to be html elements css_styles (:obj:`str`, `optional`): Optional alternative implementation of the css Returns: :obj:`str`: An HTML string with style markup """ children_text = "".join(children) return f""" <html> <head> <style> {css_styles} </style> </head> <body> <div class="tokenized-text" dir=auto> {children_text} </div> </body> </html> """
0
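To show how the `EncodingVisualizer` above is meant to be driven, here is a sketch; the `tokenizer.json` path, the sample text, and the annotation spans are hypothetical.

```python
# Sketch of driving the EncodingVisualizer defined above; "tokenizer.json", the
# sample text and the annotation spans are hypothetical placeholders.
from tokenizers import Tokenizer
from tokenizers.tools import Annotation, EncodingVisualizer

tokenizer = Tokenizer.from_file("tokenizer.json")
visualizer = EncodingVisualizer(tokenizer, default_to_notebook=False)

text = "Hugging Face is based in New York City"
annotations = [
    Annotation(start=0, end=12, label="ORG"),
    Annotation(start=25, end=38, label="LOC"),
]

html = visualizer(text, annotations=annotations)  # returns the HTML string
```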
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css
.tokenized-text { width:100%; padding:2rem; max-height: 400px; overflow-y: auto; box-sizing:border-box; line-height:4rem; /* Lots of space between lines */ font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace; box-shadow: 2px 2px 2px rgba(0,0,0,0.2); background-color: rgba(0,0,0,0.01); letter-spacing:2px; /* Give some extra separation between chars */ } .non-token{ /* White space and other things the tokenizer ignores*/ white-space: pre; letter-spacing:4px; border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more ovious*/ border-bottom:1px solid #A0A0A0; line-height: 1rem; height: calc(100% - 2px); } .token { white-space: pre; position:relative; color:black; letter-spacing:2px; } .annotation{ white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */ border-radius:4px; position:relative; width:fit-content; } .annotation:before { /*The before holds the text and the after holds the background*/ z-index:1000; /* Make sure this is above the background */ content:attr(data-label); /* The annotations label is on a data attribute */ color:white; position:absolute; font-size:1rem; text-align:center; font-weight:bold; top:1.75rem; line-height:0; left:0; width:100%; padding:0.5rem 0; /* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/ overflow: hidden; white-space: nowrap; text-overflow:ellipsis; } .annotation:after { content:attr(data-label); /* The content defines the width of the annotation*/ position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; /* Nast hack below: We set the annotations color in code because we don't know the colors at css time. But you can't pass a color as a data attribute to get it into the pseudo element (this thing) So to get around that, annotations have the color set on them with a style attribute and then we can get the color with currentColor. 
Annotations wrap tokens and tokens set the color back to black */ background-color: currentColor; } .annotation:hover::after, .annotation:hover::before{ /* When the user hovers over an annotation expand the label to display in full */ min-width: fit-content; } .annotation:hover{ /* Emphasize the annotation start end with a border on hover*/ border-color: currentColor; border: 2px solid; } .special-token:not(:empty){ /* A none empty special token is like UNK (as opposed to CLS which has no representation in the text ) */ position:relative; } .special-token:empty::before{ /* Special tokens that don't have text are displayed as pseudo elements so we dont select them with the mouse*/ content:attr(data-stok); background:#202020; font-size:0.75rem; color:white; margin: 0 0.25rem; padding: 0.25rem; border-radius:4px } .special-token:not(:empty):before { /* Special tokens that have text (UNK) are displayed above the actual text*/ content:attr(data-stok); position:absolute; bottom:1.75rem; min-width:100%; width:100%; height:1rem; line-height:1rem; font-size:1rem; text-align:center; color:white; font-weight:bold; background:#202020; border-radius:10%; } /* We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations instead we apply even and odd class at generation time and color them that way */ .even-token{ background:#DCDCDC ; border: 1px solid #DCDCDC; } .odd-token{ background:#A0A0A0; border: 1px solid #A0A0A0; } .even-token.multi-token,.odd-token.multi-token{ background: repeating-linear-gradient( 45deg, transparent, transparent 1px, #ccc 1px, #ccc 1px ), /* on "bottom" */ linear-gradient( to bottom, #FFB6C1, #999 ); } .multi-token:hover::after { content:"This char has more than 1 token"; /* The content defines the width of the annotation*/ color:white; background-color: black; position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; }
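Note (not part of the original stylesheet): the comment above explains that token colours alternate by assigning even-token/odd-token classes at generation time, because annotations can split the children and defeat :nth-child. A small hypothetical Python sketch of that idea (helper name is illustrative):

def token_span(token_ix: int, token_text: str) -> str:
    # The class is derived from the token index, not from the span's position in the DOM.
    parity = "even-token" if token_ix % 2 == 0 else "odd-token"
    return f'<span class="token {parity}">{token_text}</span>'

spans = [token_span(i, t) for i, t in enumerate(["Hello", ",", " world"])]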
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/__init__.py
from .visualizer import Annotation, EncodingVisualizer
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py
from typing import Dict, Iterator, List, Optional, Union from tokenizers import AddedToken, Tokenizer, decoders, trainers from tokenizers.models import WordPiece from tokenizers.normalizers import BertNormalizer from tokenizers.pre_tokenizers import BertPreTokenizer from tokenizers.processors import BertProcessing from .base_tokenizer import BaseTokenizer class BertWordPieceTokenizer(BaseTokenizer): """Bert WordPiece Tokenizer""" def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, unk_token: Union[str, AddedToken] = "[UNK]", sep_token: Union[str, AddedToken] = "[SEP]", cls_token: Union[str, AddedToken] = "[CLS]", pad_token: Union[str, AddedToken] = "[PAD]", mask_token: Union[str, AddedToken] = "[MASK]", clean_text: bool = True, handle_chinese_chars: bool = True, strip_accents: Optional[bool] = None, lowercase: bool = True, wordpieces_prefix: str = "##", ): if vocab is not None: tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token))) else: tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token))) # Let the tokenizer know about special tokens if they are part of the vocab if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) if tokenizer.token_to_id(str(sep_token)) is not None: tokenizer.add_special_tokens([str(sep_token)]) if tokenizer.token_to_id(str(cls_token)) is not None: tokenizer.add_special_tokens([str(cls_token)]) if tokenizer.token_to_id(str(pad_token)) is not None: tokenizer.add_special_tokens([str(pad_token)]) if tokenizer.token_to_id(str(mask_token)) is not None: tokenizer.add_special_tokens([str(mask_token)]) tokenizer.normalizer = BertNormalizer( clean_text=clean_text, handle_chinese_chars=handle_chinese_chars, strip_accents=strip_accents, lowercase=lowercase, ) tokenizer.pre_tokenizer = BertPreTokenizer() if vocab is not None: sep_token_id = tokenizer.token_to_id(str(sep_token)) if sep_token_id is None: raise TypeError("sep_token not found in the vocabulary") cls_token_id = tokenizer.token_to_id(str(cls_token)) if cls_token_id is None: raise TypeError("cls_token not found in the vocabulary") tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id)) tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix) parameters = { "model": "BertWordPiece", "unk_token": unk_token, "sep_token": sep_token, "cls_token": cls_token, "pad_token": pad_token, "mask_token": mask_token, "clean_text": clean_text, "handle_chinese_chars": handle_chinese_chars, "strip_accents": strip_accents, "lowercase": lowercase, "wordpieces_prefix": wordpieces_prefix, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab: str, **kwargs): vocab = WordPiece.read_file(vocab) return BertWordPieceTokenizer(vocab, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, limit_alphabet: int = 1000, initial_alphabet: List[str] = [], special_tokens: List[Union[str, AddedToken]] = [ "[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", ], show_progress: bool = True, wordpieces_prefix: str = "##", ): """Train the model using the given files""" trainer = trainers.WordPieceTrainer( vocab_size=vocab_size, min_frequency=min_frequency, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, special_tokens=special_tokens, show_progress=show_progress, continuing_subword_prefix=wordpieces_prefix, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: 
Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, limit_alphabet: int = 1000, initial_alphabet: List[str] = [], special_tokens: List[Union[str, AddedToken]] = [ "[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", ], show_progress: bool = True, wordpieces_prefix: str = "##", length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.WordPieceTrainer( vocab_size=vocab_size, min_frequency=min_frequency, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, special_tokens=special_tokens, show_progress=show_progress, continuing_subword_prefix=wordpieces_prefix, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
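Note (not part of the original file): a minimal training sketch for the class above, using a toy in-memory corpus; the corpus, vocabulary size, and frequencies are illustrative.

from tokenizers.implementations import BertWordPieceTokenizer

corpus = ["hello world", "hello tokenizers", "wordpiece splits rare words"]

tokenizer = BertWordPieceTokenizer(lowercase=True)
tokenizer.train_from_iterator(corpus, vocab_size=100, min_frequency=1)

print(tokenizer.encode("hello tokenizers").tokens)
# Note: per __init__ above, the BertProcessing post-processor (which adds [CLS]/[SEP])
# is only attached when a vocab is supplied at construction time.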
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py
import json import os from typing import Iterator, List, Optional, Union, Tuple from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.models import Unigram from .base_tokenizer import BaseTokenizer class SentencePieceUnigramTokenizer(BaseTokenizer): """SentencePiece Unigram Tokenizer Represents the Unigram algorithm, with the pretokenization used by SentencePiece """ def __init__( self, vocab: Optional[List[Tuple[str, float]]] = None, replacement: str = "▁", add_prefix_space: bool = True, ): if vocab is not None: # Let Unigram(..) fail if only one of them is None tokenizer = Tokenizer(Unigram(vocab)) else: tokenizer = Tokenizer(Unigram()) tokenizer.normalizer = normalizers.Sequence( [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")] ) tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) parameters = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(tokenizer, parameters) def train( self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, special_tokens: Optional[List[Union[str, AddedToken]]] = None, initial_alphabet: Optional[List[str]] = None, unk_token: Optional[str] = None, ): """ Train the model using the given files Args: files (:obj:`List[str]`): A list of path to the files that we should use for training vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. """ if special_tokens is None: special_tokens = [] if initial_alphabet is None: initial_alphabet = [] trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, special_tokens: Optional[List[Union[str, AddedToken]]] = None, initial_alphabet: Optional[List[str]] = None, unk_token: Optional[str] = None, length: Optional[int] = None, ): """ Train the model using the given iterator Args: iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`): Any iterator over strings or list of strings vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. 
unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. length (:obj:`int`, `optional`): The total number of sequences in the iterator. This is used to provide meaningful progress tracking """ if special_tokens is None: special_tokens = [] if initial_alphabet is None: initial_alphabet = [] trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, ) @staticmethod def from_spm(filename: str): try: import sys sys.path.append(".") import sentencepiece_model_pb2 as model except Exception: raise Exception( "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." ) m = model.ModelProto() m.ParseFromString(open(filename, "rb").read()) precompiled_charsmap = m.normalizer_spec.precompiled_charsmap vocab = [(piece.piece, piece.score) for piece in m.pieces] unk_id = m.trainer_spec.unk_id model_type = m.trainer_spec.model_type byte_fallback = m.trainer_spec.byte_fallback if model_type != 1: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) replacement = "▁" add_prefix_space = True tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback)) if precompiled_charsmap: tokenizer.normalizer = normalizers.Sequence( [ normalizers.Precompiled(precompiled_charsmap), normalizers.Replace(Regex(" {2,}"), " "), ] ) else: tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")]) tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) parameters = { "model": "SentencePieceUnigram", } obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters) BaseTokenizer.__init__(obj, tokenizer, parameters) return obj
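Note (not part of the original file): a small training sketch mirroring the iterator-based API above; the toy corpus is illustrative, and unk_token is passed together with special_tokens.

from tokenizers.implementations import SentencePieceUnigramTokenizer

corpus = ["HuggingFace tokenizers", "the unigram model keeps the most probable pieces"] * 10

tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(corpus, special_tokens=["<unk>"], unk_token="<unk>")

print(tokenizer.encode("unigram pieces").tokens)  # pieces carry the "▁" replacement at word starts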
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers from ..models import BPE from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str from .base_tokenizer import BaseTokenizer class CharBPETokenizer(BaseTokenizer): """Original BPE Tokenizer Represents the BPE algorithm, as introduced by Rico Sennrich (https://arxiv.org/abs/1508.07909) The defaults settings corresponds to OpenAI GPT BPE tokenizers and differs from the original Sennrich subword-nmt implementation by the following options that you can deactivate: - adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by: * removing any control characters and replacing all whitespaces by the classic one. * handle chinese chars by putting spaces around them. * strip all accents. - spitting on punctuation in addition to whitespaces (deactivate it with `split_on_whitespace_only=True`) """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, unk_token: Union[str, AddedToken] = "<unk>", suffix: str = "</w>", dropout: Optional[float] = None, lowercase: bool = False, unicode_normalizer: Optional[str] = None, bert_normalizer: bool = True, split_on_whitespace_only: bool = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer( BPE( vocab, merges, dropout=dropout, unk_token=str(unk_token), end_of_word_suffix=suffix, ) ) else: tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix)) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) # Check for Unicode normalization first (before everything else) normalizers = [] if unicode_normalizer: normalizers += [unicode_normalizer_from_str(unicode_normalizer)] if bert_normalizer: normalizers += [BertNormalizer(lowercase=False)] if lowercase: normalizers += [Lowercase()] # Create the normalizer structure if len(normalizers) > 0: if len(normalizers) > 1: tokenizer.normalizer = Sequence(normalizers) else: tokenizer.normalizer = normalizers[0] if split_on_whitespace_only: tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit() else: tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix=suffix) parameters = { "model": "BPE", "unk_token": unk_token, "suffix": suffix, "dropout": dropout, "lowercase": lowercase, "unicode_normalizer": unicode_normalizer, "bert_normalizer": bert_normalizer, "split_on_whitespace_only": split_on_whitespace_only, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return CharBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], suffix: Optional[str] = "</w>", show_progress: bool = True, ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, end_of_word_suffix=suffix, show_progress=show_progress, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def 
train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], suffix: Optional[str] = "</w>", show_progress: bool = True, length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, end_of_word_suffix=suffix, show_progress=show_progress, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
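Note (not part of the original file): a minimal sketch of training the character-level BPE above on a toy corpus; all values are illustrative.

from tokenizers.implementations import CharBPETokenizer

corpus = ["lower newer wider", "low new wide", "lowest newest widest"]

tokenizer = CharBPETokenizer(lowercase=True)
tokenizer.train_from_iterator(corpus, vocab_size=120, min_frequency=1)

print(tokenizer.encode("lower newest").tokens)  # word-final subwords carry the "</w>" suffix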
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers from tokenizers.models import BPE from tokenizers.normalizers import NFKC from .base_tokenizer import BaseTokenizer class SentencePieceBPETokenizer(BaseTokenizer): """SentencePiece BPE Tokenizer Represents the BPE algorithm, with the pretokenization used by SentencePiece """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, unk_token: Union[str, AddedToken] = "<unk>", replacement: str = "▁", add_prefix_space: bool = True, dropout: Optional[float] = None, fuse_unk: Optional[bool] = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) else: tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) tokenizer.normalizer = NFKC() tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) parameters = { "model": "SentencePieceBPE", "unk_token": unk_token, "replacement": replacement, "add_prefix_space": add_prefix_space, "dropout": dropout, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return SentencePieceBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
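Note (not part of the original file): the same pattern applied to the SentencePiece-style BPE above; corpus and sizes are illustrative.

from tokenizers.implementations import SentencePieceBPETokenizer

corpus = ["sentencepiece style byte pair encoding", "metaspace pretokenization marks word starts"]

tokenizer = SentencePieceBPETokenizer()
tokenizer.train_from_iterator(corpus, vocab_size=150, min_frequency=1, special_tokens=["<unk>"])

print(tokenizer.encode("sentencepiece bpe").tokens)  # pieces start with "▁" at word boundaries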
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers from tokenizers.models import BPE from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str from .base_tokenizer import BaseTokenizer class ByteLevelBPETokenizer(BaseTokenizer): """ByteLevelBPETokenizer Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None, add_prefix_space: bool = False, lowercase: bool = False, dropout: Optional[float] = None, unicode_normalizer: Optional[str] = None, continuing_subword_prefix: Optional[str] = None, end_of_word_suffix: Optional[str] = None, trim_offsets: bool = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer( BPE( vocab, merges, dropout=dropout, continuing_subword_prefix=continuing_subword_prefix or "", end_of_word_suffix=end_of_word_suffix or "", ) ) else: tokenizer = Tokenizer(BPE()) # Check for Unicode normalization first (before everything else) normalizers = [] if unicode_normalizer: normalizers += [unicode_normalizer_from_str(unicode_normalizer)] if lowercase: normalizers += [Lowercase()] # Create the normalizer structure if len(normalizers) > 0: if len(normalizers) > 1: tokenizer.normalizer = Sequence(normalizers) else: tokenizer.normalizer = normalizers[0] tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets) parameters = { "model": "ByteLevelBPE", "add_prefix_space": add_prefix_space, "lowercase": lowercase, "dropout": dropout, "unicode_normalizer": unicode_normalizer, "continuing_subword_prefix": continuing_subword_prefix, "end_of_word_suffix": end_of_word_suffix, "trim_offsets": trim_offsets, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return ByteLevelBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, show_progress: bool = True, special_tokens: List[Union[str, AddedToken]] = [], ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, show_progress: bool = True, special_tokens: List[Union[str, AddedToken]] = [], length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, show_progress=show_progress, special_tokens=special_tokens, initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
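Note (not part of the original file): a minimal encode/decode sketch for the byte-level BPE above; corpus and vocabulary size are illustrative.

from tokenizers.implementations import ByteLevelBPETokenizer

corpus = ["Byte-level BPE needs no <unk> token", "every byte has a printable representation"]

tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
tokenizer.train_from_iterator(corpus, vocab_size=400, min_frequency=1)

encoding = tokenizer.encode("Byte-level BPE")
print(encoding.tokens)
print(tokenizer.decode(encoding.ids))  # decodes the ids back to text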
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py
from typing import Dict, List, Optional, Tuple, Union from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer from tokenizers.decoders import Decoder from tokenizers.models import Model from tokenizers.normalizers import Normalizer from tokenizers.pre_tokenizers import PreTokenizer from tokenizers.processors import PostProcessor Offsets = Tuple[int, int] class BaseTokenizer: def __init__(self, tokenizer: Tokenizer, parameters=None): self._tokenizer = tokenizer self._parameters = parameters if parameters is not None else {} def __repr__(self): return "Tokenizer(vocabulary_size={}, {})".format( self._tokenizer.get_vocab_size(), ", ".join(k + "=" + str(v) for k, v in self._parameters.items()), ) def num_special_tokens_to_add(self, is_pair: bool) -> int: """ Return the number of special tokens that would be added for single/pair sentences. :param is_pair: Boolean indicating if the input would be a single sentence or a pair :return: """ return self._tokenizer.num_special_tokens_to_add(is_pair) def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]: """Returns the vocabulary Args: with_added_tokens: boolean: Whether to include the added tokens in the vocabulary Returns: The vocabulary """ return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens) def get_added_tokens_decoder(self) -> Dict[int, AddedToken]: """Returns the added reverse vocabulary Returns: The added vocabulary mapping ints to AddedTokens """ return self._tokenizer.get_added_tokens_decoder() def get_vocab_size(self, with_added_tokens: bool = True) -> int: """Return the size of vocabulary, with or without added tokens. Args: with_added_tokens: (`optional`) bool: Whether to count in added special tokens or not Returns: Size of vocabulary """ return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens) def enable_padding( self, direction: Optional[str] = "right", pad_to_multiple_of: Optional[int] = None, pad_id: Optional[int] = 0, pad_type_id: Optional[int] = 0, pad_token: Optional[str] = "[PAD]", length: Optional[int] = None, ): """Change the padding strategy Args: direction: (`optional`) str: Can be one of: `right` or `left` pad_to_multiple_of: (`optional`) unsigned int: If specified, the padding length should always snap to the next multiple of the given value. For example if we were going to pad with a length of 250 but `pad_to_multiple_of=8` then we will pad to 256. pad_id: (`optional`) unsigned int: The indice to be used when padding pad_type_id: (`optional`) unsigned int: The type indice to be used when padding pad_token: (`optional`) str: The pad token to be used when padding length: (`optional`) unsigned int: If specified, the length at which to pad. If not specified we pad using the size of the longest sequence in a batch """ return self._tokenizer.enable_padding( direction=direction, pad_to_multiple_of=pad_to_multiple_of, pad_id=pad_id, pad_type_id=pad_type_id, pad_token=pad_token, length=length, ) def no_padding(self): """Disable padding""" return self._tokenizer.no_padding() @property def padding(self) -> Optional[dict]: """Get the current padding parameters Returns: None if padding is disabled, a dict with the currently set parameters if the padding is enabled. 
""" return self._tokenizer.padding def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"): """Change the truncation options Args: max_length: unsigned int: The maximum length at which to truncate stride: (`optional`) unsigned int: The length of the previous first sequence to be included in the overflowing sequence strategy: (`optional`) str: Can be one of `longest_first`, `only_first` or `only_second` """ return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy) def no_truncation(self): """Disable truncation""" return self._tokenizer.no_truncation() @property def truncation(self) -> Optional[dict]: """Get the current truncation parameters Returns: None if truncation is disabled, a dict with the current truncation parameters if truncation is enabled """ return self._tokenizer.truncation def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int: """Add the given tokens to the vocabulary Args: tokens: List[Union[str, AddedToken]]: A list of tokens to add to the vocabulary. Each token can either be a string, or an instance of AddedToken Returns: The number of tokens that were added to the vocabulary """ return self._tokenizer.add_tokens(tokens) def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int: """Add the given special tokens to the vocabulary, and treat them as special tokens. The special tokens will never be processed by the model, and will be removed while decoding. Args: tokens: List[Union[str, AddedToken]]: A list of special tokens to add to the vocabulary. Each token can either be a string, or an instance of AddedToken Returns: The number of tokens that were added to the vocabulary """ return self._tokenizer.add_special_tokens(special_tokens) def normalize(self, sequence: str) -> str: """Normalize the given sequence Args: sequence: str: The sequence to normalize Returns: The normalized string """ return self._tokenizer.normalize(sequence) def encode( self, sequence: InputSequence, pair: Optional[InputSequence] = None, is_pretokenized: bool = False, add_special_tokens: bool = True, ) -> Encoding: """Encode the given sequence and pair. This method can process raw text sequences as well as already pre-tokenized sequences. Args: sequence: InputSequence: The sequence we want to encode. This sequence can be either raw text or pre-tokenized, according to the `is_pretokenized` argument: - If `is_pretokenized=False`: `InputSequence` is expected to be `str` - If `is_pretokenized=True`: `InputSequence` is expected to be `Union[List[str], Tuple[str]]` is_pretokenized: bool: Whether the input is already pre-tokenized. add_special_tokens: bool: Whether to add the special tokens while encoding. Returns: An Encoding """ if sequence is None: raise ValueError("encode: `sequence` can't be `None`") return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens) def encode_batch( self, inputs: List[EncodeInput], is_pretokenized: bool = False, add_special_tokens: bool = True, ) -> List[Encoding]: """Encode the given inputs. This method accept both raw text sequences as well as already pre-tokenized sequences. Args: inputs: List[EncodeInput]: A list of single sequences or pair sequences to encode. 
Each `EncodeInput` is expected to be of the following form: `Union[InputSequence, Tuple[InputSequence, InputSequence]]` Each `InputSequence` can either be raw text or pre-tokenized, according to the `is_pretokenized` argument: - If `is_pretokenized=False`: `InputSequence` is expected to be `str` - If `is_pretokenized=True`: `InputSequence` is expected to be `Union[List[str], Tuple[str]]` is_pretokenized: bool: Whether the input is already pre-tokenized. add_special_tokens: bool: Whether to add the special tokens while encoding. Returns: A list of Encoding """ if inputs is None: raise ValueError("encode_batch: `inputs` can't be `None`") return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens) def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str: """Decode the given list of ids to a string sequence Args: ids: List[unsigned int]: A list of ids to be decoded skip_special_tokens: (`optional`) boolean: Whether to remove all the special tokens from the output string Returns: The decoded string """ if ids is None: raise ValueError("None input is not valid. Should be a list of integers.") return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens) def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> str: """Decode the list of sequences to a list of string sequences Args: sequences: List[List[unsigned int]]: A list of sequence of ids to be decoded skip_special_tokens: (`optional`) boolean: Whether to remove all the special tokens from the output strings Returns: A list of decoded strings """ if sequences is None: raise ValueError("None input is not valid. Should be list of list of integers.") return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens) def token_to_id(self, token: str) -> Optional[int]: """Convert the given token to its corresponding id Args: token: str: The token to convert Returns: The corresponding id if it exists, None otherwise """ return self._tokenizer.token_to_id(token) def id_to_token(self, id: int) -> Optional[str]: """Convert the given token id to its corresponding string Args: token: id: The token id to convert Returns: The corresponding string if it exists, None otherwise """ return self._tokenizer.id_to_token(id) def save_model(self, directory: str, prefix: Optional[str] = None): """Save the current model to the given directory Args: directory: str: A path to the destination directory prefix: (Optional) str: An optional prefix, used to prefix each file name """ return self._tokenizer.model.save(directory, prefix=prefix) def save(self, path: str, pretty: bool = True): """Save the current Tokenizer at the given path Args: path: str: A path to the destination Tokenizer file """ return self._tokenizer.save(path, pretty) def to_str(self, pretty: bool = False): """Get a serialized JSON version of the Tokenizer as a str Args: pretty: bool: Whether the JSON string should be prettified Returns: str """ return self._tokenizer.to_str(pretty) def post_process( self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True ) -> Encoding: """Apply all the post-processing steps to the given encodings. The various steps are: 1. Truncate according to global params (provided to `enable_truncation`) 2. Apply the PostProcessor 3. Pad according to global params. 
(provided to `enable_padding`) Args: encoding: Encoding: The main Encoding to post process pair: Optional[Encoding]: An optional pair Encoding add_special_tokens: bool: Whether to add special tokens Returns: The resulting Encoding """ return self._tokenizer.post_process(encoding, pair, add_special_tokens) @property def model(self) -> Model: return self._tokenizer.model @model.setter def model(self, model: Model): self._tokenizer.model = model @property def normalizer(self) -> Normalizer: return self._tokenizer.normalizer @normalizer.setter def normalizer(self, normalizer: Normalizer): self._tokenizer.normalizer = normalizer @property def pre_tokenizer(self) -> PreTokenizer: return self._tokenizer.pre_tokenizer @pre_tokenizer.setter def pre_tokenizer(self, pre_tokenizer: PreTokenizer): self._tokenizer.pre_tokenizer = pre_tokenizer @property def post_processor(self) -> PostProcessor: return self._tokenizer.post_processor @post_processor.setter def post_processor(self, post_processor: PostProcessor): self._tokenizer.post_processor = post_processor @property def decoder(self) -> Decoder: return self._tokenizer.decoder @decoder.setter def decoder(self, decoder: Decoder): self._tokenizer.decoder = decoder
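Note (not part of the original file): a short sketch of the padding/truncation API documented above, driven through one of the concrete implementations; the pad token and lengths are arbitrary choices for the example.

from tokenizers.implementations import ByteLevelBPETokenizer

tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(["pad me", "truncate me please"], vocab_size=300)

tokenizer.enable_truncation(max_length=8)
tokenizer.enable_padding(pad_id=0, pad_token="<pad>", length=8)
print(tokenizer.padding)     # currently active padding parameters
print(tokenizer.truncation)  # currently active truncation parameters

encodings = tokenizer.encode_batch(["pad me", "truncate me please"])
print([len(e.ids) for e in encodings])  # every sequence is padded/truncated to length 8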
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py
from .base_tokenizer import BaseTokenizer from .bert_wordpiece import BertWordPieceTokenizer from .byte_level_bpe import ByteLevelBPETokenizer from .char_level_bpe import CharBPETokenizer from .sentencepiece_bpe import SentencePieceBPETokenizer from .sentencepiece_unigram import SentencePieceUnigramTokenizer
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.py
# Generated content DO NOT EDIT from .. import trainers Trainer = trainers.Trainer BpeTrainer = trainers.BpeTrainer UnigramTrainer = trainers.UnigramTrainer WordLevelTrainer = trainers.WordLevelTrainer WordPieceTrainer = trainers.WordPieceTrainer
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi
# Generated content DO NOT EDIT class Trainer: """ Base class for all trainers This class is not supposed to be instantiated directly. Instead, any implementation of a Trainer will return an instance of this class when instantiated. """ class BpeTrainer(Trainer): """ Trainer capable of training a BPE model Args: vocab_size (:obj:`int`, `optional`): The size of the final vocabulary, including all tokens and alphabet. min_frequency (:obj:`int`, `optional`): The minimum frequency a pair should have in order to be merged. show_progress (:obj:`bool`, `optional`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. limit_alphabet (:obj:`int`, `optional`): The maximum different characters to keep in the alphabet. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. continuing_subword_prefix (:obj:`str`, `optional`): A prefix to be used for every subword that is not a beginning-of-word. end_of_word_suffix (:obj:`str`, `optional`): A suffix to be used for every subword that is a end-of-word. max_token_length (:obj:`int`, `optional`): Prevents creating tokens longer than the specified size. This can help with reducing polluting your vocabulary with highly repetitive tokens like `======` for wikipedia """ class UnigramTrainer(Trainer): """ Trainer capable of training a Unigram model Args: vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. shrinking_factor (:obj:`float`): The shrinking factor used at each step of the training to prune the vocabulary. unk_token (:obj:`str`): The token used for out-of-vocabulary tokens. max_piece_length (:obj:`int`): The maximum length of a given token. n_sub_iterations (:obj:`int`): The number of iterations of the EM algorithm to perform before pruning the vocabulary. """ def __init__( self, vocab_size=8000, show_progress=True, special_tokens=[], shrinking_factor=0.75, unk_token=None, max_piece_length=16, n_sub_iterations=2, ): pass class WordLevelTrainer(Trainer): """ Trainer capable of training a WorldLevel model Args: vocab_size (:obj:`int`, `optional`): The size of the final vocabulary, including all tokens and alphabet. min_frequency (:obj:`int`, `optional`): The minimum frequency a pair should have in order to be merged. show_progress (:obj:`bool`, `optional`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`): A list of special tokens the model should know of. """ class WordPieceTrainer(Trainer): """ Trainer capable of training a WordPiece model Args: vocab_size (:obj:`int`, `optional`): The size of the final vocabulary, including all tokens and alphabet. min_frequency (:obj:`int`, `optional`): The minimum frequency a pair should have in order to be merged. show_progress (:obj:`bool`, `optional`): Whether to show progress bars while training. 
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. limit_alphabet (:obj:`int`, `optional`): The maximum different characters to keep in the alphabet. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. continuing_subword_prefix (:obj:`str`, `optional`): A prefix to be used for every subword that is not a beginning-of-word. end_of_word_suffix (:obj:`str`, `optional`): A suffix to be used for every subword that is a end-of-word. """ def __init__( self, vocab_size=30000, min_frequency=0, show_progress=True, special_tokens=[], limit_alphabet=None, initial_alphabet=[], continuing_subword_prefix="##", end_of_word_suffix=None, ): pass
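Note (not part of the original stub file): a sketch of driving a bare Tokenizer with an explicitly constructed trainer, using parameters documented above; the corpus and sizes are illustrative.

from tokenizers import Tokenizer, trainers
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

trainer = trainers.BpeTrainer(
    vocab_size=200,
    min_frequency=1,
    special_tokens=["[UNK]", "[PAD]"],
    max_token_length=16,  # caps the length of created tokens, as documented above
)
tokenizer.train_from_iterator(["a tiny illustrative corpus", "bpe merges frequent pairs"], trainer=trainer)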
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py
from .. import normalizers Normalizer = normalizers.Normalizer BertNormalizer = normalizers.BertNormalizer NFD = normalizers.NFD NFKD = normalizers.NFKD NFC = normalizers.NFC NFKC = normalizers.NFKC Sequence = normalizers.Sequence Lowercase = normalizers.Lowercase Prepend = normalizers.Prepend Strip = normalizers.Strip StripAccents = normalizers.StripAccents Nmt = normalizers.Nmt Precompiled = normalizers.Precompiled Replace = normalizers.Replace NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD} def unicode_normalizer_from_str(normalizer: str) -> Normalizer: if normalizer not in NORMALIZERS: raise ValueError( "{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys()) ) return NORMALIZERS[normalizer]()
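Note (not part of the original file): a quick sketch of the helper above; the sample string is illustrative.

from tokenizers.normalizers import unicode_normalizer_from_str

nfkc = unicode_normalizer_from_str("nfkc")  # valid keys: "nfc", "nfd", "nfkc", "nfkd"
print(nfkc.normalize_str("ﬁve ½"))          # compatibility characters are folded, e.g. "five 1⁄2"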
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.pyi
# Generated content DO NOT EDIT class Normalizer: """ Base class for all normalizers This class is not supposed to be instantiated directly. Instead, any implementation of a Normalizer will return an instance of this class when instantiated. """ def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class BertNormalizer(Normalizer): """ BertNormalizer Takes care of normalizing raw text before giving it to a Bert model. This includes cleaning the text, handling accents, chinese chars and lowercasing Args: clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to clean the text, by removing any control characters and replacing all whitespaces by the classic one. handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to handle chinese chars by putting spaces around them. strip_accents (:obj:`bool`, `optional`): Whether to strip all accents. If this option is not specified (ie == None), then it will be determined by the value for `lowercase` (as in the original Bert). lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to lowercase. """ def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Lowercase(Normalizer): """ Lowercase Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFC(Normalizer): """ NFC Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFD(Normalizer): """ NFD Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFKC(Normalizer): """ NFKC Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class NFKD(Normalizer): """ NFKD Unicode Normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Nmt(Normalizer): """ Nmt normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Precompiled(Normalizer): """ Precompiled normalizer Don't use manually it is used for compatiblity for SentencePiece. """ def __init__(self, precompiled_charsmap): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Prepend(Normalizer): """ Prepend normalizer """ def __init__(self, prepend): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Replace(Normalizer): """ Replace normalizer """ def __init__(self, pattern, content): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Sequence(Normalizer): """ Allows concatenating multiple other Normalizer as a Sequence. All the normalizers run in sequence in the given order Args: normalizers (:obj:`List[Normalizer]`): A list of Normalizer to be run as a sequence """ def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. 
If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class Strip(Normalizer): """ Strip normalizer """ def __init__(self, left=True, right=True): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass class StripAccents(Normalizer): """ StripAccents normalizer """ def __init__(self): pass def normalize(self, normalized): """ Normalize a :class:`~tokenizers.NormalizedString` in-place This method allows to modify a :class:`~tokenizers.NormalizedString` to keep track of the alignment information. If you just want to see the result of the normalization on a raw string, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize_str` Args: normalized (:class:`~tokenizers.NormalizedString`): The normalized string on which to apply this :class:`~tokenizers.normalizers.Normalizer` """ pass def normalize_str(self, sequence): """ Normalize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment information. If you need to get/convert offsets, you can use :meth:`~tokenizers.normalizers.Normalizer.normalize` Args: sequence (:obj:`str`): A string to normalize Returns: :obj:`str`: A string after normalization """ pass
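Note (not part of the original stub file): as documented above, normalize_str shows the effect of a normalizer without tracking alignment; the sample text is illustrative.

from tokenizers import normalizers

normalizer = normalizers.Sequence(
    [normalizers.NFD(), normalizers.StripAccents(), normalizers.Lowercase()]
)
print(normalizer.normalize_str("Héllò, Wörld!"))  # -> "hello, world!"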
0
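A minimal usage sketch of the normalizers documented in the stub above (this is not a file from the repository; lines marked "expected" are assumptions based on the docstrings above and on the documentation tests later in this dump):

# Illustrative sketch only, assuming the API documented in the generated stub above.
from tokenizers.normalizers import NFD, Prepend, Replace, Sequence, Strip, StripAccents

# Compose several normalizers; they run in the given order.
normalizer = Sequence([NFD(), StripAccents(), Strip()])

# `normalize_str` is the quick way to inspect the effect (no alignment tracking).
print(normalizer.normalize_str("  Héllò hôw are ü?  "))  # expected: "Hello how are u?"

# `Replace` substitutes every match of the pattern with the given content.
print(Replace("-", " ").normalize_str("state-of-the-art"))  # expected: "state of the art"

# `Prepend` adds a prefix, e.g. the SentencePiece meta symbol.
print(Prepend("▁").normalize_str("hello"))  # expected: "▁hello"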
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
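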
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py
# Generated content DO NOT EDIT
from .. import pre_tokenizers


PreTokenizer = pre_tokenizers.PreTokenizer
BertPreTokenizer = pre_tokenizers.BertPreTokenizer
ByteLevel = pre_tokenizers.ByteLevel
CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
Digits = pre_tokenizers.Digits
Metaspace = pre_tokenizers.Metaspace
Punctuation = pre_tokenizers.Punctuation
Sequence = pre_tokenizers.Sequence
Split = pre_tokenizers.Split
UnicodeScripts = pre_tokenizers.UnicodeScripts
Whitespace = pre_tokenizers.Whitespace
WhitespaceSplit = pre_tokenizers.WhitespaceSplit
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi
# Generated content DO NOT EDIT class PreTokenizer: """ Base class for all pre-tokenizers This class is not supposed to be instantiated directly. Instead, any implementation of a PreTokenizer will return an instance of this class when instantiated. """ def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class BertPreTokenizer(PreTokenizer): """ BertPreTokenizer This pre-tokenizer splits tokens on spaces, and also on punctuation. Each occurence of a punctuation character will be treated separately. """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class ByteLevel(PreTokenizer): """ ByteLevel PreTokenizer This pre-tokenizer takes care of replacing all bytes of the given string with a corresponding representation, as well as splitting into words. Args: add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to add a space to the first word if there isn't already one. This lets us treat `hello` exactly like `say hello`. use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`): Set this to :obj:`False` to prevent this `pre_tokenizer` from using the GPT2 specific regexp for spliting on whitespace. 
""" def __init__(self, add_prefix_space=True, use_regex=True): pass @staticmethod def alphabet(): """ Returns the alphabet used by this PreTokenizer. Since the ByteLevel works as its name suggests, at the byte level, it encodes each byte value to a unique visible character. This means that there is a total of 256 different characters composing this alphabet. Returns: :obj:`List[str]`: A list of characters that compose the alphabet """ pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class CharDelimiterSplit(PreTokenizer): """ This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)` Args: delimiter: str: The delimiter char that will be used to split input """ def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Digits(PreTokenizer): """ This pre-tokenizer simply splits using the digits in separate tokens Args: individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`): If set to True, digits will each be separated as follows:: "Call 123 please" -> "Call ", "1", "2", "3", " please" If set to False, digits will grouped as follows:: "Call 123 please" -> "Call ", "123", " please" """ def __init__(self, individual_digits=False): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Metaspace(PreTokenizer): """ Metaspace pre-tokenizer This pre-tokenizer replaces any whitespace by the provided replacement character. It then tries to split on these spaces. Args: replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): The replacement character. Must be exactly one character. By default we use the `▁` (U+2581) meta symbol (Same as in SentencePiece). add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to add a space to the first word if there isn't already one. This lets us treat `hello` exactly like `say hello`. """ def __init__(self, replacement="_", add_prefix_space=True): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Punctuation(PreTokenizer): """ This pre-tokenizer simply splits on punctuation as individual characters. Args: behavior (:class:`~tokenizers.SplitDelimiterBehavior`): The behavior to use when splitting. Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", "contiguous" """ def __init__(self, behavior="isolated"): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Sequence(PreTokenizer): """ This pre-tokenizer composes other pre_tokenizers and applies them in sequence """ def __init__(self, pretokenizers): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Split(PreTokenizer): """ Split PreTokenizer This versatile pre-tokenizer splits using the provided pattern and according to the provided behavior. The pattern can be inverted by making use of the invert flag. Args: pattern (:obj:`str` or :class:`~tokenizers.Regex`): A pattern used to split the string. 
Usually a string or a a regex built with `tokenizers.Regex` behavior (:class:`~tokenizers.SplitDelimiterBehavior`): The behavior to use when splitting. Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", "contiguous" invert (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to invert the pattern. """ def __init__(self, pattern, behavior, invert=False): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class UnicodeScripts(PreTokenizer): """ This pre-tokenizer splits on characters that belong to different language family It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too. This mimicks SentencePiece Unigram implementation. """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class Whitespace(PreTokenizer): """ This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+` """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass class WhitespaceSplit(PreTokenizer): """ This pre-tokenizer simply splits on the whitespace. Works like `.split()` """ def __init__(self): pass def pre_tokenize(self, pretok): """ Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place This method allows to modify a :class:`~tokenizers.PreTokenizedString` to keep track of the pre-tokenization, and leverage the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of the pre-tokenization of a raw string, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` Args: pretok (:class:`~tokenizers.PreTokenizedString): The pre-tokenized string on which to apply this :class:`~tokenizers.pre_tokenizers.PreTokenizer` """ pass def pre_tokenize_str(self, sequence): """ Pre tokenize the given string This method provides a way to visualize the effect of a :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the alignment, nor does it provide all the capabilities of the :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` Args: sequence (:obj:`str`): A string to pre-tokeize Returns: :obj:`List[Tuple[str, Offsets]]`: A list of tuple with the pre-tokenized parts and their offsets """ pass
0
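A short usage sketch of the pre-tokenizers documented in the stub above (not a repository file; the "Call 911!" result mirrors the documentation test included later in this dump, the other outputs are assumptions based on the docstrings):

# Illustrative sketch only, assuming the API documented in the generated stub above.
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import ByteLevel, Digits, Whitespace

# `pre_tokenize_str` shows the produced splits together with their offsets.
print(Whitespace().pre_tokenize_str("Hello, world!"))
# expected: [('Hello', (0, 5)), (',', (5, 6)), ('world', (7, 12)), ('!', (12, 13))]

# Pre-tokenizers compose through a Sequence, applied in order.
pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
print(pre_tokenizer.pre_tokenize_str("Call 911!"))
# expected: [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('!', (8, 9))]

# ByteLevel exposes its 256-character alphabet, useful as an initial alphabet for trainers.
print(len(ByteLevel.alphabet()))  # 256, per the docstring above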
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/tests/utils.py
import multiprocessing as mp
import os

import pytest
import requests


DATA_PATH = os.path.join("tests", "data")


def download(url, with_filename=None):
    filename = with_filename if with_filename is not None else url.rsplit("/")[-1]
    filepath = os.path.join(DATA_PATH, filename)
    if not os.path.exists(filepath):
        with open(filepath, "wb") as f:
            response = requests.get(url, stream=True)
            response.raise_for_status()
            for chunk in response.iter_content(1024):
                f.write(chunk)
    return filepath


@pytest.fixture(scope="session")
def data_dir():
    assert os.getcwd().endswith("python")
    exist = os.path.exists(DATA_PATH) and os.path.isdir(DATA_PATH)
    if not exist:
        os.mkdir(DATA_PATH)


@pytest.fixture(scope="session")
def roberta_files(data_dir):
    return {
        "vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json"),
        "merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt"),
    }


@pytest.fixture(scope="session")
def bert_files(data_dir):
    return {
        "vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt"),
    }


@pytest.fixture(scope="session")
def openai_files(data_dir):
    return {
        "vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json"),
        "merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt"),
    }


@pytest.fixture(scope="session")
def train_files(data_dir):
    big = download("https://norvig.com/big.txt")
    small = os.path.join(DATA_PATH, "small.txt")
    with open(small, "w") as f:
        with open(big, "r") as g:
            for i, line in enumerate(g):
                f.write(line)
                if i > 100:
                    break
    return {
        "small": small,
        "big": big,
    }


@pytest.fixture(scope="session")
def albert_base(data_dir):
    return download("https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tokenizer.json")


@pytest.fixture(scope="session")
def doc_wiki_tokenizer(data_dir):
    return download(
        "https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json",
        "tokenizer-wiki.json",
    )


@pytest.fixture(scope="session")
def doc_pipeline_bert_tokenizer(data_dir):
    return download(
        "https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json",
        "bert-wiki.json",
    )


# On MacOS Python 3.8+ the default was modified to `spawn`, we need `fork` in tests.
mp.set_start_method("fork")


def multiprocessing_with_parallelism(tokenizer, enabled: bool):
    """
    This helper can be used to test that disabling parallelism avoids dead locks when the
    same tokenizer is used after forking.
    """
    # It's essential to this test that we call 'encode' or 'encode_batch'
    # before the fork. This causes the main process to "lock" some resources
    # provided by the Rust "rayon" crate that are needed for parallel processing.
    tokenizer.encode("Hi")
    tokenizer.encode_batch(["hi", "there"])

    def encode(tokenizer):
        tokenizer.encode("Hi")
        tokenizer.encode_batch(["hi", "there"])

    # Make sure this environment variable is set before the fork happens
    os.environ["TOKENIZERS_PARALLELISM"] = str(enabled)

    p = mp.Process(target=encode, args=(tokenizer,))
    p.start()
    p.join(timeout=1)

    # At this point the process should have successfully exited, depending on whether parallelism
    # was activated or not. So we check the status and kill it if needed
    alive = p.is_alive()
    if alive:
        p.terminate()

    assert (alive and mp.get_start_method() == "fork") == enabled
0
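The `multiprocessing_with_parallelism` helper above relies on the TOKENIZERS_PARALLELISM environment variable to make forked children safe. A hedged sketch of the same pattern outside of pytest ("tokenizer.json" is a placeholder path, not a file shipped with the repository):

# Sketch only, assuming any serialized tokenizer file is available locally.
import multiprocessing as mp
import os

from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("tokenizer.json")   # placeholder path
tokenizer.encode_batch(["warm", "up"])               # touches the rayon thread pool in the parent

# Disable parallelism before forking so the child cannot dead-lock on the shared pool.
os.environ["TOKENIZERS_PARALLELISM"] = "false"


def worker():
    print(tokenizer.encode("hello from the child").tokens)


p = mp.Process(target=worker)
p.start()
p.join()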
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/tests/test_serialization.py
import json import os import unittest import tqdm from huggingface_hub import HfApi, cached_download, hf_hub_url from tokenizers import Tokenizer from .utils import albert_base, data_dir class TestSerialization: def test_full_serialization_albert(self, albert_base): # Check we can read this file. # This used to fail because of BufReader that would fail because the # file exceeds the buffer capacity tokenizer = Tokenizer.from_file(albert_base) def check(tokenizer_file) -> bool: with open(tokenizer_file, "r") as f: data = json.load(f) if "pre_tokenizer" not in data: return True if "type" not in data["pre_tokenizer"]: return False if data["pre_tokenizer"]["type"] == "Sequence": for pre_tok in data["pre_tokenizer"]["pretokenizers"]: if "type" not in pre_tok: return False return True def slow(test_case): """ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. """ if os.getenv("RUN_SLOW") != "1": return unittest.skip("use `RUN_SLOW=1` to run")(test_case) else: return test_case @slow class TestFullDeserialization(unittest.TestCase): def test_full_deserialization_hub(self): # Check we can read this file. # This used to fail because of BufReader that would fail because the # file exceeds the buffer capacity api = HfApi() not_loadable = [] invalid_pre_tokenizer = [] # models = api.list_models(filter="transformers") # for model in tqdm.tqdm(models): # model_id = model.modelId # for model_file in model.siblings: # filename = model_file.rfilename # if filename == "tokenizer.json": # all_models.append((model_id, filename)) all_models = [("HueyNemud/das22-10-camembert_pretrained", "tokenizer.json")] for model_id, filename in tqdm.tqdm(all_models): tokenizer_file = cached_download(hf_hub_url(model_id, filename=filename)) is_ok = check(tokenizer_file) if not is_ok: print(f"{model_id} is affected by no type") invalid_pre_tokenizer.append(model_id) try: Tokenizer.from_file(tokenizer_file) except Exception as e: print(f"{model_id} is not loadable: {e}") not_loadable.append(model_id) except: print(f"{model_id} is not loadable: Rust error") not_loadable.append(model_id) self.assertEqual(invalid_pre_tokenizer, []) self.assertEqual(not_loadable, [])
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py
import pytest from tokenizers import ByteLevelBPETokenizer from ..utils import data_dir, multiprocessing_with_parallelism, roberta_files class TestByteLevelBPE: def test_basic_encode(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"]) output = tokenizer.encode("The quick brown fox jumps over the lazy dog") assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335] assert output.tokens == [ "The", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog", ] assert output.offsets == [ (0, 3), (3, 9), (9, 15), (15, 19), (19, 25), (25, 30), (30, 34), (34, 39), (39, 43), ] def test_add_prefix_space(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file( roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True ) output = tokenizer.encode("The quick brown fox jumps over the lazy dog") assert output.ids == [20, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335] assert output.tokens == [ "ĠThe", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog", ] assert output.offsets == [ (0, 3), (3, 9), (9, 15), (15, 19), (19, 25), (25, 30), (30, 34), (34, 39), (39, 43), ] def test_lowerspace(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file( roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True, lowercase=True, ) output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog") assert output.ids == [5, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335] assert output.tokens == [ "Ġthe", "Ġquick", "Ġbrown", "Ġfox", "Ġjumps", "Ġover", "Ġthe", "Ġlazy", "Ġdog", ] def test_multiprocessing_with_parallelism(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"]) multiprocessing_with_parallelism(tokenizer, False) multiprocessing_with_parallelism(tokenizer, True) def test_train_from_iterator(self): text = ["A first sentence", "Another sentence", "And a last one"] tokenizer = ByteLevelBPETokenizer() tokenizer.train_from_iterator(text, show_progress=False) output = tokenizer.encode("A sentence") assert output.tokens == ["A", "Ġsentence"]
0
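A sketch extending `test_train_from_iterator` above with a save/reload round trip (not a repository file; the output path is arbitrary and the expected tokens mirror the test):

# Sketch only: train a ByteLevelBPETokenizer, save it, and reload it as a generic Tokenizer.
from tokenizers import ByteLevelBPETokenizer, Tokenizer

text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(text, show_progress=False)

tokenizer.save("byte-level-bpe.json")              # serializes model, pre-tokenizer and decoder
reloaded = Tokenizer.from_file("byte-level-bpe.json")
print(reloaded.encode("A sentence").tokens)        # expected: ["A", "Ġsentence"], as in the test above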
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_char_bpe.py
import pytest from tokenizers import CharBPETokenizer from ..utils import data_dir, multiprocessing_with_parallelism, openai_files class TestCharBPETokenizer: def test_basic_encode(self, openai_files): tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"]) output = tokenizer.encode("My name is John", "pair") assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688] assert output.tokens == [ "<unk>", "y</w>", "name</w>", "is</w>", "<unk>", "o", "hn</w>", "pair</w>", ] assert output.offsets == [ (0, 1), (1, 2), (3, 7), (8, 10), (11, 12), (12, 13), (13, 15), (0, 4), ] assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1] def test_lowercase(self, openai_files): tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True) output = tokenizer.encode("My name is John", "pair", add_special_tokens=False) assert output.ids == [547, 1362, 544, 2476, 2688] assert output.tokens == ["my</w>", "name</w>", "is</w>", "john</w>", "pair</w>"] assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)] assert output.type_ids == [0, 0, 0, 0, 1] def test_decoding(self, openai_files): tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True) decoded = tokenizer.decode(tokenizer.encode("my name is john").ids) assert decoded == "my name is john" def test_multiprocessing_with_parallelism(self, openai_files): tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"]) multiprocessing_with_parallelism(tokenizer, False) multiprocessing_with_parallelism(tokenizer, True) def test_train_from_iterator(self): text = ["A first sentence", "Another sentence", "And a last one"] tokenizer = CharBPETokenizer() tokenizer.train_from_iterator(text, show_progress=False) output = tokenizer.encode("A sentence") assert output.tokens == ["A</w>", "sentence</w>"]
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_bert_wordpiece.py
import pytest

from tokenizers import BertWordPieceTokenizer

from ..utils import bert_files, data_dir, multiprocessing_with_parallelism


class TestBertWordPieceTokenizer:
    def test_basic_encode(self, bert_files):
        tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])

        # Encode with special tokens by default
        output = tokenizer.encode("My name is John", "pair")
        assert output.ids == [101, 2026, 2171, 2003, 2198, 102, 3940, 102]
        assert output.tokens == [
            "[CLS]",
            "my",
            "name",
            "is",
            "john",
            "[SEP]",
            "pair",
            "[SEP]",
        ]
        assert output.offsets == [
            (0, 0),
            (0, 2),
            (3, 7),
            (8, 10),
            (11, 15),
            (0, 0),
            (0, 4),
            (0, 0),
        ]
        assert output.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]

        # Can encode without the special tokens
        output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
        assert output.ids == [2026, 2171, 2003, 2198, 3940]
        assert output.tokens == ["my", "name", "is", "john", "pair"]
        assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
        assert output.type_ids == [0, 0, 0, 0, 1]

    def test_multiprocessing_with_parallelism(self, bert_files):
        tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
        multiprocessing_with_parallelism(tokenizer, False)
        multiprocessing_with_parallelism(tokenizer, True)

    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = BertWordPieceTokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["a", "sentence"]
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_sentencepiece.py
import os

import pytest

from tokenizers import SentencePieceBPETokenizer, SentencePieceUnigramTokenizer


class TestSentencePieceBPE:
    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = SentencePieceBPETokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁sentence"]


class TestSentencePieceUnigram:
    def test_train(self, tmpdir):
        p = tmpdir.mkdir("tmpdir").join("file.txt")
        p.write("A first sentence\nAnother sentence\nAnd a last one")

        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train(files=str(p), show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]

        with pytest.raises(Exception) as excinfo:
            _ = tokenizer.encode("A sentence 🤗")
        assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"

    def test_train_with_unk_token(self, tmpdir):
        p = tmpdir.mkdir("tmpdir").join("file.txt")
        p.write("A first sentence\nAnother sentence\nAnd a last one")

        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train(files=str(p), show_progress=False, special_tokens=["<unk>"], unk_token="<unk>")
        output = tokenizer.encode("A sentence 🤗")
        assert output.ids[-1] == 0
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]

    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]

        with pytest.raises(Exception) as excinfo:
            _ = tokenizer.encode("A sentence 🤗")
        assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"

    def test_train_from_iterator_with_unk_token(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = SentencePieceUnigramTokenizer()
        tokenizer.train_from_iterator(
            text, vocab_size=100, show_progress=False, special_tokens=["<unk>"], unk_token="<unk>"
        )
        output = tokenizer.encode("A sentence 🤗")
        assert output.ids[-1] == 0
        assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py
import pytest

from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, processors
from tokenizers.implementations import BaseTokenizer


class TestBaseTokenizer:
    def test_get_set_components(self):
        toki = Tokenizer(models.BPE())
        toki.normalizer = normalizers.NFC()
        toki.pre_tokenizer = pre_tokenizers.ByteLevel()
        toki.post_processor = processors.BertProcessing(("A", 0), ("B", 1))
        toki.decoder = decoders.ByteLevel()

        tokenizer = BaseTokenizer(toki)

        assert isinstance(tokenizer.model, models.BPE)
        assert isinstance(tokenizer.normalizer, normalizers.NFC)
        assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.ByteLevel)
        assert isinstance(tokenizer.post_processor, processors.BertProcessing)
        assert isinstance(tokenizer.decoder, decoders.ByteLevel)

        tokenizer.model = models.Unigram()
        assert isinstance(tokenizer.model, models.Unigram)
        tokenizer.normalizer = normalizers.NFD()
        assert isinstance(tokenizer.normalizer, normalizers.NFD)
        tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
        assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.Whitespace)
        tokenizer.post_processor = processors.ByteLevel()
        assert isinstance(tokenizer.post_processor, processors.ByteLevel)
        tokenizer.decoder = decoders.WordPiece()
        assert isinstance(tokenizer.decoder, decoders.WordPiece)
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/documentation/test_pipeline.py
from tokenizers import Tokenizer from ..utils import data_dir, doc_pipeline_bert_tokenizer, doc_wiki_tokenizer disable_printing = True original_print = print def print(*args, **kwargs): if not disable_printing: original_print(*args, **kwargs) class TestPipeline: def test_pipeline(self, doc_wiki_tokenizer): try: # START reload_tokenizer from tokenizers import Tokenizer tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json") # END reload_tokenizer except Exception: tokenizer = Tokenizer.from_file(doc_wiki_tokenizer) # START setup_normalizer from tokenizers import normalizers from tokenizers.normalizers import NFD, StripAccents normalizer = normalizers.Sequence([NFD(), StripAccents()]) # END setup_normalizer # START test_normalizer normalizer.normalize_str("Héllò hôw are ü?") # "Hello how are u?" # END test_normalizer assert normalizer.normalize_str("Héllò hôw are ü?") == "Hello how are u?" # START replace_normalizer tokenizer.normalizer = normalizer # END replace_normalizer # START setup_pre_tokenizer from tokenizers.pre_tokenizers import Whitespace pre_tokenizer = Whitespace() pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.") # [("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)), # ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ('m', (22, 23)), ("fine", (24, 28)), # (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40))] # END setup_pre_tokenizer assert pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.") == [ ("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)), ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ("m", (22, 23)), ("fine", (24, 28)), (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40)), ] # START combine_pre_tokenizer from tokenizers import pre_tokenizers from tokenizers.pre_tokenizers import Digits pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)]) pre_tokenizer.pre_tokenize_str("Call 911!") # [("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9))] # END combine_pre_tokenizer assert pre_tokenizer.pre_tokenize_str("Call 911!") == [ ("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9)), ] # START replace_pre_tokenizer tokenizer.pre_tokenizer = pre_tokenizer # END replace_pre_tokenizer # START setup_processor from tokenizers.processors import TemplateProcessing tokenizer.post_processor = TemplateProcessing( single="[CLS] $A [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[("[CLS]", 1), ("[SEP]", 2)], ) # END setup_processor # START test_decoding output = tokenizer.encode("Hello, y'all! How are you 😁 ?") print(output.ids) # [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2] tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]) # "Hello , y ' all ! How are you ?" # END test_decoding assert output.ids == [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2] assert ( tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]) == "Hello , y ' all ! How are you ?" 
) @staticmethod def slow_train(): # START bert_setup_tokenizer from tokenizers import Tokenizer from tokenizers.models import WordPiece bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) # END bert_setup_tokenizer # START bert_setup_normalizer from tokenizers import normalizers from tokenizers.normalizers import NFD, Lowercase, StripAccents bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()]) # END bert_setup_normalizer # START bert_setup_pre_tokenizer from tokenizers.pre_tokenizers import Whitespace bert_tokenizer.pre_tokenizer = Whitespace() # END bert_setup_pre_tokenizer # START bert_setup_processor from tokenizers.processors import TemplateProcessing bert_tokenizer.post_processor = TemplateProcessing( single="[CLS] $A [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[ ("[CLS]", 1), ("[SEP]", 2), ], ) # END bert_setup_processor # START bert_train_tokenizer from tokenizers.trainers import WordPieceTrainer trainer = WordPieceTrainer(vocab_size=30522, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]] bert_tokenizer.train(files, trainer) bert_tokenizer.save("data/bert-wiki.json") # END bert_train_tokenizer def test_bert_example(self, doc_pipeline_bert_tokenizer): try: bert_tokenizer = Tokenizer.from_file("data/bert-wiki.json") except Exception: bert_tokenizer = Tokenizer.from_file(doc_pipeline_bert_tokenizer) # START bert_test_decoding output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.") print(output.tokens) # ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"] bert_tokenizer.decode(output.ids) # "welcome to the tok ##eni ##zer ##s library ." # END bert_test_decoding assert bert_tokenizer.decode(output.ids) == "welcome to the tok ##eni ##zer ##s library ." # START bert_proper_decoding from tokenizers import decoders bert_tokenizer.decoder = decoders.WordPiece() bert_tokenizer.decode(output.ids) # "welcome to the tokenizers library." # END bert_proper_decoding assert bert_tokenizer.decode(output.ids) == "welcome to the tokenizers library." if __name__ == "__main__": import os from urllib import request from zipfile import ZipFile disable_printing = False if not os.path.isdir("data/wikitext-103-raw"): print("Downloading wikitext-103...") wiki_text, _ = request.urlretrieve( "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip" ) with ZipFile(wiki_text, "r") as z: print("Unzipping in data...") z.extractall("data") print("Now training...") TestPipeline.slow_train()
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/documentation/test_quicktour.py
from tokenizers import Tokenizer from tokenizers.models import BPE from tokenizers.pre_tokenizers import Whitespace from tokenizers.trainers import BpeTrainer from ..utils import data_dir, doc_wiki_tokenizer disable_printing = True original_print = print def print(*args, **kwargs): if not disable_printing: original_print(*args, **kwargs) class TestQuicktour: # This method contains everything we don't want to run @staticmethod def slow_train(): tokenizer, trainer = TestQuicktour.get_tokenizer_trainer() # START train files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]] tokenizer.train(files, trainer) # END train # START save tokenizer.save("data/tokenizer-wiki.json") # END save @staticmethod def get_tokenizer_trainer(): # START init_tokenizer from tokenizers import Tokenizer from tokenizers.models import BPE tokenizer = Tokenizer(BPE(unk_token="[UNK]")) # END init_tokenizer # START init_trainer from tokenizers.trainers import BpeTrainer trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) # END init_trainer # START init_pretok from tokenizers.pre_tokenizers import Whitespace tokenizer.pre_tokenizer = Whitespace() # END init_pretok return tokenizer, trainer def test_quicktour(self, doc_wiki_tokenizer): def print(*args, **kwargs): pass try: # START reload_tokenizer tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json") # END reload_tokenizer except Exception: tokenizer = Tokenizer.from_file(doc_wiki_tokenizer) # START encode output = tokenizer.encode("Hello, y'all! How are you 😁 ?") # END encode # START print_tokens print(output.tokens) # ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"] # END print_tokens assert output.tokens == [ "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", ] # START print_ids print(output.ids) # [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35] # END print_ids assert output.ids == [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35] # START print_offsets print(output.offsets[9]) # (26, 27) # END print_offsets assert output.offsets[9] == (26, 27) # START use_offsets sentence = "Hello, y'all! How are you 😁 ?" sentence[26:27] # "😁" # END use_offsets assert sentence[26:27] == "😁" # START check_sep tokenizer.token_to_id("[SEP]") # 2 # END check_sep assert tokenizer.token_to_id("[SEP]") == 2 # START init_template_processing from tokenizers.processors import TemplateProcessing tokenizer.post_processor = TemplateProcessing( single="[CLS] $A [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[ ("[CLS]", tokenizer.token_to_id("[CLS]")), ("[SEP]", tokenizer.token_to_id("[SEP]")), ], ) # END init_template_processing # START print_special_tokens output = tokenizer.encode("Hello, y'all! 
How are you 😁 ?") print(output.tokens) # ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"] # END print_special_tokens assert output.tokens == [ "[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]", ] # START print_special_tokens_pair output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?") print(output.tokens) # ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"] # END print_special_tokens_pair assert output.tokens == [ "[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]", ] # START print_type_ids print(output.type_ids) # [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] # END print_type_ids assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] # START encode_batch output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"]) # END encode_batch # START encode_batch_pair output = tokenizer.encode_batch( [["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]] ) # END encode_batch_pair # START enable_padding tokenizer.enable_padding(pad_id=3, pad_token="[PAD]") # END enable_padding # START print_batch_tokens output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"]) print(output[1].tokens) # ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"] # END print_batch_tokens assert output[1].tokens == ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"] # START print_attention_mask print(output[1].attention_mask) # [1, 1, 1, 1, 1, 1, 1, 0] # END print_attention_mask assert output[1].attention_mask == [1, 1, 1, 1, 1, 1, 1, 0] if __name__ == "__main__": import os from urllib import request from zipfile import ZipFile disable_printing = False if not os.path.isdir("data/wikitext-103-raw"): print("Downloading wikitext-103...") wiki_text, _ = request.urlretrieve( "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip" ) with ZipFile(wiki_text, "r") as z: print("Unzipping in data...") z.extractall("data") print("Now training...") TestQuicktour.slow_train()
0
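A small sketch complementing the quicktour test above: truncation is enabled much like padding (not a repository file; "data/tokenizer-wiki.json" is the same downloaded fixture the quicktour assumes):

# Sketch only, assuming the quicktour tokenizer file is available locally.
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
tokenizer.enable_truncation(max_length=8)

output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(len(output.ids))  # at most 8 once truncation is enabled, special tokens included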
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
import gzip import os import datasets import pytest from ..utils import data_dir, train_files class TestTrainFromIterators: @staticmethod def get_tokenizer_trainer(): # START init_tokenizer_trainer from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers tokenizer = Tokenizer(models.Unigram()) tokenizer.normalizer = normalizers.NFKC() tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel() tokenizer.decoder = decoders.ByteLevel() trainer = trainers.UnigramTrainer( vocab_size=20000, initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), special_tokens=["<PAD>", "<BOS>", "<EOS>"], ) # END init_tokenizer_trainer trainer.show_progress = False return tokenizer, trainer @staticmethod def load_dummy_dataset(): # START load_dataset import datasets dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train+test+validation") # END load_dataset @pytest.fixture(scope="class") def setup_gzip_files(self, train_files): with open(train_files["small"], "rt") as small: for n in range(3): path = f"data/my-file.{n}.gz" with gzip.open(path, "wt") as f: f.write(small.read()) def test_train_basic(self): tokenizer, trainer = self.get_tokenizer_trainer() # START train_basic # First few lines of the "Zen of Python" https://www.python.org/dev/peps/pep-0020/ data = [ "Beautiful is better than ugly." "Explicit is better than implicit." "Simple is better than complex." "Complex is better than complicated." "Flat is better than nested." "Sparse is better than dense." "Readability counts." ] tokenizer.train_from_iterator(data, trainer=trainer) # END train_basic def test_datasets(self): tokenizer, trainer = self.get_tokenizer_trainer() # In order to keep tests fast, we only use the first 100 examples os.environ["TOKENIZERS_PARALLELISM"] = "true" dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train[0:100]") # START def_batch_iterator def batch_iterator(batch_size=1000): for i in range(0, len(dataset), batch_size): yield dataset[i : i + batch_size]["text"] # END def_batch_iterator # START train_datasets tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset)) # END train_datasets def test_gzip(self, setup_gzip_files): tokenizer, trainer = self.get_tokenizer_trainer() # START single_gzip import gzip with gzip.open("data/my-file.0.gz", "rt") as f: tokenizer.train_from_iterator(f, trainer=trainer) # END single_gzip # START multi_gzip files = ["data/my-file.0.gz", "data/my-file.1.gz", "data/my-file.2.gz"] def gzip_iterator(): for path in files: with gzip.open(path, "rt") as f: for line in f: yield line tokenizer.train_from_iterator(gzip_iterator(), trainer=trainer) # END multi_gzip
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_decoders.py
import json import pickle import pytest from tokenizers.decoders import ( CTC, BPEDecoder, ByteLevel, Decoder, Metaspace, Sequence, WordPiece, ByteFallback, Replace, Strip, Fuse, ) class TestByteLevel: def test_instantiate(self): assert ByteLevel() is not None assert isinstance(ByteLevel(), Decoder) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_decoding(self): decoder = ByteLevel() assert decoder.decode(["My", "Ġname", "Ġis", "ĠJohn"]) == "My name is John" def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestReplace: def test_instantiate(self): assert Replace("_", " ") is not None assert isinstance(Replace("_", " "), Decoder) assert isinstance(Replace("_", " "), Replace) # assert isinstance(pickle.loads(pickle.dumps(Replace("_", " "))), Replace) def test_decoding(self): decoder = Replace("_", " ") assert decoder.decode(["My", "_name", "_is", "_John"]) == "My name is John" class TestWordPiece: def test_instantiate(self): assert WordPiece() is not None assert WordPiece(prefix="__") is not None assert WordPiece(cleanup=True) is not None assert isinstance(WordPiece(), Decoder) assert isinstance(WordPiece(), WordPiece) assert isinstance(pickle.loads(pickle.dumps(WordPiece())), WordPiece) def test_decoding(self): decoder = WordPiece() assert decoder.decode(["My", "na", "##me", "is", "Jo", "##hn"]) == "My name is John" assert decoder.decode(["I", "'m", "Jo", "##hn"]) == "I'm John" decoder = WordPiece(prefix="__", cleanup=False) assert decoder.decode(["My", "na", "__me", "is", "Jo", "__hn"]) == "My name is John" assert decoder.decode(["I", "'m", "Jo", "__hn"]) == "I 'm John" def test_can_modify(self): decoder = WordPiece(prefix="$$", cleanup=False) assert decoder.prefix == "$$" assert decoder.cleanup == False # Modify these decoder.prefix = "__" assert decoder.prefix == "__" decoder.cleanup = True assert decoder.cleanup == True class TestByteFallback: def test_instantiate(self): assert ByteFallback() is not None assert isinstance(ByteFallback(), Decoder) assert isinstance(ByteFallback(), ByteFallback) assert isinstance(pickle.loads(pickle.dumps(ByteFallback())), ByteFallback) def test_decoding(self): decoder = ByteFallback() assert decoder.decode(["My", " na", "me"]) == "My name" assert decoder.decode(["<0x61>"]) == "a" assert decoder.decode(["<0xE5>"]) == "�" assert decoder.decode(["<0xE5>", "<0x8f>"]) == "��" assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>"]) == "叫" assert decoder.decode(["<0xE5>", "<0x8f>", "a"]) == "��a" assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>", "a"]) == "叫a" class TestFuse: def test_instantiate(self): assert Fuse() is not None assert isinstance(Fuse(), Decoder) assert isinstance(Fuse(), Fuse) assert isinstance(pickle.loads(pickle.dumps(Fuse())), Fuse) def test_decoding(self): decoder = Fuse() assert decoder.decode(["My", " na", "me"]) == "My name" class TestStrip: def test_instantiate(self): assert Strip(left=0, right=0) is not None assert isinstance(Strip(content="_", left=0, right=0), Decoder) assert isinstance(Strip(content="_", left=0, right=0), Strip) assert isinstance(pickle.loads(pickle.dumps(Strip(content="_", left=0, right=0))), Strip) def test_decoding(self): decoder = Strip(content="_", left=1, right=0) assert decoder.decode(["_My", " na", "me", " _-", "__-"]) == "My name _-_-" class TestMetaspace: def test_instantiate(self): assert Metaspace() is not 
None assert Metaspace(replacement="-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): Metaspace(replacement="") assert Metaspace(add_prefix_space=True) is not None assert isinstance(Metaspace(), Decoder) assert isinstance(Metaspace(), Metaspace) assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace) def test_decoding(self): decoder = Metaspace() assert decoder.decode(["▁My", "▁name", "▁is", "▁John"]) == "My name is John" decoder = Metaspace(replacement="-", add_prefix_space=False) assert decoder.decode(["-My", "-name", "-is", "-John"]) == " My name is John" def test_can_modify(self): decoder = Metaspace(replacement="*", add_prefix_space=False) assert decoder.replacement == "*" assert decoder.add_prefix_space == False # Modify these decoder.replacement = "&" assert decoder.replacement == "&" decoder.add_prefix_space = True assert decoder.add_prefix_space == True class TestBPEDecoder: def test_instantiate(self): assert BPEDecoder() is not None assert BPEDecoder(suffix="_") is not None assert isinstance(BPEDecoder(), Decoder) assert isinstance(BPEDecoder(), BPEDecoder) assert isinstance(pickle.loads(pickle.dumps(BPEDecoder())), BPEDecoder) def test_decoding(self): decoder = BPEDecoder() assert decoder.decode(["My</w>", "na", "me</w>", "is</w>", "Jo", "hn</w>"]) == "My name is John" decoder = BPEDecoder(suffix="_") assert decoder.decode(["My_", "na", "me_", "is_", "Jo", "hn_"]) == "My name is John" def test_can_modify(self): decoder = BPEDecoder(suffix="123") assert decoder.suffix == "123" # Modify these decoder.suffix = "</w>" assert decoder.suffix == "</w>" class TestCTCDecoder: def test_instantiate(self): assert CTC() is not None assert CTC(pad_token="[PAD]") is not None assert isinstance(CTC(), Decoder) assert isinstance(CTC(), CTC) assert isinstance(pickle.loads(pickle.dumps(CTC())), CTC) def test_decoding(self): decoder = CTC() assert ( decoder.decode(["<pad>", "<pad>", "h", "e", "e", "l", "l", "<pad>", "l", "o", "o", "o", "<pad>"]) == "hello" ) decoder = CTC(pad_token="[PAD]") assert ( decoder.decode(["[PAD]", "[PAD]", "h", "e", "e", "l", "l", "[PAD]", "l", "o", "o", "o", "[PAD]"]) == "hello" ) def test_can_modify(self): decoder = CTC(pad_token="[PAD]") assert decoder.pad_token == "[PAD]" assert decoder.word_delimiter_token == "|" assert decoder.cleanup == True # Modify these decoder.pad_token = "{pad}" assert decoder.pad_token == "{pad}" decoder.word_delimiter_token = "_" assert decoder.word_delimiter_token == "_" decoder.cleanup = False assert decoder.cleanup == False class TestSequenceDecoder: def test_instantiate(self): assert Sequence([]) is not None assert Sequence([CTC()]) is not None assert isinstance(Sequence([]), Decoder) assert isinstance(Sequence([]), Sequence) serialized = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(serialized), Sequence) def test_decoding(self): decoder = Sequence([CTC(), Metaspace()]) initial = ["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"] expected = "Hi you" assert decoder.decode(initial) == expected
0
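A hedged sketch composing the decoders exercised in the tests above into a SentencePiece-style chain with byte fallback (not a repository file; the "expected" output is an assumption based on the behaviors shown in the tests):

# Sketch only: chain Replace, ByteFallback, Fuse and Strip as tested individually above.
from tokenizers.decoders import ByteFallback, Fuse, Replace, Sequence, Strip

decoder = Sequence(
    [
        Replace("▁", " "),                    # turn the metaspace symbol back into a space
        ByteFallback(),                       # merge consecutive <0x..> byte tokens into characters
        Fuse(),                               # concatenate everything into a single string
        Strip(content=" ", left=1, right=0),  # drop the leading prefix space
    ]
)

print(decoder.decode(["▁My", "▁name", "▁is", "▁", "<0xE5>", "<0x8f>", "<0xab>"]))
# expected: "My name is 叫"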
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_tokenizer.py
import pickle import numpy as np import pytest from tokenizers import AddedToken, Encoding, Tokenizer from tokenizers.implementations import BertWordPieceTokenizer from tokenizers.models import BPE, Model, WordPiece, Unigram from tokenizers.normalizers import Lowercase from tokenizers.pre_tokenizers import ByteLevel from tokenizers.processors import BertProcessing, RobertaProcessing from ..utils import bert_files, data_dir, multiprocessing_with_parallelism, roberta_files class TestAddedToken: def test_instantiate_with_content_only(self): added_token = AddedToken("<mask>") added_token.content = "<MASK>" assert added_token.content == "<MASK>" assert type(added_token) == AddedToken added_token.content = added_token.content.lower() assert added_token.special == False added_token.special = True assert added_token.special == True added_token.special = False assert str(added_token) == "<mask>" assert ( repr(added_token) == 'AddedToken("<mask>", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False)' ) assert added_token.rstrip == False assert added_token.lstrip == False assert added_token.single_word == False assert added_token.normalized == True assert isinstance(pickle.loads(pickle.dumps(added_token)), AddedToken) def test_can_set_rstrip(self): added_token = AddedToken("<mask>", rstrip=True) assert added_token.rstrip == True assert added_token.lstrip == False assert added_token.single_word == False assert added_token.normalized == True def test_can_set_lstrip(self): added_token = AddedToken("<mask>", lstrip=True) assert added_token.rstrip == False assert added_token.lstrip == True assert added_token.single_word == False assert added_token.normalized == True def test_can_set_single_world(self): added_token = AddedToken("<mask>", single_word=True) assert added_token.rstrip == False assert added_token.lstrip == False assert added_token.single_word == True assert added_token.normalized == True def test_can_set_normalized(self): added_token = AddedToken("<mask>", normalized=False) assert added_token.rstrip == False assert added_token.lstrip == False assert added_token.single_word == False assert added_token.normalized == False class TestTokenizer: def test_has_expected_type_and_methods(self): tokenizer = Tokenizer(BPE()) assert type(tokenizer) == Tokenizer assert callable(tokenizer.num_special_tokens_to_add) assert callable(tokenizer.get_vocab) assert callable(tokenizer.get_vocab_size) assert callable(tokenizer.enable_truncation) assert callable(tokenizer.no_truncation) assert callable(tokenizer.enable_padding) assert callable(tokenizer.no_padding) assert callable(tokenizer.encode) assert callable(tokenizer.encode_batch) assert callable(tokenizer.decode) assert callable(tokenizer.decode_batch) assert callable(tokenizer.token_to_id) assert callable(tokenizer.id_to_token) assert callable(tokenizer.add_tokens) assert callable(tokenizer.add_special_tokens) assert callable(tokenizer.train) assert callable(tokenizer.post_process) assert isinstance(tokenizer.model, Model) assert tokenizer.normalizer is None assert tokenizer.pre_tokenizer is None assert tokenizer.post_processor is None assert tokenizer.decoder is None assert isinstance(pickle.loads(pickle.dumps(Tokenizer(BPE()))), Tokenizer) def test_add_tokens(self): tokenizer = Tokenizer(BPE()) added = tokenizer.add_tokens(["my", "name", "is", "john"]) assert added == 4 tokens = [AddedToken("the"), AddedToken("quick", normalized=False), AddedToken()] assert tokens[0].normalized == True added = tokenizer.add_tokens(tokens) assert 
added == 2 assert tokens[0].normalized == True assert tokens[1].normalized == False def test_add_special_tokens(self): tokenizer = Tokenizer(BPE()) # Can add special tokens as `str` added = tokenizer.add_special_tokens(["my", "name", "is", "john"]) assert added == 4 # Can add special tokens as `AddedToken` tokens = [AddedToken("the"), AddedToken("quick", normalized=True), AddedToken()] assert tokens[0].normalized == True added = tokenizer.add_special_tokens(tokens) assert added == 2 assert tokens[0].normalized == False assert tokens[1].normalized == True def test_encode(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can encode single sequence output = tokenizer.encode("my name is john") assert output.tokens == ["my", "name", "is", "john"] assert type(output.ids) == list assert type(output.type_ids) == list assert type(output.offsets) == list with pytest.warns(DeprecationWarning): assert type(output.words) == list assert type(output.word_ids) == list assert type(output.special_tokens_mask) == list assert type(output.attention_mask) == list assert type(output.overflowing) == list # Can encode a pair of sequences output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["my", "name", "is", "john", "pair"] assert isinstance(pickle.loads(pickle.dumps(output)), Encoding) # Can encode a single pre-tokenized sequence output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True) assert output.tokens == ["my", "name", "is", "john"] # Can encode a batch with both a single sequence and a pair of sequences output = tokenizer.encode_batch(["my name is john", ("my name is john", "pair")]) assert len(output) == 2 def test_encode_formats(self, bert_files): with pytest.deprecated_call(): tokenizer = BertWordPieceTokenizer(bert_files["vocab"]) # Encode output = tokenizer.encode("my name is john") assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"] output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"] output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True) assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"] output = tokenizer.encode(["my", "name", "is", "john"], ["pair"], is_pretokenized=True) assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"] # Encode batch result_single = [ ["[CLS]", "my", "name", "is", "john", "[SEP]"], ["[CLS]", "my", "name", "is", "georges", "[SEP]"], ] result_pair = [ ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"], ["[CLS]", "my", "name", "is", "georges", "[SEP]", "pair", "[SEP]"], ] def format(encodings): return [e.tokens for e in encodings] def test_single(input, is_pretokenized=False): output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized) assert format(output) == result_single def test_pair(input, is_pretokenized=False): output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized) assert format(output) == result_pair # Classic inputs # Lists test_single(["My name is John", "My name is Georges"]) test_pair([("my name is john", "pair"), ("my name is georges", "pair")]) test_pair([["my name is john", "pair"], ["my name is georges", "pair"]]) # Tuples test_single(("My name is John", "My name is Georges")) test_pair((("My name is John", "pair"), ("My name is Georges", "pair"))) # Numpy test_single(np.array(["My name is John", "My name is Georges"])) test_pair(np.array([("My name 
is John", "pair"), ("My name is Georges", "pair")])) test_pair(np.array([["My name is John", "pair"], ["My name is Georges", "pair"]])) # PreTokenized inputs # Lists test_single([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]], True) test_pair( [ (["My", "name", "is", "John"], ["pair"]), (["My", "name", "is", "Georges"], ["pair"]), ], True, ) test_pair( [ [["My", "name", "is", "John"], ["pair"]], [["My", "name", "is", "Georges"], ["pair"]], ], True, ) # Tuples test_single((("My", "name", "is", "John"), ("My", "name", "is", "Georges")), True) test_pair( ( (("My", "name", "is", "John"), ("pair",)), (("My", "name", "is", "Georges"), ("pair",)), ), True, ) test_pair( ( (["My", "name", "is", "John"], ["pair"]), (["My", "name", "is", "Georges"], ["pair"]), ), True, ) # Numpy test_single( np.array([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]]), True, ) test_single( np.array((("My", "name", "is", "John"), ("My", "name", "is", "Georges"))), True, ) test_pair( np.array( [ [["My", "name", "is", "John"], ["pair"]], [["My", "name", "is", "Georges"], ["pair"]], ], dtype=object, ), True, ) test_pair( np.array( ( (("My", "name", "is", "John"), ("pair",)), (("My", "name", "is", "Georges"), ("pair",)), ), dtype=object, ), True, ) # Mal formed with pytest.raises(TypeError, match="TextInputSequence must be str"): tokenizer.encode([["my", "name"]]) with pytest.raises(TypeError, match="TextInputSequence must be str"): tokenizer.encode("My name is john", [["pair"]]) with pytest.raises(TypeError, match="TextInputSequence must be str"): tokenizer.encode("my name is john", ["pair"]) with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"): tokenizer.encode("My name is john", is_pretokenized=True) with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"): tokenizer.encode("My name is john", ["pair"], is_pretokenized=True) with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"): tokenizer.encode(["My", "name", "is", "John"], "pair", is_pretokenized=True) def test_encode_add_special_tokens(self, roberta_files): with pytest.deprecated_call(): tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"])) tokenizer.add_special_tokens(["<s>", "</s>"]) tokenizer.pre_tokenizer = ByteLevel(add_prefix_space=True) tokenizer.post_processor = RobertaProcessing( ("</s>", tokenizer.token_to_id("</s>")), ("<s>", tokenizer.token_to_id("<s>")), ) # Can encode with special tokens output_with_specials = tokenizer.encode("My name is John", add_special_tokens=True) assert output_with_specials.tokens == ["<s>", "ĠMy", "Ġname", "Ġis", "ĠJohn", "</s>"] # Can encode without special tokens output_without_specials = tokenizer.encode("My name is John", add_special_tokens=False) assert output_without_specials.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"] def test_truncation(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.enable_truncation(2) # Can truncate single sequences output = tokenizer.encode("my name is john") assert output.tokens == ["my", "name"] # Can truncate pair sequences as well output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["my", "pair"] # Can get the params and give them to enable_truncation trunc = tokenizer.truncation tokenizer.enable_truncation(**trunc) # Left truncation direction tokenizer.enable_truncation(2, direction="left") output = tokenizer.encode("my name is john") assert output.tokens == ["is", "john"] output = 
tokenizer.encode("my name is john", "pair") assert output.tokens == ["john", "pair"] def test_padding(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # By default it does nothing when encoding single sequence tokenizer.enable_padding() output = tokenizer.encode("my name") assert output.tokens == ["my", "name"] # Can pad to the longest in a batch output = tokenizer.encode_batch(["my name", "my name is john"]) assert all([len(encoding) == 4 for encoding in output]) # Can pad to the specified length otherwise tokenizer.enable_padding(length=4) output = tokenizer.encode("my name") assert output.tokens == ["my", "name", "[PAD]", "[PAD]"] output = tokenizer.encode("my name", "pair") assert output.tokens == ["my", "name", "pair", "[PAD]"] # Can get the params and give them to enable_padding padding = tokenizer.padding tokenizer.enable_padding(**padding) def test_decode(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can decode single sequences output = tokenizer.decode([0, 1, 2, 3]) assert output == "my name is john" # Can decode batch output = tokenizer.decode_batch([[0, 1, 2, 3], [4]]) assert output == ["my name is john", "pair"] def test_get_vocab(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can retrieve vocab with added tokens vocab = tokenizer.get_vocab(with_added_tokens=True) assert vocab == {"is": 2, "john": 3, "my": 0, "name": 1, "pair": 4} # Can retrieve vocab without added tokens vocab = tokenizer.get_vocab(with_added_tokens=False) assert vocab == {} # Can retrieve added token decoder vocab = tokenizer.get_added_tokens_decoder() assert vocab == { 0: AddedToken("my", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 1: AddedToken("name", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 2: AddedToken("is", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 3: AddedToken("john", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 4: AddedToken("pair", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), } def test_get_vocab_size(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can retrieve vocab's size with added tokens size = tokenizer.get_vocab_size(with_added_tokens=True) assert size == 5 # Can retrieve vocab's size without added tokens size = tokenizer.get_vocab_size(with_added_tokens=False) assert size == 0 def test_post_process(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.enable_truncation(2) tokenizer.enable_padding(length=4) encoding = tokenizer.encode("my name is john") pair_encoding = tokenizer.encode("pair") # Can post process a single encoding output = tokenizer.post_process(encoding) assert output.tokens == ["my", "name", "[PAD]", "[PAD]"] # Can post process a pair of encodings output = tokenizer.post_process(encoding, pair_encoding) assert output.tokens == ["my", "pair", "[PAD]", "[PAD]"] def test_multiprocessing_with_parallelism(self): tokenizer = Tokenizer(BPE()) multiprocessing_with_parallelism(tokenizer, False) multiprocessing_with_parallelism(tokenizer, True) def test_from_pretrained(self): tokenizer = Tokenizer.from_pretrained("bert-base-cased") output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False) assert output.tokens == ["Hey", "there", "dear", 
"friend", "!"] def test_from_pretrained_revision(self): tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test") output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False) assert output.tokens == ["hey", "there", "dear", "friend", "!"] tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test", revision="gpt-2") output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False) assert output.tokens == ["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"] def test_unigram_byte_fallback(self): vocab = [ ("<unk>", 0.0), ("A", -0.01), ("sen", -0.02), ("te", -0.03), ("n", -0.04), ("ce", -0.05), ("<0xF0>", -0.06), ("<0x9F>", -0.06), ("<0xA4>", -0.06), ("<0x97>", -0.06), (" ", -0.4), ] tokenizer = tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=False)) output = tokenizer.encode("A sentence 🤗") assert output.ids == [1, 10, 2, 3, 4, 5, 10, 0] assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "🤗"] tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=True)) output = tokenizer.encode("A sentence 🤗") assert output.ids == [1, 10, 2, 3, 4, 5, 10, 6, 7, 8, 9] assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "<0xF0>", "<0x9F>", "<0xA4>", "<0x97>"]
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_processors.py
import json import pickle import pytest from tokenizers import Tokenizer from tokenizers.models import BPE from tokenizers.pre_tokenizers import ByteLevel as ByteLevelPreTokenizer from tokenizers.processors import ( BertProcessing, ByteLevel, PostProcessor, RobertaProcessing, Sequence, TemplateProcessing, ) from ..utils import data_dir, roberta_files class TestBertProcessing: def test_instantiate(self): processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1)) assert processor is not None assert isinstance(processor, PostProcessor) assert isinstance(processor, BertProcessing) assert isinstance( pickle.loads(pickle.dumps(BertProcessing(("[SEP]", 0), ("[CLS]", 1)))), BertProcessing, ) def test_processing(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["[SEP]", "[CLS]"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1)) output = tokenizer.encode("my name", "pair") assert output.tokens == ["[CLS]", "my", "name", "[SEP]", "pair", "[SEP]"] assert output.ids == [1, 2, 3, 0, 6, 0] class TestRobertaProcessing: def test_instantiate(self): processor = RobertaProcessing(("</s>", 1), ("<s>", 0)) assert processor is not None assert isinstance(processor, PostProcessor) assert isinstance(processor, RobertaProcessing) assert isinstance( pickle.loads(pickle.dumps(RobertaProcessing(("</s>", 1), ("<s>", 0)))), RobertaProcessing, ) def test_processing(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["<s>", "</s>"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0)) output = tokenizer.encode("my name", "pair") assert output.tokens == ["<s>", "my", "name", "</s>", "</s>", "pair", "</s>"] assert output.ids == [0, 2, 3, 1, 1, 6, 1] class TestByteLevelProcessing: def test_instantiate(self): assert ByteLevel() is not None assert ByteLevel(trim_offsets=True) is not None assert isinstance(ByteLevel(), PostProcessor) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_processing(self, roberta_files): # Deprecated in 0.9 with pytest.deprecated_call(): tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"])) tokenizer.pre_tokenizer = ByteLevelPreTokenizer(add_prefix_space=True) # Keeps original offsets output = tokenizer.encode("My name is John") assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"] assert output.offsets == [(0, 2), (2, 7), (7, 10), (10, 15)] # Trims offsets when activated tokenizer.post_processor = ByteLevel(trim_offsets=True) output = tokenizer.encode("My name is John") assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"] assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15)] def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestTemplateProcessing: def get_bert(self): return TemplateProcessing( single=["[CLS]", "$0", "[SEP]"], pair=["[CLS]", "$A", "[SEP]", "$B:1", "[SEP]:1"], special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) def get_roberta(self): return TemplateProcessing( single="<s> $0 </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[("<s>", 0), ("</s>", 1)], ) def get_t5_squad(self): # >>> from transformers import AutoTokenizer # >>> tok = AutoTokenizer.from_pretrained("t5-small") # >>> tok.tokenize("question: ") # ['▁question', ':'] # >>> tok.tokenize("context: ") # ['▁context', ':'] # 
>>> tok.encode("context: ") # [2625, 10] # >>> tok.encode("question: ") # [822, 10] return TemplateProcessing( single=["$0"], pair=["Q", "$A", "C", "$B"], special_tokens=[ { "id": "Q", "ids": [2625, 10], "tokens": ["_question", ":"], }, { "id": "C", "ids": [822, 10], "tokens": ["_context", ":"], }, ], ) def test_instantiate(self): bert = self.get_bert() assert bert is not None assert isinstance(bert, PostProcessor) assert isinstance(bert, TemplateProcessing) assert isinstance(pickle.loads(pickle.dumps(bert)), TemplateProcessing) # It is absolutely legal to have tokens with spaces in the name: processor = TemplateProcessing( single=["[ C L S ]", "Token with space"], special_tokens=[("[ C L S ]", 0), ("Token with space", 1)], ) # Sequence identifiers must be well formed: with pytest.raises(Exception, match="Cannot build Piece"): processor = TemplateProcessing(single="[CLS] $$ [SEP]") with pytest.raises(Exception, match="Cannot build Piece"): processor = TemplateProcessing(single="[CLS] $A: [SEP]") # Special tokens must be provided when used in template: with pytest.raises(Exception, match="Missing SpecialToken\\(s\\) with id\\(s\\)"): processor = TemplateProcessing(single=["[CLS]"]) def test_bert_parity(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["[SEP]", "[CLS]"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1)) original = tokenizer.encode("my name", "pair") tokenizer.post_processor = self.get_bert() template = tokenizer.encode("my name", "pair") assert original.ids == template.ids def test_roberta_parity(self): tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["<s>", "</s>"]) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0)) original = tokenizer.encode("my name is john", "pair") tokenizer.post_processor = self.get_roberta() template = tokenizer.encode("my name is john", "pair") assert original.ids == template.ids class TestSequenceProcessing: def test_sequence_processing(self): assert Sequence([]) is not None assert Sequence([ByteLevel()]) is not None assert isinstance(Sequence([]), PostProcessor) assert isinstance(Sequence([]), Sequence) serialized = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(serialized), Sequence) def test_post_process(self): byte_level = ByteLevel(trim_offsets=True) template = TemplateProcessing( single=["[CLS]", "$0", "[SEP]"], pair=["[CLS]:0", "$A", "[SEP]:0", "$B:1", "[SEP]:1"], special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) tokenizer = Tokenizer(BPE()) tokenizer.add_special_tokens(["[SEP]", "[CLS]"]) tokenizer.add_tokens(["my", "name", "is", "Ġjohn", "pair"]) tokenizer.post_processor = template # Before the sequence original = tokenizer.encode("my name is Ġjohn") assert original.ids == [1, 2, 3, 4, 5, 0] assert original.type_ids == [0, 0, 0, 0, 0, 0] assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0)] pair = tokenizer.encode("my name is Ġjohn", "pair") # assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0] assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1] assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0), (0, 4), (0, 0)] processor = Sequence([byte_level, template]) tokenizer.post_processor = processor original = tokenizer.encode("my name is Ġjohn") assert original.ids == [1, 2, 3, 4, 5, 0] assert original.type_ids == [0, 0, 0, 0, 0, 0] # Offsets ARE trimmed assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 
16), (0, 0)] pair = tokenizer.encode("my name is Ġjohn", "pair") # assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0] assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1] assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 16), (0, 0), (0, 4), (0, 0)]
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_models.py
import pickle import pytest from tokenizers.models import BPE, Model, WordLevel, WordPiece from ..utils import bert_files, data_dir, roberta_files class TestBPE: def test_instantiate(self, roberta_files): assert isinstance(BPE(), Model) assert isinstance(BPE(), BPE) vocab = {"a": 0, "b": 1, "ab": 2} merges = [("a", "b")] assert isinstance(BPE(vocab, merges), Model) assert isinstance(BPE.from_file(roberta_files["vocab"], roberta_files["merges"]), BPE) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(vocab=vocab) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(merges=merges) assert isinstance( pickle.loads(pickle.dumps(BPE(vocab, merges))), BPE, ) # Deprecated calls in 0.9 with pytest.deprecated_call(): assert isinstance(BPE(roberta_files["vocab"], roberta_files["merges"]), Model) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(vocab=roberta_files["vocab"]) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(merges=roberta_files["merges"]) with pytest.deprecated_call(): assert isinstance( pickle.loads(pickle.dumps(BPE(roberta_files["vocab"], roberta_files["merges"]))), BPE, ) def test_can_modify(self): model = BPE( dropout=0.5, unk_token="[UNK]", continuing_subword_prefix="__prefix__", end_of_word_suffix="__suffix__", fuse_unk=False, ) assert model.dropout == 0.5 assert model.unk_token == "[UNK]" assert model.continuing_subword_prefix == "__prefix__" assert model.end_of_word_suffix == "__suffix__" assert model.fuse_unk == False assert model.byte_fallback == False # Modify these model.dropout = 0.1 assert pytest.approx(model.dropout) == 0.1 model.unk_token = "<unk>" assert model.unk_token == "<unk>" model.continuing_subword_prefix = None assert model.continuing_subword_prefix == None model.end_of_word_suffix = "suff" assert model.end_of_word_suffix == "suff" model.fuse_unk = True assert model.fuse_unk == True model.byte_fallback = True assert model.byte_fallback == True class TestWordPiece: def test_instantiate(self, bert_files): assert isinstance(WordPiece(), Model) assert isinstance(WordPiece(), WordPiece) vocab = {"a": 0, "b": 1, "ab": 2} assert isinstance(WordPiece(vocab), Model) assert isinstance(WordPiece(vocab), WordPiece) assert isinstance(WordPiece.from_file(bert_files["vocab"]), WordPiece) assert isinstance(pickle.loads(pickle.dumps(WordPiece(vocab))), WordPiece) # Deprecated calls in 0.9 with pytest.deprecated_call(): assert isinstance(WordPiece(bert_files["vocab"]), Model) with pytest.deprecated_call(): assert isinstance(pickle.loads(pickle.dumps(WordPiece(bert_files["vocab"]))), WordPiece) def test_can_modify(self): model = WordPiece( unk_token="<oov>", continuing_subword_prefix="__prefix__", max_input_chars_per_word=200, ) assert model.unk_token == "<oov>" assert model.continuing_subword_prefix == "__prefix__" assert model.max_input_chars_per_word == 200 # Modify these model.unk_token = "<unk>" assert model.unk_token == "<unk>" model.continuing_subword_prefix = "$$$" assert model.continuing_subword_prefix == "$$$" model.max_input_chars_per_word = 10 assert model.max_input_chars_per_word == 10 class TestWordLevel: def test_instantiate(self, roberta_files): assert isinstance(WordLevel(), Model) assert isinstance(WordLevel(), WordLevel) vocab = {"a": 0, "b": 1, "ab": 2} assert isinstance(WordLevel(vocab), Model) assert isinstance(WordLevel(vocab), WordLevel) assert isinstance(WordLevel.from_file(roberta_files["vocab"]), 
WordLevel) # The WordLevel model expects a vocab.json using the same format as roberta # so we can just try to load with this file with pytest.deprecated_call(): assert isinstance(WordLevel(roberta_files["vocab"]), Model) with pytest.deprecated_call(): assert isinstance(WordLevel(roberta_files["vocab"]), WordLevel) def test_can_modify(self): model = WordLevel(unk_token="<oov>") assert model.unk_token == "<oov>" # Modify these model.unk_token = "<unk>" assert model.unk_token == "<unk>"
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_encoding.py
import pytest from tokenizers import BertWordPieceTokenizer from ..utils import bert_files, data_dir class TestEncoding: @pytest.fixture(scope="class") def encodings(self, bert_files): tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"]) single_encoding = tokenizer.encode("I love HuggingFace") pair_encoding = tokenizer.encode("I love HuggingFace", "Do you?") return single_encoding, pair_encoding def test_sequence_ids(self, encodings): single, pair = encodings assert single.sequence_ids == [None, 0, 0, 0, 0, None] assert pair.sequence_ids == [None, 0, 0, 0, 0, None, 1, 1, 1, None] def test_n_sequences(self, encodings): single, pair = encodings assert single.n_sequences == 1 assert pair.n_sequences == 2 def test_word_to_tokens(self, encodings): single, pair = encodings assert single.tokens == ["[CLS]", "i", "love", "hugging", "##face", "[SEP]"] assert single.word_to_tokens(0) == (1, 2) assert pair.tokens == [ "[CLS]", "i", "love", "hugging", "##face", "[SEP]", "do", "you", "?", "[SEP]", ] assert pair.word_to_tokens(0) == (1, 2) assert pair.word_to_tokens(0, 0) == (1, 2) assert pair.word_to_tokens(6, 0) == None assert pair.word_to_tokens(0, 1) == (6, 7) def test_word_to_chars(self, encodings): single, pair = encodings assert single.word_to_chars(2) == (7, 18) assert pair.word_to_chars(2) == (7, 18) assert pair.word_to_chars(2, 0) == (7, 18) assert pair.word_to_chars(2, 1) == (6, 7) def test_token_to_sequence(self, encodings): single, pair = encodings assert single.token_to_sequence(2) == 0 assert pair.token_to_sequence(2) == 0 assert pair.token_to_sequence(0) == None assert pair.token_to_sequence(5) == None assert pair.token_to_sequence(6) == 1 assert pair.token_to_sequence(8) == 1 assert pair.token_to_sequence(9) == None assert pair.token_to_sequence(1200) == None def test_token_to_chars(self, encodings): single, pair = encodings assert single.token_to_chars(0) == None assert single.token_to_chars(2) == (2, 6) assert pair.token_to_chars(2) == (2, 6) assert pair.token_to_chars(5) == None assert pair.token_to_chars(6) == (0, 2) def test_token_to_word(self, encodings): single, pair = encodings assert single.token_to_word(0) == None assert single.token_to_word(1) == 0 assert single.token_to_word(4) == 2 assert pair.token_to_word(1) == 0 assert pair.token_to_word(4) == 2 assert pair.token_to_word(5) == None assert pair.token_to_word(6) == 0 assert pair.token_to_word(7) == 1 def test_char_to_token(self, encodings): single, pair = encodings assert single.char_to_token(0) == 1 assert pair.char_to_token(0) == 1 assert pair.char_to_token(0, 0) == 1 assert pair.char_to_token(1, 0) == None assert pair.char_to_token(0, 1) == 6 assert pair.char_to_token(2, 1) == None def test_char_to_word(self, encodings): single, pair = encodings assert single.char_to_word(0) == 0 assert single.char_to_word(1) == None assert pair.char_to_word(2) == 1 assert pair.char_to_word(2, 0) == 1 assert pair.char_to_word(2, 1) == None assert pair.char_to_word(3, 1) == 1 def test_truncation(self, encodings): single, _ = encodings single.truncate(2, 1, "right") assert single.tokens == ["[CLS]", "i"] assert single.overflowing[0].tokens == ["i", "love"] def test_invalid_truncate_direction(self, encodings): single, _ = encodings with pytest.raises(ValueError) as excinfo: single.truncate(2, 1, "not_a_direction") assert "Invalid truncation direction value : not_a_direction" == str(excinfo.value)
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_trainers.py
import copy import os import pickle import pytest from tokenizers import ( AddedToken, SentencePieceUnigramTokenizer, Tokenizer, models, normalizers, pre_tokenizers, trainers, ) from ..utils import data_dir, train_files class TestBpeTrainer: def test_can_modify(self): trainer = trainers.BpeTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"], limit_alphabet=13, initial_alphabet=["a", "b", "c"], continuing_subword_prefix="pref", end_of_word_suffix="suf", ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] assert trainer.limit_alphabet == 13 assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] assert trainer.continuing_subword_prefix == "pref" assert trainer.end_of_word_suffix == "suf" # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.limit_alphabet = None assert trainer.limit_alphabet == None trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] trainer.continuing_subword_prefix = None assert trainer.continuing_subword_prefix == None trainer.end_of_word_suffix = None assert trainer.continuing_subword_prefix == None def test_can_pickle(self): assert ( trainers.BpeTrainer(min_frequency=12).__getstate__() == b"""{"BpeTrainer":{"min_frequency":12,"vocab_size":30000,"show_progress":true,"special_tokens":[],"limit_alphabet":null,"initial_alphabet":[],"continuing_subword_prefix":null,"end_of_word_suffix":null,"max_token_length":null,"words":{}}}""" ) assert isinstance(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12))), trainers.BpeTrainer) assert isinstance(copy.deepcopy(trainers.BpeTrainer(min_frequency=12)), trainers.BpeTrainer) # Make sure everything is correct assert pickle.dumps(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12)))) == pickle.dumps( trainers.BpeTrainer(min_frequency=12) ) class TestWordPieceTrainer: def test_can_modify(self): trainer = trainers.WordPieceTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"], limit_alphabet=13, initial_alphabet=["a", "b", "c"], continuing_subword_prefix="pref", end_of_word_suffix="suf", ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] assert trainer.limit_alphabet == 13 assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] assert trainer.continuing_subword_prefix == "pref" assert trainer.end_of_word_suffix == "suf" # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.limit_alphabet = None assert trainer.limit_alphabet == None trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] trainer.continuing_subword_prefix = None assert trainer.continuing_subword_prefix == None trainer.end_of_word_suffix = None assert trainer.continuing_subword_prefix == None def test_can_pickle(self): assert 
isinstance(pickle.loads(pickle.dumps(trainers.WordPieceTrainer())), trainers.WordPieceTrainer) class TestWordLevelTrainer: def test_can_modify(self): trainer = trainers.WordLevelTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"] ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] def test_can_pickle(self): assert isinstance(pickle.loads(pickle.dumps(trainers.WordLevelTrainer())), trainers.WordLevelTrainer) class TestUnigram: def test_train(self, train_files): tokenizer = SentencePieceUnigramTokenizer() tokenizer.train(train_files["small"], show_progress=False) filename = "tests/data/unigram_trained.json" tokenizer.save(filename) os.remove(filename) def test_train_parallelism_with_custom_pretokenizer(self, train_files): class GoodCustomPretok: def split(self, n, normalized): # Here we just test that we can return a List[NormalizedString], it # does not really make sense to return twice the same otherwise return [normalized, normalized] def pre_tokenize(self, pretok): pretok.split(self.split) custom = pre_tokenizers.PreTokenizer.custom(GoodCustomPretok()) bpe_tokenizer = Tokenizer(models.BPE()) bpe_tokenizer.normalizer = normalizers.Lowercase() bpe_tokenizer.pre_tokenizer = custom if "TOKENIZERS_PARALLELISM" in os.environ: del os.environ["TOKENIZERS_PARALLELISM"] trainer = trainers.BpeTrainer(special_tokens=["<unk>"], show_progress=False) bpe_tokenizer.train([train_files["small"]], trainer=trainer) def test_can_pickle(self): assert isinstance(pickle.loads(pickle.dumps(trainers.UnigramTrainer())), trainers.UnigramTrainer) def test_train_with_special_tokens(self): filename = "tests/data/dummy-unigram-special_tokens-train.txt" with open(filename, "w") as f: f.write( """ [CLS] The Zen of Python, by Tim Peters [SEP] [CLS] Beautiful is better than ugly. [SEP] [CLS] Explicit is better than implicit. [SEP] [CLS] Simple is better than complex. [SEP] [CLS] Complex is better than complicated. [SEP] [CLS] Flat is better than nested. [SEP] [CLS] Sparse is better than dense. [SEP] [CLS] Readability counts. [SEP] [CLS] Special cases aren't special enough to break the rules. [SEP] [CLS] Although practicality beats purity. [SEP] [CLS] Errors should never pass silently. [SEP] [CLS] Unless explicitly silenced. [SEP] [CLS] In the face of ambiguity, refuse the temptation to guess. [SEP] [CLS] There should be one-- and preferably only one --obvious way to do it. [SEP] [CLS] Although that way may not be obvious at first unless you're Dutch. [SEP] [CLS] Now is better than never. [SEP] [CLS] Although never is often better than *right* now. [SEP] [CLS] If the implementation is hard to explain, it's a bad idea. [SEP] [CLS] If the implementation is easy to explain, it may be a good idea. [SEP] [CLS] Namespaces are one honking great idea -- let's do more of those! 
[SEP] """ ) tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]" ) tokenizer.train([filename], trainer=trainer) assert tokenizer.encode("[CLS] This is a test [SEP]").tokens == [ "[CLS]", " T", "h", "i", "s", " is ", "a", " ", "te", "s", "t ", "[SEP]", ] tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]", vocab_size=100, ) tokenizer.train([filename], trainer=trainer) assert tokenizer.get_vocab_size() == 100 tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]", "[UNK]"], unk_token="[UNK]", vocab_size=100, ) tokenizer.train([filename], trainer=trainer) assert tokenizer.get_vocab_size() == 100 def test_cannot_train_different_model(self): tokenizer = Tokenizer(models.BPE()) trainer = trainers.UnigramTrainer(show_progress=False) with pytest.raises(Exception, match="UnigramTrainer can only train a Unigram"): tokenizer.train([], trainer) def test_can_modify(self): trainer = trainers.UnigramTrainer( vocab_size=12345, show_progress=False, special_tokens=["1", AddedToken("2", lstrip=True)], initial_alphabet=["a", "b", "c"], ) assert trainer.vocab_size == 12345 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", normalized=False, special=True), AddedToken("2", lstrip=True, normalized=False, special=True), ] assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] def test_continuing_prefix_trainer_mistmatch(self): UNK = "[UNK]" special_tokens = [UNK] tokenizer = Tokenizer(models.BPE(unk_token=UNK, continuing_subword_prefix="##")) trainer = trainers.BpeTrainer(special_tokens=special_tokens) tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [pre_tokenizers.Whitespace(), pre_tokenizers.Digits(individual_digits=True)] ) tokenizer.train(files=["data/big.txt"], trainer=trainer) tokenizer.save("data/tokenizer.json") tokenizer.from_file("data/tokenizer.json")
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_normalizers.py
import pickle import pytest from tokenizers import NormalizedString, Tokenizer from tokenizers.models import BPE from tokenizers.normalizers import BertNormalizer, Lowercase, Normalizer, Sequence, Strip, Prepend class TestBertNormalizer: def test_instantiate(self): assert isinstance(BertNormalizer(), Normalizer) assert isinstance(BertNormalizer(), BertNormalizer) assert isinstance(pickle.loads(pickle.dumps(BertNormalizer())), BertNormalizer) def test_strip_accents(self): normalizer = BertNormalizer(strip_accents=True, lowercase=False, handle_chinese_chars=False, clean_text=False) output = normalizer.normalize_str("Héllò") assert output == "Hello" def test_handle_chinese_chars(self): normalizer = BertNormalizer(strip_accents=False, lowercase=False, handle_chinese_chars=True, clean_text=False) output = normalizer.normalize_str("你好") assert output == " 你 好 " def test_clean_text(self): normalizer = BertNormalizer(strip_accents=False, lowercase=False, handle_chinese_chars=False, clean_text=True) output = normalizer.normalize_str("\ufeffHello") assert output == "Hello" def test_lowercase(self): normalizer = BertNormalizer(strip_accents=False, lowercase=True, handle_chinese_chars=False, clean_text=False) output = normalizer.normalize_str("Héllò") assert output == "héllò" def test_can_modify(self): normalizer = BertNormalizer(clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True) assert normalizer.clean_text == True assert normalizer.handle_chinese_chars == True assert normalizer.strip_accents == True assert normalizer.lowercase == True # Modify these normalizer.clean_text = False assert normalizer.clean_text == False normalizer.handle_chinese_chars = False assert normalizer.handle_chinese_chars == False normalizer.strip_accents = None assert normalizer.strip_accents == None normalizer.lowercase = False assert normalizer.lowercase == False class TestSequence: def test_instantiate(self): assert isinstance(Sequence([]), Normalizer) assert isinstance(Sequence([]), Sequence) assert isinstance(pickle.loads(pickle.dumps(Sequence([]))), Sequence) def test_can_make_sequences(self): normalizer = Sequence([Lowercase(), Strip()]) output = normalizer.normalize_str(" HELLO ") assert output == "hello" class TestLowercase: def test_instantiate(self): assert isinstance(Lowercase(), Normalizer) assert isinstance(Lowercase(), Lowercase) assert isinstance(pickle.loads(pickle.dumps(Lowercase())), Lowercase) def test_lowercase(self): normalizer = Lowercase() output = normalizer.normalize_str("HELLO") assert output == "hello" class TestStrip: def test_instantiate(self): assert isinstance(Strip(), Normalizer) assert isinstance(Strip(), Strip) assert isinstance(pickle.loads(pickle.dumps(Strip())), Strip) def test_left_strip(self): normalizer = Strip(left=True, right=False) output = normalizer.normalize_str(" hello ") assert output == "hello " def test_right_strip(self): normalizer = Strip(left=False, right=True) output = normalizer.normalize_str(" hello ") assert output == " hello" def test_full_strip(self): normalizer = Strip(left=True, right=True) output = normalizer.normalize_str(" hello ") assert output == "hello" def test_can_modify(self): normalizer = Strip(left=True, right=True) assert normalizer.left == True assert normalizer.right == True # Modify these normalizer.left = False assert normalizer.left == False normalizer.right = False assert normalizer.right == False class TestPrepend: def test_instantiate(self): assert isinstance(Prepend("▁"), Normalizer) assert isinstance(Prepend("▁"), 
Prepend) assert isinstance(pickle.loads(pickle.dumps(Prepend("▁"))), Prepend) def test_prepend(self): normalizer = Prepend(prepend="▁") output = normalizer.normalize_str("hello") assert output == "▁hello" def test_can_modify(self): normalizer = Prepend("▁") assert normalizer.prepend == "▁" # Modify these normalizer.prepend = "-" assert normalizer.prepend == "-" class TestCustomNormalizer: class BadCustomNormalizer: def normalize(self, normalized, wrong): pass class GoodCustomNormalizer: def normalize(self, normalized): self.kept_normalized = normalized normalized.replace("there", "you") def use_after_normalize(self): self.kept_normalized.replace("something", "else") def test_instantiate(self): bad = Normalizer.custom(TestCustomNormalizer.BadCustomNormalizer()) good_custom = TestCustomNormalizer.GoodCustomNormalizer() good = Normalizer.custom(good_custom) assert isinstance(bad, Normalizer) assert isinstance(good, Normalizer) with pytest.raises(Exception, match="TypeError:.*normalize()"): bad.normalize_str("Hey there!") assert good.normalize_str("Hey there!") == "Hey you!" with pytest.raises(Exception, match="Cannot use a NormalizedStringRefMut outside `normalize`"): good_custom.use_after_normalize() def test_normalizer_interface(self): normalizer = Normalizer.custom(TestCustomNormalizer.GoodCustomNormalizer()) normalized = NormalizedString("Hey there!") normalizer.normalize(normalized) assert repr(normalized) == 'NormalizedString(original="Hey there!", normalized="Hey you!")' assert str(normalized) == "Hey you!"
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_pre_tokenizers.py
import json import pickle import pytest from tokenizers.pre_tokenizers import ( BertPreTokenizer, ByteLevel, CharDelimiterSplit, Digits, Metaspace, PreTokenizer, Punctuation, Sequence, Split, UnicodeScripts, Whitespace, WhitespaceSplit, ) class TestByteLevel: def test_instantiate(self): assert ByteLevel() is not None assert ByteLevel(add_prefix_space=True) is not None assert ByteLevel(add_prefix_space=False) is not None assert isinstance(ByteLevel(), PreTokenizer) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_has_alphabet(self): assert isinstance(ByteLevel.alphabet(), list) assert len(ByteLevel.alphabet()) == 256 def test_can_modify(self): pretok = ByteLevel(add_prefix_space=False) assert pretok.add_prefix_space == False # Modify these pretok.add_prefix_space = True assert pretok.add_prefix_space == True def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestSplit: def test_instantiate(self): pre_tokenizer = Split(pattern=" ", behavior="removed") assert pre_tokenizer is not None assert isinstance(pre_tokenizer, PreTokenizer) assert isinstance(pre_tokenizer, Split) assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed"))), Split) # test with invert=True pre_tokenizer_with_invert = Split(pattern=" ", behavior="isolated", invert=True) assert pre_tokenizer_with_invert is not None assert isinstance(pre_tokenizer_with_invert, PreTokenizer) assert isinstance(pre_tokenizer_with_invert, Split) assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed", True))), Split) class TestWhitespace: def test_instantiate(self): assert Whitespace() is not None assert isinstance(Whitespace(), PreTokenizer) assert isinstance(Whitespace(), Whitespace) assert isinstance(pickle.loads(pickle.dumps(Whitespace())), Whitespace) class TestWhitespaceSplit: def test_instantiate(self): assert WhitespaceSplit() is not None assert isinstance(WhitespaceSplit(), PreTokenizer) assert isinstance(WhitespaceSplit(), WhitespaceSplit) assert isinstance(pickle.loads(pickle.dumps(WhitespaceSplit())), WhitespaceSplit) class TestBertPreTokenizer: def test_instantiate(self): assert BertPreTokenizer() is not None assert isinstance(BertPreTokenizer(), PreTokenizer) assert isinstance(BertPreTokenizer(), BertPreTokenizer) assert isinstance(pickle.loads(pickle.dumps(BertPreTokenizer())), BertPreTokenizer) class TestMetaspace: def test_instantiate(self): assert Metaspace() is not None assert Metaspace(replacement="-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): Metaspace(replacement="") assert Metaspace(add_prefix_space=True) is not None assert isinstance(Metaspace(), PreTokenizer) assert isinstance(Metaspace(), Metaspace) assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace) def test_can_modify(self): pretok = Metaspace(replacement="$", add_prefix_space=False) assert pretok.replacement == "$" assert pretok.add_prefix_space == False # Modify these pretok.replacement = "%" assert pretok.replacement == "%" pretok.add_prefix_space = True assert pretok.add_prefix_space == True pretok.prepend_scheme = "never" assert pretok.prepend_scheme == "never" class TestCharDelimiterSplit: def test_instantiate(self): assert CharDelimiterSplit("-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): CharDelimiterSplit("") assert isinstance(CharDelimiterSplit(" "), 
PreTokenizer) assert isinstance(CharDelimiterSplit(" "), CharDelimiterSplit) assert isinstance(pickle.loads(pickle.dumps(CharDelimiterSplit("-"))), CharDelimiterSplit) def test_can_modify(self): pretok = CharDelimiterSplit("@") assert pretok.delimiter == "@" # Modify these pretok.delimiter = "!" assert pretok.delimiter == "!" class TestPunctuation: def test_instantiate(self): assert Punctuation() is not None assert Punctuation("removed") is not None assert isinstance(Punctuation(), PreTokenizer) assert isinstance(Punctuation(), Punctuation) assert isinstance(pickle.loads(pickle.dumps(Punctuation())), Punctuation) class TestSequence: def test_instantiate(self): assert Sequence([]) is not None assert isinstance(Sequence([]), PreTokenizer) assert isinstance(Sequence([]), Sequence) dumped = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(dumped), Sequence) def test_bert_like(self): pre_tokenizer = Sequence([WhitespaceSplit(), Punctuation()]) assert isinstance(Sequence([]), PreTokenizer) assert isinstance(Sequence([]), Sequence) assert isinstance(pickle.loads(pickle.dumps(pre_tokenizer)), Sequence) result = pre_tokenizer.pre_tokenize_str("Hey friend! How are you?!?") assert result == [ ("Hey", (0, 3)), ("friend", (4, 10)), ("!", (10, 11)), ("How", (16, 19)), ("are", (20, 23)), ("you", (24, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] class TestDigits: def test_instantiate(self): assert Digits() is not None assert isinstance(Digits(), PreTokenizer) assert isinstance(Digits(), Digits) assert isinstance(Digits(True), Digits) assert isinstance(Digits(False), Digits) assert isinstance(pickle.loads(pickle.dumps(Digits())), Digits) def test_can_modify(self): pretok = Digits(individual_digits=False) assert pretok.individual_digits == False # Modify these pretok.individual_digits = True assert pretok.individual_digits == True class TestUnicodeScripts: def test_instantiate(self): assert UnicodeScripts() is not None assert isinstance(UnicodeScripts(), PreTokenizer) assert isinstance(UnicodeScripts(), UnicodeScripts) assert isinstance(pickle.loads(pickle.dumps(UnicodeScripts())), UnicodeScripts) class TestCustomPreTokenizer: class BadCustomPretok: def pre_tokenize(self, pretok, wrong): # This method does not have the right signature: it takes one too many arg pass class GoodCustomPretok: def split(self, n, normalized): # Here we just test that we can return a List[NormalizedString], it # does not really make sense to return twice the same otherwise return [normalized, normalized] def pre_tokenize(self, pretok): pretok.split(self.split) def test_instantiate(self): bad = PreTokenizer.custom(TestCustomPreTokenizer.BadCustomPretok()) good = PreTokenizer.custom(TestCustomPreTokenizer.GoodCustomPretok()) assert isinstance(bad, PreTokenizer) assert isinstance(good, PreTokenizer) with pytest.raises(Exception, match="TypeError:.*pre_tokenize()"): bad.pre_tokenize_str("Hey there!") assert good.pre_tokenize_str("Hey there!") == [ ("Hey there!", (0, 10)), ("Hey there!", (0, 10)), ] def test_camel_case(self): class CamelCasePretok: def get_state(self, c): if c.islower(): return "lower" elif c.isupper(): return "upper" elif c.isdigit(): return "digit" else: return "rest" def split(self, n, normalized): i = 0 # states = {"any", "lower", "upper", "digit", "rest"} state = "any" pieces = [] for j, c in enumerate(normalized.normalized): c_state = self.get_state(c) if state == "any": state = c_state if state != "rest" and state == c_state: pass elif state == "upper" and c_state == "lower": pass else: 
pieces.append(normalized[i:j]) i = j state = c_state pieces.append(normalized[i:]) return pieces def pre_tokenize(self, pretok): pretok.split(self.split) camel = PreTokenizer.custom(CamelCasePretok()) assert camel.pre_tokenize_str("HeyThere!?-ThisIsLife") == [ ("Hey", (0, 3)), ("There", (3, 8)), ("!", (8, 9)), ("?", (9, 10)), ("-", (10, 11)), ("This", (11, 15)), ("Is", (15, 17)), ("Life", (17, 21)), ]
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/.cargo/config.toml
[target.x86_64-apple-darwin] rustflags = [ "-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup", "-C", "link-arg=-mmacosx-version-min=10.11", ] [target.aarch64-apple-darwin] rustflags = [ "-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup", "-C", "link-arg=-mmacosx-version-min=10.11", ]
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/scripts/spm_parity_check.py
import tokenizers from argparse import ArgumentParser import sentencepiece as spm from collections import Counter import json import os import datetime try: from termcolor import colored has_color = True except Exception: has_color = False def main(): parser = ArgumentParser("SentencePiece parity checker") parser.add_argument( "--input-file", "-i", type=str, required=True, help="Which files do you want to train from", ) parser.add_argument( "--model-file", "-m", type=str, required=False, default=None, help="Use a pretrained token file", ) parser.add_argument( "--model-prefix", type=str, default="spm_parity", help="Model prefix for spm_train", ) parser.add_argument( "--vocab-size", "-v", type=int, default=8000, help="Vocab size for spm_train", ) parser.add_argument( "--verbose", action="store_true", help="Verbosity", ) parser.add_argument( "--train", action="store_true", help="Instead of checking the encoder part, we check the trainer part", ) parser.add_argument( "--from-spm", action="store_true", help="Directly load the spm file with it's own normalizer", ) args = parser.parse_args() trained = False if args.model_file is None: spm.SentencePieceTrainer.Train( f"--input={args.input_file} --model_prefix={args.model_prefix}" f" --character_coverage=1.0" f" --max_sentence_length=40000" f" --num_threads=1" f" --vocab_size={args.vocab_size}" ) trained = True args.model_file = f"{args.model_prefix}.model" try: if args.train: check_train(args) else: check_encode(args) finally: if trained: os.remove(f"{args.model_prefix}.model") os.remove(f"{args.model_prefix}.vocab") def check_train(args): sp = spm.SentencePieceProcessor() sp.Load(args.model_file) tokenizer = tokenizers.SentencePieceUnigramTokenizer() tokenizer.train(args.input_file, show_progress=False) spm_tokens = 0 tokenizer_tokens = 0 with open(args.input_file, "r") as f: for i, line in enumerate(f): line = line.strip() ids = sp.EncodeAsIds(line) encoded = tokenizer.encode(line) spm_tokens += len(ids) tokenizer_tokens += len(encoded.ids) vocab = [0 for i in range(args.vocab_size)] spm_vocab = [0 for i in range(args.vocab_size)] for token, index in tokenizer.get_vocab().items(): vocab[index] = token for i in range(args.vocab_size): spm_vocab[i] = sp.id_to_piece(i) # 0 is unk in tokenizers, 0, 1, 2 are unk bos, eos in spm by default. for i, (token, spm_token) in enumerate(zip(vocab[1:], spm_vocab[3:])): if token != spm_token: print(f"First different token is token {i} ({token} != {spm_token})") break print(f"Tokenizer used {tokenizer_tokens}, where spm used {spm_tokens}") assert ( tokenizer_tokens < spm_tokens ), "Our trainer should be at least more efficient than the SPM one" print("Ok our trainer is at least more efficient than the SPM one") def check_diff(spm_diff, tok_diff, sp, tok): if spm_diff == list(reversed(tok_diff)): # AAA -> AA+A vs A+AA case. return True elif len(spm_diff) == len(tok_diff) and tok.decode(spm_diff) == tok.decode( tok_diff ): # Second order OK # Barrich -> Barr + ich vs Bar + rich return True spm_reencoded = sp.encode(sp.decode(spm_diff)) tok_reencoded = tok.encode(tok.decode(spm_diff)).ids if spm_reencoded != spm_diff and spm_reencoded == tok_reencoded: # Type 3 error. # Snehagatha -> # Sne, h, aga, th, a # Sne, ha, gat, ha # Encoding the wrong with sp does not even recover what spm gave us # It fits tokenizer however... 
return True return False def check_details(line, spm_ids, tok_ids, sp, tok): # Encoding can be the same with same result AAA -> A + AA vs AA + A # We can check that we use at least exactly the same number of tokens. for i, (spm_id, tok_id) in enumerate(zip(spm_ids, tok_ids)): if spm_id != tok_id: break first = i for i, (spm_id, tok_id) in enumerate(zip(reversed(spm_ids), reversed(tok_ids))): if spm_id != tok_id: break last = len(spm_ids) - i spm_diff = spm_ids[first:last] tok_diff = tok_ids[first:last] if check_diff(spm_diff, tok_diff, sp, tok): return True if last - first > 5: # We might have twice a single problem, attempt to subdivide the disjointed tokens into smaller problems spms = Counter(spm_ids[first:last]) toks = Counter(tok_ids[first:last]) removable_tokens = { spm_ for (spm_, si) in spms.items() if toks.get(spm_, 0) == si } min_width = 3 for i in range(last - first - min_width): if all( spm_ids[first + i + j] in removable_tokens for j in range(min_width) ): possible_matches = [ k for k in range(last - first - min_width) if tok_ids[first + k : first + k + min_width] == spm_ids[first + i : first + i + min_width] ] for j in possible_matches: if check_diff( spm_ids[first : first + i], tok_ids[first : first + j], sp, tok ) and check_details( line, spm_ids[first + i : last], tok_ids[first + j : last], sp, tok, ): return True print(f"Spm: {[tok.decode([spm_ids[i]]) for i in range(first, last)]}") try: print(f"Tok: {[tok.decode([tok_ids[i]]) for i in range(first, last)]}") except Exception: pass ok_start = tok.decode(spm_ids[:first]) ok_end = tok.decode(spm_ids[last:]) wrong = tok.decode(spm_ids[first:last]) print() if has_color: print( f"{colored(ok_start, 'grey')}{colored(wrong, 'red')}{colored(ok_end, 'grey')}" ) else: print(wrong) return False def check_encode(args): sp = spm.SentencePieceProcessor() sp.Load(args.model_file) if args.from_spm: tok = tokenizers.SentencePieceUnigramTokenizer.from_spm(args.model_file) else: vocab = [(sp.id_to_piece(i), sp.get_score(i)) for i in range(sp.piece_size())] unk_id = sp.unk_id() tok = tokenizers.SentencePieceUnigramTokenizer(vocab, unk_id) perfect = 0 imperfect = 0 wrong = 0 now = datetime.datetime.now spm_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(args.input_file, "r", encoding="utf-8-sig") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = sp.EncodeAsIds(line) spm_time = now() encoded = tok.encode(line) tok_time = now() spm_total_time += spm_time - start tok_total_time += tok_time - spm_time if args.verbose: if i % 10000 == 0: print( f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})" ) print(f"SPM: {spm_total_time} - TOK: {tok_total_time}") if ids != encoded.ids: if check_details(line, ids, encoded.ids, sp, tok): imperfect += 1 continue else: wrong += 1 else: perfect += 1 assert ids == encoded.ids, f"line {i}: {line} : \n\n{ids}\n{encoded.ids}\n{list(zip(encoded.ids, encoded.tokens))}" print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})") total = perfect + imperfect + wrong print( f"Accuracy {perfect * 100 / total:.2f} Slowdown : {tok_total_time/ spm_total_time:.2f}" ) if __name__ == "__main__": main()
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/scripts/convert.py
import transformers from tokenizers.implementations import SentencePieceUnigramTokenizer, BaseTokenizer from tokenizers.processors import TemplateProcessing from tokenizers.models import Unigram, BPE from tokenizers import decoders from tokenizers import Tokenizer, Regex from tokenizers.normalizers import ( StripAccents, NFKD, Lowercase, Sequence, BertNormalizer, Precompiled, Replace, ) from tokenizers.pre_tokenizers import ( Digits, WhitespaceSplit, Metaspace, Sequence as PSequence, ) import json import unicodedata import sys import os import datetime import argparse sys.path.append(".") from spm_parity_check import check_details from sentencepiece_extractor import SentencePieceExtractor def check_number_comma(piece: str) -> bool: return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit() def get_proto(filename: str): try: import sys sys.path.append(".") import sentencepiece_model_pb2 as model except Exception: raise Exception( "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." ) m = model.ModelProto() m.ParseFromString(open(filename, "rb").read()) return m class Converter: def __init__(self, original_tokenizer): self.original_tokenizer = original_tokenizer def converted(self) -> Tokenizer: raise NotImplementedError() class SpmConverter(Converter): def __init__(self, *args): super().__init__(*args) self.proto = get_proto(self.original_tokenizer.vocab_file) def vocab(self, proto): return [(piece.piece, piece.score) for piece in proto.pieces] def unk_id(self, proto): return proto.trainer_spec.unk_id def tokenizer(self, proto): model_type = proto.trainer_spec.model_type vocab = self.vocab(proto) unk_id = self.unk_id(proto) if model_type == 1: tokenizer = Tokenizer(Unigram(vocab, unk_id)) elif model_type == 2: vocab, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract() tokenizer = Tokenizer( BPE(vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True) ) else: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) return tokenizer def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap return Sequence([Precompiled(precompiled_charsmap), Replace(Regex(" {2,}"), " ")]) def post_processor(self, tokenizer): return None def converted(self): tokenizer = self.tokenizer(self.proto) # Tokenizer assemble tokenizer.normalizer = self.normalizer(self.proto) replacement = "▁" add_prefix_space = True tokenizer.pre_tokenizer = Metaspace( replacement=replacement, add_prefix_space=add_prefix_space ) tokenizer.decoder = decoders.Metaspace( replacement=replacement, add_prefix_space=add_prefix_space ) post_processor = self.post_processor(tokenizer) if post_processor: tokenizer.post_processor = post_processor # TODO what parameters should we give ? 
parameters = {} return BaseTokenizer(tokenizer, parameters) class AlbertConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["[CLS]", "$0", "[SEP]"], seq_b=["$1", "[SEP]"], special_tokens=[ ("[CLS]", tokenizer.get_vocab()["[CLS]"]), ("[SEP]", tokenizer.get_vocab()["[SEP]"]), ], ) class CamembertConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class MBartConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [ ("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ] return vocab def unk_id(self, proto): return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>", "en_XX"], seq_b=["$1", "</s>"], special_tokens=[ ("en_XX", tokenizer.get_vocab()["en_XX"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLMRobertaConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLNetConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( 
seq_a=["$0", "<sep>", "<cls>"], seq_b=["$1", "<sep>"], special_tokens=[ ("<sep>", tokenizer.get_vocab()["<sep>"]), ("<cls>", tokenizer.get_vocab()["<cls>"]), ], ) class ReformerConverter(SpmConverter): pass class PegasusConverter(SpmConverter): offset = 103 def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0), (self.original_tokenizer.eos_token, 0), ] vocab += [(f"unk_{i}", -100) for i in range(2, 2 + self.offset)] vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] return vocab def unk_id(self, proto): return proto.trainer_spec.unk_id + self.offset def post_processor(self, tokenizer): eos = self.original_tokenizer.eos_token return TemplateProcessing( seq_a=["$0", eos], seq_b=["$1", eos], special_tokens=[(eos, tokenizer.get_vocab()[eos])], ) class T5Converter(SpmConverter): def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[("</s>", tokenizer.get_vocab()["</s>"])], ) CONVERTERS = { "AlbertTokenizer": AlbertConverter, "CamembertTokenizer": CamembertConverter, "XLMRobertaTokenizer": XLMRobertaConverter, "MBartTokenizer": MBartConverter, "XLNetTokenizer": XLNetConverter, "ReformerTokenizer": ReformerConverter, "PegasusTokenizer": PegasusConverter, "T5Tokenizer": T5Converter, } def check(pretrained, filename): transformer_tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained) converter_class = CONVERTERS[transformer_tokenizer.__class__.__name__] tokenizer = converter_class(transformer_tokenizer).converted() now = datetime.datetime.now trans_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(filename, "r") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = transformer_tokenizer.encode(line) trans = now() tok_ids = tokenizer.encode(line).ids tok = now() trans_total_time += trans - start tok_total_time += tok - trans if ids != tok_ids: if check_details(line, ids, tok_ids, transformer_tokenizer, tokenizer): continue assert ids == tok_ids, f"Error in line {i}: {line} {ids} != {tok_ids}" tokenizer.save(f"{pretrained.replace('/', '-')}.json") return ("OK", trans_total_time / tok_total_time) def main(): pretraineds = [ "albert-base-v1", "albert-large-v1", "albert-xlarge-v1", "albert-xxlarge-v1", "albert-base-v2", "albert-large-v2", "albert-xlarge-v2", "albert-xxlarge-v2", "camembert-base", "xlm-roberta-base", "xlm-roberta-large", "xlm-roberta-large-finetuned-conll02-dutch", "xlm-roberta-large-finetuned-conll02-spanish", "xlm-roberta-large-finetuned-conll03-english", "xlm-roberta-large-finetuned-conll03-german", "facebook/mbart-large-en-ro", "facebook/mbart-large-cc25", "xlnet-base-cased", "xlnet-large-cased", "google/reformer-crime-and-punishment", "t5-small", "google/pegasus-large", ] parser = argparse.ArgumentParser() parser.add_argument( "--filename", required=True, type=str, help="The filename that we are going to encode in both versions to check that conversion worked", ) parser.add_argument( "--models", type=lambda s: s.split(","), default=pretraineds, help=f"The pretrained tokenizers you want to test agains, (default: {pretraineds})", ) args = parser.parse_args() print(args.filename) model_len = 50 status_len = 6 speedup_len = 8 print(f"|{'Model':^{model_len}}|{'Status':^{status_len}}|{'Speedup':^{speedup_len}}|") print(f"|{'-'*model_len}|{'-'*status_len}|{'-'*speedup_len}|") for pretrained in args.models: status, speedup = check(pretrained, args.filename) print( 
f"|{pretrained:<{model_len}}|{status:^{status_len}}|{speedup:^{speedup_len - 1}.2f}x|" ) if __name__ == "__main__": main()
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/scripts/sentencepiece_extractor.py
from argparse import ArgumentParser from json import dump from logging import basicConfig, getLogger from os import linesep, remove from os.path import exists from tempfile import NamedTemporaryFile from typing import Dict, List, Tuple from requests import get from sentencepiece import SentencePieceProcessor from tqdm import trange, tqdm basicConfig() logger = getLogger() class SentencePieceExtractor: """ Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece """ def __init__(self, model: str): # Get SentencePiece self.sp = SentencePieceProcessor() self.sp.Load(model) def extract(self) -> Tuple[Dict[str, int], List[Tuple]]: sp = self.sp vocab = {sp.id_to_piece(index): index for index in trange(sp.GetPieceSize())} # Merges merges = [] for piece_l in tqdm(vocab.keys(), total=sp.GetPieceSize()): for piece_r in vocab.keys(): merge = f"{piece_l}{piece_r}" piece_id = vocab.get(merge, None) if piece_id: merges += [(piece_l, piece_r, piece_id)] merges = sorted(merges, key=lambda val: val[2]) merges = [(val[0], val[1]) for val in merges] return vocab, merges class YouTokenToMeExtractor: """ Extractor implementation for YouTokenToMe trained models format. Model are as follow: vocab_size nb_merges piece piece_id ...(repeated vocab_size) piece_id_left piece_id_right piece_id ...(repeated nb merges) """ def __init__(self, model: str): self._model = model def extract(self) -> Tuple[Dict[str, int], List[Tuple]]: with open(self._model, "r") as model_f: # Retrieve information nb_pieces, nb_merges = map(int, model_f.readline().split()) vocab, merges = {}, [] # Vocab for _ in trange(nb_pieces): piece, piece_id = map(int, model_f.readline().split()) vocab[piece_id] = chr(piece) # Merges for _ in trange(nb_merges): piece_id_l, piece_id_r, piece = map(int, model_f.readline().split()) piece_l, piece_r = vocab[piece_id_l], vocab[piece_id_r] vocab[piece] = f"{piece_l}{piece_r}" merges += [(piece_l, piece_r)] # Special tokens unk, pad, bos, eos = map(int, model_f.readline().split()) vocab[unk] = "<unk>" vocab[pad] = "<pad>" vocab[bos] = "<bos>" vocab[eos] = "<eos>" # Invert key and value for vocab vocab = dict(zip(vocab.values(), vocab.keys())) return vocab, merges if __name__ == "__main__": parser = ArgumentParser("SentencePiece vocab extractor") parser.add_argument( "--provider", type=str, required=True, choices=["sentencepiece", "youtokentome"], help="Indicate the format of the file.", ) parser.add_argument( "--model", type=str, required=True, help="SentencePiece model to extract vocab from." 
) parser.add_argument( "--vocab-output-path", type=str, required=True, help="Path where the vocab.json file will be extracted", ) parser.add_argument( "--merges-output-path", type=str, required=True, help="Path where the merges file will be extracted", ) # Parse cli arguments args = parser.parse_args() try: if args.model.startswith("http"): # Saving model with NamedTemporaryFile("wb", delete=False) as f: logger.info("Writing content from {} to {}".format(args.model, f.name)) response = get(args.model, allow_redirects=True) f.write(response.content) args.remote_model = args.model args.model = f.name # Allocate extractor extractor = ( SentencePieceExtractor if args.provider == "sentencepiece" else YouTokenToMeExtractor ) extractor = extractor(args.model) logger.info(f"Using {type(extractor).__name__}") # Open output files and let's extract model information with open(args.vocab_output_path, "w") as vocab_f: with open(args.merges_output_path, "w") as merges_f: # Do the extraction vocab, merges = extractor.extract() # Save content dump(vocab, vocab_f) merges_f.writelines(map(lambda x: f"{x[0]} {x[1]}{linesep}", merges)) finally: # If model was downloaded from internet we need to cleanup the tmp folder. if hasattr(args, "remote_model") and exists(args.model): remove(args.model)
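# Example invocation (a sketch, not part of the original script; file names are placeholders):
#   python sentencepiece_extractor.py --provider sentencepiece --model spm.model \
#       --vocab-output-path vocab.json --merges-output-path merges.txt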
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/example.py
import argparse import logging import time from tqdm import tqdm logging.getLogger("transformers").disabled = True logging.getLogger("transformers.tokenization_utils").disabled = True from tokenizers import Tokenizer, decoders, pre_tokenizers from tokenizers.models import BPE, WordPiece from tokenizers.normalizers import BertNormalizer from tokenizers.processors import BertProcessing from transformers import BertTokenizer, GPT2Tokenizer parser = argparse.ArgumentParser() parser.add_argument("--type", default="gpt2", type=str, help="The type of tokenizer (bert|gpt2)") parser.add_argument("--file", default=None, type=str, help="The file to encode") parser.add_argument("--vocab", default=None, type=str, required=True, help="The vocab file") parser.add_argument("--merges", default=None, type=str, help="The merges.txt file") parser.add_argument("--debug", action="store_true", help="Verbose output") args = parser.parse_args() if args.type == "gpt2" and args.merges is None: raise Exception("Expected merges.txt file") if args.file is not None: with open(args.file, "r") as fp: text = [line.strip() for line in fp] else: text = """ The Zen of Python, by Tim Peters Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those! 
""".split( "\n" ) if args.type == "gpt2": print("Running GPT-2 tokenizer") tok_p = GPT2Tokenizer.from_pretrained("gpt2") # Create a Tokenizer using BPE tok_r = Tokenizer(BPE(args.vocab, args.merges)) # Use ByteLevel PreTokenizer tok_r.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False) # Use ByteLevel Decoder tok_r.decoder = decoders.ByteLevel() elif args.type == "bert": print("Running Bert tokenizer") tok_p = BertTokenizer.from_pretrained(args.vocab) tok_r = Tokenizer(WordPiece(args.vocab, unk_token="[UNK]", max_input_chars_per_word=100)) tok_r.normalizer = BertNormalizer( clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True, ) # tok_r.pre_tokenizer = pre_tokenizers.Whitespace() tok_r.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tok_r.decoder = decoders.WordPiece() tok_r.post_processor = BertProcessing( ("[SEP]", tok_r.token_to_id("[SEP]")), ("[CLS]", tok_r.token_to_id("[CLS]")), ) else: raise Exception(f"Unknown type {args.type}") def tokenize_r(): return tok_r.encode_batch(text) def tokenize_p(): return [tok_p.encode(sentence, add_special_tokens=True) for sentence in tqdm(text)] print(f"Tokenizing {len(text)} lines") # Rust version start = time.time() encoded_r = tokenize_r() end = time.time() time_r = end - start print(f"Rust tokenizer took: {time_r} sec") # Python version start = time.time() encoded_p = tokenize_p() end = time.time() time_p = end - start print(f"Transformer tokenizer took: {time_p} sec") print(f"SpeedUp Ratio: {time_p / time_r}") ids_r = [sentence.ids for sentence in encoded_r] diff_ids = 0 for i in range(0, len(encoded_r)): if encoded_r[i].ids != encoded_p[i]: diff_ids += 1 if args.debug: print(encoded_r[i].ids) print(encoded_p[i]) print(encoded_r[i].tokens) print(tok_p.tokenize(text[i])) print(text[i]) print("") print(f"Ids differences: {diff_ids}") decoded_r = tok_r.decode_batch([sentence.ids for sentence in encoded_r], False) decoded_p = [tok_p.decode(en) for en in encoded_p] diff_decoded = 0 for i in range(0, len(text)): if decoded_r[i] != decoded_p[i]: diff_decoded += 1 if args.debug: print(f"Original: {text[i]}") print(f"Rust: {decoded_r[i]}") print(f"Python: {decoded_p[i]}") print("") print(f"Decoding differences: {diff_decoded}")
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/using_the_visualizer.ipynb
from tokenizers import BertWordPieceTokenizer
from tokenizers.tools import EncodingVisualizer

EncodingVisualizer.unk_token_regex.search("aaa[udsnk]aaa")

text = """Mathias Bynens 'Z͑ͫ̓ͪ̂ͫ̽͏̴̙̤̞͉͚̯̞̠͍A̴̵̜̰͔ͫ͗͢L̠ͨͧͩ͘G̴̻͈͍͔̹̑͗̎̅͛́Ǫ̵̹̻̝̳͂̌̌͘!͖̬̰̙̗̿̋ͥͥ̂ͣ̐́́͜͞': Whenever you’re working on a piece of JavaScript code that deals with strings or regular expressions in some way, just add a unit test that contains a pile of poo (💩) in a string, 💩💩💩💩💩💩💩💩💩💩💩💩 and see if anything breaks. It’s a quick, fun, and easy way to see if your code supports astral symbols. Once you’ve found a Unicode-related bug in your code, all you need to do is apply the techniques discussed in this post to fix it."""

tokenizer = BertWordPieceTokenizer("/tmp/bert-base-uncased-vocab.txt", lowercase=True)
visualizer = EncodingVisualizer(tokenizer=tokenizer)

visualizer(text)

from tokenizers.tools import Annotation

anno1 = Annotation(start=0, end=2, label="foo")
anno2 = Annotation(start=2, end=4, label="bar")
anno3 = Annotation(start=6, end=8, label="poo")
anno4 = Annotation(start=9, end=12, label="shoe")
annotations = [
    anno1,
    anno2,
    anno3,
    anno4,
    Annotation(start=23, end=30, label="random tandem bandem sandem landem fandom"),
    Annotation(start=63, end=70, label="foo"),
    Annotation(start=80, end=95, label="bar"),
    Annotation(start=120, end=128, label="bar"),
    Annotation(start=152, end=155, label="poo"),
]

visualizer(text, annotations=annotations)

funnyAnnotations = [dict(startPlace=i, endPlace=i + 3, theTag=str(i)) for i in range(0, 20, 4)]
funnyAnnotations

converter = lambda funny: Annotation(start=funny['startPlace'], end=funny['endPlace'], label=funny['theTag'])
visualizer = EncodingVisualizer(tokenizer=tokenizer, default_to_notebook=True, annotation_converter=converter)

visualizer(text, annotations=funnyAnnotations)

from tokenizers import ByteLevelBPETokenizer

roberta_tokenizer = ByteLevelBPETokenizer.from_file('/tmp/roberta-base-vocab.json', '/tmp/roberta-base-merges.txt')
roberta_visualizer = EncodingVisualizer(tokenizer=roberta_tokenizer, default_to_notebook=True)
roberta_visualizer(text, annotations=annotations)
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/train_with_datasets.py
import datasets from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, trainers # Build a tokenizer bpe_tokenizer = Tokenizer(models.BPE()) bpe_tokenizer.pre_tokenizer = pre_tokenizers.Whitespace() bpe_tokenizer.normalizer = normalizers.Lowercase() # Initialize a dataset dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train") # Build an iterator over this dataset def batch_iterator(): batch_size = 1000 for batch in dataset.iter(batch_size=batch_size): yield batch["text"] # And finally train bpe_tokenizer.train_from_iterator(batch_iterator(), length=len(dataset))
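# Possible follow-up (a sketch, not part of the original example): persist the trained
# tokenizer to a single JSON file and reload it for encoding. The file name is only an
# assumption used for illustration.
bpe_tokenizer.save("wikitext-bpe.json")
reloaded = Tokenizer.from_file("wikitext-bpe.json")
print(reloaded.encode("Training a tokenizer from a datasets iterator is easy").tokens)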
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/train_bert_wordpiece.py
import argparse import glob from tokenizers import BertWordPieceTokenizer parser = argparse.ArgumentParser() parser.add_argument( "--files", default=None, metavar="path", type=str, required=True, help="The files to use as training; accept '**/*.txt' type of patterns \ if enclosed in quotes", ) parser.add_argument( "--out", default="./", type=str, help="Path to the output directory, where the files will be saved", ) parser.add_argument("--name", default="bert-wordpiece", type=str, help="The name of the output vocab files") args = parser.parse_args() files = glob.glob(args.files) if not files: print(f"File does not exist: {args.files}") exit(1) # Initialize an empty tokenizer tokenizer = BertWordPieceTokenizer( clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True, ) # And then train tokenizer.train( files, vocab_size=10000, min_frequency=2, show_progress=True, special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"], limit_alphabet=1000, wordpieces_prefix="##", ) # Save the files tokenizer.save_model(args.out, args.name)
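# Optional sanity check (a sketch, not part of the original script): reload the saved vocab
# and encode a sentence. This assumes `save_model` wrote "<name>-vocab.txt" into the output
# directory, which is the usual naming convention for WordPiece vocab files.
tokenizer = BertWordPieceTokenizer(f"{args.out}/{args.name}-vocab.txt", lowercase=True)
print(tokenizer.encode("Training BERT WordPiece is very easy").tokens)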
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/train_bytelevel_bpe.py
import argparse import glob from os.path import join from tokenizers import ByteLevelBPETokenizer parser = argparse.ArgumentParser() parser.add_argument( "--files", default=None, metavar="path", type=str, required=True, help="The files to use as training; accept '**/*.txt' type of patterns \ if enclosed in quotes", ) parser.add_argument( "--out", default="./", type=str, help="Path to the output directory, where the files will be saved", ) parser.add_argument("--name", default="bpe-bytelevel", type=str, help="The name of the output vocab files") args = parser.parse_args() files = glob.glob(args.files) if not files: print(f"File does not exist: {args.files}") exit(1) # Initialize an empty tokenizer tokenizer = ByteLevelBPETokenizer(add_prefix_space=True) # And then train tokenizer.train( files, vocab_size=10000, min_frequency=2, show_progress=True, special_tokens=["<s>", "<pad>", "</s>"], ) # Save the files tokenizer.save_model(args.out, args.name) # Restoring model from learned vocab/merges tokenizer = ByteLevelBPETokenizer( join(args.out, "{}-vocab.json".format(args.name)), join(args.out, "{}-merges.txt".format(args.name)), add_prefix_space=True, ) # Test encoding print(tokenizer.encode("Training ByteLevel BPE is very easy").tokens)
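# Possible extra step (a sketch, not part of the original script): the whole pipeline can also
# be serialized to a single JSON file instead of separate vocab/merges files. The file name is
# only an assumption used for illustration.
tokenizer.save(join(args.out, "{}-tokenizer.json".format(args.name)))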
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/examples/custom_components.py
from typing import List

import jieba

from tokenizers import NormalizedString, PreTokenizedString, Regex, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import BPE
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer


class JiebaPreTokenizer:
    def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
        splits = []
        # we need to call `str(normalized_string)` because jieba expects a str,
        # not a NormalizedString
        for token, start, stop in jieba.tokenize(str(normalized_string)):
            splits.append(normalized_string[start:stop])

        return splits
        # We can also easily do it in one line:
        # return [normalized_string[w[1] : w[2]] for w in jieba.tokenize(str(normalized_string))]

    def odd_number_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
        # Just an odd example...
        splits = []
        last = 0
        for i, char in enumerate(str(normalized_string)):
            if char.isnumeric() and int(char) % 2 == 1:
                splits.append(normalized_string[last:i])
                last = i
        # Don't forget the last one
        splits.append(normalized_string[last:])
        return splits

    def pre_tokenize(self, pretok: PreTokenizedString):
        # Let's call split on the PreTokenizedString to split using `self.jieba_split`
        pretok.split(self.jieba_split)
        # Here we can call `pretok.split` multiple times if we want to apply
        # different algorithms, but we generally just need to call it once.
        pretok.split(self.odd_number_split)


class CustomDecoder:
    def decode(self, tokens: List[str]) -> str:
        return "".join(tokens)


class CustomNormalizer:
    def normalize(self, normalized: NormalizedString):
        # Most of these can be replaced by a `Sequence` combining some provided Normalizer,
        # (ie Sequence([ NFKC(), Replace(Regex(r"\s+"), " "), Lowercase() ]))
        # and it should be the preferred way. That being said, here is an example of the kind
        # of things that can be done here:
        normalized.nfkc()
        normalized.filter(lambda char: not char.isnumeric())
        normalized.replace(Regex(r"\s+"), " ")
        normalized.lowercase()


# This section shows how to attach these custom components to the Tokenizer
tok = Tokenizer(BPE())
tok.normalizer = Normalizer.custom(CustomNormalizer())
tok.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer())
tok.decoder = Decoder.custom(CustomDecoder())

input = "永和服装饰品有限公司"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('永和', (0, 2)), ('服装', (2, 4)), ('饰品', (4, 6)), ('有限公司', (6, 10))]

input = "112233"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('1', (0, 1)), ('122', (1, 4)), ('3', (4, 5)), ('3', (5, 6))]

input = "1234 ℌ𝔢𝔩𝔩𝔬 𝔱𝔥𝔢𝔯𝔢 𝓂𝓎 𝒹ℯ𝒶𝓇 𝕕𝕖𝕒𝕣 𝕗𝕣𝕚𝕖𝕟𝕕!"
print("Normalize:", input)
print(tok.normalizer.normalize_str(input))
# " hello there my dear dear friend!"
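# The custom decoder can be exercised directly as well (a small sketch, assuming the wrapped
# custom decoder exposes `decode` just like the built-in decoders do): it simply concatenates
# the tokens back together.
print("Decode:", tok.decoder.decode(["永和", "服装", "饰品", "有限公司"]))
# 永和服装饰品有限公司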
0
hf_public_repos
hf_public_repos/peft/Makefile
.PHONY: quality style test docs

check_dirs := src tests examples docs

# Check that source code meets quality standards

# this target runs checks on all files
quality:
	black --check $(check_dirs)
	ruff $(check_dirs)
	doc-builder style src/peft tests docs/source --max_len 119 --check_only

# Format source code automatically and check if there are any problems left that need manual fixing
style:
	black $(check_dirs)
	ruff $(check_dirs) --fix
	doc-builder style src/peft tests docs/source --max_len 119

test:
	python -m pytest -n 3 tests/ $(if $(IS_GITHUB_CI),--report-log "ci_tests.log",)

tests_examples_multi_gpu:
	python -m pytest -m multi_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",)

tests_examples_single_gpu:
	python -m pytest -m single_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",)

tests_core_multi_gpu:
	python -m pytest -m multi_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",)

tests_core_single_gpu:
	python -m pytest -m single_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",)

tests_common_gpu:
	python -m pytest tests/test_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_decoder.log",)
	python -m pytest tests/test_encoder_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_encoder_decoder.log",)
0
hf_public_repos
hf_public_repos/peft/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
hf_public_repos
hf_public_repos/peft/README.md
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <h1 align="center"> <p>🤗 PEFT</p></h1> <h3 align="center"> <p>State-of-the-art Parameter-Efficient Fine-Tuning (PEFT) methods</p> </h3> Parameter-Efficient Fine-Tuning (PEFT) methods enable efficient adaptation of pre-trained language models (PLMs) to various downstream applications without fine-tuning all the model's parameters. Fine-tuning large-scale PLMs is often prohibitively costly. In this regard, PEFT methods only fine-tune a small number of (extra) model parameters, thereby greatly decreasing the computational and storage costs. Recent State-of-the-Art PEFT techniques achieve performance comparable to that of full fine-tuning. Seamlessly integrated with 🤗 Accelerate for large scale models leveraging DeepSpeed and Big Model Inference. Supported methods: 1. LoRA: [LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/abs/2106.09685) 2. Prefix Tuning: [Prefix-Tuning: Optimizing Continuous Prompts for Generation](https://aclanthology.org/2021.acl-long.353/), [P-Tuning v2: Prompt Tuning Can Be Comparable to Fine-tuning Universally Across Scales and Tasks](https://arxiv.org/pdf/2110.07602.pdf) 3. P-Tuning: [GPT Understands, Too](https://arxiv.org/abs/2103.10385) 4. Prompt Tuning: [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691) 5. AdaLoRA: [Adaptive Budget Allocation for Parameter-Efficient Fine-Tuning](https://arxiv.org/abs/2303.10512) 6. $(IA)^3$: [Few-Shot Parameter-Efficient Fine-Tuning is Better and Cheaper than In-Context Learning](https://arxiv.org/abs/2205.05638) 7. MultiTask Prompt Tuning: [Multitask Prompt Tuning Enables Parameter-Efficient Transfer Learning](https://arxiv.org/abs/2303.02861) 8. LoHa: [FedPara: Low-Rank Hadamard Product for Communication-Efficient Federated Learning](https://arxiv.org/abs/2108.06098) 9. LoKr: [KronA: Parameter Efficient Tuning with Kronecker Adapter](https://arxiv.org/abs/2212.10650) based on [Navigating Text-To-Image Customization:From LyCORIS Fine-Tuning to Model Evaluation](https://arxiv.org/abs/2309.14859) implementation ## Getting started ```python from transformers import AutoModelForSeq2SeqLM from peft import get_peft_config, get_peft_model, LoraConfig, TaskType model_name_or_path = "bigscience/mt0-large" tokenizer_name_or_path = "bigscience/mt0-large" peft_config = LoraConfig( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # output: trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282 ``` ## Use Cases ### Get comparable performance to full finetuning by adapting LLMs to downstream tasks using consumer hardware GPU memory required for adapting LLMs on the few-shot dataset [`ought/raft/twitter_complaints`](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints). 
Here, settings considered are full finetuning, PEFT-LoRA using plain PyTorch and PEFT-LoRA using DeepSpeed with CPU Offloading. Hardware: Single A100 80GB GPU with CPU RAM above 64GB | Model | Full Finetuning | PEFT-LoRA PyTorch | PEFT-LoRA DeepSpeed with CPU Offloading | | --------- | ---- | ---- | ---- | | bigscience/T0_3B (3B params) | 47.14GB GPU / 2.96GB CPU | 14.4GB GPU / 2.96GB CPU | 9.8GB GPU / 17.8GB CPU | | bigscience/mt0-xxl (12B params) | OOM GPU | 56GB GPU / 3GB CPU | 22GB GPU / 52GB CPU | | bigscience/bloomz-7b1 (7B params) | OOM GPU | 32GB GPU / 3.8GB CPU | 18.1GB GPU / 35GB CPU | Performance of PEFT-LoRA tuned [`bigscience/T0_3B`](https://huggingface.co/bigscience/T0_3B) on [`ought/raft/twitter_complaints`](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints) leaderboard. A point to note is that we didn't try to squeeze performance by playing around with input instruction templates, LoRA hyperparams and other training related hyperparams. Also, we didn't use the larger 13B [mt0-xxl](https://huggingface.co/bigscience/mt0-xxl) model. So, we are already seeing comparable performance to SoTA with parameter efficient tuning. Also, the final additional checkpoint size is just `19MB` in comparison to `11GB` size of the backbone [`bigscience/T0_3B`](https://huggingface.co/bigscience/T0_3B) model, but one still has to load the original full size model. | Submission Name | Accuracy | | --------- | ---- | | Human baseline (crowdsourced) | 0.897 | | Flan-T5 | 0.892 | | lora-t0-3b | 0.863 | **Therefore, we can see that performance comparable to SoTA is achievable by PEFT methods with consumer hardware such as 16GB and 24GB GPUs.** An insightful blogpost explaining the advantages of using PEFT for fine-tuning FlanT5-XXL: [https://www.philschmid.de/fine-tune-flan-t5-peft](https://www.philschmid.de/fine-tune-flan-t5-peft) ### Parameter Efficient Tuning of Diffusion Models GPU memory required by different settings during training is given below. The final checkpoint size is `8.8 MB`. 
Hardware: Single A100 80GB GPU with CPU RAM above 64GB | Model | Full Finetuning | PEFT-LoRA | PEFT-LoRA with Gradient Checkpointing | | --------- | ---- | ---- | ---- | | CompVis/stable-diffusion-v1-4 | 27.5GB GPU / 3.97GB CPU | 15.5GB GPU / 3.84GB CPU | 8.12GB GPU / 3.77GB CPU | **Training** An example of using LoRA for parameter efficient dreambooth training is given in [`examples/lora_dreambooth/train_dreambooth.py`](examples/lora_dreambooth/train_dreambooth.py) ```bash export MODEL_NAME= "CompVis/stable-diffusion-v1-4" #"stabilityai/stable-diffusion-2-1" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --train_text_encoder \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --use_lora \ --lora_r 16 \ --lora_alpha 27 \ --lora_text_encoder_r 16 \ --lora_text_encoder_alpha 17 \ --learning_rate=1e-4 \ --gradient_accumulation_steps=1 \ --gradient_checkpointing \ --max_train_steps=800 ``` Try out the 🤗 Gradio Space which should run seamlessly on a T4 instance: [smangrul/peft-lora-sd-dreambooth](https://huggingface.co/spaces/smangrul/peft-lora-sd-dreambooth). ![peft lora dreambooth gradio space](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/peft_lora_dreambooth_gradio_space.png) **NEW** ✨ Multi Adapter support and combining multiple LoRA adapters in a weighted combination ![peft lora dreambooth weighted adapter](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/weighted_adapter_dreambooth_lora.png) **NEW** ✨ Dreambooth training for Stable Diffusion using LoHa and LoKr adapters [`examples/stable_diffusion/train_dreambooth.py`](examples/stable_diffusion/train_dreambooth.py) ### Parameter Efficient Tuning of LLMs for RLHF components such as Ranker and Policy - Here is an example in [trl](https://github.com/lvwerra/trl) library using PEFT+INT8 for tuning policy model: [gpt2-sentiment_peft.py](https://github.com/lvwerra/trl/blob/main/examples/sentiment/scripts/gpt2-sentiment_peft.py) and corresponding [Blog](https://huggingface.co/blog/trl-peft) - Example using PEFT for Instruction finetuning, reward model and policy : [stack_llama](https://github.com/lvwerra/trl/tree/main/examples/research_projects/stack_llama/scripts) and corresponding [Blog](https://huggingface.co/blog/stackllama) ### INT8 training of large models in Colab using PEFT LoRA and bitsandbytes - Here is now a demo on how to fine tune [OPT-6.7b](https://huggingface.co/facebook/opt-6.7b) (14GB in fp16) in a Google Colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1jCkpikz0J2o20FBQmYmAGdiKmJGOMo-o?usp=sharing) - Here is now a demo on how to fine tune [whisper-large](https://huggingface.co/openai/whisper-large-v2) (1.5B params) (14GB in fp16) in a Google Colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) and [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing) ### Save compute and storage even for medium and small models Save storage by avoiding full finetuning of models on each of the downstream tasks/datasets, With PEFT methods, users only need to store tiny checkpoints in the order of `MBs` all the while retaining performance comparable to full finetuning. An example of using LoRA for the task of adapting `LayoutLMForTokenClassification` on `FUNSD` dataset is given in `~examples/token_classification/PEFT_LoRA_LayoutLMForTokenClassification_on_FUNSD.py`. We can observe that with only `0.62 %` of parameters being trainable, we achieve performance (F1 0.777) comparable to full finetuning (F1 0.786) (without any hyperparam tuning runs for extracting more performance), and the checkpoint of this is only `2.8MB`. Now, if there are `N` such datasets, just have these PEFT models one for each dataset and save a lot of storage without having to worry about the problem of catastrophic forgetting or overfitting of backbone/base model. Another example is fine-tuning [`roberta-large`](https://huggingface.co/roberta-large) on [`MRPC` GLUE](https://huggingface.co/datasets/glue/viewer/mrpc) dataset using different PEFT methods. The notebooks are given in `~examples/sequence_classification`. ## PEFT + 🤗 Accelerate PEFT models work with 🤗 Accelerate out of the box. Use 🤗 Accelerate for Distributed training on various hardware such as GPUs, Apple Silicon devices, etc during training. Use 🤗 Accelerate for inferencing on consumer hardware with small resources. ### Example of PEFT model training using 🤗 Accelerate's DeepSpeed integration DeepSpeed version required `v0.8.0`. An example is provided in `~examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py`. a. First, run `accelerate config --config_file ds_zero3_cpu.yaml` and answer the questionnaire. Below are the contents of the config file. ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 distributed_type: DEEPSPEED downcast_bf16: 'no' dynamo_backend: 'NO' fsdp_config: {} machine_rank: 0 main_training_function: main megatron_lm_config: {} mixed_precision: 'no' num_machines: 1 num_processes: 1 rdzv_backend: static same_network: true use_cpu: false ``` b. run the below command to launch the example script ```bash accelerate launch --config_file ds_zero3_cpu.yaml examples/peft_lora_seq2seq_accelerate_ds_zero3_offload.py ``` c. 
output logs: ```bash GPU Memory before entering the train : 1916 GPU Memory consumed at the end of the train (end-begin): 66 GPU Peak Memory consumed during the train (max-begin): 7488 GPU Total Peak Memory consumed during the train (max): 9404 CPU Memory before entering the train : 19411 CPU Memory consumed at the end of the train (end-begin): 0 CPU Peak Memory consumed during the train (max-begin): 0 CPU Total Peak Memory consumed during the train (max): 19411 epoch=4: train_ppl=tensor(1.0705, device='cuda:0') train_epoch_loss=tensor(0.0681, device='cuda:0') 100%|████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:27<00:00, 3.92s/it] GPU Memory before entering the eval : 1982 GPU Memory consumed at the end of the eval (end-begin): -66 GPU Peak Memory consumed during the eval (max-begin): 672 GPU Total Peak Memory consumed during the eval (max): 2654 CPU Memory before entering the eval : 19411 CPU Memory consumed at the end of the eval (end-begin): 0 CPU Peak Memory consumed during the eval (max-begin): 0 CPU Total Peak Memory consumed during the eval (max): 19411 accuracy=100.0 eval_preds[:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint'] dataset['train'][label_column][:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint'] ``` ### Example of PEFT model inference using 🤗 Accelerate's Big Model Inferencing capabilities An example is provided in [this notebook](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_accelerate_big_model_inference.ipynb). ## Models support matrix Find models that are supported out of the box below. Note that PEFT works with almost all models -- if it is not listed, you just need to [do some manual configuration](https://huggingface.co/docs/peft/developer_guides/custom_models). 
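For example, a minimal sketch of such a manual configuration (the target module names below are placeholders; inspect your model's `named_modules()` to pick the real linear layer names):

```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained("your-unlisted-model")  # placeholder model id
# "q_proj"/"v_proj" are placeholders for the attention projection layers of your architecture
config = LoraConfig(r=8, lora_alpha=16, lora_dropout=0.1, target_modules=["q_proj", "v_proj"])
model = get_peft_model(model, config)
model.print_trainable_parameters()
```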
### Causal Language Modeling | Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 | |--------------| ---- | ---- | ---- | ---- | ---- | | GPT-2 | ✅ | ✅ | ✅ | ✅ | ✅ | | Bloom | ✅ | ✅ | ✅ | ✅ | ✅ | | OPT | ✅ | ✅ | ✅ | ✅ | ✅ | | GPT-Neo | ✅ | ✅ | ✅ | ✅ | ✅ | | GPT-J | ✅ | ✅ | ✅ | ✅ | ✅ | | GPT-NeoX-20B | ✅ | ✅ | ✅ | ✅ | ✅ | | LLaMA | ✅ | ✅ | ✅ | ✅ | ✅ | | ChatGLM | ✅ | ✅ | ✅ | ✅ | ✅ | | Mistral | ✅ | | | | | ### Conditional Generation | Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 | | --------- | ---- | ---- | ---- | ---- | ---- | | T5 | ✅ | ✅ | ✅ | ✅ | ✅ | | BART | ✅ | ✅ | ✅ | ✅ | ✅ | ### Sequence Classification | Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 | | --------- | ---- | ---- | ---- | ---- | ---- | | BERT | ✅ | ✅ | ✅ | ✅ | ✅ | | RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ | | GPT-2 | ✅ | ✅ | ✅ | ✅ | | | Bloom | ✅ | ✅ | ✅ | ✅ | | | OPT | ✅ | ✅ | ✅ | ✅ | | | GPT-Neo | ✅ | ✅ | ✅ | ✅ | | | GPT-J | ✅ | ✅ | ✅ | ✅ | | | Deberta | ✅ | | ✅ | ✅ | | | Deberta-v2 | ✅ | | ✅ | ✅ | | ### Token Classification | Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 | | --------- | ---- | ---- | ---- | ---- | ---- | | BERT | ✅ | ✅ | | | | | RoBERTa | ✅ | ✅ | | | | | GPT-2 | ✅ | ✅ | | | | | Bloom | ✅ | ✅ | | | | | OPT | ✅ | ✅ | | | | | GPT-Neo | ✅ | ✅ | | | | | GPT-J | ✅ | ✅ | | | | | Deberta | ✅ | | | | | | Deberta-v2 | ✅ | | | | | ### Text-to-Image Generation | Model | LoRA | LoHa | LoKr | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 | | --------- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | | Stable Diffusion | ✅ | ✅ | ✅ | | | | ### Image Classification | Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 | | --------- | ---- | ---- | ---- | ---- | ---- | | ViT | ✅ | | | | | | Swin | ✅ | | | | | ### Image to text (Multi-modal models) | Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 | --------- | ---- | ---- | ---- | ---- | ---- | | Blip-2 | ✅ | | | | | ___Note that we have tested LoRA for [ViT](https://huggingface.co/docs/transformers/model_doc/vit) and [Swin](https://huggingface.co/docs/transformers/model_doc/swin) for fine-tuning on image classification. However, it should be possible to use LoRA for any compatible model [provided](https://huggingface.co/models?pipeline_tag=image-classification&sort=downloads&search=vit) by 🤗 Transformers. Check out the respective examples to learn more. If you run into problems, please open an issue.___ The same principle applies to our [segmentation models](https://huggingface.co/models?pipeline_tag=image-segmentation&sort=downloads) as well. ### Semantic Segmentation | Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 | | --------- | ---- | ---- | ---- | ---- | ---- | | SegFormer | ✅ | | | | | ## Caveats: 1. Below is an example of using PyTorch FSDP for training. However, it doesn't lead to any GPU memory savings. Please refer issue [[FSDP] FSDP with CPU offload consumes 1.65X more GPU memory when training models with most of the params frozen](https://github.com/pytorch/pytorch/issues/91165). ```python from peft.utils.other import fsdp_auto_wrap_policy ... if os.environ.get("ACCELERATE_USE_FSDP", None) is not None: accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model) model = accelerator.prepare(model) ``` Example of parameter efficient tuning with [`mt0-xxl`](https://huggingface.co/bigscience/mt0-xxl) base model using 🤗 Accelerate is provided in `~examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py`. a. 
First, run `accelerate config --config_file fsdp_config.yaml` and answer the questionnaire. Below are the contents of the config file. ```yaml command_file: null commands: null compute_environment: LOCAL_MACHINE deepspeed_config: {} distributed_type: FSDP downcast_bf16: 'no' dynamo_backend: 'NO' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_offload_params: true fsdp_sharding_strategy: 1 fsdp_state_dict_type: FULL_STATE_DICT fsdp_transformer_layer_cls_to_wrap: T5Block gpu_ids: null machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main megatron_lm_config: {} mixed_precision: 'no' num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_name: null tpu_zone: null use_cpu: false ``` b. run the below command to launch the example script ```bash accelerate launch --config_file fsdp_config.yaml examples/peft_lora_seq2seq_accelerate_fsdp.py ``` 2. When using ZeRO3 with zero3_init_flag=True, if you find the gpu memory increase with training steps. we might need to update deepspeed after [deepspeed commit 42858a9891422abc](https://github.com/microsoft/DeepSpeed/commit/42858a9891422abcecaa12c1bd432d28d33eb0d4) . The related issue is [[BUG] Peft Training with Zero.Init() and Zero3 will increase GPU memory every forward step ](https://github.com/microsoft/DeepSpeed/issues/3002) ## 🤗 PEFT as a utility library Inject trainable adapters on any `torch` model using `inject_adapter_in_model` method. Note the method will make no further change to the model. ```python import torch from peft import inject_adapter_in_model, LoraConfig class DummyModel(torch.nn.Module): def __init__(self): super().__init__() self.embedding = torch.nn.Embedding(10, 10) self.linear = torch.nn.Linear(10, 10) self.lm_head = torch.nn.Linear(10, 10) def forward(self, input_ids): x = self.embedding(input_ids) x = self.linear(x) x = self.lm_head(x) return x lora_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", target_modules=["linear"], ) model = DummyModel() model = inject_adapter_in_model(lora_config, model) dummy_inputs = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]) dummy_outputs = model(dummy_inputs) ``` Learn more about the [low level API in the docs](https://huggingface.co/docs/peft/developer_guides/low_level_api). ## Contributing If you would like to contribute to PEFT, please check out our [contributing guide](https://huggingface.co/docs/peft/developer_guides/contributing). ## Citing 🤗 PEFT If you use 🤗 PEFT in your publication, please cite it by using the following BibTeX entry. ```bibtex @Misc{peft, title = {PEFT: State-of-the-art Parameter-Efficient Fine-Tuning methods}, author = {Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes Belkada and Sayak Paul and Benjamin Bossan}, howpublished = {\url{https://github.com/huggingface/peft}}, year = {2022} } ```
0
hf_public_repos
hf_public_repos/peft/setup.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import find_packages, setup


extras = {}
extras["quality"] = ["black ~= 22.0", "ruff>=0.0.241", "urllib3<=2.0.0"]
extras["docs_specific"] = ["hf-doc-builder"]
extras["dev"] = extras["quality"] + extras["docs_specific"]
extras["test"] = extras["dev"] + [
    "pytest",
    "pytest-cov",
    "pytest-xdist",
    "parameterized",
    "datasets",
    "diffusers<0.21.0",
    "scipy",
]

setup(
    name="peft",
    version="0.6.3.dev0",
    description="Parameter-Efficient Fine-Tuning (PEFT)",
    license_files=["LICENSE"],
    long_description=open("README.md", "r", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    keywords="deep learning",
    license="Apache",
    author="The HuggingFace team",
    author_email="sourab@huggingface.co",
    url="https://github.com/huggingface/peft",
    package_dir={"": "src"},
    packages=find_packages("src"),
    package_data={"peft": ["py.typed"]},
    entry_points={},
    python_requires=">=3.8.0",
    install_requires=[
        "numpy>=1.17",
        "packaging>=20.0",
        "psutil",
        "pyyaml",
        "torch>=1.13.0",
        "transformers",
        "tqdm",
        "accelerate>=0.21.0",
        "safetensors",
        "huggingface_hub>=0.17.0",
    ],
    extras_require=extras,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)

# Release checklist
# 1. Change the version in __init__.py and setup.py to the release version, e.g. from "0.6.0.dev0" to "0.6.0"
# 2. Check if there are any deprecations that need to be addressed for this release by searching for "# TODO" in the code
# 3. Commit these changes with the message: "Release: VERSION", create a PR and merge it.
# 4. Add a tag in git to mark the release: "git tag -a VERSION -m 'Adds tag VERSION for pypi' "
#    Push the tag to git: git push --tags origin main
#    It is necessary to work on the original repository, not on a fork.
# 5. Run the following commands in the top-level directory:
#       python setup.py bdist_wheel
#       python setup.py sdist
#    Ensure that you are on the clean and up-to-date main branch (git status --untracked-files=no should not list any
#    files and show the main branch)
# 6. Upload the package to the pypi test server first:
#       twine upload dist/* -r pypitest
# 7. Check that you can install it in a virtualenv by running:
#       pip install -i https://testpypi.python.org/pypi peft
# 8. Upload the final version to actual pypi:
#       twine upload dist/* -r pypi
# 9. Add release notes to the tag on https://github.com/huggingface/peft/releases once everything is looking hunky-dory.
#    Check the notes here: https://docs.google.com/document/d/1k-sOIfykuKjWcOIALqjhFKz4amFEp-myeJUJEzNgjoU/edit?usp=sharing
# 10. Update the version in __init__.py, setup.py to the bumped minor version + ".dev0" (e.g. from "0.6.0" to "0.7.0.dev0")
0
hf_public_repos
hf_public_repos/peft/pyproject.toml
[tool.black]
line-length = 119
target-version = ['py36']

[tool.ruff]
ignore = ["C901", "E501", "E741", "W605"]
select = ["C", "E", "F", "I", "W"]
line-length = 119

[tool.ruff.isort]
lines-after-imports = 2
known-first-party = ["peft"]

[isort]
default_section = "FIRSTPARTY"
known_first_party = "peft"
known_third_party = [
    "numpy",
    "torch",
    "accelerate",
    "transformers",
]
line_length = 119
lines_after_imports = 2
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true

[tool.pytest]
doctest_optionflags = [
    "NORMALIZE_WHITESPACE",
    "ELLIPSIS",
    "NUMBER",
]

[tool.pytest.ini_options]
addopts = "--cov=src/peft --cov-report=term-missing"
markers = [
    "single_gpu_tests: tests that run on a single GPU",
    "multi_gpu_tests: tests that run on multiple GPUs",
]
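The `[tool.pytest.ini_options]` table above registers two custom markers. A minimal sketch of how a test could opt into one of them, assuming `pytest` and `torch` are installed; the test name and body are illustrative only and not taken from the repository:

```python
import pytest
import torch


@pytest.mark.single_gpu_tests  # marker registered in the pyproject.toml above
def test_runs_on_a_single_gpu():
    # Illustrative check only; skip when no CUDA device is present.
    if not torch.cuda.is_available():
        pytest.skip("requires a GPU")
    x = torch.ones(2, 2, device="cuda")
    assert x.sum().item() == 4.0
```

Because the marker is declared in the config, `pytest -m single_gpu_tests` selects only these tests without emitting unknown-marker warnings.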
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/peft_model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import collections import inspect import os import warnings from contextlib import contextmanager from copy import deepcopy from typing import Any, Dict, List, Optional, Union import torch from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import ModelCard, ModelCardData, hf_hub_download from safetensors.torch import save_file as safe_save_file from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from . import __version__ from .config import PeftConfig from .tuners import ( AdaLoraModel, AdaptionPromptModel, IA3Model, LoHaModel, LoKrModel, LoraModel, MultitaskPromptEmbedding, PrefixEncoder, PromptEmbedding, PromptEncoder, ) from .utils import ( SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftType, TaskType, _get_batch_size, _prepare_prompt_learning_config, _set_adapter, _set_trainable, get_peft_model_state_dict, id_tensor_storage, infer_device, load_peft_weights, set_peft_model_state_dict, shift_tokens_right, ) PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.LOHA: LoHaModel, PeftType.LOKR: LoKrModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder, PeftType.ADALORA: AdaLoraModel, PeftType.ADAPTION_PROMPT: AdaptionPromptModel, PeftType.IA3: IA3Model, } class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. **Attributes**: - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. 
""" def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default"): super().__init__() self.modules_to_save = None self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self._is_prompt_learning = peft_config.is_prompt_learning if self._is_prompt_learning: self._peft_config = {adapter_name: peft_config} self.base_model = model self.add_adapter(adapter_name, peft_config) else: self._peft_config = None cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type] self.base_model = cls(model, {adapter_name: peft_config}, adapter_name) self.set_additional_trainable_modules(peft_config, adapter_name) self.config = getattr(self.base_model, "config", {"model_type": "custom"}) if getattr(model, "is_gradient_checkpointing", True): model = self._prepare_model_for_gradient_checkpointing(model) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 @property def peft_config(self) -> Dict[str, PeftConfig]: if self._is_prompt_learning: return self._peft_config return self.base_model.peft_config @property def active_adapters(self): try: adapters = self.base_model.active_adapters except AttributeError: adapters = self.active_adapter if isinstance(adapters, str): adapters = [adapters] return adapters @peft_config.setter def peft_config(self, value: Dict[str, PeftConfig]): if self._is_prompt_learning: self._peft_config = value else: self.base_model.peft_config = value def save_pretrained( self, save_directory: str, safe_serialization: bool = True, selected_adapters: Optional[List[str]] = None, **kwargs: Any, ): r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). safe_serialization (`bool`, *optional*): Whether to save the adapter files in safetensors format. kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. """ if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") if selected_adapters is None: selected_adapters = list(self.peft_config.keys()) else: if any( selected_adapter_name not in list(self.peft_config.keys()) for selected_adapter_name in selected_adapters ): raise ValueError( f"You passed an invalid `selected_adapters` arguments, current supported adapter names are" f" {list(self.peft_config.keys())} - got {selected_adapters}." 
) os.makedirs(save_directory, exist_ok=True) self.create_or_update_model_card(save_directory) for adapter_name in selected_adapters: peft_config = self.peft_config[adapter_name] # save only the trainable weights output_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name ) output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory os.makedirs(output_dir, exist_ok=True) if safe_serialization: # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134 # Safetensors does not allow tensor aliasing. # We're going to remove aliases before saving ptrs = collections.defaultdict(list) for name, tensor in output_state_dict.items(): # Sometimes in the state_dict we have non-tensor objects. # e.g. in bitsandbytes we have some `str` objects in the state_dict if isinstance(tensor, torch.Tensor): ptrs[id_tensor_storage(tensor)].append(name) else: # In the non-tensor case, fall back to the pointer of the object itself ptrs[id(tensor)].append(name) # These are all the pointers of shared tensors. shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} for _, names in shared_ptrs.items(): # Here we just clone the shared tensors to avoid tensor aliasing which is # not supported in safetensors. for shared_tensor_name in names[1:]: output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone() safe_save_file( output_state_dict, os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), metadata={"format": "pt"}, ) else: torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) # save the config and change the inference mode to `True` if peft_config.base_model_name_or_path is None: peft_config.base_model_name_or_path = ( self.base_model.__dict__.get("name_or_path", None) if peft_config.is_prompt_learning else self.base_model.model.__dict__.get("name_or_path", None) ) inference_mode = peft_config.inference_mode peft_config.inference_mode = True if peft_config.task_type is None: # deal with auto mapping base_model_class = self._get_base_model_class( is_prompt_tuning=peft_config.is_prompt_learning, ) parent_library = base_model_class.__module__ auto_mapping_dict = { "base_model_class": base_model_class.__name__, "parent_library": parent_library, } else: auto_mapping_dict = None peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict) peft_config.inference_mode = inference_mode @classmethod def from_pretrained( cls, model: PreTrainedModel, model_id: Union[str, os.PathLike], adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs: Any, ): r""" Instantiate a PEFT model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. Args: model ([`~transformers.PreTrainedModel`]): The model to be adapted. The model should be initialized with the [`~transformers.PreTrainedModel.from_pretrained`] method from the 🤗 Transformers library. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. 
This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and use for inference config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuation. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. """ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), token=kwargs.get("token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): model = cls(model, config, adapter_name) else: model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name) model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) return model def _setup_prompt_encoder(self, adapter_name: str): config = self.peft_config[adapter_name] if not hasattr(self, "prompt_encoder"): self.prompt_encoder = torch.nn.ModuleDict({}) self.prompt_tokens = {} transformer_backbone = None for name, module in self.base_model.named_children(): for param in module.parameters(): param.requires_grad = False if isinstance(module, PreTrainedModel): # Make sure to freeze Tranformers model if transformer_backbone is None: transformer_backbone = module self.transformer_backbone_name = name if transformer_backbone is None: transformer_backbone = self.base_model if config.num_transformer_submodules is None: config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 for named_param, value in list(transformer_backbone.named_parameters()): # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0] # the actual unsharded shape is stored in "ds_shape" attribute # special handling is needed in case the model is initialized in deepspeed.zero.Init() context or HfDeepSpeedConfig # has been called before # For reference refer to issue: https://github.com/huggingface/peft/issues/996 deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None) if value.shape[0] == self.base_model.config.vocab_size or ( deepspeed_distributed_tensor_shape is not None and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size ): self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) break if config.peft_type == PeftType.PROMPT_TUNING: prompt_encoder = PromptEmbedding(config, self.word_embeddings) elif 
config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.P_TUNING: prompt_encoder = PromptEncoder(config) elif config.peft_type == PeftType.PREFIX_TUNING: prompt_encoder = PrefixEncoder(config) else: raise ValueError("Not supported") prompt_encoder = prompt_encoder.to(self.device) self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) self.prompt_tokens[adapter_name] = torch.arange( config.num_virtual_tokens * config.num_transformer_submodules ).long() def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel): r""" Prepares the model for gradient checkpointing if necessary """ if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) return model def get_prompt_embedding_to_save(self, adapter_name: str): """ Returns the prompt embedding to save when saving the model. Only applicable when `peft_config.peft_type != PeftType.LORA`. """ prompt_encoder = self.prompt_encoder[adapter_name] prompt_tokens = ( self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device) ) if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens) else: prompt_embeddings = prompt_encoder(prompt_tokens) return prompt_embeddings[0].detach().cpu() def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None): """ Returns the virtual prompts to use for Peft. Only applicable when `peft_config.peft_type != PeftType.LORA`. 
""" peft_config = self.active_peft_config prompt_encoder = self.prompt_encoder[self.active_adapter] prompt_tokens = ( self.prompt_tokens[self.active_adapter] .unsqueeze(0) .expand(batch_size, -1) .to(prompt_encoder.embedding.weight.device) ) if peft_config.peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] if peft_config.inference_mode: past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: past_key_values = prompt_encoder(prompt_tokens) if self.base_model_torch_dtype is not None: past_key_values = past_key_values.to(self.base_model_torch_dtype) past_key_values = past_key_values.view( batch_size, peft_config.num_virtual_tokens, peft_config.num_layers * 2, peft_config.num_attention_heads, peft_config.token_dim // peft_config.num_attention_heads, ) if peft_config.num_transformer_submodules == 2: past_key_values = torch.cat([past_key_values, past_key_values], dim=2) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( peft_config.num_transformer_submodules * 2 ) if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] past_key_values = post_process_fn(past_key_values) return past_key_values else: if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompts = prompt_encoder(prompt_tokens, task_ids) else: if peft_config.inference_mode: prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: prompts = prompt_encoder(prompt_tokens) return prompts def get_nb_trainable_parameters(self): r""" Returns the number of trainable parameters and number of all parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": num_params = num_params * 2 all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self): """ Prints the number of trainable parameters in the model. """ trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.base_model, name) def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ return self.get_base_model()(*args, **kwargs) def _get_base_model_class(self, is_prompt_tuning=False): """ Returns the base model class. """ if not is_prompt_tuning: return self.base_model.model.__class__ return self.base_model.__class__ @contextmanager def disable_adapter(self): """ Disables the adapter module. """ try: if self.peft_config[self.active_adapter].is_prompt_learning: # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and # letting the underyling methods deal with it, same as how LoRA does it. 
old_forward = self.forward self.forward = self.base_model.forward old_prepare_inputs_for_generation = self.prepare_inputs_for_generation self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation else: self.base_model.disable_adapter_layers() yield finally: if self.peft_config[self.active_adapter].is_prompt_learning: self.forward = old_forward self.old_prepare_inputs_for_generation = old_prepare_inputs_for_generation else: self.base_model.enable_adapter_layers() def get_base_model(self): """ Returns the base model. """ return self.base_model if self.active_peft_config.is_prompt_learning else self.base_model.model def add_adapter(self, adapter_name: str, peft_config: PeftConfig): if peft_config.peft_type != self.peft_type: raise ValueError( f"Cannot combine adapters with different peft types. " f"Found {self.peft_type} and {peft_config.peft_type}." ) try: if peft_config.is_prompt_learning: self.peft_config[adapter_name] = peft_config if hasattr(self.config, "to_dict"): dict_config = self.config.to_dict() else: dict_config = self.config peft_config = _prepare_prompt_learning_config(peft_config, dict_config) self._setup_prompt_encoder(adapter_name) elif peft_config.is_adaption_prompt: self.base_model.add_adapter(adapter_name, peft_config) else: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter(self.base_model.model, adapter_name) except Exception: # somthing went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise self.set_additional_trainable_modules(peft_config, adapter_name) def set_additional_trainable_modules(self, peft_config, adapter_name): if getattr(peft_config, "modules_to_save", None) is not None: if self.modules_to_save is None: self.modules_to_save = set(peft_config.modules_to_save) else: self.modules_to_save.update(peft_config.modules_to_save) _set_trainable(self, adapter_name) @classmethod def _split_kwargs(cls, kwargs: Dict[str, Any]): _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",) hf_hub_download_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature: hf_hub_download_kwargs[key] = value else: other_kwargs[key] = value return hf_hub_download_kwargs, other_kwargs def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any): from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs) torch_device = infer_device() if adapter_name not in self.peft_config: # load the config peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, **hf_hub_download_kwargs, ) ].from_pretrained( model_id, **hf_hub_download_kwargs, ) if peft_config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: peft_config.inference_mode = not is_trainable self.add_adapter(adapter_name, peft_config) adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs) # load the weights into the model load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name) if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): device_map = kwargs.get("device_map", "auto") max_memory = kwargs.get("max_memory", None) offload_dir = 
kwargs.get("offload_folder", None) offload_index = kwargs.get("offload_index", None) dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) dispatch_model( self, device_map=device_map, offload_dir=offload_dir, **dispatch_model_kwargs, ) hook = AlignDevicesHook(io_same_device=True) if self.peft_config[adapter_name].is_prompt_learning: remove_hook_from_submodules(self.prompt_encoder) add_hook_to_module(self.get_base_model(), hook) # Set model in evaluation mode to deactivate Dropout modules by default if not is_trainable: self.eval() return load_result def set_adapter(self, adapter_name: str): """ Sets the active adapter. """ if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} not found.") self.active_adapter = adapter_name if not self.peft_config[adapter_name].is_prompt_learning: self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) @property def base_model_torch_dtype(self): return getattr(self.base_model, "dtype", None) @property def active_peft_config(self): return self.peft_config[self.active_adapter] def create_or_update_model_card(self, output_dir: str): """ Updates or create model card to include information about peft: 1. Adds `peft` library tag 2. Adds peft version 3. Adds base model info 4. Adds quantization information if it was used """ filename = os.path.join(output_dir, "README.md") card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData()) card.data["library_name"] = "peft" model_config = self.config if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() if model_config.get("model_type", "custom") != "custom": card.data["base_model"] = model_config["_name_or_path"] lines = card.text.splitlines() quantization_config = None if hasattr(self.config, "quantization_config"): quantization_config = self.config.quantization_config.to_dict() training_config_text = "" # Adds quantization information if it was used if quantization_config is not None: training_config_text += "\nThe following `bitsandbytes` quantization config was used during training:\n" training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()]) training_config_text += "\n" training_procedure_heading = "## Training procedure\n" if training_procedure_heading in lines: lines.insert(lines.index(training_procedure_heading) + 2, training_config_text) else: lines.append(f"{training_procedure_heading}\n{training_config_text}") # Adds peft version framework_block_heading = "### Framework versions\n" if framework_block_heading in lines: lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}\n") else: lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}\n") card.text = "\n".join(lines) card.save(filename) class PeftModelForSequenceClassification(PeftModel): """ Peft model for sequence classification tasks. 
Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForSequenceClassification >>> from peft import PeftModelForSequenceClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "SEQ_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForSequenceClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"classifier", "score"} else: self.modules_to_save.update({"classifier", "score"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict peft_config = self.active_peft_config if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) pooled_output = outputs[1] if len(outputs) > 1 else outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: pooled_output = self.base_model.dropout(pooled_output) logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.base_model.num_labels == 1: self.config.problem_type = "regression" elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.base_model.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForCausalLM(PeftModel): """ Peft model for causal language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. 
Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModelForCausalLM, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "CAUSAL_LM", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 1280, ... "num_transformer_submodules": 1, ... "num_attention_heads": 20, ... "num_layers": 36, ... "encoder_hidden_size": 1280, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large") >>> peft_model = PeftModelForCausalLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: if self.base_model.config.model_type == "mpt": if inputs_embeds is not None: raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds") return self.base_model( input_ids=input_ids, attention_mask=attention_mask, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # concat prompt labels if labels is not None: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def generate(self, **kwargs): self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation if hasattr(self.base_model, "model"): self.base_model.model.generation_config = self.generation_config else: self.base_model.generation_config = self.generation_config try: outputs = self.base_model.generate(**kwargs) except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation return outputs def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) if peft_config.is_prompt_learning: if model_kwargs.get("attention_mask", None) is not None: prefix_attention_mask = torch.ones( model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens ).to(model_kwargs["input_ids"].device) model_kwargs["attention_mask"] = torch.cat( (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1 ) if model_kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") model_kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" ) kwargs["token_type_ids"] = None if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) model_kwargs["past_key_values"] = past_key_values else: if model_kwargs["past_key_values"] is None: inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) model_kwargs["input_ids"] = None return model_kwargs class PeftModelForSeq2SeqLM(PeftModel): """ Peft model for sequence-to-sequence language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import PeftModelForSeq2SeqLM, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "SEQ_2_SEQ_LM", ... "inference_mode": False, ... "r": 8, ... 
"target_modules": ["q", "v"], ... "lora_alpha": 32, ... "lora_dropout": 0.1, ... "fan_in_fan_out": False, ... "enable_lora": None, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( self.base_model._prepare_encoder_decoder_kwargs_for_generation ) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if decoder_attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( decoder_attention_mask.device ) if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, **kwargs, ) elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( attention_mask.device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) return self.base_model( inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs, ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if decoder_inputs_embeds is None and decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) decoder_inputs_embeds = self.word_embeddings(decoder_input_ids) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( attention_mask.device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) # concat prompt labels if labels is not None: if peft_config.num_transformer_submodules == 1: kwargs["labels"] = labels elif peft_config.num_transformer_submodules == 2: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) if peft_config.num_transformer_submodules == 1: return self.base_model(inputs_embeds=inputs_embeds, **kwargs) elif peft_config.num_transformer_submodules == 2: decoder_inputs_embeds = torch.cat( (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1 ) return self.base_model( inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs ) def generate(self, **kwargs): peft_config = self.active_peft_config self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self._prepare_encoder_decoder_kwargs_for_generation ) try: if not peft_config.is_prompt_learning: outputs = self.base_model.generate(**kwargs) else: if "input_ids" not in kwargs: raise ValueError("input_ids must be provided for Peft model generation") if kwargs.get("position_ids", None) is not None: warnings.warn( "Position ids are not supported for parameter efficient tuning. Ignoring position ids." 
) kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" ) kwargs["token_type_ids"] = None if peft_config.peft_type == PeftType.PREFIX_TUNING: outputs = self.base_model.generate(**kwargs) elif peft_config.peft_type in [ PeftType.PROMPT_TUNING, PeftType.P_TUNING, PeftType.MULTITASK_PROMPT_TUNING, ]: kwargs = deepcopy(kwargs) if "encoder_outputs" in kwargs: del kwargs["encoder_ouputs"] warnings.warn( "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it." ) input_ids = kwargs.pop("input_ids") inputs_embeds = self.word_embeddings(input_ids) batch_size = inputs_embeds.shape[0] prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None)) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) kwargs["inputs_embeds"] = inputs_embeds if "attention_mask" in kwargs: prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( kwargs["attention_mask"].device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1) return self.base_model.generate(**kwargs) else: raise NotImplementedError except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self.base_model_prepare_encoder_decoder_kwargs_for_generation ) raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self.base_model_prepare_encoder_decoder_kwargs_for_generation ) return outputs def prepare_inputs_for_generation(self, *args, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: batch_size = model_kwargs["decoder_input_ids"].shape[0] past_key_values = self.get_prompt(batch_size) model_kwargs["past_key_values"] = past_key_values return model_kwargs class PeftModelForTokenClassification(PeftModel): """ Peft model for token classification tasks. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForSequenceClassification >>> from peft import PeftModelForTokenClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "TOKEN_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForTokenClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__(self, model, peft_config: PeftConfig = None, adapter_name="default"): super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"classifier", "score"} else: self.modules_to_save.update({"classifier", "score"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) sequence_output = outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: sequence_output = self.base_model.dropout(sequence_output) logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForQuestionAnswering(PeftModel): """ Peft model for extractive question answering. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForQuestionAnswering >>> from peft import PeftModelForQuestionAnswering, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "QUESTION_ANS", ... "inference_mode": False, ... "r": 16, ... "target_modules": ["query", "value"], ... "lora_alpha": 32, ... "lora_dropout": 0.05, ... "fan_in_fan_out": False, ... "bias": "none", ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForQuestionAnswering(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013 ``` """ def __init__(self, model, peft_config: PeftConfig = None, adapter_name="default"): super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"qa_outputs"} else: self.modules_to_save.update({"qa_outputs"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, start_positions=start_positions, end_positions=end_positions, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "start_positions": start_positions, "end_positions": end_positions, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) sequence_output = outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: sequence_output = self.base_model.dropout(sequence_output) logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class 
PeftModelForFeatureExtraction(PeftModel): """ Peft model for extracting features/embeddings from transformer models Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. Example: ```py >>> from transformers import AutoModel >>> from peft import PeftModelForFeatureExtraction, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "FEATURE_EXTRACTION", ... "inference_mode": False, ... "r": 16, ... "target_modules": ["query", "value"], ... "lora_alpha": 32, ... "lora_dropout": 0.05, ... "fan_in_fan_out": False, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModel.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForFeatureExtraction(model, peft_config) >>> peft_model.print_trainable_parameters() ``` """ def __init__(self, model, peft_config: PeftConfig = None, adapter_name="default"): super().__init__(model, peft_config, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
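A usage note (not part of the library source): a minimal sketch of driving the feature-extraction wrapper end to end, assuming a LoRA config like the one in the docstring above; the model name and input sentence are illustrative placeholders.

```python
# Minimal sketch: wrap a BERT encoder with LoRA and pull out hidden states.
import torch
from transformers import AutoModel, AutoTokenizer
from peft import LoraConfig, get_peft_model

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
base_model = AutoModel.from_pretrained("bert-base-cased")

# task_type FEATURE_EXTRACTION routes get_peft_model to PeftModelForFeatureExtraction.
peft_config = LoraConfig(
    task_type="FEATURE_EXTRACTION",
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["query", "value"],
)
peft_model = get_peft_model(base_model, peft_config)

inputs = tokenizer("PEFT keeps the base model frozen.", return_tensors="pt")
with torch.no_grad():
    outputs = peft_model(**inputs)

# last_hidden_state has shape (batch_size, sequence_length, hidden_size).
print(outputs.last_hidden_state.shape)
```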
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/mapping.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict import torch from .config import PeftConfig from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .tuners import ( AdaLoraConfig, AdaLoraModel, AdaptionPromptConfig, IA3Config, IA3Model, LoHaConfig, LoHaModel, LoKrConfig, LoKrModel, LoraConfig, LoraModel, MultitaskPromptTuningConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, ) from .utils import _prepare_prompt_learning_config if TYPE_CHECKING: from transformers import PreTrainedModel MODEL_TYPE_TO_PEFT_MODEL_MAPPING: Dict[str, PeftModel] = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, "QUESTION_ANS": PeftModelForQuestionAnswering, "FEATURE_EXTRACTION": PeftModelForFeatureExtraction, } PEFT_TYPE_TO_CONFIG_MAPPING: Dict[str, PeftConfig] = { "ADAPTION_PROMPT": AdaptionPromptConfig, "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "LOHA": LoHaConfig, "LOKR": LoKrConfig, "ADALORA": AdaLoraConfig, "IA3": IA3Config, "MULTITASK_PROMPT_TUNING": MultitaskPromptTuningConfig, } PEFT_TYPE_TO_TUNER_MAPPING = { "LORA": LoraModel, "LOHA": LoHaModel, "LOKR": LoKrModel, "ADALORA": AdaLoraModel, "IA3": IA3Model, } def get_peft_config(config_dict: Dict[str, Any]) -> PeftConfig: """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def get_peft_model(model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> PeftModel: """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. 
""" model_config = getattr(model, "config", {"model_type": "custom"}) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning: return PeftModel(model, peft_config, adapter_name=adapter_name) if peft_config.is_prompt_learning: peft_config = _prepare_prompt_learning_config(peft_config, model_config) return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name) def inject_adapter_in_model( peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default" ) -> torch.nn.Module: r""" A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods. Args: peft_config (`PeftConfig`): Configuration object containing the parameters of the Peft model. model (`torch.nn.Module`): The input model where the adapter will be injected. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). """ if peft_config.is_prompt_learning or peft_config.is_adaption_prompt: raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.") if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys(): raise ValueError( f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`." ) tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type] # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules. peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name) return peft_model.model
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/helpers.py
import inspect from copy import deepcopy from functools import update_wrapper from types import MethodType from .peft_model import PeftModel def update_forward_signature(model: PeftModel) -> None: """ Args: Updates the forward signature of the PeftModel to include parents class signature model (`PeftModel`): Peft model to update the forward signature Example: ```python >>> from transformers import WhisperForConditionalGeneration >>> from peft import get_peft_model, LoraConfig, update_forward_signature >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj", "v_proj"]) >>> peft_model = get_peft_model(model, peft_config) >>> update_forward_signature(peft_model) ``` """ # Only update signature when the current forward signature only has *args and **kwargs current_signature = inspect.signature(model.forward) if ( len(current_signature.parameters) == 2 and "args" in current_signature.parameters and "kwargs" in current_signature.parameters ): forward = deepcopy(model.forward.__func__) update_wrapper( forward, type(model.get_base_model()).forward, assigned=("__doc__", "__name__", "__annotations__") ) model.forward = MethodType(forward, model) def update_generate_signature(model: PeftModel) -> None: """ Args: Updates the generate signature of a PeftModel with overriding generate to include parents class signature model (`PeftModel`): Peft model to update the generate signature Example: ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> from peft import get_peft_model, LoraConfig, TaskType, update_generate_signature >>> model_name_or_path = "bigscience/mt0-large" >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) >>> peft_config = LoraConfig( ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ... ) >>> peft_model = get_peft_model(model, peft_config) >>> update_generate_signature(peft_model) >>> help(peft_model.generate) ``` """ if not hasattr(model, "generate"): return current_signature = inspect.signature(model.generate) if ( len(current_signature.parameters) == 2 and "args" in current_signature.parameters and "kwargs" in current_signature.parameters ) or (len(current_signature.parameters) == 1 and "kwargs" in current_signature.parameters): generate = deepcopy(model.generate.__func__) update_wrapper( generate, type(model.get_base_model()).generate, assigned=("__doc__", "__name__", "__annotations__"), ) model.generate = MethodType(generate, model) def update_signature(model: PeftModel, method: str = "all") -> None: """ Args: Updates the signature of a PeftModel include parents class signature for forward or generate method model (`PeftModel`): Peft model to update generate or forward signature method (`str`): method to update signature choose one of "forward", "generate", "all" Example: ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> from peft import get_peft_model, LoraConfig, TaskType, update_signature >>> model_name_or_path = "bigscience/mt0-large" >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) >>> peft_config = LoraConfig( ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ... 
) >>> peft_model = get_peft_model(model, peft_config) >>> update_signature(peft_model) >>> help(peft_model.generate) ``` """ if method == "forward": update_forward_signature(model) elif method == "generate": update_generate_signature(model) elif method == "all": update_forward_signature(model) update_generate_signature(model) else: raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']")
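A usage note (not from the source): a small sketch showing the effect of `update_signature`, assuming a seq2seq LoRA setup; the model id is illustrative and the final comment describes the expected behavior rather than a guarantee.

```python
# Sketch: restore the base model's generate signature on a PeftModel instance.
import inspect

from transformers import AutoModelForSeq2SeqLM
from peft import LoraConfig, TaskType, get_peft_model
from peft.helpers import update_signature

model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-small")
peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, r=8, lora_alpha=32, lora_dropout=0.1)
peft_model = get_peft_model(model, peft_config)

# Before the update, peft_model.generate only advertises (**kwargs).
update_signature(peft_model, method="generate")

# After the update, inspect/help should mirror the base model's generate signature.
print(inspect.signature(peft_model.generate))
```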
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/import_utils.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import importlib.metadata as importlib_metadata from functools import lru_cache import packaging.version def is_bnb_available() -> bool: return importlib.util.find_spec("bitsandbytes") is not None def is_bnb_4bit_available() -> bool: if not is_bnb_available(): return False import bitsandbytes as bnb return hasattr(bnb.nn, "Linear4bit") def is_auto_gptq_available(): if importlib.util.find_spec("auto_gptq") is not None: AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0") version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq")) if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq: return True else: raise ImportError( f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, " f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported" ) def is_optimum_available() -> bool: return importlib.util.find_spec("optimum") is not None @lru_cache() def is_torch_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" if importlib.util.find_spec("torch_xla") is not None: if check_device: # We need to check if `xla_device` can be found, will raise a RuntimeError if not try: import torch_xla.core.xla_model as xm _ = xm.xla_device() return True except RuntimeError: return False return True return False
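A usage note (not from the source): a sketch of how these availability helpers are typically used to guard an optional dependency; the fallback choice of layer class is an assumption made for illustration.

```python
# Sketch: pick a linear layer implementation depending on what is installed.
from peft.import_utils import is_bnb_4bit_available, is_bnb_available

if is_bnb_4bit_available():
    import bitsandbytes as bnb

    linear_cls = bnb.nn.Linear4bit
elif is_bnb_available():
    import bitsandbytes as bnb

    linear_cls = bnb.nn.Linear8bitLt
else:
    import torch

    # Fall back to a plain linear layer when bitsandbytes is not installed.
    linear_cls = torch.nn.Linear

print(linear_cls)
```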
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/auto.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import importlib from typing import Optional from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, ) from .config import PeftConfig from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) class _BaseAutoPeftModel: _target_class = None _target_peft_class = None def __init__(self, *args, **kwargs): # For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400 raise EnvironmentError( f"{self.__class__.__name__} is designed to be instantiated " f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " f"`{self.__class__.__name__}.from_config(config)` methods." ) @classmethod def from_pretrained( cls, pretrained_model_name_or_path, adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs, ): r""" A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and the config object init. """ peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) base_model_path = peft_config.base_model_name_or_path task_type = getattr(peft_config, "task_type", None) if cls._target_class is not None: target_class = cls._target_class elif cls._target_class is None and task_type is not None: # this is only in the case where we use `AutoPeftModel` raise ValueError( "Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)" ) if task_type is not None: expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type] if cls._target_peft_class.__name__ != expected_target_class.__name__: raise ValueError( f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__ }" " make sure that you are loading the correct model for your task type." 
) elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None: auto_mapping = getattr(peft_config, "auto_mapping", None) base_model_class = auto_mapping["base_model_class"] parent_library_name = auto_mapping["parent_library"] parent_library = importlib.import_module(parent_library_name) target_class = getattr(parent_library, base_model_class) else: raise ValueError( "Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type." ) base_model = target_class.from_pretrained(base_model_path, **kwargs) return cls._target_peft_class.from_pretrained( base_model, pretrained_model_name_or_path, adapter_name=adapter_name, is_trainable=is_trainable, config=config, **kwargs, ) class AutoPeftModel(_BaseAutoPeftModel): _target_class = None _target_peft_class = PeftModel class AutoPeftModelForCausalLM(_BaseAutoPeftModel): _target_class = AutoModelForCausalLM _target_peft_class = PeftModelForCausalLM class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel): _target_class = AutoModelForSeq2SeqLM _target_peft_class = PeftModelForSeq2SeqLM class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel): _target_class = AutoModelForSequenceClassification _target_peft_class = PeftModelForSequenceClassification class AutoPeftModelForTokenClassification(_BaseAutoPeftModel): _target_class = AutoModelForTokenClassification _target_peft_class = PeftModelForTokenClassification class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel): _target_class = AutoModelForQuestionAnswering _target_peft_class = PeftModelForQuestionAnswering class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel): _target_class = AutoModel _target_peft_class = PeftModelForFeatureExtraction
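A usage note (not from the source): a minimal sketch of loading an adapter checkpoint through the Auto classes; the Hub repo id below is a placeholder for any repo that contains an `adapter_config.json` plus adapter weights.

```python
# Sketch: resolve the base model from the adapter config and wrap it in one call.
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained(
    "ybelkada/opt-350m-lora",  # placeholder adapter repo id
    is_trainable=False,
)

# The result is a PeftModelForCausalLM wrapping the resolved base model.
print(type(model))
```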
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.6.3.dev0" from .auto import ( AutoPeftModel, AutoPeftModelForCausalLM, AutoPeftModelForSequenceClassification, AutoPeftModelForSeq2SeqLM, AutoPeftModelForTokenClassification, AutoPeftModelForQuestionAnswering, AutoPeftModelForFeatureExtraction, ) from .mapping import ( MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING, get_peft_config, get_peft_model, inject_adapter_in_model, ) from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, PeftModelForQuestionAnswering, PeftModelForFeatureExtraction, ) from .tuners import ( AdaptionPromptConfig, AdaptionPromptModel, LoraConfig, LoraModel, LoHaConfig, LoHaModel, LoKrConfig, LoKrModel, IA3Config, IA3Model, AdaLoraConfig, AdaLoraModel, PrefixEncoder, PrefixTuningConfig, PromptEmbedding, PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType, PromptTuningConfig, PromptTuningInit, MultitaskPromptTuningConfig, MultitaskPromptTuningInit, ) from .utils import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, PeftType, TaskType, bloom_model_postprocess_past_key_value, get_peft_model_state_dict, prepare_model_for_int8_training, prepare_model_for_kbit_training, set_peft_model_state_dict, shift_tokens_right, load_peft_weights, ) from .config import PeftConfig, PromptLearningConfig
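A usage note (not from the source): a minimal sketch of the most common entry points re-exported at the package root; the base model id and hyperparameters are illustrative.

```python
# Sketch: the typical top-level workflow using the public re-exports above.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # illustrative base model
config = LoraConfig(task_type=TaskType.CAUSAL_LM, r=8, lora_alpha=16, lora_dropout=0.05)

peft_model = get_peft_model(base, config)
peft_model.print_trainable_parameters()
```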
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import json import os from dataclasses import asdict, dataclass, field from typing import Dict, Optional, Union from huggingface_hub import hf_hub_download from transformers.utils import PushToHubMixin from .utils import CONFIG_NAME, PeftType, TaskType @dataclass class PeftConfigMixin(PushToHubMixin): r""" This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a directory. The method `from_pretrained` will load the configuration of your adapter model from a directory. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. """ peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."}) auto_mapping: Optional[dict] = field( default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."} ) def to_dict(self) -> Dict: return asdict(self) def save_pretrained(self, save_directory: str, **kwargs) -> None: r""" This method saves the configuration of your adapter model in a directory. Args: save_directory (`str`): The directory where the configuration will be saved. kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`] method. """ if os.path.isfile(save_directory): raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) auto_mapping_dict = kwargs.pop("auto_mapping_dict", None) output_dict = asdict(self) # converting set type to list for key, value in output_dict.items(): if isinstance(value, set): output_dict[key] = list(value) output_path = os.path.join(save_directory, CONFIG_NAME) # Add auto mapping details for custom models. if auto_mapping_dict is not None: output_dict["auto_mapping"] = auto_mapping_dict # save it with open(output_path, "w") as writer: writer.write(json.dumps(output_dict, indent=2, sort_keys=True)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs): r""" This method loads the configuration of your adapter model from a directory. Args: pretrained_model_name_or_path (`str`): The directory or the Hub repository id where the configuration is saved. kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the child class initialization. """ # Avoid circular dependency .. 
TODO: fix this with a larger refactor from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING path = ( os.path.join(pretrained_model_name_or_path, subfolder) if subfolder is not None else pretrained_model_name_or_path ) hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs) if os.path.isfile(os.path.join(path, CONFIG_NAME)): config_file = os.path.join(path, CONFIG_NAME) else: try: config_file = hf_hub_download( pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs ) except Exception: raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'") loaded_attributes = cls.from_json_file(config_file) # TODO: this hack is needed to fix the following issue (on commit 702f937): # if someone saves a default config and loads it back with `PeftConfig` class it yields to # not loading the correct config class. # from peft import AdaLoraConfig, PeftConfig # peft_config = AdaLoraConfig() # print(peft_config) # >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None, # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ... # # peft_config.save_pretrained("./test_config") # peft_config = PeftConfig.from_pretrained("./test_config") # print(peft_config) # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False) if "peft_type" in loaded_attributes: peft_type = loaded_attributes["peft_type"] config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type] else: config_cls = cls kwargs = {**class_kwargs, **loaded_attributes} config = config_cls(**kwargs) return config @classmethod def from_json_file(cls, path_json_file: str, **kwargs): r""" Loads a configuration file from a json file. Args: path_json_file (`str`): The path to the json file. """ with open(path_json_file, "r") as file: json_object = json.load(file) return json_object @classmethod def _split_kwargs(cls, kwargs): hf_hub_download_kwargs = {} class_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in inspect.signature(hf_hub_download).parameters: hf_hub_download_kwargs[key] = value elif key in list(cls.__annotations__): class_kwargs[key] = value else: other_kwargs[key] = value return hf_hub_download_kwargs, class_kwargs, other_kwargs @classmethod def _get_peft_type( cls, model_id: str, **hf_hub_download_kwargs, ): subfolder = hf_hub_download_kwargs.get("subfolder", None) path = os.path.join(model_id, subfolder) if subfolder is not None else model_id if os.path.isfile(os.path.join(path, CONFIG_NAME)): config_file = os.path.join(path, CONFIG_NAME) else: try: config_file = hf_hub_download( model_id, CONFIG_NAME, **hf_hub_download_kwargs, ) except Exception: raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'") loaded_attributes = cls.from_json_file(config_file) return loaded_attributes["peft_type"] @property def is_prompt_learning(self) -> bool: r""" Utility method to check if the configuration is for prompt learning. """ return False @property def is_adaption_prompt(self) -> bool: """Return True if this is an adaption prompt config.""" return False @dataclass class PeftConfig(PeftConfigMixin): """ This is the base configuration class to store the configuration of a [`PeftModel`]. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. 
inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. """ base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name of the base model to use."} ) revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."}) peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) @dataclass class PromptLearningConfig(PeftConfig): """ This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or [`PromptTuning`]. Args: num_virtual_tokens (`int`): The number of virtual tokens to use. token_dim (`int`): The hidden embedding dimension of the base transformer model. num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model. num_attention_heads (`int`): The number of attention heads in the base transformer model. num_layers (`int`): The number of layers in the base transformer model. """ num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"}) token_dim: int = field( default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"} ) num_transformer_submodules: Optional[int] = field( default=None, metadata={"help": "Number of transformer submodules"} ) num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"}) num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"}) @property def is_prompt_learning(self) -> bool: r""" Utility method to check if the configuration is for prompt learning. """ return True
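A usage note (not from the source): a sketch of the save/load round trip implemented above, including the behavior documented in the `from_pretrained` comment that the concrete config subclass is restored; the directory name is illustrative.

```python
# Sketch: save a concrete config and reload it through the base class.
from peft import LoraConfig, PeftConfig

cfg = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
cfg.save_pretrained("./my-adapter-config")  # writes adapter_config.json

# from_pretrained inspects `peft_type` in the JSON and returns the concrete
# subclass (LoraConfig here), not a bare PeftConfig.
reloaded = PeftConfig.from_pretrained("./my-adapter-config")
print(type(reloaded).__name__, reloaded.r, reloaded.lora_alpha)
```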
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/save_and_load.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Optional import torch from huggingface_hub import file_exists, hf_hub_download from huggingface_hub.utils import EntryNotFoundError from safetensors.torch import load_file as safe_load_file from .other import SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, infer_device from .peft_types import PeftType def get_peft_model_state_dict(model, state_dict=None, adapter_name="default", unwrap_compiled=False): """ Get the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module). state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provided, the state dict of the passed model will be used. adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter whose state dict should be returned. unwrap_compiled (`bool`, *optional*, defaults to `False`): Whether to unwrap the model if torch.compile was used. """ if unwrap_compiled: model = getattr(model, "_orig_mod", model) config = model.peft_config[adapter_name] if state_dict is None: state_dict = model.state_dict() if config.peft_type in (PeftType.LORA, PeftType.ADALORA): # to_return = lora_state_dict(model, bias=model.peft_config.bias) # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py` # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP bias = config.bias if bias == "none": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k} elif bias == "all": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k} elif bias == "lora_only": to_return = {} for k in state_dict: if "lora_" in k: to_return[k] = state_dict[k] bias_name = k.split("lora_")[0] + "bias" if bias_name in state_dict: to_return[bias_name] = state_dict[bias_name] else: raise NotImplementedError to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))} if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()} config.rank_pattern = rank_pattern to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name) elif config.peft_type == PeftType.LOHA: to_return = {k: state_dict[k] for k in state_dict if "hada_" in k} elif config.peft_type == PeftType.LOKR: to_return = {k: state_dict[k] for k in state_dict if "lokr_" in k} elif config.peft_type == PeftType.ADAPTION_PROMPT: to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")} elif config.is_prompt_learning: to_return = {} if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: to_return["prefix_task_cols"] = model.prompt_encoder[adapter_name].prefix_task_cols 
to_return["prefix_task_rows"] = model.prompt_encoder[adapter_name].prefix_task_rows prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: if config.inference_mode: prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name) to_return["prompt_embeddings"] = prompt_embeddings elif config.peft_type == PeftType.IA3: to_return = {k: state_dict[k] for k in state_dict if "ia3_" in k} else: raise NotImplementedError if getattr(model, "modules_to_save", None) is not None: for key, value in state_dict.items(): if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save): to_return[key.replace("modules_to_save.", "")] = value to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()} return to_return def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name="default"): """ Set the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. peft_model_state_dict (`dict`): The state dict of the Peft model. """ config = model.peft_config[adapter_name] state_dict = {} if getattr(model, "modules_to_save", None) is not None: for key, value in peft_model_state_dict.items(): if any(module_name in key for module_name in model.modules_to_save): for module_name in model.modules_to_save: if module_name in key: key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}") break state_dict[key] = value else: state_dict = peft_model_state_dict if config.peft_type in (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.IA3): peft_model_state_dict = {} parameter_prefix = { PeftType.IA3: "ia3_", PeftType.LORA: "lora_", PeftType.ADALORA: "lora_", PeftType.LOHA: "hada_", PeftType.LOKR: "lokr_", }[config.peft_type] for k, v in state_dict.items(): if parameter_prefix in k: suffix = k.split(parameter_prefix)[1] if "." in suffix: suffix_to_replace = ".".join(suffix.split(".")[1:]) k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}") else: k = f"{k}.{adapter_name}" peft_model_state_dict[k] = v else: peft_model_state_dict[k] = v if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: model.resize_modules_by_rank_pattern(rank_pattern, adapter_name) elif config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT: peft_model_state_dict = state_dict else: raise NotImplementedError load_result = model.load_state_dict(peft_model_state_dict, strict=False) if config.is_prompt_learning: model.prompt_encoder[adapter_name].embedding.load_state_dict( {"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True ) if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: model.prompt_encoder[adapter_name].load_state_dict(peft_model_state_dict, strict=False) return load_result def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict: r""" A helper method to load the PEFT weights from the HuggingFace Hub or locally Args: model_id (`str`): The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub. device (`str`): The device to load the weights onto. hf_hub_download_kwargs (`dict`): Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub. 
""" path = ( os.path.join(model_id, hf_hub_download_kwargs["subfolder"]) if hf_hub_download_kwargs.get("subfolder", None) is not None else model_id ) if device is None: device = infer_device() if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)): filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME) use_safetensors = True elif os.path.exists(os.path.join(path, WEIGHTS_NAME)): filename = os.path.join(path, WEIGHTS_NAME) use_safetensors = False else: token = hf_hub_download_kwargs.get("token", None) if token is None: token = hf_hub_download_kwargs.get("use_auth_token", None) has_remote_safetensors_file = file_exists( repo_id=model_id, filename=SAFETENSORS_WEIGHTS_NAME, revision=hf_hub_download_kwargs.get("revision", None), repo_type=hf_hub_download_kwargs.get("repo_type", None), token=token, ) use_safetensors = has_remote_safetensors_file if has_remote_safetensors_file: # Priority 1: load safetensors weights filename = hf_hub_download( model_id, SAFETENSORS_WEIGHTS_NAME, **hf_hub_download_kwargs, ) else: try: filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs) except EntryNotFoundError: raise ValueError( f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. " f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}." ) if use_safetensors: adapters_weights = safe_load_file(filename, device=device) else: adapters_weights = torch.load(filename, map_location=torch.device(device)) return adapters_weights
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/peft_types.py
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum


class PeftType(str, enum.Enum):
    PROMPT_TUNING = "PROMPT_TUNING"
    MULTITASK_PROMPT_TUNING = "MULTITASK_PROMPT_TUNING"
    P_TUNING = "P_TUNING"
    PREFIX_TUNING = "PREFIX_TUNING"
    LORA = "LORA"
    ADALORA = "ADALORA"
    ADAPTION_PROMPT = "ADAPTION_PROMPT"
    IA3 = "IA3"
    LOHA = "LOHA"
    LOKR = "LOKR"


class TaskType(str, enum.Enum):
    SEQ_CLS = "SEQ_CLS"
    SEQ_2_SEQ_LM = "SEQ_2_SEQ_LM"
    CAUSAL_LM = "CAUSAL_LM"
    TOKEN_CLS = "TOKEN_CLS"
    QUESTION_ANS = "QUESTION_ANS"
    FEATURE_EXTRACTION = "FEATURE_EXTRACTION"
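A usage note (not from the source): both enums subclass `str`, so they compare equal to plain strings and can index the string-keyed mappings used elsewhere in the package; a tiny sketch:

```python
# Sketch: str-enum behavior of PeftType and TaskType.
from peft.utils.peft_types import PeftType, TaskType

assert PeftType.LORA == "LORA"                     # equality with plain strings
assert TaskType("CAUSAL_LM") is TaskType.CAUSAL_LM  # lookup by value
print(PeftType("ADALORA"), TaskType.SEQ_CLS.value)
```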
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/other.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import warnings from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available # Get current device name based on available devices def infer_device(): if torch.cuda.is_available(): torch_device = "cuda" elif is_xpu_available(): torch_device = "xpu" elif is_npu_available(): torch_device = "npu" else: torch_device = "cpu" return torch_device # needed for prefix-tuning of bloom model def bloom_model_postprocess_past_key_value(past_key_values): past_key_values = torch.cat(past_key_values) total_layers, batch_size, num_attention_heads, num_virtual_tokens, head_dim = past_key_values.shape keys = past_key_values[: total_layers // 2] keys = keys.transpose(2, 3).reshape( total_layers // 2, batch_size * num_attention_heads, head_dim, num_virtual_tokens ) values = past_key_values[total_layers // 2 :] values = values.reshape(total_layers // 2, batch_size * num_attention_heads, num_virtual_tokens, head_dim) return tuple(zip(keys, values)) # needed for prefix-tuning of StarCoder models def starcoder_model_postprocess_past_key_value(past_key_values): result = [] for k in past_key_values: k = k[:, :, 0] k = k.permute([1, 2, 0, 3]) k = k.reshape(*k.shape[:-2], -1) result.append(k) return tuple(result) def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True, gradient_checkpointing_kwargs=None): r""" Note this method only works for `transformers` models. This method wraps the entire protocol for preparing a model before running a training. This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model (`transformers.PreTrainedModel`): The loaded model from `transformers` use_gradient_checkpointing (`bool`, *optional*, defaults to `True`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`): Keyword arguments to pass to the gradient checkpointing function, please refer to the documentation of `torch.utils.checkpoint.checkpoint` for more details about the arguments that you can pass to that method. Note this is only available in the latest transformers versions (> 4.34.1). 
""" loaded_in_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) is_gptq_quantized = getattr(model, "quantization_method", None) == "gptq" if gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {} for name, param in model.named_parameters(): # freeze base model's layers param.requires_grad = False if not is_gptq_quantized: # cast all non INT8 parameters to fp32 for param in model.parameters(): if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) if (loaded_in_kbit or is_gptq_quantized) and use_gradient_checkpointing: # When having `use_reentrant=False` + gradient_checkpointing, there is no need for this hack if "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"]: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # To support older transformers versions, check if the model supports gradient_checkpointing_kwargs _supports_gc_kwargs = "gradient_checkpointing_kwargs" in list( inspect.signature(model.gradient_checkpointing_enable).parameters ) if not _supports_gc_kwargs and len(gradient_checkpointing_kwargs) > 0: warnings.warn( "gradient_checkpointing_kwargs is not supported in this version of transformers. The passed kwargs will be ignored." " if you want to use that feature, please upgrade to the latest version of transformers.", FutureWarning, ) gc_enable_kwargs = ( {} if not _supports_gc_kwargs else {"gradient_checkpointing_kwargs": gradient_checkpointing_kwargs} ) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable(**gc_enable_kwargs) return model # For backward compatibility def prepare_model_for_int8_training(*args, **kwargs): warnings.warn( "prepare_model_for_int8_training is deprecated and will be removed in a future version. Use prepare_model_for_kbit_training instead.", FutureWarning, ) return prepare_model_for_kbit_training(*args, **kwargs) # copied from transformers.models.bart.modeling_bart def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids pad_token_id (`int`): The id of the `padding` token. decoder_start_token_id (`int`): The id of the `start` token. 
""" shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class ModulesToSaveWrapper(torch.nn.Module): def __init__(self, module_to_save, adapter_name): super().__init__() self.original_module = module_to_save self.modules_to_save = torch.nn.ModuleDict({}) self._active_adapter = adapter_name self._disable_adapters = False self.update(adapter_name) @property def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters @property def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def update(self, adapter_name): self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)})) if hasattr(self.modules_to_save[adapter_name], "_hf_hook"): old_hook = self.modules_to_save[adapter_name]._hf_hook new_hook = self._create_new_hook(old_hook) remove_hook_from_module(self.modules_to_save[adapter_name]) add_hook_to_module(self.modules_to_save[adapter_name], new_hook) self.original_module.requires_grad_(False) if adapter_name == self.active_adapter: self.modules_to_save[adapter_name].requires_grad_(True) def _create_new_hook(self, old_hook): r""" Creates a new hook based on the old hook. Use it only if you know what you are doing ! """ old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__) old_hook_attr = old_hook.__dict__ filtered_old_hook_attr = {} old_hook_init_signature = inspect.signature(old_hook_cls.__init__) for k in old_hook_attr.keys(): if k in old_hook_init_signature.parameters: filtered_old_hook_attr[k] = old_hook_attr[k] new_hook = old_hook_cls(**filtered_old_hook_attr) return new_hook def forward(self, *args, **kwargs): if self.disable_adapters or (self.active_adapter not in self.modules_to_save): return self.original_module(*args, **kwargs) return self.modules_to_save[self.active_adapter](*args, **kwargs) def enable_adapters(self, enabled: bool): """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. 
Args: enabled (bool): True to enable adapters, False to disable adapters """ if self._disable_adapters is not enabled: # already in the desired state, do nothing return if enabled: self.original_module.requires_grad_(False) self.modules_to_save[self.active_adapter].requires_grad_(True) self._disable_adapters = False else: self.original_module.requires_grad_(True) self.modules_to_save.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_name: str): """Set the active adapter Args: adapter_name (str): The name of the adapter to set as active """ if adapter_name not in self.modules_to_save: raise ValueError(f"Adapter {adapter_name} not found in {self.modules_to_save.keys()}") self.modules_to_save[self.active_adapter].requires_grad_(False) self.modules_to_save[adapter_name].requires_grad_(True) self._active_adapter = adapter_name def _get_submodules(model, key): parent = model.get_submodule(".".join(key.split(".")[:-1])) target_name = key.split(".")[-1] target = model.get_submodule(key) return parent, target, target_name def _freeze_adapter(model, adapter_name): for n, p in model.named_parameters(): if adapter_name in n: p.requires_grad = False def _set_trainable(model, adapter_name): key_list = [key for key, _ in model.named_modules()] for key in key_list: target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save) if target_module_found: parent, target, target_name = _get_submodules(model, key) if isinstance(target, ModulesToSaveWrapper): target.update(adapter_name) target.set_adapter(target.active_adapter) else: new_module = ModulesToSaveWrapper(target, adapter_name) new_module.set_adapter(adapter_name) setattr(parent, target_name, new_module) def _set_adapter(model, adapter_name): def check_adapter_name(adapter_name): if isinstance(adapter_name, str): return adapter_name # adapter_name is a list of str if len(adapter_name) > 1: raise ValueError("Only one adapter can be set at a time for modules_to_save") elif len(adapter_name) == 0: raise ValueError("Please specify at least one adapter to set") adapter_name = adapter_name[0] return adapter_name for module in model.modules(): if isinstance(module, ModulesToSaveWrapper): # only check the adapter_name if we actually encounter a ModulesToSaveWrapper, otherwise we don't care adapter_name = check_adapter_name(adapter_name) module.set_adapter(adapter_name) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads 
= model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", peft_config.token_dim) return peft_config def fsdp_auto_wrap_policy(model): import functools import os from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy from ..tuners import PrefixEncoder, PromptEmbedding, PromptEncoder def lambda_policy_fn(module): if ( len(list(module.named_children())) == 0 and getattr(module, "weight", None) is not None and module.weight.requires_grad ): return True return False lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn) transformer_wrap_policy = functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=( PrefixEncoder, PromptEncoder, PromptEmbedding, FullyShardedDataParallelPlugin.get_module_class_from_name( model, os.environ.get("FSDP_TRANSFORMER_CLS_TO_WRAP", "") ), ), ) auto_wrap_policy = functools.partial(_or_policy, policies=[lambda_policy, transformer_wrap_policy]) return auto_wrap_policy def transpose(weight, fan_in_fan_out): if not fan_in_fan_out: return weight if isinstance(weight, torch.nn.Parameter): return torch.nn.Parameter(weight.T) return weight.T def _is_valid_match(key: str, target_key: str): """ Helper function to match module names target_key and key. Makes sure that either the key is exactly the target_key or the target_key is a submodule of key """ if key.endswith(target_key): if len(key) > len(target_key): return key.endswith("." + target_key) # must be a sub module return True return False def _get_batch_size(input_ids: Optional[torch.Tensor], inputs_embeds: Optional[torch.Tensor]) -> int: """Get the batch size based on either input_ids or input_embeds Raises an ValueError if both are None. 
""" if (input_ids is None) and (inputs_embeds is None): raise ValueError("You have to provide either input_ids or inputs_embeds") if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] return batch_size def get_quantization_config(model: torch.nn.Module, method: str): """ Get the quantization config of the related quantization method """ if ( hasattr(model, "config") and hasattr(model.config, "quantization_config") and (getattr(model, "quantization_method", None) == method) ): return model.config.quantization_config return None def get_auto_gptq_quant_linear(gptq_quantization_config): """ Get the right AutoGPTQQuantLinear class based on the quantization config file """ if gptq_quantization_config is not None and is_auto_gptq_available(): from auto_gptq.utils.import_utils import dynamically_import_QuantLinear desc_act = gptq_quantization_config.desc_act group_size = gptq_quantization_config.group_size bits = gptq_quantization_config.bits if hasattr(gptq_quantization_config, "use_exllama"): use_exllama = gptq_quantization_config.use_exllama else: use_exllama = not gptq_quantization_config.disable_exllama if hasattr(gptq_quantization_config, "exllama_config"): exllama_version = gptq_quantization_config.exllama_config["version"] else: exllama_version = 1 AutoGPTQQuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=desc_act, group_size=group_size, bits=bits, disable_exllama=not (use_exllama and exllama_version == 1), disable_exllamav2=not (use_exllama and exllama_version == 2), ) return AutoGPTQQuantLinear return None def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: """ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. This method is the exact same copy of https://github.com/huggingface/transformers/blob/main/src/transformers/pytorch_utils.py#L282C1-L300C58 but we added it here manually to avoid import issue with old versions of transformers. """ if tensor.device.type == "xla" and is_torch_tpu_available(): # NOTE: xla tensors dont have storage # use some other unique id to distinguish. # this is a XLA tensor, it must be created using torch_xla's # device. 
So the following import is safe: import torch_xla unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor) else: unique_id = storage_ptr(tensor) return tensor.device, unique_id, storage_size(tensor) TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING = { "t5": ["q", "v"], "mt5": ["q", "v"], "bart": ["q_proj", "v_proj"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "blip-2": ["q", "v", "q_proj", "v_proj"], "opt": ["q_proj", "v_proj"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "value"], "xlm-roberta": ["query", "value"], "electra": ["query", "value"], "deberta-v2": ["query_proj", "value_proj"], "deberta": ["in_proj"], "layoutlm": ["query", "value"], "llama": ["q_proj", "v_proj"], "chatglm": ["query_key_value"], "gpt_bigcode": ["c_attn"], "mpt": ["Wqkv"], "RefinedWebModel": ["query_key_value"], "RefinedWeb": ["query_key_value"], "falcon": ["query_key_value"], "btlm": ["c_proj", "c_attn"], "codegen": ["qkv_proj"], "mistral": ["q_proj", "v_proj"], "stablelm": ["q_proj", "v_proj"], "phi": ["Wqkv", "out_proj", "fc1", "fc2"], } TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING = { "t5": ["k", "v", "wo"], "mt5": ["k", "v", "wi_1"], "gpt2": ["c_attn", "mlp.c_proj"], "bloom": ["query_key_value", "mlp.dense_4h_to_h"], "roberta": ["key", "value", "output.dense"], "opt": ["q_proj", "k_proj", "fc2"], "gptj": ["q_proj", "v_proj", "fc_out"], "gpt_neox": ["query_key_value", "dense_4h_to_h"], "gpt_neo": ["q_proj", "v_proj", "c_proj"], "bart": ["q_proj", "v_proj", "fc2"], "gpt_bigcode": ["c_attn", "mlp.c_proj"], "llama": ["k_proj", "v_proj", "down_proj"], "bert": ["key", "value", "output.dense"], "deberta-v2": ["key_proj", "value_proj", "output.dense"], "deberta": ["in_proj", "output.dense"], "RefinedWebModel": ["query_key_value", "dense_4h_to_h"], "RefinedWeb": ["query_key_value", "dense_4h_to_h"], "falcon": ["query_key_value", "dense_4h_to_h"], } TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING = { "t5": ["wo"], "mt5": [], "gpt2": ["mlp.c_proj"], "bloom": ["mlp.dense_4h_to_h"], "roberta": ["output.dense"], "opt": ["fc2"], "gptj": ["fc_out"], "gpt_neox": ["dense_4h_to_h"], "gpt_neo": ["c_proj"], "bart": ["fc2"], "gpt_bigcode": ["mlp.c_proj"], "llama": ["down_proj"], "bert": ["output.dense"], "deberta-v2": ["output.dense"], "deberta": ["output.dense"], "RefinedWeb": ["dense_4h_to_h"], "RefinedWebModel": ["dense_4h_to_h"], "falcon": ["dense_4h_to_h"], } COMMON_LAYERS_PATTERN = ["layers", "h", "block", "blocks", "layer"] TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = { "t5": ["q", "k", "v", "o", "wi", "wo"], "mt5": ["q", "k", "v", "o", "wi_0", "wi_1", "wo"], "bart": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "opt": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "llama": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "key", "value", "dense"], # "xlm-roberta": ["query", "value"], # "electra": ["query", "value"], "deberta-v2": ["query_proj", "key_proj", "value_proj", "dense"], "gpt_bigcode": ["c_attn"], "deberta": ["in_proj"], # "layoutlm": ["query", "value"], } TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = { "bloom": bloom_model_postprocess_past_key_value, "gpt_bigcode": starcoder_model_postprocess_past_key_value, } WEIGHTS_NAME = "adapter_model.bin" SAFETENSORS_WEIGHTS_NAME = 
"adapter_model.safetensors" CONFIG_NAME = "adapter_config.json"
0
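# A minimal usage sketch for the TRANSFORMERS_MODELS_TO_*_TARGET_MODULES_MAPPING tables defined in
# peft/utils/other.py above: the default LoRA target modules are looked up by the `model_type`
# exposed on a model's config. `_DummyConfig` and `_DummyModel` are hypothetical stand-ins, not
# real library classes; in practice a transformers model would be passed instead.


class _DummyConfig:
    model_type = "llama"


class _DummyModel:
    config = _DummyConfig()


def _infer_default_lora_targets(model) -> list:
    # Model types missing from the table have no default and must set `target_modules` explicitly.
    model_type = getattr(model.config, "model_type", None)
    if model_type not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
        raise ValueError(f"No default LoRA target modules known for model_type={model_type!r}")
    return TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_type]


if __name__ == "__main__":
    print(_infer_default_lora_targets(_DummyModel()))  # -> ['q_proj', 'v_proj']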
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/__init__.py
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module while still preserving the other warnings, so this module is not checked at all.

# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# from .config import PeftConfig, PeftType, PromptLearningConfig, TaskType
from .peft_types import PeftType, TaskType
from .other import (
    TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
    TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
    TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
    TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
    TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
    COMMON_LAYERS_PATTERN,
    CONFIG_NAME,
    WEIGHTS_NAME,
    SAFETENSORS_WEIGHTS_NAME,
    _set_trainable,
    bloom_model_postprocess_past_key_value,
    prepare_model_for_int8_training,
    prepare_model_for_kbit_training,
    shift_tokens_right,
    transpose,
    _get_batch_size,
    _get_submodules,
    _set_adapter,
    _freeze_adapter,
    ModulesToSaveWrapper,
    _prepare_prompt_learning_config,
    _is_valid_match,
    infer_device,
    get_auto_gptq_quant_linear,
    get_quantization_config,
    id_tensor_storage,
)
from .save_and_load import get_peft_model_state_dict, set_peft_model_state_dict, load_peft_weights
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/tuners/tuners_utils.py
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import logging
import re
import warnings
from abc import ABC, abstractmethod
from typing import Any, Union

import torch
from torch import nn

from peft.utils import COMMON_LAYERS_PATTERN

from ..config import PeftConfig
from ..utils import ModulesToSaveWrapper, _get_submodules

logger = logging.getLogger(__name__)


class BaseTuner(nn.Module, ABC):
    r"""
    A base tuner model that provides the common methods and attributes for all tuners that are injectable into a
    torch.nn.Module

    For adding a new Tuner class, one needs to overwrite the following methods:

    - **_prepare_adapter_config**:
        A private method to eventually prepare the adapter config, for example in case the field `target_modules` is
        missing.
    - **_check_target_module_exists**:
        A helper private method to check if the passed module's key name matches any of the target modules in the
        adapter_config.
    - **_create_and_replace**:
        A private method to create and replace the target module with the adapter module.
    - **_mark_only_adapters_as_trainable**:
        A private helper method to mark only the adapter layers as trainable, freezing all other parameters of the
        model.

    The easiest way is to check what is done in the `peft.tuners.lora.LoraModel` class.

    Attributes:
        model (`torch.nn.Module`):
            The model to which the adapter tuner layers will be attached.
        forward (`Callable`):
            The forward method of the model.
        peft_config (`Union[`PeftConfig`, dict[str, PeftConfig]]`):
            The adapter configuration object, it should be a dictionary of `str` to `PeftConfig` objects. One can also
            pass a PeftConfig object and a new adapter will be created with the default name `adapter` or create a new
            dictionary with a key `adapter_name` and a value of that peft config.
        config (`dict[str, Any]`):
            The model configuration object, it should be a dictionary of `str` to `Any` objects.
    """

    def __init__(self, model, peft_config: Union[PeftConfig, dict[str, PeftConfig]], adapter_name: str) -> None:
        super().__init__()

        self.model = model

        # For advanced developers, if you want to attach multiple adapters to your
        # model, just add a `peft_config` dict attribute to your model.
        if not hasattr(self, "peft_config"):
            self.peft_config = {adapter_name: peft_config} if isinstance(peft_config, PeftConfig) else peft_config
        else:
            logger.info(
                "Already found a `peft_config` attribute in the model. This will lead to having multiple adapters"
                " in the model. Make sure to know what you are doing!"
            )
            if isinstance(peft_config, PeftConfig):
                self.peft_config[adapter_name] = peft_config
            else:
                # user is adding a dict of PeftConfigs
                self.peft_config.update(peft_config)

        self.active_adapter = adapter_name

        # transformers models have a .config attribute, whose presence is assumed later on
        if not hasattr(self, "config"):
            self.config = {"model_type": "custom"}

        self.inject_adapter(self.model, adapter_name)

        # Copy the peft_config in the injected model.
self.model.peft_config = self.peft_config @property def active_adapters(self) -> list[str]: if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def forward(self, *args: Any, **kwargs: Any): return self.model.forward(*args, **kwargs) @abstractmethod def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig: r""" A private method to eventually prepare the adapter config. For transformers based models, if `peft_config.target_modules` is None, we can automatically infer the target modules from the `TRANSFORMERS_MODELS_TO_XXX_TARGET_MODULES_MAPPING`. This method can be further refactored in the future to automatically infer it for all tuner models. Check out `peft.tuner.lora.LoraModel._prepare_adapter_config` for an example. Args: peft_config (`str`): The adapter config. model_config (`str`): The transformers model config, that config should contain the `model_type` key. """ ... @abstractmethod def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool: r""" A helper private method to check if the passed module's key name matches any of the target modules in the `peft_config.target_modules` list. If it does, return `True`, else return `False`. Args: peft_config (`PeftConfig`): The adapter config. key (`str`): The module's key name. """ ... @abstractmethod def _create_and_replace( self, peft_config: PeftConfig, adapter_name: str, target: nn.Module, target_name: str, parent: nn.Module, **optional_kwargs: Any, ) -> None: r""" Inplace replacement of the target module with the adapter layer. This method needs to be overriden by all the tuner classes. Check `peft.tuners.lora.LoraModel._create_and_replace` for an example. Args: peft_config (`PeftConfig`): The adapter config. adapter_name (`str`): The adapter name. target (`nn.Module`): The target module. target_name (`str`): The target module's name. parent (`nn.Module`): The parent module. **optional_kwargs (`dict`): The optional keyword arguments to pass to deal with particular cases (e.g. 8bit, 4bit quantization) """ ... @abstractmethod def _mark_only_adapters_as_trainable(self): r""" A helper method to mark only the adapter layers as trainable (i.e. module.requires_grad = False) This needs to be overriden for all tuner classes to match the correct key names. Check `peft.tuners.lora.LoraModel._mark_only_adapters_as_trainable` for an example. """ ... def _check_new_adapter_config(self, config: PeftConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ pass def inject_adapter(self, model: nn.Module, adapter_name: str): r""" Creates adapter layers and replaces the target modules with the adapter layers. This method is called under the hood by `peft.mapping.get_peft_model` if a non-prompt tuning adapter class is passed. The corresponding PEFT config is directly retrieved from the `peft_config` attribute of the BaseTuner class. Args: model (`nn.Module`): The model to be tuned. adapter_name (`str`): The adapter name. """ peft_config = self.peft_config[adapter_name] # Note: If possible, all checks should be performed *at the start of this method*. # This way, we can raise early if something goes wrong, without leaving the model # in a bad (half-initialized) state. 
self._check_new_adapter_config(peft_config) is_target_modules_in_base_model = False key_list = [key for key, _ in model.named_modules()] _check_for_modules_to_save = getattr(peft_config, "modules_to_save", None) is not None _has_modules_to_save = False model_config = getattr(model, "config", {"model_type": "custom"}) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() peft_config = self._prepare_adapter_config(peft_config, model_config) for key in key_list: # Check for modules_to_save in case if _check_for_modules_to_save and any( key.endswith(f"{module_to_save}") for module_to_save in peft_config.modules_to_save ): # Optionally set the modules to save parent, target, target_name = _get_submodules(model, key) if not isinstance(target, ModulesToSaveWrapper): new_module = ModulesToSaveWrapper(target, adapter_name) setattr(parent, target_name, new_module) else: target.update(adapter_name) _has_modules_to_save = True continue if not self._check_target_module_exists(peft_config, key): continue is_target_modules_in_base_model = True parent, target, target_name = _get_submodules(model, key) optional_kwargs = { "loaded_in_8bit": getattr(model, "is_loaded_in_8bit", False), "loaded_in_4bit": getattr(model, "is_loaded_in_4bit", False), "current_key": key, } self._create_and_replace(peft_config, adapter_name, target, target_name, parent, **optional_kwargs) if not is_target_modules_in_base_model: raise ValueError( f"Target modules {peft_config.target_modules} not found in the base model. " f"Please check the target modules and try again." ) self._mark_only_adapters_as_trainable() if self.peft_config[adapter_name].inference_mode: for n, p in self.model.named_parameters(): if adapter_name in n: p.requires_grad = False if _has_modules_to_save: if not hasattr(model, "modules_to_save"): model.modules_to_save = set(peft_config.modules_to_save) else: model.modules_to_save.update(set(peft_config.modules_to_save)) def merge_adapter(self): """ This method merges the LoRa layers into the base model. """ for module in self.model.modules(): if isinstance(module, BaseTunerLayer): module.merge() def unmerge_adapter(self): """ This method unmerges the LoRa layers from the base model. """ for module in self.model.modules(): if isinstance(module, BaseTunerLayer): module.unmerge() class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_plugable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer @property def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. 
# https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def merge(self, *args) -> None: raise NotImplementedError def unmerge(self, *args) -> None: raise NotImplementedError @property def merged(self) -> bool: return bool(self.merged_adapters) @property def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters @property def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter @property def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool): """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]): """Set the active adapter Args: adapter_name (str): The name of the adapter to set as active """ if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. 
Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) def check_target_module_exists(config, key: str) -> bool | re.Match[str] | None: """A helper method to check if the passed module's key name matches any of the target modules in the adapter_config. Args: config (`LoraConfig` | `LycorisConfig`): A config to match target modules from key (`str`): A key to search any matches in config Returns: `bool` | `re.Match[str]` | `None`: True of match object if key matches any target modules from config, False or None if no match found """ if isinstance(config.target_modules, str): target_module_found = re.fullmatch(config.target_modules, key) else: target_module_found = key in config.target_modules or any( key.endswith(f".{target_key}") for target_key in config.target_modules ) is_using_layer_indexes = getattr(config, "layers_to_transform", None) is not None layer_indexing_pattern = getattr(config, "layers_pattern", None) if is_using_layer_indexes and target_module_found: layers_pattern = COMMON_LAYERS_PATTERN if layer_indexing_pattern is None else layer_indexing_pattern layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern for pattern in layers_pattern: layer_index = re.match(f".*.{pattern}\.(\d+)\.*", key) if layer_index is not None: layer_index = int(layer_index.group(1)) if isinstance(config.layers_to_transform, int): target_module_found = layer_index == config.layers_to_transform else: target_module_found = layer_index in config.layers_to_transform break else: target_module_found = False return target_module_found def inspect_matched_modules(tuner: BaseTuner, adapter_name: str = "default") -> dict: """ A helper function to inspect the set of matched and unmatched modules for a PEFT model and the given adapter. """ config = tuner.peft_config[adapter_name] key_list = [key for key, _ in tuner.model.named_modules()] module_dict = {"matched": [], "unmatched": []} for key in key_list: if tuner._check_target_module_exists(config, key): module_dict["matched"].append(key) else: module_dict["unmatched"].append(key) return module_dict
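# A minimal sketch of how `check_target_module_exists` above behaves for the two supported forms of
# `target_modules`: a regex string (matched against the full module key) versus a list of suffixes
# (matched on ".<name>" endings), optionally restricted by `layers_to_transform`. `_TinyConfig` is a
# hypothetical stand-in for a real config such as `LoraConfig`, used only to keep the example
# self-contained.
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class _TinyConfig:
    target_modules: Union[str, List[str]]
    layers_to_transform: Optional[List[int]] = None
    layers_pattern: Optional[str] = None


if __name__ == "__main__":
    key = "model.layers.3.self_attn.q_proj"

    # Suffix-style matching: any key ending in ".q_proj" matches.
    print(bool(check_target_module_exists(_TinyConfig(["q_proj"]), key)))  # True
    # Regex-style matching: the pattern must span the whole key.
    print(bool(check_target_module_exists(_TinyConfig(r".*\.q_proj"), key)))  # True
    # Layer indexing: the match is kept only for the listed block indices.
    print(bool(check_target_module_exists(_TinyConfig(["q_proj"], layers_to_transform=[0, 1]), key)))  # False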
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/tuners/lycoris_utils.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from abc import abstractmethod from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Set, Type, Union import torch import torch.nn as nn from tqdm import tqdm from peft.config import PeftConfig from peft.utils import ( ModulesToSaveWrapper, _get_submodules, ) from .tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists @dataclass class LycorisConfig(PeftConfig): r""" A base config for LyCORIS like adapters """ rank_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" ) }, ) alpha_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `alpha`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" ) }, ) class LycorisLayer(BaseTunerLayer): r""" A base layer for LyCORIS like adapters """ # adapter_layer_names needs to be defined on the child class other_param_names = ("r", "alpha", "scaling", "rank_dropout", "module_dropout") def __init__(self, base_layer: nn.Module) -> None: self.base_layer = base_layer self.r = {} self.alpha = {} self.scaling = {} self.rank_dropout = {} self.module_dropout = {} # Tuner info self._disable_adapters = False self.merged_adapters = [] @property @abstractmethod def _available_adapters(self) -> Set[str]: ... def _init_empty_weights(self, cls, *args, **kwargs) -> None: # A helper method that allows to initialize the layer of the given class without spending time to initialize the # model weights. The implementation is inspired by # https://pytorch.org/docs/stable/generated/torch.nn.utils.skip_init.html but this function cannot be used # directly. # Instead of this approach, it would be possible to bypass the __init__ of the class but that runs the risk of # omitting important logic inside that __init__. kwargs = kwargs.copy() final_device = kwargs.pop("device", "cpu") cls.__init__(self, *args, device="meta", **kwargs) self.to_empty(device=final_device) @abstractmethod def create_adapter_parameters(self, adapter_name: str, r: int, **kwargs): ... # TODO: refactor LoRA to use the same approach @abstractmethod def _get_delta_activations(self, adapter_name: str, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: """Activations added on top of the base layer output (i.e. after the base layer forward pass)""" @abstractmethod def get_delta_weight(self, adapter_name: str) -> torch.Tensor: ... def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. 
" f"You are now additionally merging {','.join(self.active_adapters)}." ) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter in self._available_adapters: base_layer = self.get_base_layer() if safe_merge: orig_weights = base_layer.weight.data orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) @abstractmethod def reset_adapter_parameters(self, adapter_name: str): ... def set_scale(self, adapter, scale): if adapter not in self._available_adapters: # Ignore the case where the adapter is not in the layer return self.scaling[adapter] = scale * self.alpha[adapter] / self.r[adapter] def scale_layer(self, scale: float) -> None: if scale == 1: return for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue self.scaling[active_adapter] *= scale def unmerge(self) -> None: if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self._available_adapters: self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def unscale_layer(self, scale=None) -> None: for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue if scale is None: self.scaling[active_adapter] = self.alpha[active_adapter] / self.r[active_adapter] else: self.scaling[active_adapter] /= scale @abstractmethod def update_layer(self, adapter_name: str, r: int, alpha: float, **kwargs): ... class LycorisTuner(BaseTuner): r""" A base tuner for LyCORIS like adapters """ prefix: str layers_mapping: Dict[Type[torch.nn.Module], Type[LycorisLayer]] def __init__(self, model, config, adapter_name): super().__init__(model, config, adapter_name) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) @staticmethod def _check_target_module_exists(config, key): return check_target_module_exists(config, key) @abstractmethod def _create_and_replace( self, config: LycorisConfig, adapter_name: str, target: Union[LycorisLayer, nn.Module], target_name, parent, current_key, **optional_kwargs, ): ... 
@classmethod def _create_new_module(cls, config: LycorisConfig, adapter_name: str, target: nn.Module, **kwargs) -> LycorisLayer: # Find corresponding subtype of provided target module new_module_cls = None for subtype, target_cls in cls.layers_mapping.items(): if ( hasattr(target, "base_layer") and isinstance(target.get_base_layer(), subtype) and isinstance(target, BaseTunerLayer) ): # nested tuner layers are allowed new_module_cls = target_cls break elif isinstance(target, subtype): new_module_cls = target_cls break # We didn't find corresponding type, so adapter for this layer is not supported if new_module_cls is None: supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) raise ValueError( f"Target module of type {type(target)} not supported, " f"currently only adapters for {supported_modules} are supported" ) if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if isinstance(target_base_layer, torch.nn.Conv2d): new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) else: supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) raise ValueError( f"Target module of type {type(target)} not supported, " f"currently only adapters for {supported_modules} are supported" ) return new_module def _mark_only_adapters_as_trainable(self) -> None: for n, p in self.model.named_parameters(): if self.prefix not in n: p.requires_grad = False @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: raise ValueError("Please specify `target_modules` in `peft_config`") return peft_config def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) # dispatch to correct device for name, module in new_module.named_modules(): if self.prefix in name: module.to(child.weight.device) def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def _unload_and_optionally_merge( self, merge: bool = True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[List[str]] = None, ): if merge: if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge LOHA layers when the model is gptq quantized") key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules 
part of `modules_to_save` setattr(parent, target_name, target.modules_to_save[target.active_adapter]) return self.model def enable_adapter_layers(self): self._set_adapter_layers(enabled=True) def disable_adapter_layers(self): self._set_adapter_layers(enabled=False) def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[List[str]] = None ): r""" This method merges the adapter layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self): """ Gets back the base model by removing all the lora modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False) def set_adapter(self, adapter_name): for module in self.model.modules(): if isinstance(module, LycorisLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) def delete_adapter(self, adapter_name: str): """ Deletes an existing adapter. Args: adapter_name (`str`): Name of the adapter to be deleted. """ if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, LycorisLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or []
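# A minimal sketch of the weight bookkeeping performed by `merge`/`unmerge` above: merging adds
# `get_delta_weight(adapter)` to the base layer's weight and unmerging subtracts it again, so a
# merge followed by an unmerge restores the original weights up to floating point error. The plain
# tensors below stand in for a real LycorisLayer; nothing here is part of the module's API.
def _toy_merge_roundtrip() -> bool:
    base_weight = torch.randn(4, 4)
    original = base_weight.clone()
    delta = 0.1 * torch.randn(4, 4)  # stands in for get_delta_weight(adapter_name)

    base_weight += delta  # merge():   W <- W + delta_W
    base_weight -= delta  # unmerge(): W <- W - delta_W
    return torch.allclose(base_weight, original, atol=1e-6)


if __name__ == "__main__":
    print(_toy_merge_roundtrip())  # True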
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/tuners/__init__.py
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module while still preserving the other warnings, so this module is not checked at all.

# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel
from .lora import LoraConfig, LoraModel
from .loha import LoHaConfig, LoHaModel
from .lokr import LoKrConfig, LoKrModel
from .ia3 import IA3Config, IA3Model
from .adalora import AdaLoraConfig, AdaLoraModel
from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType
from .prefix_tuning import PrefixEncoder, PrefixTuningConfig
from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit
from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/prompt_tuning/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import torch from .config import PromptTuningInit class PromptEmbedding(torch.nn.Module): """ The model to encode virtual tokens into prompt embeddings. Args: config ([`PromptTuningConfig`]): The configuration of the prompt embedding. word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model. **Attributes**: - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding. Example: ```py >>> from peft import PromptEmbedding, PromptTuningConfig >>> config = PromptTuningConfig( ... peft_type="PROMPT_TUNING", ... task_type="SEQ_2_SEQ_LM", ... num_virtual_tokens=20, ... token_dim=768, ... num_transformer_submodules=1, ... num_attention_heads=12, ... num_layers=12, ... prompt_tuning_init="TEXT", ... prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral", ... tokenizer_name_or_path="t5-base", ... ) >>> # t5_model.shared is the word embeddings of the base model >>> prompt_embedding = PromptEmbedding(config, t5_model.shared) ``` Input Shape: (`batch_size`, `total_virtual_tokens`) Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`) """ def __init__(self, config, word_embeddings): super().__init__() total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim) if config.prompt_tuning_init == PromptTuningInit.TEXT: from transformers import AutoTokenizer tokenizer_kwargs = config.tokenizer_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path, **tokenizer_kwargs) init_text = config.prompt_tuning_init_text init_token_ids = tokenizer(init_text)["input_ids"] # Trim or iterate until num_text_tokens matches total_virtual_tokens num_text_tokens = len(init_token_ids) if num_text_tokens > total_virtual_tokens: init_token_ids = init_token_ids[:total_virtual_tokens] elif num_text_tokens < total_virtual_tokens: num_reps = math.ceil(total_virtual_tokens / num_text_tokens) init_token_ids = init_token_ids * num_reps init_token_ids = init_token_ids[:total_virtual_tokens] init_token_ids = torch.LongTensor(init_token_ids).to(word_embeddings.weight.device) word_embedding_weights = word_embeddings(init_token_ids).detach().clone() word_embedding_weights = word_embedding_weights.to(torch.float32) self.embedding.weight = torch.nn.Parameter(word_embedding_weights) def forward(self, indices): # Just get embeddings prompt_embeddings = self.embedding(indices) return prompt_embeddings
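# A minimal sketch of the TEXT-initialization tiling above: the tokenized init text is truncated or
# repeated so that exactly `total_virtual_tokens` ids remain before the word embeddings are copied.
# The helper below reproduces only that index arithmetic on plain Python lists;
# `_tile_init_token_ids` is illustrative and not part of the module's API.
def _tile_init_token_ids(init_token_ids: list, total_virtual_tokens: int) -> list:
    num_text_tokens = len(init_token_ids)
    if num_text_tokens > total_virtual_tokens:
        return init_token_ids[:total_virtual_tokens]
    num_reps = math.ceil(total_virtual_tokens / num_text_tokens)
    return (init_token_ids * num_reps)[:total_virtual_tokens]


if __name__ == "__main__":
    print(_tile_init_token_ids([7, 8, 9], 8))  # [7, 8, 9, 7, 8, 9, 7, 8]
    print(_tile_init_token_ids([7, 8, 9], 2))  # [7, 8]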
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/prompt_tuning/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import PromptTuningConfig, PromptTuningInit from .model import PromptEmbedding __all__ = ["PromptTuningConfig", "PromptEmbedding", "PromptTuningInit"]
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/prompt_tuning/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from dataclasses import dataclass, field from typing import Optional, Union from peft.config import PromptLearningConfig from peft.utils import PeftType class PromptTuningInit(str, enum.Enum): TEXT = "TEXT" RANDOM = "RANDOM" @dataclass class PromptTuningConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEmbedding`]. Args: prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding. prompt_tuning_init_text (`str`, *optional*): The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`. tokenizer_name_or_path (`str`, *optional*): The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`. tokenizer_kwargs (`dict`, *optional*): The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if `prompt_tuning_init` is `TEXT`. """ prompt_tuning_init: Union[PromptTuningInit, str] = field( default=PromptTuningInit.RANDOM, metadata={"help": "How to initialize the prompt tuning parameters"}, ) prompt_tuning_init_text: Optional[str] = field( default=None, metadata={ "help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) tokenizer_kwargs: Optional[dict] = field( default=None, metadata={ "help": ( "The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if prompt_tuning_init is " "`TEXT`" ), }, ) def __post_init__(self): self.peft_type = PeftType.PROMPT_TUNING if self.tokenizer_kwargs and (self.prompt_tuning_init != PromptTuningInit.TEXT): raise ValueError( f"tokenizer_kwargs only valid when using prompt_tuning_init='{PromptTuningInit.TEXT.value}'." )
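# A minimal sketch of constructing the config above: `tokenizer_kwargs` is only accepted together
# with TEXT initialization, otherwise `__post_init__` raises. Only dataclass instances are built
# here, so no tokenizer or model is loaded; the "gpt2" path and the init text are illustrative
# values, not requirements.
if __name__ == "__main__":
    config = PromptTuningConfig(
        task_type="CAUSAL_LM",
        num_virtual_tokens=8,
        prompt_tuning_init=PromptTuningInit.TEXT,
        prompt_tuning_init_text="Classify the sentiment of this review:",
        tokenizer_name_or_path="gpt2",
        tokenizer_kwargs={"use_fast": True},
    )
    print(config.peft_type)  # PeftType.PROMPT_TUNING

    try:
        PromptTuningConfig(tokenizer_kwargs={"use_fast": True})  # RANDOM init -> rejected
    except ValueError as err:
        print(err)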
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/p_tuning/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Based on https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/prompt_encoder.py # with some refactor import warnings import torch from .config import PromptEncoderConfig, PromptEncoderReparameterizationType class PromptEncoder(torch.nn.Module): """ The prompt encoder network that is used to generate the virtual token embeddings for p-tuning. Args: config ([`PromptEncoderConfig`]): The configuration of the prompt encoder. Example: ```py >>> from peft import PromptEncoder, PromptEncoderConfig >>> config = PromptEncoderConfig( ... peft_type="P_TUNING", ... task_type="SEQ_2_SEQ_LM", ... num_virtual_tokens=20, ... token_dim=768, ... num_transformer_submodules=1, ... num_attention_heads=12, ... num_layers=12, ... encoder_reparameterization_type="MLP", ... encoder_hidden_size=768, ... ) >>> prompt_encoder = PromptEncoder(config) ``` **Attributes**: - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder. - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`. - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and `encoder_reparameterization_type="LSTM"`. - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model. - **input_size** (`int`) -- The input size of the prompt encoder. - **output_size** (`int`) -- The output size of the prompt encoder. - **hidden_size** (`int`) -- The hidden size of the prompt encoder. - **total_virtual_tokens** (`int`): The total number of virtual tokens of the prompt encoder. - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt encoder. 
Input shape: (`batch_size`, `total_virtual_tokens`) Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`) """ def __init__(self, config): super().__init__() self.token_dim = config.token_dim self.input_size = self.token_dim self.output_size = self.token_dim self.hidden_size = config.encoder_hidden_size self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules self.encoder_type = config.encoder_reparameterization_type # embedding self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim) if not config.inference_mode: if self.encoder_type == PromptEncoderReparameterizationType.LSTM: lstm_dropout = config.encoder_dropout num_layers = config.encoder_num_layers # LSTM self.lstm_head = torch.nn.LSTM( input_size=self.input_size, hidden_size=self.hidden_size, num_layers=num_layers, dropout=lstm_dropout, bidirectional=True, batch_first=True, ) self.mlp_head = torch.nn.Sequential( torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size * 2, self.output_size), ) elif self.encoder_type == PromptEncoderReparameterizationType.MLP: encoder_num_layers_default = PromptEncoderConfig.encoder_num_layers if config.encoder_num_layers != encoder_num_layers_default: warnings.warn( f"for {self.encoder_type.value}, the argument `encoder_num_layers` is ignored. " f"Exactly {encoder_num_layers_default} MLP layers are used." ) layers = [ torch.nn.Linear(self.input_size, self.hidden_size), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size, self.hidden_size), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size, self.output_size), ] self.mlp_head = torch.nn.Sequential(*layers) else: raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.") def forward(self, indices): input_embeds = self.embedding(indices) if self.encoder_type == PromptEncoderReparameterizationType.LSTM: output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0]) elif self.encoder_type == PromptEncoderReparameterizationType.MLP: output_embeds = self.mlp_head(input_embeds) else: raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.") return output_embeds
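# A minimal shape check for the encoder above: virtual-token indices of shape
# (batch_size, total_virtual_tokens) are mapped to embeddings of shape
# (batch_size, total_virtual_tokens, token_dim). `_TinyPromptEncoderConfig` is a hypothetical
# stand-in exposing just the attributes `PromptEncoder` reads, so the example runs without the
# rest of the library wiring.
from dataclasses import dataclass


@dataclass
class _TinyPromptEncoderConfig:
    token_dim: int = 16
    encoder_hidden_size: int = 32
    num_virtual_tokens: int = 4
    num_transformer_submodules: int = 1
    encoder_reparameterization_type: PromptEncoderReparameterizationType = (
        PromptEncoderReparameterizationType.MLP
    )
    encoder_num_layers: int = 2
    encoder_dropout: float = 0.0
    inference_mode: bool = False


if __name__ == "__main__":
    tiny_config = _TinyPromptEncoderConfig()
    encoder = PromptEncoder(tiny_config)
    indices = torch.arange(tiny_config.num_virtual_tokens).unsqueeze(0)  # shape (1, 4)
    print(encoder(indices).shape)  # torch.Size([1, 4, 16])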
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/p_tuning/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import PromptEncoderConfig, PromptEncoderReparameterizationType from .model import PromptEncoder __all__ = ["PromptEncoder", "PromptEncoderConfig", "PromptEncoderReparameterizationType"]
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/p_tuning/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from dataclasses import dataclass, field from typing import Union from peft.config import PromptLearningConfig from peft.utils import PeftType class PromptEncoderReparameterizationType(str, enum.Enum): MLP = "MLP" LSTM = "LSTM" @dataclass class PromptEncoderConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEncoder`]. Args: encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]): The type of reparameterization to use. encoder_hidden_size (`int`): The hidden size of the prompt encoder. encoder_num_layers (`int`): The number of layers of the prompt encoder. encoder_dropout (`float`): The dropout probability of the prompt encoder. """ encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field( default=PromptEncoderReparameterizationType.MLP, metadata={"help": "How to reparameterize the prompt encoder"}, ) encoder_hidden_size: int = field( default=None, metadata={"help": "The hidden size of the prompt encoder"}, ) encoder_num_layers: int = field( default=2, metadata={"help": "The number of layers of the prompt encoder"}, ) encoder_dropout: float = field( default=0.0, metadata={"help": "The dropout of the prompt encoder"}, ) def __post_init__(self): self.peft_type = PeftType.P_TUNING
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/loha/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from itertools import chain from typing import Dict, Type, Union import torch from torch import nn from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner from .layer import Conv2d, Linear, LoHaLayer class LoHaModel(LycorisTuner): """ Creates Low-Rank Hadamard Product model from a pretrained model. The method is partially described in https://arxiv.org/abs/2108.06098 Current implementation heavily borrows from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py Args: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. config ([`LoHaConfig`]): The configuration of the LoHa model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. Returns: `torch.nn.Module`: The LoHa model. Example: ```py >>> from diffusers import StableDiffusionPipeline >>> from peft import LoHaModel, LoHaConfig >>> config_te = LoHaConfig( ... r=8, ... lora_alpha=32, ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... ) >>> config_unet = LoHaConfig( ... r=8, ... lora_alpha=32, ... target_modules=[ ... "proj_in", ... "proj_out", ... "to_k", ... "to_q", ... "to_v", ... "to_out.0", ... "ff.net.0.proj", ... "ff.net.2", ... ], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... use_effective_conv2d=True, ... ) >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> model.text_encoder = LoHaModel(model.text_encoder, config_te, "default") >>> model.unet = LoHaModel(model.unet, config_unet, "default") ``` **Attributes**: - **model** ([`~torch.nn.Module`]) -- The model to be adapted. - **peft_config** ([`LoHaConfig`]): The configuration of the LoHa model. """ prefix: str = "hada_" layers_mapping: Dict[Type[torch.nn.Module], Type[LoHaLayer]] = { torch.nn.Conv2d: Conv2d, torch.nn.Linear: Linear, } def _create_and_replace( self, config: LycorisConfig, adapter_name: str, target: Union[LoHaLayer, nn.Module], target_name: str, parent: nn.Module, current_key: str, **optional_kwargs, ) -> None: """ A private method to create and replace the target module with the adapter module. """ # Regexp matching - Find key which matches current target_name in patterns provided pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys())) target_name_key = next(filter(lambda key: re.match(f"(.*\.)?{key}$", current_key), pattern_keys), target_name) kwargs = config.to_dict() kwargs["r"] = config.rank_pattern.get(target_name_key, config.r) kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha) if isinstance(target, LoHaLayer): target.update_layer(adapter_name, **kwargs) else: new_module = self._create_new_module(config, adapter_name, target, **kwargs) self._replace_module(parent, target_name, new_module, target)
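# A minimal sketch of the pattern lookup in `_create_and_replace` above: an entry in `rank_pattern`
# (or `alpha_pattern`) overrides the default `r`/`alpha` whenever the current module key matches
# `(.*\.)?<pattern>$`. `_resolve_rank` below reproduces only that lookup and is not part of the
# module's API.
def _resolve_rank(current_key: str, default_r: int, rank_pattern: dict) -> int:
    target_name_key = next(
        (key for key in rank_pattern if re.match(rf"(.*\.)?{key}$", current_key)),
        None,
    )
    return rank_pattern.get(target_name_key, default_r)


if __name__ == "__main__":
    rank_pattern = {"model.decoder.layers.0.encoder_attn.k_proj": 16, "fc1": 4}
    print(_resolve_rank("model.decoder.layers.0.encoder_attn.k_proj", 8, rank_pattern))  # 16
    print(_resolve_rank("model.decoder.layers.3.fc1", 8, rank_pattern))  # 4
    print(_resolve_rank("model.decoder.layers.3.fc2", 8, rank_pattern))  # 8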
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/loha/layer.py
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Any, Set, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from peft.tuners.lycoris_utils import LycorisLayer


class LoHaLayer(nn.Module, LycorisLayer):
    # All names of layers that may contain adapter weights
    adapter_layer_names = ("hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b", "hada_t1", "hada_t2")
    # other_param_names is defined on parent class

    def __init__(self, base_layer: nn.Module):
        super().__init__()
        LycorisLayer.__init__(self, base_layer)

        # LoHa info
        self.hada_w1_a = nn.ParameterDict({})
        self.hada_w1_b = nn.ParameterDict({})
        self.hada_w2_a = nn.ParameterDict({})
        self.hada_w2_b = nn.ParameterDict({})
        self.hada_t1 = nn.ParameterDict({})
        self.hada_t2 = nn.ParameterDict({})

    @property
    def _available_adapters(self) -> Set[str]:
        return {*self.hada_w1_a, *self.hada_w1_b, *self.hada_w2_a, *self.hada_w2_b, *self.hada_t1, *self.hada_t2}

    def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...]):
        # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L130C9-L143C75
        if len(shape) == 4:
            self.hada_t1[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
            self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0]))  # out_dim, 1-mode
            self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))  # in_dim , 2-mode

            self.hada_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
            self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0]))  # out_dim, 1-mode
            self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))  # in_dim , 2-mode
        else:
            self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
            self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))

            self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
            self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))

    def reset_adapter_parameters(self, adapter_name: str):
        # Original implementation performs initialization with normal distribution
        # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158
        # FedPara paper proposes to perform He initialization, let's stick with it
        # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization
        if adapter_name in self.hada_w1_a.keys():
            nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
            nn.init.zeros_(self.hada_w2_b[adapter_name])
        if adapter_name in self.hada_t1.keys():
            nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))

    def reset_adapter_parameters_random(self, adapter_name: str):
        # Original implementation performs initialization with normal distribution
        # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158
        # FedPara paper proposes to perform He initialization, let's stick with it
        # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization
        if adapter_name in self.hada_w1_a.keys():
            nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.hada_w2_b[adapter_name], a=math.sqrt(5))
        if adapter_name in self.hada_t1.keys():
            nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))

    def update_layer(
        self,
        adapter_name: str,
        r: int,
        alpha: float,
        rank_dropout: float,
        module_dropout: float,
        init_weights: bool,
        use_effective_conv2d: bool = False,
        **kwargs,
    ) -> None:
        """Internal function to create loha adapter

        Args:
            adapter_name (`str`): Name for the adapter to add.
            r (`int`): Rank for the added adapter.
            alpha (`float`): Alpha for the added adapter.
            rank_dropout (`float`): The dropout probability for rank dimension during training.
            module_dropout (`float`): The dropout probability for disabling adapter during training.
            init_weights (`bool`): Whether to initialize weights.
            use_effective_conv2d (`bool`, *optional*, defaults to `False`):
                Use parameter effective decomposition for Conv2d with ksize > 1.
        """
        self.r[adapter_name] = r
        self.alpha[adapter_name] = alpha
        self.scaling[adapter_name] = alpha / r
        self.rank_dropout[adapter_name] = rank_dropout
        self.module_dropout[adapter_name] = module_dropout

        # Determine shape of LoHa weights
        base_layer = self.get_base_layer()
        if isinstance(base_layer, nn.Linear):
            shape = tuple(base_layer.weight.shape)
        elif isinstance(base_layer, nn.Conv2d):
            use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
            if use_effective_conv2d:
                shape = (base_layer.out_channels, base_layer.in_channels, *base_layer.kernel_size)
            else:
                shape = (
                    base_layer.out_channels,
                    base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1],
                )
        else:
            raise TypeError(f"LoHa is not implemented for base layers of type {type(base_layer).__name__}")

        # Create weights with provided shape
        self.create_adapter_parameters(adapter_name, r, shape)

        # Initialize weights
        if init_weights:
            self.reset_adapter_parameters(adapter_name)
        else:
            self.reset_adapter_parameters_random(adapter_name)

        # Move new weights to device
        weight = getattr(self.get_base_layer(), "weight", None)
        if weight is not None:
            # the layer is already completely initialized, this is an update
            if weight.dtype.is_floating_point or weight.dtype.is_complex:
                self.to(weight.device, dtype=weight.dtype)
            else:
                self.to(weight.device)
        self.set_adapter(self.active_adapters)

    def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
        # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L178
        if adapter_name in self.hada_t1.keys():
            weight = make_weight_cp(
                self.hada_t1[adapter_name],
                self.hada_w1_a[adapter_name],
                self.hada_w1_b[adapter_name],
                self.hada_t2[adapter_name],
                self.hada_w2_a[adapter_name],
                self.hada_w2_b[adapter_name],
                scale=torch.tensor(self.scaling[adapter_name]),
            )
        else:
            weight = make_weight(
                self.hada_w1_a[adapter_name],
                self.hada_w1_b[adapter_name],
                self.hada_w2_a[adapter_name],
                self.hada_w2_b[adapter_name],
                scale=torch.tensor(self.scaling[adapter_name]),
            )

        base_layer = self.get_base_layer()
        weight = weight.reshape(base_layer.weight.shape)

        # Perform rank dropout during training - drop rows of addition weights
        rank_dropout = self.rank_dropout[adapter_name]
        if self.training and rank_dropout:
            drop = (torch.rand(weight.size(0)) > rank_dropout).to(weight.dtype)
            drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
            # TODO: Investigate if there should be a scaler like in normal dropout during training
            # Original implementation doesn't have it
            # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L193
            drop /= drop.mean()
            weight *= drop

        return weight

    def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        previous_dtype = x.dtype

        if self.disable_adapters:
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif self.merged:
            result = self.base_layer(x, *args, **kwargs)
        else:
            result = self.base_layer(x, *args, **kwargs)

            # Execute all the adapters
            for active_adapter in self.active_adapters:
                if active_adapter not in self._available_adapters:
                    continue

                module_dropout = self.module_dropout[active_adapter]

                # Modify current execution weights
                if (not self.training) or (self.training and torch.rand(1) > module_dropout):
                    result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)

        result = result.to(previous_dtype)
        return result


class Linear(LoHaLayer):
    """LoHa implemented in Linear layer"""

    def __init__(
        self,
        base_layer: nn.Module,
        adapter_name: str = "default",
        r: int = 0,
        alpha: float = 0.0,
        rank_dropout: float = 0.0,
        module_dropout: float = 0.0,
        init_weights: bool = True,
        **kwargs,
    ):
        super().__init__(base_layer)

        # Create adapter and set it active
        self._active_adapter = adapter_name
        self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)

    def _get_delta_activations(
        self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
    ) -> torch.Tensor:
        delta_weight = self.get_delta_weight(adapter_name)
        # don't add bias here, because the bias is already included in the output of the base_layer
        return F.linear(input, delta_weight)

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "loha." + rep


class Conv2d(LoHaLayer):
    """LoHa implemented in Conv2d layer"""

    def __init__(
        self,
        base_layer: nn.Module,
        adapter_name: str = "default",
        r: int = 0,
        alpha: float = 0.0,
        rank_dropout: float = 0.0,
        module_dropout: float = 0.0,
        use_effective_conv2d: bool = False,
        init_weights: bool = True,
        **kwargs,
    ):
        super().__init__(base_layer)

        # Create adapter and set it active
        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
        )

    def _get_delta_activations(
        self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
    ) -> torch.Tensor:
        delta_weight = self.get_delta_weight(adapter_name)
        # don't add bias here, because the bias is already included in the output of the base_layer
        base_layer = self.get_base_layer()
        return F.conv2d(
            input,
            delta_weight,
            stride=base_layer.stride,
            padding=base_layer.padding,
            dilation=base_layer.dilation,
            groups=base_layer.groups,
        )

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "loha." + rep


# Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L9


class HadaWeight(torch.autograd.Function):
    @staticmethod
    def forward(ctx, w1a, w1b, w2a, w2b, scale=torch.tensor(1)):
        ctx.save_for_backward(w1a, w1b, w2a, w2b, scale)
        diff_weight = ((w1a @ w1b) * (w2a @ w2b)) * scale
        return diff_weight

    @staticmethod
    def backward(ctx, grad_out):
        (w1a, w1b, w2a, w2b, scale) = ctx.saved_tensors
        grad_out = grad_out * scale
        temp = grad_out * (w2a @ w2b)
        grad_w1a = temp @ w1b.T
        grad_w1b = w1a.T @ temp

        temp = grad_out * (w1a @ w1b)
        grad_w2a = temp @ w2b.T
        grad_w2b = w2a.T @ temp

        del temp
        return grad_w1a, grad_w1b, grad_w2a, grad_w2b, None


class HadaWeightCP(torch.autograd.Function):
    @staticmethod
    def forward(ctx, t1, w1a, w1b, t2, w2a, w2b, scale=torch.tensor(1)):
        ctx.save_for_backward(t1, w1a, w1b, t2, w2a, w2b, scale)

        rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", t1, w1b, w1a)
        rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", t2, w2b, w2a)

        return rebuild1 * rebuild2 * scale

    @staticmethod
    def backward(ctx, grad_out):
        (t1, w1a, w1b, t2, w2a, w2b, scale) = ctx.saved_tensors
        grad_out = grad_out * scale

        temp = torch.einsum("i j k l, j r -> i r k l", t2, w2b)
        rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w2a)

        grad_w = rebuild * grad_out
        del rebuild

        grad_w1a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w)
        grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w1a.T)
        del grad_w, temp

        grad_w1b = torch.einsum("i r k l, i j k l -> r j", t1, grad_temp)
        grad_t1 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w1b.T)
        del grad_temp

        temp = torch.einsum("i j k l, j r -> i r k l", t1, w1b)
        rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w1a)

        grad_w = rebuild * grad_out
        del rebuild

        grad_w2a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w)
        grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w2a.T)
        del grad_w, temp

        grad_w2b = torch.einsum("i r k l, i j k l -> r j", t2, grad_temp)
        grad_t2 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w2b.T)
        del grad_temp
        return grad_t1, grad_w1a, grad_w1b, grad_t2, grad_w2a, grad_w2b, None


def make_weight(w1a, w1b, w2a, w2b, scale):
    return HadaWeight.apply(w1a, w1b, w2a, w2b, scale)


def make_weight_cp(t1, w1a, w1b, t2, w2a, w2b, scale):
    return HadaWeightCP.apply(t1, w1a, w1b, t2, w2a, w2b, scale)
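

# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# `make_weight` above returns the element-wise (Hadamard) product of two
# low-rank factorizations, scaled by alpha / r:
#
#     delta_W = (w1a @ w1b) * (w2a @ w2b) * scale
#
# Assuming a hypothetical wrapped `nn.Linear(16, 32)` and the default adapter
# name "default", the shapes would work out roughly as follows:
#
#     layer = Linear(nn.Linear(16, 32), adapter_name="default", r=4, alpha=8.0)
#     delta = layer.get_delta_weight("default")  # torch.Size([32, 16])
#
# With `init_weights=True` (the default), `hada_w2_b` is zero-initialized, so
# `delta` starts out as all zeros and the adapter is a no-op after creation.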
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/loha/__init__.py
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .config import LoHaConfig
from .layer import Conv2d, Linear, LoHaLayer
from .model import LoHaModel


__all__ = ["LoHaConfig", "LoHaModel", "Conv2d", "Linear", "LoHaLayer"]
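
# --- Editor's note: illustrative usage sketch, not part of the upstream file. ---
# Applying a LoHa adapter through the public PEFT API would roughly look like:
#
#     from peft import LoHaConfig, get_peft_model
#
#     config = LoHaConfig(r=8, alpha=16, target_modules=["q_proj", "v_proj"])
#     peft_model = get_peft_model(base_model, config)
#
# `base_model` and the `target_modules` names are placeholders; they depend on
# the architecture being adapted.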
0