# Fork that adds only the correct stream to this kernel in order
# to make cuda graphs work.
awq_commit := bd1dc2d5254345cc76ab71894651fb821275bdd4

awq:
	rm -rf llm-awq
	git clone https://github.com/huggingface/llm-awq

build-awq: awq
	cd llm-awq/ && git fetch && git checkout $(awq_commit)
	cd llm-awq/awq/kernels && python setup.py build

install-awq: build-awq
	pip uninstall awq_inference_engine -y || true
	cd llm-awq/awq/kernels && python setup.py install
text-generation-inference/server/Makefile-awq/0
{ "file_path": "text-generation-inference/server/Makefile-awq", "repo_id": "text-generation-inference", "token_count": 183 }
// Adapted from turboderp exllama: https://github.com/turboderp/exllama

#ifndef _cuda_buffers_cuh
#define _cuda_buffers_cuh

#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>

const int CUDA_MAX_DEVICES = 16;

// #ifndef _cuda_buffers_cu
// extern __constant__ half2 q4_table[16][256];
// #endif

class CudaBuffers
{
public:
    int device;

    half* temp_state;   // [max_hidden_rows * intermediate_size]
    half* temp_dq;      // size of largest quant tensor * 8

    cudaStream_t alt_stream_1;
    cudaStream_t alt_stream_2;
    cudaStream_t alt_stream_3;
    cudaEvent_t alt_stream_1_done;
    cudaEvent_t alt_stream_2_done;
    cudaEvent_t alt_stream_3_done;

    CudaBuffers
    (
        int _device,
        half* _temp_state,
        half* _temp_dq
    );
    ~CudaBuffers();
};

CudaBuffers* get_buffers(const int device_index);

void prepare_buffers_cuda
(
    int _device,
    half* _temp_state,
    half* _temp_dq
);

void cleanup_buffers_cuda();

#endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cuh", "repo_id": "text-generation-inference", "token_count": 471 }
#ifndef _matrix_view_cuh
#define _matrix_view_cuh

#include <cuda_runtime.h>
#include <cuda_fp16.h>

#include "quant/qdq_util.cuh"

class MatrixView_half
{
public:
    const half* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_half(const half* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; }
    __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; }
    __device__ __forceinline__ half2 item_half2half2(int row, int column) const { return __half2half2(data[row * width + column]); }
    __device__ __forceinline__ const half* item_ptr(int row, int column) const { return &data[row * width + column]; }

    __device__ __forceinline__ void item4(half (&items)[4], int row, int column) const
    {
        half2* ptr = (half2*) item_ptr(row, column);
        half2 i01 = ptr[0];
        half2 i23 = ptr[1];
        items[0] = __low2half(i01);
        items[1] = __high2half(i01);
        items[2] = __low2half(i23);
        items[3] = __high2half(i23);
    }

    __device__ __forceinline__ void item4_f(float (&items)[4], int row, int column) const
    {
        half2* ptr = (half2*)item_ptr(row, column);
        half2 i01 = ptr[0];
        half2 i23 = ptr[1];
        items[0] = __half2float(__low2half(i01));
        items[1] = __half2float(__high2half(i01));
        items[2] = __half2float(__low2half(i23));
        items[3] = __half2float(__high2half(i23));
    }

    __device__ __forceinline__ void item4_h2(half2 (&items)[4], int row, int column) const
    {
        half2* ptr = (half2*)item_ptr(row, column);
        half2 i01 = ptr[0];
        half2 i23 = ptr[1];
        items[0] = __half2half2(__low2half(i01));
        items[1] = __half2half2(__high2half(i01));
        items[2] = __half2half2(__low2half(i23));
        items[3] = __half2half2(__high2half(i23));
    }
};

class MatrixView_half_rw
{
public:
    half* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_half_rw(half* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; }
    __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; }
    __device__ __forceinline__ half* item_ptr(int row, int column) { return &data[row * width + column]; }
    __device__ __forceinline__ void set(int row, int column, half value) { data[row * width + column] = value; }
    __device__ __forceinline__ void set_half2(int row, int column, half2 value) { ((half2*)data)[(row * width + column) / 2] = value; }

    __device__ __forceinline__ void set4(int row, int column, half v0, half v1, half v2, half v3)
    {
        half2 v01 = __halves2half2(v0, v1);
        half2 v23 = __halves2half2(v2, v3);
        half2* ptr = (half2*) item_ptr(row, column);
        ptr[0] = v01;
        ptr[1] = v23;
    }
};

class MatrixView_q4_row
{
public:
    const uint32_t* data;
    const int height;
    const int width;

    __device__ __forceinline__ MatrixView_q4_row(const uint32_t* data, const int height, const int width)
        : data(data), height(height), width(width)
    { }

    __device__ __forceinline__ int item(int row, int column) const
    {
        int shift = (column & 0x07) * 4;
        return (data[row * width / 8 + column / 8] >> shift) & 0x0f;
    }

    __device__ __forceinline__ void item2(int (&items)[2], int row, int column) const
    {
        int shift = (column & 0x07) * 4;
        uint32_t d = data[row * width / 8 + column / 8] >> shift;
        items[0] = d & 0x0f;
        items[1] = (d >> 4) & 0x0f;
    }

    __device__ __forceinline__ void item4(int (&items)[4], int row, int column) const
    {
        int shift = (column & 0x07) * 4;
        uint32_t d = data[row * width / 8 + column / 8] >> shift;
        items[0] = d & 0x0f;
        items[1] = (d >> 4) & 0x0f;
        items[2] = (d >> 8) & 0x0f;
        items[3] = (d >> 12) & 0x0f;
    }
};

#endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/matrix_view.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/matrix_view.cuh", "repo_id": "text-generation-inference", "token_count": 1862 }
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import torch

extra_cuda_cflags = ["-lineinfo", "-O3"]

if torch.version.hip:
    extra_cuda_cflags += ["-DHIPBLAS_USE_HIP_HALF"]

extra_compile_args = {
    "nvcc": extra_cuda_cflags,
}

setup(
    name="exllamav2_kernels",
    ext_modules=[
        CUDAExtension(
            name="exllamav2_kernels",
            sources=[
                "exllamav2_kernels/ext.cpp",
                "exllamav2_kernels/cuda/q_matrix.cu",
                "exllamav2_kernels/cuda/q_gemm.cu",
            ],
            extra_compile_args=extra_compile_args,
        )
    ],
    cmdclass={"build_ext": BuildExtension},
)
text-generation-inference/server/exllamav2_kernels/setup.py/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/setup.py", "repo_id": "text-generation-inference", "token_count": 363 }
import torch

from text_generation_server.utils.tokens import (
    StopSequenceCriteria,
    StoppingCriteria,
    FinishReason,
    batch_top_tokens,
)


def test_stop_sequence_criteria():
    criteria = StopSequenceCriteria("/test;")

    assert not criteria("/")
    assert not criteria("/test")
    assert criteria("/test;")
    assert not criteria("/test; ")


def test_stop_sequence_criteria_escape():
    criteria = StopSequenceCriteria("<|stop|>")

    assert not criteria("<")
    assert not criteria("<|stop")
    assert criteria("<|stop|>")
    assert not criteria("<|stop|> ")


def test_stopping_criteria():
    criteria = StoppingCriteria(0, [StopSequenceCriteria("/test;")], max_new_tokens=5)
    assert criteria(65827, "/test") == (False, None)
    assert criteria(30, ";") == (True, FinishReason.FINISH_REASON_STOP_SEQUENCE)


def test_stopping_criteria_eos():
    criteria = StoppingCriteria(0, [StopSequenceCriteria("/test;")], max_new_tokens=5)
    assert criteria(1, "") == (False, None)
    assert criteria(0, "") == (True, FinishReason.FINISH_REASON_EOS_TOKEN)


def test_stopping_criteria_max():
    criteria = StoppingCriteria(0, [StopSequenceCriteria("/test;")], max_new_tokens=5)
    assert criteria(1, "") == (False, None)
    assert criteria(1, "") == (False, None)
    assert criteria(1, "") == (False, None)
    assert criteria(1, "") == (False, None)
    assert criteria(1, "") == (True, FinishReason.FINISH_REASON_LENGTH)


def test_batch_top_tokens():
    top_n_tokens = [0, 2, 3, 4, 5]
    top_n_tokens_tensor = torch.tensor(top_n_tokens)
    inp_logprobs = torch.tensor([[-1.0, -3.0, -4.0, -2.0, -3.0]] * 5)
    accepted_ids = torch.ones_like(top_n_tokens_tensor)

    topn_tok_ids, topn_tok_logprobs = batch_top_tokens(
        top_n_tokens, top_n_tokens_tensor, inp_logprobs, accepted_ids
    )

    assert topn_tok_ids[0] == [[]]
    assert topn_tok_ids[1] == [[0, 3]]
    assert topn_tok_ids[2] == [[0, 3, 1, 4]]
    assert topn_tok_ids[3] == [[0, 3, 1, 4]]
    assert topn_tok_ids[4] == [[0, 3, 1, 4, 2]]

    assert topn_tok_logprobs[0] == [[]]
    assert topn_tok_logprobs[1] == [[-1, -2]]
    assert topn_tok_logprobs[2] == [[-1, -2, -3, -3]]
    assert topn_tok_logprobs[3] == [[-1, -2, -3, -3]]
    assert topn_tok_logprobs[4] == [[-1, -2, -3, -3, -4]]

    # Now let's make second member of the batch be speculated
    inp_logprobs = torch.tensor([[-1.0, -3.0, -4.0, -2.0, -3.0]] * 5 * 2)
    accepted_ids[1] = 2
    topn_tok_ids, topn_tok_logprobs = batch_top_tokens(
        top_n_tokens, top_n_tokens_tensor, inp_logprobs, accepted_ids
    )

    assert topn_tok_ids[0] == [[]]
    assert topn_tok_ids[1] == [[0, 3], [0, 3]]
    assert topn_tok_ids[2] == [[0, 3, 1, 4]]
    assert topn_tok_ids[3] == [[0, 3, 1, 4]]
    assert topn_tok_ids[4] == [[0, 3, 1, 4, 2]]

    assert topn_tok_logprobs[0] == [[]]
    assert topn_tok_logprobs[1] == [[-1, -2], [-1, -2]]
    assert topn_tok_logprobs[2] == [[-1, -2, -3, -3]]
    assert topn_tok_logprobs[3] == [[-1, -2, -3, -3]]
    assert topn_tok_logprobs[4] == [[-1, -2, -3, -3, -4]]
text-generation-inference/server/tests/utils/test_tokens.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_tokens.py", "repo_id": "text-generation-inference", "token_count": 1427 }
from typing import Optional
from contextvars import ContextVar
from contextlib import contextmanager

import flashinfer
import torch

prefill_state: ContextVar[flashinfer.BatchPrefillWithRaggedKVCacheWrapper] = ContextVar(
    "prefill_state"
)

prefill_with_paged_kv_state: ContextVar[
    flashinfer.BatchPrefillWithPagedKVCacheWrapper
] = ContextVar("prefill_with_paged_kv_state")

decode_state: ContextVar[flashinfer.BatchDecodeWithPagedKVCacheWrapper] = ContextVar(
    "decode_state"
)

workspace: Optional[torch.Tensor] = None


def get_workspace(device):
    """Get shared flashinfer workspace."""
    global workspace
    if workspace is None:
        workspace = torch.empty(128 * 1024 * 1024, dtype=torch.uint8, device=device)
    return workspace


def create_prefill_with_paged_kv_state(
    *,
    device: torch.device,
):
    """Create a prefill state that uses the KV cache."""
    workspace_buffer = get_workspace(device)
    return flashinfer.BatchPrefillWithPagedKVCacheWrapper(
        workspace_buffer, kv_layout="NHD", use_cuda_graph=False
    )


@contextmanager
def use_prefill_with_paged_kv_state(
    *,
    state: flashinfer.BatchPrefillWithPagedKVCacheWrapper,
    block_tables: torch.Tensor,
    cu_seqlens: torch.Tensor,
    input_lengths: torch.Tensor,
    num_heads: int,
    num_kv_heads: int,
    head_size: int,
    page_size: int,
    query_dtype: str = "float16",
):
    """
    Context manager to set the active flashinfer prefill state to the given
    `state` and parameters. This state will be used by all calls to the
    `attention` function while the context manager is active.
    """

    indptr = torch.zeros(
        input_lengths.shape[0] + 1, device=input_lengths.device, dtype=torch.int32
    )
    # Round up to page size and then calculate the cumulative sum to get
    # the indices into the block table.
    torch.add(input_lengths, page_size - 1, out=indptr[1:])
    indptr[1:].div_(page_size, rounding_mode="floor")
    indptr[1:].cumsum_(-1)

    # Get the lengths of the last page in a block.
    if page_size == 1:
        last_page_len = torch.ones(
            input_lengths.shape[0], dtype=torch.int32, device=input_lengths.device
        )
    else:
        last_page_len = torch.empty(
            input_lengths.shape[0], dtype=torch.int32, device=input_lengths.device
        )
        torch.sub(input_lengths, 1, out=last_page_len)
        last_page_len.remainder_(page_size)
        last_page_len += 1

    token = prefill_with_paged_kv_state.set(state)
    try:
        state.begin_forward(
            qo_indptr=cu_seqlens,
            paged_kv_indptr=indptr,
            paged_kv_indices=block_tables,
            paged_kv_last_page_len=last_page_len,
            num_qo_heads=num_heads,
            num_kv_heads=num_kv_heads,
            head_dim=head_size,
            q_data_type=query_dtype,
            page_size=page_size,
        )
        yield
    finally:
        state.end_forward()
        if token is not None:
            prefill_with_paged_kv_state.reset(token)


def create_prefill_state(
    *,
    device: torch.device,
):
    """Create a prefill state."""
    workspace_buffer = get_workspace(device)
    return flashinfer.BatchPrefillWithRaggedKVCacheWrapper(
        workspace_buffer, kv_layout="NHD", use_cuda_graph=False
    )


@contextmanager
def use_prefill_state(
    *,
    state: flashinfer.BatchPrefillWithRaggedKVCacheWrapper,
    cu_seqlens: torch.Tensor,
    num_heads: int,
    num_kv_heads: int,
    head_size: int,
    query_dtype: str = "float16",
):
    """
    Context manager to set the active flashinfer prefill state to the given
    `state` and parameters. This state will be used by all calls to the
    `attention` function while the context manager is active.
    """

    token = prefill_state.set(state)
    try:
        state.begin_forward(
            qo_indptr=cu_seqlens,
            kv_indptr=cu_seqlens,
            num_qo_heads=num_heads,
            num_kv_heads=num_kv_heads,
            head_dim=head_size,
            q_data_type=query_dtype,
        )
        yield
    finally:
        state.end_forward()
        if token is not None:
            prefill_state.reset(token)


def create_decode_state(
    *,
    device: torch.device,
    num_heads: int,
    num_kv_heads: int,
):
    """Create a decode state."""
    workspace_buffer = get_workspace(device)
    return flashinfer.BatchDecodeWithPagedKVCacheWrapper(
        workspace_buffer,
        kv_layout="NHD",
        use_cuda_graph=False,
        use_tensor_cores=num_heads // num_kv_heads > 4,
    )


def create_decode_state_cuda_graphs(
    *,
    device: torch.device,
    block_tables: torch.Tensor,
    block_tables_ptr: torch.Tensor,
    last_page_len: torch.Tensor,
    num_heads: int,
    num_kv_heads: int,
):
    """
    Create a decode state for use with CUDA Graphs. `block_tables`,
    `block_tables_ptr`, and `last_page_len` are used in CUDA Graphs and are
    therefore stored as part of the state.
    """
    workspace_buffer = get_workspace(device)
    return flashinfer.BatchDecodeWithPagedKVCacheWrapper(
        workspace_buffer,
        kv_layout="NHD",
        use_cuda_graph=True,
        paged_kv_indices_buffer=block_tables,
        paged_kv_indptr_buffer=block_tables_ptr,
        paged_kv_last_page_len_buffer=last_page_len,
        use_tensor_cores=num_heads // num_kv_heads > 4,
    )


@contextmanager
def use_decode_state(
    *,
    state: flashinfer.BatchDecodeWithPagedKVCacheWrapper,
    input_lengths: torch.Tensor,
    block_tables: torch.Tensor,
    num_heads: int,
    num_kv_heads: int,
    head_size: int,
    page_size: int,
    query_dtype: str = "float16",
):
    """
    Context manager to set the active flashinfer decoding state to the given
    `state` and parameters. This state will be used by all calls to the
    `paged_attention` function while the context manager is active.
    """
    indptr = torch.zeros(
        input_lengths.shape[0] + 1, device=input_lengths.device, dtype=torch.int32
    )
    # Round up to page size and then calculate the cumulative sum to get
    # the indices into the block table.
    torch.add(input_lengths, page_size - 1, out=indptr[1:])
    indptr[1:].div_(page_size, rounding_mode="floor")
    indptr[1:].cumsum_(-1)

    # Get the lengths of the last page in a block.
    last_page_len = torch.empty(
        input_lengths.shape[0], dtype=torch.int32, device=input_lengths.device
    )
    torch.sub(input_lengths, 1, out=last_page_len)
    last_page_len.remainder_(page_size)
    last_page_len += 1

    token = decode_state.set(state)
    try:
        state.begin_forward(
            indptr=indptr,
            indices=block_tables,
            last_page_len=last_page_len,
            num_qo_heads=num_heads,
            num_kv_heads=num_kv_heads,
            head_dim=head_size,
            page_size=page_size,
            q_data_type=query_dtype,
        )
        yield
    finally:
        state.end_forward()
        if token is not None:
            decode_state.reset(token)
text-generation-inference/server/text_generation_server/layers/attention/flashinfer.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/attention/flashinfer.py", "repo_id": "text-generation-inference", "token_count": 3150 }
import torch


# copied from https://github.com/openppl-public/ppq/blob/master/ppq/quantization/measure/norm.py
def torch_snr_error(
    y_pred: torch.Tensor, y_real: torch.Tensor, reduction: str = "mean"
) -> torch.Tensor:
    """
    Compute the SNR error between y_pred (tensor) and y_real (tensor).

    The SNR error can be calculated with the following equation:

        SNR(pred, real) = (pred - real) ^ 2 / (real) ^ 2

    If x and y are matrices, the SNR error over the matrix is the mean value
    of the SNR error over all elements:

        SNR(pred, real) = mean((pred - real) ^ 2 / (real) ^ 2)

    Args:
        y_pred (torch.Tensor): predicted tensor.
        y_real (torch.Tensor): reference tensor.
        reduction (str, optional): one of "mean", "sum" or "none". Defaults to "mean".

    Raises:
        ValueError: if the two tensors have different shapes.
        ValueError: if the reduction method is unsupported.

    Returns:
        torch.Tensor: the (optionally reduced) SNR error.
    """
    if y_pred.shape != y_real.shape:
        raise ValueError(
            f"Can not compute snr loss for tensors with different shape. "
            f"({y_pred.shape} and {y_real.shape})"
        )
    reduction = str(reduction).lower()

    if y_pred.ndim == 1:
        y_pred = y_pred.unsqueeze(0)
        y_real = y_real.unsqueeze(0)

    y_pred = y_pred.flatten(start_dim=1)
    y_real = y_real.flatten(start_dim=1)

    noise_power = torch.pow(y_pred - y_real, 2).sum(dim=-1)
    signal_power = torch.pow(y_real, 2).sum(dim=-1)
    snr = (noise_power) / (signal_power + 1e-7)

    if reduction == "mean":
        return torch.mean(snr)
    elif reduction == "sum":
        return torch.sum(snr)
    elif reduction == "none":
        return snr
    else:
        raise ValueError("Unsupported reduction method.")
text-generation-inference/server/text_generation_server/layers/gptq/utils.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/gptq/utils.py", "repo_id": "text-generation-inference", "token_count": 742 }
import torch
import time
import torch.distributed

from dataclasses import dataclass
from opentelemetry import trace
from transformers import (
    AutoConfig,
    AutoTokenizer,
    AutoModelForCausalLM,
    PreTrainedTokenizerBase,
)
from typing import Optional, Tuple, List, Type, Dict

from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)
from text_generation_server.models import Model
from text_generation_server.utils.chunks import concat_text_chunks
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.quantization import get_loader
from text_generation_server.utils.tokens import batch_top_tokens
from text_generation_server.models.types import (
    Batch,
    Tokens,
    Generation,
    GeneratedText,
)
from text_generation_server.pb import generate_pb2
from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling

tracer = trace.get_tracer(__name__)


@dataclass
class CausalLMBatch(Batch):
    batch_id: int
    requests: List[generate_pb2.Request]
    requests_idx_mapping: Dict[int, int]

    # Decoder values
    input_ids: torch.Tensor
    attention_mask: torch.Tensor
    position_ids: torch.Tensor
    past_key_values: Optional[List[Tuple]]

    # All tokens
    all_input_ids: List[torch.Tensor]

    # Lengths of all generations present in the batch
    input_lengths: List[int]
    prefix_offsets: List[int]
    read_offsets: List[int]

    # Generation helpers
    next_token_choosers: List[NextTokenChooser]
    stopping_criterias: List[StoppingCriteria]
    top_n_tokens: List[int]
    top_n_tokens_tensor: torch.Tensor

    # Metadata used for padding
    max_input_length: int
    padding_right_offset: int

    # Maximum number of tokens this batch will grow to
    max_tokens: int

    # Past metadata
    keys_head_dim_last: bool = True

    def to_pb(self) -> generate_pb2.CachedBatch:
        return generate_pb2.CachedBatch(
            id=self.batch_id,
            request_ids=[r.id for r in self.requests],
            size=len(self),
            max_tokens=self.max_tokens,
        )

    @classmethod
    def from_pb(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        dtype: torch.dtype,
        device: torch.device,
    ) -> "CausalLMBatch":
        inputs = []
        next_token_choosers = []
        stopping_criterias = []
        top_n_tokens = []
        prefix_offsets = []
        read_offsets = []
        requests_idx_mapping = {}

        # Parse batch
        max_truncation = 0
        padding_right_offset = 0
        max_decode_tokens = 0
        for i, r in enumerate(pb.requests):
            requests_idx_mapping[r.id] = i
            inputs.append(concat_text_chunks(r.input_chunks.chunks))
            next_token_choosers.append(
                NextTokenChooser.from_pb(r.parameters, device, tokenizer)
            )
            stopping_criteria = StoppingCriteria.from_pb(
                r.stopping_parameters, tokenizer
            )
            stopping_criterias.append(stopping_criteria)
            top_n_tokens.append(r.top_n_tokens)
            max_truncation = max(max_truncation, r.truncate)
            max_decode_tokens += stopping_criteria.max_new_tokens
            padding_right_offset = max(
                padding_right_offset, stopping_criteria.max_new_tokens
            )

        tokenized_inputs = tokenizer(
            inputs,
            return_tensors="pt",
            padding=True,
            return_token_type_ids=False,
            truncation=True,
            max_length=max_truncation,
        ).to(device)
        for _ in pb.requests:
            input_len = tokenized_inputs["input_ids"].shape[1]
            prefix_offsets.append(input_len - 5)
            read_offsets.append(input_len)

        input_lengths = tokenized_inputs["attention_mask"].sum(1)
        max_input_length = input_lengths.max()

        input_ids = tokenized_inputs["input_ids"]
        # Allocate maximum attention_mask
        attention_mask = input_ids.new_zeros(
            (pb.size, max_input_length + padding_right_offset)
        )
        # Copy tokenizer attention_mask into fully allocated attention_mask
        attention_mask[:, :max_input_length] = tokenized_inputs["attention_mask"]

        position_ids = tokenized_inputs["attention_mask"].long().cumsum(-1) - 1
        position_ids.masked_fill_(tokenized_inputs["attention_mask"] == 0, 1)
        all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1)
        top_n_tokens_tensor = torch.tensor(
            top_n_tokens, device=device, dtype=torch.int64
        )

        max_tokens = len(inputs) * (max_input_length + max_decode_tokens)

        return cls(
            batch_id=pb.id,
            requests=pb.requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=None,
            all_input_ids=list(all_input_ids),
            input_lengths=input_lengths.tolist(),
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            next_token_choosers=next_token_choosers,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            max_input_length=max_input_length.item(),
            padding_right_offset=padding_right_offset,
            max_tokens=max_tokens,
        )

    @tracer.start_as_current_span("filter")
    def filter(self, request_ids: List[int]) -> Optional["CausalLMBatch"]:
        if len(request_ids) == 0:
            raise ValueError("Batch must have at least one request")
        if len(request_ids) == len(self):
            return self

        keep_indices = []

        # New values after filtering
        requests_idx_mapping = {}
        requests = []
        input_lengths = []
        prefix_offsets = []
        read_offsets = []
        all_input_ids = []
        max_input_length = 0

        next_token_choosers = []
        stopping_criterias = []
        top_n_tokens = []

        total_remaining_decode_tokens = 0
        new_padding_right_offset = 0

        for i, request_id in enumerate(request_ids):
            idx = self.requests_idx_mapping[request_id]
            requests_idx_mapping[request_id] = i
            keep_indices.append(idx)

            requests.append(self.requests[idx])
            prefix_offsets.append(self.prefix_offsets[idx])
            read_offsets.append(self.read_offsets[idx])
            all_input_ids.append(self.all_input_ids[idx])

            request_input_length = self.input_lengths[idx]
            input_lengths.append(request_input_length)
            max_input_length = max(max_input_length, request_input_length)

            next_token_choosers.append(self.next_token_choosers[idx])
            stopping_criteria = self.stopping_criterias[idx]
            stopping_criterias.append(stopping_criteria)
            top_n_tokens.append(self.top_n_tokens[idx])
            remaining_decode_tokens = (
                stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
            )
            total_remaining_decode_tokens += remaining_decode_tokens
            new_padding_right_offset = max(
                new_padding_right_offset, remaining_decode_tokens
            )

        # Apply indices to input_ids, attention mask, past key values and other items that need to be cached
        input_ids = self.input_ids[keep_indices]
        position_ids = self.position_ids[keep_indices]
        self.attention_mask = self.attention_mask[
            keep_indices,
            -(self.padding_right_offset + max_input_length) : (
                self.attention_mask.shape[1] - self.padding_right_offset
            )
            + new_padding_right_offset,
        ]

        # Ensure that past_key_values tensors can be updated in-place
        if type(self.past_key_values[0]) is tuple:
            self.past_key_values = [list(layer) for layer in self.past_key_values]

        # Update tensors in-place to allow incremental garbage collection
        past_kv_length = max_input_length - 1
        for layer in self.past_key_values:
            past_keys, past_values = layer
            if len(past_keys.shape) == 3:
                # Force past to be of dim [self_size, num_heads, ...] for easy indexing
                past_keys = past_keys.view(len(self), -1, *past_keys.shape[-2:])
                past_values = past_values.view(len(self), -1, *past_values.shape[-2:])
            if self.keys_head_dim_last:
                layer[0] = past_keys[keep_indices, :, -past_kv_length:, :]
            else:
                layer[0] = past_keys[keep_indices, :, :, -past_kv_length:]
            del past_keys
            layer[1] = past_values[keep_indices, :, -past_kv_length:, :]
            del past_values

        top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices]
        max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens

        self.requests = requests
        self.requests_idx_mapping = requests_idx_mapping
        self.input_ids = input_ids
        self.position_ids = position_ids
        self.all_input_ids = all_input_ids
        self.input_lengths = input_lengths
        self.prefix_offsets = prefix_offsets
        self.read_offsets = read_offsets
        self.next_token_choosers = next_token_choosers
        self.stopping_criterias = stopping_criterias
        self.top_n_tokens = top_n_tokens
        self.top_n_tokens_tensor = top_n_tokens_tensor
        self.max_input_length = max_input_length
        self.padding_right_offset = new_padding_right_offset
        self.max_tokens = max_tokens

        return self

    @classmethod
    @tracer.start_as_current_span("concatenate")
    def concatenate(cls, batches: List["CausalLMBatch"]) -> "CausalLMBatch":
        # Used for padding
        total_batch_size = 0
        max_input_length = 0
        padding_right_offset = 0
        for batch in batches:
            total_batch_size += len(batch)
            max_input_length = max(max_input_length, batch.max_input_length)
            padding_right_offset = max(padding_right_offset, batch.padding_right_offset)

        # Batch attributes
        requests = []
        requests_idx_mapping = {}
        input_lengths = []
        prefix_offsets = []
        read_offsets = []
        all_input_ids = []
        next_token_choosers = []
        stopping_criterias = []
        top_n_tokens = []
        max_tokens = 0

        # Batch tensors
        input_ids = None
        attention_mask = None
        position_ids = None
        past_key_values = []
        top_n_tokens_tensor = None

        # Used for slicing correctly inside the tensors
        # Equivalent to a cumsum on batch sizes
        start_index = 0
        for i, batch in enumerate(batches):
            requests.extend(batch.requests)
            input_lengths.extend(batch.input_lengths)
            prefix_offsets.extend(batch.prefix_offsets)
            read_offsets.extend(batch.read_offsets)
            all_input_ids.extend(batch.all_input_ids)
            next_token_choosers.extend(batch.next_token_choosers)
            stopping_criterias.extend(batch.stopping_criterias)
            top_n_tokens.extend(batch.top_n_tokens)

            if i == 0:
                requests_idx_mapping = batch.requests_idx_mapping
            else:
                # We need to offset the mapping for each batch by the cumulative batch size
                for k, v in batch.requests_idx_mapping.items():
                    requests_idx_mapping[k] = v + start_index

            # Slicing end index for this batch
            end_index = start_index + len(batch)

            # We only concatenate batches that did at least one step
            if batch.past_key_values is None:
                raise ValueError("only concatenate prefilled batches")

            # Create empty tensor
            # input_ids is always of shape [batch_size, 1]
            # We do not need to pad it
            if input_ids is None:
                input_ids = batch.input_ids.new_empty((total_batch_size, 1))
            # Copy to correct indices
            input_ids[start_index:end_index] = batch.input_ids

            # Create padded tensor
            if attention_mask is None:
                attention_mask = batch.attention_mask.new_zeros(
                    (total_batch_size, max_input_length + padding_right_offset),
                )

            if top_n_tokens_tensor is None:
                top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
                    total_batch_size,
                )
            top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor

            # We need to slice the attention mask to remove padding from previous steps
            # and to remove unused allocated space
            left_offset = max_input_length - batch.max_input_length
            batch_left_offset = (
                batch.attention_mask.shape[1]
                - batch.max_input_length
                - batch.padding_right_offset
            )
            attention_mask[
                start_index:end_index,
                left_offset:-padding_right_offset,
            ] = batch.attention_mask[
                :,
                batch_left_offset : -batch.padding_right_offset,
            ]

            # Create empty tensor
            # position_ids is always of shape [batch_size, 1]
            if position_ids is None:
                position_ids = batch.position_ids.new_empty((total_batch_size, 1))
            position_ids[start_index:end_index] = batch.position_ids

            # Shenanigans to get dimensions because BLOOM outputs a past with a different shape
            # BLOOM Keys:   [batch_size * num_heads, head_dim, seq_length]
            # BLOOM Values: [batch_size * num_heads, seq_length, head_dim]
            # And ensure that we can update tensors in-place
            if isinstance(batch.past_key_values[0], tuple):
                batch.past_key_values = [
                    [t.view(len(batch), -1, *t.shape[-2:]) for t in layer]
                    for layer in batch.past_key_values
                ]
            elif len(batch.past_key_values[0][0].shape) == 3:
                for layer in batch.past_key_values:
                    for k, t in enumerate(layer):
                        layer[k] = t.view(len(batch), -1, *t.shape[-2:])

            # Add eventual padding tokens that were added while concatenating
            max_tokens += batch.max_tokens + (
                max_input_length - batch.max_input_length
            ) * len(batch)

            start_index = end_index

        first_past_kvs = batches[0].past_key_values
        _, num_heads, padded_sequence_length, head_dim = first_past_kvs[0][1].shape

        padded_past_values_shape = (
            total_batch_size,
            num_heads,
            max_input_length - 1,
            head_dim,
        )

        if batches[0].keys_head_dim_last:
            padded_past_keys_shape = padded_past_values_shape
        else:
            # seq_length is last for BLOOM
            padded_past_keys_shape = (
                total_batch_size,
                num_heads,
                head_dim,
                max_input_length - 1,
            )

        # Iterate over attention layers
        # Concatenate past key values layer by layer to allow incremental garbage collection
        for j in range(len(first_past_kvs)):
            padded_past_keys = first_past_kvs[j][0].new_zeros(padded_past_keys_shape)
            start_index = 0
            for batch in batches:
                past_keys = batch.past_key_values[j][0]
                # Clear reference to the original tensor
                batch.past_key_values[j][0] = None

                # Slicing end index for this batch
                end_index = start_index + len(batch)
                # We slice the keys to remove the padding from previous batches
                past_seq_len = batch.max_input_length - 1
                if batch.keys_head_dim_last:
                    padded_past_keys[start_index:end_index, :, -past_seq_len:, :] = (
                        past_keys[:, :, -past_seq_len:, :]
                    )
                else:
                    # BLOOM case
                    padded_past_keys[start_index:end_index, :, :, -past_seq_len:] = (
                        past_keys[:, :, :, -past_seq_len:]
                    )
                del past_keys

                start_index = end_index

            padded_past_values = first_past_kvs[j][1].new_zeros(
                padded_past_values_shape
            )
            start_index = 0
            for batch in batches:
                past_values = batch.past_key_values[j][1]
                # Clear reference to the original tensor
                batch.past_key_values[j][1] = None

                # Slicing end index for this batch
                end_index = start_index + len(batch)
                # We slice the past values to remove the padding from previous batches
                past_seq_len = batch.max_input_length - 1
                padded_past_values[start_index:end_index, :, -past_seq_len:, :] = (
                    past_values[:, :, -past_seq_len:, :]
                )
                del past_values

                # Update values
                start_index = end_index

            past_key_values.append([padded_past_keys, padded_past_values])

        return cls(
            batch_id=batches[0].batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            all_input_ids=all_input_ids,
            input_lengths=input_lengths,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            next_token_choosers=next_token_choosers,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            max_input_length=max_input_length,
            padding_right_offset=padding_right_offset,
            keys_head_dim_last=batches[0].keys_head_dim_last,
            max_tokens=max_tokens,
        )

    def __len__(self):
        return len(self.requests)


@dataclass
class CausalLMBatchKeysLast(CausalLMBatch):
    keys_head_dim_last: bool = False


class CausalLM(Model):
    def __init__(
        self,
        model_id: str,
        model_class,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        speculator: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        default_dtype=torch.float16,
        trust_remote_code: bool = False,
        tokenizer_class=AutoTokenizer,
        config_class=AutoConfig,
        batch_class=CausalLMBatch,
    ):
        self.quantize = quantize
        self.batch_class = batch_class
        self.process_group, rank, world_size = initialize_torch_distributed()
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = default_dtype if dtype is None else dtype
        elif SYSTEM == "ipex":
            if hasattr(torch, "xpu") and torch.xpu.is_available():
                device = torch.device(f"xpu:{rank}")
                dtype = default_dtype if dtype is None else dtype
            else:
                device = torch.device("cpu")
                # Float16 doesn't exist on target.
                dtype = torch.bfloat16 if dtype is None else dtype
        else:
            device = torch.device("cpu")
            dtype = torch.float32 if dtype is None else dtype

        tokenizer = tokenizer_class.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )

        config = config_class.from_pretrained(
            model_id,
            revision=revision,
            trust_remote_code=trust_remote_code,
        )
        config.quantize = quantize
        config.speculator = speculator
        if tokenizer.pad_token_id is None:
            if config.pad_token_id is not None:
                tokenizer.pad_token_id = config.pad_token_id
            elif config.eos_token_id is not None:
                tokenizer.pad_token_id = config.eos_token_id
            elif tokenizer.eos_token_id is not None:
                tokenizer.pad_token_id = tokenizer.eos_token_id

        torch.distributed.barrier(group=self.process_group)
        weights_loader = get_loader(
            quantize=quantize, model_id=model_id, revision=revision
        )
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(
            filenames,
            device=device,
            dtype=dtype,
            process_group=self.process_group,
            weights_loader=weights_loader,
        )

        prefix = ""
        model = model_class(prefix, config, weights)

        torch.distributed.barrier(group=self.process_group)
        super().__init__(
            model_id=model_id,
            model=model,
            tokenizer=tokenizer,
            requires_padding=True,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )

    @classmethod
    def fallback(
        cls,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        speculator: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
        if speculator:
            raise RuntimeError("Speculator decoding is not enabled for AutoModel")

        if torch.cuda.is_available():
            device = torch.device("cuda")
            dtype = torch.float16 if dtype is None else dtype
        else:
            if quantize:
                raise ValueError("quantization is not available on CPU")

            device = torch.device("cpu")
            dtype = torch.float32 if dtype is None else dtype

        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            revision=revision,
            torch_dtype=dtype,
            device_map=(
                "auto"
                if torch.cuda.is_available() and torch.cuda.device_count() > 1
                else None
            ),
            load_in_8bit=quantize == "bitsandbytes",
            trust_remote_code=trust_remote_code,
        )
        if (
            torch.cuda.is_available()
            and torch.cuda.device_count() == 1
            and quantize != "bitsandbytes"
        ):
            model = model.cuda()

        if tokenizer.pad_token_id is None:
            if model.config.pad_token_id is not None:
                tokenizer.pad_token_id = model.config.pad_token_id
            elif model.config.eos_token_id is not None:
                tokenizer.pad_token_id = model.config.eos_token_id
            elif tokenizer.eos_token_id is not None:
                tokenizer.pad_token_id = tokenizer.eos_token_id
            else:
                tokenizer.add_special_tokens({"pad_token": "[PAD]"})

        self = cls.__new__(
            cls,
        )
        self.batch_class = CausalLMBatch
        super().__init__(
            self,
            model_id=model_id,
            model=model,
            tokenizer=tokenizer,
            requires_padding=True,
            dtype=dtype,
            device=device,
        )
        self.quantize = quantize
        return self

    @property
    def batch_type(self) -> Type[CausalLMBatch]:
        return self.batch_class

    def forward(
        self, input_ids, attention_mask, position_ids, past_key_values: Optional = None
    ) -> Tuple[
        torch.Tensor, Optional[torch.Tensor], List[Tuple[torch.Tensor, torch.Tensor]]
    ]:
        # Model Forward
        kwargs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past_key_values,
            "use_cache": True,
            "return_dict": True,
        }
        if self.has_position_ids:
            kwargs["position_ids"] = position_ids

        outputs = self.model.forward(**kwargs)
        if isinstance(outputs, tuple):
            outputs, speculative_logits = outputs
        else:
            speculative_logits = None
        return outputs.logits, speculative_logits, outputs.past_key_values

    @tracer.start_as_current_span("generate_token")
    def generate_token(
        self, batch: CausalLMBatch
    ) -> Tuple[List[Generation], Optional[CausalLMBatch], Tuple[int, int]]:
        start = time.time_ns()
        # slice the attention mask to the correct shape
        attention_mask = batch.attention_mask[:, : -batch.padding_right_offset]

        logits, speculative_logits, past = self.forward(
            batch.input_ids,
            attention_mask,
            batch.position_ids,
            batch.past_key_values,
        )

        # Results
        generations: List[Generation] = []
        stopped = True

        # Speculation is not active for causal
        accepted_ids = torch.ones_like(batch.input_ids)[:, 0]
        batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
            batch.top_n_tokens,
            batch.top_n_tokens_tensor,
            torch.log_softmax(logits[:, -1], -1),
            accepted_ids,
        )

        start_decode = time.time_ns()

        # Zipped iterator
        iterator = zip(
            batch.requests,
            batch.input_lengths,
            batch.prefix_offsets,
            batch.read_offsets,
            logits,
            batch.next_token_choosers,
            batch.stopping_criterias,
            batch.all_input_ids,
            batch.top_n_tokens,
            batch_top_token_ids,
            batch_top_token_logprobs,
        )

        # For each member of the batch
        for i, (
            request,
            input_length,
            prefix_offset,
            read_offset,
            logits,
            next_token_chooser,
            stopping_criteria,
            all_input_ids,
            top_n_tokens,
            top_token_ids,
            top_token_logprobs,
        ) in enumerate(iterator):
            # Select next token
            next_token_id, logprobs = next_token_chooser(
                all_input_ids.view(1, -1), logits[-1:, :]
            )

            # Append next token to all tokens
            all_input_ids = torch.cat([all_input_ids, next_token_id])
            new_input_length = input_length + 1

            # Generated token
            next_token_logprob = logprobs[-1, next_token_id]
            next_token_id_squeezed = next_token_id.squeeze()
            next_token_text, prefix_offset, read_offset = self.decode_token(
                all_input_ids[:, 0], prefix_offset, read_offset
            )

            # Evaluate stopping criteria
            stop, reason = stopping_criteria(
                next_token_id_squeezed,
                next_token_text,
            )

            if not stop:
                stopped = False

            # Shard generations
            # All generations will be appended in the rust sharded client
            if i % self.world_size == self.rank:
                if stop:
                    # Decode generated tokens
                    output_text, _, _ = self.decode_token(
                        all_input_ids[:, 0],
                        prefix_offset=len(all_input_ids)
                        - stopping_criteria.current_tokens
                        - 1,
                        read_offset=len(all_input_ids)
                        - stopping_criteria.current_tokens,
                        skip_special_tokens=True,
                    )
                    # Get seed
                    if isinstance(next_token_chooser.choice, Sampling):
                        seed = next_token_chooser.choice.seed
                    else:
                        seed = None

                    generated_text = GeneratedText(
                        output_text, stopping_criteria.current_tokens, reason, seed
                    )
                else:
                    generated_text = None

                # Prefill
                if stopping_criteria.current_tokens == 1 and request.prefill_logprobs:
                    # Remove generated token to only have prefill and add nan for first prompt token
                    prefill_logprobs = [float("nan")] + torch.log_softmax(
                        logits, -1
                    ).gather(1, all_input_ids[1:]).squeeze(1)[
                        -new_input_length:-1
                    ].tolist()
                    prefill_token_ids = all_input_ids[-new_input_length:-1]
                    prefill_texts = self.tokenizer.batch_decode(
                        prefill_token_ids,
                        clean_up_tokenization_spaces=False,
                        skip_special_tokens=False,
                    )
                    prefill_tokens = Tokens(
                        prefill_token_ids,
                        prefill_logprobs,
                        prefill_texts,
                        is_special=[],
                    )
                else:
                    prefill_tokens = None

                if top_n_tokens > 0:
                    all_top_tokens = []
                    for top_token_ids, top_token_logprobs in zip(
                        top_token_ids, top_token_logprobs
                    ):
                        toptoken_texts = self.tokenizer.batch_decode(
                            top_token_ids,
                            clean_up_tokenization_spaces=False,
                            skip_special_tokens=False,
                        )
                        special_toptokens = [
                            token_id in self.all_special_ids
                            for token_id in top_token_ids
                        ]
                        top_tokens = Tokens(
                            top_token_ids,
                            top_token_logprobs,
                            toptoken_texts,
                            special_toptokens,
                        )
                        all_top_tokens.append(top_tokens)
                    top_tokens = all_top_tokens
                else:
                    top_tokens = None

                generation = Generation(
                    request.id,
                    prefill_tokens,
                    Tokens(
                        [next_token_id_squeezed],
                        [next_token_logprob],
                        [next_token_text],
                        [next_token_id_squeezed.item() in self.all_special_ids],
                    ),
                    generated_text,
                    top_tokens,
                )

                generations.append(generation)

            # Update values
            batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar(
                next_token_id_squeezed.item()
            )
            batch.input_ids[i, 0] = next_token_id
            batch.all_input_ids[i] = all_input_ids
            batch.input_lengths[i] = new_input_length
            batch.prefix_offsets[i] = prefix_offset
            batch.read_offsets[i] = read_offset
            batch.max_input_length = max(batch.max_input_length, new_input_length)

        # We finished all generations in the batch; there is no next batch
        if stopped:
            forward_ns = start_decode - start
            decode_ns = time.time_ns() - start_decode
            return generations, None, (forward_ns, decode_ns)

        # Slice unused values from prefill
        batch.input_ids = batch.input_ids[:, :1]

        # Update attention_mask as we added a new token to input_ids
        batch.attention_mask[:, -batch.padding_right_offset] = 1
        # Decrease right offset
        batch.padding_right_offset -= 1

        # Update position_ids
        batch.position_ids = batch.position_ids[:, -1:] + 1

        # Update past key values
        batch.past_key_values = past

        forward_ns = start_decode - start
        decode_ns = time.time_ns() - start_decode
        return generations, batch, (forward_ns, decode_ns)
text-generation-inference/server/text_generation_server/models/causal_lm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/causal_lm.py", "repo_id": "text-generation-inference", "token_count": 16948 }
import torch
import torch.distributed

from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple

from text_generation_server.layers.attention import (
    paged_attention,
    attention,
    reshape_and_cache,
    Seqlen,
)
from text_generation_server.layers import (
    TensorParallelRowLinear,
    TensorParallelColumnLinear,
    TensorParallelEmbedding,
    SpeculativeHead,
    get_linear,
)
from text_generation_server.layers.layernorm import (
    FastLayerNorm,
)
from text_generation_server.layers.rotary import (
    PositionRotaryEmbedding,
)


class PhiConfig(PretrainedConfig):
    def __init__(
        self,
        vocab_size=51200,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        hidden_act="gelu_fast",  # llama uses silu
        layer_norm_eps=1e-05,  # rms in llama,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        resid_pdrop=0.1,  # llama doesn't have this
        partial_rotary_factor=0.5,  # important difference between llama and phi
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.rope_theta = rope_theta
        self.resid_pdrop = resid_pdrop
        self.partial_rotary_factor = partial_rotary_factor

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


# this is the same as llama except for Phi uses bias=True
def load_attention(config, prefix, weights):
    if config.num_attention_heads != config.num_key_value_heads:
        return _load_gqa(config, prefix, weights)
    else:
        return TensorParallelColumnLinear.load_multi(
            config,
            prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
            dim=0,
            weights=weights,
            bias=True,
        )


def _load_gqa(config, prefix: str, weights):
    assert config.hidden_size % config.num_attention_heads == 0
    assert config.num_attention_heads % weights.process_group.size() == 0

    weight = weights.get_multi_weights_col(
        prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
        dim=0,
    )

    if config.quantize not in ["gptq", "awq", "marlin"]:
        weight = weight.to(dtype=weights.dtype).to(device=weights.device)

        head_size = config.hidden_size // config.num_attention_heads
        num_heads = config.num_attention_heads // weights.process_group.size()
        num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
        assert list(weight.shape) == [
            (num_heads + 2 * num_key_value_heads) * head_size,
            config.hidden_size,
        ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"

    # this is the same as llama except for Phi uses bias=True
    return TensorParallelColumnLinear(get_linear(weight, bias=True))


class FlashPhiAttention(torch.nn.Module):
    def __init__(
        self,
        prefix: str,
        config,
        weights,
    ):
        super().__init__()
        self.num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.num_heads

        self.softmax_scale = self.head_size**-0.5
        self.rotary_dim = int(config.partial_rotary_factor * self.head_size)

        self.rotary_emb = PositionRotaryEmbedding.static(
            config=config,
            dim=self.rotary_dim,
            base=config.rope_theta,
            device=weights.device,
        )

        if self.num_heads % weights.process_group.size() != 0:
            raise ValueError(
                f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
                f"and `num_shards`: {weights.process_group.size()}"
            )
        self.num_heads = self.num_heads // weights.process_group.size()
        self.num_key_value_heads = (
            config.num_key_value_heads // weights.process_group.size()
        )

        self.query_key_value = load_attention(config, prefix, weights)

        # in llama the dense layer is called "o_proj" and has bias=False
        self.dense = TensorParallelRowLinear.load(
            config,
            prefix=f"{prefix}.dense",
            weights=weights,
            bias=True,
        )

        self.num_groups = self.num_heads // self.num_key_value_heads
        self.kv_head_mapping = torch.arange(
            0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
        ).repeat_interleave(self.num_groups)

    def forward(
        self,
        hidden_states,
        cos,
        sin,
        cu_seqlen_prefill,
        kv_cache,
        block_tables,
        slots,
        seqlen,
        max_s,
    ):
        # Compute query, key, value and split
        qkv = self.query_key_value(hidden_states)
        query, kv = qkv.split(
            [
                self.head_size * self.num_heads,
                2 * self.head_size * self.num_key_value_heads,
            ],
            dim=1,
        )

        # Reshape query and key for rotary embeddings
        query = query.view(-1, self.num_heads, self.head_size)
        kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)

        # NOTE: this is the main difference between Llama and Phi
        # in llama the rotary embeddings are applied to the whole query and key.
        # Phi uses PARTIAL rotary embeddings, which are applied to the first 32 dimensions
        #
        # Apply partial positional embeddings in place
        self.rotary_emb(
            query[:, :, : self.rotary_dim], kv[:, 0, :, : self.rotary_dim], cos, sin
        )

        # Reshape key and value and cache
        reshape_and_cache(kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots)

        # Prefill
        if cu_seqlen_prefill is not None:
            attn_output = attention(
                query,
                kv_cache[0],
                kv_cache[1],
                seqlen,
                block_tables,
                self.softmax_scale,
            )
        # Decode
        else:
            attn_output = paged_attention(
                query,
                kv_cache[0],
                kv_cache[1],
                self.kv_head_mapping,
                self.softmax_scale,
                block_tables,
                seqlen,
                max_s,
            )

        return self.dense(attn_output.view(-1, self.num_heads * self.head_size))


class PhiMLP(nn.Module):
    def __init__(self, prefix, config, weights):
        super().__init__()
        act = config.hidden_act
        self.act = (
            ACT2FN[act]
            if "gelu" not in act
            else lambda x: torch.nn.functional.gelu(
                x,
                approximate=(
                    "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
                ),
            )
        )

        # llama weights are up_proj and down_proj and bias=False
        self.up_proj = TensorParallelColumnLinear.load(
            config,
            prefix=f"{prefix}.fc1",
            weights=weights,
            bias=True,
        )
        self.down_proj = TensorParallelRowLinear.load(
            config,
            prefix=f"{prefix}.fc2",
            weights=weights,
            bias=True,
        )

    def forward(self, hidden_states):
        # NOTE: Llama requires the gate up states to an intermediate size
        # Phi does not and we can avoid the `view` operation
        return self.down_proj(self.act(self.up_proj(hidden_states)))


class FlashPhiLayer(nn.Module):
    def __init__(self, prefix: str, layer_id, config, weights):
        super().__init__()
        prefix = f"{prefix}.layers.{layer_id}"
        self.self_attn = FlashPhiAttention(
            prefix=f"{prefix}.self_attn", config=config, weights=weights
        )
        self.mlp = PhiMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
        self.input_layernorm = FastLayerNorm.load(
            prefix=f"{prefix}.input_layernorm",
            weights=weights,
            eps=config.layer_norm_eps,
        )
        self.resid_dropout = torch.nn.Dropout(config.resid_pdrop)

    def forward(
        self,
        hidden_states,
        residual,
        cos,
        sin,
        cu_seqlen_prefill,
        kv_cache,
        block_tables,
        slots,
        seqlen,
        max_s,
    ):
        hidden_states, res = self.input_layernorm(hidden_states, residual)
        # Self Attention
        attn_output = self.self_attn(
            hidden_states,
            cos,
            sin,
            cu_seqlen_prefill,
            kv_cache,
            block_tables,
            slots,
            seqlen,
            max_s,
        )

        hidden_states = self.resid_dropout(attn_output).add(
            self.resid_dropout(self.mlp(hidden_states))
        )

        return hidden_states, res


class FlashPhiModel(torch.nn.Module):
    def __init__(self, prefix: str, config, weights):
        super().__init__()

        process_group = weights.process_group
        self.tp_rank = process_group.rank()
        self.tp_world_size = process_group.size()
        self.embed_tokens = TensorParallelEmbedding(
            prefix=f"{prefix}.embed_tokens", weights=weights
        )
        self.layers = nn.ModuleList(
            [
                FlashPhiLayer(
                    prefix,
                    layer_id,
                    config,
                    weights,
                )
                for layer_id in range(config.num_hidden_layers)
            ]
        )
        self.gradient_checkpointing = False

        self.head_size = self.layers[0].self_attn.head_size
        self.num_heads = self.layers[0].self_attn.num_heads
        self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads

        self.norm = FastLayerNorm.load(
            prefix="model.final_layernorm",
            weights=weights,
            eps=config.layer_norm_eps,
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        block_tables: torch.Tensor,
        slots: torch.Tensor,
        seqlen: Seqlen,
        max_s: int,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)

        # Get rotary cos and sin for this forward
        # Avoid to index in each layer
        cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
            position_ids, max_s, hidden_states.dtype
        )

        residual = None
        for i, layer in enumerate(self.layers):
            hidden_states, residual = layer(
                hidden_states,
                residual,
                cos,
                sin,
                cu_seqlen_prefill,
                kv_cache[i],
                block_tables,
                slots,
                seqlen,
                max_s,
            )

        hidden_states, _ = self.norm(hidden_states, residual)

        return hidden_states


class FlashPhiForCausalLM(torch.nn.Module):
    def __init__(self, prefix: str, config, weights):
        super().__init__()

        if not prefix:
            prefix = "model"
        else:
            prefix = f"{prefix}.model"

        self.model = FlashPhiModel(prefix, config, weights)
        self.lm_head = SpeculativeHead.load(
            config,
            prefix="lm_head",
            weights=weights,
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        block_tables: torch.Tensor,
        slots: torch.Tensor,
        seqlen: Seqlen,
        max_s: int,
        prefill_cache_indices: Optional[torch.Tensor],
        lm_head_indices: Optional[torch.Tensor] = None,
        adapter_data: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        hidden_states = self.model(
            input_ids,
            position_ids,
            cu_seqlen_prefill,
            kv_cache,
            block_tables,
            slots,
            seqlen,
            max_s,
        )
        if lm_head_indices is not None:
            hidden_states = hidden_states[lm_head_indices]
        return self.lm_head(hidden_states)
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py", "repo_id": "text-generation-inference", "token_count": 6605 }
# coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch OPT model."""

import random
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers import OPTConfig
from text_generation_server.layers import (
    FastLinear,
    TensorParallelColumnLinear,
    TensorParallelEmbedding,
    TensorParallelRowLinear,
    SpeculativeHead,
)

EPS = 1e-5


# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size,
    dtype: torch.dtype,
    device: torch.device,
    past_key_values_length: int = 0,
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full(
        (tgt_len, tgt_len),
        torch.tensor(torch.finfo(dtype).min, device=device),
        device=device,
    )
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat(
            [
                torch.zeros(
                    tgt_len, past_key_values_length, dtype=dtype, device=device
                ),
                mask,
            ],
            dim=-1,
        )
    return mask[None, None, :, :].expand(
        bsz, 1, tgt_len, tgt_len + past_key_values_length
    )


def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(
        inverted_mask.to(torch.bool), torch.finfo(dtype).min
    )


class OPTLearnedPositionalEmbedding(nn.Module):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, prefix: str, weights):
        super().__init__()
        self.offset = 2
        self.weight = nn.Parameter(
            weights.get_tensor(
                f"{prefix + '.' if prefix else ''}decoder.embed_positions.weight"
            )
        )

    def forward(
        self, attention_mask: torch.LongTensor, past_key_values_length: int = 0
    ):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        attention_mask = attention_mask.long()

        # create positions depending on attention_mask
        positions = (
            torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
        ).long() - 1

        # cut positions if `past_key_values_length` is > 0
        positions = positions[:, past_key_values_length:]

        return torch.nn.functional.embedding(positions + self.offset, self.weight)


class OPTAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        config,
        prefix,
        weights,
        is_decoder: bool = False,
        bias: bool = True,
        process_group=None,
    ):
        super().__init__()
        hidden_size = config.hidden_size
        num_heads = config.num_attention_heads

        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.dropout = config.dropout
        self.head_dim = hidden_size // num_heads

        if (self.head_dim * num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        process_group = weights.process_group
        if self.num_heads % weights.process_group.size() != 0:
            raise ValueError(
                f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
                f"and `num_shards`: {weights.process_group.size()}"
            )
        self.num_heads = self.num_heads // process_group.size()
        self.hidden_size = self.hidden_size // process_group.size()

        self.q_proj = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.q_proj", weights=weights, bias=bias
        )
        self.k_proj = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.k_proj", weights=weights, bias=bias
        )
        self.v_proj = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.v_proj", weights=weights, bias=bias
        )
        self.out_proj = TensorParallelRowLinear.load(
            config, prefix=f"{prefix}.out_proj", weights=weights, bias=bias
        )

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return (
            tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
            .transpose(1, 2)
            .contiguous()
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = (
                attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
                + attention_mask
            )
            attn_weights = torch.max(
                attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
        if attn_weights.dtype == torch.float16:
            attn_weights = nn.functional.softmax(
                attn_weights, dim=-1, dtype=torch.float32
            ).to(torch.float16)
        else:
            attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
                bsz, self.num_heads, tgt_len, src_len
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(
                bsz, self.num_heads, tgt_len, src_len
            )
            attn_weights = attn_weights_reshaped.view(
                bsz * self.num_heads, tgt_len, src_len
            )
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(
            attn_weights, p=self.dropout, training=self.training
        )

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `hidden_size` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.hidden_size)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value


class OPTDecoderLayer(nn.Module):
    def __init__(self, layer_id: int, prefix: str, config: OPTConfig, weights):
        super().__init__()
        self.process_group = weights.process_group
        self.hidden_size = config.hidden_size
        prefix = f"{prefix + '.' if prefix else ''}decoder.layers.{layer_id}"
        self.self_attn = OPTAttention(
            config,
            prefix=f"{prefix}.self_attn",
            weights=weights,
            is_decoder=True,
            bias=config.enable_bias,
        )
        self.do_layer_norm_before = config.do_layer_norm_before
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]

        self.self_attn_layer_norm = nn.LayerNorm.load(
            prefix=f"{prefix}.self_attn_layer_norm", weights=weights, eps=EPS
        )
        self.fc1 = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.fc1", weights=weights, bias=config.enable_bias
        )
        self.fc2 = TensorParallelRowLinear.load(
            config, prefix=f"{prefix}.fc2", weights=weights, bias=config.enable_bias
        )
        self.final_layer_norm = nn.LayerNorm.load(
            prefix=f"{prefix}.final_layer_norm", weights=weights, eps=EPS
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
    ) -> Tuple[
        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
    ]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, hidden_size)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout( hidden_states, p=self.dropout, training=self.training ) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Fully Connected hidden_states_shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout( hidden_states, p=self.dropout, training=self.training ) hidden_states = (residual + hidden_states).view(hidden_states_shape) # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class OPTPreTrainedModel(PreTrainedModel): config_class = OPTConfig class OPTDecoder(OPTPreTrainedModel): def __init__(self, prefix: str, config: OPTConfig, weights): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.vocab_size = config.vocab_size prefix = prefix + "." 
if prefix else "" self.embed_tokens = TensorParallelEmbedding( prefix=f"{prefix}decoder.embed_tokens", weights=weights ) self.embed_positions = OPTLearnedPositionalEmbedding(prefix, weights) if config.word_embed_proj_dim != config.hidden_size: self.project_out = FastLinear.load( config, prefix=f"{prefix}decoder.project_out", weights=weights, bias=False, ) else: self.project_out = None if config.word_embed_proj_dim != config.hidden_size: self.project_in = FastLinear.load( config, prefix=f"{prefix}decoder.project_in", weights=weights, bias=False, ) else: self.project_in = None # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 if config.do_layer_norm_before and not config._remove_final_layer_norm: self.final_layer_norm = nn.LayerNorm.load( prefix=f"{prefix}decoder.final_layer_norm", weights=weights, eps=EPS ) else: self.final_layer_norm = None self.layers = nn.ModuleList( [ OPTDecoderLayer(layer_id, prefix, config, weights) for layer_id in range(config.num_hidden_layers) ] ) # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask( self, attention_mask, input_shape, inputs_embeds, past_key_values_length ): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask( attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ).to(inputs_embeds.device) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError( "You have to specify either decoder_input_ids or decoder_inputs_embeds" ) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape past_key_values_length = ( past_key_values[0][0].shape[2] if past_key_values is not None else 0 ) # required mask seq length can be calculated via length of past mask_seq_length = past_key_values_length + seq_length # embed positions if attention_mask is None: attention_mask = torch.ones( batch_size, mask_seq_length, device=inputs_embeds.device ) causal_attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) pos_embeds = self.embed_positions(attention_mask, past_key_values_length) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None # check if head_mask has a correct number of layers specified if desired for attn_mask, mask_name in 
zip([head_mask], ["head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = ( past_key_values[idx] if past_key_values is not None else None ) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) class OPTModel(OPTPreTrainedModel): def __init__(self, prefix: str, config: OPTConfig, weights): super().__init__(config) self.decoder = OPTDecoder(prefix, config, weights) # Initialize weights and apply final processing def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs return BaseModelOutputWithPast( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, ) class OPTForCausalLM(OPTPreTrainedModel): def __init__(self, prefix, config, weights): super().__init__(config) self.model = OPTModel(prefix, config, weights) self.lm_head = 
SpeculativeHead.load( config, prefix=f"{prefix + '.' if prefix else ''}decoder.embed_tokens", weights=weights, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits, speculative_logits = self.lm_head(outputs.last_hidden_state) loss = None return ( CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ), speculative_logits, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs, ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple( past_state.index_select(0, beam_idx) for past_state in layer_past ), ) return reordered_past
text-generation-inference/server/text_generation_server/models/custom_modeling/opt_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/opt_modeling.py", "repo_id": "text-generation-inference", "token_count": 15778 }
275
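A brief aside on the position handling in the OPT modeling file above: positions are derived from the attention mask rather than from absolute indices, and the learned embedding adds a fixed offset of 2 (a quirk OPT inherits from fairseq). The following is a minimal standalone sketch of that logic, assuming only that `torch` is installed; it is not the TGI module itself.

import torch

# Mirror of OPTLearnedPositionalEmbedding.forward: positions are the cumulative
# count of non-padding tokens minus one, so left-padding slots end up at -1.
attention_mask = torch.tensor([[0, 0, 1, 1, 1]])  # two padding tokens, then three real ones
positions = (
    torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() - 1
print(positions)      # tensor([[-1, -1,  0,  1,  2]])

# The module then looks up `positions + self.offset` (offset == 2), so real tokens
# use embedding rows 2, 3, 4 while the first rows of the table stay reserved.
print(positions + 2)  # tensor([[1, 1, 2, 3, 4]])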
# Origin: https://github.com/predibase/lorax # Path: lorax/server/lorax_server/utils/segments.py # License: Apache License Version 2.0, January 2004 from typing import List, Tuple, Union import torch def find_segments( adapter_indices: Union[torch.Tensor, List[int]] ) -> Tuple[List[int], List[int]]: segments = [0] segment_indices = [] if isinstance(adapter_indices, torch.Tensor): # Calling .item() repeatedly on CUDA tensor is very slow, so we move it to CPU first adapter_indices = adapter_indices.cpu().tolist() start_index = 0 for i in range(1, len(adapter_indices)): if adapter_indices[i] != adapter_indices[i - 1]: segments.append(i) segment_indices.append(adapter_indices[i - 1]) start_index = i # Handle the last segment if start_index < len(adapter_indices): segments.append(len(adapter_indices)) segment_indices.append(adapter_indices[-1]) return segments, segment_indices class SegmentConcatBuilder: def __init__(self): self.adapter_segment_indices = [] self.adapter_segment_tensors = [] def concat(self, adapter_segments: torch.Tensor, segment_indices: List[int]): # Update adapter segments if self.adapter_segment_tensors: # Because we have already processed at least one batch, remove the 0 start index # from this batch denoting the beginning of the segment, then offset all segment # positions by the value of the last segment in the previous batch to account for # the concatenation. adapter_segments = ( adapter_segments[1:] + self.adapter_segment_tensors[-1][-1] ) if ( self.adapter_segment_indices and self.adapter_segment_indices[-1] == segment_indices[0] ): # If the last segment in the previous batch is the same as the first segment in this batch, # then we merge them together into a single segment. In effect, this means removing it from # the segment indices of this batch, and extending the segment span by removing the segment # end index from the previous batch. segment_indices = segment_indices[1:] self.adapter_segment_tensors[-1] = self.adapter_segment_tensors[-1][:-1] self.adapter_segment_indices.extend(segment_indices) self.adapter_segment_tensors.append(adapter_segments) def build(self) -> Tuple[torch.Tensor, List[int]]: return torch.concat(self.adapter_segment_tensors), self.adapter_segment_indices
text-generation-inference/server/text_generation_server/utils/segments.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/segments.py", "repo_id": "text-generation-inference", "token_count": 1081 }
276
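A small usage sketch for `find_segments` from the segments helper above; the input values are hypothetical. The function also accepts a plain list, so this runs without building any tensors (only the TGI server package is needed for the import).

from text_generation_server.utils.segments import find_segments

# Batch rows 0-2 use adapter 0, rows 3-4 use adapter 1.
segments, segment_indices = find_segments([0, 0, 0, 1, 1])
print(segments)         # [0, 3, 5]  -> boundaries of each contiguous run
print(segment_indices)  # [0, 1]     -> adapter index owning each run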
exclude = ["node_modules/**/*.toml"] # https://taplo.tamasfe.dev/configuration/formatter-options.html [formatting] align_entries = true indent_tables = true reorder_keys = true
tokenizers/bindings/node/.taplo.toml/0
{ "file_path": "tokenizers/bindings/node/.taplo.toml", "repo_id": "tokenizers", "token_count": 66 }
277
import { bpeDecoder, byteFallbackDecoder, ctcDecoder, fuseDecoder, metaspaceDecoder, replaceDecoder, sequenceDecoder, stripDecoder, wordPieceDecoder, } from '../../' describe('wordPieceDecoder', () => { it('accepts `undefined` as first parameter', () => { expect(wordPieceDecoder(undefined)).toBeDefined() }) it('accepts `undefined` as second parameter', () => { expect(wordPieceDecoder('test', undefined)).toBeDefined() }) it('can decode arrays of strings', () => { expect(wordPieceDecoder().decode(['Hel', '##lo', 'there', 'my', 'fr', '##iend'])).toEqual('Hello there my friend') }) }) describe('byteFallbackDecoder', () => { it('accepts `undefined` as first parameter', () => { expect(byteFallbackDecoder()).toBeDefined() }) it('can decode arrays of strings', () => { expect(byteFallbackDecoder().decode(['Hel', 'lo'])).toEqual('Hello') expect(byteFallbackDecoder().decode(['<0x61>'])).toEqual('a') expect(byteFallbackDecoder().decode(['<0x61>'])).toEqual('a') expect(byteFallbackDecoder().decode(['My', ' na', 'me'])).toEqual('My name') expect(byteFallbackDecoder().decode(['<0x61>'])).toEqual('a') expect(byteFallbackDecoder().decode(['<0xE5>'])).toEqual('�') expect(byteFallbackDecoder().decode(['<0xE5>', '<0x8f>'])).toEqual('��') expect(byteFallbackDecoder().decode(['<0xE5>', '<0x8f>', '<0xab>'])).toEqual('叫') expect(byteFallbackDecoder().decode(['<0xE5>', '<0x8f>', 'a'])).toEqual('��a') expect(byteFallbackDecoder().decode(['<0xE5>', '<0x8f>', '<0xab>', 'a'])).toEqual('叫a') }) }) describe('replaceDecoder', () => { it('can decode arrays of strings', () => { expect(replaceDecoder('_', ' ').decode(['Hello', '_Hello'])).toEqual('Hello Hello') }) }) describe('fuseDecoder', () => { it('accepts `undefined` as first parameter', () => { expect(fuseDecoder()).toBeDefined() }) it('can decode arrays of strings', () => { expect(fuseDecoder().decode(['Hel', 'lo'])).toEqual('Hello') }) }) describe('stripDecoder', () => { it('accepts `undefined` as first parameter', () => { expect(stripDecoder('_', 0, 0)).toBeDefined() }) it('can decode arrays of strings', () => { expect(stripDecoder('_', 1, 0).decode(['_Hel', 'lo', '__there'])).toEqual('Hello_there') }) }) describe('metaspaceDecoder', () => { it('accepts `undefined` as first parameter', () => { expect(metaspaceDecoder(undefined)).toBeDefined() }) it('accepts `undefined` as second parameter', () => { expect(metaspaceDecoder('t', undefined)).toBeDefined() }) it('works', () => { expect(metaspaceDecoder().decode(['▁Hello'])).toEqual('Hello') }) }) describe('bpeDecoder', () => { it('accepts `undefined` as parameter', () => { expect(bpeDecoder(undefined)).toBeDefined() }) }) describe('ctcDecoder', () => { it('accepts `undefined` as parameter', () => { expect(ctcDecoder(undefined)).toBeDefined() }) it('encodes correctly', () => { expect(ctcDecoder().decode(['<pad>', 'h', 'h', 'e', 'e', 'l', 'l', '<pad>', 'l', 'l', 'o'])).toEqual('hello') }) }) describe('sequenceDecoder', () => { it('accepts `empty list` as parameter', () => { expect(sequenceDecoder([])).toBeDefined() }) it('encodes correctly', () => { expect( sequenceDecoder([ctcDecoder(), metaspaceDecoder()]).decode(['▁', '▁', 'H', 'H', 'i', 'i', '▁', 'y', 'o', 'u']), ).toEqual('Hi you') }) })
tokenizers/bindings/node/lib/bindings/decoders.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/decoders.test.ts", "repo_id": "tokenizers", "token_count": 1393 }
278
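The Node tests above exercise the decoders through the JavaScript bindings; the same behaviour is reachable from the Python bindings. A rough Python counterpart reusing the expected values from the tests (assumes a recent `tokenizers` release that includes `ByteFallback`; the expected outputs in the comments are taken from the Node tests):

from tokenizers.decoders import ByteFallback, CTC, Metaspace, Sequence, WordPiece

print(WordPiece().decode(["Hel", "##lo", "there", "my", "fr", "##iend"]))  # "Hello there my friend"
print(ByteFallback().decode(["<0x61>"]))                                   # "a"
print(CTC().decode(["<pad>", "h", "h", "e", "e", "l", "l", "<pad>", "l", "l", "o"]))  # "hello"
print(Sequence([CTC(), Metaspace()]).decode(["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"]))  # "Hi you"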
# `tokenizers-freebsd-x64` This is the **x86_64-unknown-freebsd** binary for `tokenizers`
tokenizers/bindings/node/npm/freebsd-x64/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/freebsd-x64/README.md", "repo_id": "tokenizers", "token_count": 36 }
279
# `tokenizers-win32-x64-msvc` This is the **x86_64-pc-windows-msvc** binary for `tokenizers`
tokenizers/bindings/node/npm/win32-x64-msvc/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/win32-x64-msvc/README.md", "repo_id": "tokenizers", "token_count": 39 }
280
use crate::models::Model; use napi_derive::napi; use std::sync::{Arc, RwLock}; use tokenizers as tk; use tokenizers::models::TrainerWrapper; #[napi] pub struct Trainer { trainer: Option<Arc<RwLock<TrainerWrapper>>>, } impl From<TrainerWrapper> for Trainer { fn from(trainer: TrainerWrapper) -> Self { Self { trainer: Some(Arc::new(RwLock::new(trainer))), } } } impl tk::Trainer for Trainer { type Model = Model; fn should_show_progress(&self) -> bool { self .trainer .as_ref() .expect("Uninitialized Trainer") .read() .unwrap() .should_show_progress() } fn train(&self, model: &mut Self::Model) -> tk::Result<Vec<tk::AddedToken>> { let special_tokens = self .trainer .as_ref() .ok_or("Uninitialized Trainer")? .read() .unwrap() .train( &mut model .model .as_ref() .ok_or("Uninitialized Model")? .write() .unwrap(), )?; Ok(special_tokens) } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> tk::Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> tk::Result<Vec<String>> + Sync, { self .trainer .as_ref() .ok_or("Uninitialized Trainer")? .write() .unwrap() .feed(iterator, process) } }
tokenizers/bindings/node/src/trainers.rs/0
{ "file_path": "tokenizers/bindings/node/src/trainers.rs", "repo_id": "tokenizers", "token_count": 641 }
281
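The Rust `Trainer` wrapper above is the glue that lets a binding-provided trainer drive model training (`feed` streams the corpus in, `train` builds the final model). From Python the same machinery sits behind `train_from_iterator`; a minimal sketch, assuming only the `tokenizers` package:

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
trainer = BpeTrainer(vocab_size=100, special_tokens=["[UNK]"], show_progress=False)
# Under the hood the trainer is fed every batch of the iterator, then train()
# replaces the tokenizer's model with the learned one.
tokenizer.train_from_iterator(["a tiny toy corpus", "another toy line"], trainer=trainer)
print(tokenizer.get_vocab_size())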
import argparse import glob from tokenizers import BertWordPieceTokenizer parser = argparse.ArgumentParser() parser.add_argument( "--files", default=None, metavar="path", type=str, required=True, help="The files to use as training; accept '**/*.txt' type of patterns \ if enclosed in quotes", ) parser.add_argument( "--out", default="./", type=str, help="Path to the output directory, where the files will be saved", ) parser.add_argument("--name", default="bert-wordpiece", type=str, help="The name of the output vocab files") args = parser.parse_args() files = glob.glob(args.files) if not files: print(f"File does not exist: {args.files}") exit(1) # Initialize an empty tokenizer tokenizer = BertWordPieceTokenizer( clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True, ) # And then train tokenizer.train( files, vocab_size=10000, min_frequency=2, show_progress=True, special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"], limit_alphabet=1000, wordpieces_prefix="##", ) # Save the files tokenizer.save_model(args.out, args.name)
tokenizers/bindings/python/examples/train_bert_wordpiece.py/0
{ "file_path": "tokenizers/bindings/python/examples/train_bert_wordpiece.py", "repo_id": "tokenizers", "token_count": 472 }
282
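To use the output of the training script above, the saved vocabulary can be reloaded into a `BertWordPieceTokenizer`. A hedged sketch assuming the default `--out ./` and `--name bert-wordpiece` flags, which should yield `./bert-wordpiece-vocab.txt`:

from tokenizers import BertWordPieceTokenizer

tokenizer = BertWordPieceTokenizer("./bert-wordpiece-vocab.txt", lowercase=True)
encoding = tokenizer.encode("Hello, how are you?")
print(encoding.tokens)  # WordPiece tokens, wrapped by the default [CLS] ... [SEP] post-processing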
# Generated content DO NOT EDIT class Model: """ Base class for all models The model represents the actual tokenization algorithm. This is the part that will contain and manage the learned vocabulary. This class cannot be constructed directly. Please use one of the concrete models. """ def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class BPE(Model): """ An implementation of the BPE (Byte-Pair Encoding) algorithm Args: vocab (:obj:`Dict[str, int]`, `optional`): A dictionary of string keys and their ids :obj:`{"am": 0,...}` merges (:obj:`List[Tuple[str, str]]`, `optional`): A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]` cache_capacity (:obj:`int`, `optional`): The number of words that the BPE cache can contain. The cache allows to speed-up the process by keeping the result of the merge operations for a number of words. dropout (:obj:`float`, `optional`): A float between 0 and 1 that represents the BPE dropout to use. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. continuing_subword_prefix (:obj:`str`, `optional`): The prefix to attach to subword units that don't represent a beginning of word. end_of_word_suffix (:obj:`str`, `optional`): The suffix to attach to subword units that represent an end of word. fuse_unk (:obj:`bool`, `optional`): Whether to fuse any subsequent unknown tokens into a single one byte_fallback (:obj:`bool`, `optional`): Whether to use spm byte-fallback trick (defaults to False) ignore_merges (:obj:`bool`, `optional`): Whether or not to match tokens with the vocab before using merges. """ def __init__( self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False, ignore_merges=False, ): pass @staticmethod def from_file(cls, vocab, merge, **kwargs): """ Instantiate a BPE model from the given files. 
This method is roughly equivalent to doing:: vocab, merges = BPE.read_file(vocab_filename, merges_filename) bpe = BPE(vocab, merges) If you don't need to keep the :obj:`vocab, merges` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE` Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file merges (:obj:`str`): The path to a :obj:`merges.txt` file Returns: :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(self, vocab, merges): """ Read a :obj:`vocab.json` and a :obj:`merges.txt` files This method provides a way to read and parse the content of these files, returning the relevant data structures. If you want to instantiate some BPE models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file merges (:obj:`str`): The path to a :obj:`merges.txt` file Returns: A :obj:`Tuple` with the vocab and the merges: The vocabulary and merges loaded into memory """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class Unigram(Model): """ An implementation of the Unigram algorithm Args: vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`): A list of vocabulary items and their relative score [("am", -0.2442),...] """ def __init__(self, vocab, unk_id, byte_fallback): pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. 
Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class WordLevel(Model): """ An implementation of the WordLevel algorithm Most simple tokenizer model based on mapping tokens to their corresponding id. Args: vocab (:obj:`str`, `optional`): A dictionary of string keys and their ids :obj:`{"am": 0,...}` unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. """ def __init__(self, vocab, unk_token): pass @staticmethod def from_file(vocab, unk_token): """ Instantiate a WordLevel model from the given file This method is roughly equivalent to doing:: vocab = WordLevel.read_file(vocab_filename) wordlevel = WordLevel(vocab) If you don't need to keep the :obj:`vocab` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to initialize a :class:`~tokenizers.models.WordLevel` Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file Returns: :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(vocab): """ Read a :obj:`vocab.json` This method provides a way to read and parse the content of a vocabulary file, returning the relevant data structures. If you want to instantiate some WordLevel models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file Returns: :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. 
Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class WordPiece(Model): """ An implementation of the WordPiece algorithm Args: vocab (:obj:`Dict[str, int]`, `optional`): A dictionary of string keys and their ids :obj:`{"am": 0,...}` unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. max_input_chars_per_word (:obj:`int`, `optional`): The maximum number of characters to authorize in a single word. """ def __init__(self, vocab, unk_token, max_input_chars_per_word): pass @staticmethod def from_file(vocab, **kwargs): """ Instantiate a WordPiece model from the given file This method is roughly equivalent to doing:: vocab = WordPiece.read_file(vocab_filename) wordpiece = WordPiece(vocab) If you don't need to keep the :obj:`vocab` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to initialize a :class:`~tokenizers.models.WordPiece` Args: vocab (:obj:`str`): The path to a :obj:`vocab.txt` file Returns: :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(vocab): """ Read a :obj:`vocab.txt` file This method provides a way to read and parse the content of a standard `vocab.txt` file as used by the WordPiece Model, returning the relevant data structures. If you want to instantiate some WordPiece models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.txt` file Returns: :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass
tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi", "repo_id": "tokenizers", "token_count": 7626 }
283
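The stubs above document the `from_file`/`read_file` pair on `BPE`; the round-trip they describe looks like this in practice (the `vocab.json` / `merges.txt` paths are placeholders):

from tokenizers.models import BPE

# Manual two-step load: parse the files, then build the model.
vocab, merges = BPE.read_file("vocab.json", "merges.txt")
bpe = BPE(vocab, merges, unk_token="[UNK]")

# Equivalent single call, as recommended by the docstring above.
bpe = BPE.from_file("vocab.json", "merges.txt", unk_token="[UNK]")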
import tokenizers from argparse import ArgumentParser import sentencepiece as spm from collections import Counter import json import os import datetime try: from termcolor import colored has_color = True except Exception: has_color = False def main(): parser = ArgumentParser("SentencePiece parity checker") parser.add_argument( "--input-file", "-i", type=str, required=True, help="Which files do you want to train from", ) parser.add_argument( "--model-file", "-m", type=str, required=False, default=None, help="Use a pretrained token file", ) parser.add_argument( "--model-prefix", type=str, default="spm_parity", help="Model prefix for spm_train", ) parser.add_argument( "--vocab-size", "-v", type=int, default=8000, help="Vocab size for spm_train", ) parser.add_argument( "--verbose", action="store_true", help="Verbosity", ) parser.add_argument( "--train", action="store_true", help="Instead of checking the encoder part, we check the trainer part", ) parser.add_argument( "--from-spm", action="store_true", help="Directly load the spm file with it's own normalizer", ) args = parser.parse_args() trained = False if args.model_file is None: spm.SentencePieceTrainer.Train( f"--input={args.input_file} --model_prefix={args.model_prefix}" f" --character_coverage=1.0" f" --max_sentence_length=40000" f" --num_threads=1" f" --vocab_size={args.vocab_size}" ) trained = True args.model_file = f"{args.model_prefix}.model" try: if args.train: check_train(args) else: check_encode(args) finally: if trained: os.remove(f"{args.model_prefix}.model") os.remove(f"{args.model_prefix}.vocab") def check_train(args): sp = spm.SentencePieceProcessor() sp.Load(args.model_file) tokenizer = tokenizers.SentencePieceUnigramTokenizer() tokenizer.train(args.input_file, show_progress=False) spm_tokens = 0 tokenizer_tokens = 0 with open(args.input_file, "r") as f: for i, line in enumerate(f): line = line.strip() ids = sp.EncodeAsIds(line) encoded = tokenizer.encode(line) spm_tokens += len(ids) tokenizer_tokens += len(encoded.ids) vocab = [0 for i in range(args.vocab_size)] spm_vocab = [0 for i in range(args.vocab_size)] for token, index in tokenizer.get_vocab().items(): vocab[index] = token for i in range(args.vocab_size): spm_vocab[i] = sp.id_to_piece(i) # 0 is unk in tokenizers, 0, 1, 2 are unk bos, eos in spm by default. for i, (token, spm_token) in enumerate(zip(vocab[1:], spm_vocab[3:])): if token != spm_token: print(f"First different token is token {i} ({token} != {spm_token})") break print(f"Tokenizer used {tokenizer_tokens}, where spm used {spm_tokens}") assert tokenizer_tokens < spm_tokens, "Our trainer should be at least more efficient than the SPM one" print("Ok our trainer is at least more efficient than the SPM one") def check_diff(spm_diff, tok_diff, sp, tok): if spm_diff == list(reversed(tok_diff)): # AAA -> AA+A vs A+AA case. return True elif len(spm_diff) == len(tok_diff) and tok.decode(spm_diff) == tok.decode(tok_diff): # Second order OK # Barrich -> Barr + ich vs Bar + rich return True spm_reencoded = sp.encode(sp.decode(spm_diff)) tok_reencoded = tok.encode(tok.decode(spm_diff)).ids if spm_reencoded != spm_diff and spm_reencoded == tok_reencoded: # Type 3 error. # Snehagatha -> # Sne, h, aga, th, a # Sne, ha, gat, ha # Encoding the wrong with sp does not even recover what spm gave us # It fits tokenizer however... 
return True return False def check_details(line, spm_ids, tok_ids, sp, tok): # Encoding can be the same with same result AAA -> A + AA vs AA + A # We can check that we use at least exactly the same number of tokens. for i, (spm_id, tok_id) in enumerate(zip(spm_ids, tok_ids)): if spm_id != tok_id: break first = i for i, (spm_id, tok_id) in enumerate(zip(reversed(spm_ids), reversed(tok_ids))): if spm_id != tok_id: break last = len(spm_ids) - i spm_diff = spm_ids[first:last] tok_diff = tok_ids[first:last] if check_diff(spm_diff, tok_diff, sp, tok): return True if last - first > 5: # We might have twice a single problem, attempt to subdivide the disjointed tokens into smaller problems spms = Counter(spm_ids[first:last]) toks = Counter(tok_ids[first:last]) removable_tokens = {spm_ for (spm_, si) in spms.items() if toks.get(spm_, 0) == si} min_width = 3 for i in range(last - first - min_width): if all(spm_ids[first + i + j] in removable_tokens for j in range(min_width)): possible_matches = [ k for k in range(last - first - min_width) if tok_ids[first + k : first + k + min_width] == spm_ids[first + i : first + i + min_width] ] for j in possible_matches: if check_diff(spm_ids[first : first + i], tok_ids[first : first + j], sp, tok) and check_details( line, spm_ids[first + i : last], tok_ids[first + j : last], sp, tok, ): return True print(f"Spm: {[tok.decode([spm_ids[i]]) for i in range(first, last)]}") try: print(f"Tok: {[tok.decode([tok_ids[i]]) for i in range(first, last)]}") except Exception: pass ok_start = tok.decode(spm_ids[:first]) ok_end = tok.decode(spm_ids[last:]) wrong = tok.decode(spm_ids[first:last]) print() if has_color: print(f"{colored(ok_start, 'grey')}{colored(wrong, 'red')}{colored(ok_end, 'grey')}") else: print(wrong) return False def check_encode(args): sp = spm.SentencePieceProcessor() sp.Load(args.model_file) if args.from_spm: tok = tokenizers.SentencePieceUnigramTokenizer.from_spm(args.model_file) else: vocab = [(sp.id_to_piece(i), sp.get_score(i)) for i in range(sp.piece_size())] unk_id = sp.unk_id() tok = tokenizers.SentencePieceUnigramTokenizer(vocab, unk_id) perfect = 0 imperfect = 0 wrong = 0 now = datetime.datetime.now spm_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(args.input_file, "r", encoding="utf-8-sig") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = sp.EncodeAsIds(line) spm_time = now() encoded = tok.encode(line) tok_time = now() spm_total_time += spm_time - start tok_total_time += tok_time - spm_time if args.verbose: if i % 10000 == 0: print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})") print(f"SPM: {spm_total_time} - TOK: {tok_total_time}") if ids != encoded.ids: if check_details(line, ids, encoded.ids, sp, tok): imperfect += 1 continue else: wrong += 1 else: perfect += 1 assert ( ids == encoded.ids ), f"line {i}: {line} : \n\n{ids}\n{encoded.ids}\n{list(zip(encoded.ids, encoded.tokens))}" print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})") total = perfect + imperfect + wrong print(f"Accuracy {perfect * 100 / total:.2f} Slowdown : {tok_total_time/ spm_total_time:.2f}") if __name__ == "__main__": main()
tokenizers/bindings/python/scripts/spm_parity_check.py/0
{ "file_path": "tokenizers/bindings/python/scripts/spm_parity_check.py", "repo_id": "tokenizers", "token_count": 4110 }
284
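The core of `check_encode` in the parity script above is rebuilding a Unigram tokenizer from an existing SentencePiece model so the two can be compared token-for-token. A condensed sketch, assuming `sentencepiece` is installed and that an `spm_parity.model` file exists:

import sentencepiece as spm
import tokenizers

sp = spm.SentencePieceProcessor()
sp.Load("spm_parity.model")

# Same vocab extraction as check_encode: (piece, score) pairs plus the unk id.
vocab = [(sp.id_to_piece(i), sp.get_score(i)) for i in range(sp.piece_size())]
tok = tokenizers.SentencePieceUnigramTokenizer(vocab, sp.unk_id())

line = "hello world"
print(sp.EncodeAsIds(line))
print(tok.encode(line).ids)  # ideally identical; the script above reports any divergence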
use tokenizers as tk; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use super::{ DestroyPtr, PyNormalizedString, PyNormalizedStringRefMut, RefMutContainer, RefMutGuard, }; use crate::encoding::PyEncoding; use crate::error::ToPyResult; use crate::token::PyToken; use tk::{OffsetReferential, OffsetType, Offsets, PreTokenizedString, Token}; fn split(pretok: &mut PreTokenizedString, func: &Bound<'_, PyAny>) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`split` expect a callable with the signature: \ `fn(index: int, normalized: NormalizedString) -> List[NormalizedString]`", )) } else { ToPyResult(pretok.split(|i, normalized| { let output = func.call((i, PyNormalizedString::from(normalized)), None)?; Ok(output .extract::<Vec<PyNormalizedString>>()? .into_iter() .map(tk::NormalizedString::from)) })) .into() } } fn normalize(pretok: &mut PreTokenizedString, func: &Bound<'_, PyAny>) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`normalize` expect a callable with the signature: \ `fn(normalized: NormalizedString)`", )) } else { ToPyResult(pretok.normalize(|normalized| { let norm = PyNormalizedStringRefMut::new(normalized); func.call((norm.get(),), None)?; Ok(()) })) .into() } } fn tokenize(pretok: &mut PreTokenizedString, func: &Bound<'_, PyAny>) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`tokenize` expect a callable with the signature: \ `fn(str) -> List[Token]`", )) } else { ToPyResult(pretok.tokenize(|normalized| { let output = func.call((normalized.get(),), None)?; Ok(output .extract::<&PyList>()? .into_iter() .map(|obj| Ok(Token::from(obj.extract::<PyToken>()?))) .collect::<PyResult<Vec<_>>>()?) })) .into() } } /// This is an enum #[derive(Clone)] pub struct PyOffsetReferential(OffsetReferential); impl FromPyObject<'_> for PyOffsetReferential { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "original" => Ok(OffsetReferential::Original), "normalized" => Ok(OffsetReferential::Normalized), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetReferential, expected one of `original, normalized`", )), }?)) } } #[derive(Clone)] pub struct PyOffsetType(OffsetType); impl FromPyObject<'_> for PyOffsetType { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "byte" => Ok(OffsetType::Byte), "char" => Ok(OffsetType::Char), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetType, expected one of `byte, char`", )), }?)) } } type PySplit = (String, Offsets, Option<Vec<PyToken>>); fn get_splits( pretok: &PreTokenizedString, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { pretok .get_splits(offset_referential.0, offset_type.0) .into_iter() .map(|(s, o, t)| { ( s.to_owned(), o, t.as_ref() .map(|tokens| tokens.iter().map(|t| t.clone().into()).collect()), ) }) .collect() } fn to_encoding( pretok: &PreTokenizedString, type_id: u32, word_idx: Option<u32>, ) -> PyResult<PyEncoding> { Ok(ToPyResult( pretok .clone() .into_encoding(word_idx, type_id, tk::OffsetType::Char), ) .into_py()? .into()) } /// PreTokenizedString /// /// Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the /// underlying string, while keeping track of the alignment information (offsets). /// /// The PreTokenizedString manages what we call `splits`. 
Each split represents a substring /// which is a subpart of the original string, with the relevant offsets and tokens. /// /// When calling one of the methods used to modify the PreTokenizedString (namely one of /// `split`, `normalize` or `tokenize), only the `splits` that don't have any associated /// tokens will get modified. /// /// Args: /// sequence: str: /// The string sequence used to initialize this PreTokenizedString #[pyclass(module = "tokenizers", name = "PreTokenizedString")] pub struct PyPreTokenizedString { pub(crate) pretok: tk::PreTokenizedString, } impl From<PreTokenizedString> for PyPreTokenizedString { fn from(pretok: PreTokenizedString) -> Self { Self { pretok } } } impl From<PyPreTokenizedString> for PreTokenizedString { fn from(pretok: PyPreTokenizedString) -> Self { pretok.pretok } } #[pymethods] impl PyPreTokenizedString { #[new] #[pyo3(text_signature = "(self, sequence)")] fn new(s: &str) -> Self { PreTokenizedString::from(s).into() } /// Split the PreTokenizedString using the given `func` /// /// Args: /// func: Callable[[index, NormalizedString], List[NormalizedString]]: /// The function used to split each underlying split. /// It is expected to return a list of `NormalizedString`, that represent the new /// splits. If the given `NormalizedString` does not need any splitting, we can /// just return it directly. /// In order for the offsets to be tracked accurately, any returned `NormalizedString` /// should come from calling either `.split` or `.slice` on the received one. #[pyo3(text_signature = "(self, func)")] fn split(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { split(&mut self.pretok, func) } /// Normalize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[NormalizedString], None]: /// The function used to normalize each underlying split. This function /// does not need to return anything, just calling the methods on the provided /// NormalizedString allow its modification. #[pyo3(text_signature = "(self, func)")] fn normalize(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { normalize(&mut self.pretok, func) } /// Tokenize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[str], List[Token]]: /// The function used to tokenize each underlying split. This function must return /// a list of Token generated from the input str. #[pyo3(text_signature = "(self, func)")] fn tokenize(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { tokenize(&mut self.pretok, func) } /// Return an Encoding generated from this PreTokenizedString /// /// Args: /// type_id: int = 0: /// The type_id to be used on the generated Encoding. /// /// word_idx: Optional[int] = None: /// An optional word index to be used for each token of this Encoding. If provided, /// all the word indices in the generated Encoding will use this value, instead /// of the one automatically tracked during pre-tokenization. /// /// Returns: /// An Encoding #[pyo3(signature = (type_id = 0, word_idx = None))] #[pyo3(text_signature = "(self, type_id=0, word_idx=None)")] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { to_encoding(&self.pretok, type_id, word_idx) } /// Get the splits currently managed by the PreTokenizedString /// /// Args: /// offset_referential: :obj:`str` /// Whether the returned splits should have offsets expressed relative /// to the original string, or the normalized one. choices: "original", "normalized". 
/// /// offset_type: :obj:`str` /// Whether the returned splits should have offsets expressed in bytes or chars. /// When slicing an str, we usually want to use chars, which is the default value. /// Now in some cases it might be interesting to get these offsets expressed in bytes, /// so it is possible to change this here. /// choices: "char", "bytes" /// /// Returns /// A list of splits #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] #[pyo3(text_signature = "(self, offset_referential=\"original\", offset_type=\"char\")")] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { get_splits(&self.pretok, offset_referential, offset_type) } } #[pyclass(module = "tokenizers", name = "PreTokenizedString")] #[derive(Clone)] pub struct PyPreTokenizedStringRefMut { inner: RefMutContainer<PreTokenizedString>, } impl DestroyPtr for PyPreTokenizedStringRefMut { fn destroy(&mut self) { self.inner.destroy(); } } impl PyPreTokenizedStringRefMut { pub fn new(pretok: &mut tk::PreTokenizedString) -> RefMutGuard<Self> { // SAFETY: This is safe because we return a RefMutGuard here. // The compiler will make sure the &mut stays valid as necessary. RefMutGuard::new(Self { inner: RefMutContainer::new(pretok), }) } pub fn destroyed_error() -> PyErr { exceptions::PyException::new_err( "Cannot use a PreTokenizedStringRefMut outside `pre_tokenize`", ) } } #[pymethods] impl PyPreTokenizedStringRefMut { fn split(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { self.inner .map_mut(|pretok| split(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn normalize(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { self.inner .map_mut(|pretok| normalize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn tokenize(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { self.inner .map_mut(|pretok| tokenize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = (type_id = 0, word_idx = None))] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { self.inner .map(|pretok| to_encoding(pretok, type_id, word_idx)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> PyResult<Vec<PySplit>> { self.inner .map(|pretok| get_splits(pretok, offset_referential, offset_type)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error) } }
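The doc comments above describe how a `PreTokenizedString` tracks its `splits`; a small usage sketch from the Python side may make that bookkeeping more concrete. This is illustrative only: it assumes the `NormalizedString.split` / `lowercase` helpers and the `"removed"` split-behavior string exposed by recent `tokenizers` releases.

```python
from tokenizers import PreTokenizedString

pretok = PreTokenizedString("Hello, World!")

# Split on spaces: the callback receives (index, NormalizedString) and must return
# pieces obtained via `.split()` / `.slice()` so that offsets stay aligned.
pretok.split(lambda i, normalized: normalized.split(" ", "removed"))

# Lowercase every split in place; splits that already carry tokens are left untouched.
pretok.normalize(lambda normalized: normalized.lowercase())

# Each entry is (piece, (start, end), tokens or None), offsets relative to the original string.
print(pretok.get_splits())
```

Once every split has been tokenized (via `tokenize`), `to_encoding()` turns the whole object into an `Encoding` while keeping the alignment information.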
tokenizers/bindings/python/src/utils/pretokenization.rs/0
{ "file_path": "tokenizers/bindings/python/src/utils/pretokenization.rs", "repo_id": "tokenizers", "token_count": 4930 }
285
from tokenizers import Tokenizer from ..utils import data_dir, doc_pipeline_bert_tokenizer, doc_wiki_tokenizer disable_printing = True original_print = print def print(*args, **kwargs): if not disable_printing: original_print(*args, **kwargs) class TestPipeline: def test_pipeline(self, doc_wiki_tokenizer): try: # START reload_tokenizer from tokenizers import Tokenizer tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json") # END reload_tokenizer except Exception: tokenizer = Tokenizer.from_file(doc_wiki_tokenizer) # START setup_normalizer from tokenizers import normalizers from tokenizers.normalizers import NFD, StripAccents normalizer = normalizers.Sequence([NFD(), StripAccents()]) # END setup_normalizer # START test_normalizer normalizer.normalize_str("Héllò hôw are ü?") # "Hello how are u?" # END test_normalizer assert normalizer.normalize_str("Héllò hôw are ü?") == "Hello how are u?" # START replace_normalizer tokenizer.normalizer = normalizer # END replace_normalizer # START setup_pre_tokenizer from tokenizers.pre_tokenizers import Whitespace pre_tokenizer = Whitespace() pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.") # [("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)), # ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ('m', (22, 23)), ("fine", (24, 28)), # (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40))] # END setup_pre_tokenizer assert pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.") == [ ("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)), ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ("m", (22, 23)), ("fine", (24, 28)), (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40)), ] # START combine_pre_tokenizer from tokenizers import pre_tokenizers from tokenizers.pre_tokenizers import Digits pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)]) pre_tokenizer.pre_tokenize_str("Call 911!") # [("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9))] # END combine_pre_tokenizer assert pre_tokenizer.pre_tokenize_str("Call 911!") == [ ("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9)), ] # START replace_pre_tokenizer tokenizer.pre_tokenizer = pre_tokenizer # END replace_pre_tokenizer # START setup_processor from tokenizers.processors import TemplateProcessing tokenizer.post_processor = TemplateProcessing( single="[CLS] $A [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[("[CLS]", 1), ("[SEP]", 2)], ) # END setup_processor # START test_decoding output = tokenizer.encode("Hello, y'all! How are you 😁 ?") print(output.ids) # [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2] tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]) # "Hello , y ' all ! How are you ?" # END test_decoding assert output.ids == [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2] assert ( tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]) == "Hello , y ' all ! How are you ?" 
) @staticmethod def slow_train(): # START bert_setup_tokenizer from tokenizers import Tokenizer from tokenizers.models import WordPiece bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) # END bert_setup_tokenizer # START bert_setup_normalizer from tokenizers import normalizers from tokenizers.normalizers import NFD, Lowercase, StripAccents bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()]) # END bert_setup_normalizer # START bert_setup_pre_tokenizer from tokenizers.pre_tokenizers import Whitespace bert_tokenizer.pre_tokenizer = Whitespace() # END bert_setup_pre_tokenizer # START bert_setup_processor from tokenizers.processors import TemplateProcessing bert_tokenizer.post_processor = TemplateProcessing( single="[CLS] $A [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[ ("[CLS]", 1), ("[SEP]", 2), ], ) # END bert_setup_processor # START bert_train_tokenizer from tokenizers.trainers import WordPieceTrainer trainer = WordPieceTrainer(vocab_size=30522, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]] bert_tokenizer.train(files, trainer) bert_tokenizer.save("data/bert-wiki.json") # END bert_train_tokenizer def test_bert_example(self, doc_pipeline_bert_tokenizer): try: bert_tokenizer = Tokenizer.from_file("data/bert-wiki.json") except Exception: bert_tokenizer = Tokenizer.from_file(doc_pipeline_bert_tokenizer) # START bert_test_decoding output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.") print(output.tokens) # ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"] bert_tokenizer.decode(output.ids) # "welcome to the tok ##eni ##zer ##s library ." # END bert_test_decoding assert bert_tokenizer.decode(output.ids) == "welcome to the tok ##eni ##zer ##s library ." # START bert_proper_decoding from tokenizers import decoders bert_tokenizer.decoder = decoders.WordPiece() bert_tokenizer.decode(output.ids) # "welcome to the tokenizers library." # END bert_proper_decoding assert bert_tokenizer.decode(output.ids) == "welcome to the tokenizers library." if __name__ == "__main__": import os from urllib import request from zipfile import ZipFile disable_printing = False if not os.path.isdir("data/wikitext-103-raw"): print("Downloading wikitext-103...") wiki_text, _ = request.urlretrieve( "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip" ) with ZipFile(wiki_text, "r") as z: print("Unzipping in data...") z.extractall("data") print("Now training...") TestPipeline.slow_train()
tokenizers/bindings/python/tests/documentation/test_pipeline.py/0
{ "file_path": "tokenizers/bindings/python/tests/documentation/test_pipeline.py", "repo_id": "tokenizers", "token_count": 3351 }
286
# Encode Inputs

<tokenizerslangcontent>
<python>
These types represent all the different kinds of input that a [`~tokenizers.Tokenizer`] accepts
when using [`~tokenizers.Tokenizer.encode_batch`].

## TextEncodeInput[[[[tokenizers.TextEncodeInput]]]]

<code>tokenizers.TextEncodeInput</code>

Represents a textual input for encoding. Can be either:

- A single sequence: [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence)
- A pair of sequences:
  - A Tuple of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence)
  - Or a List of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence) of size 2

alias of `Union[str, Tuple[str, str], List[str]]`.

## PreTokenizedEncodeInput[[[[tokenizers.PreTokenizedEncodeInput]]]]

<code>tokenizers.PreTokenizedEncodeInput</code>

Represents a pre-tokenized input for encoding. Can be either:

- A single sequence: [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence)
- A pair of sequences:
  - A Tuple of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence)
  - Or a List of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence) of size 2

alias of `Union[List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`.

## EncodeInput[[[[tokenizers.EncodeInput]]]]

<code>tokenizers.EncodeInput</code>

Represents all the possible types of input for encoding. Can be:

- When `is_pretokenized=False`: [TextEncodeInput](#tokenizers.TextEncodeInput)
- When `is_pretokenized=True`: [PreTokenizedEncodeInput](#tokenizers.PreTokenizedEncodeInput)

alias of `Union[str, Tuple[str, str], List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`.
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent>
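A short, hypothetical round-trip may make the distinction clearer; `"tokenizer.json"` is a placeholder for any trained tokenizer file.

```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("tokenizer.json")  # placeholder path

# TextEncodeInput: raw sequences, or (sequence, pair) tuples
tokenizer.encode_batch(["Hello world", ("A question?", "An answer.")])

# PreTokenizedEncodeInput: sequences that were already split into words
tokenizer.encode_batch([["Hello", "world"], ["Another", "sentence"]], is_pretokenized=True)
```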
tokenizers/docs/source-doc-builder/api/encode-inputs.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/encode-inputs.mdx", "repo_id": "tokenizers", "token_count": 716 }
287
from collections import defaultdict, abc from typing import cast from docutils import nodes from docutils.parsers.rst import Directive import sphinx from sphinx.locale import _ from sphinx.util.docutils import SphinxDirective from sphinx.errors import ExtensionError from conf import languages as LANGUAGES logger = sphinx.util.logging.getLogger(__name__) GLOBALNAME = "$GLOBAL$" def update(d, u): for k, v in u.items(): if isinstance(v, abc.Mapping): d[k] = update(d.get(k, {}), v) else: d[k] = v return d class EntityNode(nodes.General, nodes.Element): pass class EntitiesNode(nodes.General, nodes.Element): pass class AllEntities: def __init__(self): self.entities = defaultdict(dict) @classmethod def install(cls, env): if not hasattr(env, "entity_all_entities"): entities = cls() env.entity_all_entities = entities return env.entity_all_entities def merge(self, other): self.entities.update(other.entities) def purge(self, docname): for env_docname in [GLOBALNAME, docname]: self.entities[env_docname] = dict( [ (name, entity) for name, entity in self.entities[env_docname].items() if entity["docname"] != docname ] ) def _extract_entities(self, nodes): pass def _extract_options(self, nodes): pass def _add_entities(self, entities, language, is_global, docname): scope = GLOBALNAME if is_global else docname for entity in entities: name = f'{language}-{entity["name"]}' content = entity["content"] if name in self.entities[scope]: logger.warning( f'Entity "{name}" has already been defined{" globally" if is_global else ""}', location=docname, ) self.entities[scope][name] = {"docname": docname, "content": content} def _extract_global(self, nodes): for node in nodes: if node.tagname != "field": raise Exception(f"Expected a field, found {node.tagname}") name, _ = node.children if name.tagname != "field_name": raise Exception(f"Expected a field name here, found {name_node.tagname}") if str(name.children[0]) == "global": return True def _extract_entities(self, nodes): entities = [] for node in nodes: if node.tagname != "definition_list_item": raise Exception(f"Expected a list item here, found {node.tagname}") name_node, content_node = node.children if name_node.tagname != "term": raise Exception(f"Expected a term here, found {name_node.tagname}") if content_node.tagname != "definition": raise Exception(f"Expected a definition here, found {content_node.tagname}") name = str(name_node.children[0]) if len(content_node.children) == 1 and content_node.children[0].tagname == "paragraph": content = content_node.children[0].children[0] else: content = content_node entities.append({"name": name, "content": content}) return entities def extract(self, node, docname): is_global = False entities = [] language = None for node in node.children: if language is None and node.tagname != "paragraph": raise Exception(f"Expected language name:\n.. entities:: <LANGUAGE>") elif language is None and node.tagname == "paragraph": language = str(node.children[0]) if language not in LANGUAGES: raise Exception( f'Unknown language "{language}. 
Might be missing a newline after language"' ) elif node.tagname == "field_list": is_global = self._extract_global(node.children) elif node.tagname == "definition_list": entities.extend(self._extract_entities(node.children)) else: raise Exception(f"Expected a list of terms/options, found {node.tagname}") self._add_entities(entities, language, is_global, docname) def resolve_pendings(self, app): env = app.builder.env updates = defaultdict(dict) for env_docname in self.entities.keys(): for name, entity in self.entities[env_docname].items(): docname = entity["docname"] node = entity["content"] for node in node.traverse(sphinx.addnodes.pending_xref): contnode = cast(nodes.TextElement, node[0].deepcopy()) newnode = None typ = node["reftype"] target = node["reftarget"] refdoc = node.get("refdoc", docname) domain = None try: if "refdomain" in node and node["refdomain"]: # let the domain try to resolve the reference try: domain = env.domains[node["refdomain"]] except KeyError as exc: raise NoUri(target, typ) from exc newnode = domain.resolve_xref( env, refdoc, app.builder, typ, target, node, contnode ) except NoUri: newnode = contnode updates[env_docname][name] = { "docname": docname, "content": newnode or contnode, } update(self.entities, updates) def get(self, language, name, docname): name = f"{language}-{name}" if name in self.entities[docname]: return self.entities[docname][name] elif name in self.entities[GLOBALNAME]: return self.entities[GLOBALNAME][name] else: return None class EntitiesDirective(SphinxDirective): has_content = True def run(self): content = nodes.definition_list() self.state.nested_parse(self.content, self.content_offset, content) try: entities = AllEntities.install(self.env) entities.extract(content, self.env.docname) except Exception as err: raise self.error(f'Malformed directive "entities": {err}') return [] def entity_role(name, rawtext, text, lineno, inliner, options={}, content=[]): node = EntityNode() node.entity = text return [node], [] def process_entity_nodes(app, doctree, docname): """ Replace all the entities by their content """ env = app.builder.env entities = AllEntities.install(env) entities.resolve_pendings(app) language = None try: language = next(l for l in LANGUAGES if l in app.tags) except Exception: logger.warning(f"No language tag specified, not resolving entities in {docname}") for node in doctree.traverse(EntityNode): if language is None: node.replace_self(nodes.Text(_(node.entity), _(node.entity))) else: entity = entities.get(language, node.entity, docname) if entity is None: node.replace_self(nodes.Text(_(node.entity), _(node.entity))) logger.warning(f'Entity "{node.entity}" has not been defined', location=node) else: node.replace_self(entity["content"]) def purge_entities(app, env, docname): """ Purge any entity that comes from the given docname """ entities = AllEntities.install(env) entities.purge(docname) def merge_entities(app, env, docnames, other): """ Merge multiple environment entities """ entities = AllEntities.install(env) other_entities = AllEntities.install(other) entities.merge(other_entities) def setup(app): app.add_node(EntityNode) app.add_node(EntitiesNode) app.add_directive("entities", EntitiesDirective) app.add_role("entity", entity_role) app.connect("doctree-resolved", process_entity_nodes) app.connect("env-merge-info", merge_entities) app.connect("env-purge-doc", purge_entities) return { "version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True, }
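As a purely illustrative aside, the nested-merge semantics that `resolve_pendings` relies on (via the `update` helper defined at the top of this module) look like this; the dictionary contents are made up.

```python
existing = {"doc_a": {"python-Tokenizer": {"docname": "doc_a", "content": "pending"}}}
resolved = {"doc_a": {"python-Tokenizer": {"content": "resolved node"}}}

# `update` merges nested dictionaries key by key instead of replacing them wholesale.
update(existing, resolved)

assert existing["doc_a"]["python-Tokenizer"] == {"docname": "doc_a", "content": "resolved node"}
```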
tokenizers/docs/source/_ext/entities.py/0
{ "file_path": "tokenizers/docs/source/_ext/entities.py", "repo_id": "tokenizers", "token_count": 4032 }
288
.. entities:: python :global: class class classmethod class method Tokenizer :class:`~tokenizers.Tokenizer` Tokenizer.train :meth:`~tokenizers.Tokenizer.train` Tokenizer.save :meth:`~tokenizers.Tokenizer.save` Tokenizer.from_file :meth:`~tokenizers.Tokenizer.from_file` Tokenizer.encode :meth:`~tokenizers.Tokenizer.encode` Tokenizer.encode_batch :meth:`~tokenizers.Tokenizer.encode_batch` Tokenizer.decode :meth:`~tokenizers.Tokenizer.decode` Tokenizer.decode_batch :meth:`~tokenizers.Tokenizer.decode_batch` Tokenizer.token_to_id :meth:`~tokenizers.Tokenizer.token_to_id` Tokenizer.enable_padding :meth:`~tokenizers.Tokenizer.enable_padding` Encoding :class:`~tokenizers.Encoding` TemplateProcessing :class:`~tokenizers.processors.TemplateProcessing` Normalizer :class:`~tokenizers.normalizers.Normalizer` normalizers.Sequence :class:`~tokenizers.normalizers.Sequence` pre_tokenizers.Whitespace :class:`~tokenizers.pre_tokenizers.Whitespace` PreTokenizer :class:`~tokenizers.pre_tokenizers.PreTokenizer` models.BPE :class:`~tokenizers.models.BPE` models.Unigram :class:`~tokenizers.models.Unigram` models.WordLevel :class:`~tokenizers.models.WordLevel` models.WordPiece :class:`~tokenizers.models.WordPiece` Decoder :class:`~tokenizers.decoders.Decoder` .. entities:: rust :global: class struct classmethod static method Tokenizer :rust_struct:`~tokenizers::tokenizer::Tokenizer` Tokenizer.train :rust_meth:`~tokenizers::tokenizer::Tokenizer::train` Tokenizer.save :rust_meth:`~tokenizers::tokenizer::Tokenizer::save` Tokenizer.from_file :rust_meth:`~tokenizers::tokenizer::Tokenizer::from_file` Tokenizer.encode :rust_meth:`~tokenizers::tokenizer::Tokenizer::encode` Tokenizer.encode_batch :rust_meth:`~tokenizers::tokenizer::Tokenizer::encode_batch` Tokenizer.decode :rust_meth:`~tokenizers::tokenizer::Tokenizer::decode` Tokenizer.decode_batch :rust_meth:`~tokenizers::tokenizer::Tokenizer::decode_batch` Tokenizer.token_to_id :rust_meth:`~tokenizers::tokenizer::Tokenizer::token_to_id` Tokenizer.enable_padding :rust_meth:`~tokenizers::tokenizer::Tokenizer::enable_padding` Encoding :rust_struct:`~tokenizers::tokenizer::Encoding` TemplateProcessing :rust_struct:`~tokenizers::processors::template::TemplateProcessing` Normalizer :rust_trait:`~tokenizers::tokenizer::Normalizer` normalizers.Sequence :rust_struct:`~tokenizers::normalizers::utils::Sequence` pre_tokenizers.Whitespace :rust_struct:`~tokenizers::normalizers::whitespace::Whitespace` PreTokenizer :rust_trait:`~tokenizers::tokenizer::PreTokenizer` models.BPE :rust_struct:`~tokenizers::models::bpe::BPE` models.Unigram :rust_struct:`~tokenizers::models::unigram::Unigram` models.WordLevel :rust_struct:`~tokenizers::models::wordlevel::WordLevel` models.WordPiece :rust_struct:`~tokenizers::models::wordpiece::WordPiece` Decoder :rust_trait:`~tokenizers::tokenizer::Decoder` .. 
entities:: node :global: class class classmethod static method Tokenizer :obj:`Tokenizer` Tokenizer.train :obj:`Tokenizer.train()` Tokenizer.save :obj:`Tokenizer.save()` Tokenizer.from_file :obj:`Tokenizer.fromFile()` Tokenizer.encode :obj:`Tokenizer.encode()` Tokenizer.encode_batch :obj:`Tokenizer.encodeBatch()` Tokenizer.decode :obj:`Tokenizer.decode()` Tokenizer.decode_batch :obj:`Tokenizer.decodeBatch()` Tokenizer.token_to_id :obj:`Tokenizer.tokenToId()` Tokenizer.enable_padding :obj:`Tokenizer.setPadding()` Encoding :obj:`Encoding` TemplateProcessing :obj:`TemplateProcessing` Normalizer :obj:`Normalizer` normalizers.Sequence :obj:`Sequence` pre_tokenizers.Whitespace :obj:`Whitespace` PreTokenizer :obj:`PreTokenizer` models.BPE :obj:`BPE` models.Unigram :obj:`Unigram` models.WordLevel :obj:`WordLevel` models.WordPiece :obj:`WordPiece` Decoder :obj:`Decoder`
tokenizers/docs/source/entities.inc/0
{ "file_path": "tokenizers/docs/source/entities.inc", "repo_id": "tokenizers", "token_count": 2078 }
289
#[macro_use] extern crate criterion; mod common; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::Path; use criterion::Criterion; use tokenizers::models::bpe::{BpeTrainerBuilder, BPE}; use tokenizers::models::TrainerWrapper; use tokenizers::pre_tokenizers::byte_level::ByteLevel; use tokenizers::pre_tokenizers::whitespace::Whitespace; use tokenizers::tokenizer::{AddedToken, EncodeInput}; use tokenizers::Tokenizer; use common::{iter_bench_encode, iter_bench_encode_batch, iter_bench_train}; use std::ops::Deref; static BATCH_SIZE: usize = 1_000; fn create_gpt2_tokenizer(bpe: BPE) -> Tokenizer { let mut tokenizer = Tokenizer::new(bpe); tokenizer.with_pre_tokenizer(Some(ByteLevel::default())); tokenizer.with_decoder(Some(ByteLevel::default())); tokenizer.add_tokens(&[AddedToken::from("ing", false).single_word(false)]); tokenizer.add_special_tokens(&[AddedToken::from("[ENT]", true).single_word(true)]); tokenizer } fn bench_gpt2(c: &mut Criterion) { let bpe = BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt") .build() .unwrap(); let tokenizer = create_gpt2_tokenizer(bpe); let mut lines: Vec<EncodeInput> = vec![]; let mut batches: Vec<Vec<EncodeInput>> = vec![vec![]]; for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() { let line: EncodeInput = line.unwrap().into(); lines.push(line.clone()); if batches.last().unwrap().len() >= BATCH_SIZE { batches.push(vec![]); } batches.last_mut().unwrap().push(line); } c.bench_function("BPE GPT2 encode", |b| { b.iter_custom(|iters| iter_bench_encode(iters, tokenizer.deref(), &lines)) }); c.bench_function("BPE GPT2 encode batch", |b| { b.iter_custom(|iters| iter_bench_encode_batch(iters, tokenizer.deref(), &batches)) }); let bpe = BPE::from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt") .cache_capacity(0) .build() .unwrap(); let tokenizer = create_gpt2_tokenizer(bpe); c.bench_function("BPE GPT2 encode, no cache", |b| { b.iter_custom(|iters| iter_bench_encode(iters, &tokenizer, &lines)) }); c.bench_function("BPE GPT2 encode batch, no cache", |b| { b.iter_custom(|iters| iter_bench_encode_batch(iters, &tokenizer, &batches)) }); } fn bench_train(c: &mut Criterion) { let mut trainer: TrainerWrapper = BpeTrainerBuilder::default() .show_progress(false) .build() .into(); let mut tokenizer = Tokenizer::new(BPE::default()).into_inner(); tokenizer.with_pre_tokenizer(Some(Whitespace {})); c.bench_function("BPE Train vocabulary (small)", |b| { b.iter_custom(|iters| { iter_bench_train( iters, &mut tokenizer, &mut trainer, vec!["data/small.txt".to_string()], ) }) }); let mut tokenizer = Tokenizer::new(BPE::default()).into_inner(); tokenizer.with_pre_tokenizer(Some(Whitespace {})); c.bench_function("BPE Train vocabulary (big)", |b| { b.iter_custom(|iters| { iter_bench_train( iters, &mut tokenizer, &mut trainer, vec!["data/big.txt".to_string()], ) }) }); } criterion_group! { name = benches; config = Criterion::default().sample_size(20); targets = bench_gpt2 } criterion_group! { name = benches_train; config = Criterion::default().sample_size(10); targets = bench_train } criterion_main!(benches, benches_train);
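A rough Python analogue of what the Criterion benchmarks above measure might look like the following; it reuses the same data files the Rust benchmark expects and only aims to illustrate the shape of the measurement, not to produce numbers comparable with the Rust results.

```python
import time

from tokenizers import Tokenizer, decoders, models, pre_tokenizers

tokenizer = Tokenizer(models.BPE.from_file("data/gpt2-vocab.json", "data/gpt2-merges.txt"))
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
tokenizer.decoder = decoders.ByteLevel()

with open("data/big.txt", encoding="utf-8") as f:
    lines = [line.rstrip("\n") for line in f]

start = time.perf_counter()
tokenizer.encode_batch(lines[:1000])
print(f"encode_batch on 1000 lines took {time.perf_counter() - start:.3f}s")
```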
tokenizers/tokenizers/benches/bpe_benchmark.rs/0
{ "file_path": "tokenizers/tokenizers/benches/bpe_benchmark.rs", "repo_id": "tokenizers", "token_count": 1631 }
290
use crate::tokenizer::{Decoder, Result};

use serde::{Deserialize, Serialize};

#[derive(Deserialize, Clone, Debug, Serialize, Default)]
/// Strip removes up to `start` leading and `stop` trailing occurrences of the
/// `content` character from every token. This is typically used to drop a
/// padding or prefix character that was injected during tokenization.
#[serde(tag = "type")]
#[non_exhaustive]
pub struct Strip {
    pub content: char,
    pub start: usize,
    pub stop: usize,
}

impl Strip {
    pub fn new(content: char, start: usize, stop: usize) -> Self {
        Self {
            content,
            start,
            stop,
        }
    }
}

impl Decoder for Strip {
    fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
        Ok(tokens
            .into_iter()
            .map(|token| {
                let chars: Vec<char> = token.chars().collect();

                // Count how many of the first `start` characters match `content`.
                let mut start_cut = 0;
                for (i, &c) in chars.iter().enumerate().take(self.start) {
                    if c == self.content {
                        start_cut = i + 1;
                        continue;
                    } else {
                        break;
                    }
                }

                // Count how many of the last `stop` characters match `content`.
                let mut stop_cut = chars.len();
                for i in 0..self.stop {
                    let index = chars.len() - i - 1;
                    if chars[index] == self.content {
                        stop_cut = index;
                        continue;
                    } else {
                        break;
                    }
                }

                let new_token: String = chars[start_cut..stop_cut].iter().collect();
                new_token
            })
            .collect())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn decode() {
        let decoder = Strip::new('H', 1, 0);
        let res = decoder
            .decode_chain(vec!["Hey".into(), " friend!".into(), "HHH".into()])
            .unwrap();
        assert_eq!(res, vec!["ey", " friend!", "HH"]);

        let decoder = Strip::new('y', 0, 1);
        let res = decoder
            .decode_chain(vec!["Hey".into(), " friend!".into()])
            .unwrap();
        assert_eq!(res, vec!["He", " friend!"]);
    }
}
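Assuming the Python binding mirrors the constructor order used here (content character, number of leading characters to strip, number of trailing characters) and that `decode` concatenates the decoded pieces, the decoder can be exercised as follows.

```python
from tokenizers import decoders

# Strip at most one leading '_' from every token before the pieces are joined.
decoder = decoders.Strip("_", 1, 0)
print(decoder.decode(["_hello", "_world"]))  # expected: "helloworld"
```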
tokenizers/tokenizers/src/decoders/strip.rs/0
{ "file_path": "tokenizers/tokenizers/src/decoders/strip.rs", "repo_id": "tokenizers", "token_count": 1217 }
291
use super::{super::OrderedVocabIter, WordLevel, WordLevelBuilder}; use serde::{ de::{MapAccess, Visitor}, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; use std::collections::HashSet; impl Serialize for WordLevel { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut model = serializer.serialize_struct("WordLevel", 3)?; let ordered_vocab = OrderedVocabIter::new(&self.vocab_r); model.serialize_field("type", "WordLevel")?; model.serialize_field("vocab", &ordered_vocab)?; model.serialize_field("unk_token", &self.unk_token)?; model.end() } } impl<'de> Deserialize<'de> for WordLevel { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_struct( "WordLevel", &["type", "vocab", "unk_token"], WordLevelVisitor, ) } } struct WordLevelVisitor; impl<'de> Visitor<'de> for WordLevelVisitor { type Value = WordLevel; fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { write!(fmt, "struct WordLevel") } fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error> where V: MapAccess<'de>, { let mut builder = WordLevelBuilder::new(); let mut missing_fields = vec![ // for retrocompatibility the "type" field is not mandatory "unk_token", "vocab", ] .into_iter() .collect::<HashSet<_>>(); while let Some(key) = map.next_key::<String>()? { match key.as_ref() { "vocab" => builder = builder.vocab(map.next_value()?), "unk_token" => builder = builder.unk_token(map.next_value()?), "type" => match map.next_value()? { "WordLevel" => {} u => { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(u), &"WordLevel", )) } }, _ => {} } missing_fields.remove::<str>(&key); } if !missing_fields.is_empty() { Err(serde::de::Error::missing_field( missing_fields.iter().next().unwrap(), )) } else { Ok(builder.build().map_err(serde::de::Error::custom)?) } } } #[cfg(test)] mod tests { use crate::models::wordlevel::{Vocab, WordLevel, WordLevelBuilder}; #[test] fn serde() { let wl = WordLevel::default(); let wl_s = r#"{"type":"WordLevel","vocab":{},"unk_token":"<unk>"}"#; assert_eq!(serde_json::to_string(&wl).unwrap(), wl_s); assert_eq!(serde_json::from_str::<WordLevel>(wl_s).unwrap(), wl); } #[test] fn incomplete_vocab() { let vocab: Vocab = [("<unk>".into(), 0), ("b".into(), 2)] .iter() .cloned() .collect(); let wordlevel = WordLevelBuilder::default() .vocab(vocab) .unk_token("<unk>".to_string()) .build() .unwrap(); let wl_s = r#"{"type":"WordLevel","vocab":{"<unk>":0,"b":2},"unk_token":"<unk>"}"#; assert_eq!(serde_json::to_string(&wordlevel).unwrap(), wl_s); assert_eq!(serde_json::from_str::<WordLevel>(wl_s).unwrap(), wordlevel); } #[test] fn deserialization_should_fail() { let missing_unk = r#"{"type":"WordLevel","vocab":{}}"#; assert!(serde_json::from_str::<WordLevel>(missing_unk) .unwrap_err() .to_string() .starts_with("missing field `unk_token`")); let wrong_type = r#"{"type":"WordPiece","vocab":{}}"#; assert!(serde_json::from_str::<WordLevel>(wrong_type) .unwrap_err() .to_string() .starts_with("invalid value: string \"WordPiece\", expected WordLevel")); } }
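From the Python side the same layout shows up in the `"model"` section of a saved tokenizer; a quick, illustrative round-trip (the vocabulary is made up) is sketched below.

```python
import json

from tokenizers import Tokenizer, models

model = models.WordLevel({"<unk>": 0, "hello": 1, "world": 2}, unk_token="<unk>")
tokenizer = Tokenizer(model)

# The serialized model matches the serde layout implemented above:
# {"type": "WordLevel", "vocab": {...}, "unk_token": "<unk>"}
print(json.loads(tokenizer.to_str())["model"])
```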
tokenizers/tokenizers/src/models/wordlevel/serialization.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/wordlevel/serialization.rs", "repo_id": "tokenizers", "token_count": 2084 }
292
use serde::{Deserialize, Serialize};

use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;

/// Pre-tokenizer that splits on a single delimiter character, dropping the
/// delimiter itself from the resulting splits.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
#[macro_rules_attribute(impl_serde_type!)]
pub struct CharDelimiterSplit {
    pub delimiter: char,
}

impl CharDelimiterSplit {
    pub fn new(delimiter: char) -> Self {
        Self { delimiter }
    }
}

impl PreTokenizer for CharDelimiterSplit {
    fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
        // TODO: Maybe add the option to specify the behavior
        pretokenized.split(|_, normalized| {
            normalized.split(self.delimiter, SplitDelimiterBehavior::Removed)
        })
    }
}
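The same pre-tokenizer is exposed to Python as `pre_tokenizers.CharDelimiterSplit`; a quick sketch of the `Removed` behaviour implemented above:

```python
from tokenizers import pre_tokenizers

pre_tok = pre_tokenizers.CharDelimiterSplit("-")
print(pre_tok.pre_tokenize_str("2024-01-01"))
# [('2024', (0, 4)), ('01', (5, 7)), ('01', (8, 10))]; the delimiter itself is dropped
```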
tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs", "repo_id": "tokenizers", "token_count": 296 }
293
use super::{ normalizer::Range, Model, NormalizedString, Normalizer, Offsets, PreTokenizedString, Token, }; use aho_corasick::{AhoCorasick, AhoCorasickBuilder, MatchKind}; use regex::Regex; use serde::{ser::SerializeSeq, Deserialize, Serialize, Serializer}; use std::collections::{HashMap, HashSet}; /// Represent a token added by the user on top of the existing Model vocabulary. /// AddedToken can be configured to specify the behavior they should have in various situations /// like: /// - Whether they should only match single words /// - Whether to include any whitespace on its left or right #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct AddedToken { /// The content of the added token pub content: String, /// Whether this token must be a single word or can break words pub single_word: bool, /// Whether this token should strip whitespaces on its left pub lstrip: bool, /// Whether this token should strip whitespaces on its right pub rstrip: bool, /// Whether this token should be normalized pub normalized: bool, /// Whether this token is special pub special: bool, } impl AddedToken { /// Build this token from the given content, specifying if it is intented to be a /// special token. Special tokens are not normalized by default. pub fn from<S: Into<String>>(content: S, special: bool) -> Self { Self { content: content.into(), normalized: !special, special, ..Default::default() } } /// Specify whether this token should only match on whole single words, and never /// part of a word. #[must_use] pub fn single_word(mut self, single_word: bool) -> Self { self.single_word = single_word; self } /// Specify whether this token should include all the whitespaces on its left, in /// order to strip them out. #[must_use] pub fn lstrip(mut self, lstrip: bool) -> Self { self.lstrip = lstrip; self } /// Specify whether this token should include all the whitespaces on its right, in /// order to strip them out. #[must_use] pub fn rstrip(mut self, rstrip: bool) -> Self { self.rstrip = rstrip; self } /// Specify whether this token should be normalized and match against its normalized /// version in the input text. #[must_use] pub fn normalized(mut self, normalized: bool) -> Self { self.normalized = normalized; self } /// Specify whether this token is special, meaning if it should be skipped when decoding #[must_use] pub fn special(mut self, special: bool) -> Self { self.special = special; self } } impl Default for AddedToken { fn default() -> Self { Self { content: String::new(), single_word: false, lstrip: false, rstrip: false, normalized: true, special: false, } } } // AddedTokens can be updated if value changed impl std::hash::Hash for AddedToken { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.content.hash(state); } } type MatchingSet = (AhoCorasick, Vec<u32>); lazy_static! 
{ static ref STARTS_WITH_WORD: Regex = Regex::new(r"^\w").unwrap(); static ref ENDS_WITH_WORD: Regex = Regex::new(r"\w$").unwrap(); static ref RIGHTMOST_SPACE_AT_START: Regex = Regex::new(r"^\s*").unwrap(); static ref LEFTMOST_SPACE_AT_END: Regex = Regex::new(r"\s*$").unwrap(); } fn ends_with_word(sentence: &str) -> bool { ENDS_WITH_WORD.is_match(sentence) } fn starts_with_word(sentence: &str) -> bool { STARTS_WITH_WORD.is_match(sentence) } fn space_leftmost_at_end(sentence: &str) -> usize { if let Some(match_) = LEFTMOST_SPACE_AT_END.find(sentence) { match_.start() } else { sentence.len() } } fn space_rightmost_at_start(sentence: &str) -> usize { if let Some(match_) = RIGHTMOST_SPACE_AT_START.find(sentence) { match_.end() } else { 0 } } /// /// A vocabulary built on top of the Model /// /// This provides a way to add new vocabulary to a Tokenizer that has already been trained, /// in a previous process, maybe by someone else. This is especially interesting in the case /// of fine-tunings, where we want to finetune a model while adding some new functionalities /// using some new special tokens, or maybe add some tokens in the case of unknown tokens, etc. /// /// One of the reasons we need to handle these tokens outside of the model is simply that /// for many models, it is not possible to add new tokens after the training process. For example, /// using BPE, the training process generates merges pairs along the vocabulary, and any token /// in the vocabulary can be decomposed in other tokens, down to the original alphabet. If we /// were to add new tokens after this training process, we couldn't make sure the merges pairs /// exist as required. /// #[derive(Clone, Debug)] pub struct AddedVocabulary { /// Contains the mapping from String (token content) to ID. This map contains both special /// tokens and classic added tokens that were added to the this vocabulary. added_tokens_map: HashMap<String, u32>, /// Contains the mapping from ID to AddedToken for all the added tokens, both special /// and classic. added_tokens_map_r: HashMap<u32, AddedToken>, /// Contains only the classic AddedToken, in the specific order the user gave them. added_tokens: Vec<AddedToken>, /// Contains only the special AddedToken, in the specific order the user gave them. special_tokens: Vec<AddedToken>, /// A Set, containing all the special token for easy access while decoding. This let's /// us remove them easily with an O(1) complexity. special_tokens_set: HashSet<String>, /// A RegexSet containing all the non-normalized patterns used to split on AddedTokens split_trie: MatchingSet, /// A RegexSet containing all the normalized patterns used to split on AddedTokens split_normalized_trie: MatchingSet, /// Whether or not special tokens should be splitted when encoding. 
This is equivalent to ignoring them encode_special_tokens: bool, } impl AddedVocabulary { pub fn new() -> Self { let trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build::<_, &&[u8]>([]) .expect("The trie should build correctly"); let normalized_trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build::<_, &&[u8]>([]) .expect("The normalized trie should build correctly"); Self { added_tokens_map: HashMap::new(), added_tokens_map_r: HashMap::new(), added_tokens: vec![], special_tokens: vec![], special_tokens_set: HashSet::new(), split_trie: (trie, vec![]), split_normalized_trie: (normalized_trie, vec![]), encode_special_tokens: false, } } /// Size of the additional vocabulary #[allow(dead_code)] // Suppress the "method is never used" warning pub fn len(&self) -> usize { self.added_tokens_map.len() } /// Whether or not this vocabulary is empty pub fn is_empty(&self) -> bool { self.added_tokens_map.is_empty() } /// Get the additional vocabulary pub fn get_vocab(&self) -> &HashMap<String, u32> { &self.added_tokens_map } /// Get the additional vocabulary with the AddedTokens pub fn get_added_tokens_decoder(&self) -> &HashMap<u32, AddedToken> { &self.added_tokens_map_r } /// Get the id matching one of our token if it exists pub fn token_to_id(&self, token: &str, model: &impl Model) -> Option<u32> { self.added_tokens_map .get(token) .copied() .or_else(|| model.token_to_id(token)) } /// Get the token matching the given id if it exists #[deprecated( since = "0.19.0", note = "please use `added_vocabulary.simple_id_to_token(id).or_else(|| model.id_to_token(id)` instead" )] pub fn id_to_token(&self, id: u32, model: &impl Model) -> Option<String> { self.added_tokens_map_r .get(&id) .map(|t| t.content.clone()) .or_else(|| model.id_to_token(id)) } pub fn simple_id_to_token(&self, id: u32) -> Option<String> { self.added_tokens_map_r.get(&id).map(|t| t.content.clone()) } // pub fn set_encode_special_tokens(&mut self, value: bool) { self.encode_special_tokens = value; } pub fn get_encode_special_tokens(&self) -> bool { self.encode_special_tokens } /// Check if a token is a special token pub fn is_special_token(&self, token: &str) -> bool { self.special_tokens_set.contains(token) } /// Add some special tokens to the vocabulary pub fn add_special_tokens<N: Normalizer>( &mut self, tokens: &[AddedToken], model: &impl Model, normalizer: Option<&N>, ) -> usize { self.add_tokens(tokens, model, normalizer) } /// Add some tokens to the vocabulary pub fn add_tokens<N: Normalizer>( &mut self, tokens: &[AddedToken], model: &impl Model, normalizer: Option<&N>, ) -> usize { // Handle special tokens (if any) for token in tokens { if token.special && !token.content.is_empty() && !self.special_tokens_set.contains(&token.content) { self.special_tokens.push(token.to_owned()); self.special_tokens_set.insert(token.content.clone()); } } // Then we delegate to `add_tokens`, that will take care of refreshing added tokens too. 
let mut ignored = 0; for token in tokens { if token.content.is_empty() || self.added_tokens_map_r.values().any(|val| val == token) { ignored += 1; continue; } // If a token is already part of the vocabulary, we mark it as added let new_id = if let Some(new_id) = self.token_to_id(&token.content, model) { new_id } else { self.added_tokens_map.values().cloned().max().map_or( model.get_vocab_size() as u32, |max| { if (max >= model.get_vocab_size() as u32) || model.get_vocab_size() == 0 { max + 1 } else { model.get_vocab_size() as u32 } }, ) }; // Make sure we modify the previous entry self.added_tokens_map .entry(token.content.clone()) .and_modify(|old_id| *old_id = new_id) .or_insert_with(|| new_id); // Update the current revert operation self.added_tokens_map_r .entry(new_id) .and_modify(|t| *t = token.clone()) .or_insert_with(|| token.clone()); // Make sure to remove previous entry (if the token gets a new id) // Finally add the token to the classic set if special if !self.special_tokens_set.contains(&token.content) { self.added_tokens.push(token.clone()); } } self.refresh_added_tokens(model, normalizer); // Return the number of added tokens tokens.len() - ignored } /// Reconstruct our internal RegexSet when new tokens are added to the vocabulary. /// /// We keep two different RegexSet, one that will take care of matching against the /// non-normalized string, and one matching against the normalized one. fn refresh_added_tokens<N: Normalizer>(&mut self, model: &impl Model, normalizer: Option<&N>) { type TupleTokenId<'a> = (&'a AddedToken, u32); let (normalized, non_normalized): (Vec<TupleTokenId>, Vec<TupleTokenId>) = self .special_tokens .iter() .chain(self.added_tokens.iter()) .map(|token| { ( token, self.token_to_id(&token.content, model) .expect("Missing additional token"), ) }) .partition(|(token, _)| token.normalized); let (tokens, ids): (Vec<&AddedToken>, Vec<u32>) = non_normalized.into_iter().unzip(); let trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build(tokens.iter().map(|token| &token.content)) .expect("Failed to build tried when refreshing tokens"); self.split_trie = (trie, ids); let (ntokens, nids): (Vec<&AddedToken>, Vec<u32>) = normalized.into_iter().unzip(); let patterns: Vec<_> = ntokens .iter() .map(|token| { let mut content = NormalizedString::from(token.content.as_ref()); if let Some(n) = normalizer { n.normalize(&mut content).unwrap(); } content }) .collect(); let normalized_trie = AhoCorasickBuilder::new() .match_kind(MatchKind::LeftmostLongest) .build(patterns.iter().map(|content| content.get())) .expect("Failed to build tried when refreshing tokens (normalized)"); self.split_normalized_trie = (normalized_trie, nids); } /// Find any AddedToken in the given sentence, using the provided MatchingSet. /// This method returns a list "splits", each of them being a pair of Offsets /// and an optional ID if it is an AddedToken. /// The list of splits cover the entire input string. 
fn find_matches(&self, sentence: &str, split_re: &MatchingSet) -> Vec<(Option<u32>, Offsets)> { if sentence.is_empty() { return vec![(None, (0, 0))]; } let mut start_offset = 0; let mut splits = vec![]; for mat in split_re.0.find_iter(sentence) { let mut start = mat.start(); let mut stop = mat.end(); let aho_id = mat.pattern(); let id = split_re.1[aho_id]; let added_token = &self.added_tokens_map_r.get(&id).unwrap(); if self.encode_special_tokens && self.special_tokens_set.contains(&added_token.content) { continue; } if added_token.single_word { let start_space = start == 0 || !ends_with_word(&sentence[..start]); let stop_space = stop == sentence.len() || !starts_with_word(&sentence[stop..]); if !stop_space || !start_space { // Discard not single word continue; } } if added_token.lstrip { // This will be strictly inferior to start and in correct sentence offset let newstart = space_leftmost_at_end(&sentence[..start]); // The previous match could have already matched those spaces // Ignore them if it's already matched start = std::cmp::max(newstart, start_offset); } if added_token.rstrip { // This will starting a the stop+1 character, so we need // to add the previous stop value stop += space_rightmost_at_start(&sentence[stop..]) } if start_offset < start { splits.push((None, (start_offset, start))); } splits.push((Some(id), (start, stop))); start_offset = stop; } let total_byte_len = sentence.len(); if start_offset != total_byte_len { splits.push((None, (start_offset, total_byte_len))); } splits } /// Split the input sentence to extract anything we found from the `MatchingSet`, as well as /// the list of corresponding IDs /// The list of IDs have the exact same number of elements than the Iterator. fn split_with_indices( &self, sentence: NormalizedString, split_re: &MatchingSet, ) -> Vec<(NormalizedString, Option<Vec<Token>>)> { self.find_matches(sentence.get(), split_re) .into_iter() .map(|(id, byte_offsets)| { let slice = sentence .slice(Range::Normalized(byte_offsets.0..byte_offsets.1)) .expect("AddedVocabulary bad split"); if let Some(id) = id { let value = slice.get().to_owned(); let len = value.len(); (slice, Some(vec![Token::new(id, value, (0, len))])) } else { (slice, None) } }) .collect() } /// Extract the additional vocabulary from the given sentence, normalizing it along the way. /// /// Some tokens should match against their normalized representation, as well as the /// non-normalized one. For example, when we expect to extract the token `yesterday` in the /// input sentence `I read a book Yesterday`, if the normalizer is supposed to lowercase /// everything, we expect a match. pub fn extract_and_normalize<N: Normalizer>( &self, normalizer: Option<&N>, sequence: &str, ) -> PreTokenizedString { let mut pretokenized: PreTokenizedString = sequence.into(); // 1. We extract all the non-normalized tokens from the non-normalized string pretokenized .split(|_, sequence| Ok(self.split_with_indices(sequence, &self.split_trie))) .expect("AddedVocabulary bad split"); // <s> normalized = False // "I read a book <s>Hey" -> "I read a book", " <s>", "Hey" // </s> normalized = True -> "▁</s>" // "I read a book</s>Hey" -> "I read a book</s>Hey" // Day normalized = True -> "Day" // "I read a book monday" -> "I read a book monday" // [DAY] normalized = False -> "Day" // "I read a [DAY] monday" -> "I read a " "[DAY]", "book monday" // 320055 // 2. 
Then extract the normalized tokens from the normalized pieces of the string pretokenized .split(|_, mut sequence| { normalizer.map(|n| n.normalize(&mut sequence)); Ok(self.split_with_indices(sequence, &self.split_normalized_trie)) }) .expect("AddedVocabulary bad split"); // ["I read a book", " <s>", "Hey"] -> ["▁I read a book", "▁ <s>", "▁Hey"] // ["▁I read a book", "▁ <s>", "▁Hey"] -> [.., "▁ ", "<s>", "▁Hey"] // </s> normalized = True -> "▁</s>" // "I read a book</s>Hey" -> ["▁I read a book", "<","/","s",">", "Hey"] // "I read a " "[DAY]", "book monday" -> "i read a " "[day]", "book monday" pretokenized } } impl Default for AddedVocabulary { fn default() -> Self { Self::new() } } #[derive(Debug, Serialize, Deserialize)] pub(super) struct AddedTokenWithId { /// The id assigned to this token pub id: u32, #[serde(flatten)] /// The target AddedToken pub token: AddedToken, } impl Serialize for AddedVocabulary { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut added_tokens = self .added_tokens_map_r .iter() .map(|(id, token)| AddedTokenWithId { id: *id, token: token.clone(), }) .collect::<Vec<_>>(); // We need to have these added tokens ordered by ascending ID added_tokens.sort_unstable_by_key(|o| o.id); let mut vocabulary = serializer.serialize_seq(Some(added_tokens.len()))?; for token in added_tokens { vocabulary.serialize_element(&token)?; } vocabulary.end() } } #[cfg(test)] mod tests { use super::*; use crate::normalizers::byte_level::ByteLevel as ByteLevelNormalizer; use crate::normalizers::utils::Lowercase; use crate::normalizers::NormalizerWrapper; use crate::{OffsetReferential, OffsetType, Result, Token, Trainer}; use std::path::{Path, PathBuf}; #[derive(Serialize, Deserialize)] struct ModelMock { vocab: HashMap<String, u32>, vocab_r: HashMap<u32, String>, } impl ModelMock { pub fn new<I>(iter: I) -> Self where I: IntoIterator<Item = &'static (&'static str, u32)>, { let vocab: HashMap<String, u32> = iter .into_iter() .map(|&(tok, id)| (tok.to_string(), id)) .collect(); Self { vocab_r: vocab .iter() .map(|(tok, id)| (*id, tok.to_owned())) .collect(), vocab, } } } fn simplify_output(result: &'_ PreTokenizedString) -> Vec<(&'_ str, Option<Vec<u32>>)> { result .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, _, tokens)| { ( s, tokens .as_ref() .map(|t| t.iter().map(|t| t.id).collect::<Vec<_>>()), ) }) .collect::<Vec<_>>() } struct TrainerMock; impl Trainer for TrainerMock { type Model = ModelMock; fn should_show_progress(&self) -> bool { true } fn train(&self, _model: &mut ModelMock) -> Result<Vec<AddedToken>> { unimplemented!() } fn feed<I, S, F>(&mut self, _iterator: I, _process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { unimplemented!() } } impl Model for ModelMock { type Trainer = TrainerMock; fn tokenize(&self, _sequence: &str) -> Result<Vec<Token>> { unimplemented!() } fn token_to_id(&self, token: &str) -> Option<u32> { self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn save(&self, _folder: &Path, _name: Option<&str>) -> Result<Vec<PathBuf>> { unimplemented!() } fn get_trainer(&self) -> Self::Trainer { TrainerMock } } #[test] fn can_add_tokens() { let model = ModelMock::new(&[("test", 0), ("tost", 1)]); let mut vocab = 
AddedVocabulary::new(); let normalizer: Option<&NormalizerWrapper> = None; // Add tokens normally assert_eq!( vocab.add_tokens( &[AddedToken::from("added_token_1", false)], &model, normalizer ), 1 ); let vocab_len: usize = vocab.len(); assert_eq!(vocab_len, 1); // Does not add multiple time the same token assert_eq!( vocab.add_tokens( &[ AddedToken::from("added_token_2", false), AddedToken::from("added_token_2", false) ], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 2); // Also adds tokens already covered by the model let added_token = AddedToken::from("test", false); assert_eq!( vocab.add_tokens(&[added_token.clone()], &model, normalizer), 1 ); assert_eq!(vocab.len(), 3); assert_eq!(vocab.get_added_tokens_decoder()[&0], added_token); } #[test] fn can_add_special_tokens() { let model = ModelMock::new(&[("test", 0), ("tost", 1)]); let mut vocab = AddedVocabulary::new(); let normalizer: Option<&NormalizerWrapper> = None; // Add tokens normally assert_eq!( vocab.add_special_tokens( &[AddedToken::from("added_token_1", true)], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 1); // Does not add multiple time the same token assert_eq!( vocab.add_special_tokens( &[ AddedToken::from("added_token_2", true), AddedToken::from("added_token_2", true) ], &model, normalizer ), 1 ); assert_eq!(vocab.len(), 2); // Can add tokens already covered by the model assert_eq!( vocab.add_special_tokens(&[AddedToken::from("test", true)], &model, normalizer), 1 ); assert_eq!(vocab.len(), 3); // New token was added assert!(vocab.is_special_token("test")); assert_eq!( *vocab.get_added_tokens_decoder(), HashMap::from([ (0, AddedToken::from("test", true)), (2, AddedToken::from("added_token_1", true)), (3, AddedToken::from("added_token_2", true)), ]) ); assert!(vocab.added_tokens_map.contains_key("test")); assert!(vocab.added_tokens_map_r.contains_key(&0)); vocab.add_tokens( &[ AddedToken::from("tost", true), AddedToken::from("another_two", false), ], &model, normalizer, ); assert_eq!(vocab.len(), 5); // New token was added assert_eq!(vocab.get_vocab()["another_two"], 4); // New token was added, but the index is not the length of the vocab // Let's add an already added token again assert_eq!( vocab.add_special_tokens(&[AddedToken::from("another_two", true)], &model, normalizer), 1 ); assert_eq!(vocab.len(), 5); // Token was already there assert_eq!(vocab.get_vocab()["another_two"], 4); // Token idx not changed // Just checking that we can set the content of the string in rust let mut token: AddedToken = AddedToken::from("Hey", false); token.content = "hey".to_string(); assert_eq!(token.content, "hey"); // Token was already there token.special = true; assert!(token.special); // Token was already there } #[test] fn can_extract_added_tokens() { // Is able to extract both normal and special tokens let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer: Option<&NormalizerWrapper> = None; vocab.add_tokens( &[ AddedToken::from("my", false), AddedToken::from("name", false), ], &model, normalizer, ); vocab.add_special_tokens( &[ AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), ], &model, normalizer, ); let result = vocab.extract_and_normalize(normalizer, "[CLS] My name is Anthony [SEP]"); assert_eq!( result .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, _, tokens)| ( s, tokens .as_ref() .map(|t| t.iter().map(|t| t.id).collect::<Vec<_>>()) )) .collect::<Vec<_>>(), vec![ ("[CLS]", Some(vec![2])), (" My ", None), ("name", Some(vec![1])), (" 
is Anthony ", None), ("[SEP]", Some(vec![3])) ] ); } #[test] fn options_use_cases() { // Is able to extract both normal and special tokens, with various options (lstrip, rstrip, // single_word, normalized) let model = ModelMock::new(&[]); let normalizer = Lowercase; let mut vocab = AddedVocabulary::new(); vocab.add_tokens( &[ AddedToken::from("my", false).lstrip(true).rstrip(true), AddedToken::from("name", false), AddedToken::from("ony", false).single_word(true), ], &model, Some(&normalizer), ); vocab.add_special_tokens( &[ AddedToken::from("[CLS]", true), AddedToken::from("[SEP]", true), ], &model, Some(&normalizer), ); let result = vocab.extract_and_normalize(Some(&normalizer), "[CLS] My name is Anthony [SEP]"); assert_eq!( simplify_output(&result), vec![ ("[CLS]", Some(vec![3])), // This one includes both spaces because of the lstrip & rstrip // And it matches because normalized == true (" my ", Some(vec![0])), ("name", Some(vec![1])), // `ony` is not extracted here thanks to single_word (" is anthony ", None), ("[SEP]", Some(vec![4])), ] ); } #[test] fn empty_matches() { let vocab = AddedVocabulary::new(); let matches = vocab.find_matches("", &vocab.split_trie); assert_eq!(matches, vec![(None, (0, 0))]); } #[test] fn test_single_word_is_correct() { // Is able to extract both normal and special tokens, with various options (lstrip, rstrip, // single_word, normalized) let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; vocab.add_tokens( &[AddedToken::from("<mask>", false).single_word(true)], &model, Some(&normalizer), ); // Left, in the middle, non single world left, non single word right, end of sentence valid let result = vocab.extract_and_normalize( Some(&normalizer), "<mask> My name <mask> A<mask> <mask>ony <mask>", ); assert_eq!( simplify_output(&result), vec![ ("<mask>", Some(vec![0])), (" my name ", None), ("<mask>", Some(vec![0])), (" a<mask> <mask>ony ", None), ("<mask>", Some(vec![0])) ] ); } #[test] fn test_single_word_is_unicode_correct() { let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; assert_eq!(vocab.len(), 0); vocab.add_tokens( &[AddedToken::from("<mask>", false).single_word(true)], &model, Some(&normalizer), ); let result = vocab.extract_and_normalize(Some(&normalizer), "<mask>, <mask>- ◌̰<mask>"); assert_eq!( simplify_output(&result), vec![ // Punctuation is not word ("<mask>", Some(vec![0])), (", ", None), // dash is not word ("<mask>", Some(vec![0])), // This is unicode combining mark character and is word: https://en.wikipedia.org/wiki/Combining_Diacritical_Marks ("- ◌̰<mask>", None), ] ); } #[test] fn test_lstrip_unicode_space() { let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; vocab.add_tokens( &[AddedToken::from("<mask>", false) .lstrip(true) .rstrip(true) .single_word(true)], &model, Some(&normalizer), ); let result = vocab .extract_and_normalize(Some(&normalizer), "Hi <mask> there\t<mask>\t<mask>\u{2000}"); assert_eq!( simplify_output(&result), vec![ ("hi", None), // Regular space (" <mask> ", Some(vec![0])), ("there", None), // \t is a spacing character ("\t<mask>\t", Some(vec![0])), // Non overlapping // \u{2000} is mongolian vowel separator: https://jkorpela.fi/chars/spaces.html ("<mask>\u{2000}", Some(vec![0])), ] ); } #[test] fn test_encode_special_tokens() { let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let normalizer = Lowercase; vocab.add_tokens( &[ 
AddedToken::from("<mask>", true) .lstrip(true) .rstrip(true) .single_word(true), AddedToken::from("ask>", false), AddedToken::from("<pad>", true), ], &model, Some(&normalizer), ); vocab.set_encode_special_tokens(true); let result = vocab.extract_and_normalize( Some(&normalizer), "Hi <mask> there\t<mask>\t<mask>\u{2000} <pad> <mask><pad><pad>", ); assert_eq!( simplify_output(&result), vec![ ("hi <m", None), ("ask>", Some(vec![1])), (" there\t<m", None), ("ask>", Some(vec![1])), ("\t<m", None), ("ask>", Some(vec![1])), ("\u{2000} <pad> <m", None), ("ask>", Some(vec![1])), ("<pad><pad>", None) ] ); vocab.set_encode_special_tokens(false); let result = vocab.extract_and_normalize( Some(&normalizer), "Hi <mask> there\t<mask>\t<mask>\u{2000} <pad> <mask><pad><pad>", ); assert_eq!( simplify_output(&result), vec![ ("hi", None), (" <mask> ", Some(vec![0])), ("there", None), ("\t<mask>\t", Some(vec![0])), ("<mask>\u{2000} ", Some(vec![0])), ("<pad>", Some(vec![2])), (" <mask>", Some(vec![0])), ("<pad>", Some(vec![2])), ("<pad>", Some(vec![2])) ] ); } #[test] fn byte_level_normalizer() { // Is able to extract both normal and special tokens let model = ModelMock::new(&[]); let mut vocab = AddedVocabulary::new(); let from = NormalizerWrapper::from(ByteLevelNormalizer::new()); let normalizer: Option<&NormalizerWrapper> = Some(&from); vocab.add_tokens( &[AddedToken::from("my", false), AddedToken::from("今", false)], &model, normalizer, ); let result = vocab.extract_and_normalize(normalizer, "my今"); assert_eq!( result .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, _, tokens)| ( s, tokens .as_ref() .map(|t| t.iter().map(|t| t.id).collect::<Vec<_>>()) )) .collect::<Vec<_>>(), vec![("my", Some(vec![0])), ("ä»Ĭ", Some(vec![1])),] ); } }
tokenizers/tokenizers/src/tokenizer/added_vocabulary.rs/0
{ "file_path": "tokenizers/tokenizers/src/tokenizer/added_vocabulary.rs", "repo_id": "tokenizers", "token_count": 17733 }
294
use crate::tokenizer::{Encoding, Result}; use serde::{Deserialize, Serialize}; use std::cmp; use std::mem; #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, Default)] pub enum TruncationDirection { Left, #[default] Right, } impl std::convert::AsRef<str> for TruncationDirection { fn as_ref(&self) -> &str { match self { TruncationDirection::Left => "left", TruncationDirection::Right => "right", } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct TruncationParams { #[serde(default)] pub direction: TruncationDirection, pub max_length: usize, pub strategy: TruncationStrategy, pub stride: usize, } impl Default for TruncationParams { fn default() -> Self { Self { max_length: 512, strategy: TruncationStrategy::default(), stride: 0, direction: TruncationDirection::default(), } } } #[derive(thiserror::Error, Debug)] pub enum TruncationError { /// We are supposed to truncate the pair sequence, but it has not been provided. #[error("Truncation error: Second sequence not provided")] SecondSequenceNotProvided, /// We cannot truncate the target sequence enough to respect the provided max length. #[error("Truncation error: Sequence to truncate too short to respect the provided max_length")] SequenceTooShort, } #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq)] pub enum TruncationStrategy { LongestFirst, OnlyFirst, OnlySecond, } impl Default for TruncationStrategy { fn default() -> Self { Self::LongestFirst } } impl std::convert::AsRef<str> for TruncationStrategy { fn as_ref(&self) -> &str { match self { Self::LongestFirst => "longest_first", Self::OnlyFirst => "only_first", Self::OnlySecond => "only_second", } } } pub fn truncate_encodings( mut encoding: Encoding, mut pair_encoding: Option<Encoding>, params: &TruncationParams, ) -> Result<(Encoding, Option<Encoding>)> { if params.max_length == 0 { encoding.truncate(0, params.stride, params.direction); if let Some(other_encoding) = pair_encoding.as_mut() { other_encoding.truncate(0, params.stride, params.direction); } return Ok((encoding, pair_encoding)); } let total_length = encoding.get_ids().len() + pair_encoding .as_ref() .map(|e| e.get_ids().len()) .unwrap_or(0); let to_remove = if total_length > params.max_length { total_length - params.max_length } else { return Ok((encoding, pair_encoding)); }; match params.strategy { TruncationStrategy::LongestFirst => { if let Some(other_encoding) = pair_encoding.as_mut() { // Assuming n1 <= n2, there are 3 cases // Case 1: // No truncation needs to be performed. // This scenario is handled before the match. // Case 2: // Only the longer input needs to be truncated. // n1 = n1 // n2 = max_length - n1 // Case 3: // Both inputs must be truncated. 
// n1 = max_length / 2 // n2 = n1 + max_length % 2 let mut n1 = encoding.get_ids().len(); let mut n2 = other_encoding.get_ids().len(); let mut swap = false; // Ensure n1 is the length of the shortest input if n1 > n2 { swap = true; mem::swap(&mut n1, &mut n2); } if n1 > params.max_length { // This needs to be a special case // to avoid max_length - n1 < 0 // since n1 and n2 are unsigned n2 = n1; } else { n2 = cmp::max(n1, params.max_length - n1); } if n1 + n2 > params.max_length { n1 = params.max_length / 2; n2 = n1 + params.max_length % 2; } // Swap lengths if we swapped previosuly if swap { mem::swap(&mut n1, &mut n2); } encoding.truncate(n1, params.stride, params.direction); other_encoding.truncate(n2, params.stride, params.direction); } else { encoding.truncate(total_length - to_remove, params.stride, params.direction); } } TruncationStrategy::OnlyFirst | TruncationStrategy::OnlySecond => { let target = if params.strategy == TruncationStrategy::OnlyFirst { Ok(&mut encoding) } else if let Some(encoding) = pair_encoding.as_mut() { Ok(encoding) } else { Err(Box::new(TruncationError::SecondSequenceNotProvided)) }?; let target_len = target.get_ids().len(); if target_len > to_remove { target.truncate(target_len - to_remove, params.stride, params.direction); } else { return Err(Box::new(TruncationError::SequenceTooShort)); } } } Ok((encoding, pair_encoding)) } #[cfg(test)] mod tests { use super::*; use crate::tokenizer::Encoding; use std::collections::HashMap; fn get_empty() -> Encoding { Encoding::new( vec![], vec![], vec![], vec![], vec![], vec![], vec![], vec![], HashMap::new(), ) } fn get_short() -> Encoding { Encoding::new( vec![1, 2], vec![0, 0], vec![String::from("a"), String::from("b")], vec![Some(0), Some(1)], vec![(0, 1), (1, 2)], vec![0, 0], vec![1, 1], vec![], HashMap::new(), ) } fn get_medium() -> Encoding { Encoding::new( vec![3, 4, 5, 6], vec![0, 0, 0, 0], vec![ String::from("d"), String::from("e"), String::from("f"), String::from("g"), ], vec![Some(0), Some(1), Some(2), Some(3)], vec![(0, 1), (1, 2), (2, 3), (3, 4)], vec![0, 0, 0, 0], vec![1, 1, 1, 1], vec![], HashMap::new(), ) } fn get_long() -> Encoding { Encoding::new( vec![7, 8, 9, 10, 11, 12, 13, 14], vec![0, 0, 0, 0, 0, 0, 0, 0], vec![ String::from("h"), String::from("i"), String::from("j"), String::from("k"), String::from("l"), String::from("m"), String::from("n"), String::from("o"), ], vec![ Some(0), Some(1), Some(2), Some(3), Some(4), Some(5), Some(6), Some(7), ], vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8), ], vec![0, 0, 0, 0, 0, 0, 0, 0], vec![1, 1, 1, 1, 1, 1, 1, 1], vec![], HashMap::new(), ) } fn truncate_and_assert( encoding1: Encoding, encoding2: Encoding, params: &TruncationParams, n1: usize, n2: usize, ) { match truncate_encodings(encoding1, Some(encoding2), params) { Ok((e1, Some(e2))) => { assert!(e1.get_ids().len() == n1); assert!(e2.get_ids().len() == n2); } _ => panic!(), }; } #[test] fn truncate_encodings_longest_first() { let params = TruncationParams { max_length: 7, strategy: TruncationStrategy::LongestFirst, stride: 0, direction: TruncationDirection::Right, }; truncate_and_assert(get_empty(), get_empty(), &params, 0, 0); truncate_and_assert(get_empty(), get_short(), &params, 0, 2); truncate_and_assert(get_empty(), get_medium(), &params, 0, 4); truncate_and_assert(get_empty(), get_long(), &params, 0, 7); truncate_and_assert(get_short(), get_empty(), &params, 2, 0); truncate_and_assert(get_short(), get_short(), &params, 2, 2); truncate_and_assert(get_short(), get_medium(), 
&params, 2, 4); truncate_and_assert(get_short(), get_long(), &params, 2, 5); truncate_and_assert(get_medium(), get_empty(), &params, 4, 0); truncate_and_assert(get_medium(), get_short(), &params, 4, 2); truncate_and_assert(get_medium(), get_medium(), &params, 3, 4); truncate_and_assert(get_medium(), get_long(), &params, 3, 4); truncate_and_assert(get_long(), get_empty(), &params, 7, 0); truncate_and_assert(get_long(), get_short(), &params, 5, 2); truncate_and_assert(get_long(), get_medium(), &params, 4, 3); truncate_and_assert(get_long(), get_long(), &params, 3, 4); } #[test] fn truncate_encodings_empty() { let params = TruncationParams { max_length: 0, strategy: TruncationStrategy::LongestFirst, stride: 0, direction: TruncationDirection::Right, }; truncate_and_assert(get_empty(), get_short(), &params, 0, 0); truncate_and_assert(get_medium(), get_medium(), &params, 0, 0); truncate_and_assert(get_long(), get_long(), &params, 0, 0); } #[test] fn test_deserialize_defaults() { let old_truncation_params = r#"{"max_length":256,"strategy":"LongestFirst","stride":0}"#; let params: TruncationParams = serde_json::from_str(old_truncation_params).unwrap(); assert_eq!(params.direction, TruncationDirection::Right); } }
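// A self-contained usage sketch (not part of the original test suite) showing how the
// `LongestFirst` strategy splits a shared `max_length` budget: the shorter sequence is
// kept whole and the longer one absorbs all of the truncation. The helper below builds
// throwaway encodings; the ids and token strings are arbitrary.
#[cfg(test)]
mod longest_first_sketch {
    use super::*;
    use crate::tokenizer::Encoding;
    use std::collections::HashMap;

    fn encoding_with_ids(ids: Vec<u32>) -> Encoding {
        let len = ids.len();
        Encoding::new(
            ids,
            vec![0; len],
            vec![String::from("x"); len],
            vec![None; len],
            vec![(0, 1); len],
            vec![0; len],
            vec![1; len],
            vec![],
            HashMap::new(),
        )
    }

    #[test]
    fn shorter_sequence_is_preserved() {
        let params = TruncationParams {
            max_length: 6,
            strategy: TruncationStrategy::LongestFirst,
            stride: 0,
            direction: TruncationDirection::Right,
        };
        // 2 + 8 = 10 tokens must shrink to 6: the 2-token sequence is untouched and the
        // 8-token sequence is cut to max(2, 6 - 2) = 4 tokens.
        let (first, second) = truncate_encodings(
            encoding_with_ids(vec![1, 2]),
            Some(encoding_with_ids(vec![3, 4, 5, 6, 7, 8, 9, 10])),
            &params,
        )
        .unwrap();
        assert_eq!(first.get_ids().len(), 2);
        assert_eq!(second.unwrap().get_ids().len(), 4);
    }
}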
tokenizers/tokenizers/src/utils/truncation.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/truncation.rs", "repo_id": "tokenizers", "token_count": 5473 }
295
# Security Policy

## Hugging Face Hub, remote artefacts, and remote code

Transformers is open-source software that is tightly coupled to the Hugging Face Hub. While you have the ability to use it offline with pre-downloaded model weights, it provides a very simple way to download, use, and manage models locally.

When downloading artefacts that have been uploaded by others on any platform, you expose yourself to risks. Please read below for the security recommendations in order to keep your runtime and local environment safe.

### Remote artefacts

Models uploaded on the Hugging Face Hub come in different formats. We heavily recommend uploading and downloading models in the [`safetensors`](https://github.com/huggingface/safetensors) format (which is the default prioritized by the transformers library), as it was developed specifically to prevent arbitrary code execution on your system.

To avoid loading models from unsafe formats (e.g. [pickle](https://docs.python.org/3/library/pickle.html)), you should use the `use_safetensors` parameter. If you do so, transformers will error when loading the model in the event that no .safetensors file is present.

### Remote code

#### Modeling

Transformers supports many model architectures, but is also the bridge between your Python runtime and models that are stored in model repositories on the Hugging Face Hub.

These models require the `trust_remote_code=True` parameter to be set when using them; please **always** verify the content of the modeling files when using this argument. We recommend setting a revision in order to ensure you protect yourself from updates on the repository.

#### Tools

Through the `Agent` framework, remote tools can be downloaded to be used by the Agent. You specify these tools yourself, but please keep in mind that their code will be run on your machine if the Agent chooses to run them.

Please inspect the code of the tools before passing them to the Agent to protect your runtime and local setup.

## Reporting a Vulnerability

🤗 Please feel free to submit vulnerability reports to our private bug bounty program at https://hackerone.com/hugging_face. You'll need to request access to the program by emailing security@huggingface.co.

Note that you'll need to be invited to our program, so send us a quick email at security@huggingface.co if you've found a vulnerability.
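For illustration, here is a minimal sketch of what these recommendations look like in practice. The repository names and the commit hash are placeholders; `use_safetensors`, `trust_remote_code`, and `revision` are the `from_pretrained` arguments discussed above.

```python
from transformers import AutoModelForCausalLM

# Refuse any checkpoint that is not stored as safetensors.
model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-model",  # placeholder repository
    use_safetensors=True,
)

# For repositories that ship custom modeling code: review the code first,
# then pin a revision so later commits cannot silently change what runs locally.
model_with_code = AutoModelForCausalLM.from_pretrained(
    "some-org/model-with-custom-code",  # placeholder repository
    trust_remote_code=True,
    revision="a1b2c3d",  # hypothetical commit hash you have audited
)
```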
transformers/SECURITY.md/0
{ "file_path": "transformers/SECURITY.md", "repo_id": "transformers", "token_count": 553 }
296
FROM python:3.10-slim ENV PYTHONDONTWRITEBYTECODE=1 ARG REF=main USER root RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ pkg-config openssh-client git RUN apt-get install -y cmake ENV UV_PYTHON=/usr/local/bin/python RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools RUN pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]" RUN uv pip install --no-cache-dir "protobuf==3.20.3" RUN pip uninstall -y transformers RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
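# Usage sketch (not part of the original file): the image is typically built from the
# repository root, with the `REF` build-arg selecting the transformers revision to install.
# The tag name below is arbitrary.
#   docker build -f docker/tf-light.dockerfile --build-arg REF=main -t transformers-tf-light .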
transformers/docker/tf-light.dockerfile/0
{ "file_path": "transformers/docker/tf-light.dockerfile", "repo_id": "transformers", "token_count": 277 }
297
#!/bin/bash
source ~/.bashrc
echo "running docker-entrypoint.sh"
conda activate container

echo $KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS
echo "printed TPU info"

# ${KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7} drops the first 7 characters (the "grpc://" prefix),
# leaving "host:port" for the XRT worker configuration.
export XRT_TPU_CONFIG="tpu_worker;0;${KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS:7}"
exec "$@"
transformers/docker/transformers-pytorch-tpu/docker-entrypoint.sh/0
{ "file_path": "transformers/docker/transformers-pytorch-tpu/docker-entrypoint.sh", "repo_id": "transformers", "token_count": 112 }
298
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Ein Modell teilen Die letzten beiden Tutorials haben gezeigt, wie man ein Modell mit PyTorch, Keras und 🤗 Accelerate für verteilte Setups feinabstimmen kann. Der nächste Schritt besteht darin, Ihr Modell mit der Community zu teilen! Bei Hugging Face glauben wir an den offenen Austausch von Wissen und Ressourcen, um künstliche Intelligenz für alle zu demokratisieren. Wir ermutigen Sie, Ihr Modell mit der Community zu teilen, um anderen zu helfen, Zeit und Ressourcen zu sparen. In diesem Tutorial lernen Sie zwei Methoden kennen, wie Sie ein trainiertes oder verfeinertes Modell auf dem [Model Hub](https://huggingface.co/models) teilen können: - Programmgesteuertes Übertragen Ihrer Dateien auf den Hub. - Ziehen Sie Ihre Dateien per Drag-and-Drop über die Weboberfläche in den Hub. <iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <Tip> Um ein Modell mit der Öffentlichkeit zu teilen, benötigen Sie ein Konto auf [huggingface.co](https://huggingface.co/join). Sie können auch einer bestehenden Organisation beitreten oder eine neue Organisation gründen. </Tip> ## Repository-Funktionen Jedes Repository im Model Hub verhält sich wie ein typisches GitHub-Repository. Unsere Repositorys bieten Versionierung, Commit-Historie und die Möglichkeit, Unterschiede zu visualisieren. Die integrierte Versionierung des Model Hub basiert auf Git und [git-lfs](https://git-lfs.github.com/). Mit anderen Worten: Sie können ein Modell als ein Repository behandeln, was eine bessere Zugriffskontrolle und Skalierbarkeit ermöglicht. Die Versionskontrolle ermöglicht *Revisionen*, eine Methode zum Anheften einer bestimmten Version eines Modells mit einem Commit-Hash, Tag oder Branch. Folglich können Sie eine bestimmte Modellversion mit dem Parameter "Revision" laden: ```py >>> model = AutoModel.from_pretrained( ... "julien-c/EsperBERTo-small", revision="v2.0.1" # tag name, or branch name, or commit hash ... ) ``` Dateien lassen sich auch in einem Repository leicht bearbeiten, und Sie können die Commit-Historie sowie die Unterschiede einsehen: ![vis_diff](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png) ## Einrichtung Bevor Sie ein Modell für den Hub freigeben, benötigen Sie Ihre Hugging Face-Anmeldedaten. Wenn Sie Zugang zu einem Terminal haben, führen Sie den folgenden Befehl in der virtuellen Umgebung aus, in der 🤗 Transformers installiert ist. 
Dadurch werden Ihre Zugangsdaten in Ihrem Hugging Face-Cache-Ordner (standardmäßig `~/.cache/`) gespeichert: ```bash huggingface-cli login ``` Wenn Sie ein Notebook wie Jupyter oder Colaboratory verwenden, stellen Sie sicher, dass Sie die [`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library) Bibliothek installiert haben. Diese Bibliothek ermöglicht Ihnen die programmatische Interaktion mit dem Hub. ```bash pip install huggingface_hub ``` Verwenden Sie dann `notebook_login`, um sich beim Hub anzumelden, und folgen Sie dem Link [hier](https://huggingface.co/settings/token), um ein Token für die Anmeldung zu generieren: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Ein Modell für alle Frameworks konvertieren Um sicherzustellen, dass Ihr Modell von jemandem verwendet werden kann, der mit einem anderen Framework arbeitet, empfehlen wir Ihnen, Ihr Modell sowohl mit PyTorch- als auch mit TensorFlow-Checkpoints zu konvertieren und hochzuladen. Während Benutzer immer noch in der Lage sind, Ihr Modell von einem anderen Framework zu laden, wenn Sie diesen Schritt überspringen, wird es langsamer sein, weil 🤗 Transformers den Checkpoint on-the-fly konvertieren müssen. Die Konvertierung eines Checkpoints für ein anderes Framework ist einfach. Stellen Sie sicher, dass Sie PyTorch und TensorFlow installiert haben (siehe [hier](installation) für Installationsanweisungen), und finden Sie dann das spezifische Modell für Ihre Aufgabe in dem anderen Framework. <frameworkcontent> <pt> Geben Sie `from_tf=True` an, um einen Prüfpunkt von TensorFlow nach PyTorch zu konvertieren: ```py >>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True) >>> pt_model.save_pretrained("path/to/awesome-name-you-picked") ``` </pt> <tf> Geben Sie `from_pt=True` an, um einen Prüfpunkt von PyTorch nach TensorFlow zu konvertieren: ```py >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True) ``` Dann können Sie Ihr neues TensorFlow-Modell mit seinem neuen Checkpoint speichern: ```py >>> tf_model.save_pretrained("path/to/awesome-name-you-picked") ``` </tf> <jax> Wenn ein Modell in Flax verfügbar ist, können Sie auch einen Kontrollpunkt von PyTorch nach Flax konvertieren: ```py >>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( ... "path/to/awesome-name-you-picked", from_pt=True ... ) ``` </jax> </frameworkcontent> ## Ein Modell während des Trainings hochladen <frameworkcontent> <pt> <Youtube id="Z1-XMy-GNLQ"/> Die Weitergabe eines Modells an den Hub ist so einfach wie das Hinzufügen eines zusätzlichen Parameters oder Rückrufs. Erinnern Sie sich an das [Feinabstimmungs-Tutorial](training), in der Klasse [`TrainingArguments`] geben Sie Hyperparameter und zusätzliche Trainingsoptionen an. Eine dieser Trainingsoptionen beinhaltet die Möglichkeit, ein Modell direkt an den Hub zu pushen. Setzen Sie `push_to_hub=True` in Ihrer [`TrainingArguments`]: ```py >>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True) ``` Übergeben Sie Ihre Trainingsargumente wie gewohnt an [`Trainer`]: ```py >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=small_train_dataset, ... eval_dataset=small_eval_dataset, ... compute_metrics=compute_metrics, ... 
) ``` Nach der Feinabstimmung Ihres Modells rufen Sie [`~transformers.Trainer.push_to_hub`] auf [`Trainer`] auf, um das trainierte Modell an den Hub zu übertragen. Transformers fügt sogar automatisch Trainings-Hyperparameter, Trainingsergebnisse und Framework-Versionen zu Ihrer Modellkarte hinzu! ```py >>> trainer.push_to_hub() ``` </pt> <tf> Geben Sie ein Modell mit [`PushToHubCallback`] an den Hub weiter. In der [`PushToHubCallback`] Funktion, fügen Sie hinzu: - Ein Ausgabeverzeichnis für Ihr Modell. - Einen Tokenizer. - Die `hub_model_id`, die Ihr Hub-Benutzername und Modellname ist. ```py >>> from transformers import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model" ... ) ``` Fügen Sie den Callback zu [`fit`](https://keras.io/api/models/model_training_apis/) hinzu, und 🤗 Transformers wird das trainierte Modell an den Hub weiterleiten: ```py >>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback) ``` </tf> </frameworkcontent> ## Verwenden Sie die Funktion `push_to_hub`. Sie können `push_to_hub` auch direkt für Ihr Modell aufrufen, um es in den Hub hochzuladen. Geben Sie den Namen Ihres Modells in "push_to_hub" an: ```py >>> pt_model.push_to_hub("my-awesome-model") ``` Dadurch wird ein Repository unter Ihrem Benutzernamen mit dem Modellnamen `my-awesome-model` erstellt. Benutzer können nun Ihr Modell mit der Funktion `from_pretrained` laden: ```py >>> from transformers import AutoModel >>> model = AutoModel.from_pretrained("your_username/my-awesome-model") ``` Wenn Sie zu einer Organisation gehören und Ihr Modell stattdessen unter dem Namen der Organisation pushen wollen, fügen Sie diesen einfach zur `repo_id` hinzu: ```py >>> pt_model.push_to_hub("my-awesome-org/my-awesome-model") ``` Die Funktion "push_to_hub" kann auch verwendet werden, um andere Dateien zu einem Modell-Repository hinzuzufügen. Zum Beispiel kann man einen Tokenizer zu einem Modell-Repository hinzufügen: ```py >>> tokenizer.push_to_hub("my-awesome-model") ``` Oder vielleicht möchten Sie die TensorFlow-Version Ihres fein abgestimmten PyTorch-Modells hinzufügen: ```py >>> tf_model.push_to_hub("my-awesome-model") ``` Wenn Sie nun zu Ihrem Hugging Face-Profil navigieren, sollten Sie Ihr neu erstelltes Modell-Repository sehen. Wenn Sie auf die Registerkarte **Dateien** klicken, werden alle Dateien angezeigt, die Sie in das Repository hochgeladen haben. Weitere Einzelheiten zum Erstellen und Hochladen von Dateien in ein Repository finden Sie in der Hub-Dokumentation [hier](https://huggingface.co/docs/hub/how-to-upstream). ## Hochladen mit der Weboberfläche Benutzer, die einen no-code Ansatz bevorzugen, können ein Modell über das Webinterface des Hubs hochladen. Besuchen Sie [huggingface.co/new](https://huggingface.co/new) um ein neues Repository zu erstellen: ![new_model_repo](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png) Fügen Sie von hier aus einige Informationen über Ihr Modell hinzu: - Wählen Sie den **Besitzer** des Repositorys. Dies können Sie selbst oder eine der Organisationen sein, denen Sie angehören. - Wählen Sie einen Namen für Ihr Modell, der auch der Name des Repositorys sein wird. - Wählen Sie, ob Ihr Modell öffentlich oder privat ist. - Geben Sie die Lizenzverwendung für Ihr Modell an. 
Klicken Sie nun auf die Registerkarte **Dateien** und klicken Sie auf die Schaltfläche **Datei hinzufügen**, um eine neue Datei in Ihr Repository hochzuladen. Ziehen Sie dann eine Datei per Drag-and-Drop hoch und fügen Sie eine Übergabemeldung hinzu. ![upload_file](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png) ## Hinzufügen einer Modellkarte Um sicherzustellen, dass die Benutzer die Fähigkeiten, Grenzen, möglichen Verzerrungen und ethischen Aspekte Ihres Modells verstehen, fügen Sie bitte eine Modellkarte zu Ihrem Repository hinzu. Die Modellkarte wird in der Datei `README.md` definiert. Sie können eine Modellkarte hinzufügen, indem Sie: * Manuelles Erstellen und Hochladen einer "README.md"-Datei. * Klicken Sie auf die Schaltfläche **Modellkarte bearbeiten** in Ihrem Modell-Repository. Werfen Sie einen Blick auf die DistilBert [model card](https://huggingface.co/distilbert/distilbert-base-uncased) als gutes Beispiel für die Art von Informationen, die eine Modellkarte enthalten sollte. Weitere Details über andere Optionen, die Sie in der Datei "README.md" einstellen können, wie z.B. den Kohlenstoff-Fußabdruck eines Modells oder Beispiele für Widgets, finden Sie in der Dokumentation [hier](https://huggingface.co/docs/hub/models-cards).
transformers/docs/source/de/model_sharing.md/0
{ "file_path": "transformers/docs/source/de/model_sharing.md", "repo_id": "transformers", "token_count": 4287 }
299
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Agents and tools [[open-in-colab]] ### What is an agent? Large Language Models (LLMs) trained to perform [causal language modeling](./tasks/language_modeling.) can tackle a wide range of tasks, but they often struggle with basic tasks like logic, calculation, and search. When prompted in domains in which they do not perform well, they often fail to generate the answer we expect them to. One approach to overcome this weakness is to create an *agent*. An agent is a system that uses an LLM as its engine, and it has access to functions called *tools*. These *tools* are functions for performing a task, and they contain all necessary description for the agent to properly use them. The agent can be programmed to: - devise a series of actions/tools and run them all at once like the [`CodeAgent`] for example - plan and execute actions/tools one by one and wait for the outcome of each action before launching the next one like the [`ReactJsonAgent`] for example ### Types of agents #### Code agent This agent has a planning step, then generates python code to execute all its actions at once. It natively handles different input and output types for its tools, thus it is the recommended choice for multimodal tasks. #### React agents This is the go-to agent to solve reasoning tasks, since the ReAct framework ([Yao et al., 2022](https://huggingface.co/papers/2210.03629)) makes it really efficient to think on the basis of its previous observations. We implement two versions of ReactJsonAgent: - [`ReactJsonAgent`] generates tool calls as a JSON in its output. - [`ReactCodeAgent`] is a new type of ReactJsonAgent that generates its tool calls as blobs of code, which works really well for LLMs that have strong coding performance. > [!TIP] > Read [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more the ReAct agent. ![Framework of a React Agent](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-source-llms-as-agents/ReAct.png) For example, here is how a ReAct Code agent would work its way through the following question. ```py3 >>> agent.run( ... "How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?", ... ) =====New task===== How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need? 
====Agent is executing the code below: bert_blocks = search(query="number of blocks in BERT base encoder") print("BERT blocks:", bert_blocks) ==== Print outputs: BERT blocks: twelve encoder blocks ====Agent is executing the code below: attention_layer = search(query="number of layers in Attention is All You Need") print("Attention layers:", attention_layer) ==== Print outputs: Attention layers: Encoder: The encoder is composed of a stack of N = 6 identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position- 2 Page 3 Figure 1: The Transformer - model architecture. ====Agent is executing the code below: bert_blocks = 12 attention_layers = 6 diff = bert_blocks - attention_layers print("Difference in blocks:", diff) final_answer(diff) ==== Print outputs: Difference in blocks: 6 Final answer: 6 ``` ### How can I build an agent? To initialize an agent, you need these arguments: - an LLM to power your agent - the agent is not exactly the LLM, it’s more like the agent is a program that uses an LLM as its engine. - a system prompt: what the LLM engine will be prompted with to generate its output - a toolbox from which the agent pick tools to execute - a parser to extract from the LLM output which tools are to call and with which arguments Upon initialization of the agent system, the tool attributes are used to generate a tool description, then baked into the agent’s `system_prompt` to let it know which tools it can use and why. To start with, please install the `agents` extras in order to install all default dependencies. ```bash pip install transformers[agents] ``` Build your LLM engine by defining a `llm_engine` method which accepts a list of [messages](./chat_templating.) and returns text. This callable also needs to accept a `stop` argument that indicates when to stop generating. ```python from huggingface_hub import login, InferenceClient login("<YOUR_HUGGINGFACEHUB_API_TOKEN>") client = InferenceClient(model="meta-llama/Meta-Llama-3-70B-Instruct") def llm_engine(messages, stop_sequences=["Task"]) -> str: response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1000) answer = response.choices[0].message.content return answer ``` You could use any `llm_engine` method as long as: 1. it follows the [messages format](./chat_templating.md) (`List[Dict[str, str]]`) for its input `messages`, and it returns a `str`. 2. it stops generating outputs at the sequences passed in the argument `stop_sequences` Additionally, `llm_engine` can also take a `grammar` argument. In the case where you specify a `grammar` upon agent initialization, this argument will be passed to the calls to llm_engine, with the `grammar` that you defined upon initialization, to allow [constrained generation](https://huggingface.co/docs/text-generation-inference/conceptual/guidance) in order to force properly-formatted agent outputs. You will also need a `tools` argument which accepts a list of `Tools` - it can be an empty list. You can also add the default toolbox on top of your `tools` list by defining the optional argument `add_base_tools=True`. Now you can create an agent, like [`CodeAgent`], and run it. You can also create a [`TransformersEngine`] with a pre-initialized pipeline to run inference on your local machine using `transformers`. 
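As a rough sketch of that local option (the checkpoint is an arbitrary small instruct model, and the exact `TransformersEngine` import and constructor used here are assumptions based on the sentence above — it is only assumed to accept a pre-initialized text-generation pipeline):

```python
from transformers import pipeline, TransformersEngine, CodeAgent

# Any locally runnable chat/instruct checkpoint would do; this one is just an example.
pipe = pipeline("text-generation", model="HuggingFaceTB/SmolLM-135M-Instruct")

llm_engine = TransformersEngine(pipe)  # assumption: the pipeline is passed directly
agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True)
```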
For convenience, since agentic behaviours generally require stronger models such as `Llama-3.1-70B-Instruct` that are harder to run locally for now, we also provide the [`HfApiEngine`] class that initializes a `huggingface_hub.InferenceClient` under the hood. ```python from transformers import CodeAgent, HfApiEngine llm_engine = HfApiEngine(model="meta-llama/Meta-Llama-3-70B-Instruct") agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True) agent.run( "Could you translate this sentence from French, say it out loud and return the audio.", sentence="Où est la boulangerie la plus proche?", ) ``` This will be handy in case of emergency baguette need! You can even leave the argument `llm_engine` undefined, and an [`HfApiEngine`] will be created by default. ```python from transformers import CodeAgent agent = CodeAgent(tools=[], add_base_tools=True) agent.run( "Could you translate this sentence from French, say it out loud and give me the audio.", sentence="Où est la boulangerie la plus proche?", ) ``` Note that we used an additional `sentence` argument: you can pass text as additional arguments to the model. You can also use this to indicate the path to local or remote files for the model to use: ```py from transformers import ReactCodeAgent agent = ReactCodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True) agent.run("Why does Mike not know many people in New York?", audio="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/recording.mp3") ``` The prompt and output parser were automatically defined, but you can easily inspect them by calling the `system_prompt_template` on your agent. ```python print(agent.system_prompt_template) ``` It's important to explain as clearly as possible the task you want to perform. Every [`~Agent.run`] operation is independent, and since an agent is powered by an LLM, minor variations in your prompt might yield completely different results. You can also run an agent consecutively for different tasks: each time the attributes `agent.task` and `agent.logs` will be re-initialized. #### Code execution A Python interpreter executes the code on a set of inputs passed along with your tools. This should be safe because the only functions that can be called are the tools you provided (especially if it's only tools by Hugging Face) and the print function, so you're already limited in what can be executed. The Python interpreter also doesn't allow imports by default outside of a safe list, so all the most obvious attacks shouldn't be an issue. You can still authorize additional imports by passing the authorized modules as a list of strings in argument `additional_authorized_imports` upon initialization of your [`ReactCodeAgent`] or [`CodeAgent`]: ```py >>> from transformers import ReactCodeAgent >>> agent = ReactCodeAgent(tools=[], additional_authorized_imports=['requests', 'bs4']) >>> agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?") (...) 'Hugging Face – Blog' ``` The execution will stop at any code trying to perform an illegal operation or if there is a regular Python error with the code generated by the agent. > [!WARNING] > The LLM can generate arbitrary code that will then be executed: do not add any unsafe imports! ### The system prompt An agent, or rather the LLM that drives the agent, generates an output based on the system prompt. The system prompt can be customized and tailored to the intended task. 
For example, check the system prompt for the [`ReactCodeAgent`] (below version is slightly simplified). ```text You will be given a task to solve as best you can. You have access to the following tools: <<tool_descriptions>> To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences. At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task, then the tools that you want to use. Then in the 'Code:' sequence, you shold write the code in simple Python. The code sequence must end with '/End code' sequence. During each intermediate step, you can use 'print()' to save whatever important information you will then need. These print outputs will then be available in the 'Observation:' field, for using this information as input for the next step. In the end you have to return a final answer using the `final_answer` tool. Here are a few examples using notional tools: --- {examples} Above example were using notional tools that might not exist for you. You only have acces to those tools: <<tool_names>> You also can perform computations in the python code you generate. Always provide a 'Thought:' and a 'Code:\n```py' sequence ending with '```<end_code>' sequence. You MUST provide at least the 'Code:' sequence to move forward. Remember to not perform too many operations in a single code block! You should split the task into intermediate code blocks. Print results at the end of each step to save the intermediate results. Then use final_answer() to return the final result. Remember to make sure that variables you use are all defined. Now Begin! ``` The system prompt includes: - An *introduction* that explains how the agent should behave and what tools are. - A description of all the tools that is defined by a `<<tool_descriptions>>` token that is dynamically replaced at runtime with the tools defined/chosen by the user. - The tool description comes from the tool attributes, `name`, `description`, `inputs` and `output_type`, and a simple `jinja2` template that you can refine. - The expected output format. You could improve the system prompt, for example, by adding an explanation of the output format. For maximum flexibility, you can overwrite the whole system prompt template by passing your custom prompt as an argument to the `system_prompt` parameter. ```python from transformers import ReactJsonAgent from transformers.agents import PythonInterpreterTool agent = ReactJsonAgent(tools=[PythonInterpreterTool()], system_prompt="{your_custom_prompt}") ``` > [!WARNING] > Please make sure to define the `<<tool_descriptions>>` string somewhere in the `template` so the agent is aware of the available tools. ### Inspecting an agent run Here are a few useful attributes to inspect what happened after a run: - `agent.logs` stores the fine-grained logs of the agent. At every step of the agent's run, everything gets stored in a dictionary that then is appended to `agent.logs`. - Running `agent.write_inner_memory_from_logs()` creates an inner memory of the agent's logs for the LLM to view, as a list of chat messages. This method goes over each step of the log and only stores what it's interested in as a message: for instance, it will save the system prompt and task in separate messages, then for each step it will store the LLM output as a message, and the tool call output as another message. 
Use this if you want a higher-level view of what has happened - but not every log will be transcribed by this method.

## Tools

A tool is an atomic function to be used by an agent. You can for instance check the [`PythonInterpreterTool`]: it has a name, a description, input descriptions, an output type, and a `__call__` method to perform the action.

When the agent is initialized, the tool attributes are used to generate a tool description which is baked into the agent's system prompt. This lets the agent know which tools it can use and why.

### Default toolbox

Transformers comes with a default toolbox for empowering agents, that you can add to your agent upon initialization with argument `add_base_tools = True`:

- **Document question answering**: given a document (such as a PDF) in image format, answer a question on this document ([Donut](./model_doc/donut))
- **Image question answering**: given an image, answer a question on this image ([VILT](./model_doc/vilt))
- **Speech to text**: given an audio recording of a person talking, transcribe the speech into text ([Whisper](./model_doc/whisper))
- **Text to speech**: convert text to speech ([SpeechT5](./model_doc/speecht5))
- **Translation**: translates a given sentence from source language to target language.
- **Python code interpreter**: runs the LLM-generated Python code in a secure environment. This tool will only be added to [`ReactJsonAgent`] if you use `add_base_tools=True`, since code-based tools can already execute Python code.

You can manually use a tool by calling the [`load_tool`] function and passing a task to perform.

```python
from transformers import load_tool

tool = load_tool("text-to-speech")
audio = tool("This is a text to speech tool")
```

### Create a new tool

You can create your own tool for use cases not covered by the default tools from Hugging Face. For example, let's create a tool that returns the most downloaded model for a given task from the Hub.

You'll start with the code below.

```python
from huggingface_hub import list_models

task = "text-classification"

model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
print(model.id)
```

This code can be converted into a class that inherits from the [`Tool`] superclass.

The custom tool needs:
- An attribute `name`, which corresponds to the name of the tool itself. The name usually describes what the tool does. Since the code returns the model with the most downloads for a task, let's name it `model_download_counter`.
- An attribute `description` is used to populate the agent's system prompt.
- An `inputs` attribute, which is a dictionary with keys `"type"` and `"description"`. It contains information that helps the Python interpreter make educated choices about the input.
- An `output_type` attribute, which specifies the output type.
- A `forward` method which contains the inference code to be executed.

```python
from transformers import Tool
from huggingface_hub import list_models

class HFModelDownloadsTool(Tool):
    name = "model_download_counter"
    description = (
        "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. "
        "It returns the name of the checkpoint."
) inputs = { "task": { "type": "text", "description": "the task category (such as text-classification, depth-estimation, etc)", } } output_type = "text" def forward(self, task: str): model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) return model.id ``` Now that the custom `HfModelDownloadsTool` class is ready, you can save it to a file named `model_downloads.py` and import it for use. ```python from model_downloads import HFModelDownloadsTool tool = HFModelDownloadsTool() ``` You can also share your custom tool to the Hub by calling [`~Tool.push_to_hub`] on the tool. Make sure you've created a repository for it on the Hub and are using a token with read access. ```python tool.push_to_hub("{your_username}/hf-model-downloads") ``` Load the tool with the [`~Tool.load_tool`] function and pass it to the `tools` parameter in your agent. ```python from transformers import load_tool, CodeAgent model_download_tool = load_tool("m-ric/hf-model-downloads") agent = CodeAgent(tools=[model_download_tool], llm_engine=llm_engine) agent.run( "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?" ) ``` You get the following: ```text ======== New task ======== Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub? ==== Agent is executing the code below: most_downloaded_model = model_download_counter(task="text-to-video") print(f"The most downloaded model for the 'text-to-video' task is {most_downloaded_model}.") ==== ``` And the output: `"The most downloaded model for the 'text-to-video' task is ByteDance/AnimateDiff-Lightning."` ### Manage your agent's toolbox If you have already initialized an agent, it is inconvenient to reinitialize it from scratch with a tool you want to use. With Transformers, you can manage an agent's toolbox by adding or replacing a tool. Let's add the `model_download_tool` to an existing agent initialized with only the default toolbox. ```python from transformers import CodeAgent agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True) agent.toolbox.add_tool(model_download_tool) ``` Now we can leverage both the new tool and the previous text-to-speech tool: ```python agent.run( "Can you read out loud the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub and return the audio?" ) ``` | **Audio** | |------------------------------------------------------------------------------------------------------------------------------------------------------| | <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/damo.wav" type="audio/wav"/> | > [!WARNING] > Beware when adding tools to an agent that already works well because it can bias selection towards your tool or select another tool other than the one already defined. Use the `agent.toolbox.update_tool()` method to replace an existing tool in the agent's toolbox. This is useful if your new tool is a one-to-one replacement of the existing tool because the agent already knows how to perform that specific task. Just make sure the new tool follows the same API as the replaced tool or adapt the system prompt template to ensure all examples using the replaced tool are updated. ### Use a collection of tools You can leverage tool collections by using the ToolCollection object, with the slug of the collection you want to use. 
Then pass them as a list to initialize you agent, and start using them! ```py from transformers import ToolCollection, ReactCodeAgent image_tool_collection = ToolCollection(collection_slug="huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f") agent = ReactCodeAgent(tools=[*image_tool_collection.tools], add_base_tools=True) agent.run("Please draw me a picture of rivers and lakes.") ``` To speed up the start, tools are loaded only if called by the agent. This gets you this image: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png"> ### Use gradio-tools [gradio-tools](https://github.com/freddyaboulton/gradio-tools) is a powerful library that allows using Hugging Face Spaces as tools. It supports many existing Spaces as well as custom Spaces. Transformers supports `gradio_tools` with the [`Tool.from_gradio`] method. For example, let's use the [`StableDiffusionPromptGeneratorTool`](https://github.com/freddyaboulton/gradio-tools/blob/main/gradio_tools/tools/prompt_generator.py) from `gradio-tools` toolkit for improving prompts to generate better images. Import and instantiate the tool, then pass it to the `Tool.from_gradio` method: ```python from gradio_tools import StableDiffusionPromptGeneratorTool from transformers import Tool, load_tool, CodeAgent gradio_prompt_generator_tool = StableDiffusionPromptGeneratorTool() prompt_generator_tool = Tool.from_gradio(gradio_prompt_generator_tool) ``` Now you can use it just like any other tool. For example, let's improve the prompt `a rabbit wearing a space suit`. ```python image_generation_tool = load_tool('huggingface-tools/text-to-image') agent = CodeAgent(tools=[prompt_generator_tool, image_generation_tool], llm_engine=llm_engine) agent.run( "Improve this prompt, then generate an image of it.", prompt='A rabbit wearing a space suit' ) ``` The model adequately leverages the tool: ```text ======== New task ======== Improve this prompt, then generate an image of it. You have been provided with these initial arguments: {'prompt': 'A rabbit wearing a space suit'}. ==== Agent is executing the code below: improved_prompt = StableDiffusionPromptGenerator(query=prompt) while improved_prompt == "QUEUE_FULL": improved_prompt = StableDiffusionPromptGenerator(query=prompt) print(f"The improved prompt is {improved_prompt}.") image = image_generator(prompt=improved_prompt) ==== ``` Before finally generating the image: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"> > [!WARNING] > gradio-tools require *textual* inputs and outputs even when working with different modalities like image and audio objects. Image and audio inputs and outputs are currently incompatible. ### Use LangChain tools We love Langchain and think it has a very compelling suite of tools. To import a tool from LangChain, use the `from_langchain()` method. Here is how you can use it to recreate the intro's search result using a LangChain web search tool. 
```python from langchain.agents import load_tools from transformers import Tool, ReactCodeAgent search_tool = Tool.from_langchain(load_tools(["serpapi"])[0]) agent = ReactCodeAgent(tools=[search_tool]) agent.run("How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?") ``` ## Gradio interface You can leverage `gradio.Chatbot`to display your agent's thoughts using `stream_to_gradio`, here is an example: ```py import gradio as gr from transformers import ( load_tool, ReactCodeAgent, HfApiEngine, stream_to_gradio, ) # Import tool from Hub image_generation_tool = load_tool("m-ric/text-to-image") llm_engine = HfApiEngine("meta-llama/Meta-Llama-3-70B-Instruct") # Initialize the agent with the image generation tool agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine) def interact_with_agent(task): messages = [] messages.append(gr.ChatMessage(role="user", content=task)) yield messages for msg in stream_to_gradio(agent, task): messages.append(msg) yield messages + [ gr.ChatMessage(role="assistant", content="⏳ Task not finished yet!") ] yield messages with gr.Blocks() as demo: text_input = gr.Textbox(lines=1, label="Chat Message", value="Make me a picture of the Statue of Liberty.") submit = gr.Button("Run illustrator agent!") chatbot = gr.Chatbot( label="Agent", type="messages", avatar_images=( None, "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png", ), ) submit.click(interact_with_agent, [text_input], [chatbot]) if __name__ == "__main__": demo.launch() ```
transformers/docs/source/en/agents.md/0
{ "file_path": "transformers/docs/source/en/agents.md", "repo_id": "transformers", "token_count": 7222 }
300
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Text generation strategies Text generation is essential to many NLP tasks, such as open-ended text generation, summarization, translation, and more. It also plays a role in a variety of mixed-modality applications that have text as an output like speech-to-text and vision-to-text. Some of the models that can generate text include GPT2, XLNet, OpenAI GPT, CTRL, TransformerXL, XLM, Bart, T5, GIT, Whisper. Check out a few examples that use [`~generation.GenerationMixin.generate`] method to produce text outputs for different tasks: * [Text summarization](./tasks/summarization#inference) * [Image captioning](./model_doc/git#transformers.GitForCausalLM.forward.example) * [Audio transcription](./model_doc/whisper#transformers.WhisperForConditionalGeneration.forward.example) Note that the inputs to the generate method depend on the model's modality. They are returned by the model's preprocessor class, such as AutoTokenizer or AutoProcessor. If a model's preprocessor creates more than one kind of input, pass all the inputs to generate(). You can learn more about the individual model's preprocessor in the corresponding model's documentation. The process of selecting output tokens to generate text is known as decoding, and you can customize the decoding strategy that the `generate()` method will use. Modifying a decoding strategy does not change the values of any trainable parameters. However, it can have a noticeable impact on the quality of the generated output. It can help reduce repetition in the text and make it more coherent. This guide describes: * default generation configuration * common decoding strategies and their main parameters * saving and sharing custom generation configurations with your fine-tuned model on 🤗 Hub ## Default text generation configuration A decoding strategy for a model is defined in its generation configuration. When using pre-trained models for inference within a [`pipeline`], the models call the `PreTrainedModel.generate()` method that applies a default generation configuration under the hood. The default configuration is also used when no custom configuration has been saved with the model. When you load a model explicitly, you can inspect the generation configuration that comes with it through `model.generation_config`: ```python >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") >>> model.generation_config GenerationConfig { "bos_token_id": 50256, "eos_token_id": 50256 } <BLANKLINE> ``` Printing out the `model.generation_config` reveals only the values that are different from the default generation configuration, and does not list any of the default values. 
The default generation configuration limits the size of the output combined with the input prompt to a maximum of 20 tokens to avoid running into resource limitations. The default decoding strategy is greedy search, which is the simplest decoding strategy that picks a token with the highest probability as the next token. For many tasks and small output sizes this works well. However, when used to generate longer outputs, greedy search can start producing highly repetitive results. ## Customize text generation You can override any `generation_config` by passing the parameters and their values directly to the [`generate`] method: ```python >>> my_model.generate(**inputs, num_beams=4, do_sample=True) # doctest: +SKIP ``` Even if the default decoding strategy mostly works for your task, you can still tweak a few things. Some of the commonly adjusted parameters include: - `max_new_tokens`: the maximum number of tokens to generate. In other words, the size of the output sequence, not including the tokens in the prompt. As an alternative to using the output's length as a stopping criteria, you can choose to stop generation whenever the full generation exceeds some amount of time. To learn more, check [`StoppingCriteria`]. - `num_beams`: by specifying a number of beams higher than 1, you are effectively switching from greedy search to beam search. This strategy evaluates several hypotheses at each time step and eventually chooses the hypothesis that has the overall highest probability for the entire sequence. This has the advantage of identifying high-probability sequences that start with a lower probability initial tokens and would've been ignored by the greedy search. Visualize how it works [here](https://huggingface.co/spaces/m-ric/beam_search_visualizer). - `do_sample`: if set to `True`, this parameter enables decoding strategies such as multinomial sampling, beam-search multinomial sampling, Top-K sampling and Top-p sampling. All these strategies select the next token from the probability distribution over the entire vocabulary with various strategy-specific adjustments. - `num_return_sequences`: the number of sequence candidates to return for each input. This option is only available for the decoding strategies that support multiple sequence candidates, e.g. variations of beam search and sampling. Decoding strategies like greedy search and contrastive search return a single output sequence. ## Save a custom decoding strategy with your model If you would like to share your fine-tuned model with a specific generation configuration, you can: * Create a [`GenerationConfig`] class instance * Specify the decoding strategy parameters * Save your generation configuration with [`GenerationConfig.save_pretrained`], making sure to leave its `config_file_name` argument empty * Set `push_to_hub` to `True` to upload your config to the model's repo ```python >>> from transformers import AutoModelForCausalLM, GenerationConfig >>> model = AutoModelForCausalLM.from_pretrained("my_account/my_model") # doctest: +SKIP >>> generation_config = GenerationConfig( ... max_new_tokens=50, do_sample=True, top_k=50, eos_token_id=model.config.eos_token_id ... ) >>> generation_config.save_pretrained("my_account/my_model", push_to_hub=True) # doctest: +SKIP ``` You can also store several generation configurations in a single directory, making use of the `config_file_name` argument in [`GenerationConfig.save_pretrained`]. You can later instantiate them with [`GenerationConfig.from_pretrained`]. 
This is useful if you want to store several generation configurations for a single model (e.g. one for creative text generation with sampling, and one for summarization with beam search). You must have the right Hub permissions to add configuration files to a model.

```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig

>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small")

>>> translation_generation_config = GenerationConfig(
...     num_beams=4,
...     early_stopping=True,
...     decoder_start_token_id=0,
...     eos_token_id=model.config.eos_token_id,
...     pad_token=model.config.pad_token_id,
... )

>>> # Tip: add `push_to_hub=True` to push to the Hub
>>> translation_generation_config.save_pretrained("/tmp", "translation_generation_config.json")

>>> # You could then use the named generation config file to parameterize generation
>>> generation_config = GenerationConfig.from_pretrained("/tmp", "translation_generation_config.json")
>>> inputs = tokenizer("translate English to French: Configuration files are easy to use!", return_tensors="pt")
>>> outputs = model.generate(**inputs, generation_config=generation_config)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['Les fichiers de configuration sont faciles à utiliser!']
```

## Streaming

The `generate()` method supports streaming through its `streamer` input. The `streamer` input is compatible with any instance from a class that has the following methods: `put()` and `end()`. Internally, `put()` is used to push new tokens and `end()` is used to flag the end of text generation.

<Tip warning={true}>

The API for the streamer classes is still under development and may change in the future.

</Tip>

In practice, you can craft your own streaming class for all sorts of purposes! We also have basic streaming classes ready for you to use. For example, you can use the [`TextStreamer`] class to stream the output of `generate()` into your screen, one word at a time:

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

>>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
>>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
>>> streamer = TextStreamer(tok)

>>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
>>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
```

## Watermarking

The `generate()` method supports watermarking the generated text by randomly marking a portion of tokens as "green". When generating, the "green" tokens will have a small 'bias' value added to their logits, thus having a higher chance of being generated. The watermarked text can be detected by calculating the proportion of "green" tokens in the text and estimating how likely it is statistically to obtain that amount of "green" tokens for human-generated text. This watermarking strategy was proposed in the paper ["On the Reliability of Watermarks for Large Language Models"](https://arxiv.org/abs/2306.04634). For more information on the inner functioning of watermarking, it is recommended to refer to the paper.

The watermarking can be used with any generative model in `transformers` and does not require an extra classification model to detect watermarked text.
To trigger watermarking, pass a [`WatermarkingConfig`] with the needed arguments directly to the `.generate()` method or add it to the [`GenerationConfig`]. Watermarked text can be later detected with a [`WatermarkDetector`].

<Tip warning={true}>

The WatermarkDetector internally relies on the proportion of "green" tokens and on whether the generated text follows the coloring pattern. That is why it is recommended to strip off the prompt text if it is much longer than the generated text. This also can have an effect when one sequence in the batch is much longer than the others, causing the other rows to be padded. Additionally, the detector **must** be initialized with the same watermark configuration arguments that were used when generating.

</Tip>

Let's generate some text with watermarking. In the below code snippet, we set the bias to 2.5, the value that will be added to the "green" tokens' logits. After generating watermarked text, we can pass it directly to the `WatermarkDetector` to check if the text is machine-generated (it outputs `True` for machine-generated text and `False` otherwise).

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, WatermarkDetector, WatermarkingConfig

>>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
>>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> tok.pad_token_id = tok.eos_token_id
>>> tok.padding_side = "left"

>>> inputs = tok(["This is the beginning of a long story", "Alice and Bob are"], padding=True, return_tensors="pt")
>>> input_len = inputs["input_ids"].shape[-1]

>>> watermarking_config = WatermarkingConfig(bias=2.5, seeding_scheme="selfhash")
>>> out = model.generate(**inputs, watermarking_config=watermarking_config, do_sample=False, max_length=20)

>>> detector = WatermarkDetector(model_config=model.config, device="cpu", watermarking_config=watermarking_config)
>>> detection_out = detector(out, return_dict=True)
>>> detection_out.prediction
array([True, True])
```

## Decoding strategies

Certain combinations of the `generate()` parameters, and ultimately `generation_config`, can be used to enable specific decoding strategies. If you are new to this concept, we recommend reading [this blog post that illustrates how common decoding strategies work](https://huggingface.co/blog/how-to-generate).

Here, we'll show some of the parameters that control the decoding strategies and illustrate how you can use them.

<Tip>

Selecting a given decoding strategy is not the only way you can influence the outcome of `generate()` with your model. The decoding strategies act based (mostly) on the logits, the distribution of probabilities for the next token, so selecting a good logits manipulation strategy can go a long way! In other words, manipulating the logits is another dimension you can act upon, in addition to selecting a decoding strategy. Popular logits manipulation strategies include `top_p`, `min_p`, and `repetition_penalty` -- you can check the full list in the [`GenerationConfig`] class.

</Tip>

### Greedy Search

[`generate`] uses greedy search decoding by default so you don't have to pass any parameters to enable it. This means the parameter `num_beams` is set to 1 and `do_sample=False`.
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "I look forward to"
>>> checkpoint = "distilbert/distilgpt2"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> outputs = model.generate(**inputs)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['I look forward to seeing you all again!\n\n\n\n\n\n\n\n\n\n\n']
```

### Contrastive search

The contrastive search decoding strategy was proposed in the 2022 paper [A Contrastive Framework for Neural Text Generation](https://arxiv.org/abs/2202.06417). It demonstrates superior results for generating non-repetitive yet coherent long outputs. To learn how contrastive search works, check out [this blog post](https://huggingface.co/blog/introducing-csearch). The two main parameters that enable and control the behavior of contrastive search are `penalty_alpha` and `top_k`:

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM

>>> checkpoint = "openai-community/gpt2-large"
>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> prompt = "Hugging Face Company is"
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> outputs = model.generate(**inputs, penalty_alpha=0.6, top_k=4, max_new_tokens=100)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Hugging Face Company is a family owned and operated business. We pride ourselves on being the best in the business and our customer service is second to none.\n\nIf you have any questions about our products or services, feel free to contact us at any time. We look forward to hearing from you!']
```

### Multinomial sampling

As opposed to greedy search, which always chooses the token with the highest probability as the next token, multinomial sampling (also called ancestral sampling) randomly selects the next token based on the probability distribution over the entire vocabulary given by the model. Every token with a non-zero probability has a chance of being selected, thus reducing the risk of repetition.

To enable multinomial sampling set `do_sample=True` and `num_beams=1`.

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
>>> set_seed(0)  # For reproducibility

>>> checkpoint = "openai-community/gpt2-large"
>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> prompt = "Today was an amazing day because"
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> outputs = model.generate(**inputs, do_sample=True, num_beams=1, max_new_tokens=100)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
["Today was an amazing day because we received these wonderful items by the way of a gift shop. The box arrived on a Thursday and I opened it on Monday afternoon to receive the gifts. Both bags featured pieces from all the previous years!\n\nThe box had lots of surprises in it, including some sweet little mini chocolate chips! I don't think I'd eat all of these. This was definitely one of the most expensive presents I have ever got, I actually got most of them for free!\n\nThe first package came"]
```

### Beam-search decoding

Unlike greedy search, beam-search decoding keeps several hypotheses at each time step and eventually chooses the hypothesis that has the overall highest probability for the entire sequence.
This has the advantage of identifying high-probability sequences that start with lower-probability initial tokens and would've been ignored by greedy search.

<a href="https://huggingface.co/spaces/m-ric/beam_search_visualizer" class="flex flex-col justify-center">
    <img style="max-width: 90%; margin: auto;" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/beam_search.png"/>
</a>

You can visualize how beam-search decoding works in [this interactive demo](https://huggingface.co/spaces/m-ric/beam_search_visualizer): type your input sentence, and play with the parameters to see how the decoding beams change.

To enable this decoding strategy, specify `num_beams` (aka the number of hypotheses to keep track of) with a value greater than 1.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "It is astonishing how one can"
>>> checkpoint = "openai-community/gpt2-medium"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> outputs = model.generate(**inputs, num_beams=5, max_new_tokens=50)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['It is astonishing how one can have such a profound impact on the lives of so many people in such a short period of time."\n\nHe added: "I am very proud of the work I have been able to do in the last few years.\n\n"I have']
```

### Beam-search multinomial sampling

As the name implies, this decoding strategy combines beam search with multinomial sampling. To use it, set `num_beams` to a value greater than 1 and set `do_sample=True`.

```python
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, set_seed
>>> set_seed(0)  # For reproducibility

>>> prompt = "translate English to German: The house is wonderful."
>>> checkpoint = "google-t5/t5-small"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

>>> outputs = model.generate(**inputs, num_beams=5, do_sample=True)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
'Das Haus ist wunderbar.'
```

### Diverse beam search decoding

The diverse beam search decoding strategy is an extension of the beam search strategy that allows for generating a more diverse set of beam sequences to choose from. To learn how it works, refer to [Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models](https://arxiv.org/pdf/1610.02424.pdf). This approach has three main parameters: `num_beams`, `num_beam_groups`, and `diversity_penalty`. The diversity penalty ensures the outputs are distinct across groups, and beam search is used within each group.

```python
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> checkpoint = "google/pegasus-xsum"
>>> prompt = (
...     "The Permaculture Design Principles are a set of universal design principles "
...     "that can be applied to any location, climate and culture, and they allow us to design "
...     "the most efficient and sustainable human habitation and food production systems. "
...     "Permaculture is a design system that encompasses a wide variety of disciplines, such "
...     "as ecology, landscape design, environmental science and energy conservation, and the "
...     "Permaculture design principles are drawn from these various disciplines. Each individual "
...     "design principle itself embodies a complete conceptual framework based on sound "
...     "scientific principles. When we bring all these separate principles together, we can "
...     "create a design system that both looks at whole systems, the parts that these systems "
...     "consist of, and how those parts interact with each other to create a complex, dynamic, "
...     "living system. Each design principle serves as a tool that allows us to integrate all "
...     "the separate parts of a design, referred to as elements, into a functional, synergistic, "
...     "whole system, where the elements harmoniously interact and work together in the most "
...     "efficient way possible."
... )

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

>>> outputs = model.generate(**inputs, num_beams=5, num_beam_groups=5, max_new_tokens=30, diversity_penalty=1.0)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
'The Design Principles are a set of universal design principles that can be applied to any location, climate and culture, and they allow us to design the'
```

This guide illustrates the main parameters that enable various decoding strategies. More advanced parameters exist for the [`generate`] method, giving you even further control over its behavior. For the complete list of the available parameters, refer to the [API documentation](./main_classes/text_generation.md).

### Speculative Decoding

Speculative decoding (also known as assisted decoding) is a modification of the decoding strategies above that uses an assistant model (ideally a much smaller one) with the same tokenizer to generate a few candidate tokens. The main model then validates the candidate tokens in a single forward pass, which speeds up the decoding process. If `do_sample=True`, then the token validation with resampling introduced in the [speculative decoding paper](https://arxiv.org/pdf/2211.17192.pdf) is used.

Currently, only greedy search and sampling are supported with assisted decoding, and assisted decoding doesn't support batched inputs. To learn more about assisted decoding, check [this blog post](https://huggingface.co/blog/assisted-generation).

To enable assisted decoding, set the `assistant_model` argument with a model.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "Alice and Bob"
>>> checkpoint = "EleutherAI/pythia-1.4b-deduped"
>>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
```

When using assisted decoding with sampling methods, you can use the `temperature` argument to control the randomness, just like in multinomial sampling. However, in assisted decoding, reducing the temperature may help improve the latency.
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
>>> set_seed(42)  # For reproducibility

>>> prompt = "Alice and Bob"
>>> checkpoint = "EleutherAI/pythia-1.4b-deduped"
>>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.5)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Alice and Bob, a couple of friends of mine, who are both in the same office as']
```

Alternatively, you can also set the `prompt_lookup_num_tokens` to trigger n-gram based assisted decoding, as opposed to model based assisted decoding. You can read more about it [here](https://twitter.com/joao_gante/status/1747322413006643259).

### DoLa Decoding

**D**ecoding by C**o**ntrasting **La**yers (DoLa) is a contrastive decoding strategy for improving the factuality and reducing the hallucinations of LLMs, as described in the ICLR 2024 paper [DoLa: Decoding by Contrasting Layers Improves Factuality in Large Language Models](https://arxiv.org/abs/2309.03883).

DoLa is achieved by contrasting the differences in logits obtained from final layers versus earlier layers, thus amplifying the factual knowledge localized to particular parts of the transformer layers.

Do the following two steps to activate DoLa decoding when calling the `model.generate` function:
1. Set the `dola_layers` argument, which can be either a string or a list of integers.
    - If set to a string, it can be one of `low`, `high`.
    - If set to a list of integers, it should be a list of layer indices between 0 and the total number of layers in the model. The 0-th layer is the word embedding, the 1st layer is the first transformer layer, and so on.
2. Setting `repetition_penalty = 1.2` is suggested to reduce repetition in DoLa decoding.

See the following examples for DoLa decoding with the 32-layer LLaMA-7B model.

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
>>> import torch

>>> tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
>>> model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", torch_dtype=torch.float16)
>>> device = 'cuda' if torch.cuda.is_available() else 'cpu'
>>> model.to(device)
>>> set_seed(42)

>>> text = "On what date was the Declaration of Independence officially signed?"
>>> inputs = tokenizer(text, return_tensors="pt").to(device)

# Vanilla greedy decoding
>>> vanilla_output = model.generate(**inputs, do_sample=False, max_new_tokens=50)
>>> tokenizer.batch_decode(vanilla_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
['\nThe Declaration of Independence was signed on July 4, 1776.\nWhat was the date of the signing of the Declaration of Independence?\nThe Declaration of Independence was signed on July 4,']

# DoLa decoding with contrasting higher part of layers (layers 16,18,...,30)
>>> dola_high_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers='high')
>>> tokenizer.batch_decode(dola_high_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
['\nJuly 4, 1776, when the Continental Congress voted to separate from Great Britain. The 56 delegates to the Continental Congress signed the Declaration on August 2, 1776.']

# DoLa decoding with contrasting specific layers (layers 28 and 30)
>>> dola_custom_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers=[28,30], repetition_penalty=1.2)
>>> tokenizer.batch_decode(dola_custom_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
['\nIt was officially signed on 2 August 1776, when 56 members of the Second Continental Congress, representing the original 13 American colonies, voted unanimously for the resolution for independence. The 2']
```

#### Understanding the `dola_layers` argument

`dola_layers` stands for the candidate layers in premature layer selection, as described in the DoLa paper. The selected premature layer will be contrasted with the final layer.

Setting `dola_layers` to `'low'` or `'high'` will select the lower or higher part of the layers to contrast, respectively.
- For `N`-layer models with `N <= 40` layers, the layers of `range(0, N // 2, 2)` and `range(N // 2, N, 2)` are used for `'low'` and `'high'` layers, respectively.
- For models with `N > 40` layers, the layers of `range(0, 20, 2)` and `range(N - 20, N, 2)` are used for `'low'` and `'high'` layers, respectively.
- If the model has tied word embeddings, we skip the word embeddings (0-th) layer and start from the 2nd layer, as the early exit from the word embeddings would become an identity function.
- Set `dola_layers` to a list of integer layer indices to contrast manually specified layers. For example, setting `dola_layers=[28,30]` will contrast the final layer (the 32nd layer) with the 28th and 30th layers.

The paper suggests contrasting `'high'` layers to improve short-answer tasks like TruthfulQA, and contrasting `'low'` layers to improve all the other long-answer reasoning tasks, such as GSM8K, StrategyQA, FACTOR, and VicunaQA. Applying DoLa to smaller models like GPT-2 is not recommended, as shown in Appendix N of the paper.
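To make the `'low'`/`'high'` selection rules above concrete, the following standalone sketch (not part of the `transformers` API) reproduces the candidate-layer computation for a model without tied word embeddings:

```python
# Illustration only: mirrors the candidate-layer rules described above,
# assuming a model without tied word embeddings.
def dola_candidate_layers(num_layers: int, mode: str) -> list:
    if num_layers <= 40:
        low, high = range(0, num_layers // 2, 2), range(num_layers // 2, num_layers, 2)
    else:
        low, high = range(0, 20, 2), range(num_layers - 20, num_layers, 2)
    return list(low) if mode == "low" else list(high)

print(dola_candidate_layers(32, "high"))  # [16, 18, 20, 22, 24, 26, 28, 30] -- matches the LLaMA-7B example above
```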
transformers/docs/source/en/generation_strategies.md/0
{ "file_path": "transformers/docs/source/en/generation_strategies.md", "repo_id": "transformers", "token_count": 8119 }
301
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# LLM inference optimization

Large language models (LLMs) have pushed text generation applications, such as chat and code completion models, to the next level by producing text that displays a high level of understanding and fluency. But what makes LLMs so powerful - namely their size - also presents challenges for inference.

Basic inference is slow because LLMs have to be called repeatedly to generate the next token. The input sequence grows as generation progresses, which takes longer and longer for the LLM to process. LLMs also have billions of parameters, making it a challenge to store and handle all those weights in memory.

This guide will show you how to use the optimization techniques available in Transformers to accelerate LLM inference.

> [!TIP]
> Hugging Face also provides [Text Generation Inference (TGI)](https://hf.co/docs/text-generation-inference), a library dedicated to deploying and serving highly optimized LLMs for inference. It includes deployment-oriented optimization features not included in Transformers, such as continuous batching for increasing throughput and tensor parallelism for multi-GPU inference.

## Static kv-cache and `torch.compile`

During decoding, an LLM computes the key-value (kv) values for each input token, and since it is autoregressive, it computes the same kv values each time because the generated output becomes part of the input. This is not very efficient because you're recomputing the same kv values each time.

To optimize this, you can use a kv-cache to store the past keys and values instead of recomputing them each time. However, since the kv-cache grows with each generation step and is dynamic, it prevents you from taking advantage of [`torch.compile`](./perf_torch_compile), a powerful optimization tool that fuses PyTorch code into fast and optimized kernels (the short sketch after the list below illustrates this growth).

The *static kv-cache* solves this issue by pre-allocating the kv-cache size to a maximum value, which allows you to combine it with `torch.compile` for up to a 4x speed up. Your speed up may vary depending on the model size (larger models have a smaller speed up) and hardware.

> [!WARNING]
> Currently, only [Llama](./model_doc/llama2) and a few other models support static kv-cache and `torch.compile`. Check [this issue](https://github.com/huggingface/transformers/issues/28981) for a live model compatibility list.

There are three flavors of static kv-cache usage, depending on the complexity of your task:
1. Basic usage: simply set a flag in `generation_config` (recommended);
2. Advanced usage: handle a cache object for multi-turn generation or a custom generation loop;
3. Advanced usage: compile the entire `generate` function into a single graph, if having a single graph is relevant for you.
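As a minimal sketch of the recompilation problem mentioned above: with the default dynamic cache, the cached key/value tensors grow by one position at every decoding step, so a compiled graph keeps seeing new input shapes. The checkpoint and prompt below are just examples.

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b")

inputs = tokenizer("The theory of special relativity states ", return_tensors="pt")
past_key_values = DynamicCache()

# First forward pass fills the cache up to the prompt length
with torch.no_grad():
    out = model(**inputs, past_key_values=past_key_values, use_cache=True)
print(past_key_values.get_seq_length())  # == prompt length

# Each subsequent decoding step grows the cache by one position
next_token = out.logits[:, -1:].argmax(-1)
with torch.no_grad():
    model(input_ids=next_token, past_key_values=past_key_values, use_cache=True)
print(past_key_values.get_seq_length())  # prompt length + 1 -- the cache grew
```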
Select the correct tab below for further instructions on each of these flavors.

> [!TIP]
> Regardless of the strategy used with `torch.compile`, you can avoid shape-related recompilations if you left-pad your LLM inputs to a limited set of values. The [`pad_to_multiple_of` tokenizer flag](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__.pad_to_multiple_of) is your friend!

<hfoptions id="static-kv">
<hfoption id="basic usage: generation_config">

For this example, let's use the [Gemma](https://hf.co/google/gemma-2b) model. All we need to do is to:
1. Access the model's `generation_config` attribute and set the `cache_implementation` to "static";
2. Call `torch.compile` on the model to compile the forward pass with the static kv-cache.

And that's it!

```py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To prevent long warnings :)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")

model.generation_config.cache_implementation = "static"

model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
input_text = "The theory of special relativity states "
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['The theory of special relativity states 1. The speed of light is constant in all inertial reference']
```

Under the hood, `generate` will attempt to reuse the same cache object, removing the need for re-compilation at each call. Avoiding re-compilation is critical to get the most out of `torch.compile`, and you should be aware of the following:
1. If the batch size changes or the maximum output length increases between calls, the cache will have to be reinitialized, triggering a new compilation;
2. The first couple of calls of the compiled function are slower, as the function is being compiled.

> [!WARNING]
> For a more advanced usage of the static cache, such as multi-turn conversations, we recommend instantiating and manipulating the cache object outside [`~GenerationMixin.generate`]. See the advanced usage tab.

</hfoption>
<hfoption id="advanced usage: control Static Cache">

A [`StaticCache`] object can be passed to the model's [`~GenerationMixin.generate`] under the `past_key_values` argument. The object will retain the cache contents, so you can pass it to a new [`~GenerationMixin.generate`] call to continue generation, like you would do with a dynamic cache.
```py
from transformers import AutoTokenizer, AutoModelForCausalLM, StaticCache
import torch
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To prevent long warnings :)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")

model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
input_text = "The theory of special relativity states "
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
prompt_length = input_ids.input_ids.shape[1]
model.generation_config.max_new_tokens = 16

past_key_values = StaticCache(
    config=model.config,
    batch_size=1,
    # If you plan to reuse the cache, make sure the cache length is large enough for all cases
    max_cache_len=prompt_length+(model.generation_config.max_new_tokens*2),
    device=model.device,
    dtype=model.dtype
)
outputs = model.generate(**input_ids, past_key_values=past_key_values)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2']

# pass in the generated text and the same cache object to continue generation from where it left off. Optionally, in a
# multi-turn conversation, append the new user input to the generated text.
new_input_ids = outputs
outputs = model.generate(new_input_ids, past_key_values=past_key_values)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2. The speed of light is constant in all inertial reference frames. 3.']
```

> [!TIP]
> If you want to reuse the same [`StaticCache`] object on a new prompt, be sure to reset its contents with the `.reset()` method between calls

If you want to go further down a level, the [`StaticCache`] object can also be passed to the model's forward pass under the same `past_key_values` argument. Using this strategy, you can write your own function to decode the next token given the current token, the position, and the cache position of previously generated tokens.

```py
from transformers import LlamaTokenizer, LlamaForCausalLM, StaticCache, logging
from transformers.testing_utils import CaptureLogger
import torch

prompts = [
    "Simply put, the theory of relativity states that ",
    "My favorite all time favorite condiment is ketchup.",
]

NUM_TOKENS_TO_GENERATE = 40
torch_device = "cuda"

tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", pad_token="</s>", padding_side="right")
model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="sequential")
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)

def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_values):
    logits = model(
        cur_token,
        position_ids=input_pos,
        cache_position=cache_position,
        past_key_values=past_key_values,
        return_dict=False,
        use_cache=True
    )[0]
    new_token = torch.argmax(logits[:, -1], dim=-1)[:, None]
    return new_token
```

There are a few important things you must do to enable static kv-cache and `torch.compile` with the `StaticCache` method:
1. Initialize the [`StaticCache`] instance before using the model for inference. There you can configure parameters like the maximum batch size and sequence length.
2. Call `torch.compile` on the model to compile the forward pass with the static kv-cache.
3. Set `enable_math=True` in the [torch.backends.cuda.sdp_kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) context manager to enable the native PyTorch C++ implementation of scaled dot product attention to speed up inference even more.

```py
batch_size, seq_length = inputs["input_ids"].shape
with torch.no_grad():
    past_key_values = StaticCache(
        config=model.config, batch_size=2, max_cache_len=4096, device=torch_device, dtype=model.dtype
    )
    cache_position = torch.arange(seq_length, device=torch_device)
    generated_ids = torch.zeros(
        batch_size, seq_length + NUM_TOKENS_TO_GENERATE + 1, dtype=torch.int, device=torch_device
    )
    generated_ids[:, cache_position] = inputs["input_ids"].to(torch_device).to(torch.int)

    logits = model(
        **inputs, cache_position=cache_position, past_key_values=past_key_values, return_dict=False, use_cache=True
    )[0]
    next_token = torch.argmax(logits[:, -1], dim=-1)[:, None]
    generated_ids[:, seq_length] = next_token[:, 0]

    decode_one_tokens = torch.compile(decode_one_tokens, mode="reduce-overhead", fullgraph=True)
    cache_position = torch.tensor([seq_length + 1], device=torch_device)
    for _ in range(1, NUM_TOKENS_TO_GENERATE):
        with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
            next_token = decode_one_tokens(model, next_token.clone(), None, cache_position, past_key_values)
            generated_ids[:, cache_position] = next_token.int()
        cache_position += 1

text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
text
['Simply put, the theory of relativity states that 1) the speed of light is constant, 2) the speed of light is the same for all observers, and 3) the laws of physics are the same for all observers.',
 'My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p']
```

</hfoption>
<hfoption id="advanced usage: end-to-end generate compilation">

Compiling the entire `generate` function, in terms of code, is even simpler than in the basic usage: call `torch.compile` on `generate` to compile the entire function. No need to specify the use of the static cache: although it is compatible, the dynamic cache (default) was faster in our benchmarks.

```py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To prevent long warnings :)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")

model.generate = torch.compile(model.generate, mode="reduce-overhead", fullgraph=True)
input_text = "The theory of special relativity states "
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['The theory of special relativity states 1. The speed of light is constant in all inertial reference']
```

As a result, we compile not only the model forward pass, but also all input preparation, logit processor operations, and so on. The result should be a slightly faster `generate` call, compared to the basic usage example, and the compiled graph may be better suited to more exotic hardware devices or use cases. However, there are severe drawbacks in using this approach:
1. Compilation is much slower;
2. All parameterization of `generate` must be done through `generation_config`;
3. Many warnings and exceptions are suppressed -- we suggest testing with the uncompiled form first;
4. Although we are working on it, it is heavily feature restricted (for instance, at the time of writing, generation does not stop if an EOS token is selected).

</hfoption>
</hfoptions>

## Speculative decoding

> [!TIP]
> For a more in-depth explanation, take a look at the [Assisted Generation: a new direction toward low-latency text generation](https://hf.co/blog/assisted-generation) blog post!

Another issue with autoregression is that for each input token you need to load the model weights each time during the forward pass. This is slow and cumbersome for LLMs which have billions of parameters. Speculative decoding alleviates this slowdown by using a second smaller and faster assistant model to generate candidate tokens that are verified by the larger LLM in a single forward pass. If the verified tokens are correct, the LLM essentially gets them for "free" without having to generate them itself. There is no degradation in accuracy because the verification forward pass ensures the same outputs are generated as if the LLM had generated them on its own.

To get the largest speed up, the assistant model should be a lot smaller than the LLM so that it can generate tokens quickly. The assistant and LLM model must also share the same tokenizer to avoid re-encoding and decoding tokens.

> [!WARNING]
> Speculative decoding is only supported for the greedy search and sampling decoding strategies, and it also doesn't support batched inputs.

Enable speculative decoding by loading an assistant model and passing it to the [`~GenerationMixin.generate`] method.

<hfoptions id="spec-decoding">
<hfoption id="greedy search">

```py
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
inputs = tokenizer("Einstein's theory of relativity states", return_tensors="pt").to(device)

model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device)
assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device)
outputs = model.generate(**inputs, assistant_model=assistant_model)
tokenizer.batch_decode(outputs, skip_special_tokens=True)
["Einstein's theory of relativity states that the speed of light is constant. "]
```

</hfoption>
<hfoption id="sampling">

For speculative sampling decoding, add the `do_sample` and `temperature` parameters to the [`~GenerationMixin.generate`] method in addition to the assistant model.

```py
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
inputs = tokenizer("Einstein's theory of relativity states", return_tensors="pt").to(device)

model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device)
assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device)
outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.7)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
["Einstein's theory of relativity states that motion in the universe is not a straight line.\n"]
```

</hfoption>
</hfoptions>

### Prompt lookup decoding

Prompt lookup decoding is a variant of speculative decoding that is also compatible with greedy search and sampling.
Prompt lookup works especially well for input-grounded tasks - such as summarization - where there are often overlapping words between the prompt and output. These overlapping n-grams are used as the LLM candidate tokens.

To enable prompt lookup decoding, specify the number of tokens that should be overlapping in the `prompt_lookup_num_tokens` parameter. Then you can pass this parameter to the [`~GenerationMixin.generate`] method.

<hfoptions id="pld">
<hfoption id="greedy decoding">

```py
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device)

model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device)
assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device)
outputs = model.generate(**inputs, prompt_lookup_num_tokens=3)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['The second law of thermodynamics states that entropy increases with temperature. ']
```

</hfoption>
<hfoption id="sampling">

For prompt lookup decoding with sampling, add the `do_sample` and `temperature` parameters to the [`~GenerationMixin.generate`] method.

```py
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device)

model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device)
outputs = model.generate(**inputs, prompt_lookup_num_tokens=3, do_sample=True, temperature=0.7)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
["The second law of thermodynamics states that energy cannot be created nor destroyed. It's not a"]
```

</hfoption>
</hfoptions>

## Attention optimizations

A known issue with transformer models is that the self-attention mechanism grows quadratically in compute and memory with the number of input tokens. This limitation is only magnified in LLMs, which handle much longer sequences. To address this, try FlashAttention-2 or PyTorch's scaled dot product attention (SDPA), which are more memory efficient attention implementations that can accelerate inference.

### FlashAttention-2

FlashAttention and [FlashAttention-2](./perf_infer_gpu_one#flashattention-2) break up the attention computation into smaller chunks and reduce the number of intermediate read/write operations to GPU memory to speed up inference. FlashAttention-2 improves on the original FlashAttention algorithm by also parallelizing over the sequence length dimension and better partitioning work on the hardware to reduce synchronization and communication overhead.

To use FlashAttention-2, set `attn_implementation="flash_attention_2"` in the [`~PreTrainedModel.from_pretrained`] method.

```py
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_8bit=True)

model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
```

### PyTorch scaled dot product attention

Scaled dot product attention (SDPA) is automatically enabled in PyTorch 2.0 and it supports FlashAttention, xFormers, and PyTorch's C++ implementation.
SDPA chooses the most performant attention algorithm if you're using a CUDA backend. For other backends, SDPA defaults to the PyTorch C++ implementation.

> [!TIP]
> SDPA supports FlashAttention-2 as long as you have the latest PyTorch version installed.

Use the [torch.backends.cuda.sdp_kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) context manager to explicitly enable or disable any of the three attention algorithms. For example, set `enable_flash=True` to enable FlashAttention.

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b",
    torch_dtype=torch.bfloat16,
)
inputs = tokenizer("The theory of special relativity states ", return_tensors="pt")

with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
    outputs = model.generate(**inputs)
```

## Quantization

Quantization reduces the size of the LLM weights by storing them in a lower precision. This translates to lower memory usage, and makes loading LLMs for inference more accessible if you're constrained by your GPU's memory. If you aren't limited by your GPU, you don't necessarily need to quantize your model because it can incur a small latency cost (except for AWQ and fused AWQ modules) due to the extra step required to quantize and dequantize the weights.

> [!TIP]
> There are many quantization libraries (see the [Quantization](./quantization) guide for more details) available, such as Quanto, AQLM, AWQ, and AutoGPTQ. Feel free to try them out and see which one works best for your use case. We also recommend reading the [Overview of natively supported quantization schemes in 🤗 Transformers](https://hf.co/blog/overview-quantization-transformers) blog post which compares AutoGPTQ and bitsandbytes.

Use the Model Memory Calculator below to estimate and compare how much memory is required to load a model. For example, try estimating how much memory it costs to load [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1).

<iframe
	src="https://hf-accelerate-model-memory-usage.hf.space"
	frameborder="0"
	width="850"
	height="450"
></iframe>

To load Mistral-7B-v0.1 in half-precision, set the `torch_dtype` parameter in the [`~transformers.AutoModelForCausalLM.from_pretrained`] method to `torch.bfloat16`. This requires 13.74GB of memory.

```py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1", torch_dtype=torch.bfloat16, device_map="auto",
)
```

To load a quantized model (8-bit or 4-bit) for inference, try [bitsandbytes](https://hf.co/docs/bitsandbytes) and set the `load_in_4bit` or `load_in_8bit` parameters to `True`. Loading the model in 8-bit only requires 6.87 GB of memory.

```py
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch

quant_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1", quantization_config=quant_config, device_map="auto"
)
```
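For completeness, here is a sketch of the 4-bit variant mentioned above; the extra `bnb_4bit_*` options are optional bitsandbytes settings shown as common choices, not requirements.

```py
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
import torch

# 4-bit loading; the compute dtype and quantization type below are common
# choices, not the only valid ones.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_type="nf4",
)
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1", quantization_config=quant_config, device_map="auto"
)
```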
transformers/docs/source/en/llm_optims.md/0
{ "file_path": "transformers/docs/source/en/llm_optims.md", "repo_id": "transformers", "token_count": 6884 }
302
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Model outputs

All models have outputs that are instances of subclasses of [`~utils.ModelOutput`]. Those are data structures containing all the information returned by the model, but they can also be used as tuples or dictionaries.

Let's see how this looks in an example:

```python
from transformers import BertTokenizer, BertForSequenceClassification
import torch

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = BertForSequenceClassification.from_pretrained("google-bert/bert-base-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
outputs = model(**inputs, labels=labels)
```

The `outputs` object is a [`~modeling_outputs.SequenceClassifierOutput`]. As we can see in the documentation of that class below, this means it has an optional `loss`, a `logits`, an optional `hidden_states` and an optional `attentions` attribute. Here we have the `loss` since we passed along `labels`, but we don't have `hidden_states` and `attentions` because we didn't pass `output_hidden_states=True` or `output_attentions=True`.

<Tip>

When passing `output_hidden_states=True`, you may expect `outputs.hidden_states[-1]` to match `outputs.last_hidden_state` exactly. However, this is not always the case. Some models apply normalization or subsequent processing to the last hidden state when it's returned.

</Tip>

You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `None`. Here for instance `outputs.loss` is the loss computed by the model, and `outputs.attentions` is `None`.

When considering our `outputs` object as a tuple, it only considers the attributes that don't have `None` values. Here for instance, it has two elements, `loss` then `logits`, so

```python
outputs[:2]
```

will return the tuple `(outputs.loss, outputs.logits)`.

When considering our `outputs` object as a dictionary, it only considers the attributes that don't have `None` values. Here for instance, it has two keys that are `loss` and `logits`.

We document here the generic model outputs that are used by more than one model type. Specific output types are documented on their corresponding model page.
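As a quick illustration of the access patterns described above, here is a minimal sketch reusing the `outputs` object from the example at the top of this page:

```python
# The same field is reachable through attribute, dictionary-style, and
# tuple-style access; `to_tuple()` drops the `None` fields.
loss = outputs.loss
loss = outputs["loss"]
loss, logits = outputs.to_tuple()
```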
## ModelOutput

[[autodoc]] utils.ModelOutput
    - to_tuple

## BaseModelOutput

[[autodoc]] modeling_outputs.BaseModelOutput

## BaseModelOutputWithPooling

[[autodoc]] modeling_outputs.BaseModelOutputWithPooling

## BaseModelOutputWithCrossAttentions

[[autodoc]] modeling_outputs.BaseModelOutputWithCrossAttentions

## BaseModelOutputWithPoolingAndCrossAttentions

[[autodoc]] modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions

## BaseModelOutputWithPast

[[autodoc]] modeling_outputs.BaseModelOutputWithPast

## BaseModelOutputWithPastAndCrossAttentions

[[autodoc]] modeling_outputs.BaseModelOutputWithPastAndCrossAttentions

## Seq2SeqModelOutput

[[autodoc]] modeling_outputs.Seq2SeqModelOutput

## CausalLMOutput

[[autodoc]] modeling_outputs.CausalLMOutput

## CausalLMOutputWithCrossAttentions

[[autodoc]] modeling_outputs.CausalLMOutputWithCrossAttentions

## CausalLMOutputWithPast

[[autodoc]] modeling_outputs.CausalLMOutputWithPast

## MaskedLMOutput

[[autodoc]] modeling_outputs.MaskedLMOutput

## Seq2SeqLMOutput

[[autodoc]] modeling_outputs.Seq2SeqLMOutput

## NextSentencePredictorOutput

[[autodoc]] modeling_outputs.NextSentencePredictorOutput

## SequenceClassifierOutput

[[autodoc]] modeling_outputs.SequenceClassifierOutput

## Seq2SeqSequenceClassifierOutput

[[autodoc]] modeling_outputs.Seq2SeqSequenceClassifierOutput

## MultipleChoiceModelOutput

[[autodoc]] modeling_outputs.MultipleChoiceModelOutput

## TokenClassifierOutput

[[autodoc]] modeling_outputs.TokenClassifierOutput

## QuestionAnsweringModelOutput

[[autodoc]] modeling_outputs.QuestionAnsweringModelOutput

## Seq2SeqQuestionAnsweringModelOutput

[[autodoc]] modeling_outputs.Seq2SeqQuestionAnsweringModelOutput

## Seq2SeqSpectrogramOutput

[[autodoc]] modeling_outputs.Seq2SeqSpectrogramOutput

## SemanticSegmenterOutput

[[autodoc]] modeling_outputs.SemanticSegmenterOutput

## ImageClassifierOutput

[[autodoc]] modeling_outputs.ImageClassifierOutput

## ImageClassifierOutputWithNoAttention

[[autodoc]] modeling_outputs.ImageClassifierOutputWithNoAttention

## DepthEstimatorOutput

[[autodoc]] modeling_outputs.DepthEstimatorOutput

## Wav2Vec2BaseModelOutput

[[autodoc]] modeling_outputs.Wav2Vec2BaseModelOutput

## XVectorOutput

[[autodoc]] modeling_outputs.XVectorOutput

## Seq2SeqTSModelOutput

[[autodoc]] modeling_outputs.Seq2SeqTSModelOutput

## Seq2SeqTSPredictionOutput

[[autodoc]] modeling_outputs.Seq2SeqTSPredictionOutput

## SampleTSPredictionOutput

[[autodoc]] modeling_outputs.SampleTSPredictionOutput

## TFBaseModelOutput

[[autodoc]] modeling_tf_outputs.TFBaseModelOutput

## TFBaseModelOutputWithPooling

[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPooling

## TFBaseModelOutputWithPoolingAndCrossAttentions

[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions

## TFBaseModelOutputWithPast

[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPast

## TFBaseModelOutputWithPastAndCrossAttentions

[[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions

## TFSeq2SeqModelOutput

[[autodoc]] modeling_tf_outputs.TFSeq2SeqModelOutput

## TFCausalLMOutput

[[autodoc]] modeling_tf_outputs.TFCausalLMOutput

## TFCausalLMOutputWithCrossAttentions

[[autodoc]] modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions

## TFCausalLMOutputWithPast

[[autodoc]] modeling_tf_outputs.TFCausalLMOutputWithPast

## TFMaskedLMOutput

[[autodoc]] modeling_tf_outputs.TFMaskedLMOutput

## TFSeq2SeqLMOutput

[[autodoc]] modeling_tf_outputs.TFSeq2SeqLMOutput

## TFNextSentencePredictorOutput

[[autodoc]] modeling_tf_outputs.TFNextSentencePredictorOutput

## TFSequenceClassifierOutput

[[autodoc]] modeling_tf_outputs.TFSequenceClassifierOutput

## TFSeq2SeqSequenceClassifierOutput

[[autodoc]] modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput

## TFMultipleChoiceModelOutput

[[autodoc]] modeling_tf_outputs.TFMultipleChoiceModelOutput

## TFTokenClassifierOutput

[[autodoc]] modeling_tf_outputs.TFTokenClassifierOutput

## TFQuestionAnsweringModelOutput

[[autodoc]] modeling_tf_outputs.TFQuestionAnsweringModelOutput

## TFSeq2SeqQuestionAnsweringModelOutput

[[autodoc]] modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput

## FlaxBaseModelOutput

[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutput

## FlaxBaseModelOutputWithPast

[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPast

## FlaxBaseModelOutputWithPooling

[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPooling

## FlaxBaseModelOutputWithPastAndCrossAttentions

[[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions

## FlaxSeq2SeqModelOutput

[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqModelOutput

## FlaxCausalLMOutputWithCrossAttentions

[[autodoc]] modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions

## FlaxMaskedLMOutput

[[autodoc]] modeling_flax_outputs.FlaxMaskedLMOutput

## FlaxSeq2SeqLMOutput

[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqLMOutput

## FlaxNextSentencePredictorOutput

[[autodoc]] modeling_flax_outputs.FlaxNextSentencePredictorOutput

## FlaxSequenceClassifierOutput

[[autodoc]] modeling_flax_outputs.FlaxSequenceClassifierOutput

## FlaxSeq2SeqSequenceClassifierOutput

[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput

## FlaxMultipleChoiceModelOutput

[[autodoc]] modeling_flax_outputs.FlaxMultipleChoiceModelOutput

## FlaxTokenClassifierOutput

[[autodoc]] modeling_flax_outputs.FlaxTokenClassifierOutput

## FlaxQuestionAnsweringModelOutput

[[autodoc]] modeling_flax_outputs.FlaxQuestionAnsweringModelOutput

## FlaxSeq2SeqQuestionAnsweringModelOutput

[[autodoc]] modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput
transformers/docs/source/en/main_classes/output.md/0
{ "file_path": "transformers/docs/source/en/main_classes/output.md", "repo_id": "transformers", "token_count": 2785 }
303
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# BARTpho

## Overview

The BARTpho model was proposed in [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.

The abstract from the paper is the following:

*We present BARTpho with two versions -- BARTpho_word and BARTpho_syllable -- the first public large-scale monolingual sequence-to-sequence models pre-trained for Vietnamese. Our BARTpho uses the "large" architecture and pre-training scheme of the sequence-to-sequence denoising model BART, thus especially suitable for generative NLP tasks. Experiments on a downstream task of Vietnamese text summarization show that in both automatic and human evaluations, our BARTpho outperforms the strong baseline mBART and improves the state-of-the-art. We release BARTpho to facilitate future research and applications of generative Vietnamese NLP tasks.*

This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/BARTpho).

## Usage example

```python
>>> import torch
>>> from transformers import AutoModel, AutoTokenizer

>>> bartpho = AutoModel.from_pretrained("vinai/bartpho-syllable")

>>> tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")

>>> line = "Chúng tôi là những nghiên cứu viên."

>>> input_ids = tokenizer(line, return_tensors="pt")

>>> with torch.no_grad():
...     features = bartpho(**input_ids)  # Model outputs are now tuples

>>> # With TensorFlow 2.0+:
>>> from transformers import TFAutoModel

>>> bartpho = TFAutoModel.from_pretrained("vinai/bartpho-syllable")
>>> input_ids = tokenizer(line, return_tensors="tf")
>>> features = bartpho(**input_ids)
```

## Usage tips

- Following mBART, BARTpho uses the "large" architecture of BART with an additional layer-normalization layer on top of both the encoder and decoder. Thus, usage examples in the [documentation of BART](bart), when adapting to use with BARTpho, should be adjusted by replacing the BART-specialized classes with the mBART-specialized counterparts. For example:

```python
>>> from transformers import MBartForConditionalGeneration

>>> bartpho = MBartForConditionalGeneration.from_pretrained("vinai/bartpho-syllable")
>>> TXT = "Chúng tôi là <mask> nghiên cứu viên."
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
>>> logits = bartpho(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> print(tokenizer.decode(predictions).split())
```

- This implementation is only for tokenization: "monolingual_vocab_file" consists of Vietnamese-specialized types extracted from the pre-trained SentencePiece model "vocab_file" that is available from the multilingual XLM-RoBERTa. Other languages, if employing this pre-trained multilingual SentencePiece model "vocab_file" for subword segmentation, can reuse BartphoTokenizer with their own language-specialized "monolingual_vocab_file" (see the sketch at the end of this page).

## BartphoTokenizer

[[autodoc]] BartphoTokenizer
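As a closing illustration of the tip above, here is a hypothetical sketch of reusing [`BartphoTokenizer`] for another language; the file names are placeholders, not real assets:

```python
from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer(
    vocab_file="sentencepiece.bpe.model",             # pre-trained multilingual SentencePiece model (assumed local file)
    monolingual_vocab_file="dict.your_language.txt",  # your language-specialized vocabulary (assumed local file)
)
```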
transformers/docs/source/en/model_doc/bartpho.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bartpho.md", "repo_id": "transformers", "token_count": 1166 }
304
<!--Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# BridgeTower

## Overview

The BridgeTower model was proposed in [BridgeTower: Building Bridges Between Encoders in Vision-Language Representative Learning](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan. The goal of this model is to build a bridge between each uni-modal encoder and the cross-modal encoder to enable comprehensive and detailed interaction at each layer of the cross-modal encoder, thus achieving remarkable performance on various downstream tasks with almost negligible additional parameters and computational costs.

This paper has been accepted to the [AAAI'23](https://aaai.org/Conferences/AAAI-23/) conference.

The abstract from the paper is the following:

*Vision-Language (VL) models with the TWO-TOWER architecture have dominated visual-language representation learning in recent years. Current VL models either use lightweight uni-modal encoders and learn to extract, align and fuse both modalities simultaneously in a deep cross-modal encoder, or feed the last-layer uni-modal representations from the deep pre-trained uni-modal encoders into the top cross-modal encoder. Both approaches potentially restrict vision-language representation learning and limit model performance. In this paper, we propose BRIDGETOWER, which introduces multiple bridge layers that build a connection between the top layers of uni-modal encoders and each layer of the crossmodal encoder. This enables effective bottom-up cross-modal alignment and fusion between visual and textual representations of different semantic levels of pre-trained uni-modal encoders in the cross-modal encoder. Pre-trained with only 4M images, BRIDGETOWER achieves state-of-the-art performance on various downstream vision-language tasks. In particular, on the VQAv2 test-std set, BRIDGETOWER achieves an accuracy of 78.73%, outperforming the previous state-of-the-art model METER by 1.09% with the same pre-training data and almost negligible additional parameters and computational costs. Notably, when further scaling the model, BRIDGETOWER achieves an accuracy of 81.15%, surpassing models that are pre-trained on orders-of-magnitude larger datasets.*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/bridgetower_architecture%20.jpg" alt="drawing" width="600"/>

<small> BridgeTower architecture. Taken from the <a href="https://arxiv.org/abs/2206.08657">original paper.</a> </small>

This model was contributed by [Anahita Bhiwandiwalla](https://huggingface.co/anahita-b), [Tiep Le](https://huggingface.co/Tile) and [Shaoyen Tseng](https://huggingface.co/shaoyent).
The original code can be found [here](https://github.com/microsoft/BridgeTower). ## Usage tips and examples BridgeTower consists of a visual encoder, a textual encoder and cross-modal encoder with multiple lightweight bridge layers. The goal of this approach was to build a bridge between each uni-modal encoder and the cross-modal encoder to enable comprehensive and detailed interaction at each layer of the cross-modal encoder. In principle, one can apply any visual, textual or cross-modal encoder in the proposed architecture. The [`BridgeTowerProcessor`] wraps [`RobertaTokenizer`] and [`BridgeTowerImageProcessor`] into a single instance to both encode the text and prepare the images respectively. The following example shows how to run contrastive learning using [`BridgeTowerProcessor`] and [`BridgeTowerForContrastiveLearning`]. ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerForContrastiveLearning >>> import requests >>> from PIL import Image >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"] >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc") >>> model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc") >>> # forward pass >>> scores = dict() >>> for text in texts: ... # prepare inputs ... encoding = processor(image, text, return_tensors="pt") ... outputs = model(**encoding) ... scores[text] = outputs ``` The following example shows how to run image-text retrieval using [`BridgeTowerProcessor`] and [`BridgeTowerForImageAndTextRetrieval`]. ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerForImageAndTextRetrieval >>> import requests >>> from PIL import Image >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"] >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> # forward pass >>> scores = dict() >>> for text in texts: ... # prepare inputs ... encoding = processor(image, text, return_tensors="pt") ... outputs = model(**encoding) ... scores[text] = outputs.logits[0, 1].item() ``` The following example shows how to run masked language modeling using [`BridgeTowerProcessor`] and [`BridgeTowerForMaskedLM`]. ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerForMaskedLM >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000360943.jpg" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> text = "a <mask> looking out of the window" >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> model = BridgeTowerForMaskedLM.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> # prepare inputs >>> encoding = processor(image, text, return_tensors="pt") >>> # forward pass >>> outputs = model(**encoding) >>> results = processor.decode(outputs.logits.argmax(dim=-1).squeeze(0).tolist()) >>> print(results) .a cat looking out of the window. 
``` Tips: - This implementation of BridgeTower uses [`RobertaTokenizer`] to generate text embeddings and OpenAI's CLIP/ViT model to compute visual embeddings. - Checkpoints for pre-trained [bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base) and [bridgetower masked language modeling and image text matching](https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm) are released. - Please refer to [Table 5](https://arxiv.org/pdf/2206.08657.pdf) for BridgeTower's performance on Image Retrieval and other downstream tasks. - The PyTorch version of this model is only available in torch 1.10 and higher. ## BridgeTowerConfig [[autodoc]] BridgeTowerConfig ## BridgeTowerTextConfig [[autodoc]] BridgeTowerTextConfig ## BridgeTowerVisionConfig [[autodoc]] BridgeTowerVisionConfig ## BridgeTowerImageProcessor [[autodoc]] BridgeTowerImageProcessor - preprocess ## BridgeTowerProcessor [[autodoc]] BridgeTowerProcessor - __call__ ## BridgeTowerModel [[autodoc]] BridgeTowerModel - forward ## BridgeTowerForContrastiveLearning [[autodoc]] BridgeTowerForContrastiveLearning - forward ## BridgeTowerForMaskedLM [[autodoc]] BridgeTowerForMaskedLM - forward ## BridgeTowerForImageAndTextRetrieval [[autodoc]] BridgeTowerForImageAndTextRetrieval - forward
transformers/docs/source/en/model_doc/bridgetower.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bridgetower.md", "repo_id": "transformers", "token_count": 2392 }
305
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ConvNeXT ## Overview The ConvNeXT model was proposed in [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. ConvNeXT is a pure convolutional model (ConvNet), inspired by the design of Vision Transformers, that claims to outperform them. The abstract from the paper is the following: *The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnext_architecture.jpg" alt="drawing" width="600"/> <small> ConvNeXT architecture. Taken from the <a href="https://arxiv.org/abs/2201.03545">original paper</a>.</small> This model was contributed by [nielsr](https://huggingface.co/nielsr). TensorFlow version of the model was contributed by [ariG23498](https://github.com/ariG23498), [gante](https://github.com/gante), and [sayakpaul](https://github.com/sayakpaul) (equal contribution). The original code can be found [here](https://github.com/facebookresearch/ConvNeXt). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ConvNeXT. 
<PipelineTag pipeline="image-classification"/> - [`ConvNextForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ConvNextConfig [[autodoc]] ConvNextConfig ## ConvNextFeatureExtractor [[autodoc]] ConvNextFeatureExtractor ## ConvNextImageProcessor [[autodoc]] ConvNextImageProcessor - preprocess <frameworkcontent> <pt> ## ConvNextModel [[autodoc]] ConvNextModel - forward ## ConvNextForImageClassification [[autodoc]] ConvNextForImageClassification - forward </pt> <tf> ## TFConvNextModel [[autodoc]] TFConvNextModel - call ## TFConvNextForImageClassification [[autodoc]] TFConvNextForImageClassification - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/convnext.md/0
{ "file_path": "transformers/docs/source/en/model_doc/convnext.md", "repo_id": "transformers", "token_count": 1215 }
306
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ERNIE ## Overview ERNIE is a series of powerful models proposed by Baidu, performing especially well on Chinese tasks, including [ERNIE1.0](https://arxiv.org/abs/1904.09223), [ERNIE2.0](https://ojs.aaai.org/index.php/AAAI/article/view/6428), [ERNIE3.0](https://arxiv.org/abs/2107.02137), [ERNIE-Gram](https://arxiv.org/abs/2010.12148), [ERNIE-health](https://arxiv.org/abs/2110.07244), etc. These models are contributed by [nghuyong](https://huggingface.co/nghuyong) and the official code can be found in [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP) (in PaddlePaddle). ### Usage example Take `ernie-1.0-base-zh` as an example: ```Python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh") model = AutoModel.from_pretrained("nghuyong/ernie-1.0-base-zh") ``` ### Model checkpoints | Model Name | Language | Description | |:-------------------:|:--------:|:-------------------------------:| | ernie-1.0-base-zh | Chinese | Layer:12, Heads:12, Hidden:768 | | ernie-2.0-base-en | English | Layer:12, Heads:12, Hidden:768 | | ernie-2.0-large-en | English | Layer:24, Heads:16, Hidden:1024 | | ernie-3.0-base-zh | Chinese | Layer:12, Heads:12, Hidden:768 | | ernie-3.0-medium-zh | Chinese | Layer:6, Heads:12, Hidden:768 | | ernie-3.0-mini-zh | Chinese | Layer:6, Heads:12, Hidden:384 | | ernie-3.0-micro-zh | Chinese | Layer:4, Heads:12, Hidden:384 | | ernie-3.0-nano-zh | Chinese | Layer:4, Heads:12, Hidden:312 | | ernie-health-zh | Chinese | Layer:12, Heads:12, Hidden:768 | | ernie-gram-zh | Chinese | Layer:12, Heads:12, Hidden:768 | You can find all the supported models on Hugging Face's model hub: [huggingface.co/nghuyong](https://huggingface.co/nghuyong), and model details in Paddle's official repos: [PaddleNLP](https://paddlenlp.readthedocs.io/zh/latest/model_zoo/transformers/ERNIE/contents.html) and [ERNIE](https://github.com/PaddlePaddle/ERNIE/blob/repro). 
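Building on the snippet above, a minimal forward pass to obtain contextual features looks as follows (any checkpoint from the table can be substituted):

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
model = AutoModel.from_pretrained("nghuyong/ernie-1.0-base-zh")

inputs = tokenizer("你好,世界", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Contextual embeddings of shape (batch_size, sequence_length, hidden_size)
print(outputs.last_hidden_state.shape)
```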
## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## ErnieConfig [[autodoc]] ErnieConfig - all ## Ernie specific outputs [[autodoc]] models.ernie.modeling_ernie.ErnieForPreTrainingOutput ## ErnieModel [[autodoc]] ErnieModel - forward ## ErnieForPreTraining [[autodoc]] ErnieForPreTraining - forward ## ErnieForCausalLM [[autodoc]] ErnieForCausalLM - forward ## ErnieForMaskedLM [[autodoc]] ErnieForMaskedLM - forward ## ErnieForNextSentencePrediction [[autodoc]] ErnieForNextSentencePrediction - forward ## ErnieForSequenceClassification [[autodoc]] ErnieForSequenceClassification - forward ## ErnieForMultipleChoice [[autodoc]] ErnieForMultipleChoice - forward ## ErnieForTokenClassification [[autodoc]] ErnieForTokenClassification - forward ## ErnieForQuestionAnswering [[autodoc]] ErnieForQuestionAnswering - forward
transformers/docs/source/en/model_doc/ernie.md/0
{ "file_path": "transformers/docs/source/en/model_doc/ernie.md", "repo_id": "transformers", "token_count": 1417 }
307
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Gemma2 ## Overview The Gemma2 model was proposed in [Gemma2: Open Models Based on Gemini Technology and Research](https://blog.google/technology/developers/google-gemma-2/) by Gemma2 Team, Google. Two Gemma2 models are released, with parameter sizes of 9 billion (9B) and 27 billion (27B). The abstract from the blog post is the following: *Now we’re officially releasing Gemma 2 to researchers and developers globally. Available in both 9 billion (9B) and 27 billion (27B) parameter sizes, Gemma 2 is higher-performing and more efficient at inference than the first generation, with significant safety advancements built in. In fact, at 27B, it offers competitive alternatives to models more than twice its size, delivering the kind of performance that was only possible with proprietary models as recently as December.* Tips: - The original checkpoints can be converted using the conversion script `src/transformers/models/Gemma2/convert_Gemma2_weights_to_hf.py` <Tip warning={true}> - Gemma2 uses sliding window attention every second layer, which makes it unsuitable for typical kv caching with [`~DynamicCache`] or tuples of tensors. To enable caching in Gemma2 forward call, you must initialize a [`~HybridCache`] instance and pass it as `past_key_values` to the forward call. Note that you also have to prepare `cache_position` if the `past_key_values` already contains previous keys and values. </Tip> This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ), [Pedro Cuenca](https://huggingface.co/pcuenq) and [Tom Arsen](). ## Gemma2Config [[autodoc]] Gemma2Config ## Gemma2Model [[autodoc]] Gemma2Model - forward ## Gemma2ForCausalLM [[autodoc]] Gemma2ForCausalLM - forward ## Gemma2ForSequenceClassification [[autodoc]] Gemma2ForSequenceClassification - forward ## Gemma2ForTokenClassification [[autodoc]] Gemma2ForTokenClassification - forward
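A short text-generation sketch (assuming the `google/gemma-2-9b` checkpoint; `generate` is expected to handle the hybrid cache set-up described in the tip above on its own):

```python
import torch
from transformers import AutoTokenizer, Gemma2ForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-9b", torch_dtype=torch.bfloat16, device_map="auto")

inputs = tokenizer("The theory of special relativity states that", return_tensors="pt").to(model.device)
# generate() manages caching internally; for manual forward calls, build a HybridCache
# and pass it as `past_key_values`, as described in the tip above.
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```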
transformers/docs/source/en/model_doc/gemma2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gemma2.md", "repo_id": "transformers", "token_count": 736 }
308
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Hiera ## Overview Hiera was proposed in [Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles](https://arxiv.org/abs/2306.00989) by Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, Judy Hoffman, Jitendra Malik, Yanghao Li, Christoph Feichtenhofer. The paper introduces "Hiera," a hierarchical Vision Transformer that simplifies the architecture of modern hierarchical vision transformers by removing unnecessary components without compromising on accuracy or efficiency. Unlike traditional transformers that add complex vision-specific components to improve supervised classification performance, Hiera demonstrates that such additions, often termed "bells-and-whistles," are not essential for high accuracy. By leveraging a strong visual pretext task (MAE) for pretraining, Hiera retains simplicity and achieves superior accuracy and speed both in inference and training across various image and video recognition tasks. The approach suggests that spatial biases required for vision tasks can be effectively learned through proper pretraining, eliminating the need for added architectural complexity. The abstract from the paper is the following: *Modern hierarchical vision transformers have added several vision-specific components in the pursuit of supervised classification performance. While these components lead to effective accuracies and attractive FLOP counts, the added complexity actually makes these transformers slower than their vanilla ViT counterparts. In this paper, we argue that this additional bulk is unnecessary. By pretraining with a strong visual pretext task (MAE), we can strip out all the bells-and-whistles from a state-of-the-art multi-stage vision transformer without losing accuracy. In the process, we create Hiera, an extremely simple hierarchical vision transformer that is more accurate than previous models while being significantly faster both at inference and during training. We evaluate Hiera on a variety of tasks for image and video recognition. Our code and models are available at https://github.com/facebookresearch/hiera.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/hiera_overview.png" alt="drawing" width="600"/> <small> Hiera architecture. Taken from the <a href="https://arxiv.org/abs/2306.00989">original paper.</a> </small> This model was a joint contribution by [EduardoPacheco](https://huggingface.co/EduardoPacheco) and [namangarg110](https://huggingface.co/namangarg110). The original code can be found [here](https://github.com/facebookresearch/hiera). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Hiera. 
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="image-classification"/> - [`HieraForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) ## HieraConfig [[autodoc]] HieraConfig ## HieraModel [[autodoc]] HieraModel - forward ## HieraForPreTraining [[autodoc]] HieraForPreTraining - forward ## HieraForImageClassification [[autodoc]] HieraForImageClassification - forward
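A minimal image-classification sketch; the checkpoint name below is illustrative, so substitute any Hiera image-classification checkpoint from the Hub:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, HieraForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

checkpoint = "facebook/hiera-tiny-224-in1k-hf"  # illustrative checkpoint name
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = HieraForImageClassification.from_pretrained(checkpoint)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])
```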
transformers/docs/source/en/model_doc/hiera.md/0
{ "file_path": "transformers/docs/source/en/model_doc/hiera.md", "repo_id": "transformers", "token_count": 1119 }
309
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LayoutXLM ## Overview LayoutXLM was proposed in [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. It's a multilingual extension of the [LayoutLMv2 model](https://arxiv.org/abs/2012.14740) trained on 53 languages. The abstract from the paper is the following: *Multimodal pre-training with text, layout, and image has achieved SOTA performance for visually-rich document understanding tasks recently, which demonstrates the great potential for joint learning across different modalities. In this paper, we present LayoutXLM, a multimodal pre-trained model for multilingual document understanding, which aims to bridge the language barriers for visually-rich document understanding. To accurately evaluate LayoutXLM, we also introduce a multilingual form understanding benchmark dataset named XFUN, which includes form understanding samples in 7 languages (Chinese, Japanese, Spanish, French, Italian, German, Portuguese), and key-value pairs are manually labeled for each language. Experiment results show that the LayoutXLM model has significantly outperformed the existing SOTA cross-lingual pre-trained models on the XFUN dataset.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm). ## Usage tips and examples One can directly plug in the weights of LayoutXLM into a LayoutLMv2 model, like so: ```python from transformers import LayoutLMv2Model model = LayoutLMv2Model.from_pretrained("microsoft/layoutxlm-base") ``` Note that LayoutXLM has its own tokenizer, based on [`LayoutXLMTokenizer`]/[`LayoutXLMTokenizerFast`]. You can initialize it as follows: ```python from transformers import LayoutXLMTokenizer tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base") ``` Similar to LayoutLMv2, you can use [`LayoutXLMProcessor`] (which internally applies [`LayoutLMv2ImageProcessor`] and [`LayoutXLMTokenizer`]/[`LayoutXLMTokenizerFast`] in sequence) to prepare all data for the model. <Tip> As LayoutXLM's architecture is equivalent to that of LayoutLMv2, one can refer to [LayoutLMv2's documentation page](layoutlmv2) for all tips, code examples and notebooks. </Tip> ## LayoutXLMTokenizer [[autodoc]] LayoutXLMTokenizer - __call__ - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## LayoutXLMTokenizerFast [[autodoc]] LayoutXLMTokenizerFast - __call__ ## LayoutXLMProcessor [[autodoc]] LayoutXLMProcessor - __call__
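A rough sketch of that preparation step is shown below; it assumes a scanned page saved locally as `document.png` and that `pytesseract` (for the default OCR) and `detectron2` (for the LayoutLMv2 visual backbone) are installed:

```python
from PIL import Image
from transformers import LayoutXLMProcessor, LayoutLMv2Model

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
model = LayoutLMv2Model.from_pretrained("microsoft/layoutxlm-base")

# "document.png" is a placeholder for a document image on disk
image = Image.open("document.png").convert("RGB")

# By default the processor runs OCR to obtain words and bounding boxes
encoding = processor(image, return_tensors="pt")
outputs = model(**encoding)
print(outputs.last_hidden_state.shape)
```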
transformers/docs/source/en/model_doc/layoutxlm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/layoutxlm.md", "repo_id": "transformers", "token_count": 981 }
310
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MMS ## Overview The MMS model was proposed in [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli. The abstract from the paper is the following: *Expanding the language coverage of speech technology has the potential to improve access to information for many more people. However, current speech technology is restricted to about one hundred languages which is a small fraction of the over 7,000 languages spoken around the world. The Massively Multilingual Speech (MMS) project increases the number of supported languages by 10-40x, depending on the task. The main ingredients are a new dataset based on readings of publicly available religious texts and effectively leveraging self-supervised learning. We built pre-trained wav2vec 2.0 models covering 1,406 languages, a single multilingual automatic speech recognition model for 1,107 languages, speech synthesis models for the same number of languages, as well as a language identification model for 4,017 languages. Experiments show that our multilingual speech recognition model more than halves the word error rate of Whisper on 54 languages of the FLEURS benchmark while being trained on a small fraction of the labeled data.* Here are the different models open sourced in the MMS project. The models and code are originally released [here](https://github.com/facebookresearch/fairseq/tree/main/examples/mms). We have added them to the `transformers` framework, making them easier to use. ### Automatic Speech Recognition (ASR) The ASR model checkpoints can be found here: [mms-1b-fl102](https://huggingface.co/facebook/mms-1b-fl102), [mms-1b-l1107](https://huggingface.co/facebook/mms-1b-l1107), [mms-1b-all](https://huggingface.co/facebook/mms-1b-all). For best accuracy, use the `mms-1b-all` model. Tips: - All ASR models accept a float array corresponding to the raw waveform of the speech signal. The raw waveform should be pre-processed with [`Wav2Vec2FeatureExtractor`]. - The models were trained using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. - You can load different language adapter weights for different languages via [`~Wav2Vec2PreTrainedModel.load_adapter`]. Language adapters only consist of roughly 2 million parameters and can therefore be efficiently loaded on the fly when needed. #### Loading By default MMS loads adapter weights for English. 
If you want to load adapter weights of another language, make sure to specify `target_lang=<your-chosen-target-lang>` as well as `ignore_mismatched_sizes=True`. The `ignore_mismatched_sizes=True` keyword has to be passed to allow the language model head to be resized according to the vocabulary of the specified language. Similarly, the processor should be loaded with the same target language: ```py from transformers import Wav2Vec2ForCTC, AutoProcessor model_id = "facebook/mms-1b-all" target_lang = "fra" processor = AutoProcessor.from_pretrained(model_id, target_lang=target_lang) model = Wav2Vec2ForCTC.from_pretrained(model_id, target_lang=target_lang, ignore_mismatched_sizes=True) ``` <Tip> You can safely ignore a warning such as: ```text Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/mms-1b-all and are newly initialized because the shapes did not match: - lm_head.bias: found shape torch.Size([154]) in the checkpoint and torch.Size([314]) in the model instantiated - lm_head.weight: found shape torch.Size([154, 1280]) in the checkpoint and torch.Size([314, 1280]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` </Tip> If you want to use the ASR pipeline, you can load your chosen target language as such: ```py from transformers import pipeline model_id = "facebook/mms-1b-all" target_lang = "fra" pipe = pipeline(model=model_id, model_kwargs={"target_lang": "fra", "ignore_mismatched_sizes": True}) ``` #### Inference Next, let's look at how we can run MMS in inference and change adapter layers after having called [`~PreTrainedModel.from_pretrained`]. First, we load audio data in different languages using the [Datasets](https://github.com/huggingface/datasets) library. ```py from datasets import load_dataset, Audio # English stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "en", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) en_sample = next(iter(stream_data))["audio"]["array"] # French stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "fr", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) fr_sample = next(iter(stream_data))["audio"]["array"] ``` Next, we load the model and processor: ```py from transformers import Wav2Vec2ForCTC, AutoProcessor import torch model_id = "facebook/mms-1b-all" processor = AutoProcessor.from_pretrained(model_id) model = Wav2Vec2ForCTC.from_pretrained(model_id) ``` Now we process the audio data, pass the processed audio data to the model and transcribe the model output, just like we usually do for [`Wav2Vec2ForCTC`]. ```py inputs = processor(en_sample, sampling_rate=16_000, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits ids = torch.argmax(outputs, dim=-1)[0] transcription = processor.decode(ids) # 'joe keton disapproved of films and buster also had reservations about the media' ``` We can now keep the same model in memory and simply switch out the language adapters by calling the convenient [`~Wav2Vec2ForCTC.load_adapter`] function for the model and [`~Wav2Vec2CTCTokenizer.set_target_lang`] for the tokenizer. We pass the target language as an input - `"fra"` for French. 
```py processor.tokenizer.set_target_lang("fra") model.load_adapter("fra") inputs = processor(fr_sample, sampling_rate=16_000, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits ids = torch.argmax(outputs, dim=-1)[0] transcription = processor.decode(ids) # "ce dernier est volé tout au long de l'histoire romaine" ``` In the same way the language can be switched out for all other supported languages. Please have a look at: ```py processor.tokenizer.vocab.keys() ``` to see all supported languages. To further improve performance from ASR models, language model decoding can be used. See the documentation [here](https://huggingface.co/facebook/mms-1b-all) for further details. ### Speech Synthesis (TTS) MMS-TTS uses the same model architecture as VITS, which was added to 🤗 Transformers in v4.33. MMS trains a separate model checkpoint for each of the 1100+ languages in the project. All available checkpoints can be found on the Hugging Face Hub: [facebook/mms-tts](https://huggingface.co/models?sort=trending&search=facebook%2Fmms-tts), and the inference documentation under [VITS](https://huggingface.co/docs/transformers/main/en/model_doc/vits). #### Inference To use the MMS model, first update to the latest version of the Transformers library: ```bash pip install --upgrade transformers accelerate ``` Since the flow-based model in VITS is non-deterministic, it is good practice to set a seed to ensure reproducibility of the outputs. - For languages with a Roman alphabet, such as English or French, the tokenizer can be used directly to pre-process the text inputs. The following code example runs a forward pass using the MMS-TTS English checkpoint: ```python import torch from transformers import VitsTokenizer, VitsModel, set_seed tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") model = VitsModel.from_pretrained("facebook/mms-tts-eng") inputs = tokenizer(text="Hello - my dog is cute", return_tensors="pt") set_seed(555) # make deterministic with torch.no_grad(): outputs = model(**inputs) waveform = outputs.waveform[0] ``` The resulting waveform can be saved as a `.wav` file: ```python import scipy scipy.io.wavfile.write("synthesized_speech.wav", rate=model.config.sampling_rate, data=waveform) ``` Or displayed in a Jupyter Notebook / Google Colab: ```python from IPython.display import Audio Audio(waveform, rate=model.config.sampling_rate) ``` For certain languages with non-Roman alphabets, such as Arabic, Mandarin or Hindi, the [`uroman`](https://github.com/isi-nlp/uroman) perl package is required to pre-process the text inputs to the Roman alphabet. You can check whether you require the `uroman` package for your language by inspecting the `is_uroman` attribute of the pre-trained `tokenizer`: ```python from transformers import VitsTokenizer tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") print(tokenizer.is_uroman) ``` If required, you should apply the uroman package to your text inputs **prior** to passing them to the `VitsTokenizer`, since currently the tokenizer does not support performing the pre-processing itself. To do this, first clone the uroman repository to your local machine and set the bash variable `UROMAN` to the local path: ```bash git clone https://github.com/isi-nlp/uroman.git cd uroman export UROMAN=$(pwd) ``` You can then pre-process the text input using the following code snippet. 
You can either rely on using the bash variable `UROMAN` to point to the uroman repository, or you can pass the uroman directory as an argument to the `uromanize` function: ```python import torch from transformers import VitsTokenizer, VitsModel, set_seed import os import subprocess tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-kor") model = VitsModel.from_pretrained("facebook/mms-tts-kor") def uromanize(input_string, uroman_path): """Convert non-Roman strings to Roman using the `uroman` perl package.""" script_path = os.path.join(uroman_path, "bin", "uroman.pl") command = ["perl", script_path] process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Execute the perl command stdout, stderr = process.communicate(input=input_string.encode()) if process.returncode != 0: raise ValueError(f"Error {process.returncode}: {stderr.decode()}") # Return the output as a string and skip the new-line character at the end return stdout.decode()[:-1] text = "이봐 무슨 일이야" uromanized_text = uromanize(text, uroman_path=os.environ["UROMAN"]) inputs = tokenizer(text=uromanized_text, return_tensors="pt") set_seed(555) # make deterministic with torch.no_grad(): outputs = model(inputs["input_ids"]) waveform = outputs.waveform[0] ``` **Tips:** * The MMS-TTS checkpoints are trained on lower-cased, un-punctuated text. By default, the `VitsTokenizer` *normalizes* the inputs by removing any casing and punctuation, to avoid passing out-of-vocabulary characters to the model. Hence, the model is agnostic to casing and punctuation, so these should be avoided in the text prompt. You can disable normalisation by setting `normalize=False` in the call to the tokenizer, but this will lead to unexpected behaviour and is discouraged. * The speaking rate can be varied by setting the attribute `model.speaking_rate` to a chosen value. Likewise, the randomness of the noise is controlled by `model.noise_scale`: ```python import torch from transformers import VitsTokenizer, VitsModel, set_seed tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") model = VitsModel.from_pretrained("facebook/mms-tts-eng") inputs = tokenizer(text="Hello - my dog is cute", return_tensors="pt") # make deterministic set_seed(555) # make speech faster and more noisy model.speaking_rate = 1.5 model.noise_scale = 0.8 with torch.no_grad(): outputs = model(**inputs) ``` ### Language Identification (LID) Different LID models are available based on the number of languages they can recognize - [126](https://huggingface.co/facebook/mms-lid-126), [256](https://huggingface.co/facebook/mms-lid-256), [512](https://huggingface.co/facebook/mms-lid-512), [1024](https://huggingface.co/facebook/mms-lid-1024), [2048](https://huggingface.co/facebook/mms-lid-2048), [4017](https://huggingface.co/facebook/mms-lid-4017). #### Inference First, we install transformers and some other libraries: ```bash pip install torch accelerate datasets[audio] pip install --upgrade transformers ``` Next, we load a couple of audio samples via `datasets`. Make sure that the audio data is sampled at 16,000 Hz (16 kHz). 
```py from datasets import load_dataset, Audio # English stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "en", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) en_sample = next(iter(stream_data))["audio"]["array"] # Arabic stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "ar", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) ar_sample = next(iter(stream_data))["audio"]["array"] ``` Next, we load the model and processor ```py from transformers import Wav2Vec2ForSequenceClassification, AutoFeatureExtractor import torch model_id = "facebook/mms-lid-126" processor = AutoFeatureExtractor.from_pretrained(model_id) model = Wav2Vec2ForSequenceClassification.from_pretrained(model_id) ``` Now we process the audio data, pass the processed audio data to the model to classify it into a language, just like we usually do for Wav2Vec2 audio classification models such as [ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition](https://huggingface.co/harshit345/xlsr-wav2vec-speech-emotion-recognition) ```py # English inputs = processor(en_sample, sampling_rate=16_000, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits lang_id = torch.argmax(outputs, dim=-1)[0].item() detected_lang = model.config.id2label[lang_id] # 'eng' # Arabic inputs = processor(ar_sample, sampling_rate=16_000, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits lang_id = torch.argmax(outputs, dim=-1)[0].item() detected_lang = model.config.id2label[lang_id] # 'ara' ``` To see all the supported languages of a checkpoint, you can print out the language ids as follows: ```py processor.id2label.values() ``` ### Audio Pretrained Models Pretrained models are available for two different sizes - [300M](https://huggingface.co/facebook/mms-300m) , [1Bil](https://huggingface.co/facebook/mms-1b). <Tip> The MMS for ASR architecture is based on the Wav2Vec2 model, refer to [Wav2Vec2's documentation page](wav2vec2) for further details on how to finetune with models for various downstream tasks. MMS-TTS uses the same model architecture as VITS, refer to [VITS's documentation page](vits) for API reference. </Tip>
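A rough sketch of using one of these pretrained backbones as a feature extractor (the 300M checkpoint is used here; loading it into [`Wav2Vec2Model`] may emit warnings about unused pre-training weights):

```python
import torch
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model

# Default wav2vec 2.0 feature extractor settings (16 kHz mono audio)
feature_extractor = Wav2Vec2FeatureExtractor(sampling_rate=16000)
model = Wav2Vec2Model.from_pretrained("facebook/mms-300m")

speech = torch.zeros(16000).numpy()  # one second of (silent) audio as a placeholder
inputs = feature_extractor(speech, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state

print(hidden_states.shape)  # (batch, frames, hidden_size)
```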
transformers/docs/source/en/model_doc/mms.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mms.md", "repo_id": "transformers", "token_count": 4924 }
311
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # NLLB-MOE ## Overview The NLLB model was presented in [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. The abstract of the paper is the following: *Driven by the goal of eradicating language barriers on a global scale, machine translation has solidified itself as a key focus of artificial intelligence research today. However, such efforts have coalesced around a small subset of languages, leaving behind the vast majority of mostly low-resource languages. What does it take to break the 200 language barrier while ensuring safe, high quality results, all while keeping ethical considerations in mind? In No Language Left Behind, we took on this challenge by first contextualizing the need for low-resource language translation support through exploratory interviews with native speakers. Then, we created datasets and models aimed at narrowing the performance gap between low and high-resource languages. More specifically, we developed a conditional compute model based on Sparsely Gated Mixture of Experts that is trained on data obtained with novel and effective data mining techniques tailored for low-resource languages. We propose multiple architectural and training improvements to counteract overfitting while training on thousands of tasks. Critically, we evaluated the performance of over 40,000 different translation directions using a human-translated benchmark, Flores-200, and combined human evaluation with a novel toxicity benchmark covering all languages in Flores-200 to assess translation safety. Our model achieves an improvement of 44% BLEU relative to the previous state-of-the-art, laying important groundwork towards realizing a universal translation system.* This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The original code can be found [here](https://github.com/facebookresearch/fairseq). ## Usage tips - M2M100ForConditionalGeneration is the base model for both NLLB and NLLB MoE - The NLLB-MoE is very similar to the NLLB model, but its feed-forward layer is based on the implementation of SwitchTransformers. - The tokenizer is the same as the NLLB models. 
## Implementation differences with SwitchTransformers The biggest difference is the way the tokens are routed. NLLB-MoE uses a `top-2-gate` which means that for each input, only the top two experts are selected based on the highest predicted probabilities from the gating network, and the remaining experts are ignored. In `SwitchTransformers`, only the top-1 probabilities are computed, which means that tokens have less probability of being forwarded. Moreover, if a token is not routed to any expert, `SwitchTransformers` still adds its unmodified hidden states (kind of like a residual connection) while they are masked in `NLLB`'s top-2 routing mechanism. ## Generating with NLLB-MoE The available checkpoints require around 350GB of storage. Make sure to use `accelerate` if you do not have enough RAM on your machine. While generating the target text, set the `forced_bos_token_id` to the target language id. The following example shows how to translate English to French using the *facebook/nllb-moe-54b* model. Note that we're using the BCP-47 code for French `fra_Latn`. See [here](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200) for the list of all BCP-47 in the Flores 200 dataset. ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b") >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-moe-54b") >>> article = "Previously, Ring's CEO, Jamie Siminoff, remarked the company started when his doorbell wasn't audible from his shop in his garage." >>> inputs = tokenizer(article, return_tensors="pt") >>> translated_tokens = model.generate( ... **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"], max_length=50 ... ) >>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0] "Auparavant, le PDG de Ring, Jamie Siminoff, a fait remarquer que la société avait commencé lorsque sa sonnette n'était pas audible depuis son magasin dans son garage." ``` ### Generating from any other language than English English (`eng_Latn`) is set as the default language from which to translate. In order to specify that you'd like to translate from a different language, you should specify the BCP-47 code in the `src_lang` keyword argument of the tokenizer initialization. See example below for a translation from Romanian to German: ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b", src_lang="ron_Latn") >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-moe-54b") >>> article = "Şeful ONU spune că nu există o soluţie militară în Siria" >>> inputs = tokenizer(article, return_tensors="pt") >>> translated_tokens = model.generate( ... **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["deu_Latn"], max_length=30 ... ) >>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0] ``` ## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## NllbMoeConfig [[autodoc]] NllbMoeConfig ## NllbMoeTop2Router [[autodoc]] NllbMoeTop2Router - route_tokens - forward ## NllbMoeSparseMLP [[autodoc]] NllbMoeSparseMLP - forward ## NllbMoeModel [[autodoc]] NllbMoeModel - forward ## NllbMoeForConditionalGeneration [[autodoc]] NllbMoeForConditionalGeneration - forward
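To make the top-2 routing described under "Implementation differences with SwitchTransformers" concrete, here is a schematic sketch of top-2 gating; it is illustrative only and not the actual `NllbMoeTop2Router` code, which additionally handles masking, expert capacity and auxiliary losses:

```python
import torch
import torch.nn.functional as F

def top2_route(hidden_states: torch.Tensor, router_weight: torch.Tensor):
    """hidden_states: (num_tokens, d_model), router_weight: (d_model, num_experts)."""
    router_logits = hidden_states @ router_weight            # (num_tokens, num_experts)
    router_probs = F.softmax(router_logits, dim=-1)
    top2_probs, top2_experts = router_probs.topk(2, dim=-1)  # keep the two best experts per token
    # Renormalize so the two kept weights sum to 1 for each token
    top2_probs = top2_probs / top2_probs.sum(dim=-1, keepdim=True)
    return top2_experts, top2_probs

hidden_states = torch.randn(6, 512)   # 6 tokens with hidden size 512
router_weight = torch.randn(512, 8)   # 8 experts
experts, weights = top2_route(hidden_states, router_weight)
print(experts.shape, weights.shape)   # torch.Size([6, 2]) torch.Size([6, 2])
```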
transformers/docs/source/en/model_doc/nllb-moe.md/0
{ "file_path": "transformers/docs/source/en/model_doc/nllb-moe.md", "repo_id": "transformers", "token_count": 2003 }
312
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Perceiver ## Overview The Perceiver IO model was proposed in [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. Perceiver IO is a generalization of [Perceiver](https://arxiv.org/abs/2103.03206) to handle arbitrary outputs in addition to arbitrary inputs. The original Perceiver only produced a single classification label. In addition to classification labels, Perceiver IO can produce (for example) language, optical flow, and multimodal videos with audio. This is done using the same building blocks as the original Perceiver. The computational complexity of Perceiver IO is linear in the input and output size and the bulk of the processing occurs in the latent space, allowing us to process inputs and outputs that are much larger than can be handled by standard Transformers. This means, for example, Perceiver IO can do BERT-style masked language modeling directly using bytes instead of tokenized inputs. The abstract from the paper is the following: *The recently-proposed Perceiver model obtains good results on several domains (images, audio, multimodal, point clouds) while scaling linearly in compute and memory with the input size. While the Perceiver supports many kinds of inputs, it can only produce very simple outputs such as class scores. Perceiver IO overcomes this limitation without sacrificing the original's appealing properties by learning to flexibly query the model's latent space to produce outputs of arbitrary size and semantics. Perceiver IO still decouples model depth from data size and still scales linearly with data size, but now with respect to both input and output sizes. The full Perceiver IO model achieves strong results on tasks with highly structured output spaces, such as natural language and visual understanding, StarCraft II, and multi-task and multi-modal domains. As highlights, Perceiver IO matches a Transformer-based BERT baseline on the GLUE language benchmark without the need for input tokenization and achieves state-of-the-art performance on Sintel optical flow estimation.* Here's a TLDR explaining how Perceiver works: The main problem with the self-attention mechanism of the Transformer is that the time and memory requirements scale quadratically with the sequence length. Hence, models like BERT and RoBERTa are limited to a max sequence length of 512 tokens. Perceiver aims to solve this issue by, instead of performing self-attention on the inputs, perform it on a set of latent variables, and only use the inputs for cross-attention. 
In this way, the time and memory requirements don't depend on the length of the inputs anymore, as one uses a fixed amount of latent variables, like 256 or 512. These are randomly initialized, after which they are trained end-to-end using backpropagation. Internally, [`PerceiverModel`] will create the latents, which is a tensor of shape `(batch_size, num_latents, d_latents)`. One must provide `inputs` (which could be text, images, audio, you name it!) to the model, which it will use to perform cross-attention with the latents. The output of the Perceiver encoder is a tensor of the same shape. One can then, similar to BERT, convert the last hidden states of the latents to classification logits by averaging along the sequence dimension, and placing a linear layer on top of that to project the `d_latents` to `num_labels`. This was the idea of the original Perceiver paper. However, it could only output classification logits. In a follow-up work, PerceiverIO, they generalized it to let the model also produce outputs of arbitrary size. How, you might ask? The idea is actually relatively simple: one defines outputs of an arbitrary size, and then applies cross-attention with the last hidden states of the latents, using the outputs as queries, and the latents as keys and values. So let's say one wants to perform masked language modeling (BERT-style) with the Perceiver. As the Perceiver's input length will not have an impact on the computation time of the self-attention layers, one can provide raw bytes, providing `inputs` of length 2048 to the model. If one now masks out certain of these 2048 tokens, one can define the `outputs` as being of shape: `(batch_size, 2048, 768)`. Next, one performs cross-attention with the final hidden states of the latents to update the `outputs` tensor. After cross-attention, one still has a tensor of shape `(batch_size, 2048, 768)`. One can then place a regular language modeling head on top, to project the last dimension to the vocabulary size of the model, i.e. creating logits of shape `(batch_size, 2048, 262)` (as Perceiver uses a vocabulary size of 262 byte IDs). <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/perceiver_architecture.jpg" alt="drawing" width="600"/> <small> Perceiver IO architecture. Taken from the <a href="https://arxiv.org/abs/2107.14795">original paper</a> </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/deepmind/deepmind-research/tree/master/perceiver). <Tip warning={true}> Perceiver does **not** work with `torch.nn.DataParallel` due to a bug in PyTorch, see [issue #36035](https://github.com/pytorch/pytorch/issues/36035) </Tip> ## Resources - The quickest way to get started with the Perceiver is by checking the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Perceiver). - Refer to the [blog post](https://huggingface.co/blog/perceiver) if you want to fully understand how the model works and is implemented in the library. Note that the models available in the library only showcase some examples of what you can do with the Perceiver. There are many more use cases, including question answering, named-entity recognition, object detection, audio classification, video classification, etc. 
- [Text classification task guide](../tasks/sequence_classification) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Image classification task guide](../tasks/image_classification) ## Perceiver specific outputs [[autodoc]] models.perceiver.modeling_perceiver.PerceiverModelOutput [[autodoc]] models.perceiver.modeling_perceiver.PerceiverDecoderOutput [[autodoc]] models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput [[autodoc]] models.perceiver.modeling_perceiver.PerceiverClassifierOutput ## PerceiverConfig [[autodoc]] PerceiverConfig ## PerceiverTokenizer [[autodoc]] PerceiverTokenizer - __call__ ## PerceiverFeatureExtractor [[autodoc]] PerceiverFeatureExtractor - __call__ ## PerceiverImageProcessor [[autodoc]] PerceiverImageProcessor - preprocess ## PerceiverTextPreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverTextPreprocessor ## PerceiverImagePreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverImagePreprocessor ## PerceiverOneHotPreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor ## PerceiverAudioPreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor ## PerceiverMultimodalPreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor ## PerceiverProjectionDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverProjectionDecoder ## PerceiverBasicDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverBasicDecoder ## PerceiverClassificationDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverClassificationDecoder ## PerceiverOpticalFlowDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder ## PerceiverBasicVideoAutoencodingDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder ## PerceiverMultimodalDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder ## PerceiverProjectionPostprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor ## PerceiverAudioPostprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor ## PerceiverClassificationPostprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor ## PerceiverMultimodalPostprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor ## PerceiverModel [[autodoc]] PerceiverModel - forward ## PerceiverForMaskedLM [[autodoc]] PerceiverForMaskedLM - forward ## PerceiverForSequenceClassification [[autodoc]] PerceiverForSequenceClassification - forward ## PerceiverForImageClassificationLearned [[autodoc]] PerceiverForImageClassificationLearned - forward ## PerceiverForImageClassificationFourier [[autodoc]] PerceiverForImageClassificationFourier - forward ## PerceiverForImageClassificationConvProcessing [[autodoc]] PerceiverForImageClassificationConvProcessing - forward ## PerceiverForOpticalFlow [[autodoc]] PerceiverForOpticalFlow - forward ## PerceiverForMultimodalAutoencoding [[autodoc]] PerceiverForMultimodalAutoencoding - forward
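As a concrete illustration of the byte-level masked language modeling described above (following the example published with the `deepmind/language-perceiver` checkpoint; the 52:61 byte span is specific to this sentence):

```python
import torch
from transformers import PerceiverTokenizer, PerceiverForMaskedLM

tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")

text = "This is an incomplete sentence where some words are missing."
encoding = tokenizer(text, padding="max_length", return_tensors="pt")

# Mask the bytes corresponding to " missing." (positions 52:61 for this particular sentence)
encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id

with torch.no_grad():
    outputs = model(inputs=encoding["input_ids"], attention_mask=encoding["attention_mask"])

predicted_ids = outputs.logits[0, 52:61].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```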
transformers/docs/source/en/model_doc/perceiver.md/0
{ "file_path": "transformers/docs/source/en/model_doc/perceiver.md", "repo_id": "transformers", "token_count": 2762 }
313
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Qwen2_VL ## Overview The [Qwen2_VL](https://qwenlm.github.io/blog/qwen2-vl/) is a major update to our [Qwen-VL](https://arxiv.org/pdf/2308.12966) model from the Qwen team. The abstract from the blog is the following: *This blog introduces Qwen2-VL, an advanced version of the Qwen-VL model that has undergone significant enhancements over the past year. Key improvements include enhanced image comprehension, advanced video understanding, integrated visual agent functionality, and expanded multilingual support. The model architecture has been optimized for handling arbitrary image resolutions through Naive Dynamic Resolution support and utilizes Multimodal Rotary Position Embedding (M-ROPE) to effectively process both 1D textual and multi-dimensional visual data. This updated model demonstrates competitive performance against leading AI systems like GPT-4o and Claude 3.5 Sonnet in vision-related tasks and ranks highly among open-source models in text capabilities. These advancements make Qwen2-VL a versatile tool for various applications requiring robust multimodal processing and reasoning abilities.* ## Usage example ### Single Media inference The model can accept both images and videos as input. Here's an example code for inference. ```python from PIL import Image import requests import torch from torchvision import io from typing import Dict from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor # Load the model in half-precision on the available device(s) model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", device_map="auto") processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") # Image url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg" image = Image.open(requests.get(url, stream=True).raw) conversation = [ { "role":"user", "content":[ { "type":"image", }, { "type":"text", "text":"Describe this image." 
} ] } ] # Preprocess the inputs text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) # Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>\n<|im_start|>assistant\n' inputs = processor(text=[text_prompt], images=[image], padding=True, return_tensors="pt") inputs = inputs.to('cuda') # Inference: Generation of the output output_ids = model.generate(**inputs, max_new_tokens=128) generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)] output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) print(output_text) # Video def fetch_video(ele: Dict, nframe_factor=2): if isinstance(ele['video'], str): def round_by_factor(number: int, factor: int) -> int: return round(number / factor) * factor video = ele["video"] if video.startswith("file://"): video = video[7:] video, _, info = io.read_video( video, start_pts=ele.get("video_start", 0.0), end_pts=ele.get("video_end", None), pts_unit="sec", output_format="TCHW", ) assert not ("fps" in ele and "nframes" in ele), "Only accept either `fps` or `nframes`" if "nframes" in ele: nframes = round_by_factor(ele["nframes"], nframe_factor) else: fps = ele.get("fps", 1.0) nframes = round_by_factor(video.size(0) / info["video_fps"] * fps, nframe_factor) idx = torch.linspace(0, video.size(0) - 1, nframes, dtype=torch.int64) return video[idx] video_info = {"type": "video", "video": "/path/to/video.mp4", "fps": 1.0} video = fetch_video(video_info) conversation = [ { "role": "user", "content": [ {"type": "video"}, {"type": "text", "text": "What happened in the video?"}, ], } ] # Preprocess the inputs text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) # Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|video_pad|><|vision_end|>What happened in the video?<|im_end|>\n<|im_start|>assistant\n' inputs = processor(text=[text_prompt], videos=[video], padding=True, return_tensors="pt") inputs = inputs.to('cuda') # Inference: Generation of the output output_ids = model.generate(**inputs, max_new_tokens=128) generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)] output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) print(output_text) ``` ### Batch Mixed Media Inference The model can batch inputs composed of mixed samples of various types such as images, videos, and text. Here is an example. ```python image1 = Image.open("/path/to/image1.jpg") image2 = Image.open("/path/to/image2.jpg") image3 = Image.open("/path/to/image3.jpg") image4 = Image.open("/path/to/image4.jpg") image5 = Image.open("/path/to/image5.jpg") video = fetch_video({ "type": "video", "video": "/path/to/video.mp4", "fps": 1.0 }) # Conversation for the first image conversation1 = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "Describe this image."} ] } ] # Conversation with two images conversation2 = [ { "role": "user", "content": [ {"type": "image"}, {"type": "image"}, {"type": "text", "text": "What is written in the pictures?"} ] } ] # Conversation with pure text conversation3 = [ { "role": "user", "content": "who are you?" 
} ] # Conversation with mixed media conversation4 = [ { "role": "user", "content": [ {"type": "image"}, {"type": "image"}, {"type": "video"}, {"type": "text", "text": "What are the common elements in these medias?"}, ], } ] conversations = [conversation1, conversation2, conversation3, conversation4] # Preparation for batch inference texts = [processor.apply_chat_template(msg, add_generation_prompt=True) for msg in conversations] inputs = processor( text=texts, images=[image1, image2, image3, image4, image5], videos=[video], padding=True, return_tensors="pt", ) inputs = inputs.to('cuda') # Batch Inference output_ids = model.generate(**inputs, max_new_tokens=128) generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)] output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) print(output_text) ``` ### Usage Tips #### Image Resolution for performance boost The model supports a wide range of resolution inputs. By default, it uses the native resolution for input, but higher resolutions can enhance performance at the cost of more computation. Users can set the minimum and maximum number of pixels to achieve an optimal configuration for their needs. ```python min_pixels = 224*224 max_pixels = 2048*2048 processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels) ``` #### Multiple Image Inputs By default, images and video content are directly included in the conversation. When handling multiple images, it's helpful to add labels to the images and videos for better reference. Users can control this behavior with the following settings: ```python conversation = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "Hello, how are you?"} ] }, { "role": "assistant", "content": "I'm doing well, thank you for asking. How can I assist you today?" }, { "role": "user", "content": [ {"type": "text", "text": "Can you describe these images and video?"}, {"type": "image"}, {"type": "image"}, {"type": "video"}, {"type": "text", "text": "These are from my vacation."} ] }, { "role": "assistant", "content": "I'd be happy to describe the images and video for you. Could you please provide more context about your vacation?" }, { "role": "user", "content": "It was a trip to the mountains. Can you see the details in the images and video?" } ] # default: prompt_without_id = processor.apply_chat_template(conversation, add_generation_prompt=True) # Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Hello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing well, thank you for asking. How can I assist you today?<|im_end|>\n<|im_start|>user\nCan you describe these images and video?<|vision_start|><|image_pad|><|vision_end|><|vision_start|><|image_pad|><|vision_end|><|vision_start|><|video_pad|><|vision_end|>These are from my vacation.<|im_end|>\n<|im_start|>assistant\nI'd be happy to describe the images and video for you. Could you please provide more context about your vacation?<|im_end|>\n<|im_start|>user\nIt was a trip to the mountains. 
Can you see the details in the images and video?<|im_end|>\n<|im_start|>assistant\n' # add ids prompt_with_id = processor.apply_chat_template(conversation, add_generation_prompt=True, add_vision_id=True) # Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nPicture 1: <|vision_start|><|image_pad|><|vision_end|>Hello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing well, thank you for asking. How can I assist you today?<|im_end|>\n<|im_start|>user\nCan you describe these images and video?Picture 2: <|vision_start|><|image_pad|><|vision_end|>Picture 3: <|vision_start|><|image_pad|><|vision_end|>Video 1: <|vision_start|><|video_pad|><|vision_end|>These are from my vacation.<|im_end|>\n<|im_start|>assistant\nI'd be happy to describe the images and video for you. Could you please provide more context about your vacation?<|im_end|>\n<|im_start|>user\nIt was a trip to the mountains. Can you see the details in the images and video?<|im_end|>\n<|im_start|>assistant\n' ``` #### Flash-Attention 2 to speed up generation First, make sure to install the latest version of Flash Attention 2: ```bash pip install -U flash-attn --no-build-isolation ``` Also, you should have hardware that is compatible with Flash-Attention 2. Read more about it in the official documentation of the [flash attention repository](https://github.com/Dao-AILab/flash-attention). FlashAttention-2 can only be used when a model is loaded in `torch.float16` or `torch.bfloat16`. To load and run a model using Flash Attention-2, simply add `attn_implementation="flash_attention_2"` when loading the model as follows: ```python import torch from transformers import Qwen2VLForConditionalGeneration model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", ) ``` ## Qwen2VLConfig [[autodoc]] Qwen2VLConfig ## Qwen2VLImageProcessor [[autodoc]] Qwen2VLImageProcessor - preprocess ## Qwen2VLProcessor [[autodoc]] Qwen2VLProcessor ## Qwen2VLModel [[autodoc]] Qwen2VLModel - forward ## Qwen2VLForConditionalGeneration [[autodoc]] Qwen2VLForConditionalGeneration - forward
transformers/docs/source/en/model_doc/qwen2_vl.md/0
{ "file_path": "transformers/docs/source/en/model_doc/qwen2_vl.md", "repo_id": "transformers", "token_count": 4619 }
314
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Vision Transformer (ViT) ## Overview The Vision Transformer (ViT) model was proposed in [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. It's the first paper that successfully trains a Transformer encoder on ImageNet, attaining very good results compared to familiar convolutional architectures. The abstract from the paper is the following: *While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vit_architecture.jpg" alt="drawing" width="600"/> <small> ViT architecture. Taken from the <a href="https://arxiv.org/abs/2010.11929">original paper.</a> </small> Following the original Vision Transformer, some follow-up works have been made: - [DeiT](deit) (Data-efficient Image Transformers) by Facebook AI. DeiT models are distilled vision transformers. The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into [`ViTModel`] or [`ViTForImageClassification`]. There are 4 variants available (in 3 different sizes): *facebook/deit-tiny-patch16-224*, *facebook/deit-small-patch16-224*, *facebook/deit-base-patch16-224* and *facebook/deit-base-patch16-384*. Note that one should use [`DeiTImageProcessor`] in order to prepare images for the model. - [BEiT](beit) (BERT pre-training of Image Transformers) by Microsoft Research. BEiT models outperform supervised pre-trained vision transformers using a self-supervised method inspired by BERT (masked image modeling) and based on a VQ-VAE. - DINO (a method for self-supervised training of Vision Transformers) by Facebook AI. 
Vision Transformers trained using the DINO method show very interesting properties not seen with convolutional models. They are capable of segmenting objects, without having ever been trained to do so. DINO checkpoints can be found on the [hub](https://huggingface.co/models?other=dino). - [MAE](vit_mae) (Masked Autoencoders) by Facebook AI. By pre-training Vision Transformers to reconstruct pixel values for a high portion (75%) of masked patches (using an asymmetric encoder-decoder architecture), the authors show that this simple method outperforms supervised pre-training after fine-tuning. This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code (written in JAX) can be found [here](https://github.com/google-research/vision_transformer). Note that we converted the weights from Ross Wightman's [timm library](https://github.com/rwightman/pytorch-image-models), who already converted the weights from JAX to PyTorch. Credits go to him! ## Usage tips - To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image, which can be used for classification. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder. - As the Vision Transformer expects each image to be of the same size (resolution), one can use [`ViTImageProcessor`] to resize (or rescale) and normalize images for the model. - Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of each checkpoint. For example, `google/vit-base-patch16-224` refers to a base-sized architecture with patch resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=vit). - The available checkpoints are either (1) pre-trained on [ImageNet-21k](http://www.image-net.org/) (a collection of 14 million images and 21k classes) only, or (2) also fine-tuned on [ImageNet](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes). - The Vision Transformer was pre-trained using a resolution of 224x224. During fine-tuning, it is often beneficial to use a higher resolution than pre-training [(Touvron et al., 2019)](https://arxiv.org/abs/1906.06423), [(Kolesnikov et al., 2020)](https://arxiv.org/abs/1912.11370). In order to fine-tune at higher resolution, the authors perform 2D interpolation of the pre-trained position embeddings, according to their location in the original image. - The best results are obtained with supervised pre-training, which is not the case in NLP. The authors also performed an experiment with a self-supervised pre-training objective, namely masked patch prediction (inspired by masked language modeling). With this approach, the smaller ViT-B/16 model achieves 79.9% accuracy on ImageNet, a significant improvement of 2% over training from scratch, but still 4% behind supervised pre-training.
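As a quick illustration of these tips, the sketch below runs inference with [`ViTImageProcessor`] and [`ViTForImageClassification`] using the `google/vit-base-patch16-224` checkpoint mentioned above. It is only a minimal sketch: the image URL is an arbitrary example and can be replaced with any RGB image.

```python
import requests
from PIL import Image
from transformers import ViTImageProcessor, ViTForImageClassification

# Any image works here; this URL is just a placeholder example
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# The image processor resizes/rescales and normalizes the image to the 224x224 resolution this checkpoint expects
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits

# This checkpoint is fine-tuned on ImageNet-1k, so there are 1,000 candidate labels
predicted_class_idx = logits.argmax(-1).item()
print(model.config.id2label[predicted_class_idx])
```

As described in the first tip, the classification head operates on the final hidden state of the [CLS] token, while the image processor takes care of resizing and normalization for you.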
### Using Scaled Dot Product Attention (SDPA) PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the [official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) page for more information. SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used. ```py import torch from transformers import ViTForImageClassification model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224", attn_implementation="sdpa", torch_dtype=torch.float16) ... ``` For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`). On a local benchmark (A100-40GB, PyTorch 2.3.0, OS Ubuntu 22.04) with `float32` and `google/vit-base-patch16-224` model, we saw the following speedups during inference. | Batch size | Average inference time (ms), eager mode | Average inference time (ms), sdpa mode | Speed up, Sdpa / Eager (x) | |--------------|-------------------------------------------|-------------------------------------------|------------------------------| | 1 | 7 | 6 | 1.17 | | 2 | 8 | 6 | 1.33 | | 4 | 8 | 6 | 1.33 | | 8 | 8 | 6 | 1.33 | ## Resources Demo notebooks regarding inference as well as fine-tuning ViT on custom data can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer). A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. 
`ViTForImageClassification` is supported by: <PipelineTag pipeline="image-classification"/> - A blog post on how to [Fine-Tune ViT for Image Classification with Hugging Face Transformers](https://huggingface.co/blog/fine-tune-vit) - A blog post on [Image Classification with Hugging Face Transformers and `Keras`](https://www.philschmid.de/image-classification-huggingface-transformers-keras) - A notebook on [Fine-tuning for Image Classification with Hugging Face Transformers](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb) - A notebook on how to [Fine-tune the Vision Transformer on CIFAR-10 with the Hugging Face Trainer](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) - A notebook on how to [Fine-tune the Vision Transformer on CIFAR-10 with PyTorch Lightning](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) ⚗️ Optimization - A blog post on how to [Accelerate Vision Transformer (ViT) with Quantization using Optimum](https://www.philschmid.de/optimizing-vision-transformer) ⚡️ Inference - A notebook on [Quick demo: Vision Transformer (ViT) by Google Brain](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Quick_demo_of_HuggingFace_version_of_Vision_Transformer_inference.ipynb) 🚀 Deploy - A blog post on [Deploying Tensorflow Vision Models in Hugging Face with TF Serving](https://huggingface.co/blog/tf-serving-vision) - A blog post on [Deploying Hugging Face ViT on Vertex AI](https://huggingface.co/blog/deploy-vertex-ai) - A blog post on [Deploying Hugging Face ViT on Kubernetes with TF Serving](https://huggingface.co/blog/deploy-tfserving-kubernetes) ## ViTConfig [[autodoc]] ViTConfig ## ViTFeatureExtractor [[autodoc]] ViTFeatureExtractor - __call__ ## ViTImageProcessor [[autodoc]] ViTImageProcessor - preprocess ## ViTImageProcessorFast [[autodoc]] ViTImageProcessorFast - preprocess <frameworkcontent> <pt> ## ViTModel [[autodoc]] ViTModel - forward ## ViTForMaskedImageModeling [[autodoc]] ViTForMaskedImageModeling - forward ## ViTForImageClassification [[autodoc]] ViTForImageClassification - forward </pt> <tf> ## TFViTModel [[autodoc]] TFViTModel - call ## TFViTForImageClassification [[autodoc]] TFViTForImageClassification - call </tf> <jax> ## FlaxVitModel [[autodoc]] FlaxViTModel - __call__ ## FlaxViTForImageClassification [[autodoc]] FlaxViTForImageClassification - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/vit.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vit.md", "repo_id": "transformers", "token_count": 3788 }
315
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # XLM-ProphetNet <Tip warning={true}> This model is in maintenance mode only, we don't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.40.2. You can do so by running the following command: `pip install -U transformers==4.40.2`. </Tip> <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=xprophetnet"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-xprophetnet-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/xprophetnet-large-wiki100-cased-xglue-ntg"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> **DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title) and assign @patrickvonplaten ## Overview The XLM-ProphetNet model was proposed in [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063), by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou on 13 Jan, 2020. XLM-ProphetNet is an encoder-decoder model and can predict n-future tokens for "ngram" language modeling instead of just the next token. Its architecture is identical to ProphetNet, but the model was trained on the multi-lingual "wiki100" Wikipedia dump. XLM-ProphetNet's model architecture and pretraining objective are the same as ProphetNet's, but XLM-ProphetNet was pre-trained on the cross-lingual dataset XGLUE. The abstract from the paper is the following: *In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.* The authors' code can be found [here](https://github.com/microsoft/ProphetNet). 
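A minimal generation sketch is shown below. It assumes the XGLUE news-title-generation checkpoint referenced by the Spaces badge above (`microsoft/xprophetnet-large-wiki100-cased-xglue-ntg`); the input article is only a made-up illustration, so treat this as a starting point rather than a reference implementation.

```python
from transformers import XLMProphetNetForConditionalGeneration, XLMProphetNetTokenizer

# Checkpoint fine-tuned on the XGLUE news title generation (NTG) task -- see the Spaces badge above
checkpoint = "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg"
tokenizer = XLMProphetNetTokenizer.from_pretrained(checkpoint)
model = XLMProphetNetForConditionalGeneration.from_pretrained(checkpoint)

# Any short news article works here; this text is just an illustration
article = (
    "The Eiffel Tower was closed to visitors on Tuesday after staff walked out "
    "in a dispute over ticketing and working conditions."
)
inputs = tokenizer(article, return_tensors="pt")

# Beam search usually gives better headlines than greedy decoding
generated = model.generate(**inputs, num_beams=4, max_length=30, early_stopping=True)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```

Since the model was pre-trained on the multilingual wiki100 dump, the same code should also work for articles written in the other languages covered by XGLUE.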
## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## XLMProphetNetConfig [[autodoc]] XLMProphetNetConfig ## XLMProphetNetTokenizer [[autodoc]] XLMProphetNetTokenizer ## XLMProphetNetModel [[autodoc]] XLMProphetNetModel ## XLMProphetNetEncoder [[autodoc]] XLMProphetNetEncoder ## XLMProphetNetDecoder [[autodoc]] XLMProphetNetDecoder ## XLMProphetNetForConditionalGeneration [[autodoc]] XLMProphetNetForConditionalGeneration ## XLMProphetNetForCausalLM [[autodoc]] XLMProphetNetForCausalLM
transformers/docs/source/en/model_doc/xlm-prophetnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm-prophetnet.md", "repo_id": "transformers", "token_count": 1218 }
316
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipelines for inference The [`pipeline`] makes it simple to use any model from the [Hub](https://huggingface.co/models) for inference on any language, computer vision, speech, and multimodal tasks. Even if you don't have experience with a specific modality or aren't familiar with the underlying code behind the models, you can still use them for inference with the [`pipeline`]! This tutorial will teach you to: * Use a [`pipeline`] for inference. * Use a specific tokenizer or model. * Use a [`pipeline`] for audio, vision, and multimodal tasks. <Tip> Take a look at the [`pipeline`] documentation for a complete list of supported tasks and available parameters. </Tip> ## Pipeline usage While each task has an associated [`pipeline`], it is simpler to use the general [`pipeline`] abstraction which contains all the task-specific pipelines. The [`pipeline`] automatically loads a default model and a preprocessing class capable of inference for your task. Let's take the example of using the [`pipeline`] for automatic speech recognition (ASR), or speech-to-text. 1. Start by creating a [`pipeline`] and specify the inference task: ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition") ``` 2. Pass your input to the [`pipeline`]. In the case of speech recognition, this is an audio input file: ```py >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'} ``` Not the result you had in mind? Check out some of the [most downloaded automatic speech recognition models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending) on the Hub to see if you can get a better transcription. Let's try the [Whisper large-v2](https://huggingface.co/openai/whisper-large-v2) model from OpenAI. Whisper was released 2 years later than Wav2Vec2, and was trained on close to 10x more data. As such, it beats Wav2Vec2 on most downstream benchmarks. It also has the added benefit of predicting punctuation and casing, neither of which are possible with Wav2Vec2. Let's give it a try here to see how it performs: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` Now this result looks more accurate! For a deep-dive comparison on Wav2Vec2 vs Whisper, refer to the [Audio Transformers Course](https://huggingface.co/learn/audio-course/chapter5/asr_models). We really encourage you to check out the Hub for models in different languages, models specialized in your field, and more. 
You can check out and compare model results directly from your browser on the Hub to see if it fits or handles corner cases better than other ones. And if you don't find a model for your use case, you can always start [training](training) your own! If you have several inputs, you can pass your input as a list: ```py transcriber( [ "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac", "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac", ] ) ``` Pipelines are great for experimentation as switching from one model to another is trivial; however, there are some ways to optimize them for larger workloads than experimentation. See the following guides that dive into iterating over whole datasets or using pipelines in a webserver: * [Using pipelines on a dataset](#using-pipelines-on-a-dataset) * [Using pipelines for a webserver](./pipeline_webserver) ## Parameters [`pipeline`] supports many parameters; some are task specific, and some are general to all pipelines. In general, you can specify parameters anywhere you want: ```py transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1) out = transcriber(...) # This will use `my_parameter=1`. out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`. out = transcriber(...) # This will go back to using `my_parameter=1`. ``` Let's check out 3 important ones: ### Device If you use `device=n`, the pipeline automatically puts the model on the specified device. This will work regardless of whether you are using PyTorch or TensorFlow. ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0) ``` If the model is too large for a single GPU and you are using PyTorch, you can set `torch_dtype='float16'` to enable FP16 precision inference. Usually this would not cause significant performance drops but make sure you evaluate it on your models! Alternatively, you can set `device_map="auto"` to automatically determine how to load and store the model weights. Using the `device_map` argument requires the 🤗 [Accelerate](https://huggingface.co/docs/accelerate) package: ```bash pip install --upgrade accelerate ``` The following code automatically loads and stores model weights across devices: ```py transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto") ``` Note that if `device_map="auto"` is passed, there is no need to add the argument `device=device` when instantiating your `pipeline`, otherwise you may encounter some unexpected behavior! ### Batch size By default, pipelines will not batch inference for reasons explained in detail [here](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching). The reason is that batching is not necessarily faster, and can actually be quite a bit slower in some cases. But if it works in your use case, you can use: ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2) audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)] texts = transcriber(audio_filenames) ``` This runs the pipeline on the 4 provided audio files, but it will pass them in batches of 2 to the model (which is on a GPU, where batching is more likely to help) without requiring any further code from you. The output should always match what you would have received without batching. It is only meant as a way to help you get more speed out of a pipeline. 
Pipelines can also alleviate some of the complexities of batching because, for some pipelines, a single item (like a long audio file) needs to be chunked into multiple parts to be processed by a model. The pipeline performs this [*chunk batching*](./main_classes/pipelines#pipeline-chunk-batching) for you. ### Task specific parameters All tasks provide task specific parameters which allow for additional flexibility and options to help you get your job done. For instance, the [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] method has a `return_timestamps` parameter which sounds promising for subtitling videos: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]} ``` As you can see, the model inferred the text and also outputted **when** the various sentences were pronounced. There are many parameters available for each task, so check out each task's API reference to see what you can tinker with! For instance, the [`~transformers.AutomaticSpeechRecognitionPipeline`] has a `chunk_length_s` parameter which is helpful for working on really long audio files (for example, subtitling entire movies or hour-long videos) that a model typically cannot handle on its own: ```python >>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30) >>> transcriber("https://huggingface.co/datasets/reach-vb/random-audios/resolve/main/ted_60.wav") {'text': " So in college, I was a government major, which means I had to write a lot of papers. Now, when a normal student writes a paper, they might spread the work out a little like this. So, you know. You get started maybe a little slowly, but you get enough done in the first week that with some heavier days later on, everything gets done and things stay civil. And I would want to do that like that. That would be the plan. I would have it all ready to go, but then actually the paper would come along, and then I would kind of do this. And that would happen every single paper. But then came my 90-page senior thesis, a paper you're supposed to spend a year on. I knew for a paper like that, my normal workflow was not an option, it was way too big a project. So I planned things out and I decided I kind of had to go something like this. This is how the year would go. So I'd start off light and I'd bump it up"} ``` If you can't find a parameter that would really help you out, feel free to [request it](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)! ## Using pipelines on a dataset The pipeline can also run inference on a large dataset. 
The easiest way we recommend doing this is by using an iterator: ```py def data(): for i in range(1000): yield f"My example {i}" pipe = pipeline(model="openai-community/gpt2", device=0) generated_characters = 0 for out in pipe(data()): generated_characters += len(out[0]["generated_text"]) ``` The iterator `data()` yields each input, and the pipeline automatically recognizes the input is iterable and will start fetching the data while it continues to process it on the GPU (this uses [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) under the hood). This is important because you don't have to allocate memory for the whole dataset and you can feed the GPU as fast as possible. Since batching could speed things up, it may be useful to try tuning the `batch_size` parameter here. The simplest way to iterate over a dataset is to just load one from 🤗 [Datasets](https://github.com/huggingface/datasets/): ```py # KeyDataset is a util that will just output the item we're interested in. from transformers.pipelines.pt_utils import KeyDataset from datasets import load_dataset pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]") for out in pipe(KeyDataset(dataset, "audio")): print(out) ``` ## Using pipelines for a webserver <Tip> Creating an inference engine is a complex topic which deserves its own page. </Tip> [Link](./pipeline_webserver) ## Vision pipeline Using a [`pipeline`] for vision tasks is practically identical. Specify your task and pass your image to the classifier. The image can be a link, a local path or a base64-encoded image. For example, what species of cat is shown below? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) ```py >>> from transformers import pipeline >>> vision_classifier = pipeline(model="google/vit-base-patch16-224") >>> preds = vision_classifier( ... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] ``` ## Text pipeline Using a [`pipeline`] for NLP tasks is practically identical. ```py >>> from transformers import pipeline >>> # This model is a `zero-shot-classification` model. >>> # It will classify text, except you are free to choose any label you might imagine >>> classifier = pipeline(model="facebook/bart-large-mnli") >>> classifier( ... "I have a problem with my iphone that needs to be resolved asap!!", ... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], ... ) {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} ``` ## Multimodal pipeline The [`pipeline`] supports more than one modality. For example, a visual question answering (VQA) task combines text and image. Feel free to use any image link you like and a question you want to ask about the image. The image can be a URL or a local path to the image. 
For example, if you use this [invoice image](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png): ```py >>> from transformers import pipeline >>> vqa = pipeline(model="impira/layoutlm-document-qa") >>> output = vqa( ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", ... question="What is the invoice number?", ... ) >>> output[0]["score"] = round(output[0]["score"], 3) >>> output [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] ``` <Tip> To run the example above you need to have [`pytesseract`](https://pypi.org/project/pytesseract/) installed in addition to 🤗 Transformers: ```bash sudo apt install -y tesseract-ocr pip install pytesseract ``` </Tip> ## Using `pipeline` on large models with 🤗 `accelerate`: You can easily run `pipeline` on large models using 🤗 `accelerate`! First make sure you have installed `accelerate` with `pip install accelerate`. Then load your model using `device_map="auto"`! We will use `facebook/opt-1.3b` for our example. ```py # pip install accelerate import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto") output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` You can also pass 8-bit loaded models if you install `bitsandbytes` and add the argument `load_in_8bit=True`: ```py # pip install accelerate bitsandbytes import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True}) output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` Note that you can replace the checkpoint with any Hugging Face model that supports large model loading, such as BLOOM. ## Creating web demos from pipelines with `gradio` Pipelines are automatically supported in [Gradio](https://github.com/gradio-app/gradio/), a library that makes creating beautiful and user-friendly machine learning apps on the web a breeze. First, make sure you have Gradio installed: ```bash pip install gradio ``` Then, you can create a web demo around an image classification pipeline (or any other pipeline) in a single line of code by calling Gradio's [`Interface.from_pipeline`](https://www.gradio.app/docs/interface#interface-from-pipeline) function to launch the pipeline. This creates an intuitive drag-and-drop interface in your browser: ```py from transformers import pipeline import gradio as gr pipe = pipeline("image-classification", model="google/vit-base-patch16-224") gr.Interface.from_pipeline(pipe).launch() ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/panda-classification.png) By default, the web demo runs on a local server. If you'd like to share it with others, you can generate a temporary public link by setting `share=True` in `launch()`. You can also host your demo on [Hugging Face Spaces](https://huggingface.co/spaces) for a permanent link.
transformers/docs/source/en/pipeline_tutorial.md/0
{ "file_path": "transformers/docs/source/en/pipeline_tutorial.md", "repo_id": "transformers", "token_count": 5056 }
317
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quick tour [[open-in-colab]] Get up and running with 🤗 Transformers! Whether you're a developer or an everyday user, this quick tour will help you get started and show you how to use the [`pipeline`] for inference, load a pretrained model and preprocessor with an [AutoClass](./model_doc/auto), and quickly train a model with PyTorch or TensorFlow. If you're a beginner, we recommend checking out our tutorials or [course](https://huggingface.co/course/chapter1/1) next for more in-depth explanations of the concepts introduced here. Before you begin, make sure you have all the necessary libraries installed: ```bash !pip install transformers datasets evaluate accelerate ``` You'll also need to install your preferred machine learning framework: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## Pipeline <Youtube id="tiZFewofSLM"/> The [`pipeline`] is the easiest and fastest way to use a pretrained model for inference. You can use the [`pipeline`] out-of-the-box for many tasks across different modalities, some of which are shown in the table below: <Tip> For a complete list of available tasks, check out the [pipeline API reference](./main_classes/pipelines). 
</Tip> | **Task** | **Description** | **Modality** | **Pipeline identifier** | |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|-----------------------------------------------| | Text classification | assign a label to a given sequence of text | NLP | pipeline(task=“sentiment-analysis”) | | Text generation | generate text given a prompt | NLP | pipeline(task=“text-generation”) | | Summarization | generate a summary of a sequence of text or document | NLP | pipeline(task=“summarization”) | | Image classification | assign a label to an image | Computer vision | pipeline(task=“image-classification”) | | Image segmentation | assign a label to each individual pixel of an image (supports semantic, panoptic, and instance segmentation) | Computer vision | pipeline(task=“image-segmentation”) | | Object detection | predict the bounding boxes and classes of objects in an image | Computer vision | pipeline(task=“object-detection”) | | Audio classification | assign a label to some audio data | Audio | pipeline(task=“audio-classification”) | | Automatic speech recognition | transcribe speech into text | Audio | pipeline(task=“automatic-speech-recognition”) | | Visual question answering | answer a question about the image, given an image and a question | Multimodal | pipeline(task=“vqa”) | | Document question answering | answer a question about the document, given a document and a question | Multimodal | pipeline(task="document-question-answering") | | Image captioning | generate a caption for a given image | Multimodal | pipeline(task="image-to-text") | Start by creating an instance of [`pipeline`] and specifying a task you want to use it for. In this guide, you'll use the [`pipeline`] for sentiment analysis as an example: ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` The [`pipeline`] downloads and caches a default [pretrained model](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) and tokenizer for sentiment analysis. Now you can use the `classifier` on your target text: ```py >>> classifier("We are very happy to show you the 🤗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` If you have more than one input, pass your inputs as a list to the [`pipeline`] to return a list of dictionaries: ```py >>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) >>> for result in results: ... print(f"label: {result['label']}, with score: {round(result['score'], 4)}") label: POSITIVE, with score: 0.9998 label: NEGATIVE, with score: 0.5309 ``` The [`pipeline`] can also iterate over an entire dataset for any task you like. For this example, let's choose automatic speech recognition as our task: ```py >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` Load an audio dataset (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart#audio) for more details) you'd like to iterate over. 
For example, load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` You need to make sure the sampling rate of the dataset matches the sampling rate [`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h) was trained on: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` The audio files are automatically loaded and resampled when calling the `"audio"` column. Extract the raw waveform arrays from the first 4 samples and pass them as a list to the pipeline: ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I FURN A JOINA COUT'] ``` For larger datasets where the inputs are big (like in speech or vision), you'll want to pass a generator instead of a list so that you don't load all the inputs in memory. Take a look at the [pipeline API reference](./main_classes/pipelines) for more information. ### Use another model and tokenizer in the pipeline The [`pipeline`] can accommodate any model from the [Hub](https://huggingface.co/models), making it easy to adapt the [`pipeline`] for other use-cases. For example, if you'd like a model capable of handling French text, use the tags on the Hub to filter for an appropriate model. The top filtered result returns a multilingual [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) finetuned for sentiment analysis you can use for French text: ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Use [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on an `AutoClass` in the next section): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Use [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on a `TFAutoClass` in the next section): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Specify the model and tokenizer in the [`pipeline`], and now you can apply the `classifier` on French text: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` If you can't find a model for your use-case, you'll need to finetune a pretrained model on your data. 
Take a look at our [finetuning tutorial](./training) to learn how. Finally, after you've finetuned your pretrained model, please consider [sharing](./model_sharing) the model with the community on the Hub to democratize machine learning for everyone! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> Under the hood, the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] classes work together to power the [`pipeline`] you used above. An [AutoClass](./model_doc/auto) is a shortcut that automatically retrieves the architecture of a pretrained model from its name or path. You only need to select the appropriate `AutoClass` for your task and its associated preprocessing class. Let's return to the example from the previous section and see how you can use the `AutoClass` to replicate the results of the [`pipeline`]. ### AutoTokenizer A tokenizer is responsible for preprocessing text into an array of numbers as inputs to a model. There are multiple rules that govern the tokenization process, including how to split a word and at what level words should be split (learn more about tokenization in the [tokenizer summary](./tokenizer_summary)). The most important thing to remember is that you need to instantiate a tokenizer with the same model name to ensure you're using the same tokenization rules a model was pretrained with. Load a tokenizer with [`AutoTokenizer`]: ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Pass your text to the tokenizer: ```py >>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` The tokenizer returns a dictionary containing: * [input_ids](./glossary#input-ids): numerical representations of your tokens. * [attention_mask](./glossary#attention-mask): indicates which tokens should be attended to. A tokenizer can also accept a list of inputs, and pad and truncate the text to return a batch with uniform length: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> <Tip> Check out the [preprocess](./preprocessing) tutorial for more details about tokenization, and how to use an [`AutoImageProcessor`], [`AutoFeatureExtractor`] and [`AutoProcessor`] to preprocess image, audio, and multimodal inputs. </Tip> ### AutoModel <frameworkcontent> <pt> 🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`AutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`AutoModel`] for the task. 
For text (or sequence) classification, you should load [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> See the [task summary](./task_summary) for tasks supported by an [`AutoModel`] class. </Tip> Now pass your preprocessed batch of inputs directly to the model. You just have to unpack the dictionary by adding `**`: ```py >>> pt_outputs = pt_model(**pt_batch) ``` The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`TFAutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`TFAutoModel`] for the task. For text (or sequence) classification, you should load [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> See the [task summary](./task_summary) for tasks supported by an [`AutoModel`] class. </Tip> Now pass your preprocessed batch of inputs directly to the model. You can pass the tensors as-is: ```py >>> tf_outputs = tf_model(tf_batch) ``` The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> All 🤗 Transformers models (PyTorch or TensorFlow) output the tensors *before* the final activation function (like softmax) because the final activation function is often fused with the loss. Model outputs are special dataclasses so their attributes are autocompleted in an IDE. The model outputs behave like a tuple or a dictionary (you can index with an integer, a slice or a string) in which case, attributes that are None are ignored. 
</Tip>

## Save a model

<frameworkcontent>
<pt>
Once your model is fine-tuned, you can save it with its tokenizer using [`PreTrainedModel.save_pretrained`]:

```py
>>> pt_save_directory = "./pt_save_pretrained"
>>> tokenizer.save_pretrained(pt_save_directory)  # doctest: +IGNORE_RESULT
>>> pt_model.save_pretrained(pt_save_directory)
```

When you are ready to use the model again, reload it with [`PreTrainedModel.from_pretrained`]:

```py
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")
```
</pt>
<tf>
Once your model is fine-tuned, you can save it with its tokenizer using [`TFPreTrainedModel.save_pretrained`]:

```py
>>> tf_save_directory = "./tf_save_pretrained"
>>> tokenizer.save_pretrained(tf_save_directory)  # doctest: +IGNORE_RESULT
>>> tf_model.save_pretrained(tf_save_directory)
```

When you are ready to use the model again, reload it with [`TFPreTrainedModel.from_pretrained`]:

```py
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")
```
</tf>
</frameworkcontent>

One particularly cool 🤗 Transformers feature is the ability to save a model and reload it as either a PyTorch or TensorFlow model. The `from_pt` or `from_tf` parameter can convert the model from one framework to the other:

<frameworkcontent>
<pt>

```py
>>> from transformers import AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
```

</pt>
<tf>

```py
>>> from transformers import TFAutoModel

>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```

</tf>
</frameworkcontent>

## Custom model builds

You can modify the model's configuration class to change how a model is built. The configuration specifies a model's attributes, such as the number of hidden layers or attention heads. You start from scratch when you initialize a model from a custom configuration class. The model attributes are randomly initialized, and you'll need to train the model before you can use it to get meaningful results.

Start by importing [`AutoConfig`], and then load the pretrained model you want to modify. Within [`AutoConfig.from_pretrained`], you can specify the attribute you want to change, such as the number of attention heads:

```py
>>> from transformers import AutoConfig

>>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12)
```

<frameworkcontent>
<pt>
Create a model from your custom configuration with [`AutoModel.from_config`]:

```py
>>> from transformers import AutoModel

>>> my_model = AutoModel.from_config(my_config)
```
</pt>
<tf>
Create a model from your custom configuration with [`TFAutoModel.from_config`]:

```py
>>> from transformers import TFAutoModel

>>> my_model = TFAutoModel.from_config(my_config)
```
</tf>
</frameworkcontent>

Take a look at the [Create a custom architecture](./create_a_model) guide for more information about building custom configurations.

## Trainer - a PyTorch optimized training loop

All models are a standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) so you can use them in any typical training loop. While you can write your own training loop, 🤗 Transformers provides a [`Trainer`] class for PyTorch, which contains the basic training loop and adds additional functionality for features like distributed training, mixed precision, and more.
Depending on your task, you'll typically pass the following parameters to [`Trainer`]: 1. You'll start with a [`PreTrainedModel`] or a [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module): ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. [`TrainingArguments`] contains the model hyperparameters you can change like learning rate, batch size, and the number of epochs to train for. The default values are used if you don't specify any training arguments: ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="path/to/save/folder/", ... learning_rate=2e-5, ... per_device_train_batch_size=8, ... per_device_eval_batch_size=8, ... num_train_epochs=2, ... ) ``` 3. Load a preprocessing class like a tokenizer, image processor, feature extractor, or processor: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 4. Load a dataset: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT ``` 5. Create a function to tokenize the dataset: ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) ``` Then apply it over the entire dataset with [`~datasets.Dataset.map`]: ```py >>> dataset = dataset.map(tokenize_dataset, batched=True) ``` 6. A [`DataCollatorWithPadding`] to create a batch of examples from your dataset: ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` Now gather all these classes in [`Trainer`]: ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... ) # doctest: +SKIP ``` When you're ready, call [`~Trainer.train`] to start training: ```py >>> trainer.train() # doctest: +SKIP ``` <Tip> For tasks - like translation or summarization - that use a sequence-to-sequence model, use the [`Seq2SeqTrainer`] and [`Seq2SeqTrainingArguments`] classes instead. </Tip> You can customize the training loop behavior by subclassing the methods inside [`Trainer`]. This allows you to customize features such as the loss function, optimizer, and scheduler. Take a look at the [`Trainer`] reference for which methods can be subclassed. The other way to customize the training loop is by using [Callbacks](./main_classes/callback). You can use callbacks to integrate with other libraries and inspect the training loop to report on progress or stop the training early. Callbacks do not modify anything in the training loop itself. To customize something like the loss function, you need to subclass the [`Trainer`] instead. ## Train with TensorFlow All models are a standard [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) so they can be trained in TensorFlow with the [Keras](https://keras.io/) API. 🤗 Transformers provides the [`~TFPreTrainedModel.prepare_tf_dataset`] method to easily load your dataset as a `tf.data.Dataset` so you can start training right away with Keras' [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) and [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) methods. 1. 
You'll start with a [`TFPreTrainedModel`] or a [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model):

   ```py
   >>> from transformers import TFAutoModelForSequenceClassification

   >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
   ```

2. Load a preprocessing class like a tokenizer, image processor, feature extractor, or processor:

   ```py
   >>> from transformers import AutoTokenizer

   >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
   ```

3. Create a function to tokenize the dataset:

   ```py
   >>> def tokenize_dataset(dataset):
   ...     return tokenizer(dataset["text"])  # doctest: +SKIP
   ```

4. Apply the tokenizer over the entire dataset with [`~datasets.Dataset.map`] and then pass the dataset and tokenizer to [`~TFPreTrainedModel.prepare_tf_dataset`]. You can also change the batch size and shuffle the dataset here if you'd like:

   ```py
   >>> dataset = dataset.map(tokenize_dataset)  # doctest: +SKIP
   >>> tf_dataset = model.prepare_tf_dataset(
   ...     dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer
   ... )  # doctest: +SKIP
   ```

5. When you're ready, you can call `compile` and `fit` to start training. Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to:

   ```py
   >>> from tensorflow.keras.optimizers import Adam

   >>> model.compile(optimizer=Adam(3e-5))  # No loss argument!
   >>> model.fit(tf_dataset)  # doctest: +SKIP
   ```

## What's next?

Now that you've completed the 🤗 Transformers quick tour, check out our guides and learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and how to train a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our Conceptual Guides!
transformers/docs/source/en/quicktour.md/0
{ "file_path": "transformers/docs/source/en/quicktour.md", "repo_id": "transformers", "token_count": 8297 }
318
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Mask Generation

Mask generation is the task of generating semantically meaningful masks for an image.
This task is very similar to [image segmentation](semantic_segmentation), but many differences exist. Image segmentation models are trained on labeled datasets and are limited to the classes they have seen during training; they return a set of masks and corresponding classes, given an image.

Mask generation models are trained on large amounts of data and operate in two modes.
- Prompting mode: In this mode, the model takes in an image and a prompt, where a prompt can be a 2D point location (XY coordinates) in the image within an object or a bounding box surrounding an object. In prompting mode, the model only returns the mask over the object
that the prompt is pointing out.
- Segment Everything mode: In segment everything, given an image, the model generates every mask in the image. To do so, a grid of points is generated and overlaid on the image for inference.

The mask generation task is supported by the [Segment Anything Model (SAM)](model_doc/sam). It's a powerful model that consists of a Vision Transformer-based image encoder, a prompt encoder, and a two-way transformer mask decoder. Images and prompts are encoded, and the decoder takes these embeddings and generates valid masks.

<div class="flex justify-center">
 <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sam.png" alt="SAM Architecture"/>
</div>

SAM serves as a powerful foundation model for segmentation as it has large data coverage. It is trained on
[SA-1B](https://ai.meta.com/datasets/segment-anything/), a dataset with 11 million images and 1.1 billion masks.

In this guide, you will learn how to:
- Infer in segment everything mode with batching,
- Infer in point prompting mode,
- Infer in box prompting mode.

First, let's install `transformers`:

```bash
pip install -q transformers
```

## Mask Generation Pipeline

The easiest way to infer mask generation models is to use the `mask-generation` pipeline.

```python
>>> from transformers import pipeline

>>> checkpoint = "facebook/sam-vit-base"
>>> mask_generator = pipeline(model=checkpoint, task="mask-generation")
```

Let's see the image.

```python
from PIL import Image
import requests

img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
```

<div class="flex justify-center">
 <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg" alt="Example Image"/>
</div>

Let's segment everything. `points_per_batch` enables parallel inference over points in segment everything mode. This enables faster inference, but consumes more memory.
Moreover, SAM only enables batching over points, not over images. `pred_iou_thresh` is the IoU confidence threshold: only masks above that threshold are returned.

```python
masks = mask_generator(image, points_per_batch=128, pred_iou_thresh=0.88)
```

The output `masks` looks like the following:

```bash
{'masks': [array([[False, False, False, ...,  True,  True,  True],
         [False, False, False, ...,  True,  True,  True],
         [False, False, False, ...,  True,  True,  True],
         ...,
         [False, False, False, ..., False, False, False],
         [False, False, False, ..., False, False, False],
         [False, False, False, ..., False, False, False]]),
  array([[False, False, False, ..., False, False, False],
         [False, False, False, ..., False, False, False],
         [False, False, False, ..., False, False, False],
         ...,
'scores': tensor([0.9972, 0.9917,
        ...,
}
```

We can visualize them like this:

```python
import matplotlib.pyplot as plt

plt.imshow(image, cmap='gray')

for i, mask in enumerate(masks["masks"]):
    plt.imshow(mask, cmap='viridis', alpha=0.1, vmin=0, vmax=1)

plt.axis('off')
plt.show()
```

Below is the original image in grayscale with the colorful masks overlaid. Very impressive.

<div class="flex justify-center">
 <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee_segmented.png" alt="Visualized"/>
</div>

## Model Inference

### Point Prompting

You can also use the model without the pipeline. To do so, initialize the model and the processor.

```python
from transformers import SamModel, SamProcessor
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = SamModel.from_pretrained("facebook/sam-vit-base").to(device)
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
```

To do point prompting, pass the input point to the processor, then take the processor output and pass it to the model for inference. To post-process the model output, pass the outputs together with the `original_sizes` and `reshaped_input_sizes` taken from the processor's initial output. We need to pass these since the processor resizes the image, and the output masks need to be mapped back to the original image size.

```python
input_points = [[[2592, 1728]]] # point location of the bee

inputs = processor(image, input_points=input_points, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)
masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu())
```

We can visualize the three masks in the `masks` output.

```python
import matplotlib.pyplot as plt
import numpy as np

fig, axes = plt.subplots(1, 4, figsize=(15, 5))

axes[0].imshow(image)
axes[0].set_title('Original Image')
mask_list = [masks[0][0][0].numpy(), masks[0][0][1].numpy(), masks[0][0][2].numpy()]

for i, mask in enumerate(mask_list, start=1):
    overlayed_image = np.array(image).copy()

    overlayed_image[:,:,0] = np.where(mask == 1, 255, overlayed_image[:,:,0])
    overlayed_image[:,:,1] = np.where(mask == 1, 0, overlayed_image[:,:,1])
    overlayed_image[:,:,2] = np.where(mask == 1, 0, overlayed_image[:,:,2])

    axes[i].imshow(overlayed_image)
    axes[i].set_title(f'Mask {i}')
for ax in axes:
    ax.axis('off')

plt.show()
```

<div class="flex justify-center">
 <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/masks.png" alt="Visualized"/>
</div>

### Box Prompting

You can also do box prompting in a similar fashion to point prompting.
You can simply pass the input box in the format `[x_min, y_min, x_max, y_max]` along with the image to the `processor`. Take the processor output and directly pass it to the model, then post-process the output again.

```python
# bounding box around the bee
box = [2350, 1600, 2850, 2100]

inputs = processor(
        image,
        input_boxes=[[[box]]],
        return_tensors="pt"
    ).to(device)

with torch.no_grad():
    outputs = model(**inputs)

mask = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(),
    inputs["original_sizes"].cpu(),
    inputs["reshaped_input_sizes"].cpu()
)[0][0][0].numpy()
```

You can visualize the bounding box around the bee as shown below.

```python
import matplotlib.patches as patches

fig, ax = plt.subplots()
ax.imshow(image)

rectangle = patches.Rectangle((2350, 1600), 500, 500, linewidth=2, edgecolor='r', facecolor='none')
ax.add_patch(rectangle)
ax.axis("off")
plt.show()
```

<div class="flex justify-center">
 <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/bbox.png" alt="Visualized Bbox"/>
</div>

You can see the inference output below.

```python
fig, ax = plt.subplots()
ax.imshow(image)
ax.imshow(mask, cmap='viridis', alpha=0.4)

ax.axis("off")
plt.show()
```

<div class="flex justify-center">
 <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/box_inference.png" alt="Visualized Inference"/>
</div>
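Finally, note that both inference paths expose confidence scores, which makes it easy to discard low-quality masks before visualizing them. Below is a minimal sketch, assuming you re-run the `mask-generation` pipeline from the beginning of this guide; the 0.95 threshold is an arbitrary illustrative value:

```python
import matplotlib.pyplot as plt

outputs = mask_generator(image, points_per_batch=128, pred_iou_thresh=0.88)

# Keep only the masks whose predicted IoU score clears the (illustrative) threshold.
confident_masks = [
    mask for mask, score in zip(outputs["masks"], outputs["scores"]) if score > 0.95
]

plt.imshow(image, cmap="gray")
for mask in confident_masks:
    plt.imshow(mask, cmap="viridis", alpha=0.1, vmin=0, vmax=1)
plt.axis("off")
plt.show()
```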
transformers/docs/source/en/tasks/mask_generation.md/0
{ "file_path": "transformers/docs/source/en/tasks/mask_generation.md", "repo_id": "transformers", "token_count": 2851 }
319
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Zero-shot image classification

[[open-in-colab]]

Zero-shot image classification is a task that involves classifying images into different categories using a model that was not explicitly trained on data containing labeled examples from those specific categories.

Traditionally, image classification requires training a model on a specific set of labeled images, and this model learns to "map" certain image features to labels. When there's a need to use such a model for a classification task that introduces a new set of labels, fine-tuning is required to "recalibrate" the model.

In contrast, zero-shot or open vocabulary image classification models are typically multi-modal models that have been trained on a large dataset of images and associated descriptions. These models learn aligned vision-language representations that can be used for many downstream tasks including zero-shot image classification.

This is a more flexible approach to image classification that allows models to generalize to new and unseen categories without the need for additional training data, and enables users to query images with free-form text descriptions of their target objects.

In this guide you'll learn how to:

* create a zero-shot image classification pipeline
* run zero-shot image classification inference by hand

Before you begin, make sure you have all the necessary libraries installed:

```bash
pip install -q "transformers[torch]" pillow
```

## Zero-shot image classification pipeline

The simplest way to try out inference with a model supporting zero-shot image classification is to use the corresponding [`pipeline`].
Instantiate a pipeline from a [checkpoint on the Hugging Face Hub](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads):

```python
>>> from transformers import pipeline

>>> checkpoint = "openai/clip-vit-large-patch14"
>>> detector = pipeline(model=checkpoint, task="zero-shot-image-classification")
```

Next, choose an image you'd like to classify.

```py
>>> from PIL import Image
>>> import requests

>>> url = "https://unsplash.com/photos/g8oS8-82DxI/download?ixid=MnwxMjA3fDB8MXx0b3BpY3x8SnBnNktpZGwtSGt8fHx8fDJ8fDE2NzgxMDYwODc&force=true&w=640"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> image
```

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/owl.jpg" alt="Photo of an owl"/>
</div>

Pass the image and the candidate object labels to the pipeline. Here we pass the image directly; other suitable options include a local path to an image or an image URL. The candidate labels can be simple words like in this example, or more descriptive.
```py
>>> predictions = detector(image, candidate_labels=["fox", "bear", "seagull", "owl"])
>>> predictions
[{'score': 0.9996670484542847, 'label': 'owl'},
 {'score': 0.000199399160919711, 'label': 'seagull'},
 {'score': 7.392891711788252e-05, 'label': 'fox'},
 {'score': 5.96074532950297e-05, 'label': 'bear'}]
```

## Zero-shot image classification by hand

Now that you've seen how to use the zero-shot image classification pipeline, let's take a look at how you can run zero-shot
image classification manually.

Start by loading the model and associated processor from a [checkpoint on the Hugging Face Hub](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads).
Here we'll use the same checkpoint as before:

```py
>>> from transformers import AutoProcessor, AutoModelForZeroShotImageClassification

>>> model = AutoModelForZeroShotImageClassification.from_pretrained(checkpoint)
>>> processor = AutoProcessor.from_pretrained(checkpoint)
```

Let's take a different image to switch things up.

```py
>>> from PIL import Image
>>> import requests

>>> url = "https://unsplash.com/photos/xBRQfR2bqNI/download?ixid=MnwxMjA3fDB8MXxhbGx8fHx8fHx8fHwxNjc4Mzg4ODEx&force=true&w=640"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> image
```

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg" alt="Photo of a car"/>
</div>

Use the processor to prepare the inputs for the model. The processor combines an image processor that prepares the image for the model by resizing and normalizing it, and a tokenizer that takes care of the text inputs.

```py
>>> candidate_labels = ["tree", "car", "bike", "cat"]
# follows the pipeline prompt template to get same results
>>> candidate_labels = [f'This is a photo of {label}.' for label in candidate_labels]
>>> inputs = processor(images=image, text=candidate_labels, return_tensors="pt", padding=True)
```

Pass the inputs through the model, and post-process the results:

```py
>>> import torch

>>> with torch.no_grad():
...     outputs = model(**inputs)

>>> logits = outputs.logits_per_image[0]
>>> probs = logits.softmax(dim=-1).numpy()

>>> result = [
...     {"score": score, "label": candidate_label}
...     for score, candidate_label in sorted(zip(probs, candidate_labels), key=lambda x: -x[0])
... ]

>>> result
[{'score': 0.998572, 'label': 'car'},
 {'score': 0.0010570387, 'label': 'bike'},
 {'score': 0.0003393686, 'label': 'tree'},
 {'score': 3.1572064e-05, 'label': 'cat'}]
```
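The same manual path extends naturally to a batch of images: the processor accepts a list of images, and `logits_per_image` then contains one row of label scores per image. Below is a minimal sketch reusing the `image`, `candidate_labels`, `model`, and `processor` from above; the duplicated image simply stands in for a batch of different images:

```py
>>> import torch

>>> images = [image, image]  # stand-in for a batch of different images
>>> inputs = processor(images=images, text=candidate_labels, return_tensors="pt", padding=True)

>>> with torch.no_grad():
...     outputs = model(**inputs)

>>> probs = outputs.logits_per_image.softmax(dim=-1)
>>> probs.shape  # one row of probabilities per image
torch.Size([2, 4])
```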
transformers/docs/source/en/tasks/zero_shot_image_classification.md/0
{ "file_path": "transformers/docs/source/en/tasks/zero_shot_image_classification.md", "repo_id": "transformers", "token_count": 1801 }
320
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Load pretrained instances with an AutoClass

With so many different Transformer architectures, it can be challenging to create one for your checkpoint. As part of the 🤗 Transformers core philosophy of making the library easy, simple, and flexible to use, an `AutoClass` automatically infers and loads the correct architecture from a given checkpoint. The `from_pretrained` method lets you quickly load a pretrained model for any architecture, so you don't have to devote time and resources to training one from scratch. Producing this kind of checkpoint-agnostic code means that if your code works for one checkpoint, it will work for another one (as long as it was trained for a similar task), even if the architecture is different.

<Tip>

Remember, architecture refers to the skeleton of the model, and checkpoints are the weights for a given architecture. For example, [BERT](https://huggingface.co/google-bert/bert-base-uncased) is an architecture, while `google-bert/bert-base-uncased` is a checkpoint. Model is a general term that can mean either an architecture or a checkpoint.

</Tip>

In this tutorial, you will learn how to:

* Load a pretrained tokenizer.
* Load a pretrained feature extractor.
* Load a pretrained processor.
* Load a pretrained model.

## AutoTokenizer

Nearly every NLP task begins with a tokenizer. A tokenizer converts your input into a format that can be processed by the model.

Load a tokenizer with [`AutoTokenizer.from_pretrained`]:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
```

Then tokenize your input as shown below:

```py
>>> sequence = "In a hole in the ground there lived a hobbit."
>>> print(tokenizer(sequence))
{'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

## AutoFeatureExtractor

For audio and vision tasks, a feature extractor processes the audio signal or image into the correct input format.

Load a feature extractor with [`AutoFeatureExtractor.from_pretrained`]:

```py
>>> from transformers import AutoFeatureExtractor

>>> feature_extractor = AutoFeatureExtractor.from_pretrained(
...     "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
... )
```

## AutoProcessor

Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the [LayoutLMV2](model_doc/layoutlmv2) model requires a feature extractor to handle images and a tokenizer to handle text; a processor combines both.

Load a processor with [`AutoProcessor.from_pretrained`]:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
```

## AutoModel

<frameworkcontent>
<pt>
Finally, the `AutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`AutoModelForSequenceClassification.from_pretrained`]:

```py
>>> from transformers import AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

Easily reuse the same checkpoint to load an architecture for a different task:

```py
>>> from transformers import AutoModelForTokenClassification

>>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

Generally, we recommend using the `AutoTokenizer` and `AutoModelFor` classes to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, feature extractor, and processor to preprocess a dataset for fine-tuning.
</pt>
<tf>
Finally, the `TFAutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`TFAutoModelForSequenceClassification.from_pretrained`]:

```py
>>> from transformers import TFAutoModelForSequenceClassification

>>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

Easily reuse the same checkpoint to load an architecture for a different task:

```py
>>> from transformers import TFAutoModelForTokenClassification

>>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

Generally, we recommend using the `AutoTokenizer` and `TFAutoModelFor` classes to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, feature extractor, and processor to preprocess a dataset for fine-tuning.
</tf>
</frameworkcontent>
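To tie these pieces together, here is a minimal sketch that pairs the tokenizer and sequence-classification model loaded above for a single forward pass. Note that the classification head of this checkpoint is randomly initialized until you fine-tune it, so the logits below are not meaningful predictions:

```py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")

# Tokenize a sentence and run one forward pass.
inputs = tokenizer("In a hole in the ground there lived a hobbit.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

print(logits.shape)  # (1, number of labels) — 2 labels by default
```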
transformers/docs/source/es/autoclass_tutorial.md/0
{ "file_path": "transformers/docs/source/es/autoclass_tutorial.md", "repo_id": "transformers", "token_count": 2066 }
321
<!--- Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Performance and Scalability

Training large transformer models and deploying them to production present several challenges. During training, the model may require more GPU memory than is available or exhibit slow training speed. In the deployment phase, the model can struggle to handle the required throughput in a production environment.

This documentation aims to help you overcome these challenges and find the optimal settings for your use case. The guides are divided into training and inference sections, as each comes with different challenges and solutions. Within each section you'll find separate guides for different hardware configurations, such as single GPU vs. multi-GPU for training, or CPU vs. GPU for inference.

Use this document as your starting point to navigate to the methods that fit your scenario.

## Training

Training large transformer models efficiently requires an accelerator such as a GPU or TPU. The most common case is where you have a single GPU. The methods you can apply to improve training efficiency on a single GPU also extend to other setups such as multiple GPUs. However, there are also techniques that are specific to multi-GPU or CPU training, which we cover in separate sections.

* [Methods and tools for efficient training on a single GPU](https://huggingface.co/docs/transformers/perf_train_gpu_one): start here to learn common approaches that can help optimize GPU memory utilization, speed up training, or both.
* [Multi-GPU training section](https://huggingface.co/docs/transformers/perf_train_gpu_many): explore this section to learn about further optimization methods that apply to multi-GPU settings, such as data, tensor, and pipeline parallelism.
* [CPU training section](https://huggingface.co/docs/transformers/perf_train_cpu): learn about mixed precision training on CPU.
* [Efficient training on multiple CPUs](https://huggingface.co/docs/transformers/perf_train_cpu_many): learn about distributed CPU training.
* [Training on TPU with TensorFlow](https://huggingface.co/docs/transformers/perf_train_tpu_tf): if you are new to TPUs, refer to this section for an opinionated introduction to training on TPUs and using XLA.
* [Custom hardware for training](https://huggingface.co/docs/transformers/perf_hardware): find tips and tricks when building your own deep learning rig.
* [Hyperparameter search using the Trainer API](https://huggingface.co/docs/transformers/hpo_train)

## Inference

Efficient inference with large models in a production environment can be as challenging as training them. In the following sections we describe the steps to run inference on CPU and on single/multi-GPU setups.

* [Inference on a single CPU](https://huggingface.co/docs/transformers/perf_infer_cpu)
* [Inference on a single GPU](https://huggingface.co/docs/transformers/perf_infer_gpu_one)
* [Multi-GPU inference](https://huggingface.co/docs/transformers/perf_infer_gpu_one)
* [XLA integration for TensorFlow models](https://huggingface.co/docs/transformers/tf_xla)

## Training and Inference

Here you'll find techniques, tips, and tricks that apply whether you are training a model or running inference with it.

* [Instantiating a big model](https://huggingface.co/docs/transformers/big_models)
* [Troubleshooting performance issues](https://huggingface.co/docs/transformers/debugging)

## Contribute

This document is far from complete and a lot more still needs to be added, so if you have additions or corrections to make, please don't hesitate to open a PR. If you're not sure, start an Issue and we can discuss the details there.

When making contributions that say A is better than B, try to include a reproducible benchmark and/or a link to the source of that information (unless it comes directly from you).
transformers/docs/source/es/performance.md/0
{ "file_path": "transformers/docs/source/es/performance.md", "repo_id": "transformers", "token_count": 1751 }
322
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Multiple choice

A multiple choice task is similar to question answering, except several candidate answers are provided along with a context. The model is trained to select the correct answer from multiple options given a context.

This guide will show you how to fine-tune [BERT](https://huggingface.co/google-bert/bert-base-uncased) on the `regular` configuration of the [SWAG](https://huggingface.co/datasets/swag) dataset so that it selects the best answer from several options and some context.

## Load the SWAG dataset

Load the SWAG dataset with the 🤗 Datasets library:

```py
>>> from datasets import load_dataset

>>> swag = load_dataset("swag", "regular")
```

Now, take a look at an example from the dataset:

```py
>>> swag["train"][0]
{'ending0': 'passes by walking down the street playing their instruments.',
 'ending1': 'has heard approaching them.',
 'ending2': "arrives and they're outside dancing and asleep.",
 'ending3': 'turns the lead singer watches the performance.',
 'fold-ind': '3416',
 'gold-source': 'gold',
 'label': 0,
 'sent1': 'Members of the procession walk down the street holding small horn brass instruments.',
 'sent2': 'A drum line',
 'startphrase': 'Members of the procession walk down the street holding small horn brass instruments. A drum line',
 'video-id': 'anetv_jkn6uvmqwh4'}
```

The `sent1` and `sent2` fields show how a sentence begins, and each `ending` field shows how it could end. Given the sentence beginning, the model must pick the correct sentence ending indicated by the `label` field.

## Preprocess

Load the BERT tokenizer to process the start of each sentence and the four possible endings:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
```

The preprocessing function needs to do the following:

1. Make four copies of the `sent1` field so you can combine each of them with `sent2` to recreate how a sentence starts.
2. Combine `sent2` with each of the four possible sentence endings.
3. Flatten these two lists so you can tokenize them, and then unflatten them so each example has corresponding `input_ids`, `attention_mask`, and `labels` fields.

```py
>>> ending_names = ["ending0", "ending1", "ending2", "ending3"]


>>> def preprocess_function(examples):
...     first_sentences = [[context] * 4 for context in examples["sent1"]]
...     question_headers = examples["sent2"]
...     second_sentences = [
...         [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
...     ]

...     first_sentences = sum(first_sentences, [])
...     second_sentences = sum(second_sentences, [])

...     tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True)
...     return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
```

Use the 🤗 Datasets [`~datasets.Dataset.map`] function to apply the preprocessing function over the entire dataset. You can speed up the `map` function by setting `batched=True` to process multiple elements of the dataset at once:

```py
tokenized_swag = swag.map(preprocess_function, batched=True)
```

🤗 Transformers doesn't have a data collator for multiple choice, so you'll need to create one. You can adapt the [`DataCollatorWithPadding`] to create a batch of examples for multiple choice. It will also *dynamically pad* your text and labels to the length of the longest element in its batch, so they are a uniform length. While it is possible to pad your text in the `tokenizer` function by setting `padding=True`, dynamic padding is more efficient.

`DataCollatorForMultipleChoice` flattens all the model inputs, applies padding, and then unflattens the results:

<frameworkcontent>
<pt>
```py
>>> from dataclasses import dataclass
>>> from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy
>>> from typing import Optional, Union
>>> import torch


>>> @dataclass
... class DataCollatorForMultipleChoice:
...     """
...     Data collator that will dynamically pad the inputs for multiple choice received.
...     """

...     tokenizer: PreTrainedTokenizerBase
...     padding: Union[bool, str, PaddingStrategy] = True
...     max_length: Optional[int] = None
...     pad_to_multiple_of: Optional[int] = None

...     def __call__(self, features):
...         label_name = "label" if "label" in features[0].keys() else "labels"
...         labels = [feature.pop(label_name) for feature in features]
...         batch_size = len(features)
...         num_choices = len(features[0]["input_ids"])
...         flattened_features = [
...             [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
...         ]
...         flattened_features = sum(flattened_features, [])

...         batch = self.tokenizer.pad(
...             flattened_features,
...             padding=self.padding,
...             max_length=self.max_length,
...             pad_to_multiple_of=self.pad_to_multiple_of,
...             return_tensors="pt",
...         )

...         batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
...         batch["labels"] = torch.tensor(labels, dtype=torch.int64)
...         return batch
```
</pt>
<tf>
```py
>>> from dataclasses import dataclass
>>> from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy
>>> from typing import Optional, Union
>>> import tensorflow as tf


>>> @dataclass
... class DataCollatorForMultipleChoice:
...     """
...     Data collator that will dynamically pad the inputs for multiple choice received.
...     """

...     tokenizer: PreTrainedTokenizerBase
...     padding: Union[bool, str, PaddingStrategy] = True
...     max_length: Optional[int] = None
...     pad_to_multiple_of: Optional[int] = None

...     def __call__(self, features):
...         label_name = "label" if "label" in features[0].keys() else "labels"
...         labels = [feature.pop(label_name) for feature in features]
...         batch_size = len(features)
...         num_choices = len(features[0]["input_ids"])
...         flattened_features = [
...             [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
...         ]
...         flattened_features = sum(flattened_features, [])

...         batch = self.tokenizer.pad(
...             flattened_features,
...             padding=self.padding,
...             max_length=self.max_length,
...             pad_to_multiple_of=self.pad_to_multiple_of,
...             return_tensors="tf",
...         )

...         batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()}
...         batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64)
...         return batch
```
</tf>
</frameworkcontent>

## Train

<frameworkcontent>
<pt>
Load BERT with [`AutoModelForMultipleChoice`]:

```py
>>> from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer

>>> model = AutoModelForMultipleChoice.from_pretrained("google-bert/bert-base-uncased")
```

<Tip>

If you aren't familiar with fine-tuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#finetune-with-trainer)!

</Tip>

At this point, only three steps remain:

1. Define your training hyperparameters in [`TrainingArguments`].
2. Pass the training arguments to [`Trainer`] along with the model, dataset, tokenizer, and data collator.
3. Call [`~Trainer.train`] to fine-tune your model.

```py
>>> training_args = TrainingArguments(
...     output_dir="./results",
...     eval_strategy="epoch",
...     learning_rate=5e-5,
...     per_device_train_batch_size=16,
...     per_device_eval_batch_size=16,
...     num_train_epochs=3,
...     weight_decay=0.01,
... )

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=tokenized_swag["train"],
...     eval_dataset=tokenized_swag["validation"],
...     tokenizer=tokenizer,
...     data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
... )

>>> trainer.train()
```
</pt>
<tf>
To fine-tune a model in TensorFlow, start by setting up an optimizer function, a learning rate schedule, and some training hyperparameters:

```py
>>> from transformers import create_optimizer

>>> batch_size = 16
>>> num_train_epochs = 2
>>> total_train_steps = (len(tokenized_swag["train"]) // batch_size) * num_train_epochs
>>> optimizer, schedule = create_optimizer(init_lr=5e-5, num_warmup_steps=0, num_train_steps=total_train_steps)
```

Load BERT with [`TFAutoModelForMultipleChoice`]:

```py
>>> from transformers import TFAutoModelForMultipleChoice

>>> model = TFAutoModelForMultipleChoice.from_pretrained("google-bert/bert-base-uncased")
```

Then convert your datasets to the `tf.data.Dataset` format with [`~TFPreTrainedModel.prepare_tf_dataset`]:

```py
>>> data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
>>> tf_train_set = model.prepare_tf_dataset(
...     tokenized_swag["train"],
...     shuffle=True,
...     batch_size=batch_size,
...     collate_fn=data_collator,
... )

>>> tf_validation_set = model.prepare_tf_dataset(
...     tokenized_swag["validation"],
...     shuffle=False,
...     batch_size=batch_size,
...     collate_fn=data_collator,
... )
```

<Tip>

If you aren't familiar with fine-tuning a model with Keras, take a look at the basic tutorial [here](training#finetune-with-keras)!

</Tip>

Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method):

```py
>>> model.compile(optimizer=optimizer)
```

Call the [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) method to fine-tune the model:

```py
>>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=2)
```
</tf>
</frameworkcontent>
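As a quick sanity check after training, here is a minimal, hypothetical inference sketch in PyTorch; `path/to/finetuned-model` is a placeholder for wherever you saved your fine-tuned checkpoint:

```py
import torch
from transformers import AutoTokenizer, AutoModelForMultipleChoice

# "path/to/finetuned-model" is a placeholder, not a real checkpoint name.
tokenizer = AutoTokenizer.from_pretrained("path/to/finetuned-model")
model = AutoModelForMultipleChoice.from_pretrained("path/to/finetuned-model")

prompt = "Members of the procession walk down the street holding small horn brass instruments. A drum line"
candidates = [
    "passes by walking down the street playing their instruments.",
    "has heard approaching them.",
]

# Pair the same prompt with every candidate ending, then add a batch dimension.
inputs = tokenizer([prompt] * len(candidates), candidates, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in inputs.items()}

with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, num_choices)

print(logits.argmax(dim=-1))  # index of the predicted ending
```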
transformers/docs/source/es/tasks/multiple_choice.md/0
{ "file_path": "transformers/docs/source/es/tasks/multiple_choice.md", "repo_id": "transformers", "token_count": 4169 }
323
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipelines pour l'inférence L'objet [`pipeline`] rend simple l'utilisation de n'importe quel modèle du [Hub](https://huggingface.co/models) pour l'inférence sur n'importe quelle langue, tâches de vision par ordinateur, d'audio et multimodales. Même si vous n'avez pas d'expérience avec une modalité spécifique ou si vous n'êtes pas familier avec le code ci-dessous des modèles, vous pouvez toujours les utiliser pour l'inférence avec la [`pipeline`] ! Ce tutoriel vous apprendra à : * Utiliser un [`pipeline`] pour l'inférence. * Utiliser un tokenizer ou modèle spécifique. * Utiliser un [`pipeline`] pour des tâches audio, de vision et multimodales. <Tip> Consultez la documentation du [`pipeline`] pour une liste complète des tâches prises en charge et des paramètres disponibles. </Tip> ## Utilisation du pipeline Bien que chaque tâche ait son propre [`pipeline`], il est plus simple d'utiliser le [`pipeline`] générale qui inclut tous les pipelines spécifiques aux différentes tâches. Cette approche charge automatiquement un modèle par défaut et une classe de prétraitement adaptée à votre tâche, simplifiant ainsi votre utilisation. Prenons l'exemple de l'utilisation du [`pipeline`] pour la reconnaissance automatique de la parole (ASR) ou de la transcription de la parole en texte. 1. Commencez par créer un [`pipeline`] et spécifiez la tâche d'inférence : ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition") ``` 2. Passez votre entrée au [`pipeline`]. Dans le cas de la reconnaissance vocale, il s'agit d'un fichier audio : ```py >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'} ``` Pas le résultat que vous aviez en tête ? Consultez certains des [modèles de reconnaissance vocale automatique les plus téléchargés](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending) sur le Hub pour voir si vous pouvez obtenir une meilleure transcription. Essayons le modèle [Whisper large-v2](https://huggingface.co/openai/whisper-large) de OpenAI. Whisper a été publié 2 ans après Wav2Vec2 et a été entraîné sur près de 10 fois plus de données. En tant que tel, il surpasse Wav2Vec2 sur la plupart des benchmarks en aval. Il a également l'avantage supplémentaire de prédire la ponctuation et la casse, ce qui n'est pas possible avec Wav2Vec2. Essayons-le ici pour voir comment il fonctionne : ```py >>> transcriber = pipeline(model="openai/whisper-large-v2") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` Maintenant, ce résultat semble plus précis ! Pour une comparaison approfondie entre Wav2Vec2 et Whisper, consultez le [cours Audio Transformers](https://huggingface.co/learn/audio-course/chapter5/asr_models). Nous vous encourageons vraiment à consulter le Hub pour des modèles dans différentes langues, des modèles spécialisés dans votre domaine, et plus encore. Vous pouvez consulter et comparer les résultats des modèles directement depuis votre navigateur sur le Hub pour voir s'ils conviennent ou gèrent mieux les cas particuliers que d'autres. 
Et si vous ne trouvez pas de modèle pour votre cas d'utilisation, vous pouvez toujours commencer à [entraîner](training) le vôtre ! Si vous avez plusieurs entrées, vous pouvez passer votre entrée sous forme de liste : ```py transcriber( [ "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac", "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac", ] ) ``` Les pipelines sont excellents pour l'expérimentation car passer d'un modèle à un autre est trivial ; cependant, il existe des moyens de les optimiser pour des charges de travail plus importantes que l'expérimentation. Consultez les guides suivants qui expliquent comment itérer sur des ensembles de données complets ou utiliser des pipelines dans un serveur web : de la documentation : * [Utilisation des pipelines sur un ensemble de données](#using-pipelines-on-a-dataset) * [Utilisation des pipelines pour un serveur web](./pipeline_webserver) ## Paramètres [`pipeline`] prend en charge de nombreux paramètres ; certains sont spécifiques à la tâche et d'autres sont généraux pour tous les pipelines. En général, vous pouvez spécifier les paramètres où vous le souhaitez : ```py transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1) out = transcriber(...) # This will use `my_parameter=1`. out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`. out = transcriber(...) # This will go back to using `my_parameter=1`. ``` Voyons 3 paramètres importants : ### Device Si vous utilisez `device=n`, le pipeline met automatiquement le modèle sur l'appareil spécifié. Cela fonctionnera que vous utilisiez PyTorch ou Tensorflow. ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0) ``` Si le modèle est trop grand pour un seul GPU et que vous utilisez PyTorch, vous pouvez définir `device_map="auto"` pour déterminer automatiquement comment charger et stocker les poids du modèle. L'utilisation de l'argument `device_map` nécessite le package 🤗 [Accelerate](https://huggingface.co/docs/accelerate) : ```bash pip install --upgrade accelerate ``` Le code suivant charge et stocke automatiquement les poids du modèle sur plusieurs appareils : ```py transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto") ``` Notez que si `device_map="auto"` est passé, il n'est pas nécessaire d'ajouter l'argument `device=device` lors de l'instanciation de votre `pipeline` car vous pourriez rencontrer des comportements inattendus ! ### Batch size Par défaut, les pipelines ne feront pas d'inférence en batch pour des raisons expliquées en détail [ici](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching). La raison est que le batching n'est pas nécessairement plus rapide, et peut en fait être beaucoup plus lent dans certains cas. Mais si cela fonctionne dans votre cas d'utilisation, vous pouvez utiliser : ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2) audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)] texts = transcriber(audio_filenames) ``` Cela exécute le pipeline sur les 4 fichiers audio fournis, mais les passera par batch de 2 au modèle (qui est sur un GPU, où le batching est plus susceptible d'aider) sans nécessiter de code supplémentaire de votre part. La sortie doit toujours correspondre à ce que vous auriez reçu sans batching. Il s'agit uniquement d'un moyen de vous aider à obtenir plus de vitesse avec un pipeline. 
Les pipelines peuvent également atténuer certaines des complexités du batching car, pour certains pipelines, un seul élément (comme un long fichier audio) doit être divisé en plusieurs parties pour être traité par un modèle. Le pipeline effectue ce [*batching par morceaux*](./main_classes/pipelines#pipeline-chunk-batching) pour vous. ### Paramètres spécifiques à la tâche Toutes les tâches fournissent des paramètres spécifiques à la tâche qui permettent une flexibilité et des options supplémentaires pour vous aider à accomplir votre travail. Par exemple, la méthode [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] dispose d'un paramètre `return_timestamps` qui semble prometteur pour le sous-titrage des vidéos : ```py >>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]} ``` Comme vous pouvez le voir, le modèle a inféré le texte et a également indiqué **quand** les différentes phrases ont été prononcées. Il existe de nombreux paramètres disponibles pour chaque tâche, alors consultez la référence API de chaque tâche pour voir ce que vous pouvez ajuster ! Par exemple, le [`~transformers.AutomaticSpeechRecognitionPipeline`] dispose d'un paramètre `chunk_length_s` qui est utile pour travailler sur des fichiers audio très longs (par exemple, le sous-titrage de films entiers ou de vidéos d'une heure) qu'un modèle ne peut généralement pas gérer seul : ```python >>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30) >>> transcriber("https://huggingface.co/datasets/reach-vb/random-audios/resolve/main/ted_60.wav") {'text': " So in college, I was a government major, which means I had to write a lot of papers. Now, when a normal student writes a paper, they might spread the work out a little like this. So, you know. You get started maybe a little slowly, but you get enough done in the first week that with some heavier days later on, everything gets done and things stay civil. And I would want to do that like that. That would be the plan. I would have it all ready to go, but then actually the paper would come along, and then I would kind of do this. And that would happen every single paper. But then came my 90-page senior thesis, a paper you're supposed to spend a year on. I knew for a paper like that, my normal workflow was not an option, it was way too big a project. So I planned things out and I decided I kind of had to go something like this. This is how the year would go. So I'd start off light and I'd bump it up"} ``` Si vous ne trouvez pas un paramètre qui vous aiderait vraiment, n'hésitez pas à [le demander](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml) ! ## Utilisation des pipelines sur un ensemble de données Le pipeline peut également exécuter des inférences sur un grand ensemble de données. 
Le moyen le plus simple que nous recommandons pour cela est d'utiliser un itérateur : ```py def data(): for i in range(1000): yield f"My example {i}" pipe = pipeline(model="openai-community/gpt2", device=0) generated_characters = 0 for out in pipe(data()): generated_characters += len(out[0]["generated_text"]) ``` L'itérateur `data()` génère chaque résultat, et le pipeline reconnaît automatiquement que l'entrée est itérable et commencera à récupérer les données tout en continuant à les traiter sur le GPU (cela utilise [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) sous le capot). C'est important car vous n'avez pas besoin d'allouer de mémoire pour l'ensemble de données complet et vous pouvez alimenter le GPU aussi rapidement que possible. Étant donné que le lotissement pourrait accélérer les choses, il peut être utile d'essayer de régler le paramètre `batch_size` ici. La façon la plus simple d'itérer sur un ensemble de données est d'en charger un depuis 🤗 [Datasets](https://github.com/huggingface/datasets) : ```py # KeyDataset is a util that will just output the item we're interested in. from transformers.pipelines.pt_utils import KeyDataset from datasets import load_dataset pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]") for out in pipe(KeyDataset(dataset, "audio")): print(out) ``` ## Utilisation des pipelines pour un serveur web <Tip> Créer un moteur d'inférence est un sujet complexe qui mérite sa propre page. </Tip> [Lien](./pipeline_webserver) ## Pipeline de vision Utiliser un [`pipeline`] pour les tâches de vision est pratiquement identique. Spécifiez votre tâche et passez votre image au classificateur. L'image peut être un lien, un chemin local ou une image encodée en base64. Par exemple, quelle espèce de chat est montrée ci-dessous ? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) ```py >>> from transformers import pipeline >>> vision_classifier = pipeline(model="google/vit-base-patch16-224") >>> preds = vision_classifier( ... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] ``` ## Pipeline de texte Utiliser un [`pipeline`] pour les tâches de NLP est pratiquement identique. ```py >>> from transformers import pipeline >>> # This model is a `zero-shot-classification` model. >>> # It will classify text, except you are free to choose any label you might imagine >>> classifier = pipeline(model="facebook/bart-large-mnli") >>> classifier( ... "I have a problem with my iphone that needs to be resolved asap!!", ... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], ... ) {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} ``` ## Pipeline multimodal Le [`pipeline`] prend en charge plus d'une modalité. 
For example, a visual question answering (VQA) task combines text and image. Feel free to use any image link you like and a question you want to ask about the image. The image can be a URL or a local path to the image.

For example, if you use this [invoice image](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png):

```py
>>> from transformers import pipeline

>>> vqa = pipeline(model="impira/layoutlm-document-qa")
>>> output = vqa(
...     image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
...     question="What is the invoice number?",
... )
>>> output[0]["score"] = round(output[0]["score"], 3)
>>> output
[{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}]
```

<Tip>

To run the example above, you need to have [`pytesseract`](https://pypi.org/project/pytesseract/) installed in addition to 🤗 Transformers:

```bash
sudo apt install -y tesseract-ocr
pip install pytesseract
```

</Tip>

## Using `pipeline` on large models with 🤗 `accelerate`:

You can easily run `pipeline` on large models using 🤗 `accelerate`! First make sure you have installed `accelerate` with `pip install accelerate`.

First load your model using `device_map="auto"`! We will use `facebook/opt-1.3b` for our example.

```py
# pip install accelerate
import torch
from transformers import pipeline

pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto")
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```

You can also pass 8-bit loaded models if you install `bitsandbytes` and add the argument `load_in_8bit=True`. Note that you can replace the checkpoint with any Hugging Face model that supports large model loading, such as BLOOM.

```py
# pip install accelerate bitsandbytes
import torch
from transformers import pipeline

pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True})
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```

## Creating web demos from pipelines with `gradio`

Pipelines are automatically supported in [Gradio](https://github.com/gradio-app/gradio/), a library that makes creating beautiful and user-friendly machine learning apps on the web a breeze. First, make sure you have Gradio installed:

```
pip install gradio
```

Then, you can create a web demo around an image classification pipeline (or any other pipeline) in a single line of code by calling Gradio's [`Interface.from_pipeline`](https://www.gradio.app/docs/interface#interface-from-pipeline) function to launch the pipeline. This creates an intuitive drag-and-drop interface in your browser:

```py
from transformers import pipeline
import gradio as gr

pipe = pipeline("image-classification", model="google/vit-base-patch16-224")

gr.Interface.from_pipeline(pipe).launch()
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/panda-classification.png)

By default, the web demo runs on a local server. If you'd like to share it with others, you can generate a temporary public link by setting `share=True` in `launch()`.
You can also host your demo on [Hugging Face Spaces](https://huggingface.co/spaces) to get a permanent link.
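As a minimal sketch of the temporary-link variant mentioned above, only the `launch()` call changes:

```py
# A minimal sketch: serve the same demo with a temporary public link.
gr.Interface.from_pipeline(pipe).launch(share=True)
```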
transformers/docs/source/fr/tutoriel_pipeline.md/0
{ "file_path": "transformers/docs/source/fr/tutoriel_pipeline.md", "repo_id": "transformers", "token_count": 6226 }
324
<!--- Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Installation

Install 🤗 Transformers for whichever deep learning library you're working with, set up your cache, and optionally configure 🤗 Transformers to run offline.

🤗 Transformers is tested on Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:

* [PyTorch](https://pytorch.org/get-started/locally/) installation instructions.
* [TensorFlow 2.0](https://www.tensorflow.org/install/pip) installation instructions.
* [Flax](https://flax.readthedocs.io/en/latest/) installation instructions.

## Install with pip

You can install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.

Start by creating a virtual environment in your project directory:

```bash
python -m venv .env
```

Activate the virtual environment:

```bash
source .env/bin/activate
```

Now you can install 🤗 Transformers by running the following command:

```bash
pip install transformers
```

For CPU-only support, you can conveniently install 🤗 Transformers and a deep learning library in a single line. For example, install 🤗 Transformers and PyTorch with:

```bash
pip install transformers[torch]
```

🤗 Transformers and TensorFlow 2.0:

```bash
pip install transformers[tf-cpu]
```

🤗 Transformers and Flax:

```bash
pip install transformers[flax]
```

Finally, check whether 🤗 Transformers was installed properly by running the following command. It will download a pretrained model:

```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))"
```

Then it prints the label and the score:

```bash
[{'label': 'POSITIVE', 'score': 0.9998704791069031}]
```

## Install from source

Install 🤗 Transformers from source with the following command:

```bash
pip install git+https://github.com/huggingface/transformers
```

This command installs the bleeding-edge `main` version rather than the latest stable release. This is useful for staying up to date with the latest developments, for instance if a bug has been fixed since the last official release but a new release hasn't been rolled out yet. However, this means the `main` version may not always be stable. We strive to keep the `main` version operational, and most issues are resolved within a few hours or a day.
If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner!

Check whether 🤗 Transformers was installed properly with the following command:

```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))"
```

## Editable install

You will need an editable install if you'd like to:

* Use the `main` version of the source code.
* Contribute to 🤗 Transformers and need to test changes in the code.

Clone the repository and install 🤗 Transformers with the following commands:

```bash
git clone https://github.com/huggingface/transformers.git
cd transformers
pip install -e .
```

These commands link the folder you cloned the repository to with your Python library paths. Python will now look inside the cloned folder in addition to the normal library paths. For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.7/site-packages/`, Python will also search the cloned folder: `~/transformers/`.

<Tip warning={true}>

You must keep the `transformers` folder if you want to keep using the library.

</Tip>

Now you can easily update your clone to the latest version of 🤗 Transformers with the following command:

```bash
cd ~/transformers/
git pull
```

Your Python environment will find the `main` version of 🤗 Transformers on the next run.

## Install with conda

Install from the conda channel `conda-forge`:

```bash
conda install conda-forge::transformers
```

## Cache setup

Pretrained models are downloaded and locally cached at: `~/.cache/huggingface/transformers/`. This is the default directory given by the shell environment variable `TRANSFORMERS_CACHE`. On Windows, the default directory is `C:\Users\username\.cache\huggingface\transformers`. You can change the shell environment variables shown below, in order of priority, to specify a different cache directory:

1. Shell environment variable (default): `TRANSFORMERS_CACHE`.
2. Shell environment variable: `HF_HOME` + `transformers/`.
3. Shell environment variable: `XDG_CACHE_HOME` + `/huggingface/transformers`.

<Tip>

🤗 Transformers will use the shell environment variables `PYTORCH_TRANSFORMERS_CACHE` or `PYTORCH_PRETRAINED_BERT_CACHE` if you are coming from an earlier iteration of this library and have set those environment variables, unless you specify the shell environment variable `TRANSFORMERS_CACHE`.

</Tip>

## Offline mode

🤗 Transformers can run in a firewalled or offline environment by only using local files. Set the environment variable `HF_HUB_OFFLINE=1` to enable this behavior.

<Tip>

Add [🤗 Datasets](https://huggingface.co/docs/datasets/) to your offline training workflow by setting the environment variable `HF_DATASETS_OFFLINE=1`.

</Tip>

For example, you would typically run a program on a normal network firewalled to external instances with the following command:

```bash
python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ...
```

Run this same program in an offline instance with:

```bash
HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \
python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ...
```

The script now runs without hanging or waiting to time out, because it knows it should only look for local files.

### Fetching models and tokenizers to use offline

Another option for using 🤗 Transformers offline is to download the files ahead of time, and then point to their local path when you need to use them offline. There are three ways to do this:

* Download a file through the user interface on the [Model Hub](https://huggingface.co/models) by clicking on the ↓ icon.

    ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png)

* Use the [`PreTrainedModel.from_pretrained`] and [`PreTrainedModel.save_pretrained`] workflow:

    1. Download your files ahead of time with [`PreTrainedModel.from_pretrained`]:

    ```py
    >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B")
    >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B")
    ```

    2. Save your files to a specified directory with [`PreTrainedModel.save_pretrained`]:

    ```py
    >>> tokenizer.save_pretrained("./il/tuo/path/bigscience_t0")
    >>> model.save_pretrained("./il/tuo/path/bigscience_t0")
    ```

    3. Now when you're offline, reload your files with [`PreTrainedModel.from_pretrained`] from the specified directory:

    ```py
    >>> tokenizer = AutoTokenizer.from_pretrained("./il/tuo/path/bigscience_t0")
    >>> model = AutoModel.from_pretrained("./il/tuo/path/bigscience_t0")
    ```

* Programmatically download files with the [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) library:

    1. Install the `huggingface_hub` library in your virtual environment:

    ```bash
    python -m pip install huggingface_hub
    ```

    2. Use the [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) function to download a file to a specific path. For example, the following command downloads the `config.json` file from the [T0](https://huggingface.co/bigscience/T0_3B) model to your desired path:

    ```py
    >>> from huggingface_hub import hf_hub_download

    >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./il/tuo/path/bigscience_t0")
    ```

Once your file is downloaded and locally cached, specify its local path to load and use it:

```py
>>> from transformers import AutoConfig

>>> config = AutoConfig.from_pretrained("./il/tuo/path/bigscience_t0/config.json")
```

<Tip>

See the [How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream) section for more details on downloading models stored on the Hub.

</Tip>
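As a minimal sketch of the cache setup above (the directory path is just a placeholder), you can point the cache at a custom location for a single shell session before launching Python:

```bash
# Hypothetical cache location; any writable directory works.
export TRANSFORMERS_CACHE=/path/to/my/cache
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))"
```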
transformers/docs/source/it/installation.md/0
{ "file_path": "transformers/docs/source/it/installation.md", "repo_id": "transformers", "token_count": 3586 }
325
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Quick tour

[[open-in-colab]]

Get up and running with 🤗 Transformers! Start by using [`pipeline`] for rapid inference, and load a pretrained model and tokenizer with an [AutoClass](./model_doc/auto) to solve your text, vision, or audio tasks.

<Tip>

All code examples presented in this documentation have a button at the top left that lets you switch between PyTorch and TensorFlow. If it isn't there, the code is expected to work for both backends without any change.

</Tip>

## Pipeline

[`pipeline`] is the easiest way to use a pretrained model for a given task.

<Youtube id="tiZFewofSLM"/>

The [`pipeline`] supports many common tasks:

**Text**:
* Sentiment analysis: classify the polarity of a given text.
* Text generation: generate text from a given input.
* Name entity recognition (NER): label each word with the entity it represents (person, date, location, etc.).
* Question answering: extract the answer from a context, given some context and a question.
* Fill-mask: fill in the blanks in a text with masked words.
* Summarization: generate a summary of a long sequence of text or a document.
* Translation: translate a text into another language.
* Feature extraction: create a tensor representation of a text.

**Images**:
* Image classification: classify an image.
* Image segmentation: classify every pixel of an image.
* Object detection: detect objects within an image.

**Audio**:
* Audio classification: assign a label to a given segment of audio.
* Automatic speech recognition (ASR): transcribe the content of a given audio into text.

<Tip>

For more details about the [`pipeline`] and its associated tasks, refer to the documentation [here](./main_classes/pipelines).

</Tip>

### Pipeline usage

In the following example, you will use the [`pipeline`] for sentiment analysis.
Install the following dependencies if you haven't already:

<frameworkcontent>
<pt>

```bash
pip install torch
```
</pt>
<tf>

```bash
pip install tensorflow
```
</tf>
</frameworkcontent>

Import [`pipeline`] and specify the task you want to complete:

```py
>>> from transformers import pipeline

>>> classificatore = pipeline("sentiment-analysis", model="MilaNLProc/feel-it-italian-sentiment")
```

The pipeline downloads and caches the [pretrained model](https://huggingface.co/MilaNLProc/feel-it-italian-sentiment) and tokenizer for sentiment analysis. If we hadn't chosen a model, the pipeline would have picked a default one. Now you can use the `classificatore` on your target text:

```py
>>> classificatore("Siamo molto felici di mostrarti la libreria 🤗 Transformers.")
[{'label': 'positive', 'score': 0.9997}]
```

For more than one sentence, pass a list of sentences to the [`pipeline`], which returns a list of dictionaries:

```py
>>> risultati = classificatore(
...     ["Siamo molto felici di mostrarti la libreria 🤗 Transformers.", "Speriamo te non la odierai."]
... )
>>> for risultato in risultati:
...     print(f"etichetta: {risultato['label']}, con punteggio: {round(risultato['score'], 4)}")
etichetta: positive, con punteggio: 0.9998
etichetta: negative, con punteggio: 0.9998
```

The [`pipeline`] can also iterate over an entire dataset. Start by installing the [🤗 Datasets](https://huggingface.co/docs/datasets/) library:

```bash
pip install datasets
```

Create a [`pipeline`] with the task you want to solve and the model you want to use.

```py
>>> import torch
>>> from transformers import pipeline

>>> riconoscitore_vocale = pipeline(
...     "automatic-speech-recognition", model="radiogroup-crits/wav2vec2-xls-r-1b-italian-doc4lm-5gram"
... )
```

Next, load a dataset (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart) for more details) you'd like to iterate over. For example, let's load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset:

```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", name="it-IT", split="train")  # doctest: +IGNORE_RESULT
```

We need to make sure that the sampling rate of the dataset matches the sampling rate `radiogroup-crits/wav2vec2-xls-r-1b-italian-doc4lm-5gram` was trained on.

```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=riconoscitore_vocale.feature_extractor.sampling_rate))
```

The audio files are automatically loaded and resampled when we call the `"audio"` column. Let's extract the raw waveform arrays of the first 4 samples and pass them as a list to the pipeline:

```py
>>> risultato = riconoscitore_vocale(dataset[:4]["audio"])
>>> print([d["text"] for d in risultato])
['dovrei caricare dei soldi sul mio conto corrente', 'buongiorno e senza vorrei depositare denaro sul mio conto corrente come devo fare per cortesia', 'sì salve vorrei depositare del denaro sul mio conto', 'e buon pomeriggio vorrei depositare dei soldi sul mio conto bancario volleo sapere come posso fare se e posso farlo online ed un altro conto o andandoo tramite bancomut']
```

For a larger dataset where the inputs are big (as in speech/audio or vision), you'll want to pass a generator instead of a list, so that you don't load all the inputs into memory at once. See the [pipeline documentation](./main_classes/pipelines) for more information.
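As a minimal sketch reusing the `riconoscitore_vocale` pipeline and `dataset` from above (the `flusso_audio` name is just an illustration), such a generator could look like this:

```py
# A minimal sketch: yield one audio sample at a time so the full dataset
# never has to sit in memory; the pipeline consumes the generator lazily.
def flusso_audio():
    for campione in dataset:
        yield campione["audio"]


for risultato in riconoscitore_vocale(flusso_audio()):
    print(risultato["text"])
```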
### Use another model and tokenizer in the pipeline

The [`pipeline`] can accommodate any model from the [Model Hub](https://huggingface.co/models), making it easy to adapt the [`pipeline`] for other use cases. For example, if you'd like a model capable of handling French text, use the tags on the Model Hub to filter for an appropriate model. The top filtered result returns a multilingual [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) fine-tuned for sentiment analysis. Great, let's use this model!

```py
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
```

<frameworkcontent>
<pt>
Use [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on an `AutoClass` below):

```py
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</pt>
<tf>
Use [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on a `TFAutoClass` below):

```py
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

>>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</tf>
</frameworkcontent>

Then you can specify the model and the tokenizer in the [`pipeline`], and apply the `classifier` on your target text:

```py
>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
>>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.")
[{'label': '5 stars', 'score': 0.7273}]
```

If you can't find a model for your use case, you'll need to fine-tune a pretrained model on your data. Take a look at our [fine-tuning tutorial](./training) to learn how. Finally, after you've fine-tuned your pretrained model, please consider sharing it (see the tutorial [here](./model_sharing)) with the community on the Model Hub to democratize NLP! 🤗

## AutoClass

<Youtube id="AhChOFRegn4"/>

Under the hood, the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] classes work together to power the [`pipeline`]. An [AutoClass](./model_doc/auto) is a shortcut that automatically retrieves the architecture of a pretrained model from its name or path. You only need to select the appropriate `AutoClass` for your task and its associated tokenizer with [`AutoTokenizer`].

Let's return to our example and see how you can use the `AutoClass` to replicate the results of the [`pipeline`].

### AutoTokenizer

A tokenizer is responsible for preprocessing text into a format that is understandable to the model. First, the tokenizer will split the text into words called *tokens*. There are several rules that govern the tokenization process, including how to split a word and at what level (learn more about tokenization [here](./tokenizer_summary)).
The most important thing to remember, though, is that you need to instantiate the tokenizer with the same model name to make sure you're using the same tokenization rules the model was pretrained with.

Load a tokenizer with [`AutoTokenizer`]:

```py
>>> from transformers import AutoTokenizer

>>> nome_del_modello = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(nome_del_modello)
```

Next, the tokenizer converts the tokens into numbers in order to construct a tensor as input to the model. This is known as the model's *vocabulary*.

Pass your text to the tokenizer:

```py
>>> encoding = tokenizer("Siamo molto felici di mostrarti la libreria 🤗 Transformers.")
>>> print(encoding)
{'input_ids': [101, 56821, 10132, 14407, 13019, 13007, 10120, 47201, 10330, 10106, 91686, 100, 58263, 119, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

The tokenizer returns a dictionary containing:

* [input_ids](./glossary#input-ids): numerical representations of your tokens.
* [attention_mask](./glossary#attention-mask): indicates which tokens should be attended to.

Just like the [`pipeline`], the tokenizer will accept a list of inputs. In addition, the tokenizer can also pad and truncate the text to return a batch of uniform length:

<frameworkcontent>
<pt>

```py
>>> pt_batch = tokenizer(
...     ["Siamo molto felici di mostrarti la libreria 🤗 Transformers.", "Speriamo te non la odierai."],
...     padding=True,
...     truncation=True,
...     max_length=512,
...     return_tensors="pt",
... )
```
</pt>
<tf>

```py
>>> tf_batch = tokenizer(
...     ["Siamo molto felici di mostrarti la libreria 🤗 Transformers.", "Speriamo te non la odierai."],
...     padding=True,
...     truncation=True,
...     max_length=512,
...     return_tensors="tf",
... )
```
</tf>
</frameworkcontent>

Read the [preprocessing](./preprocessing) tutorial for more details about tokenization.

### AutoModel

<frameworkcontent>
<pt>
🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`AutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`AutoModel`] for the task at hand. Since you are doing text, or sequence, classification, load [`AutoModelForSequenceClassification`]:

```py
>>> from transformers import AutoModelForSequenceClassification

>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)
```

<Tip>

See the [task summary](./task_summary) for which [`AutoModel`] class to use for which task.

</Tip>

Now you can pass your preprocessed batch of inputs directly to the model. You just have to unpack the dictionary by adding `**`:

```py
>>> pt_outputs = pt_model(**pt_batch)
```

The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:

```py
>>> from torch import nn

>>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
>>> print(pt_predictions)
tensor([[0.0041, 0.0037, 0.0203, 0.2005, 0.7713],
        [0.3766, 0.3292, 0.1832, 0.0558, 0.0552]], grad_fn=<SoftmaxBackward0>)
```
</pt>
<tf>
🤗 Transformers provides a simple and unified way to load pretrained instances.
This means you can load a [`TFAutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`TFAutoModel`] for the task at hand. Since you are doing text, or sequence, classification, load [`TFAutoModelForSequenceClassification`]:

```py
>>> from transformers import TFAutoModelForSequenceClassification

>>> nome_del_modello = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(nome_del_modello)
```

<Tip>

See the [task summary](./task_summary) for which [`AutoModel`] class to use for which task.

</Tip>

Now you can pass your preprocessed batch of inputs directly to the model by passing the dictionary keys to the tensors:

```py
>>> tf_outputs = tf_model(tf_batch)
```

The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:

```py
>>> import tensorflow as tf

>>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
>>> tf_predictions  # doctest: +IGNORE_RESULT
```
</tf>
</frameworkcontent>

<Tip>

All 🤗 Transformers models (PyTorch and TensorFlow) output the tensors *before* the final activation function (like softmax) because the final activation function is often fused with the loss.

</Tip>

Models are standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model), so you can use them in your usual training loop. However, to make things easier, 🤗 Transformers provides a [`Trainer`] class for PyTorch that adds functionality for distributed training, mixed precision, and more. For TensorFlow, you can use the `fit` method from [Keras](https://keras.io/). Refer to the [training tutorial](./training) for more details.

<Tip>

🤗 Transformers model outputs are special dataclasses so their attributes are autocompleted in an IDE. The model outputs also behave like a tuple or a dictionary (e.g., you can index with an integer, a slice, or a string), in which case the attributes that are `None` are ignored.
</Tip>

### Save a model

<frameworkcontent>
<pt>
Once your model is fine-tuned, you can save it with its tokenizer using [`PreTrainedModel.save_pretrained`]:

```py
>>> pt_save_directory = "./pt_save_pretrained"
>>> tokenizer.save_pretrained(pt_save_directory)  # doctest: +IGNORE_RESULT
>>> pt_model.save_pretrained(pt_save_directory)
```

When you are ready to use the model again, reload it with [`PreTrainedModel.from_pretrained`]:

```py
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")
```
</pt>
<tf>
Once your model is fine-tuned, you can save it with its tokenizer using [`TFPreTrainedModel.save_pretrained`]:

```py
>>> tf_save_directory = "./tf_save_pretrained"
>>> tokenizer.save_pretrained(tf_save_directory)  # doctest: +IGNORE_RESULT
>>> tf_model.save_pretrained(tf_save_directory)
```

When you are ready to use the model again, reload it with [`TFPreTrainedModel.from_pretrained`]:

```py
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")
```
</tf>
</frameworkcontent>

One particularly cool 🤗 Transformers feature is the ability to save a model and reload it as either a PyTorch or TensorFlow model. The `from_pt` or `from_tf` parameter can convert a model from one framework to the other:

<frameworkcontent>
<pt>

```py
>>> from transformers import AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
```
</pt>
<tf>

```py
>>> from transformers import TFAutoModel

>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```
</tf>
</frameworkcontent>
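To make the earlier Tip about model outputs concrete, here is a minimal sketch (PyTorch shown) of the equivalent ways to read the same logits tensor:

```py
# A minimal sketch: model outputs are dataclasses that also behave like
# dictionaries and tuples, so all three accessors return the same tensor.
pt_outputs = pt_model(**pt_batch)
assert pt_outputs.logits is pt_outputs["logits"]
assert pt_outputs.logits is pt_outputs[0]
```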
transformers/docs/source/it/quicktour.md/0
{ "file_path": "transformers/docs/source/it/quicktour.md", "repo_id": "transformers", "token_count": 6490 }
326
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Generation with LLMs

[[open-in-colab]]

LLMs, or Large Language Models, are the key component behind text generation. In a nutshell, they are large pretrained transformer models trained to predict the next word (or, more precisely, token) given some input text. Since they predict one token at a time, you need to do something more elaborate than simply calling the model to generate new sentences: you need to do autoregressive generation.

Autoregressive generation is the inference-time procedure of iteratively calling a model with some initial inputs. In 🤗 Transformers, this is handled by the [`~generation.GenerationMixin.generate`] method, which is available to all models with generative capabilities.

This tutorial will show you how to:

* Generate text with an LLM
* Avoid common pitfalls
* Take the next steps to get the most out of your LLM

Before you begin, make sure you have all the necessary libraries installed:

```bash
pip install transformers bitsandbytes>=0.39.0 -q
```

## Generate text

A language model trained for [causal language modeling](tasks/language_modeling) takes a sequence of text tokens as input and returns the probability distribution for the next token.

<!-- [GIF 1 -- FWD PASS] -->
<figure class="image table text-center m-0 w-full">
    <video
        style="max-width: 90%; margin: auto;"
        autoplay loop muted playsinline
        src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov"
    ></video>
    <figcaption>"Forward pass of an LLM"</figcaption>
</figure>

A critical aspect of autoregressive generation with LLMs is how to select the next token from this probability distribution. Anything goes in this step, as long as you end up with a token for the next iteration. This can be as simple as selecting the most likely token from the probability distribution, or as complex as applying a number of transformations before sampling from the resulting distribution.

<!-- [GIF 2 -- TEXT GENERATION] -->
<figure class="image table text-center m-0 w-full">
    <video
        style="max-width: 90%; margin: auto;"
        autoplay loop muted playsinline
        src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_2_1080p.mov"
    ></video>
    <figcaption>"Autoregressive generation iteratively selects the next token from a probability distribution to generate text"</figcaption>
</figure>

The process depicted above is repeated iteratively until some stopping condition is reached. Ideally, the stopping condition is dictated by the model, which should learn when to output an end-of-sequence (`EOS`) token. If this is not the case, generation stops when some predefined maximum length is reached.

Properly setting up the token selection step and the stopping condition is essential to make your model behave as you'd expect on your task. That is why we have a [`~generation.GenerationConfig`] file associated with each model, which contains a good default generative parameterization and is loaded alongside your model.

Let's talk code!

<Tip>

If you're interested in basic LLM usage, our high-level [`Pipeline`](pipeline_tutorial) interface is a great starting point. However, LLMs often require advanced features like quantization and fine control of the token selection step, which is best done through [`~generation.GenerationMixin.generate`]. Autoregressive generation with LLMs is also resource-intensive and should be executed on a GPU for adequate throughput.

</Tip>

<!-- TODO: update example when llama 2 (or a newer popular baseline) is available -->

First, you need to load the model.

```py
>>> from transformers import AutoModelForCausalLM

>>> model = AutoModelForCausalLM.from_pretrained(
...     "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
...
... )
```

You'll notice two flags in the `from_pretrained` call:

- `device_map` moves the model to your GPU(s)
- `load_in_4bit` applies [4-bit dynamic quantization](main_classes/quantization) to massively reduce the resource requirements

There are other ways to initialize a model, but this is a good baseline to begin with an LLM.

Next, you need to preprocess your text input with a [tokenizer](tokenizer_summary).

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")
```

The `model_inputs` variable holds the tokenized text input, as well as the attention mask. While [`~generation.GenerationMixin.generate`] does its best effort to infer the attention mask when it is not passed, we recommend passing it whenever possible for optimal results.

Finally, call the [`~generation.GenerationMixin.generate`] method to return the generated tokens, which should be converted to text before printing.

```py
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A list of colors: red, blue, green, yellow, black, white, and brown'
```

And that's it! In a few lines of code, you can harness the power of an LLM.

## Common pitfalls

There are many [generation strategies](generation_strategies), and sometimes the default values may not be appropriate for your use case. If your outputs aren't aligned with what you're expecting, we've created a list of the most common pitfalls and how to avoid them.

```py
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> tokenizer.pad_token = tokenizer.eos_token  # Llama has no pad token by default
>>> model = AutoModelForCausalLM.from_pretrained(
...     "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
... )
```

### Generated output is too short/long

If not specified in the [`~generation.GenerationConfig`] file, `generate` returns up to 20 tokens by default. We highly recommend manually setting `max_new_tokens` in your `generate` call to control the maximum number of new tokens it can return. Keep in mind LLMs (more precisely, [decoder-only models](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt)) also return the input prompt as part of the output.

```py
>>> model_inputs = tokenizer(["A sequence of numbers: 1, 2"], return_tensors="pt").to("cuda")

>>> # By default, the output will contain up to 20 tokens
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5'

>>> # Setting `max_new_tokens` allows you to control the maximum length
>>> generated_ids = model.generate(**model_inputs, max_new_tokens=50)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,'
```

### Incorrect generation mode

By default, and unless specified in the [`~generation.GenerationConfig`] file, `generate` selects the most likely token at each iteration (greedy decoding). Depending on your task, this may be undesirable; creative tasks like chatbots or writing an essay benefit from sampling. On the other hand, input-grounded tasks like audio transcription or translation benefit from greedy decoding. Enable sampling with `do_sample=True`, and you can learn more about this topic in this [blog post](https://huggingface.co/blog/how-to-generate).

```py
>>> # Set seed for reproducibility -- you don't need this unless you want full reproducibility
>>> from transformers import set_seed
>>> set_seed(0)

>>> model_inputs = tokenizer(["I am a cat."], return_tensors="pt").to("cuda")

>>> # LLM + greedy decoding = repetitive, boring output
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat. I am a cat. I am a cat. I am a cat'

>>> # With sampling, the output becomes more creative!
>>> generated_ids = model.generate(**model_inputs, do_sample=True)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat.\nI just need to be.
I am always.\nEvery time'
```

### Wrong padding side

LLMs are [decoder-only](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt) architectures, meaning they continue to iterate on your input prompt. If your inputs do not have the same length, they need to be padded. Since LLMs are not trained to continue from pad tokens, your input needs to be left-padded. Make sure you also don't forget to pass the attention mask to generate!

```py
>>> # The tokenizer initialized above has right-padding active by default: the 1st sequence,
>>> # which is shorter, has padding on the right side. Generation fails.
>>> model_inputs = tokenizer(
...     ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids[0], skip_special_tokens=True)[0]
''

>>> # With left-padding, it works as expected!
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b", padding_side="left")
>>> tokenizer.pad_token = tokenizer.eos_token  # Llama has no pad token by default
>>> model_inputs = tokenizer(
...     ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'1, 2, 3, 4, 5, 6,'
```

## Further resources

While the autoregressive generation process is relatively straightforward, making the most out of your LLM can be a challenging endeavor because there are many moving parts. For your next steps to dive deeper into LLM usage and understanding, see the resources below.

<!-- TODO: complete with new guides -->

### Advanced generate usage

1. [Guide](generation_strategies) on how to control different generation methods, how to set up the generation configuration file, and how to stream the output;
2. API reference on [`~generation.GenerationConfig`], [`~generation.GenerationMixin.generate`], and [generate-related classes](internal/generation_utils).

### LLM leaderboards

1. [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), which focuses on the quality of open-source models;
2. [Open LLM-Perf Leaderboard](https://huggingface.co/spaces/optimum/llm-perf-leaderboard), which focuses on LLM throughput.

### Latency and throughput

1. [Guide](main_classes/quantization) on dynamic quantization, which shows you how to drastically reduce your memory requirements.

### Related libraries

1. [`text-generation-inference`](https://github.com/huggingface/text-generation-inference), a production-ready server for LLMs;
2. [`optimum`](https://github.com/huggingface/optimum), an extension of 🤗 Transformers that optimizes for specific hardware devices.
transformers/docs/source/ja/llm_tutorial.md/0
{ "file_path": "transformers/docs/source/ja/llm_tutorial.md", "repo_id": "transformers", "token_count": 5622 }
327
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Quantize 🤗 Transformers models

## `AutoGPTQ` Integration

🤗 Transformers has integrated the `optimum` API to perform GPTQ quantization on language models. You can load and quantize your models in 8, 4, 3, or even 2 bits without a significant drop in performance and with faster inference speed! This is supported by most GPU hardware.

To learn more about quantized models, check out:
- the [GPTQ](https://arxiv.org/pdf/2210.17323.pdf) paper
- the `optimum` [guide](https://huggingface.co/docs/optimum/llm_quantization/usage_guides/quantization) on GPTQ quantization
- the [`AutoGPTQ`](https://github.com/PanQiWei/AutoGPTQ) library used as the backend

### Requirements

You need to have the following requirements installed to run the code below:

- Install the latest `AutoGPTQ` library: `pip install auto-gptq`.
- Install the latest `optimum` from source: `pip install git+https://github.com/huggingface/optimum.git`.
- Install the latest `transformers` from source: `pip install git+https://github.com/huggingface/transformers.git`.
- Install the latest `accelerate` library: `pip install --upgrade accelerate`.

Note that the GPTQ integration supports only text models for now, and you may encounter unexpected behavior with vision, speech, or multimodal models.

### Load and quantize a model

GPTQ is a quantization method that requires weight calibration before using the quantized model. If you want to quantize a transformers model from scratch, it can take some time to produce the quantized model (around 5 minutes on a Google Colab for the `facebook/opt-350m` model).

Hence, there are two scenarios for using a GPTQ-quantized model. The first use case is to load a model that has already been quantized by other users and is available on the Hub; the second use case is to quantize a model from scratch and then save it or push it to the Hub so that other users can use it too.

#### GPTQ Configuration

In order to load and quantize a model, you need to create a [`GPTQConfig`]. You need to pass the number of `bits`, a `dataset` to calibrate the quantization, and the model's `Tokenizer` to prepare the dataset.

```python
model_id = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
gptq_config = GPTQConfig(bits=4, dataset = "c4", tokenizer=tokenizer)
```

Note that you can pass your own dataset as a list of strings. However, it is highly recommended to use the dataset from the GPTQ paper.

```python
dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."]
quantization = GPTQConfig(bits=4, dataset = dataset, tokenizer=tokenizer)
```

#### Quantization

You can quantize a model by using `from_pretrained` and setting the `quantization_config`.

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=gptq_config)
```

Note that you will need a GPU to quantize a model. We put the model on the CPU and move the modules back and forth between the GPU and CPU in order to quantize them.

If you want to maximize your GPU usage while using CPU offload, you can set `device_map = "auto"`.

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config)
```

Note that disk offload is not supported. Furthermore, if you run out of memory because of the dataset, you may have to pass `max_memory` in `from_pretrained`. Check out this [guide](https://huggingface.co/docs/accelerate/usage_guides/big_modeling#designing-a-device-map) to learn more about `device_map` and `max_memory`.

<Tip warning={true}>
GPTQ quantization only works for text models at this time. Furthermore, the quantization process can take a long time depending on your hardware (175B model = 4 gpu hours using an NVIDIA A100). Please check on the Hub whether a GPTQ-quantized version of the model already exists. If not, you can submit a request on github.
</Tip>
### Push quantized model to 🤗 Hub

You can push the quantized model, like any 🤗 model, to the Hub with `push_to_hub`. The quantization config is saved and pushed along with the model.

```python
quantized_model.push_to_hub("opt-125m-gptq")
tokenizer.push_to_hub("opt-125m-gptq")
```

If you want to save your quantized model on your local machine, you can also do it with `save_pretrained`:

```python
quantized_model.save_pretrained("opt-125m-gptq")
tokenizer.save_pretrained("opt-125m-gptq")
```

Note that if you have quantized your model with a `device_map`, make sure to move the entire model to one of your GPUs or to the `cpu` before saving it.

```python
quantized_model.to("cpu")
quantized_model.save_pretrained("opt-125m-gptq")
```

### Load a quantized model from the 🤗 Hub

You can load a quantized model from the Hub by using `from_pretrained`. Make sure that the pushed weights are quantized by checking that the attribute `quantization_config` is present in the model configuration object.

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq")
```

If you want to load a model faster and without allocating more memory than needed, the `device_map` argument also works with quantized models. Make sure that you have the `accelerate` library installed.

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto")
```

### Exllama kernels for faster inference

For 4-bit models, you can use the exllama kernels for faster inference speed. They are enabled by default. You can change that behavior by passing `disable_exllama` in [`GPTQConfig`]. This overwrites the quantization config stored in the config. Note that you can only overwrite the attributes related to the kernels. Furthermore, you need to have the entire model on a GPU if you want to use the exllama kernels.

```py
import torch
gptq_config = GPTQConfig(bits=4, disable_exllama=False)
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config)
```

Note that only 4-bit models are supported for now. Furthermore, it is recommended to deactivate the exllama kernels if you are fine-tuning a quantized model with peft.

#### Fine-tune a quantized model

With the official support for adapters in the Hugging Face ecosystem, you can fine-tune models that have been quantized with GPTQ. Please have a look at the [`peft`](https://github.com/huggingface/peft) library for more details.

### Example demo

Check out the Google Colab [notebook](https://colab.research.google.com/drive/1_TIrmuKOFhuRRiTWN94iLKUFu6ZX4ceb?usp=sharing) to learn how to quantize your model with GPTQ and how to fine-tune the quantized model with peft.

### GPTQConfig

[[autodoc]] GPTQConfig

## `bitsandbytes` Integration

🤗 Transformers is closely integrated with the most commonly used modules of `bitsandbytes`. You can load your model in 8-bit precision with a few lines of code. This is supported by most GPU hardware since the `0.37.0` release of `bitsandbytes`.

Learn more about the quantization method in the [LLM.int8()](https://arxiv.org/abs/2208.07339) paper, or the [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration) about the collaboration.

Since its `0.39.0` release, you can load any model that supports `device_map` using 4-bit quantization, leveraging the FP4 data type.

If you want to quantize your own pytorch model, check out this [documentation](https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization) from the 🤗 Accelerate library.

Here is what you can do using the `bitsandbytes` integration.

### General usage

You can quantize a model by using the `load_in_8bit` or `load_in_4bit` argument when calling the [`~PreTrainedModel.from_pretrained`] method, as long as your model supports loading with 🤗 Accelerate and contains `torch.nn.Linear` layers. This should work for any modality as well.

```python
from transformers import AutoModelForCausalLM

model_8bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True)
model_4bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_4bit=True)
```

By default, all the other modules (e.g. `torch.nn.LayerNorm`) are converted to `torch.float16`, but if you want to change their `dtype`, you can overwrite the `torch_dtype` argument:

```python
>>> import torch
>>> from transformers import AutoModelForCausalLM

>>> model_8bit =
AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True, torch_dtype=torch.float32)
>>> model_8bit.model.decoder.layers[-1].final_layer_norm.weight.dtype
torch.float32
```

### FP4 quantization

#### Requirements

Make sure that you have installed the requirements below before running any of the following code snippets.

- Latest `bitsandbytes` library: `pip install bitsandbytes>=0.39.0`
- Install the latest `accelerate`: `pip install --upgrade accelerate`
- Install the latest `transformers`: `pip install --upgrade transformers`

#### Tips and best practices

- **Advanced usage:** Refer to [this Google Colab notebook](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf) for advanced usage of 4-bit quantization with all the possible options.

- **Faster inference with `batch_size=1`:** Since the `0.40.0` release of bitsandbytes, you can benefit from fast inference for `batch_size=1`. Check out [these release notes](https://github.com/TimDettmers/bitsandbytes/releases/tag/0.40.0) and make sure you use a version later than `0.40.0` to benefit from this feature out of the box.

- **Training:** According to the [QLoRA paper](https://arxiv.org/abs/2305.14314), for training 4-bit base models (e.g. using LoRA adapters), you should use `bnb_4bit_quant_type='nf4'`.

- **Inference:** For inference, `bnb_4bit_quant_type` does not have a big impact on performance. However, for consistency with the model's weights, make sure you use the same `bnb_4bit_compute_dtype` and `torch_dtype` arguments.

#### Load a large model in 4bit

By using `load_in_4bit=True` when calling the `.from_pretrained` method, you can divide your memory use by (roughly) 4:

```python
# pip install transformers accelerate bitsandbytes
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "bigscience/bloom-1b7"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
```

<Tip warning={true}>

Note that once a model has been loaded in 4-bit, it is currently not possible to push the quantized weights to the Hub. Note also that you cannot train 4-bit weights, as this is not supported yet. However, you can use 4-bit models to train extra parameters; this will be covered in the next section.

</Tip>

### Load a large model in 8bit

You can load a model with roughly half the memory requirements by using the `load_in_8bit=True` argument when calling the `.from_pretrained` method.

```python
# pip install transformers accelerate bitsandbytes
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "bigscience/bloom-1b7"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
```

Then, use your model as you would usually use a [`PreTrainedModel`].

You can check your model's memory footprint with the `get_memory_footprint` method:

```python
print(model.get_memory_footprint())
```

With this integration, we are able to load large models on smaller devices and run them without any issue.

<Tip warning={true}>

Note that once a model has been loaded in 8-bit, it is currently not possible to push the quantized weights to the Hub, unless you use the latest `transformers` and `bitsandbytes`. Note also that you cannot train 8-bit weights, as this is not supported yet. However, you can use 8-bit models to train extra parameters; this will be covered in the next section.

Also note that `device_map` is optional, but setting `device_map = 'auto'` is recommended for inference, as it dispatches the model efficiently on the available resources.

</Tip>

#### Advanced use cases

This section covers some advanced use cases you can perform with FP4 quantization.

##### Change the compute dtype

The compute dtype is used to change the dtype that will be used during computation. For example, hidden states could be in `float32`, but computation can be set to bf16 for speedups. By default, the compute dtype is set to `float32`.

```python
import torch
from transformers import BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
```

##### Using NF4 (Normal Float 4) data type

You can also use the NF4 data type, which is a new 4-bit data type adapted for weights that have been initialized using a normal distribution. To do that, run:

```python
from transformers import BitsAndBytesConfig

nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
)

model_nf4 = AutoModelForCausalLM.from_pretrained(model_id,
quantization_config=nf4_config)
```

##### Use nested quantization for more memory efficient inference

We also advise you to use the nested quantization technique. It saves more memory at no additional performance cost. From our empirical observations, this enables fine-tuning a llama-13b model on an NVIDIA-T4 16GB with a sequence length of 1024, a batch size of 1, and 4 gradient accumulation steps.

```python
from transformers import BitsAndBytesConfig

double_quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
)

model_double_quant = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=double_quant_config)
```

### Push quantized models on the 🤗 Hub

You can push a quantized model to the Hub by simply using the `push_to_hub` method. This first pushes the quantization configuration file, then pushes the quantized model weights.

Make sure to use `bitsandbytes>0.37.2` (at the time of writing, we tested it on `bitsandbytes==0.38.0.post1`) to be able to use this feature.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", quantization_config=BitsAndBytesConfig(load_in_8bit=True))
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")

model.push_to_hub("bloom-560m-8bit")
```

<Tip warning={true}>

Pushing 8-bit models to the Hub is strongly encouraged for large models. This allows the community to benefit from the reduced memory footprint, and, for example, from loading large models on Google Colab.

</Tip>

### Load a quantized model from the 🤗 Hub

You can load a quantized model from the Hub by using the `from_pretrained` method. Make sure that the pushed weights are quantized by checking that the attribute `quantization_config` is present in the model configuration object.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("{your_username}/bloom-560m-8bit", device_map="auto")
```

Note that in this case, you don't need to specify the argument `load_in_8bit=True`, but you need to make sure that `bitsandbytes` and `accelerate` are installed.

Also note that `device_map` is optional, but setting `device_map = 'auto'` is recommended for inference, as it dispatches the model efficiently on the available resources.

### Advanced use cases

This section is intended for advanced users who want to explore what is possible beyond loading and running 8-bit models.

#### Offload between `cpu` and `gpu`

One of these advanced use cases is being able to load a model and dispatch the weights between the `CPU` and `GPU`. Note that the weights that will be dispatched on the CPU **will not be converted to 8-bit**, and are thus kept in `float32`. This feature is intended for users who want to fit a very large model and dispatch the model between the GPU and CPU.

First, load a [`BitsAndBytesConfig`] from `transformers` and set the attribute `llm_int8_enable_fp32_cpu_offload` to `True`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True)
```

Let's say you want to load the `bigscience/bloom-1b7` model and you have just enough GPU RAM to fit the entire model except the `lm_head`. Therefore, write a custom device_map as follows:

```python
device_map = {
    "transformer.word_embeddings": 0,
    "transformer.word_embeddings_layernorm": 0,
    "lm_head": "cpu",
    "transformer.h": 0,
    "transformer.ln_f": 0,
}
```

And load your model as follows:

```python
model_8bit = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7",
    device_map=device_map,
    quantization_config=quantization_config,
)
```

And that's it! Enjoy your model!
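As a minimal sketch of using the offloaded model (the prompt is just an illustration), inference works the same as with any other model:

```python
# A minimal sketch: running generation with the partially offloaded 8-bit model.
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
inputs = tokenizer("Hello, my name is", return_tensors="pt")
outputs = model_8bit.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```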
#### Play with `llm_int8_threshold`

You can play with the `llm_int8_threshold` argument to change the threshold for outliers. An outlier is a hidden state value whose magnitude exceeds a certain threshold. This corresponds to the outlier threshold for outlier detection as described in the `LLM.int8()` paper. Any hidden state value above this threshold is considered an outlier, and the operations on those values are performed in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently distributed for large models. These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of magnitude up to around 5, but beyond that there is a significant performance penalty. A good default threshold is 6, but a lower threshold may be needed for more unstable models (small models, fine-tuning).

This argument can affect the model's inference speed. We suggest playing with this parameter to find the one that best fits your use case.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "bigscience/bloom-1b7"

quantization_config = BitsAndBytesConfig(
    llm_int8_threshold=10,
)

model_8bit = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map=device_map,
    quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```

#### Skip the conversion of some modules

Some models have several modules that need to not be converted to 8-bit to ensure stability. For example, the Jukebox model has several `lm_head` modules that should be skipped. Play with `llm_int8_skip_modules`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "bigscience/bloom-1b7"

quantization_config = BitsAndBytesConfig(
    llm_int8_skip_modules=["lm_head"],
)

model_8bit = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map=device_map,
    quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```

#### Fine-tune a model that has been loaded in 8-bit

With the official support for adapters in the Hugging Face ecosystem, you can fine-tune models that have been loaded in 8-bit. This enables fine-tuning large models such as `flan-t5-large` or `facebook/opt-6.7b` in a single Google Colab. Please have a look at the [`peft`](https://github.com/huggingface/peft) library for more details.

Note that you don't need to pass `device_map` when loading the model for training. It will automatically load your model on your GPU. You can also set the device map to a specific device if needed (e.g. `cuda:0`, `0`, `torch.device('cuda:0')`). Please note that `device_map=auto` should be used for inference only.

### BitsAndBytesConfig

[[autodoc]] BitsAndBytesConfig

## Quantization with 🤗 `optimum`

Please have a look at the [Optimum documentation](https://huggingface.co/docs/optimum/index) to learn more about the quantization methods supported by `optimum` and see whether they are applicable to your use case.
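To give the fine-tuning section above a concrete shape, here is a minimal sketch assuming the `peft` library is installed; the LoRA hyperparameters and the `query_key_value` target module are illustrative choices for a BLOOM-style model, not prescriptions:

```python
# A minimal sketch, assuming `peft`: attach a LoRA adapter to a model loaded
# in 8-bit so that only the small adapter weights are trained.
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

model = prepare_model_for_kbit_training(model)  # freezes base weights, prepares for k-bit training
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    target_modules=["query_key_value"],  # illustrative choice for BLOOM-like attention layers
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the LoRA parameters are trainable
```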
transformers/docs/source/ja/main_classes/quantization.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/quantization.md", "repo_id": "transformers", "token_count": 10651 }
328
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# BertJapanese

## Overview

The BERT models trained on Japanese text.

There are models with two different tokenization methods:

- Tokenize with MeCab and WordPiece. This requires an additional dependency, [fugashi](https://github.com/polm/fugashi), which is a wrapper around [MeCab](https://taku910.github.io/mecab/).
- Tokenize into characters.

To use *MecabTokenizer*, you should `pip install transformers["ja"]` (or `pip install -e .["ja"]` if you install from source) to install the dependencies.

See the [details on the cl-tohoku repository](https://github.com/cl-tohoku/bert-japanese).

Example of using a model with MeCab and WordPiece tokenization:

```python
>>> import torch
>>> from transformers import AutoModel, AutoTokenizer

>>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese")
>>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")

>>> ## Input Japanese Text
>>> line = "吾輩は猫である。"

>>> inputs = tokenizer(line, return_tensors="pt")

>>> print(tokenizer.decode(inputs["input_ids"][0]))
[CLS] 吾輩 は 猫 で ある 。 [SEP]

>>> outputs = bertjapanese(**inputs)
```

Example of using a model with character tokenization:

```python
>>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese-char")
>>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-char")

>>> ## Input Japanese Text
>>> line = "吾輩は猫である。"

>>> inputs = tokenizer(line, return_tensors="pt")

>>> print(tokenizer.decode(inputs["input_ids"][0]))
[CLS] 吾 輩 は 猫 で あ る 。 [SEP]

>>> outputs = bertjapanese(**inputs)
```

<Tip>

- This implementation is the same as BERT, except for the tokenization method. Refer to the [BERT documentation](bert) for other usage examples.

</Tip>

This model was contributed by [cl-tohoku](https://huggingface.co/cl-tohoku).

## BertJapaneseTokenizer

[[autodoc]] BertJapaneseTokenizer
transformers/docs/source/ja/model_doc/bert-japanese.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bert-japanese.md", "repo_id": "transformers", "token_count": 1114 }
329
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# CamemBERT

## Overview

CamemBERT モデルは、Louis Martin, Benjamin Muller, Pedro Javier Ortiz Suárez, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah, Benoît Sagot によって [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) で提案されました。これは、2019 年にリリースされた Facebook の RoBERTa モデルをベースにし、138GB のフランス語テキストでトレーニングされたモデルです。

論文の要約は次のとおりです。

*事前トレーニングされた言語モデルは現在、自然言語処理で広く普及しています。その成功にもかかわらず、利用可能なほとんどのモデルは英語のデータ、または複数言語のデータの連結でトレーニングされています。これにより、このようなモデルの実際の使用は、英語を除くすべての言語で非常に限られています。フランス語についてこの問題に対処することを目指して、Bi-directional Encoders for Transformers (BERT) のフランス語版である CamemBERT をリリースします。複数の下流タスク、すなわち品詞タグ付け、依存関係解析、固有表現認識、自然言語推論において、多言語モデルと比較した CamemBERT のパフォーマンスを測定します。CamemBERT は、検討対象のほとんどのタスクで最先端の結果を向上させます。フランス語 NLP の研究と下流アプリケーションの促進を願って、CamemBERT の事前学習済みモデルを公開します。*

このモデルは [camembert](https://huggingface.co/camembert) によって提供されました。元のコードは [ここ](https://camembert-model.fr/) にあります。

<Tip>

この実装は RoBERTa と同じです。入力と出力に関する情報については、[RoBERTa のドキュメント](roberta) も参照してください。

</Tip>

## Resources

- [テキスト分類タスクガイド](../tasks/sequence_classification)
- [トークン分類タスクガイド](../tasks/token_classification)
- [質問回答タスク ガイド](../tasks/question_answering)
- [因果言語モデリング タスク ガイド](../tasks/language_modeling)
- [マスク言語モデリング タスク ガイド](../tasks/masked_language_modeling)
- [多肢選択タスク ガイド](../tasks/multiple_choice)

## CamembertConfig

[[autodoc]] CamembertConfig

## CamembertTokenizer

[[autodoc]] CamembertTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## CamembertTokenizerFast

[[autodoc]] CamembertTokenizerFast

<frameworkcontent>
<pt>

## CamembertModel

[[autodoc]] CamembertModel

## CamembertForCausalLM

[[autodoc]] CamembertForCausalLM

## CamembertForMaskedLM

[[autodoc]] CamembertForMaskedLM

## CamembertForSequenceClassification

[[autodoc]] CamembertForSequenceClassification

## CamembertForMultipleChoice

[[autodoc]] CamembertForMultipleChoice

## CamembertForTokenClassification

[[autodoc]] CamembertForTokenClassification

## CamembertForQuestionAnswering

[[autodoc]] CamembertForQuestionAnswering

</pt>
<tf>

## TFCamembertModel

[[autodoc]] TFCamembertModel

## TFCamembertForCausalLM

[[autodoc]] TFCamembertForCausalLM

## TFCamembertForMaskedLM

[[autodoc]] TFCamembertForMaskedLM

## TFCamembertForSequenceClassification

[[autodoc]] TFCamembertForSequenceClassification

## TFCamembertForMultipleChoice

[[autodoc]] TFCamembertForMultipleChoice

## TFCamembertForTokenClassification

[[autodoc]] TFCamembertForTokenClassification

## TFCamembertForQuestionAnswering

[[autodoc]] TFCamembertForQuestionAnswering

</tf>
</frameworkcontent>
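参考までに、CamemBERT をマスク言語モデリング (fill-mask) に使う簡単な例を示します。使用するチェックポイント (`camembert-base`) は一例です。

```python
from transformers import pipeline

# フランス語のマスク補完の例 (チェックポイントは一例)
camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
results = camembert_fill_mask("Le camembert est <mask> :)")
for r in results:
    print(r["token_str"], r["score"])
```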
transformers/docs/source/ja/model_doc/camembert.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/camembert.md", "repo_id": "transformers", "token_count": 1743 }
330
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Convolutional Vision Transformer (CvT)

## Overview

CvT モデルは、Haiping Wu、Bin Xiao、Noel Codella、Mengchen Liu、Xiyang Dai、Lu Yuan、Lei Zhang によって [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) で提案されました。畳み込みビジョン トランスフォーマー (CvT) は、ViT に畳み込みを導入して両方の設計の長所を引き出すことにより、[ビジョン トランスフォーマー (ViT)](vit) のパフォーマンスと効率を向上させます。

論文の要約は次のとおりです。

*この論文では、ビジョン トランスフォーマー (ViT) を改善する、畳み込みビジョン トランスフォーマー (CvT) と呼ばれる新しいアーキテクチャを紹介します。CvT は、ViT に畳み込みを導入して両方の設計の長所を引き出すことで、パフォーマンスと効率を向上させます。これは、次の 2 つの主要な変更によって実現されます。すなわち、新しい畳み込みトークン埋め込みを含むトランスフォーマーの階層構造と、畳み込み射影を利用した畳み込みトランスフォーマー ブロックです。これらの変更により、トランスフォーマーの利点 (動的なアテンション、グローバルなコンテキスト、より良い汎化) を維持しながら、畳み込みニューラル ネットワーク (CNN) の望ましい特性が導入されます。私たちは広範な実験を実施することで CvT を検証し、このアプローチが ImageNet-1k 上で、他のビジョン トランスフォーマーや ResNet よりも少ないパラメータと低い FLOP で最先端のパフォーマンスを達成することを示します。加えて、より大きなデータセット (例: ImageNet-22k) で事前トレーニングし、下流のタスクに合わせて微調整しても、パフォーマンスの向上が維持されます。ImageNet-22k で事前トレーニングされた当社の CvT-W24 は、ImageNet-1k val set で 87.7\% というトップ 1 の精度を獲得しています。最後に、私たちの結果は、既存のビジョン トランスフォーマーの重要なコンポーネントである位置エンコーディングをこのモデルでは安全に削除できることを示しており、高解像度のビジョン タスクの設計が簡素化されます。*

このモデルは [anugunj](https://huggingface.co/anugunj) によって提供されました。元のコードは [ここ](https://github.com/microsoft/CvT) にあります。

## Usage tips

- CvT モデルは通常の Vision Transformer ですが、畳み込みを用いてトレーニングされています。ImageNet-1K および CIFAR-100 で微調整すると、[オリジナル モデル (ViT)](vit) よりも優れたパフォーマンスを発揮します。
- カスタム データの微調整だけでなく推論に関するデモ ノートブックも [ここ](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) で確認できます ([`ViTFeatureExtractor`] を [`AutoImageProcessor`] に、[`ViTForImageClassification`] を [`CvtForImageClassification`] に置き換えるだけで済みます)。
- 利用可能なチェックポイントは、(1) [ImageNet-22k](http://www.image-net.org/) (1,400 万の画像と 22,000 クラスのコレクション) でのみ事前トレーニングされたもの、(2) ImageNet-22k でさらに微調整されたもの、(3) [ImageNet-1k](http://www.image-net.org/challenges/LSVRC/2012/) (ILSVRC 2012 とも呼ばれる、130 万の画像と 1,000 クラスのコレクション) でさらに微調整されたもの、のいずれかです。

## Resources

CvT を始めるのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示される) リソースのリスト。

<PipelineTag pipeline="image-classification"/>

- [`CvtForImageClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb) でサポートされています。
- 参照: [画像分類タスク ガイド](../tasks/image_classification)

ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。

## CvtConfig

[[autodoc]] CvtConfig

<frameworkcontent>
<pt>

## CvtModel

[[autodoc]] CvtModel
    - forward

## CvtForImageClassification

[[autodoc]] CvtForImageClassification
    - forward

</pt>
<tf>

## TFCvtModel

[[autodoc]] TFCvtModel
    - call

## TFCvtForImageClassification

[[autodoc]] TFCvtForImageClassification
    - call

</tf>
</frameworkcontent>
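参考として、ImageNet-1k で微調整されたチェックポイントを使った画像分類の最小例を以下に示します (`microsoft/cvt-13` というチェックポイントと画像 URL は説明用の一例です)。

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, CvtForImageClassification

# 説明用のサンプル画像を読み込みます
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# 1,000 個の ImageNet クラスのうち、最も確率の高いクラスを表示します
predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```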
transformers/docs/source/ja/model_doc/cvt.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/cvt.md", "repo_id": "transformers", "token_count": 2379 }
331
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Padding and truncation バッチ入力はしばしば異なる長さであり、固定サイズのテンソルに変換できないため、変動する長さのバッチから長方形のテンソルを作成するための戦略として、パディングと切り詰めがあります。パディングは、短いシーケンスがバッチ内の最長シーケンスまたはモデルが受け入れる最大長と同じ長さになるように、特別な**パディングトークン**を追加します。切り詰めは、長いシーケンスを切り詰めることで逆方向に機能します。 ほとんどの場合、バッチを最長シーケンスの長さにパディングし、モデルが受け入れる最大長に切り詰めることで、うまく動作します。ただし、APIはそれ以上の戦略もサポートしています。必要な3つの引数は次のとおりです:`padding`、`truncation`、および `max_length`。 `padding`引数はパディングを制御します。ブール値または文字列であることができます: - `True`または`'longest'`:バッチ内の最長シーケンスにパディングを追加します(シーケンスが1つしか提供されない場合、パディングは適用されません)。 - `max_length'`:`max_length`引数で指定された長さまでパディングを追加します。または`max_length`が提供されていない場合はモデルが受け入れる最大長(`max_length=None`)。シーケンスが1つしか提供されている場合でも、パディングは適用されます。 - `False`または`'do_not_pad'`:パディングは適用されません。これがデフォルトの動作です。 `truncation`引数は切り詰めを制御します。ブール値または文字列であることができます: - `True`または`'longest_first'`:最大長を`max_length`引数で指定するか、モデルが受け入れる最大長(`max_length=None`)まで切り詰めます。これはトークンごとに切り詰め、適切な長さに達するまでペア内の最長シーケンスからトークンを削除します。 - `'only_second'`:最大長を`max_length`引数で指定するか、モデルが受け入れる最大長(`max_length=None`)まで切り詰めます。これはペアの2番目の文だけを切り詰めます(シーケンスのペアまたはシーケンスのバッチのペアが提供された場合)。 - `'only_first'`:最大長を`max_length`引数で指定するか、モデルが受け入れる最大長(`max_length=None`)まで切り詰めます。これはペアの最初の文だけを切り詰めます(シーケンスのペアまたはシーケンスのバッチのペアが提供された場合)。 - `False`または`'do_not_truncate'`:切り詰めは適用されません。これがデフォルトの動作です。 `max_length`引数はパディングと切り詰めの長さを制御します。整数または`None`であり、この場合、モデルが受け入れる最大入力長にデフォルトで設定されます。モデルに特定の最大入力長がない場合、`max_length`への切り詰めまたはパディングは無効になります。 以下の表は、パディングと切り詰めを設定する推奨方法を要約しています。以下の例のいずれかで入力シーケンスのペアを使用する場合、`truncation=True`を`['only_first', 'only_second', 'longest_first']`で選択した`STRATEGY`に置き換えることができます。つまり、`truncation='only_second'`または`truncation='longest_first'`を使用して、ペア内の両方のシーケンスを前述のように切り詰める方法を制御できます。 | Truncation | Padding | Instruction | |--------------------------------------|-----------------------------------|---------------------------------------------------------------------------------------------| | no truncation | no padding | `tokenizer(batch_sentences)` | | | padding to max sequence in batch | `tokenizer(batch_sentences, padding=True)` or | | | | `tokenizer(batch_sentences, padding='longest')` | | | padding to max model input length | `tokenizer(batch_sentences, padding='max_length')` | | | padding to specific length | `tokenizer(batch_sentences, padding='max_length', max_length=42)` | | | padding to a multiple of a value | `tokenizer(batch_sentences, padding=True, pad_to_multiple_of=8)` | | truncation to max model input length | no padding | `tokenizer(batch_sentences, truncation=True)` or | | | | `tokenizer(batch_sentences, truncation=STRATEGY)` | | | padding to max sequence in batch | `tokenizer(batch_sentences, padding=True, truncation=True)` or | | | | `tokenizer(batch_sentences, padding=True, truncation=STRATEGY)` | | | padding to max model input length | `tokenizer(batch_sentences, padding='max_length', truncation=True)` or | | | | 
`tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY)` | | | padding to specific length | Not possible | | truncation to specific length | no padding | `tokenizer(batch_sentences, truncation=True, max_length=42)` or | | | | `tokenizer(batch_sentences, truncation=STRATEGY, max_length=42)` | | | padding to max sequence in batch | `tokenizer(batch_sentences, padding=True, truncation=True, max_length=42)` or | | | | `tokenizer(batch_sentences, padding=True, truncation=STRATEGY, max_length=42)` | | | padding to max model input length | Not possible | | | padding to specific length | `tokenizer(batch_sentences, padding='max_length', truncation=True, max_length=42)` or | | | | `tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY, max_length=42)` |
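参考までに、上記の表のいくつかの組み合わせを実際に確認できる簡単な例を示します (`bert-base-uncased` というチェックポイントは一例です)。

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # チェックポイントは一例
batch_sentences = [
    "But what about second breakfast?",
    "Don't think he knows about second breakfast, Pip.",
]

# バッチ内の最長シーケンスにパディング
encoded = tokenizer(batch_sentences, padding=True, return_tensors="pt")
print(encoded["input_ids"].shape)

# 指定した長さに切り詰めてからパディング
encoded = tokenizer(batch_sentences, padding="max_length", truncation=True, max_length=8, return_tensors="pt")
print(encoded["input_ids"].shape)  # (2, 8)
```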
transformers/docs/source/ja/pad_truncation.md/0
{ "file_path": "transformers/docs/source/ja/pad_truncation.md", "repo_id": "transformers", "token_count": 4228 }
332
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Perplexity of fixed-length models [[open-in-colab]] パープレキシティ(PPL)は言語モデルの評価に最も一般的な指標の1つです。深入りする前に、この指標は特に古典的な言語モデル(時にはオートレグレッシブまたは因果言語モデルとも呼ばれる)に適用され、BERTなどのマスクされた言語モデルには適していないことに注意すべきです(モデルの概要を参照してください[モデルの概要](model_summary))。 パープレキシティは、シーケンスの指数平均負の対数尤度として定義されます。トークン化されたシーケンス \\(X = (x_0, x_1, \dots, x_t)\\) がある場合、\\(X\\) のパープレキシティは次のように表されます。 $$\text{PPL}(X) = \exp \left\{ {-\frac{1}{t}\sum_i^t \log p_\theta (x_i|x_{<i}) } \right\}$$ ここで、\\(\log p_\theta (x_i|x_{<i})\\) はモデルによる前のトークン \\(x_{<i}\\) に対する第iトークンの対数尤度です。直感的には、これはモデルがコーパス内の指定されたトークンの集合に対して一様に予測する能力の評価と考えることができます。重要なのは、これによってトークン化手法がモデルのパープレキシティに直接影響を与えるため、異なるモデルを比較する際には常に考慮すべきであるということです。 これはまた、データとモデルの予測との間の交差エントロピーの指数化と同等です。パープレキシティおよびビット・パー・キャラクター(BPC)とデータ圧縮との関係についての詳細な情報については、この[素晴らしい The Gradient のブログ記事](https://thegradient.pub/understanding-evaluation-metrics-for-language-models/)を参照してください。 ## Calculating PPL with fixed-length models モデルのコンテキストサイズに制約がない場合、モデルのパープレキシティを評価するためには、シーケンスを自己回帰的に因子分解し、各ステップで前のサブシーケンスに条件を付けることで計算します。以下に示すように。 <img width="600" alt="完全なコンテキスト長のシーケンスの分解" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_full.gif"/> しかし、通常、近似モデルを使用する場合、モデルが処理できるトークン数に制約があります。例えば、最大の[GPT-2](model_doc/gpt2)のバージョンは1024トークンの固定長を持っているため、1024よりも大きい \\(t\\) に対して \\(p_\theta(x_t|x_{<t})\\) を直接計算することはできません。 代わりに、通常、シーケンスはモデルの最大入力サイズに等しいサブシーケンスに分割されます。モデルの最大入力サイズが \\(k\\) の場合、トークン \\(x_t\\) の尤度を近似するには、完全なコンテキストではなく、それを先行する \\(k-1\\) トークンにのみ条件を付けることがあります。シーケンスのモデルのパープレキシティを評価する際、誘惑的ですが非効率な方法は、シーケンスを分割し、各セグメントの分解対数尤度を独立に合算することです。 <img width="600" alt="利用可能な完全なコンテキストを活用しない非最適なPPL" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_chunked.gif"/> これは各セグメントのパープレキシティが1回のフォワードパスで計算できるため、計算が迅速ですが、通常、モデルはほとんどの予測ステップでコンテキストが少ないため、完全に因子分解されたパープレキシティの悪い近似となり、通常、より高い(悪い)PPLを返します。 代わりに、固定長モデルのPPLはスライディングウィンドウ戦略を用いて評価するべきです。これには、モデルが各予測ステップでより多くのコンテキストを持つように、コンテキストウィンドウを繰り返しスライドさせるという方法が含まれます。 <img width="600" alt="Sliding window PPL taking advantage of all available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_sliding.gif"/> これはシーケンスの確率のより正確な分解に近いものであり、通常はより有利なスコアを生成します。欠点は、コーパス内の各トークンに対して別個の前方パスが必要です。実用的な妥協案は、1トークンずつスライドする代わりに、より大きなストライドでコンテキストを移動するストライド型のスライディングウィンドウを使用することです。これにより、計算がはるかに高速に進行できる一方で、モデルには各ステップで予測を行うための大きなコンテキストが提供されます。 ## Example: Calculating perplexity with GPT-2 in 🤗 Transformers GPT-2を使用してこのプロセスをデモンストレーションしてみましょう。 ```python from transformers import GPT2LMHeadModel, GPT2TokenizerFast device = "cuda" model_id = "openai-community/gpt2-large" model = GPT2LMHeadModel.from_pretrained(model_id).to(device) tokenizer = GPT2TokenizerFast.from_pretrained(model_id) ``` 
WikiText-2データセットを読み込み、異なるスライディングウィンドウ戦略を使用してパープレキシティを評価します。このデータセットは小規模で、セット全体に対して単一のフォワードパスを実行するだけなので、データセット全体をメモリに読み込んでエンコードするだけで十分です。

```python
from datasets import load_dataset

test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
encodings = tokenizer("\n\n".join(test["text"]), return_tensors="pt")
```

🤗 Transformersを使用すると、単純に`input_ids`をモデルの`labels`として渡すことで、各トークンの平均負の対数尤度が損失として返されます。しかし、スライディングウィンドウのアプローチでは、各イテレーションでモデルに渡すトークンにオーバーラップがあります。私たちは、コンテキストとして扱っているトークンの対数尤度を損失に含めたくありません。そのため、これらの対象を `-100` に設定して無視されるようにします。以下は、ストライドを `512` とした場合の例です。これにより、モデルは任意のトークンの条件付けの尤度を計算する際に、少なくともコンテキストとして 512 トークンを持つことになります(512 個の前のトークンが利用可能である場合)。

```python
import torch
from tqdm import tqdm

max_length = model.config.n_positions
stride = 512
seq_len = encodings.input_ids.size(1)

nlls = []
prev_end_loc = 0
for begin_loc in tqdm(range(0, seq_len, stride)):
    end_loc = min(begin_loc + max_length, seq_len)
    trg_len = end_loc - prev_end_loc  # may be different from stride on last loop
    input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)
    target_ids = input_ids.clone()
    target_ids[:, :-trg_len] = -100

    with torch.no_grad():
        outputs = model(input_ids, labels=target_ids)

        # loss is calculated using CrossEntropyLoss which averages over valid labels
        # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels
        # to the left by 1.
        neg_log_likelihood = outputs.loss

    nlls.append(neg_log_likelihood)

    prev_end_loc = end_loc
    if end_loc == seq_len:
        break

ppl = torch.exp(torch.stack(nlls).mean())
```

ストライド長が最大入力長と同じ場合、上述の最適でないスライディングウィンドウ戦略と同等になります。ストライドが小さいほど、モデルは各予測を行う際により多くのコンテキストを持つため、通常、報告されるパープレキシティが向上 (低下) します。

上記のコードを `stride = 1024` で実行すると、オーバーラップがない状態で、結果のパープレキシティは `19.44` になります。これは GPT-2 の論文に報告された `19.93` とほぼ同等です。一方、`stride = 512` を使用してスライディングウィンドウ戦略を採用すると、パープレキシティは `16.45` に向上します。これはより良いスコアであるだけでなく、シーケンスの尤度の真の自己回帰分解により近い方法で計算されています。
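参考までに、上記のループを関数にまとめ、異なるストライドを簡単に比較できるようにした最小限のスケッチを示します (上で定義した `model`、`encodings`、`device` がそのまま利用できると仮定しています)。

```python
import torch
from tqdm import tqdm

def compute_ppl(stride, max_length=None):
    # 上で定義した model / encodings / device を再利用する前提のスケッチ
    if max_length is None:
        max_length = model.config.n_positions
    seq_len = encodings.input_ids.size(1)
    nlls, prev_end_loc = [], 0
    for begin_loc in tqdm(range(0, seq_len, stride)):
        end_loc = min(begin_loc + max_length, seq_len)
        trg_len = end_loc - prev_end_loc
        input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)
        target_ids = input_ids.clone()
        target_ids[:, :-trg_len] = -100
        with torch.no_grad():
            nlls.append(model(input_ids, labels=target_ids).loss)
        prev_end_loc = end_loc
        if end_loc == seq_len:
            break
    return torch.exp(torch.stack(nlls).mean())

print(compute_ppl(stride=1024))  # オーバーラップなし (近似が粗い)
print(compute_ppl(stride=512))   # スライディングウィンドウ (より正確)
```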
transformers/docs/source/ja/perplexity.md/0
{ "file_path": "transformers/docs/source/ja/perplexity.md", "repo_id": "transformers", "token_count": 4045 }
333
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Image-to-Image Task Guide

[[open-in-colab]]

Image-to-Image タスクは、アプリケーションが画像を受信し、別の画像を出力するタスクです。これには、画像強化 (超解像度、低光量強化、ディレインなど)、画像修復などを含むさまざまなサブタスクがあります。

このガイドでは、次の方法を説明します。
- 超解像度タスクに image-to-image パイプラインを使用します。
- パイプラインを使用せずに、同じタスクに対して image-to-image モデルを実行します。

このガイドがリリースされた時点では、`image-to-image`パイプラインは超解像度タスクのみをサポートしていることに注意してください。

必要なライブラリをインストールすることから始めましょう。

```bash
pip install transformers
```

[Swin2SR モデル](https://huggingface.co/caidas/swin2SR-lightweight-x2-64) を使用してパイプラインを初期化できるようになりました。次に、画像を渡してパイプラインを呼び出すことで推論できます。現時点では、[Swin2SR モデル](https://huggingface.co/models?sort=trending&search=swin2sr) のみがこのパイプラインでサポートされています。

```python
import torch
from transformers import pipeline

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pipe = pipeline(task="image-to-image", model="caidas/swin2SR-lightweight-x2-64", device=device)
```

では、画像を読み込みましょう。

```python
from PIL import Image
import requests

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg"
image = Image.open(requests.get(url, stream=True).raw)

print(image.size)
```
```bash
# (532, 432)
```
<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg" alt="Photo of a cat"/>
</div>

これで、パイプラインを使用して推論を実行できるようになりました。猫の画像の拡大バージョンを取得します。

```python
upscaled = pipe(image)
print(upscaled.size)
```
```bash
# (1072, 880)
```

パイプラインを使用せずに自分で推論を実行したい場合は、Transformers の `Swin2SRForImageSuperResolution` クラスと `Swin2SRImageProcessor` クラスを使用できます。これには同じモデルのチェックポイントを使用します。モデルとプロセッサを初期化しましょう。

```python
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-lightweight-x2-64").to(device)
processor = Swin2SRImageProcessor.from_pretrained("caidas/swin2SR-lightweight-x2-64")
```

`pipeline` は、自分で行う必要がある前処理と後処理のステップを抽象化してくれるので、ここでは自分で画像を前処理しましょう。画像をプロセッサに渡してから、ピクセル値を GPU に移動します。

```python
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)

pixel_values = pixel_values.to(device)
```

これで、ピクセル値をモデルに渡すことで画像を推論できるようになりました。

```python
import torch

with torch.no_grad():
  outputs = model(pixel_values)
```

出力は、以下のような `ImageSuperResolutionOutput` タイプのオブジェクトです 👇

```
(loss=None, reconstruction=tensor([[[[0.8270, 0.8269, 0.8275,  ..., 0.7463, 0.7446, 0.7453],
          [0.8287, 0.8278, 0.8283,  ..., 0.7451, 0.7448, 0.7457],
          [0.8280, 0.8273, 0.8269,  ..., 0.7447, 0.7446, 0.7452],
          ...,
          [0.5923, 0.5933, 0.5924,  ..., 0.0697, 0.0695, 0.0706],
          [0.5926, 0.5932, 0.5926,  ..., 0.0673, 0.0687, 0.0705],
          [0.5927, 0.5914, 0.5922,  ..., 0.0664, 0.0694, 0.0718]]]],
       device='cuda:0'), hidden_states=None, attentions=None)
```

`reconstruction`を取得し、それを視覚化するために後処理する必要があります。どのように見えるか見てみましょう。

```python
outputs.reconstruction.data.shape
# 
torch.Size([1, 3, 880, 1072]) ``` 出力を圧縮して軸 0 を削除し、値をクリップしてから、それを numpy float に変換する必要があります。次に、軸を [1072, 880] の形状になるように配置し、最後に出力を範囲 [0, 255] に戻します。 ```python import numpy as np # squeeze, take to CPU and clip the values output = outputs.reconstruction.data.squeeze().cpu().clamp_(0, 1).numpy() # rearrange the axes output = np.moveaxis(output, source=0, destination=-1) # bring values back to pixel values range output = (output * 255.0).round().astype(np.uint8) Image.fromarray(output) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat_upscaled.png" alt="Upscaled photo of a cat"/> </div>
transformers/docs/source/ja/tasks/image_to_image.md/0
{ "file_path": "transformers/docs/source/ja/tasks/image_to_image.md", "repo_id": "transformers", "token_count": 2420 }
334
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Visual Question Answering [[open-in-colab]] Visual Question Answering (VQA) は、画像に基づいて自由形式の質問に答えるタスクです。 このタスクをサポートするモデルへの入力は通常、画像と質問の組み合わせであり、出力は 自然言語で表現された答え。 VQA の注目すべき使用例には次のようなものがあります。 * 視覚障害者向けのアクセシビリティ アプリケーション。 * 教育: 講義や教科書で示されている視覚的な資料について質問を投げかけること。 VQA は、インタラクティブな博物館の展示物や史跡でも利用できます。 * カスタマー サービスと電子商取引: VQA は、ユーザーが製品について質問できるようにすることでユーザー エクスペリエンスを向上させます。 * 画像検索: VQA モデルを使用して、特定の特徴を持つ画像を検索できます。たとえば、ユーザーは「犬はいますか?」と尋ねることができます。一連の画像から犬が写っているすべての画像を検索します。 このガイドでは、次の方法を学びます。 - [`Graphcore/vqa` データセット](https://huggingface.co/datasets/Graphcore/vqa) 上で分類 VQA モデル、特に [ViLT](../model_doc/vilt) を微調整します。 - 微調整された ViLT を推論に使用します。 - BLIP-2 などの生成モデルを使用してゼロショット VQA 推論を実行します。 ## Fine-tuning ViLT ViLT モデルは、Vision Transformer (ViT) にテキスト埋め込みを組み込んでおり、最小限の設計を可能にします。 視覚と言語の事前トレーニング (VLP)。このモデルは、いくつかの下流タスクに使用できます。 VQA タスクの場合、分類子 head は最上部 (`[CLS]` トークンの最終的な非表示状態の最上部にある線形層) に配置され、ランダムに初期化されます。 したがって、視覚的質問応答は **分類問題** として扱われます。 BLIP、BLIP-2、InstructBLIP などの最近のモデルは、VQA を生成タスクとして扱います。このガイドの後半では、 ゼロショット VQA 推論にそれらを使用する方法を示します。 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install -q transformers datasets ``` モデルをコミュニティと共有することをお勧めします。 Hugging Face アカウントにログインして、🤗 ハブにアップロードします。 プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` モデルのチェックポイントをグローバル変数として定義しましょう。 ```py >>> model_checkpoint = "dandelin/vilt-b32-mlm" ``` ## Load the data 説明の目的で、このガイドでは、注釈付きの視覚的な質問に答える「Graphcore/vqa」データセットの非常に小さなサンプルを使用します。 完全なデータセットは [🤗 Hub](https://huggingface.co/datasets/Graphcore/vqa) で見つけることができます。 [`Graphcore/vqa` データセット](https://huggingface.co/datasets/Graphcore/vqa) の代わりに、 公式 [VQA データセット ページ](https://visualqa.org/download.html) から同じデータを手動で取得します。フォローしたい場合は、 カスタム データを使用したチュートリアルでは、[画像データセットを作成する](https://huggingface.co/docs/datasets/image_dataset#loading-script) 方法を確認してください。 🤗 データセットのドキュメントのガイド。 検証分割から最初の 200 個の例をロードし、データセットの機能を調べてみましょう。 ```python >>> from datasets import load_dataset >>> dataset = load_dataset("Graphcore/vqa", split="validation[:200]") >>> dataset Dataset({ features: ['question', 'question_type', 'question_id', 'image_id', 'answer_type', 'label'], num_rows: 200 }) ``` データセットの特徴を理解するために例を見てみましょう。 ```py >>> dataset[0] {'question': 'Where is he looking?', 'question_type': 'none of the above', 'question_id': 262148000, 'image_id': '/root/.cache/huggingface/datasets/downloads/extracted/ca733e0e000fb2d7a09fbcc94dbfe7b5a30750681d0e965f8e0a23b1c2f98c75/val2014/COCO_val2014_000000262148.jpg', 'answer_type': 'other', 'label': {'ids': ['at table', 'down', 'skateboard', 'table'], 'weights': [0.30000001192092896, 1.0, 0.30000001192092896, 0.30000001192092896]}} ``` このタスクに関連する機能には次のものがあります。 * `question`: 画像から回答する質問 * `image_id`: 質問が参照する画像へのパス * `label`: 注釈 残りの機能は必要ないので削除できます。 ```py >>> dataset = 
dataset.remove_columns(['question_type', 'question_id', 'answer_type']) ``` ご覧のとおり、`label`機能には、さまざまなヒューマン・アノテーターによって収集された、同じ質問に対する複数の回答 (ここでは`id`と呼びます) が含まれています。 質問に対する答えは主観的なものになる可能性があるためです。この場合、問題は "彼はどこを見ているのか?"ということです。一部の人々 これには "ダウン" という注釈が付けられ、他のものには "テーブルで" という注釈が付けられ、別の注釈には "スケートボード" という注釈が付けられました。 画像を見て、どの答えを出すかを考えてください。 ```python >>> from PIL import Image >>> image = Image.open(dataset[0]['image_id']) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/vqa-example.png" alt="VQA Image Example"/> </div> 質問と回答のあいまいさのため、このようなデータセットはマルチラベル分類問題として扱われます ( 複数の回答が有効である可能性があります)。さらに、ワンホット エンコードされたベクトルを作成するだけではなく、 注釈内に特定の回答が出現した回数に基づくソフト エンコーディング。 たとえば、上の例では、"down"という回答が他の回答よりも頻繁に選択されるため、 スコア (データセットでは`weight`と呼ばれます) は 1.0 で、残りの回答のスコアは 1.0 未満です。 後で適切な分類ヘッドを使用してモデルをインスタンス化するために、2 つの辞書を作成しましょう。 ラベル名を整数に変換する、またはその逆: ```py >>> import itertools >>> labels = [item['ids'] for item in dataset['label']] >>> flattened_labels = list(itertools.chain(*labels)) >>> unique_labels = list(set(flattened_labels)) >>> label2id = {label: idx for idx, label in enumerate(unique_labels)} >>> id2label = {idx: label for label, idx in label2id.items()} ``` マッピングができたので、文字列の回答をその ID に置き換え、さらに前処理をより便利にするためにデータセットをフラット化することができます。 ```python >>> def replace_ids(inputs): ... inputs["label"]["ids"] = [label2id[x] for x in inputs["label"]["ids"]] ... return inputs >>> dataset = dataset.map(replace_ids) >>> flat_dataset = dataset.flatten() >>> flat_dataset.features {'question': Value(dtype='string', id=None), 'image_id': Value(dtype='string', id=None), 'label.ids': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'label.weights': Sequence(feature=Value(dtype='float64', id=None), length=-1, id=None)} ``` ## Preprocessing data 次のステップでは、ViLT プロセッサをロードして、モデルの画像データとテキスト データを準備します。 [`ViltProcessor`] は、BERT トークナイザーと ViLT 画像プロセッサを便利な単一プロセッサにラップします。 ```py >>> from transformers import ViltProcessor >>> processor = ViltProcessor.from_pretrained(model_checkpoint) ``` データを前処理するには、[`ViltProcessor`] を使用して画像と質問をエンコードする必要があります。プロセッサーは使用します [`BertTokenizerFast`] を使用してテキストをトークン化し、テキスト データの `input_ids`、`attention_mask`、および `token_type_ids` を作成します。 画像に関しては、プロセッサは [`ViltImageProcessor`] を利用して画像のサイズ変更と正規化を行い、`pixel_values` と `pixel_mask` を作成します。 これらの前処理ステップはすべて内部で行われ、`processor`を呼び出すだけで済みます。ただし、それでも必要なのは、 対象のラベルを準備します。この表現では、各要素は考えられる答え (ラベル) に対応します。正解の場合、要素は保持されます。 それぞれのスコア (重み) が設定され、残りの要素は 0 に設定されます。 次の関数は、画像と質問に `processor` を適用し、上で説明したようにラベルをフォーマットします。 ```py >>> import torch >>> def preprocess_data(examples): ... image_paths = examples['image_id'] ... images = [Image.open(image_path) for image_path in image_paths] ... texts = examples['question'] ... encoding = processor(images, texts, padding="max_length", truncation=True, return_tensors="pt") ... for k, v in encoding.items(): ... encoding[k] = v.squeeze() ... targets = [] ... for labels, scores in zip(examples['label.ids'], examples['label.weights']): ... target = torch.zeros(len(id2label)) ... for label, score in zip(labels, scores): ... target[label] = score ... targets.append(target) ... encoding["labels"] = targets ... 
return encoding ``` データセット全体に前処理関数を適用するには、🤗 Datasets [`~datasets.map`] 関数を使用します。 `map` を高速化するには、次のようにします。 データセットの複数の要素を一度に処理するには、`batched=True` を設定します。この時点で、不要な列は自由に削除してください。 ```py >>> processed_dataset = flat_dataset.map(preprocess_data, batched=True, remove_columns=['question','question_type', 'question_id', 'image_id', 'answer_type', 'label.ids', 'label.weights']) >>> processed_dataset Dataset({ features: ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values', 'pixel_mask', 'labels'], num_rows: 200 }) ``` 最後のステップとして、[`DefaultDataCollat​​or`] を使用してサンプルのバッチを作成します。 ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator() ``` ## Train the model これでモデルのトレーニングを開始する準備が整いました。 [`ViltForQuestionAnswering`] で ViLT をロードします。ラベルの数を指定します ラベルマッピングとともに: ```py >>> from transformers import ViltForQuestionAnswering >>> model = ViltForQuestionAnswering.from_pretrained(model_checkpoint, num_labels=len(id2label), id2label=id2label, label2id=label2id) ``` この時点で残っているステップは 3 つだけです。 1. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。 ```py >>> from transformers import TrainingArguments >>> repo_id = "MariaK/vilt_finetuned_200" >>> training_args = TrainingArguments( ... output_dir=repo_id, ... per_device_train_batch_size=4, ... num_train_epochs=20, ... save_steps=200, ... logging_steps=50, ... learning_rate=5e-5, ... save_total_limit=2, ... remove_unused_columns=False, ... push_to_hub=True, ... ) ``` 2. トレーニング引数をモデル、データセット、プロセッサー、データ照合器とともに [`Trainer`] に渡します。 ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... data_collator=data_collator, ... train_dataset=processed_dataset, ... tokenizer=processor, ... ) ``` 3. [`~Trainer.train`] を呼び出してモデルを微調整します。 ```py >>> trainer.train() ``` トレーニングが完了したら、 [`~Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、🤗 ハブで最終モデルを共有します。 ```py >>> trainer.push_to_hub() ``` ## Inference ViLT モデルを微調整し、🤗 Hub にアップロードしたので、それを推論に使用できます。もっとも単純な 推論用に微調整されたモデルを試す方法は、それを [`pipeline`] で使用することです。 ```py >>> from transformers import pipeline >>> pipe = pipeline("visual-question-answering", model="MariaK/vilt_finetuned_200") ``` このガイドのモデルは 200 の例でのみトレーニングされているため、多くを期待しないでください。少なくともそれがあるかどうか見てみましょう データから何かを学習し、推論を説明するためにデータセットから最初の例を取り出します。 ```py >>> example = dataset[0] >>> image = Image.open(example['image_id']) >>> question = example['question'] >>> print(question) >>> pipe(image, question, top_k=1) "Where is he looking?" [{'score': 0.5498199462890625, 'answer': 'down'}] ``` あまり自信がありませんが、モデルは確かに何かを学習しました。より多くの例とより長いトレーニングを行うと、はるかに良い結果が得られます。 必要に応じて、パイプラインの結果を手動で複製することもできます。 1. 画像と質問を取得し、モデルのプロセッサを使用してモデル用に準備します。 2. モデルを通じて結果または前処理を転送します。 3. ロジットから、最も可能性の高い回答の ID を取得し、`id2label` で実際の回答を見つけます。 ```py >>> processor = ViltProcessor.from_pretrained("MariaK/vilt_finetuned_200") >>> image = Image.open(example['image_id']) >>> question = example['question'] >>> # prepare inputs >>> inputs = processor(image, question, return_tensors="pt") >>> model = ViltForQuestionAnswering.from_pretrained("MariaK/vilt_finetuned_200") >>> # forward pass >>> with torch.no_grad(): ... 
outputs = model(**inputs)

>>> logits = outputs.logits
>>> idx = logits.argmax(-1).item()
>>> print("Predicted answer:", model.config.id2label[idx])
Predicted answer: down
```

## Zero-shot VQA

以前のモデルでは、VQA を分類タスクとして扱いました。BLIP、BLIP-2、InstructBLIP などの一部の最近のモデルは、VQA を生成タスクとして扱います。[BLIP-2](../model_doc/blip-2) を例として考えてみましょう。これは、事前トレーニングされたビジョン エンコーダーと LLM を任意に組み合わせて使用できる、新しい視覚言語事前トレーニングのパラダイムを導入しました (詳細については、[BLIP-2 ブログ投稿](https://huggingface.co/blog/blip-2) を参照)。
これにより、視覚的な質問応答を含む複数の視覚言語タスクで最先端の結果を達成することができます。

このモデルを VQA に使用する方法を説明しましょう。まず、モデルをロードしましょう。ここでは、モデルを明示的に GPU (利用可能な場合) に送信します。トレーニング時は [`Trainer`] がこれを自動的に処理するため、事前に行う必要はありませんでした。

```py
>>> from transformers import AutoProcessor, Blip2ForConditionalGeneration
>>> import torch

>>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> model.to(device)
```

モデルは画像とテキストを入力として受け取るため、VQA データセットの最初の例とまったく同じ画像と質問のペアを使用してみましょう。

```py
>>> example = dataset[0]
>>> image = Image.open(example['image_id'])
>>> question = example['question']
```

視覚的な質問応答タスクに BLIP-2 を使用するには、テキスト プロンプトが特定の形式 (`Question: {} Answer:`) に従う必要があります。

```py
>>> prompt = f"Question: {question} Answer:"
```

次に、モデルのプロセッサで画像/プロンプトを前処理し、処理された入力をモデルに渡し、出力をデコードする必要があります。

```py
>>> inputs = processor(image, text=prompt, return_tensors="pt").to(device, torch.float16)

>>> generated_ids = model.generate(**inputs, max_new_tokens=10)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
>>> print(generated_text)
"He is looking at the crowd"
```

ご覧のとおり、モデルは群衆と顔の向き (下を向いている) を認識しましたが、観客がスケーターの後ろにいるという事実は見逃しているようです。それでも、人間が注釈を付けたデータセットを取得できない場合には、このアプローチによって有用な結果をすぐに得ることができます。
transformers/docs/source/ja/tasks/visual_question_answering.md/0
{ "file_path": "transformers/docs/source/ja/tasks/visual_question_answering.md", "repo_id": "transformers", "token_count": 7952 }
335
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 어떻게 사용자 정의 파이프라인을 생성하나요? [[how-to-create-a-custom-pipeline]] 이 가이드에서는 사용자 정의 파이프라인을 어떻게 생성하고 [허브](https://hf.co/models)에 공유하거나 🤗 Transformers 라이브러리에 추가하는 방법을 살펴보겠습니다. 먼저 파이프라인이 수용할 수 있는 원시 입력을 결정해야 합니다. 문자열, 원시 바이트, 딕셔너리 또는 가장 원하는 입력일 가능성이 높은 것이면 무엇이든 가능합니다. 이 입력을 가능한 한 순수한 Python 형식으로 유지해야 (JSON을 통해 다른 언어와도) 호환성이 좋아집니다. 이것이 전처리(`preprocess`) 파이프라인의 입력(`inputs`)이 될 것입니다. 그런 다음 `outputs`를 정의하세요. `inputs`와 같은 정책을 따르고, 간단할수록 좋습니다. 이것이 후처리(`postprocess`) 메소드의 출력이 될 것입니다. 먼저 4개의 메소드(`preprocess`, `_forward`, `postprocess` 및 `_sanitize_parameters`)를 구현하기 위해 기본 클래스 `Pipeline`을 상속하여 시작합니다. ```python from transformers import Pipeline class MyPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, inputs, maybe_arg=2): model_input = Tensor(inputs["input_ids"]) return {"model_input": model_input} def _forward(self, model_inputs): # model_inputs == {"model_input": model_input} outputs = self.model(**model_inputs) # Maybe {"logits": Tensor(...)} return outputs def postprocess(self, model_outputs): best_class = model_outputs["logits"].softmax(-1) return best_class ``` 이 분할 구조는 CPU/GPU에 대한 비교적 원활한 지원을 제공하는 동시에, 다른 스레드에서 CPU에 대한 사전/사후 처리를 수행할 수 있게 지원하는 것입니다. `preprocess`는 원래 정의된 입력을 가져와 모델에 공급할 수 있는 형식으로 변환합니다. 더 많은 정보를 포함할 수 있으며 일반적으로 `Dict` 형태입니다. `_forward`는 구현 세부 사항이며 직접 호출할 수 없습니다. `forward`는 예상 장치에서 모든 것이 작동하는지 확인하기 위한 안전장치가 포함되어 있어 선호되는 호출 메소드입니다. 실제 모델과 관련된 것은 `_forward` 메소드에 속하며, 나머지는 전처리/후처리 과정에 있습니다. `postprocess` 메소드는 `_forward`의 출력을 가져와 이전에 결정한 최종 출력 형식으로 변환합니다. `_sanitize_parameters`는 초기화 시간에 `pipeline(...., maybe_arg=4)`이나 호출 시간에 `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`과 같이, 사용자가 원하는 경우 언제든지 매개변수를 전달할 수 있도록 허용합니다. `_sanitize_parameters`의 반환 값은 `preprocess`, `_forward`, `postprocess`에 직접 전달되는 3개의 kwargs 딕셔너리입니다. 호출자가 추가 매개변수로 호출하지 않았다면 아무것도 채우지 마십시오. 이렇게 하면 항상 더 "자연스러운" 함수 정의의 기본 인수를 유지할 수 있습니다. 분류 작업에서 `top_k` 매개변수가 대표적인 예입니다. ```python >>> pipe = pipeline("my-new-task") >>> pipe("This is a test") [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05} {"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}] >>> pipe("This is a test", top_k=2) [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}] ``` 이를 달성하기 위해 우리는 `postprocess` 메소드를 기본 매개변수인 `5`로 업데이트하고 `_sanitize_parameters`를 수정하여 이 새 매개변수를 허용합니다. 
```python def postprocess(self, model_outputs, top_k=5): best_class = model_outputs["logits"].softmax(-1) # top_k를 처리하는 로직 추가 return best_class def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] postprocess_kwargs = {} if "top_k" in kwargs: postprocess_kwargs["top_k"] = kwargs["top_k"] return preprocess_kwargs, {}, postprocess_kwargs ``` 입/출력을 가능한 한 간단하고 완전히 JSON 직렬화 가능한 형식으로 유지하려고 노력하십시오. 이렇게 하면 사용자가 새로운 종류의 개체를 이해하지 않고도 파이프라인을 쉽게 사용할 수 있습니다. 또한 사용 용이성을 위해 여러 가지 유형의 인수(오디오 파일은 파일 이름, URL 또는 순수한 바이트일 수 있음)를 지원하는 것이 비교적 일반적입니다. ## 지원되는 작업 목록에 추가하기 [[adding-it-to-the-list-of-supported-tasks]] `new-task`를 지원되는 작업 목록에 등록하려면 `PIPELINE_REGISTRY`에 추가해야 합니다: ```python from transformers.pipelines import PIPELINE_REGISTRY PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, ) ``` 원하는 경우 기본 모델을 지정할 수 있으며, 이 경우 특정 개정(분기 이름 또는 커밋 해시일 수 있음, 여기서는 "abcdef")과 타입을 함께 가져와야 합니다: ```python PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, default={"pt": ("user/awesome_model", "abcdef")}, type="text", # 현재 지원 유형: text, audio, image, multimodal ) ``` ## Hub에 파이프라인 공유하기 [[share-your-pipeline-on-the-hub]] Hub에 사용자 정의 파이프라인을 공유하려면 `Pipeline` 하위 클래스의 사용자 정의 코드를 Python 파일에 저장하기만 하면 됩니다. 예를 들어, 다음과 같이 문장 쌍 분류를 위한 사용자 정의 파이프라인을 사용한다고 가정해 보겠습니다: ```py import numpy as np from transformers import Pipeline def softmax(outputs): maxes = np.max(outputs, axis=-1, keepdims=True) shifted_exp = np.exp(outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class PairClassificationPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "second_text" in kwargs: preprocess_kwargs["second_text"] = kwargs["second_text"] return preprocess_kwargs, {}, {} def preprocess(self, text, second_text=None): return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs): logits = model_outputs.logits[0].numpy() probabilities = softmax(logits) best_class = np.argmax(probabilities) label = self.model.config.id2label[best_class] score = probabilities[best_class].item() logits = logits.tolist() return {"label": label, "score": score, "logits": logits} ``` 구현은 프레임워크에 구애받지 않으며, PyTorch와 TensorFlow 모델에 대해 작동합니다. 이를 `pair_classification.py`라는 파일에 저장한 경우, 다음과 같이 가져오고 등록할 수 있습니다: ```py from pair_classification import PairClassificationPipeline from transformers.pipelines import PIPELINE_REGISTRY from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, tf_model=TFAutoModelForSequenceClassification, ) ``` 이 작업이 완료되면 사전훈련된 모델과 함께 사용할 수 있습니다. 예를 들어, `sgugger/finetuned-bert-mrpc`은 MRPC 데이터 세트에서 미세 조정되어 문장 쌍을 패러프레이즈인지 아닌지를 분류합니다. ```py from transformers import pipeline classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc") ``` 그런 다음 `push_to_hub` 메소드를 사용하여 허브에 공유할 수 있습니다: ```py classifier.push_to_hub("test-dynamic-pipeline") ``` 이렇게 하면 "test-dynamic-pipeline" 폴더 내에 `PairClassificationPipeline`을 정의한 파일이 복사되며, 파이프라인의 모델과 토크나이저도 저장한 후, `{your_username}/test-dynamic-pipeline` 저장소에 있는 모든 것을 푸시합니다. 이후에는 `trust_remote_code=True` 옵션만 제공하면 누구나 사용할 수 있습니다. 
```py from transformers import pipeline classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True) ``` ## 🤗 Transformers에 파이프라인 추가하기 [[add-the-pipeline-to-transformers]] 🤗 Transformers에 사용자 정의 파이프라인을 기여하려면, `pipelines` 하위 모듈에 사용자 정의 파이프라인 코드와 함께 새 모듈을 추가한 다음, `pipelines/__init__.py`에서 정의된 작업 목록에 추가해야 합니다. 그런 다음 테스트를 추가해야 합니다. `tests/test_pipelines_MY_PIPELINE.py`라는 새 파일을 만들고 다른 테스트와 예제를 함께 작성합니다. `run_pipeline_test` 함수는 매우 일반적이며, `model_mapping` 및 `tf_model_mapping`에서 정의된 가능한 모든 아키텍처의 작은 무작위 모델에서 실행됩니다. 이는 향후 호환성을 테스트하는 데 매우 중요하며, 누군가 `XXXForQuestionAnswering`을 위한 새 모델을 추가하면 파이프라인 테스트가 해당 모델에서 실행을 시도한다는 의미입니다. 모델이 무작위이기 때문에 실제 값을 확인하는 것은 불가능하므로, 단순히 파이프라인 출력 `TYPE`과 일치시키기 위한 도우미 `ANY`가 있습니다. 또한 2개(이상적으로는 4개)의 테스트를 구현해야 합니다. - `test_small_model_pt`: 이 파이프라인에 대한 작은 모델 1개를 정의(결과가 의미 없어도 상관없음)하고 파이프라인 출력을 테스트합니다. 결과는 `test_small_model_tf`와 동일해야 합니다. - `test_small_model_tf`: 이 파이프라인에 대한 작은 모델 1개를 정의(결과가 의미 없어도 상관없음)하고 파이프라인 출력을 테스트합니다. 결과는 `test_small_model_pt`와 동일해야 합니다. - `test_large_model_pt`(`선택사항`): 결과가 의미 있을 것으로 예상되는 실제 파이프라인에서 파이프라인을 테스트합니다. 이러한 테스트는 속도가 느리므로 이를 표시해야 합니다. 여기서의 목표는 파이프라인을 보여주고 향후 릴리즈에서의 변화가 없는지 확인하는 것입니다. - `test_large_model_tf`(`선택사항`): 결과가 의미 있을 것으로 예상되는 실제 파이프라인에서 파이프라인을 테스트합니다. 이러한 테스트는 속도가 느리므로 이를 표시해야 합니다. 여기서의 목표는 파이프라인을 보여주고 향후 릴리즈에서의 변화가 없는지 확인하는 것입니다.
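참고로, 앞서 `postprocess` 예시에서 주석으로만 남겨 두었던 `top_k` 처리 로직을 실제로 채워 넣으면 다음과 같은 형태가 될 수 있습니다. 모델 출력의 형태(`(1, num_labels)` 로짓 텐서)는 설명을 위한 가정입니다.

```python
def postprocess(self, model_outputs, top_k=5):
    # 가정: model_outputs["logits"]의 형태는 (1, num_labels)
    probs = model_outputs["logits"].softmax(-1)[0]
    top_k = min(top_k, probs.shape[-1])
    scores, ids = probs.topk(top_k)
    # 점수가 높은 순서대로 (라벨, 점수) 목록을 반환합니다
    return [
        {"label": self.model.config.id2label[_id.item()], "score": score.item()}
        for score, _id in zip(scores, ids)
    ]
```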
transformers/docs/source/ko/add_new_pipeline.md/0
{ "file_path": "transformers/docs/source/ko/add_new_pipeline.md", "repo_id": "transformers", "token_count": 7731 }
336
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Trainer API를 사용한 하이퍼파라미터 탐색 [[hyperparameter-search-using-trainer-api]] 🤗 Transformers에서는 🤗 Transformers 모델을 학습시키는데 최적화된 [`Trainer`] 클래스를 제공하기 때문에, 사용자는 직접 훈련 루프를 작성할 필요 없이 더욱 간편하게 학습을 시킬 수 있습니다. 또한, [`Trainer`]는 하이퍼파라미터 탐색을 위한 API를 제공합니다. 이 문서에서 이 API를 활용하는 방법을 예시와 함께 보여드리겠습니다. ## 하이퍼파라미터 탐색 백엔드 [[hyperparameter-search-backend]] [`Trainer`]는 현재 아래 4가지 하이퍼파라미터 탐색 백엔드를 지원합니다: [optuna](https://optuna.org/)와 [sigopt](https://sigopt.com/), [raytune](https://docs.ray.io/en/latest/tune/index.html), [wandb](https://wandb.ai/site/sweeps) 입니다. 하이퍼파라미터 탐색 백엔드로 사용하기 전에 아래의 명령어를 사용하여 라이브러리들을 설치하세요. ```bash pip install optuna/sigopt/wandb/ray[tune] ``` ## 예제에서 하이퍼파라미터 탐색을 활성화하는 방법 [[how-to-enable-hyperparameter-search-in-example]] 하이퍼파라미터 탐색 공간을 정의하세요. 하이퍼파라미터 탐색 백엔드마다 서로 다른 형식이 필요합니다. sigopt의 경우, 해당 [object_parameter](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter) 문서를 참조하여 아래와 같이 작성하세요: ```py >>> def sigopt_hp_space(trial): ... return [ ... {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"}, ... { ... "categorical_values": ["16", "32", "64", "128"], ... "name": "per_device_train_batch_size", ... "type": "categorical", ... }, ... ] ``` optuna의 경우, 해당 [object_parameter](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html#sphx-glr-tutorial-10-key-features-002-configurations-py) 문서를 참조하여 아래와 같이 작성하세요: ```py >>> def optuna_hp_space(trial): ... return { ... "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), ... "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32, 64, 128]), ... } ``` raytune의 경우, 해당 [object_parameter](https://docs.ray.io/en/latest/tune/api/search_space.html) 문서를 참조하여 아래와 같이 작성하세요: ```py >>> def ray_hp_space(trial): ... return { ... "learning_rate": tune.loguniform(1e-6, 1e-4), ... "per_device_train_batch_size": tune.choice([16, 32, 64, 128]), ... } ``` wandb의 경우, 해당 [object_parameter](https://docs.wandb.ai/guides/sweeps/configuration) 문서를 참조하여 아래와 같이 작성하세요: ```py >>> def wandb_hp_space(trial): ... return { ... "method": "random", ... "metric": {"name": "objective", "goal": "minimize"}, ... "parameters": { ... "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, ... "per_device_train_batch_size": {"values": [16, 32, 64, 128]}, ... }, ... } ``` `model_init` 함수를 정의하고 이를 [`Trainer`]에 전달하세요. 아래는 그 예시입니다. ```py >>> def model_init(trial): ... return AutoModelForSequenceClassification.from_pretrained( ... model_args.model_name_or_path, ... from_tf=bool(".ckpt" in model_args.model_name_or_path), ... config=config, ... cache_dir=model_args.cache_dir, ... revision=model_args.model_revision, ... token=True if model_args.use_auth_token else None, ... 
) ``` 아래와 같이 `model_init` 함수, 훈련 인수, 훈련 및 테스트 데이터셋, 그리고 평가 함수를 사용하여 [`Trainer`]를 생성하세요: ```py >>> trainer = Trainer( ... model=None, ... args=training_args, ... train_dataset=small_train_dataset, ... eval_dataset=small_eval_dataset, ... compute_metrics=compute_metrics, ... tokenizer=tokenizer, ... model_init=model_init, ... data_collator=data_collator, ... ) ``` 하이퍼파라미터 탐색을 호출하고, 최적의 시험 매개변수를 가져오세요. 백엔드는 `"optuna"`/`"sigopt"`/`"wandb"`/`"ray"` 중에서 선택할 수 있습니다. 방향은 `"minimize"` 또는 `"maximize"` 중 선택하며, 목표를 최소화할 것인지 최대화할 것인지를 결정합니다. 자신만의 compute_objective 함수를 정의할 수 있습니다. 만약 이 함수를 정의하지 않으면, 기본 compute_objective가 호출되고, f1과 같은 평가 지표의 합이 목푯값으로 반환됩니다. ```py >>> best_trial = trainer.hyperparameter_search( ... direction="maximize", ... backend="optuna", ... hp_space=optuna_hp_space, ... n_trials=20, ... compute_objective=compute_objective, ... ) ``` ## DDP 미세 조정을 위한 하이퍼파라미터 탐색 [[hyperparameter-search-for-ddp-finetune]] 현재, DDP(Distributed Data Parallelism; 분산 데이터 병렬처리)를 위한 하이퍼파라미터 탐색은 optuna와 sigopt에서 가능합니다. 최상위 프로세스가 하이퍼파라미터 탐색 과정을 시작하고 그 결과를 다른 프로세스에 전달합니다.
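참고로, 위에서 언급한 `compute_objective` 함수를 직접 정의하는 최소 예시입니다. 어떤 평가 지표(여기서는 `eval_f1`)를 사용할지는 예시를 위한 가정이며, [`Trainer`]의 `compute_metrics`가 해당 지표를 반환한다고 가정합니다:

```py
def compute_objective(metrics):
    # Trainer.evaluate()가 반환한 지표 딕셔너리에서 최적화할 단일 실수 값을 골라 반환합니다.
    # 여기서는 eval_f1을 최대화한다고 가정합니다 (direction="maximize").
    return metrics["eval_f1"]
```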
transformers/docs/source/ko/hpo_train.md/0
{ "file_path": "transformers/docs/source/ko/hpo_train.md", "repo_id": "transformers", "token_count": 3520 }
337
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 🤗 PEFT로 어댑터 가져오기 [[load-adapters-with-peft]] [[open-in-colab]] [Parameter-Efficient Fine Tuning (PEFT)](https://huggingface.co/blog/peft) 방법은 사전훈련된 모델의 매개변수를 미세 조정 중 고정시키고, 그 위에 훈련할 수 있는 매우 적은 수의 매개변수(어댑터)를 추가합니다. 어댑터는 작업별 정보를 학습하도록 훈련됩니다. 이 접근 방식은 완전히 미세 조정된 모델에 필적하는 결과를 생성하면서, 메모리 효율적이고 비교적 적은 컴퓨팅 리소스를 사용합니다. 또한 PEFT로 훈련된 어댑터는 일반적으로 전체 모델보다 훨씬 작기 때문에 공유, 저장 및 가져오기가 편리합니다. <div class="flex flex-col justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/> <figcaption class="text-center">Hub에 저장된 OPTForCausalLM 모델의 어댑터 가중치는 최대 700MB에 달하는 모델 가중치의 전체 크기에 비해 약 6MB에 불과합니다.</figcaption> </div> 🤗 PEFT 라이브러리에 대해 자세히 알아보려면 [문서](https://huggingface.co/docs/peft/index)를 확인하세요. ## 설정 [[setup]] 🤗 PEFT를 설치하여 시작하세요: ```bash pip install peft ``` 새로운 기능을 사용해보고 싶다면, 다음 소스에서 라이브러리를 설치하는 것이 좋습니다: ```bash pip install git+https://github.com/huggingface/peft.git ``` ## 지원되는 PEFT 모델 [[supported-peft-models]] 🤗 Transformers는 기본적으로 일부 PEFT 방법을 지원하며, 로컬이나 Hub에 저장된 어댑터 가중치를 가져오고 몇 줄의 코드만으로 쉽게 실행하거나 훈련할 수 있습니다. 다음 방법을 지원합니다: - [Low Rank Adapters](https://huggingface.co/docs/peft/conceptual_guides/lora) - [IA3](https://huggingface.co/docs/peft/conceptual_guides/ia3) - [AdaLoRA](https://arxiv.org/abs/2303.10512) 🤗 PEFT와 관련된 다른 방법(예: 프롬프트 훈련 또는 프롬프트 튜닝) 또는 일반적인 🤗 PEFT 라이브러리에 대해 자세히 알아보려면 [문서](https://huggingface.co/docs/peft/index)를 참조하세요. ## PEFT 어댑터 가져오기 [[load-a-peft-adapter]] 🤗 Transformers에서 PEFT 어댑터 모델을 가져오고 사용하려면 Hub 저장소나 로컬 디렉터리에 `adapter_config.json` 파일과 어댑터 가중치가 포함되어 있는지 확인하십시오. 그런 다음 `AutoModelFor` 클래스를 사용하여 PEFT 어댑터 모델을 가져올 수 있습니다. 예를 들어 인과 관계 언어 모델용 PEFT 어댑터 모델을 가져오려면 다음 단계를 따르십시오: 1. PEFT 모델 ID를 지정하십시오. 2. [`AutoModelForCausalLM`] 클래스에 전달하십시오. ```py from transformers import AutoModelForCausalLM, AutoTokenizer peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(peft_model_id) ``` <Tip> `AutoModelFor` 클래스나 기본 모델 클래스(예: `OPTForCausalLM` 또는 `LlamaForCausalLM`) 중 하나를 사용하여 PEFT 어댑터를 가져올 수 있습니다. </Tip> `load_adapter` 메소드를 호출하여 PEFT 어댑터를 가져올 수도 있습니다. ```py from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "facebook/opt-350m" peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(model_id) model.load_adapter(peft_model_id) ``` ## 8비트 또는 4비트로 가져오기 [[load-in-8bit-or-4bit]] `bitsandbytes` 통합은 8비트와 4비트 정밀도 데이터 유형을 지원하므로 큰 모델을 가져올 때 유용하면서 메모리도 절약합니다. 
모델을 하드웨어에 효과적으로 분배하려면 [`~PreTrainedModel.from_pretrained`]에 `load_in_8bit` 또는 `load_in_4bit`를 설정한 [`BitsAndBytesConfig`]를 `quantization_config`로 전달하고 `device_map="auto"`를 설정하세요:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

peft_model_id = "ybelkada/opt-350m-lora"
model = AutoModelForCausalLM.from_pretrained(peft_model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_8bit=True))
```

## 새 어댑터 추가 [[add-a-new-adapter]]

새 어댑터가 현재 어댑터와 동일한 유형인 경우에 한해 기존 어댑터가 있는 모델에 새 어댑터를 추가하려면 [`~peft.PeftModel.add_adapter`]를 사용할 수 있습니다. 예를 들어 모델에 기존 LoRA 어댑터가 연결되어 있는 경우:

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig

model_id = "facebook/opt-350m"
model = AutoModelForCausalLM.from_pretrained(model_id)

lora_config = LoraConfig(
    target_modules=["q_proj", "k_proj"],
    init_lora_weights=False
)

model.add_adapter(lora_config, adapter_name="adapter_1")
```

새 어댑터를 추가하려면:

```py
# attach new adapter with same config
model.add_adapter(lora_config, adapter_name="adapter_2")
```

이제 [`~peft.PeftModel.set_adapter`]를 사용하여 사용할 어댑터를 지정할 수 있습니다 (`inputs`와 `tokenizer`는 아래 '어댑터 활성화 및 비활성화' 예시처럼 준비되어 있다고 가정합니다):

```py
# use adapter_1
model.set_adapter("adapter_1")
output = model.generate(**inputs)
print(tokenizer.decode(output[0], skip_special_tokens=True))

# use adapter_2
model.set_adapter("adapter_2")
output = model.generate(**inputs)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

## 어댑터 활성화 및 비활성화 [[enable-and-disable-adapters]]

모델에 어댑터를 추가한 후 어댑터 모듈을 활성화 또는 비활성화할 수 있습니다. 어댑터 모듈을 활성화하려면:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig

model_id = "facebook/opt-350m"
adapter_model_id = "ybelkada/opt-350m-lora"
tokenizer = AutoTokenizer.from_pretrained(model_id)
text = "Hello"
inputs = tokenizer(text, return_tensors="pt")

model = AutoModelForCausalLM.from_pretrained(model_id)
peft_config = PeftConfig.from_pretrained(adapter_model_id)

# to initiate with random weights
peft_config.init_lora_weights = False

model.add_adapter(peft_config)
model.enable_adapters()
output = model.generate(**inputs)
```

어댑터 모듈을 비활성화하려면:

```py
model.disable_adapters()
output = model.generate(**inputs)
```

## PEFT 어댑터 훈련 [[train-a-peft-adapter]]

PEFT 어댑터는 [`Trainer`] 클래스에서 지원되므로 특정 사용 사례에 맞게 어댑터를 훈련할 수 있습니다. 몇 줄의 코드를 추가하기만 하면 됩니다. 예를 들어 LoRA 어댑터를 훈련하려면:

<Tip>

[`Trainer`]를 사용하여 모델을 미세 조정하는 것이 익숙하지 않다면 [사전훈련된 모델을 미세 조정하기](training) 튜토리얼을 확인하세요.

</Tip>

1. 작업 유형 및 하이퍼파라미터를 지정하여 어댑터 구성을 정의합니다. 하이퍼파라미터에 대한 자세한 내용은 [`~peft.LoraConfig`]를 참조하세요.

```py
from peft import LoraConfig

peft_config = LoraConfig(
    lora_alpha=16,
    lora_dropout=0.1,
    r=64,
    bias="none",
    task_type="CAUSAL_LM",
)
```

2. 모델에 어댑터를 추가합니다.

```py
model.add_adapter(peft_config)
```

3. 이제 모델을 [`Trainer`]에 전달할 수 있습니다!

```py
trainer = Trainer(model=model, ...)
trainer.train()
```

훈련한 어댑터를 저장하고 다시 가져오려면:

```py
model.save_pretrained(save_dir)
model = AutoModelForCausalLM.from_pretrained(save_dir)
```
transformers/docs/source/ko/peft.md/0
{ "file_path": "transformers/docs/source/ko/peft.md", "repo_id": "transformers", "token_count": 5059 }
338
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # bitsandbytes [[bitsandbytes]] [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)는 모델을 8비트 및 4비트로 양자화하는 가장 쉬운 방법입니다. 8비트 양자화는 fp16의 이상치와 int8의 비이상치를 곱한 후, 비이상치 값을 fp16으로 다시 변환하고, 이들을 합산하여 fp16으로 가중치를 반환합니다. 이렇게 하면 이상치 값이 모델 성능에 미치는 저하 효과를 줄일 수 있습니다. 4비트 양자화는 모델을 더욱 압축하며, [QLoRA](https://hf.co/papers/2305.14314)와 함께 사용하여 양자화된 대규모 언어 모델을 미세 조정하는 데 흔히 사용됩니다. bitsandbytes를 사용하려면 다음 라이브러리가 설치되어 있어야 합니다: <hfoptions id="bnb"> <hfoption id="8-bit"> ```bash pip install transformers accelerate bitsandbytes>0.37.0 ``` </hfoption> <hfoption id="4-bit"> ```bash pip install bitsandbytes>=0.39.0 pip install --upgrade accelerate transformers ``` </hfoption> </hfoptions> 이제 `BitsAndBytesConfig`를 [`~PreTrainedModel.from_pretrained`] 메소드에 전달하여 모델을 양자화할 수 있습니다. 이는 Accelerate 가져오기를 지원하고 `torch.nn.Linear` 레이어가 포함된 모든 모델에서 작동합니다. <hfoptions id="bnb"> <hfoption id="8-bit"> 모델을 8비트로 양자화하면 메모리 사용량이 절반으로 줄어들며, 대규모 모델의 경우 사용 가능한 GPU를 효율적으로 활용하려면 `device_map="auto"`를 설정하세요. ```py from transformers import AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) model_8bit = AutoModelForCausalLM.from_pretrained( "bigscience/bloom-1b7", quantization_config=quantization_config ) ``` 기본적으로 `torch.nn.LayerNorm`과 같은 다른 모듈은 `torch.float16`으로 변환됩니다. 원한다면 `torch_dtype` 매개변수로 이들 모듈의 데이터 유형을 변경할 수 있습니다: ```py import torch from transformers import AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) model_8bit = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", quantization_config=quantization_config, torch_dtype=torch.float32 ) model_8bit.model.decoder.layers[-1].final_layer_norm.weight.dtype ``` 모델이 8비트로 양자화되면 최신 버전의 Transformers와 bitsandbytes를 사용하지 않는 한 양자화된 가중치를 Hub에 푸시할 수 없습니다. 최신 버전을 사용하는 경우, [`~PreTrainedModel.push_to_hub`] 메소드를 사용하여 8비트 모델을 Hub에 푸시할 수 있습니다. 양자화 config.json 파일이 먼저 푸시되고, 그 다음 양자화된 모델 가중치가 푸시됩니다. ```py from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) model = AutoModelForCausalLM.from_pretrained( "bigscience/bloom-560m", quantization_config=quantization_config ) tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m") model.push_to_hub("bloom-560m-8bit") ``` </hfoption> <hfoption id="4-bit"> 모델을 4비트로 양자화하면 메모리 사용량이 4배 줄어들며, 대규모 모델의 경우 사용 가능한 GPU를 효율적으로 활용하려면 `device_map="auto"`를 설정하세요: ```py from transformers import AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True) model_4bit = AutoModelForCausalLM.from_pretrained( "bigscience/bloom-1b7", quantization_config=quantization_config ) ``` 기본적으로 `torch.nn.LayerNorm`과 같은 다른 모듈은 `torch.float16`으로 변환됩니다. 
원한다면 `torch_dtype` 매개변수로 이들 모듈의 데이터 유형을 변경할 수 있습니다: ```py import torch from transformers import AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True) model_4bit = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", quantization_config=quantization_config, torch_dtype=torch.float32 ) model_4bit.model.decoder.layers[-1].final_layer_norm.weight.dtype ``` `bitsandbytes>=0.41.3`을 사용하는 경우 4비트 모델을 직렬화하고 Hugging Face Hub에 푸시할 수 있습니다. 모델을 4비트 정밀도로 가져온 후 `model.push_to_hub()`를 호출하면 됩니다. 또한 `model.save_pretrained()` 명령어로 로컬에 직렬화된 4비트 모델을 저장할 수도 있습니다. </hfoption> </hfoptions> <Tip warning={true}> 8비트 및 4비트 가중치로 훈련하는 것은 *추가* 매개변수에 대해서만 지원됩니다. </Tip> 메모리 사용량을 확인하려면 `get_memory_footprint`를 사용하세요: ```py print(model.get_memory_footprint()) ``` 양자화된 모델은 [`~PreTrainedModel.from_pretrained`] 메소드를 사용하여 `load_in_8bit` 또는 `load_in_4bit` 매개변수를 지정하지 않고도 가져올 수 있습니다: ```py from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("{your_username}/bloom-560m-8bit", device_map="auto") ``` ## 8비트 (LLM.int8() 알고리즘)[[8-bit-(llm.int8()-algorithm)]] <Tip> 8비트 양자화에 대한 자세한 내용을 알고 싶다면 이 [블로그 포스트](https://huggingface.co/blog/hf-bitsandbytes-integration)를 참조하세요! </Tip> 이 섹션에서는 오프로딩, 이상치 임곗값, 모듈 변환 건너뛰기 및 미세 조정과 같은 8비트 모델의 특정 기능을 살펴봅니다. ### 오프로딩 [[offloading]] 8비트 모델은 CPU와 GPU 간에 가중치를 오프로드하여 매우 큰 모델을 메모리에 장착할 수 있습니다. CPU로 전송된 가중치는 실제로 **float32**로 저장되며 8비트로 변환되지 않습니다. 예를 들어, [bigscience/bloom-1b7](https://huggingface.co/bigscience/bloom-1b7) 모델의 오프로드를 활성화하려면 [`BitsAndBytesConfig`]를 생성하는 것부터 시작하세요: ```py from transformers import AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) ``` CPU에 전달할 `lm_head`를 제외한 모든 것을 GPU에 적재할 수 있도록 사용자 정의 디바이스 맵을 설계합니다: ```py device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 0, } ``` 이제 사용자 정의 `device_map`과 `quantization_config`을 사용하여 모델을 가져옵니다: ```py model_8bit = AutoModelForCausalLM.from_pretrained( "bigscience/bloom-1b7", device_map=device_map, quantization_config=quantization_config, ) ``` ### 이상치 임곗값[[outlier-threshold]] "이상치"는 특정 임곗값을 초과하는 은닉 상태 값을 의미하며, 이러한 값은 fp16으로 계산됩니다. 값은 일반적으로 정규 분포 ([-3.5, 3.5])를 따르지만, 대규모 모델의 경우 이 분포는 매우 다를 수 있습니다 ([-60, 6] 또는 [6, 60]). 8비트 양자화는 ~5 정도의 값에서 잘 작동하지만, 그 이상에서는 상당한 성능 저하가 발생합니다. 좋은 기본 임곗값 값은 6이지만, 더 불안정한 모델 (소형 모델 또는 미세 조정)에는 더 낮은 임곗값이 필요할 수 있습니다. 모델에 가장 적합한 임곗값을 찾으려면 [`BitsAndBytesConfig`]에서 `llm_int8_threshold` 매개변수를 실험해보는 것이 좋습니다: ```py from transformers import AutoModelForCausalLM, BitsAndBytesConfig model_id = "bigscience/bloom-1b7" quantization_config = BitsAndBytesConfig( llm_int8_threshold=10, ) model_8bit = AutoModelForCausalLM.from_pretrained( model_id, device_map=device_map, quantization_config=quantization_config, ) ``` ### 모듈 변환 건너뛰기[[skip-module-conversion]] [Jukebox](model_doc/jukebox)와 같은 일부 모델은 모든 모듈을 8비트로 양자화할 필요가 없으며, 이는 실제로 불안정성을 유발할 수 있습니다. 
Jukebox의 경우, [`BitsAndBytesConfig`]의 `llm_int8_skip_modules` 매개변수를 사용하여 `lm_head` 모듈을 건너뛰어야 합니다:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "bigscience/bloom-1b7"

quantization_config = BitsAndBytesConfig(
    llm_int8_skip_modules=["lm_head"],
)

model_8bit = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    quantization_config=quantization_config,
)
```

### 미세 조정[[finetuning]]

[PEFT](https://github.com/huggingface/peft) 라이브러리를 사용하면 [flan-t5-large](https://huggingface.co/google/flan-t5-large) 및 [facebook/opt-6.7b](https://huggingface.co/facebook/opt-6.7b)와 같은 대규모 모델을 8비트 양자화로 미세 조정할 수 있습니다. (문서 끝에 이를 보여주는 간단한 예시 스케치를 덧붙여 두었습니다.) 훈련 시 `device_map` 매개변수를 전달할 필요가 없으며, 모델을 자동으로 GPU에 가져옵니다. 그러나 원하는 경우 `device_map` 매개변수로 장치 맵을 사용자 정의할 수 있습니다 (`device_map="auto"`는 추론에만 사용해야 합니다).

## 4비트 (QLoRA 알고리즘)[[4-bit-(qlora-algorithm)]]

<Tip>

이 [노트북](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf)에서 4비트 양자화를 시도해보고 자세한 내용은 이 [블로그 게시물](https://huggingface.co/blog/4bit-transformers-bitsandbytes)에서 확인하세요.

</Tip>

이 섹션에서는 계산 데이터 유형 변경, Normal Float 4 (NF4) 데이터 유형 사용, 중첩 양자화 사용과 같은 4비트 모델의 특정 기능 일부를 탐구합니다.

### 계산 데이터 유형[[compute-data-type]]

계산 속도를 높이기 위해 [`BitsAndBytesConfig`]에서 `bnb_4bit_compute_dtype` 매개변수를 사용하여 데이터 유형을 float32(기본값)에서 bf16으로 변경할 수 있습니다:

```py
import torch
from transformers import BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
```

### Normal Float 4 (NF4)[[normal-float-4-(nf4)]]

NF4는 [QLoRA](https://hf.co/papers/2305.14314) 논문에서 소개된 4비트 데이터 유형으로, 정규 분포에서 초기화된 가중치에 적합합니다. 4비트 기반 모델을 훈련할 때 NF4를 사용해야 합니다. 이는 [`BitsAndBytesConfig`]에서 `bnb_4bit_quant_type` 매개변수로 설정할 수 있습니다:

```py
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
)

model_nf4 = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=nf4_config)
```

추론의 경우, `bnb_4bit_quant_type`은 성능에 큰 영향을 미치지 않습니다. 그러나 모델 가중치와 일관성을 유지하려면 동일한 `bnb_4bit_compute_dtype` 및 `torch_dtype` 값을 사용해야 합니다.

### 중첩 양자화[[nested-quantization]]

중첩 양자화는 추가적인 성능 손실 없이 메모리를 더 절약할 수 있는 기술입니다. 이 기능은 이미 양자화된 가중치의 2차 양자화를 수행하여 매개변수당 추가로 0.4비트를 절약합니다. 예를 들어, 중첩 양자화를 통해 16GB NVIDIA T4 GPU에서 시퀀스 길이 1024, 배치 크기 1, 그레이디언트 누적 4단계를 사용하여 [Llama-13b](https://huggingface.co/meta-llama/Llama-2-13b) 모델을 미세 조정할 수 있습니다.

```py
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

double_quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
)

model_double_quant = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-13b", quantization_config=double_quant_config)
```

## `bitsandbytes` 모델의 비양자화[[dequantizing-`bitsandbytes`-models]]

양자화된 후에는 모델을 원래의 정밀도로 비양자화할 수 있지만, 이 경우 모델 품질이 약간 저하될 수 있습니다. 비양자화된 모델을 올릴 수 있는 충분한 GPU RAM이 있는지 확인하세요.

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, AutoTokenizer

model_id = "facebook/opt-125m"

model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True))
tokenizer = AutoTokenizer.from_pretrained(model_id)

model.dequantize()

text = tokenizer("Hello my name is", return_tensors="pt").to(0)

out = model.generate(**text)
print(tokenizer.decode(out[0]))
```
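다음은 위 '미세 조정' 섹션에서 설명한 8비트 양자화 + PEFT 조합을 보여주는 최소 예시 스케치입니다. 아래의 `facebook/opt-350m` 체크포인트와 `target_modules` 값은 설명을 위한 가정이며, 실제 사용하는 모델 구조에 맞게 조정해야 합니다.

```py
# 가정: peft 라이브러리가 설치되어 있습니다 (pip install peft)
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # 예시용 체크포인트 (가정)
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)

# 양자화된 기본 가중치를 고정하고 k-비트 훈련이 가능하도록 모델을 준비합니다
model = prepare_model_for_kbit_training(model)

# 훈련 가능한 LoRA 어댑터만 추가합니다 (target_modules는 모델마다 다릅니다)
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```

이후에는 일반적인 [`Trainer`] 또는 직접 작성한 학습 루프로 어댑터 매개변수만 학습하면 됩니다.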
transformers/docs/source/ko/quantization/bitsandbytes.md/0
{ "file_path": "transformers/docs/source/ko/quantization/bitsandbytes.md", "repo_id": "transformers", "token_count": 8138 }
339
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. -->

# Image-to-Image 작업 가이드 [[image-to-image-task-guide]]

[[open-in-colab]]

Image-to-Image 작업은 애플리케이션이 이미지를 입력받아 또 다른 이미지를 출력하는 작업입니다. 여기에는 이미지 향상(초고해상도, 저조도 향상, 빗줄기 제거 등), 이미지 복원 등 다양한 하위 작업이 포함됩니다.

이 가이드에서는 다음을 수행하는 방법을 보여줍니다.
- 초고해상도 작업을 위한 image-to-image 파이프라인 사용,
- 파이프라인 없이 동일한 작업을 위한 image-to-image 모델 실행

이 가이드가 발표된 시점에서는, `image-to-image` 파이프라인은 초고해상도 작업만 지원한다는 점을 유의하세요.

필요한 라이브러리를 설치하는 것부터 시작하겠습니다.

```bash
pip install transformers
```

이제 [Swin2SR 모델](https://huggingface.co/caidas/swin2SR-lightweight-x2-64)을 사용하여 파이프라인을 초기화할 수 있습니다. 그런 다음 이미지와 함께 호출하여 파이프라인으로 추론할 수 있습니다. 현재 이 파이프라인에서는 [Swin2SR 모델](https://huggingface.co/caidas/swin2SR-lightweight-x2-64)만 지원됩니다.

```python
import torch
from transformers import pipeline

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pipe = pipeline(task="image-to-image", model="caidas/swin2SR-lightweight-x2-64", device=device)
```

이제 이미지를 불러와 봅시다.

```python
from PIL import Image
import requests

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg"
image = Image.open(requests.get(url, stream=True).raw)

print(image.size)
```
```bash
# (532, 432)
```
<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg" alt="Photo of a cat"/>
</div>

이제 파이프라인으로 추론을 수행할 수 있습니다. 고양이 이미지의 업스케일된 버전을 얻을 수 있습니다.

```python
upscaled = pipe(image)
print(upscaled.size)
```
```bash
# (1072, 880)
```

파이프라인 없이 직접 추론을 수행하려면 Transformers의 `Swin2SRForImageSuperResolution` 및 `Swin2SRImageProcessor` 클래스를 사용할 수 있습니다. 이를 위해 동일한 모델 체크포인트를 사용합니다. 모델과 프로세서를 초기화해 보겠습니다.

```python
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-lightweight-x2-64").to(device)
processor = Swin2SRImageProcessor.from_pretrained("caidas/swin2SR-lightweight-x2-64")
```

`pipeline`은 우리가 직접 수행해야 하는 전처리와 후처리 단계를 추상화하므로, 이미지를 전처리해 보겠습니다. 이미지를 프로세서에 전달한 다음 픽셀값을 GPU로 이동시키겠습니다.

```python
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)

pixel_values = pixel_values.to(device)
```

이제 픽셀값을 모델에 전달하여 이미지를 추론할 수 있습니다.

```python
import torch

with torch.no_grad():
  outputs = model(pixel_values)
```

출력은 아래와 같은 `ImageSuperResolutionOutput` 유형의 객체입니다 👇

```
(loss=None, reconstruction=tensor([[[[0.8270, 0.8269, 0.8275,  ..., 0.7463, 0.7446, 0.7453],
          [0.8287, 0.8278, 0.8283,  ..., 0.7451, 0.7448, 0.7457],
          [0.8280, 0.8273, 0.8269,  ..., 0.7447, 0.7446, 0.7452],
          ...,
          [0.5923, 0.5933, 0.5924,  ..., 0.0697, 0.0695, 0.0706],
          [0.5926, 0.5932, 0.5926,  ..., 0.0673, 0.0687, 0.0705],
          [0.5927, 0.5914, 0.5922,  ..., 0.0664, 0.0694, 0.0718]]]],
       device='cuda:0'), hidden_states=None, attentions=None)
```

`reconstruction`을 가져와 시각화를 위해 후처리해야 합니다.
어떻게 생겼는지 살펴봅시다. ```python outputs.reconstruction.data.shape # torch.Size([1, 3, 880, 1072]) ``` 출력 텐서의 차원을 축소하고 0번째 축을 제거한 다음, 값을 클리핑하고 NumPy 부동소수점 배열로 변환해야 합니다. 그런 다음 [1072, 880] 모양을 갖도록 축을 재정렬하고 마지막으로 출력을 0과 255 사이의 값을 갖도록 되돌립니다. ```python import numpy as np # 크기를 줄이고, CPU로 이동하고, 값을 클리핑 output = outputs.reconstruction.data.squeeze().cpu().clamp_(0, 1).numpy() # 축을 재정렬 output = np.moveaxis(output, source=0, destination=-1) # 값을 픽셀값 범위로 되돌리기 output = (output * 255.0).round().astype(np.uint8) Image.fromarray(output) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat_upscaled.png" alt="Upscaled photo of a cat"/> </div>
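위에서 살펴본 전처리, 추론, 후처리 단계를 하나의 함수로 묶으면 재사용하기 편리합니다. 아래는 앞서 만든 `model`, `processor`, `device`, `image`를 그대로 재사용한다고 가정한 간단한 스케치이며, 저장 파일 이름은 예시입니다.

```python
import numpy as np
import torch
from PIL import Image


def upscale(image: Image.Image) -> Image.Image:
    # 전처리: 이미지를 픽셀값 텐서로 변환하고 디바이스로 이동
    pixel_values = processor(image, return_tensors="pt").pixel_values.to(device)
    # 추론
    with torch.no_grad():
        outputs = model(pixel_values)
    # 후처리: 차원 축소, 값 클리핑, 축 재정렬, 픽셀값 범위로 변환
    output = outputs.reconstruction.data.squeeze().cpu().clamp_(0, 1).numpy()
    output = np.moveaxis(output, source=0, destination=-1)
    return Image.fromarray((output * 255.0).round().astype(np.uint8))


upscale(image).save("cat_upscaled.png")  # 파일 이름은 예시입니다
```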
transformers/docs/source/ko/tasks/image_to_image.md/0
{ "file_path": "transformers/docs/source/ko/tasks/image_to_image.md", "repo_id": "transformers", "token_count": 3237 }
340
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. -->

# 시각적 질의응답 (Visual Question Answering)

[[open-in-colab]]

시각적 질의응답(VQA)은 이미지를 기반으로 개방형 질문에 대응하는 작업입니다. 이 작업을 지원하는 모델의 입력은 대부분 이미지와 질문의 조합이며, 출력은 자연어로 된 답변입니다.

VQA의 주요 사용 사례는 다음과 같습니다:
* 시각 장애인을 위한 접근성 애플리케이션을 구축할 수 있습니다.
* 교육: 강의나 교과서에 나온 시각 자료에 대한 질문에 답할 수 있습니다. 또한 체험형 전시와 유적 등에서도 VQA를 활용할 수 있습니다.
* 고객 서비스 및 전자상거래: VQA는 사용자가 제품에 대해 질문할 수 있게 함으로써 사용자 경험을 향상시킬 수 있습니다.
* 이미지 검색: VQA 모델을 사용하여 원하는 특성을 가진 이미지를 검색할 수 있습니다. 예를 들어 사용자는 "강아지가 있어?"라고 물어봐서 주어진 이미지 묶음에서 강아지가 있는 모든 이미지를 받아볼 수 있습니다.

이 가이드에서 학습할 내용은 다음과 같습니다:
- VQA 모델 중 하나인 [ViLT](../../en/model_doc/vilt)를 [`Graphcore/vqa` 데이터셋](https://huggingface.co/datasets/Graphcore/vqa) 에서 미세조정하는 방법
- 미세조정된 ViLT 모델로 추론하는 방법
- BLIP-2 같은 생성 모델로 제로샷 VQA 추론을 실행하는 방법

## ViLT 미세 조정 [[finetuning-vilt]]

ViLT 모델은 비전 트랜스포머(ViT)에 텍스트 임베딩을 넣어 비전/언어 사전훈련(VLP; Vision-and-Language Pre-training)을 위한 기본적인 디자인을 갖췄습니다. 이 모델은 여러 다운스트림 작업에 사용할 수 있습니다. VQA 태스크의 경우 `[CLS]` 토큰의 최종 은닉 상태 위에 분류 헤드(선형 레이어)가 올라가며, 이 헤드는 무작위로 초기화됩니다. 따라서 여기에서 시각적 질의응답은 **분류 문제**로 취급됩니다.

최근의 BLIP, BLIP-2, InstructBLIP와 같은 모델들은 VQA를 생성형 작업으로 간주합니다. 가이드의 후반부에서는 이런 모델들을 사용하여 제로샷 VQA 추론을 하는 방법에 대해 설명하겠습니다.

시작하기 전 필요한 모든 라이브러리를 설치했는지 확인하세요.

```bash
pip install -q transformers datasets
```

커뮤니티에 모델을 공유하는 것을 권장 드립니다. Hugging Face 계정에 로그인하여 🤗 Hub에 업로드할 수 있습니다. 메시지가 나타나면 로그인할 토큰을 입력하세요:

```py
>>> from huggingface_hub import notebook_login

>>> notebook_login()
```

모델 체크포인트를 전역 변수로 선언하세요.

```py
>>> model_checkpoint = "dandelin/vilt-b32-mlm"
```

## 데이터 가져오기 [[load-the-data]]

이 가이드에서는 `Graphcore/vqa` 데이터세트의 작은 샘플을 사용합니다. 전체 데이터세트는 [🤗 Hub](https://huggingface.co/datasets/Graphcore/vqa) 에서 확인할 수 있습니다.

[`Graphcore/vqa` 데이터세트](https://huggingface.co/datasets/Graphcore/vqa) 의 대안으로 공식 [VQA 데이터세트 페이지](https://visualqa.org/download.html) 에서 동일한 데이터를 수동으로 다운로드할 수 있습니다. 직접 공수한 데이터로 튜토리얼을 따르고 싶다면 [이미지 데이터세트 만들기](https://huggingface.co/docs/datasets/image_dataset#loading-script) 라는 🤗 Datasets 문서를 참조하세요.

검증 데이터의 첫 200개 항목을 불러와 데이터세트의 특성을 확인해 보겠습니다:

```python
>>> from datasets import load_dataset

>>> dataset = load_dataset("Graphcore/vqa", split="validation[:200]")
>>> dataset
Dataset({
    features: ['question', 'question_type', 'question_id', 'image_id', 'answer_type', 'label'],
    num_rows: 200
})
```

예제를 하나 뽑아 데이터세트의 특성을 이해해 보겠습니다.
```py >>> dataset[0] {'question': 'Where is he looking?', 'question_type': 'none of the above', 'question_id': 262148000, 'image_id': '/root/.cache/huggingface/datasets/downloads/extracted/ca733e0e000fb2d7a09fbcc94dbfe7b5a30750681d0e965f8e0a23b1c2f98c75/val2014/COCO_val2014_000000262148.jpg', 'answer_type': 'other', 'label': {'ids': ['at table', 'down', 'skateboard', 'table'], 'weights': [0.30000001192092896, 1.0, 0.30000001192092896, 0.30000001192092896]}} ``` 데이터세트에는 다음과 같은 특성이 포함되어 있습니다: * `question`: 이미지에 대한 질문 * `image_id`: 질문과 관련된 이미지의 경로 * `label`: 데이터의 레이블 (annotations) 나머지 특성들은 필요하지 않기 때문에 삭제해도 됩니다: ```py >>> dataset = dataset.remove_columns(['question_type', 'question_id', 'answer_type']) ``` 보시다시피 `label` 특성은 같은 질문마다 답변이 여러 개 있을 수 있습니다. 모두 다른 데이터 라벨러들로부터 수집되었기 때문인데요. 질문의 답변은 주관적일 수 있습니다. 이 경우 질문은 "그는 어디를 보고 있나요?" 였지만, 어떤 사람들은 "아래"로 레이블을 달았고, 다른 사람들은 "테이블" 또는 "스케이트보드" 등으로 주석을 달았습니다. 아래의 이미지를 보고 어떤 답변을 선택할 것인지 생각해 보세요: ```python >>> from PIL import Image >>> image = Image.open(dataset[0]['image_id']) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/vqa-example.png" alt="VQA Image Example"/> </div> 질문과 답변의 모호성으로 인해 이러한 데이터세트는 여러 개의 답변이 가능하므로 다중 레이블 분류 문제로 처리됩니다. 게다가, 원핫(one-hot) 인코딩 벡터를 생성하기보다는 레이블에서 특정 답변이 나타나는 횟수를 기반으로 소프트 인코딩을 생성합니다. 위의 예시에서 "아래"라는 답변이 다른 답변보다 훨씬 더 자주 선택되었기 때문에 데이터세트에서 `weight`라고 불리는 점수로 1.0을 가지며, 나머지 답변들은 1.0 미만의 점수를 가집니다. 적절한 분류 헤더로 모델을 나중에 인스턴스화하기 위해 레이블을 정수로 매핑한 딕셔너리 하나, 반대로 정수를 레이블로 매핑한 딕셔너리 하나 총 2개의 딕셔너리를 생성하세요: ```py >>> import itertools >>> labels = [item['ids'] for item in dataset['label']] >>> flattened_labels = list(itertools.chain(*labels)) >>> unique_labels = list(set(flattened_labels)) >>> label2id = {label: idx for idx, label in enumerate(unique_labels)} >>> id2label = {idx: label for label, idx in label2id.items()} ``` 이제 매핑이 완료되었으므로 문자열 답변을 해당 id로 교체하고, 데이터세트의 더 편리한 후처리를 위해 편평화 할 수 있습니다. ```python >>> def replace_ids(inputs): ... inputs["label"]["ids"] = [label2id[x] for x in inputs["label"]["ids"]] ... return inputs >>> dataset = dataset.map(replace_ids) >>> flat_dataset = dataset.flatten() >>> flat_dataset.features {'question': Value(dtype='string', id=None), 'image_id': Value(dtype='string', id=None), 'label.ids': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'label.weights': Sequence(feature=Value(dtype='float64', id=None), length=-1, id=None)} ``` ## 데이터 전처리 [[preprocessing-data]] 다음 단계는 모델을 위해 이미지와 텍스트 데이터를 준비하기 위해 ViLT 프로세서를 가져오는 것입니다. [`ViltProcessor`]는 BERT 토크나이저와 ViLT 이미지 프로세서를 편리하게 하나의 프로세서로 묶습니다: ```py >>> from transformers import ViltProcessor >>> processor = ViltProcessor.from_pretrained(model_checkpoint) ``` 데이터를 전처리하려면 이미지와 질문을 [`ViltProcessor`]로 인코딩해야 합니다. 프로세서는 [`BertTokenizerFast`]로 텍스트를 토크나이즈하고 텍스트 데이터를 위해 `input_ids`, `attention_mask` 및 `token_type_ids`를 생성합니다. 이미지는 [`ViltImageProcessor`]로 이미지를 크기 조정하고 정규화하며, `pixel_values`와 `pixel_mask`를 생성합니다. 이런 전처리 단계는 모두 내부에서 이루어지므로, `processor`를 호출하기만 하면 됩니다. 하지만 아직 타겟 레이블이 완성되지 않았습니다. 타겟의 표현에서 각 요소는 가능한 답변(레이블)에 해당합니다. 정확한 답변의 요소는 해당 점수(weight)를 유지시키고 나머지 요소는 0으로 설정해야 합니다. 아래 함수가 위에서 설명한대로 이미지와 질문에 `processor`를 적용하고 레이블을 형식에 맞춥니다: ```py >>> import torch >>> def preprocess_data(examples): ... image_paths = examples['image_id'] ... images = [Image.open(image_path) for image_path in image_paths] ... texts = examples['question'] ... encoding = processor(images, texts, padding="max_length", truncation=True, return_tensors="pt") ... 
for k, v in encoding.items(): ... encoding[k] = v.squeeze() ... targets = [] ... for labels, scores in zip(examples['label.ids'], examples['label.weights']): ... target = torch.zeros(len(id2label)) ... for label, score in zip(labels, scores): ... target[label] = score ... targets.append(target) ... encoding["labels"] = targets ... return encoding ``` 전체 데이터세트에 전처리 함수를 적용하려면 🤗 Datasets의 [`~datasets.map`] 함수를 사용하십시오. `batched=True`를 설정하여 데이터세트의 여러 요소를 한 번에 처리함으로써 `map`을 더 빠르게 할 수 있습니다. 이 시점에서 필요하지 않은 열은 제거하세요. ```py >>> processed_dataset = flat_dataset.map(preprocess_data, batched=True, remove_columns=['question','question_type', 'question_id', 'image_id', 'answer_type', 'label.ids', 'label.weights']) >>> processed_dataset Dataset({ features: ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values', 'pixel_mask', 'labels'], num_rows: 200 }) ``` 마지막 단계로, [`DefaultDataCollator`]를 사용하여 예제로 쓸 배치를 생성하세요: ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator() ``` ## 모델 훈련 [[train-the-model]] 이제 모델을 훈련하기 위해 준비되었습니다! [`ViltForQuestionAnswering`]으로 ViLT를 가져올 차례입니다. 레이블의 수와 레이블 매핑을 지정하세요: ```py >>> from transformers import ViltForQuestionAnswering >>> model = ViltForQuestionAnswering.from_pretrained(model_checkpoint, num_labels=len(id2label), id2label=id2label, label2id=label2id) ``` 이 시점에서는 다음 세 단계만 남았습니다: 1. [`TrainingArguments`]에서 훈련 하이퍼파라미터를 정의하세요: ```py >>> from transformers import TrainingArguments >>> repo_id = "MariaK/vilt_finetuned_200" >>> training_args = TrainingArguments( ... output_dir=repo_id, ... per_device_train_batch_size=4, ... num_train_epochs=20, ... save_steps=200, ... logging_steps=50, ... learning_rate=5e-5, ... save_total_limit=2, ... remove_unused_columns=False, ... push_to_hub=True, ... ) ``` 2. 모델, 데이터세트, 프로세서, 데이터 콜레이터와 함께 훈련 인수를 [`Trainer`]에 전달하세요: ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... data_collator=data_collator, ... train_dataset=processed_dataset, ... tokenizer=processor, ... ) ``` 3. [`~Trainer.train`]을 호출하여 모델을 미세 조정하세요: ```py >>> trainer.train() ``` 훈련이 완료되면, [`~Trainer.push_to_hub`] 메소드를 사용하여 🤗 Hub에 모델을 공유하세요: ```py >>> trainer.push_to_hub() ``` ## 추론 [[inference]] ViLT 모델을 미세 조정하고 🤗 Hub에 업로드했다면 추론에 사용할 수 있습니다. 미세 조정된 모델을 추론에 사용해보는 가장 간단한 방법은 [`Pipeline`]에서 사용하는 것입니다. ```py >>> from transformers import pipeline >>> pipe = pipeline("visual-question-answering", model="MariaK/vilt_finetuned_200") ``` 이 가이드의 모델은 200개의 예제에서만 훈련되었으므로 그다지 많은 것을 기대할 수는 없습니다. 데이터세트의 첫 번째 예제를 사용하여 추론 결과를 설명해보겠습니다: ```py >>> example = dataset[0] >>> image = Image.open(example['image_id']) >>> question = example['question'] >>> print(question) >>> pipe(image, question, top_k=1) "Where is he looking?" [{'score': 0.5498199462890625, 'answer': 'down'}] ``` 비록 확신은 별로 없지만, 모델은 실제로 무언가를 배웠습니다. 더 많은 예제와 더 긴 훈련 기간이 주어진다면 분명 더 나은 결과를 얻을 수 있을 것입니다! 원한다면 파이프라인의 결과를 수동으로 복제할 수도 있습니다: 1. 이미지와 질문을 가져와서 프로세서를 사용하여 모델에 준비합니다. 2. 전처리된 결과를 모델에 전달합니다. 3. 로짓에서 가장 가능성 있는 답변의 id를 가져와서 `id2label`에서 실제 답변을 찾습니다. ```py >>> processor = ViltProcessor.from_pretrained("MariaK/vilt_finetuned_200") >>> image = Image.open(example['image_id']) >>> question = example['question'] >>> # prepare inputs >>> inputs = processor(image, question, return_tensors="pt") >>> model = ViltForQuestionAnswering.from_pretrained("MariaK/vilt_finetuned_200") >>> # forward pass >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> logits = outputs.logits >>> idx = logits.argmax(-1).item() >>> print("Predicted answer:", model.config.id2label[idx]) Predicted answer: down ``` ## 제로샷 VQA [[zeroshot-vqa]] 이전 모델은 VQA를 분류 문제로 처리했습니다. BLIP, BLIP-2 및 InstructBLIP와 같은 최근의 모델은 VQA를 생성 작업으로 접근합니다. [BLIP-2](../../en/model_doc/blip-2)를 예로 들어 보겠습니다. 이 모델은 사전훈련된 비전 인코더와 LLM의 모든 조합을 사용할 수 있는 새로운 비전-자연어 사전 학습 패러다임을 도입했습니다. ([BLIP-2 블로그 포스트](https://huggingface.co/blog/blip-2)를 통해 더 자세히 알아볼 수 있어요) 이를 통해 시각적 질의응답을 포함한 여러 비전-자연어 작업에서 SOTA를 달성할 수 있었습니다. 이 모델을 어떻게 VQA에 사용할 수 있는지 설명해 보겠습니다. 먼저 모델을 가져와 보겠습니다. 여기서 GPU가 사용 가능한 경우 모델을 명시적으로 GPU로 전송할 것입니다. 이전에는 훈련할 때 쓰지 않은 이유는 [`Trainer`]가 이 부분을 자동으로 처리하기 때문입니다: ```py >>> from transformers import AutoProcessor, Blip2ForConditionalGeneration >>> import torch >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16) >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> model.to(device) ``` 모델은 이미지와 텍스트를 입력으로 받으므로, VQA 데이터세트의 첫 번째 예제에서와 동일한 이미지/질문 쌍을 사용해 보겠습니다: ```py >>> example = dataset[0] >>> image = Image.open(example['image_id']) >>> question = example['question'] ``` BLIP-2를 시각적 질의응답 작업에 사용하려면 텍스트 프롬프트가 `Question: {} Answer:` 형식을 따라야 합니다. ```py >>> prompt = f"Question: {question} Answer:" ``` 이제 모델의 프로세서로 이미지/프롬프트를 전처리하고, 처리된 입력을 모델을 통해 전달하고, 출력을 디코드해야 합니다: ```py >>> inputs = processor(image, text=prompt, return_tensors="pt").to(device, torch.float16) >>> generated_ids = model.generate(**inputs, max_new_tokens=10) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() >>> print(generated_text) "He is looking at the crowd" ``` 보시다시피 모델은 군중을 인식하고, 얼굴의 방향(아래쪽을 보고 있음)을 인식했지만, 군중이 스케이터 뒤에 있다는 사실을 놓쳤습니다. 그러나 사람이 직접 라벨링한 데이터셋을 얻을 수 없는 경우에, 이 접근법은 빠르게 유용한 결과를 생성할 수 있습니다.
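여러 질문을 한 번에 처리하고 싶다면 프롬프트 목록을 만들어 배치로 전달할 수 있습니다. 아래는 위에서 가져온 BLIP-2 `model`, `processor`, `image`, `device`를 재사용한다고 가정한 간단한 스케치이며, 질문 문구는 설명을 위한 예시입니다.

```py
>>> questions = ["What is he doing?", "What is the weather like?"]  # 예시 질문 (가정)
>>> prompts = [f"Question: {q} Answer:" for q in questions]

>>> inputs = processor(images=[image] * len(prompts), text=prompts, padding=True, return_tensors="pt").to(device, torch.float16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=10)
>>> answers = processor.batch_decode(generated_ids, skip_special_tokens=True)

>>> for question, answer in zip(questions, answers):
...     print(question, "->", answer.strip())
```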
transformers/docs/source/ko/tasks/visual_question_answering.md/0
{ "file_path": "transformers/docs/source/ko/tasks/visual_question_answering.md", "repo_id": "transformers", "token_count": 11287 }
341
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Tour rápido - local: installation title: Instalação title: Início - sections: - local: pipeline_tutorial title: Pipelines para inferência - local: training title: Fine-tuning de um modelo pré-treinado - local: accelerate title: Treinamento distribuído com 🤗 Accelerate title: Tutoriais - sections: - local: fast_tokenizers title: Usando os Tokenizers do 🤗 Tokenizers - local: create_a_model title: Criando uma arquitetura customizada - local: custom_models title: Compartilhando modelos customizados - local: run_scripts title: Treinamento a partir de um script - local: converting_tensorflow_models title: Convertendo checkpoints do TensorFlow para Pytorch - local: serialization title: Exportando modelos para ONNX - sections: - local: tasks/sequence_classification title: Classificação de texto - local: tasks/token_classification title: Classificação de tokens title: Fine-tuning para tarefas específicas - local: multilingual title: Modelos multilinguísticos para inferência title: Guias práticos
transformers/docs/source/pt/_toctree.yml/0
{ "file_path": "transformers/docs/source/pt/_toctree.yml", "repo_id": "transformers", "token_count": 424 }
342
- sections: - local: index title: 🤗 Transformers - local: quicktour title: త్వరిత పర్యటన title: ప్రారంభించడానికి
transformers/docs/source/te/_toctree.yml/0
{ "file_path": "transformers/docs/source/te/_toctree.yml", "repo_id": "transformers", "token_count": 125 }
343
<!-- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. -->

# 完全分片数据并行

[完全分片数据并行(FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/)是一种数据并行方法,它将模型的参数、梯度和优化器状态在可用 GPU(也称为 Worker 或 *rank*)的数量上进行分片。与[分布式数据并行(DDP)](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html)在每个 GPU 上都保留一份完整模型副本不同,FSDP 降低了内存占用。这就提高了 GPU 内存效率,使您能够用较少的 GPU 训练更大的模型。FSDP 已经集成到 Accelerate(一个便于在分布式环境中管理训练的库)中,因此可以在 [`Trainer`] 类中直接使用。

在开始之前,请确保已安装 Accelerate,并且至少使用 PyTorch 2.1.0 或更高版本。

```bash
pip install accelerate
```

## FSDP 配置

首先,运行 [`accelerate config`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-config) 命令为您的训练环境创建一个配置文件。Accelerate 使用此配置文件根据您在 `accelerate config` 中选择的训练选项来自动搭建正确的训练环境。

```bash
accelerate config
```

运行 `accelerate config` 时,您将被提示一系列选项来配置训练环境。本节涵盖了一些最重要的 FSDP 选项。要了解有关其他可用的 FSDP 选项的更多信息,请查阅 [fsdp_config](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.fsdp_config) 参数。

### 分片策略

FSDP 提供了多种可选择的分片策略:

- `FULL_SHARD` - 将模型参数、梯度和优化器状态跨 Worker 进行分片;为此选项选择 `1`
- `SHARD_GRAD_OP` - 将梯度和优化器状态跨 Worker 进行分片;为此选项选择 `2`
- `NO_SHARD` - 不分片任何内容(这等同于 DDP);为此选项选择 `3`
- `HYBRID_SHARD` - 在每个节点内分片模型参数、梯度和优化器状态,同时每个节点都保留一份完整副本;为此选项选择 `4`
- `HYBRID_SHARD_ZERO2` - 在每个节点内分片梯度和优化器状态,同时每个节点都保留一份完整副本;为此选项选择 `5`

这由 `fsdp_sharding_strategy` 标志启用。

### CPU 卸载

不使用的参数和梯度可以卸载到 CPU 上,以节省更多 GPU 内存,并帮助您应对即使使用 FSDP 仍无法容纳大型模型的情况。在运行 `accelerate config` 时,通过设置 `fsdp_offload_params: true` 来启用此功能。

### 包装策略

FSDP 是通过包装网络中的每个层来应用的。通常,包装是以嵌套方式应用的,其中完整的权重在每次前向传递后被丢弃,以便在下一层使用内存。**自动包装**策略是实现这一点的最简单方法,您不需要更改任何代码。您应该选择 `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP` 来包装一个 Transformer 层,并用 `fsdp_transformer_layer_cls_to_wrap` 指定要包装的层(例如 `BertLayer`)。

否则,您可以选择基于大小的包装策略:如果一层的参数超过一定数量,则应用 FSDP。通过设置 `fsdp_wrap_policy: SIZE_BASED_WRAP` 和 `fsdp_min_num_params` 来启用此功能,并将该参数设置为所需的大小阈值。

### 检查点

应该使用 `fsdp_state_dict_type: SHARDED_STATE_DICT` 来保存中间检查点,因为在 rank 0 上保存完整状态字典需要很长时间,通常会因为广播过程中无限期挂起而导致 `NCCL Timeout` 错误。您可以使用 [`~accelerate.Accelerator.load_state`] 方法加载分片状态字典以恢复训练。

```py
# 包含检查点的目录
accelerator.load_state("ckpt")
```

然而,当训练结束时,您希望保存完整状态字典,因为分片状态字典仅与 FSDP 兼容。

```py
if trainer.is_fsdp_enabled:
    trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT")

trainer.save_model(script_args.output_dir)
```

### TPU

[PyTorch XLA](https://pytorch.org/xla/release/2.1/index.html) 支持用于 TPUs 的 FSDP 训练,可以通过修改由 `accelerate config` 生成的 FSDP 配置文件来启用。除了上面指定的分片策略和包装选项外,您还可以将以下参数添加到文件中。

```yaml
xla: True # 必须设置为 True 以启用 PyTorch/XLA
xla_fsdp_settings: # XLA 特定的 FSDP 参数
xla_fsdp_grad_ckpt: True # 使用梯度检查点
```

[`xla_fsdp_settings`](https://github.com/pytorch/xla/blob/2e6e183e0724818f137c8135b34ef273dea33318/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py#L128) 允许您配置用于 FSDP 的额外 XLA 特定参数。

## 启动训练
FSDP 配置文件示例如下所示:

```yaml
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: "no"
fsdp_config:
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_backward_prefetch_policy: BACKWARD_PRE
  fsdp_cpu_ram_efficient_loading: true
  fsdp_forward_prefetch: false
  fsdp_offload_params: true
  fsdp_sharding_strategy: 1
  fsdp_state_dict_type: SHARDED_STATE_DICT
  fsdp_sync_module_states: true
  fsdp_transformer_layer_cls_to_wrap: BertLayer
  fsdp_use_orig_params: true
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```

要启动训练,请运行 [`accelerate launch`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-launch) 命令,它将自动使用您之前使用 `accelerate config` 创建的配置文件。

```bash
accelerate launch my-trainer-script.py
```

您也可以在命令行中直接指定 FSDP 参数:

```bash
accelerate launch --fsdp="full shard" --fsdp_config="path/to/fsdp_config/" my-trainer-script.py
```

## 下一步

FSDP 在大规模模型训练方面是一个强大的工具,您可以使用多个 GPU 或 TPU。通过分片模型参数、优化器和梯度状态,甚至在它们不活动时将其卸载到 CPU 上,FSDP 可以减少大规模训练的高成本。如果您希望了解更多信息,下面的内容可能会有所帮助:

- 深入参考 Accelerate 指南,了解有关 [FSDP](https://huggingface.co/docs/accelerate/usage_guides/fsdp)的更多信息。
- 阅读[介绍 PyTorch 完全分片数据并行(FSDP)API](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) 博文。
- 阅读[使用 FSDP 在云 TPU 上扩展 PyTorch 模型](https://pytorch.org/blog/scaling-pytorch-models-on-cloud-tpus-with-fsdp/)博文。
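上面的 `my-trainer-script.py` 只是一个占位名称。下面给出一个最小的示例脚本草图,用于说明通过 `accelerate launch` 启动时,[`Trainer`] 会自动从环境中读取 FSDP 配置;其中的模型和数据集仅为演示用的假设,请替换为您自己的任务。

```py
# my-trainer-script.py —— 最小示例草图(模型/数据集为演示假设)
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

dataset = load_dataset("glue", "mrpc", split="train")

def tokenize(batch):
    # 将句子对转换为定长输入,便于各 rank 上的批次形状一致
    return tokenizer(batch["sentence1"], batch["sentence2"], truncation=True, padding="max_length", max_length=128)

dataset = dataset.map(tokenize, batched=True)

args = TrainingArguments(output_dir="outputs", per_device_train_batch_size=8, num_train_epochs=1)
trainer = Trainer(model=model, args=args, train_dataset=dataset, tokenizer=tokenizer)
trainer.train()
```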
transformers/docs/source/zh/fsdp.md/0
{ "file_path": "transformers/docs/source/zh/fsdp.md", "repo_id": "transformers", "token_count": 4055 }
344
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 导出为 TFLite [TensorFlow Lite](https://www.tensorflow.org/lite/guide) 是一个轻量级框架,用于资源受限的设备上,如手机、嵌入式系统和物联网(IoT)设备,部署机器学习模型。TFLite 旨在在计算能力、内存和功耗有限的设备上优化和高效运行模型。模型以一种特殊的高效可移植格式表示,其文件扩展名为 `.tflite`。 🤗 Optimum 通过 `exporters.tflite` 模块提供将 🤗 Transformers 模型导出至 TFLite 格式的功能。请参考 [🤗 Optimum 文档](https://huggingface.co/docs/optimum/exporters/tflite/overview) 以获取支持的模型架构列表。 要将模型导出为 TFLite 格式,请安装所需的依赖项: ```bash pip install optimum[exporters-tf] ``` 请参阅 [🤗 Optimum 文档](https://huggingface.co/docs/optimum/main/en/exporters/tflite/usage_guides/export_a_model) 以查看所有可用参数,或者在命令行中查看帮助: ```bash optimum-cli export tflite --help ``` 运行以下命令,以从 🤗 Hub 导出模型的检查点(checkpoint),以 `google-bert/bert-base-uncased` 为例: ```bash optimum-cli export tflite --model google-bert/bert-base-uncased --sequence_length 128 bert_tflite/ ``` 你应该能在日志中看到导出进度以及生成的 `model.tflite` 文件的保存位置,如下所示: ```bash Validating TFLite model... -[✓] TFLite model output names match reference model (logits) - Validating TFLite Model output "logits": -[✓] (1, 128, 30522) matches (1, 128, 30522) -[x] values not close enough, max diff: 5.817413330078125e-05 (atol: 1e-05) The TensorFlow Lite export succeeded with the warning: The maximum absolute difference between the output of the reference model and the TFLite exported model is not within the set tolerance 1e-05: - logits: max diff = 5.817413330078125e-05. The exported model was saved at: bert_tflite ``` 上面的示例说明了从 🤗 Hub 导出检查点的过程。导出本地模型时,首先需要确保将模型的权重和分词器文件保存在同一目录(`local_path`)中。在使用 CLI(命令行)时,将 `local_path` 传递给 `model` 参数,而不是 🤗 Hub 上的检查点名称。
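导出完成后,可以用 TensorFlow Lite 的 `Interpreter` 加载并运行 `model.tflite`。下面是一个最小示例草图,假设已安装 `tensorflow`,且模型路径与上面命令的输出目录一致;这里用全零数组作为输入仅作演示:

```python
import numpy as np
import tensorflow as tf

# 加载导出的 TFLite 模型并分配张量
interpreter = tf.lite.Interpreter(model_path="bert_tflite/model.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# 为每个输入张量构造形状和类型匹配的全零演示输入
for detail in input_details:
    dummy = np.zeros(detail["shape"], dtype=detail["dtype"])
    interpreter.set_tensor(detail["index"], dummy)

interpreter.invoke()

logits = interpreter.get_tensor(output_details[0]["index"])
print(logits.shape)  # 例如 (1, 128, 30522)
```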
transformers/docs/source/zh/tflite.md/0
{ "file_path": "transformers/docs/source/zh/tflite.md", "repo_id": "transformers", "token_count": 1386 }
345
# Image Captioning (vision-encoder-text-decoder model) training example

The following example showcases how to finetune a vision-encoder-text-decoder model for image captioning using the JAX/Flax backend, leveraging the 🤗 Transformers library's [FlaxVisionEncoderDecoderModel](https://huggingface.co/docs/transformers/model_doc/vision-encoder-decoder#transformers.FlaxVisionEncoderDecoderModel).

JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. Models written in JAX/Flax are **immutable** and updated in a purely functional way which enables simple and efficient model parallelism.

`run_image_captioning_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it.

For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets#json-files and you will also find examples of these below.

### Download COCO dataset (2017)

This example uses the COCO dataset (2017) through a custom dataset script, which requires users to manually download the COCO dataset before training.

```bash
mkdir data
cd data
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/zips/test2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
wget http://images.cocodataset.org/annotations/image_info_test2017.zip
cd ..
```

### Create a model from a vision encoder model and a text decoder model

Next, we create a [FlaxVisionEncoderDecoderModel](https://huggingface.co/docs/transformers/model_doc/vision-encoder-decoder#transformers.FlaxVisionEncoderDecoderModel) instance from a pre-trained vision encoder ([ViT](https://huggingface.co/docs/transformers/model_doc/vit#transformers.FlaxViTModel)) and a pre-trained text decoder ([GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2#transformers.FlaxGPT2Model)):

```bash
python3 create_model_from_encoder_decoder_models.py \
    --output_dir model \
    --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
    --decoder_model_name_or_path openai-community/gpt2
```

### Train the model

Finally, we can run the example script to train the model:

```bash
python3 run_image_captioning_flax.py \
	--output_dir ./image-captioning-training-results \
	--model_name_or_path model \
	--dataset_name ydshieh/coco_dataset_script \
	--dataset_config_name=2017 \
	--data_dir $PWD/data \
	--image_column image_path \
	--caption_column caption \
	--do_train --do_eval --predict_with_generate \
	--num_train_epochs 1 \
	--eval_steps 500 \
	--learning_rate 3e-5 --warmup_steps 0 \
	--per_device_train_batch_size 32 \
	--per_device_eval_batch_size 32 \
	--overwrite_output_dir \
	--max_target_length 32 \
	--num_beams 8 \
	--preprocessing_num_workers 16 \
	--logging_steps 10 \
	--block_size 16384 \
	--push_to_hub
```

This should finish in about 1h30 on a Cloud TPU, with validation loss and ROUGE2 score of 2.0153 and 14.64 respectively after 1 epoch. Training statistics can be accessed on [Models](https://huggingface.co/ydshieh/image-captioning-training-results/tensorboard).
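### Generate captions with the trained model

Once training has finished, you can load the checkpoint and generate a caption for a new image. The sketch below is illustrative: the output directory, the saved processor files, and the example image URL are assumptions based on the training command above, so adjust them to your setup.

```python
import requests
from PIL import Image
from transformers import AutoTokenizer, ViTImageProcessor, FlaxVisionEncoderDecoderModel

# Assumption: the training run above saved the model, tokenizer, and image
# processor to this local directory (or use your Hub repo id instead)
checkpoint = "./image-captioning-training-results"
model = FlaxVisionEncoderDecoderModel.from_pretrained(checkpoint)
image_processor = ViTImageProcessor.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# Example image (assumption: any RGB image works here)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Flax models consume NumPy arrays; generate returns a beam-search output
pixel_values = image_processor(images=image, return_tensors="np").pixel_values
outputs = model.generate(pixel_values, max_length=32, num_beams=8)
caption = tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0].strip()
print(caption)
```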
transformers/examples/flax/image-captioning/README.md/0
{ "file_path": "transformers/examples/flax/image-captioning/README.md", "repo_id": "transformers", "token_count": 1084 }
346
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the Flax library models for sequence to sequence speech recognition. """ # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. import logging import os import sys import time from dataclasses import field from functools import partial from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union import datasets import evaluate import flax import jax import jax.numpy as jnp import numpy as np import optax from datasets import DatasetDict, load_dataset from flax import jax_utils, traverse_util from flax.jax_utils import pad_shard_unpad, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import HfApi from torch.utils.data import DataLoader from tqdm import tqdm import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, FlaxAutoModelForSpeechSeq2Seq, HfArgumentParser, Seq2SeqTrainingArguments, is_tensorboard_available, ) from transformers.file_utils import get_full_repo_name from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risk. check_min_version("4.45.0.dev0") require_version("datasets>=2.14.0", "To fix: pip install -r examples/flax/speech-recognition/requirements.txt") logger = logging.getLogger(__name__) @flax.struct.dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) feature_extractor_name: Optional[str] = field( default=None, metadata={"help": "feature extractor name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " "with private models)." 
}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) num_beams: Optional[int] = field( default=None, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to `model.generate`, " "which is used during evaluation." ) }, ) @flax.struct.dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) text_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."}, ) dataset_cache_dir: Optional[str] = field( default=None, metadata={"help": "Path to cache directory for saving and loading datasets"} ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." }, ) audio_column_name: str = field( default="audio", metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, ) text_column_name: str = field( default="text", metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"}, ) max_duration_in_seconds: float = field( default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}, ) min_duration_in_seconds: float = field( default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}, ) max_label_length: float = field( default=128, metadata={"help": "Truncate transcriptions that are longer `max_eval_length` tokens."}, ) pad_input_to_multiple_of: Optional[int] = field( default=None, metadata={ "help": "If set will pad the input sequence to a multiple of the provided value. " "This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the inputs to max length." }, ) pad_target_to_multiple_of: Optional[int] = field( default=None, metadata={ "help": "If set will pad the target sequence to a multiple of the provided value. " "This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the targets to max length." 
}, ) preprocessing_only: bool = field( default=False, metadata={ "help": "Whether to only do data preprocessing and skip training. " "This is especially useful when data preprocessing errors out in distributed training due to timeout. " "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` " "so that the cached datasets can consequently be loaded in distributed training" }, ) train_split_name: str = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) eval_split_name: str = field( default="validation", metadata={ "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'" }, ) do_lower_case: bool = field( default=True, metadata={"help": "Whether the target text should be lower cased."}, ) language: str = field( default=None, metadata={ "help": ( "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning " "only. For English speech recognition, it should be set to `None`." ) }, ) task: str = field( default="transcribe", metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."}, ) def shift_tokens_right(label_ids: np.array, decoder_start_token_id: int) -> np.ndarray: """ Shift label ids one token to the right. """ shifted_label_ids = np.zeros_like(label_ids) shifted_label_ids[:, 1:] = label_ids[:, :-1] shifted_label_ids[:, 0] = decoder_start_token_id return shifted_label_ids @flax.struct.dataclass class FlaxDataCollatorSpeechSeq2SeqWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor ([`Wav2Vec2Processor`]) The processor used for proccessing the data. decoder_start_token_id (:obj: `int`) The begin-of-sentence of the decoder. input_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned input sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). target_padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned target sequences (according to the model's padding side and padding index). See above for details. max_input_length (:obj:`float`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). max_target_length (:obj:`int`, `optional`): Maximum length of the ``labels`` of the returned list and optionally padding length (see above). pad_input_to_multiple_of (:obj:`int`, `optional`): If set will pad the input sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). pad_target_to_multiple_of (:obj:`int`, `optional`): If set will pad the target sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). """ processor: Any decoder_start_token_id: int input_padding: Union[bool, str] = "longest" target_padding: Union[bool, str] = "max_length" max_input_length: Optional[float] = None max_target_length: Optional[int] = None pad_input_to_multiple_of: Optional[int] = None pad_target_to_multiple_of: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: # split inputs and labels since they have to be of different lengths and need # different padding methods model_input_name = self.processor.model_input_names[0] # dataloader returns a list of features which we convert to a dict input_features = {model_input_name: [feature[model_input_name] for feature in features]} label_features = {"input_ids": [feature["labels"] for feature in features]} # reformat list to dict and set to pytorch format batch = self.processor.feature_extractor.pad( input_features, max_length=self.max_input_length, padding=self.input_padding, pad_to_multiple_of=self.pad_input_to_multiple_of, return_tensors="np", ) labels_batch = self.processor.tokenizer.pad( label_features, max_length=self.max_target_length, padding=self.target_padding, pad_to_multiple_of=self.pad_target_to_multiple_of, return_tensors="np", ) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways labels = labels_batch["input_ids"] if (labels[:, 0] == self.decoder_start_token_id).all().item(): labels = labels[:, 1:] labels_batch.attention_mask = labels_batch.attention_mask[:, 1:] decoder_input_ids = shift_tokens_right(labels, self.decoder_start_token_id) # replace padding with -100 to ignore correctly when computing the loss labels = np.ma.array(labels, mask=np.not_equal(labels_batch.attention_mask, 1)) labels = labels.filled(fill_value=-100) batch["labels"] = labels batch["decoder_input_ids"] = decoder_input_ids return batch class TrainState(train_state.TrainState): dropout_rng: jnp.ndarray def replicate(self): return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng)) def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) def create_learning_rate_fn( num_train_steps: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.ndarray]: """Returns a linear warmup, linear_decay learning rate function.""" warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def main(): # 1. Parse input arguments # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your JAX/Flax versions. send_example_telemetry("run_speech_recognition_seq2seq", model_args, data_args, framework="flax") # 2. Setup logging # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) # Set the verbosity to info of the Transformers logger. # We only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() logger.info("Training/evaluation parameters %s", training_args) # Check the output dir is valid if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use `--overwrite_output_dir` to overcome." ) # Handle the repository creation if training_args.push_to_hub: if training_args.hub_model_id is None: repo_name = get_full_repo_name( Path(training_args.output_dir).absolute().name, token=training_args.hub_token ) else: repo_name = training_args.hub_model_id # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # 3. Load dataset raw_datasets = DatasetDict() if training_args.do_train: raw_datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, cache_dir=data_args.dataset_cache_dir, num_proc=data_args.preprocessing_num_workers, token=True if model_args.use_auth_token else None, trust_remote_code=data_args.trust_remote_code, ) if training_args.do_eval: raw_datasets["eval"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, cache_dir=data_args.dataset_cache_dir, num_proc=data_args.preprocessing_num_workers, token=True if model_args.use_auth_token else None, trust_remote_code=data_args.trust_remote_code, ) if not training_args.do_train and not training_args.do_eval: raise ValueError( "Cannot not train and not do evaluation. At least one of training or evaluation has to be performed." ) if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError( f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--audio_column_name` to the correct audio column - one of " f"{', '.join(next(iter(raw_datasets.values())).column_names)}." 
) if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError( f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--text_column_name` to the correct text column - one of " f"{', '.join(next(iter(raw_datasets.values())).column_names)}." ) # 5. Load pretrained model, tokenizer, and feature extractor config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) feature_extractor = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) model = FlaxAutoModelForSpeechSeq2Seq.from_pretrained( model_args.model_name_or_path, config=config, dtype=getattr(jnp, model_args.dtype), cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") # 6. Resample speech dataset: `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. raw_datasets = raw_datasets.cast_column( data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) ) # 7. Preprocessing the datasets. # We need to read the audio files as arrays and tokenize the targets. max_input_length = int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) min_input_length = int(data_args.min_duration_in_seconds * feature_extractor.sampling_rate) max_label_length = ( data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length ) pad_input_to_multiple_of = data_args.pad_input_to_multiple_of pad_target_to_multiple_of = data_args.pad_target_to_multiple_of audio_column_name = data_args.audio_column_name num_workers = data_args.preprocessing_num_workers text_column_name = data_args.text_column_name model_input_name = feature_extractor.model_input_names[0] do_lower_case = data_args.do_lower_case if training_args.do_train and data_args.max_train_samples is not None: raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples)) if training_args.do_eval and data_args.max_eval_samples is not None: raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples)) if data_args.language is not None: # We only need to set the task id when the language is specified (i.e. 
in a multilingual setting) tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task) def prepare_dataset(batch): # process audio sample = batch[audio_column_name] inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]) # process audio length batch[model_input_name] = inputs.get(model_input_name)[0] batch["input_length"] = len(sample["array"]) # process targets input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name] batch["labels"] = tokenizer(input_str).input_ids return batch vectorized_datasets = raw_datasets.map( prepare_dataset, remove_columns=next(iter(raw_datasets.values())).column_names, num_proc=num_workers, desc="preprocess train and eval dataset", ) # filter training data with inputs longer than max_input_length def is_audio_in_length_range(length): return min_input_length < length < max_input_length vectorized_datasets = vectorized_datasets.filter( is_audio_in_length_range, num_proc=num_workers, input_columns=["input_length"], ) # for large datasets it is advised to run the preprocessing on a # single machine first with `args.preprocessing_only` since there will mostly likely # be a timeout when running the script in distributed mode. # In a second step `args.preprocessing_only` can then be set to `False` to load the # cached dataset if data_args.preprocessing_only: cache = {k: v.cache_files for k, v in vectorized_datasets.items()} logger.info(f"Data preprocessing finished. Files cached at {cache}.") return # 8. Load Metric metric = evaluate.load("wer", cache_dir=model_args.cache_dir) def compute_metrics(preds, labels): # replace padded labels by the padding token for idx in range(len(labels)): labels[idx][labels[idx] == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True) # we do not want to group tokens when computing the metrics label_str = tokenizer.batch_decode(labels, skip_special_tokens=True) wer = metric.compute(predictions=pred_str, references=label_str) return {"wer": wer} # 9. Save feature extractor, tokenizer and config feature_extractor.save_pretrained(training_args.output_dir) tokenizer.save_pretrained(training_args.output_dir) config.save_pretrained(training_args.output_dir) processor = AutoProcessor.from_pretrained(training_args.output_dir) data_collator = FlaxDataCollatorSpeechSeq2SeqWithPadding( processor=processor, decoder_start_token_id=model.config.decoder_start_token_id, input_padding="longest", target_padding="longest", max_target_length=max_label_length, pad_input_to_multiple_of=pad_input_to_multiple_of, pad_target_to_multiple_of=pad_target_to_multiple_of if pad_target_to_multiple_of else max_label_length, ) # Enable tensorboard only on the master node has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." 
) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) rng, dropout_rng = jax.random.split(rng) # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() steps_per_epoch = len(vectorized_datasets["train"]) // train_batch_size total_train_steps = steps_per_epoch * num_epochs # Create learning rate schedule linear_decay_lr_schedule_fn = create_learning_rate_fn( total_train_steps, training_args.warmup_steps, training_args.learning_rate, ) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layer_norm", "self_attn_layer_norm", "final_layer_norm", "encoder_attn_layer_norm"] layer_norm_named_params = { layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() } flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) # create adam optimizer adamw = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) # Setup train state state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng) # label smoothed cross entropy def loss_fn(logits, labels, label_smoothing_factor=0.0): """ The label smoothing implementation is adapted from Flax's official example: https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104 """ vocab_size = logits.shape[-1] confidence = 1.0 - label_smoothing_factor low_confidence = (1.0 - confidence) / (vocab_size - 1) normalizing_constant = -( confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20) ) soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence) loss = optax.softmax_cross_entropy(logits, soft_labels) loss = loss - normalizing_constant # ignore padded tokens from loss, i.e. 
where labels are not set to -100 padding_mask = labels >= 0 loss = loss * padding_mask loss = loss.sum() num_labels = padding_mask.sum() return loss, num_labels # Define gradient update step fn def train_step(state, batch, label_smoothing_factor=0.0): dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng) def compute_loss(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss, num_labels = loss_fn(logits, labels, label_smoothing_factor) return loss, num_labels grad_fn = jax.value_and_grad(compute_loss, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} return new_state, metrics # Define eval fn def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] loss, num_labels = loss_fn(logits, labels, label_smoothing_factor) num_labels = jax.lax.psum(num_labels, "batch") # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) metrics = {"loss": loss} return metrics # Define generation function num_beams = model_args.num_beams if model_args.num_beams is not None else model.config.num_beams gen_kwargs = {"max_length": max_label_length, "num_beams": num_beams} def generate_step(params, batch): model.params = params output_ids = model.generate(batch[model_input_name], attention_mask=batch.get("attention_mask"), **gen_kwargs) return output_ids.sequences # Create parallel version of the train and eval step p_train_step = jax.pmap( partial(train_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch", donate_argnums=(0,) ) p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch") p_generate_step = jax.pmap(generate_step, "batch") # Replicate the train state on each device state = state.replicate() logger.info("***** Running training *****") logger.info(f" Num examples = {len(vectorized_datasets['train'])}") logger.info(f" Num Epochs = {num_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}") logger.info(f" Total optimization steps = {total_train_steps}") train_time = 0 epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_start = time.time() train_metrics = [] # Generate an epoch by shuffling sampling indices from the train dataset and create a data loader vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(training_args.seed) train_loader = DataLoader( vectorized_datasets["train"], batch_size=train_batch_size, drop_last=True, collate_fn=data_collator, num_workers=training_args.dataloader_num_workers, ) # train for batch in tqdm(train_loader, desc="Training...", position=1, leave=False): batch = shard(batch.data) state, train_metric = p_train_step(state, batch) train_metrics.append(train_metric) train_time += time.time() - train_start train_metric = unreplicate(train_metric) epochs.write( f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) # ======================== Evaluating ============================== eval_metrics = [] eval_preds = [] eval_labels = [] eval_loader = DataLoader( vectorized_datasets["eval"], batch_size=eval_batch_size, drop_last=False, collate_fn=data_collator, num_workers=training_args.dataloader_num_workers, ) for batch in tqdm(eval_loader, desc="Evaluating...", position=2, leave=False): # Model forward labels = batch["labels"] metrics = pad_shard_unpad(p_eval_step, static_return=True)( state.params, batch.data, min_device_batch=per_device_eval_batch_size ) eval_metrics.append(metrics) # generation if training_args.predict_with_generate: generated_ids = pad_shard_unpad(p_generate_step)(state.params, batch.data) eval_preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"]))) eval_labels.extend(labels) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # compute WER metric wer_desc = "" if training_args.predict_with_generate: wer_metric = compute_metrics(eval_preds, eval_labels) eval_metrics.update(wer_metric) wer_desc = " ".join([f"Eval {key}: {value} |" for key, value in wer_metric.items()]) # Print metrics and update progress bar desc = f"Epoch... ({epoch + 1}/{num_epochs} | Eval Loss: {eval_metrics['loss']} | {wer_desc})" epochs.write(desc) epochs.desc = desc # Save metrics if has_tensorboard and jax.process_index() == 0: cur_step = epoch * (len(vectorized_datasets["train"]) // train_batch_size) write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step) # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: api.upload_folder( commit_message=f"Saving weights and logs of epoch {epoch}", folder_path=training_args.output_dir, repo_id=repo_id, repo_type="model", token=training_args.hub_token, ) if __name__ == "__main__": main()
transformers/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py/0
{
    "file_path": "transformers/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py",
    "repo_id": "transformers",
    "token_count": 15150
}
347
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata) @dataclass class PlotArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ csv_file: str = field( metadata={"help": "The csv file to plot."}, ) plot_along_batch: bool = field( default=False, metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."}, ) is_time: bool = field( default=False, metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."}, ) no_log_scale: bool = field( default=False, metadata={"help": "Disable logarithmic scale when plotting"}, ) is_train: bool = field( default=False, metadata={ "help": "Whether the csv file has training results or inference results. Defaults to inference results." }, ) figure_png_file: Optional[str] = field( default=None, metadata={"help": "Filename under which the plot will be saved. 
If unused no plot is saved."}, ) short_model_names: Optional[List[str]] = list_field( default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."} ) def can_convert_to_int(string): try: int(string) return True except ValueError: return False def can_convert_to_float(string): try: float(string) return True except ValueError: return False class Plot: def __init__(self, args): self.args = args self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}}) with open(self.args.csv_file, newline="") as csv_file: reader = csv.DictReader(csv_file) for row in reader: model_name = row["model"] self.result_dict[model_name]["bsz"].append(int(row["batch_size"])) self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"])) if can_convert_to_int(row["result"]): # value is not None self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = ( int(row["result"]) ) elif can_convert_to_float(row["result"]): # value is not None self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = ( float(row["result"]) ) def plot(self): fig, ax = plt.subplots() title_str = "Time usage" if self.args.is_time else "Memory usage" title_str = title_str + " for training" if self.args.is_train else title_str + " for inference" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("log") ax.set_yscale("log") for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter()) for model_name_idx, model_name in enumerate(self.result_dict.keys()): batch_sizes = sorted(set(self.result_dict[model_name]["bsz"])) sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"])) results = self.result_dict[model_name]["result"] (x_axis_array, inner_loop_array) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) label_model_name = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: y_axis_array = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int, ) else: y_axis_array = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, ) (x_axis_label, inner_loop_label) = ( ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz") ) x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)] plt.scatter( x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" ) plt.plot(x_axis_array, y_axis_array, "--") title_str += f" {label_model_name} vs." title_str = title_str[:-4] y_axis_label = "Time in s" if self.args.is_time else "Memory in MB" # plot plt.title(title_str) plt.xlabel(x_axis_label) plt.ylabel(y_axis_label) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file) else: plt.show() def main(): parser = HfArgumentParser(PlotArguments) plot_args = parser.parse_args_into_dataclasses()[0] plot = Plot(args=plot_args) plot.plot() if __name__ == "__main__": main()
transformers/examples/legacy/benchmarking/plot_csv_file.py/0
{
    "file_path": "transformers/examples/legacy/benchmarking/plot_csv_file.py",
    "repo_id": "transformers",
    "token_count": 2909
}
348
#!/usr/bin/env python import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _is_chinese_char(cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def is_chinese(word: str): # word like '180' or '身高' or '神' for char in word: char = ord(char) if not _is_chinese_char(char): return 0 return 1 def get_chinese_word(tokens: List[str]): word_set = set() for token in tokens: chinese_word = len(token) > 1 and is_chinese(token) if chinese_word: word_set.add(token) word_list = list(word_set) return word_list def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()): if not chinese_word_set: return bert_tokens max_word_len = max([len(w) for w in chinese_word_set]) bert_word = bert_tokens start, end = 0, len(bert_word) while start < end: single_word = True if is_chinese(bert_word[start]): l = min(end - start, max_word_len) for i in range(l, 1, -1): whole_word = "".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1, start + i): bert_word[j] = "##" + bert_word[j] start = start + i single_word = False break if single_word: start += 1 return bert_word def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer): ltp_res = [] for i in range(0, len(lines), 100): res = ltp_tokenizer.seg(lines[i : i + 100])[0] res = [get_chinese_word(r) for r in res] ltp_res.extend(res) assert len(ltp_res) == len(lines) bert_res = [] for i in range(0, len(lines), 100): res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512) bert_res.extend(res["input_ids"]) assert len(bert_res) == len(lines) ref_ids = [] for input_ids, chinese_word in zip(bert_res, ltp_res): input_tokens = [] for id in input_ids: token = bert_tokenizer._convert_id_to_token(id) input_tokens.append(token) input_tokens = add_sub_symbol(input_tokens, chinese_word) ref_id = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(input_tokens): if token[:2] == "##": clean_token = token[2:] # save chinese tokens' pos if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)): ref_id.append(i) ref_ids.append(ref_id) assert len(ref_ids) == len(bert_res) return ref_ids def main(args): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name, "r", encoding="utf-8") as f: data = f.readlines() data = [line.strip() for line in data if len(line) > 0 and not line.isspace()] # avoid delimiter like '\u2029' ltp_tokenizer = LTP(args.ltp) # faster in GPU device bert_tokenizer = BertTokenizer.from_pretrained(args.bert) ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer) with open(args.save_path, "w", encoding="utf-8") as f: data = [json.dumps(ref) + "\n" for ref in ref_ids] f.writelines(data) if __name__ == "__main__": parser = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path" ) parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer") parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res") args = parser.parse_args() main(args)
transformers/examples/legacy/run_chinese_ref.py/0
{
    "file_path": "transformers/examples/legacy/run_chinese_ref.py",
    "repo_id": "transformers",
    "token_count": 2389
}
349
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) T5_TINY = "patrickvonplaten/t5-tiny-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class TestTheRest(TestCasePlus): def run_eval_tester(self, model): input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source" output_file_name = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."] _dump_articles(input_file_name, articles) score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json") task = "translation_en_to_de" if model == T5_TINY else "summarization" testargs = f""" run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 """.split() with patch.object(sys, "argv", testargs): run_generate() assert Path(output_file_name).exists() # os.remove(Path(output_file_name)) # test one model to quickly (no-@slow) catch simple problems and do an # extensive testing of functionality with multiple models as @slow separately def test_run_eval(self): self.run_eval_tester(T5_TINY) # any extra models should go into the list here - can be slow @parameterized.expand([BART_TINY, MBART_TINY]) @slow def test_run_eval_slow(self, model): self.run_eval_tester(model) # testing with 2 models to validate: 1. translation (t5) 2. 
summarization (mbart) @parameterized.expand([T5_TINY, MBART_TINY]) @slow def test_run_eval_search(self, model): input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source" output_file_name = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() text = { "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"], "de": [ "Maschinelles Lernen ist großartig, oder?", "Ich esse gerne Bananen", "Morgen ist wieder ein toller Tag!", ], } tmp_dir = Path(self.get_auto_remove_tmp_dir()) score_path = str(tmp_dir / "scores.json") reference_path = str(tmp_dir / "val.target") _dump_articles(input_file_name, text["en"]) _dump_articles(reference_path, text["de"]) task = "translation_en_to_de" if model == T5_TINY else "summarization" testargs = f""" run_eval_search.py {model} {str(input_file_name)} {str(output_file_name)} --score_path {score_path} --reference_path {reference_path} --task {task} """.split() testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"]) with patch.object(sys, "argv", testargs): with CaptureStdout() as cs: run_search() expected_strings = [" num_beams | length_penalty", model, "Best score args"] un_expected_strings = ["Info"] if "translation" in task: expected_strings.append("bleu") else: expected_strings.extend(ROUGE_KEYS) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(output_file_name).exists() os.remove(Path(output_file_name))
transformers/examples/legacy/seq2seq/old_test_seq2seq_examples.py/0
{
    "file_path": "transformers/examples/legacy/seq2seq/old_test_seq2seq_examples.py",
    "repo_id": "transformers",
    "token_count": 2127
}
350
{ "en-ru": { "src": [ "Welsh AMs worried about 'looking like muppets'", "There is consternation among some AMs at a suggestion their title should change to MWPs (Member of the Welsh Parliament).", "It has arisen because of plans to change the name of the assembly to the Welsh Parliament.", "AMs across the political spectrum are worried it could invite ridicule.", "One Labour AM said his group was concerned \"it rhymes with Twp and Pwp.\"", "For readers outside of Wales: In Welsh twp means daft and pwp means poo.", "A Plaid AM said the group as a whole was \"not happy\" and has suggested alternatives.", "A Welsh Conservative said his group was \"open minded\" about the name change, but noted it was a short verbal hop from MWP to Muppet." ], "tgt": [ "Члены Национальной ассамблеи Уэльса обеспокоены, что \"выглядят как куклы\"", "Некоторые члены Национальной ассамблеи Уэльса в ужасе от предложения о том, что их наименование должно измениться на MPW (члены Парламента Уэльса).", "Этот вопрос был поднят в связи с планами по переименованию ассамблеи в Парламент Уэльса.", "Члены Национальной ассамблеи Уэльса всего политического спектра обеспокоены, что это может породить насмешки.", "Один из лейбористских членов Национальной ассамблеи Уэльса сказал, что его партия обеспокоена тем, что \"это рифмуется с Twp и Pwp\".", "Для читателей за предлами Уэльса: по-валлийски twp означает \"глупый\", а pwp означает \"какашка\".", "Член Национальной ассамблеи от Плайд сказал, что эта партия в целом \"не счастлива\" и предложил альтернативы.", "Представитель Консервативной партии Уэльса сказал, что его партия \"открыта\" к переименованию, но отметил, что между WMP и Muppet небольшая разница в произношении." ] }, "ru-en": { "src": [ "Названо число готовящихся к отправке в Донбасс новобранцев из Украины", "Официальный представитель Народной милиции самопровозглашенной Луганской Народной Республики (ЛНР) Андрей Марочко заявил, что зимой 2018-2019 года Украина направит в Донбасс не менее 3 тыс. новобранцев.", "По его словам, таким образом Киев планирует \"хоть как-то доукомплектовать подразделения\".", "\"Нежелание граждан Украины проходить службу в рядах ВС Украины, массовые увольнения привели к низкой укомплектованности подразделений\", - рассказал Марочко, которого цитирует \"РИА Новости\".", "Он также не исключил, что реальные цифры призванных в армию украинцев могут быть увеличены в случае необходимости.", "В 2014-2017 годах Киев начал так называемую антитеррористическую операцию (АТО), которую позже сменили на операцию объединенных сил (ООС).", "Предполагалось, что эта мера приведет к усилению роли украинских силовиков в урегулировании ситуации.", "В конце августа 2018 года ситуация в Донбассе обострилась из-за убийства главы ДНР Александра Захарченко." 
], "tgt": [ "The number of new Ukrainian recruits ready to go to Donbass has become public", "Official representative of the peoples’ militia of the self-proclaimed Lugansk People’s Republic Andrey Marochko claimed that Ukrainian will send at least 3 thousand new recruits to Donbass in winter 2018-2019.", "This is how Kyiv tries “at least somehow to staff the units,” he said.", "“The unwillingness of Ukrainian citizens to serve in the Ukraine’s military forces, mass resignments lead to low understaffing,” said Marochko cited by RIA Novosti.", "Also, he doesn’t exclude that the real numbers of conscripts in the Ukrainian army can be raised is necessary.", "In 2014-2017, Kyiv started so-called antiterrorist operation, that ws later changed to the united forces operation.", "This measure was supposed to strengthen the role of the Ukrainian military in settling the situation.", "In the late August 2018, the situation in Donbass escalated as the DNR head Aleksandr Zakharchenko was killed." ] }, "en-de": { "src": [ "Welsh AMs worried about 'looking like muppets'", "There is consternation among some AMs at a suggestion their title should change to MWPs (Member of the Welsh Parliament).", "It has arisen because of plans to change the name of the assembly to the Welsh Parliament.", "AMs across the political spectrum are worried it could invite ridicule.", "One Labour AM said his group was concerned \"it rhymes with Twp and Pwp.\"", "For readers outside of Wales: In Welsh twp means daft and pwp means poo.", "A Plaid AM said the group as a whole was \"not happy\" and has suggested alternatives.", "A Welsh Conservative said his group was \"open minded\" about the name change, but noted it was a short verbal hop from MWP to Muppet." ], "tgt": [ "Walisische Ageordnete sorgen sich \"wie Dödel auszusehen\"", "Es herrscht Bestürzung unter einigen Mitgliedern der Versammlung über einen Vorschlag, der ihren Titel zu MWPs (Mitglied der walisischen Parlament) ändern soll.", "Der Grund dafür waren Pläne, den Namen der Nationalversammlung in Walisisches Parlament zu ändern.", "Mitglieder aller Parteien der Nationalversammlung haben Bedenken, dass sie sich dadurch Spott aussetzen könnten.", "Ein Labour-Abgeordneter sagte, dass seine Gruppe \"sich mit Twp und Pwp reimt\".", "Hinweis für den Leser: „twp“ im Walisischen bedeutet „bescheuert“ und „pwp“ bedeutet „Kacke“.", "Ein Versammlungsmitglied von Plaid Cymru sagte, die Gruppe als Ganzes sei \"nicht glücklich\" und hat Alternativen vorgeschlagen.", "Ein walisischer Konservativer sagte, seine Gruppe wäre „offen“ für eine Namensänderung, wies aber darauf hin, dass es von „MWP“ (Mitglied des Walisischen Parlaments) nur ein kurzer verbaler Sprung zu „Muppet“ ist." ] }, "de-en": { "src": [ "Schöne Münchnerin 2018: Schöne Münchnerin 2018 in Hvar: Neun Dates", "Von az, aktualisiert am 04.05.2018 um 11:11", "Ja, sie will...", "\"Schöne Münchnerin\" 2018 werden!", "Am Nachmittag wartet erneut eine Überraschung auf unsere Kandidatinnen: sie werden das romantische Candlelight-Shooting vor der MY SOLARIS nicht alleine bestreiten, sondern an der Seite von Male-Model Fabian!", "Hvar - Flirten, kokettieren, verführen - keine einfachen Aufgaben für unsere Mädchen.", "Insbesondere dann, wenn in Deutschland ein Freund wartet.", "Dennoch liefern die neun \"Schöne Münchnerin\"-Kandidatinnen beim Shooting mit People-Fotograf Tuan ab und trotzen Wind, Gischt und Regen wie echte Profis." 
], "tgt": [ "The Beauty of Munich 2018: the Beauty of Munich 2018 in Hvar: Nine dates", "From A-Z, updated on 04/05/2018 at 11:11", "Yes, she wants to...", "to become \"The Beauty of Munich\" in 2018!", "In the afternoon there is another surprise waiting for our contestants: they will be competing for the romantic candlelight photo shoot at MY SOLARIS not alone, but together with a male-model Fabian!", "Hvar with its flirting, coquetting, and seduction is not an easy task for our girls.", "Especially when there is a boyfriend waiting in Germany.", "Despite dealing with wind, sprays and rain, the nine contestants of \"The Beauty of Munich\" behaved like real professionals at the photo shoot with People-photographer Tuan." ] } }
transformers/examples/legacy/seq2seq/test_data/fsmt/fsmt_val_data.json/0
{
    "file_path": "transformers/examples/legacy/seq2seq/test_data/fsmt/fsmt_val_data.json",
    "repo_id": "transformers",
    "token_count": 4034
}
351
## The relevant files are currently on a shared Google
## drive at https://drive.google.com/drive/folders/1kC0I2UGl2ltrluI9NqDjaQJGw5iliw_J
## Monitor for changes and eventually migrate to use the `datasets` library
curl -L 'https://drive.google.com/uc?export=download&id=1Jjhbal535VVz2ap4v4r_rN1UEHTdLK5P' \
| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > train.txt.tmp
curl -L 'https://drive.google.com/uc?export=download&id=1ZfRcQThdtAR5PPRjIDtrVP7BtXSCUBbm' \
| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > dev.txt.tmp
curl -L 'https://drive.google.com/uc?export=download&id=1u9mb7kNJHWQCWyweMDRMuTFoOHOfeBTH' \
| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > test.txt.tmp

export MAX_LENGTH=128
export BERT_MODEL=bert-base-multilingual-cased

python3 scripts/preprocess.py train.txt.tmp $BERT_MODEL $MAX_LENGTH > train.txt
python3 scripts/preprocess.py dev.txt.tmp $BERT_MODEL $MAX_LENGTH > dev.txt
python3 scripts/preprocess.py test.txt.tmp $BERT_MODEL $MAX_LENGTH > test.txt

cat train.txt dev.txt test.txt | cut -d " " -f 2 | grep -v "^$"| sort | uniq > labels.txt

export OUTPUT_DIR=germeval-model
export BATCH_SIZE=32
export NUM_EPOCHS=3
export SAVE_STEPS=750
export SEED=1

python3 run_ner.py \
--task_type NER \
--data_dir . \
--labels ./labels.txt \
--model_name_or_path $BERT_MODEL \
--output_dir $OUTPUT_DIR \
--max_seq_length $MAX_LENGTH \
--num_train_epochs $NUM_EPOCHS \
--per_gpu_train_batch_size $BATCH_SIZE \
--save_steps $SAVE_STEPS \
--seed $SEED \
--do_train \
--do_eval \
--do_predict
transformers/examples/legacy/token-classification/run.sh/0
{
    "file_path": "transformers/examples/legacy/token-classification/run.sh",
    "repo_id": "transformers",
    "token_count": 648
}
352
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Image classification examples

This directory contains 2 scripts that showcase how to fine-tune any model supported by the [`AutoModelForImageClassification` API](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoModelForImageClassification) (such as [ViT](https://huggingface.co/docs/transformers/main/en/model_doc/vit), [ConvNeXT](https://huggingface.co/docs/transformers/main/en/model_doc/convnext), [ResNet](https://huggingface.co/docs/transformers/main/en/model_doc/resnet), [Swin Transformer](https://huggingface.co/docs/transformers/main/en/model_doc/swin)...) using PyTorch. They can be used to fine-tune models both on [datasets from the hub](#using-datasets-from-hub) and on [your own custom data](#using-your-own-data).

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_classification_inference_widget.png" height="400" />

Try out the inference widget here: https://huggingface.co/google/vit-base-patch16-224

Content:
- [PyTorch version, Trainer](#pytorch-version-trainer)
- [PyTorch version, no Trainer](#pytorch-version-no-trainer)

## PyTorch version, Trainer

Based on the script [`run_image_classification.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification.py).

The script leverages the 🤗 [Trainer API](https://huggingface.co/docs/transformers/main_classes/trainer) to automatically take care of the training for you, running on distributed environments right away.

### Using datasets from Hub

Here we show how to fine-tune a Vision Transformer (`ViT`) on the [beans](https://huggingface.co/datasets/beans) dataset, to classify the disease type of bean leaves.

```bash
python run_image_classification.py \
    --dataset_name beans \
    --output_dir ./beans_outputs/ \
    --remove_unused_columns False \
    --label_column_name labels \
    --do_train \
    --do_eval \
    --push_to_hub \
    --push_to_hub_model_id vit-base-beans \
    --learning_rate 2e-5 \
    --num_train_epochs 5 \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 8 \
    --logging_strategy steps \
    --logging_steps 10 \
    --eval_strategy epoch \
    --save_strategy epoch \
    --load_best_model_at_end True \
    --save_total_limit 3 \
    --seed 1337
```

👀 See the results here: [nateraw/vit-base-beans](https://huggingface.co/nateraw/vit-base-beans).

Note that you can replace the model and dataset by simply setting the `model_name_or_path` and `dataset_name` arguments respectively, with any model or dataset from the [hub](https://huggingface.co/). For an overview of all possible arguments, we refer to the [docs](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) of the `TrainingArguments`, which can be passed as flags.

> If your model classification head dimensions do not fit the number of labels in the dataset, you can specify `--ignore_mismatched_sizes` to adapt it.
### Using your own data

To use your own dataset, there are 2 ways:
- you can either provide your own folders as `--train_dir` and/or `--validation_dir` arguments
- you can upload your dataset to the hub (possibly as a private repo, if you prefer), and simply pass the `--dataset_name` argument.

Below, we explain both in more detail.

#### Provide them as folders

If you provide your own folders with images, the script expects the following directory structure:

```bash
root/dog/xxx.png
root/dog/xxy.png
root/dog/[...]/xxz.png

root/cat/123.png
root/cat/nsdf3.png
root/cat/[...]/asd932_.png
```

In other words, you need to organize your images in subfolders, based on their class. You can then run the script like this:

```bash
python run_image_classification.py \
    --train_dir <path-to-train-root> \
    --output_dir ./outputs/ \
    --remove_unused_columns False \
    --do_train \
    --do_eval
```

Internally, the script will use the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature which will automatically turn the folders into 🤗 Dataset objects.

##### 💡 The above will split the train dir into training and evaluation sets
  - To control the split amount, use the `--train_val_split` flag.
  - To provide your own validation split in its own directory, you can pass the `--validation_dir <path-to-val-root>` flag.

#### Upload your data to the hub, as a (possibly private) repo

It's very easy (and convenient) to upload your image dataset to the hub using the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature available in 🤗 Datasets. Simply do the following:

```python
from datasets import load_dataset

# example 1: local folder
dataset = load_dataset("imagefolder", data_dir="path_to_your_folder")

# example 2: local files (supported formats are tar, gzip, zip, xz, rar, zstd)
dataset = load_dataset("imagefolder", data_files="path_to_zip_file")

# example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd)
dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip")

# example 4: providing several splits
dataset = load_dataset("imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]})
```

`ImageFolder` will create a `label` column, and the label name is based on the directory name.

Next, push it to the hub!

```python
# assuming you have run the huggingface-cli login command in a terminal
dataset.push_to_hub("name_of_your_dataset")

# if you want to push to a private repo, simply pass private=True:
dataset.push_to_hub("name_of_your_dataset", private=True)
```

and that's it! You can now train your model by simply setting the `--dataset_name` argument to the name of your dataset on the hub (as explained in [Using datasets from the 🤗 hub](#using-datasets-from-hub)).

More on this can also be found in [this blog post](https://huggingface.co/blog/image-search-datasets).

### Sharing your model on 🤗 Hub

0. If you haven't already, [sign up](https://huggingface.co/join) for a 🤗 account

1. Make sure you have `git-lfs` installed and git set up.

```bash
$ apt install git-lfs
$ git config --global user.email "you@example.com"
$ git config --global user.name "Your Name"
```

2. Log in with your HuggingFace account credentials using `huggingface-cli`:

```bash
$ huggingface-cli login
# ...follow the prompts
```

3. When running the script, pass the following arguments:

```bash
python run_image_classification.py \
    --push_to_hub \
    --push_to_hub_model_id <name-your-model> \
    ...
```

## PyTorch version, no Trainer

Based on the script [`run_image_classification_no_trainer.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py).

Like `run_image_classification.py`, this script allows you to fine-tune any of the models on the [hub](https://huggingface.co/models) on an image classification task. The main difference is that this script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like. It offers fewer options than the script with `Trainer` (for instance you can easily change the options for the optimizer or the dataloaders directly in the script), but still runs in a distributed setup and supports mixed precision by means of the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use the script normally after installing 🤗 `Accelerate`:

```bash
pip install git+https://github.com/huggingface/accelerate
```

You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run

```bash
accelerate config
```

and reply to the questions asked. Then

```bash
accelerate test
```

which will check that everything is ready for training. Finally, you can launch training with

```bash
accelerate launch run_image_classification_no_trainer.py --image_column_name img
```

This command is the same and will work for:

- single/multiple CPUs
- single/multiple GPUs
- TPUs

Note that this library is in alpha release so your feedback is more than welcome if you encounter any problem using it.

Regarding using custom data with this script, we refer to [using your own data](#using-your-own-data).
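Once a run has finished, a quick way to sanity-check the resulting checkpoint is the `image-classification` pipeline. The snippet below is only an illustrative sketch: it assumes the checkpoint was written to `./beans_outputs/` (the `--output_dir` used in the example command above, or any hub model id you pushed to) and that `path/to/leaf.jpg` is replaced by an image of your own.

```python
from transformers import pipeline

# Load the fine-tuned checkpoint; this can also be a hub model id
# (e.g. the repo created by --push_to_hub). The directory must contain
# both the saved model and its image processor.
classifier = pipeline("image-classification", model="./beans_outputs/")

# Run inference on a local image file (placeholder path, replace with your own).
predictions = classifier("path/to/leaf.jpg")

# Each prediction is a dict with a human-readable label and a confidence score.
for prediction in predictions:
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```

The same snippet should work for checkpoints produced by either script, provided the run saved both the model weights and the image processor to that directory.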
transformers/examples/pytorch/image-classification/README.md/0
{
    "file_path": "transformers/examples/pytorch/image-classification/README.md",
    "repo_id": "transformers",
    "token_count": 2873
}
353
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset without using HuggingFace Trainer. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import argparse import json import logging import math import os import random from itertools import chain from pathlib import Path import datasets import torch from accelerate import Accelerator, DistributedType from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset from huggingface_hub import HfApi from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.45.0.dev0") logger = get_logger(__name__) require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv, txt or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv, txt or a json file containing the validation data." 
) parser.add_argument( "--validation_split_percentage", default=5, help="The percentage of the train set used as validation set in case there's no validation split", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--block_size", type=int, default=None, help=( "Optional input sequence length after tokenization. The training dataset will be truncated in block of" " this size for training. Default to the model max input length for single sentence inputs (take into" " account special tokens)." ), ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." 
) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--trust_remote_code", action="store_true", help=( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ), ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--low_cpu_mem_usage", action="store_true", help=( "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. " "If passed, LLM loading time and RAM consumption will be benefited." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] if extension not in ["csv", "json", "txt"]: raise ValueError("`train_file` should be a csv, json or txt file.") if args.validation_file is not None: extension = args.validation_file.split(".")[-1] if extension not in ["csv", "json", "txt"]: raise ValueError("`validation_file` should be a csv, json or txt file.") if args.push_to_hub: if args.output_dir is None: raise ValueError("Need an `output_dir` to create a repo when `--push_to_hub` is passed.") return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator_log_kwargs = {} if args.with_tracking: accelerator_log_kwargs["log_with"] = args.report_to accelerator_log_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: # Retrieve of infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( args.dataset_name, args.dataset_config_name, trust_remote_code=args.trust_remote_code ) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[:{args.validation_split_percentage}%]", trust_remote_code=args.trust_remote_code, ) raw_datasets["train"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[{args.validation_split_percentage}%:]", trust_remote_code=args.trust_remote_code, ) else: data_files = {} dataset_args = {} if args.train_file is not None: data_files["train"] = args.train_file extension = args.train_file.split(".")[-1] if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.validation_file.split(".")[-1] if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{args.validation_split_percentage}%]", **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{args.validation_split_percentage}%:]", **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. 
# Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if args.config_name: config = AutoConfig.from_pretrained( args.config_name, trust_remote_code=args.trust_remote_code, ) elif args.model_name_or_path: config = AutoConfig.from_pretrained( args.model_name_or_path, trust_remote_code=args.trust_remote_code, ) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code ) elif args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, low_cpu_mem_usage=args.low_cpu_mem_usage, trust_remote_code=args.trust_remote_code, ) else: logger.info("Training new model from scratch") model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name]) with accelerator.main_process_first(): tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) if args.block_size is None: block_size = tokenizer.model_max_length if block_size > config.max_position_embeddings: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " f"Using block_size={min(1024, config.max_position_embeddings)} instead. You can change that default value by passing --block_size xxx." ) block_size = min(1024, config.max_position_embeddings) else: if args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({args.block_size}) is larger than the maximum length for the model " f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, and if the total_length < block_size we exclude this batch and return an empty dict. 
# We could add padding if the model supported it instead of this drop, you can customize this part to your needs. total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/process#map with accelerator.main_process_first(): lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=args.preprocessing_num_workers, load_from_cache_file=not args.overwrite_cache, desc=f"Grouping texts in chunks of {block_size}", ) train_dataset = lm_datasets["train"] eval_dataset = lm_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "layer_norm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps if overrode_max_train_steps else args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. if accelerator.distributed_type == DistributedType.TPU: model.tie_weights() # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how often we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers are initialized automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("clm_no_trainer", experiment_config) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "": checkpoint_path = args.resume_from_checkpoint path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last checkpoint_path = path path = os.path.basename(checkpoint_path) accelerator.print(f"Resumed from checkpoint: {checkpoint_path}") accelerator.load_state(checkpoint_path) # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if we resumed from a checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step,
batch in enumerate(active_dataloader): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size))) losses = torch.cat(losses) try: eval_loss = torch.mean(losses) perplexity = math.exp(eval_loss) except OverflowError: perplexity = float("inf") logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}") if args.with_tracking: accelerator.log( { "perplexity": perplexity, "eval_loss": eval_loss, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.with_tracking: accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump({"perplexity": perplexity}, f) if __name__ == "__main__": main()
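# Example invocation (a sketch with illustrative values; flag names are assumed from the `args.*`
# attributes used above — check `parse_args` earlier in this file for the authoritative list):
#
#   python run_clm_no_trainer.py \
#       --model_name_or_path gpt2 \
#       --per_device_train_batch_size 8 \
#       --block_size 1024 \
#       --num_train_epochs 3 \
#       --output_dir /tmp/clm_out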
transformers/examples/pytorch/language-modeling/run_clm_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/language-modeling/run_clm_no_trainer.py", "repo_id": "transformers", "token_count": 12859 }
354
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Question answering This folder contains several scripts that showcase how to fine-tune a 🤗 Transformers model on a question answering dataset, like SQuAD. ## Trainer-based scripts The [`run_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa.py), [`run_qa_beam_search.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search.py) and [`run_seq2seq_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_seq2seq_qa.py) leverage the 🤗 [Trainer](https://huggingface.co/transformers/main_classes/trainer.html) for fine-tuning. ### Fine-tuning BERT on SQuAD1.0 The [`run_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa.py) script allows you to fine-tune any model from our [hub](https://huggingface.co/models) (as long as its architecture has a `ForQuestionAnswering` version in the library) on a question-answering dataset (such as SQuAD, or any other QA dataset available in the `datasets` library, or your own csv/jsonlines files) as long as they are structured the same way as SQuAD. You might need to tweak the data processing inside the script if your data is structured differently. **Note:** This script only works with models that have a fast tokenizer (backed by the 🤗 Tokenizers library) as it uses special features of those tokenizers. You can check if your favorite model has a fast tokenizer in [this table](https://huggingface.co/transformers/index.html#supported-frameworks); if it doesn't, you can still use the old version of the script, which can be found [here](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering). Note that if your dataset contains samples with no possible answers (like SQuAD version 2), you need to pass along the flag `--version_2_with_negative`. This example code fine-tunes BERT on the SQuAD1.0 dataset. It runs in 24 min (with BERT-base) or 68 min (with BERT-large) on a single Tesla V100 16GB. ```bash python run_qa.py \ --model_name_or_path google-bert/bert-base-uncased \ --dataset_name squad \ --do_train \ --do_eval \ --per_device_train_batch_size 12 \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/debug_squad/ ``` Training with the previously defined hyper-parameters yields the following results: ```bash f1 = 88.52 exact_match = 81.22 ``` ### Fine-tuning XLNet with beam search on SQuAD The [`run_qa_beam_search.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search.py) script is only meant to fine-tune XLNet, which is a special encoder-only Transformer model. The example code below fine-tunes XLNet on the SQuAD1.0 and SQuAD2.0 datasets.
#### Command for SQuAD1.0: ```bash python run_qa_beam_search.py \ --model_name_or_path xlnet/xlnet-large-cased \ --dataset_name squad \ --do_train \ --do_eval \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir ./wwm_cased_finetuned_squad/ \ --per_device_eval_batch_size=4 \ --per_device_train_batch_size=4 \ --save_steps 5000 ``` #### Command for SQuAD2.0: ```bash export SQUAD_DIR=/path/to/SQUAD python run_qa_beam_search.py \ --model_name_or_path xlnet/xlnet-large-cased \ --dataset_name squad_v2 \ --do_train \ --do_eval \ --version_2_with_negative \ --learning_rate 3e-5 \ --num_train_epochs 4 \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir ./wwm_cased_finetuned_squad/ \ --per_device_eval_batch_size=2 \ --per_device_train_batch_size=2 \ --save_steps 5000 ``` ### Fine-tuning T5 on SQuAD2.0 The [`run_seq2seq_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_seq2seq_qa.py) script is meant for encoder-decoder (also called seq2seq) Transformer models, such as T5 or BART. These models are generative, rather than discriminative. This means that they learn to generate the correct answer, rather than predicting the start and end position of the tokens of the answer. This example code fine-tunes T5 on the SQuAD2.0 dataset. ```bash python run_seq2seq_qa.py \ --model_name_or_path google-t5/t5-small \ --dataset_name squad_v2 \ --context_column context \ --question_column question \ --answer_column answers \ --do_train \ --do_eval \ --per_device_train_batch_size 12 \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/debug_seq2seq_squad/ ``` ## Accelerate-based scripts Based on the scripts `run_qa_no_trainer.py` and `run_qa_beam_search_no_trainer.py`. Like `run_qa.py` and `run_qa_beam_search.py`, these scripts allow you to fine-tune any of the models supported on SQuAD or a similar dataset; the main difference is that they expose the bare training loop, to allow you to quickly experiment and add any customization you would like. They offer fewer options than the `Trainer`-based scripts (for instance, you can easily change the options for the optimizer or the dataloaders directly in the script), but still run in a distributed setup, on TPUs, and support mixed precision by leveraging the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use these scripts normally after installing 🤗 Accelerate: ```bash pip install git+https://github.com/huggingface/accelerate ``` then ```bash python run_qa_no_trainer.py \ --model_name_or_path google-bert/bert-base-uncased \ --dataset_name squad \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir ~/tmp/debug_squad ``` You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run ```bash accelerate config ``` and reply to the questions asked. Then run ```bash accelerate test ``` which will check that everything is ready for training.
Finally, you can launch training with ```bash accelerate launch run_qa_no_trainer.py \ --model_name_or_path google-bert/bert-base-uncased \ --dataset_name squad \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir ~/tmp/debug_squad ``` This command is the same and will work for: - a CPU-only setup - a setup with one GPU - distributed training with several GPUs (single or multi node) - training on TPUs Note that this library is in alpha release, so your feedback is more than welcome if you encounter any problems using it.
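As with the Trainer-based scripts, datasets containing unanswerable questions (such as SQuAD 2.0) need the `--version_2_with_negative` flag. A sketch, assuming `run_qa_no_trainer.py` exposes the same flag as `run_qa.py`:

```bash
accelerate launch run_qa_no_trainer.py \
  --model_name_or_path google-bert/bert-base-uncased \
  --dataset_name squad_v2 \
  --version_2_with_negative \
  --max_seq_length 384 \
  --doc_stride 128 \
  --output_dir ~/tmp/debug_squad_v2
```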
transformers/examples/pytorch/question-answering/README.md/0
{ "file_path": "transformers/examples/pytorch/question-answering/README.md", "repo_id": "transformers", "token_count": 2435 }
355
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pre-Training a 🤗 Wav2Vec2 model on unlabeled audio data""" import argparse import math import os from dataclasses import dataclass from pathlib import Path from typing import Dict, List, Optional, Union import datasets import torch from accelerate import Accelerator from accelerate.logging import get_logger from datasets import DatasetDict, concatenate_datasets, load_dataset from huggingface_hub import HfApi from torch.utils.data.dataloader import DataLoader from tqdm.auto import tqdm import transformers from transformers import ( AdamW, SchedulerType, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining, get_scheduler, is_wandb_available, set_seed, ) from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices from transformers.utils import send_example_telemetry logger = get_logger(__name__) def parse_args(): parser = argparse.ArgumentParser(description="Pretrain a Wav2Vec2 model on unlabeled audio data") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_names", nargs="+", type=str, required=True, help="The configuration names of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_split_names", nargs="+", type=str, required=True, help="The names of the training data set splits to use (via the datasets library).", ) parser.add_argument( "--trust_remote_code", action="store_true", help=( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ), ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--preprocessing_only", action="store_true", help="Only run the preprocessing script to be cached for future use", ) parser.add_argument( "--cache_dir", type=str, default=None, help="Where do you want to store the pretrained models downloaded from huggingface.co", ) parser.add_argument( "--validation_split_percentage", type=int, default=1, help="Percentage of training data that should be used for validation if no validation is present in dataset.", ) parser.add_argument( "--logging_steps", type=int, default=500, help="Number of steps between each logging", ) parser.add_argument( "--saving_steps", type=int, default=500, help="Number of steps between each model save", ) parser.add_argument( "--audio_column_name", type=str, default="audio", help="Column in the dataset that contains speech file path.
Defaults to 'audio'", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--train_cache_file_name", type=str, default=None, help="Path to the train cached file name", ) parser.add_argument( "--validation_cache_file_name", type=str, default=None, help="Path to the validation cached file name", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="If True, use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.") parser.add_argument( "--max_gumbel_temperature", type=float, default=2.0, help="Maximum temperature for gumbel softmax.", ) parser.add_argument( "--min_gumbel_temperature", type=float, default=0.5, help="Minimum temperature for gumbel softmax.", ) parser.add_argument( "--gumbel_temperature_decay", type=float, default=0.999995, help="Decay of gumbel temperature during training." ) parser.add_argument( "--max_duration_in_seconds", type=float, default=5.0, help="Filter out audio files that are longer than `max_duration_in_seconds` seconds", ) parser.add_argument( "--min_duration_in_seconds", type=float, default=3.0, help="Filter out audio files that are shorter than `min_duration_in_seconds` seconds", ) parser.add_argument( "--pad_to_multiple_of", type=int, default=None, help=( "If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the" " use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta)." 
), ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="Beta1 for AdamW optimizer", ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="Beta2 for AdamW optimizer", ) parser.add_argument( "--adam_epsilon", type=float, default=1e-8, help="Epsilon for AdamW optimizer", ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--mask_time_prob", type=float, default=None, help=( "Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked in the" " contrastive task. If omitted, will pull value from model config." ), ) parser.add_argument( "--mask_time_length", type=int, default=None, help=( "Length of each vector mask span to mask along the time axis in the contrastive task." " If omitted, will pull value from model config." ), ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args @dataclass class DataCollatorForWav2Vec2Pretraining: """ Data collator that will dynamically pad the inputs received and prepare masked indices for self-supervised pretraining. Args: model (:class:`~transformers.Wav2Vec2ForPreTraining`): The Wav2Vec2 model used for pretraining. The data collator needs to have access to config and ``_get_feat_extract_output_lengths`` function for correct padding. feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`): The processor used for processing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). mask_time_prob (:obj:`float`, `optional`, defaults to :obj:`0.65`): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked for the contrastive task. Note that overlap between masked sequences may decrease the actual percentage of masked vectors. The default value is taken from the original wav2vec 2.0 article (https://arxiv.org/abs/2006.11477), and results in about 49 percent of each sequence being masked on average. mask_time_length (:obj:`int`, `optional`, defaults to :obj:`10`): Length of each vector mask span to mask along the time axis in the contrastive task.
The default value originates from the original wav2vec 2.0 article and corresponds to the ``M`` variable mentioned there. """ model: Wav2Vec2ForPreTraining feature_extractor: Wav2Vec2FeatureExtractor padding: Union[bool, str] = "longest" pad_to_multiple_of: Optional[int] = None mask_time_prob: Optional[float] = 0.65 mask_time_length: Optional[int] = 10 def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # reformat list to dict and set to pytorch format batch = self.feature_extractor.pad( features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) device = batch["input_values"].device batch_size = batch["input_values"].shape[0] mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1]) # make sure masked sequence length is a Python scalar mask_indices_seq_length = int(mask_indices_seq_length) # make sure that no loss is computed on padded inputs if batch.get("attention_mask") is not None: # compute real output lengths according to convolution formula batch["sub_attention_mask"] = self.model._get_feature_vector_attention_mask( mask_indices_seq_length, batch["attention_mask"] ) features_shape = (batch_size, mask_indices_seq_length) # sample randomly masked indices mask_time_indices = _compute_mask_indices( features_shape, self.mask_time_prob, self.mask_time_length, attention_mask=batch.get("sub_attention_mask"), ) # sample negative indices sampled_negative_indices = _sample_negative_indices( features_shape, self.model.config.num_negatives, mask_time_indices=mask_time_indices, ) batch["mask_time_indices"] = torch.tensor(mask_time_indices, dtype=torch.long, device=device) batch["sampled_negative_indices"] = torch.tensor(sampled_negative_indices, dtype=torch.long, device=device) return batch def multiply_grads(params, c): """Multiplies grads by a constant *c*.""" for p in params: if p.grad is not None: if torch.is_tensor(c): c = c.to(p.grad.device) p.grad.data.mul_(c) def get_grad_norm(params, scale=1): """Compute grad norm given a gradient scale.""" total_norm = 0.0 for p in params: if p.grad is not None: param_norm = (p.grad.detach().data / scale).norm(2) total_norm += param_norm.item() ** 2 total_norm = total_norm**0.5 return total_norm def main(): # See all possible arguments in src/transformers/args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_wav2vec2_pretraining_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() # set up weights and biases if available if is_wandb_available(): import wandb wandb.init(project=args.output_dir.split("/")[-1]) else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub and not args.preprocessing_only: # Retrieve or infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # 1. Download and create train, validation dataset # We load all dataset configuration and dataset split pairs passed in # ``args.dataset_config_names`` and ``args.dataset_split_names`` datasets_splits = [] for dataset_config_name, train_split_name in zip(args.dataset_config_names, args.dataset_split_names): # load dataset dataset_split = load_dataset( args.dataset_name, dataset_config_name, split=train_split_name, cache_dir=args.cache_dir, trust_remote_code=args.trust_remote_code, ) datasets_splits.append(dataset_split) # Next, we concatenate all configurations and splits into a single training dataset raw_datasets = DatasetDict() if len(datasets_splits) > 1: raw_datasets["train"] = concatenate_datasets(datasets_splits).shuffle(seed=args.seed) else: raw_datasets["train"] = datasets_splits[0] # Take ``args.validation_split_percentage`` from the training dataset for the validation split num_validation_samples = raw_datasets["train"].num_rows * args.validation_split_percentage // 100 if num_validation_samples == 0: raise ValueError( "`args.validation_split_percentage` is less than a single sample " f"for {len(raw_datasets['train'])} training samples. Increase " "`args.validation_split_percentage`. " ) raw_datasets["validation"] = raw_datasets["train"].select(range(num_validation_samples)) raw_datasets["train"] = raw_datasets["train"].select(range(num_validation_samples, raw_datasets["train"].num_rows)) # 2. Now we preprocess the datasets including loading the audio, resampling and normalization # Thankfully, `datasets` takes care of automatically loading and resampling the audio, # so that we just need to set the correct target sampling rate and normalize the input # via the `feature_extractor` feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(args.model_name_or_path) # make sure that dataset decodes audio with correct sampling rate raw_datasets = raw_datasets.cast_column( args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) ) # only normalized-inputs-training is supported if not feature_extractor.do_normalize: raise ValueError( "Training is only supported for normalized inputs.
Make sure ``feature_extractor.do_normalize == True``" ) # set max & min audio length in number of samples max_length = int(args.max_duration_in_seconds * feature_extractor.sampling_rate) min_length = int(args.min_duration_in_seconds * feature_extractor.sampling_rate) def prepare_dataset(batch): sample = batch[args.audio_column_name] inputs = feature_extractor( sample["array"], sampling_rate=sample["sampling_rate"], max_length=max_length, truncation=True ) batch["input_values"] = inputs.input_values[0] batch["input_length"] = len(inputs.input_values[0]) return batch # load cached files via path cache_file_names = None if args.train_cache_file_name is not None: cache_file_names = {"train": args.train_cache_file_name, "validation": args.validation_cache_file_name} # load audio files into numpy arrays with accelerator.main_process_first(): vectorized_datasets = raw_datasets.map( prepare_dataset, num_proc=args.preprocessing_num_workers, remove_columns=raw_datasets["train"].column_names, cache_file_names=cache_file_names, ) if min_length > 0.0: vectorized_datasets = vectorized_datasets.filter( lambda x: x > min_length, num_proc=args.preprocessing_num_workers, input_columns=["input_length"], ) vectorized_datasets = vectorized_datasets.remove_columns("input_length") # for large datasets it is advised to run the preprocessing on a # single machine first with ``args.preprocessing_only`` since there will most likely # be a timeout when running the script in distributed mode. # In a second step ``args.preprocessing_only`` can then be set to `False` to load the # cached dataset if args.preprocessing_only: return # 3. Load model config = Wav2Vec2Config.from_pretrained(args.model_name_or_path) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm='layer'``" ) # initialize random model model = Wav2Vec2ForPreTraining(config) # Activate gradient checkpointing if needed if args.gradient_checkpointing: model.gradient_checkpointing_enable() # 4. Define data collator, optimizer and scheduler mask_time_prob = config.mask_time_prob if args.mask_time_prob is None else args.mask_time_prob mask_time_length = config.mask_time_length if args.mask_time_length is None else args.mask_time_length data_collator = DataCollatorForWav2Vec2Pretraining( model=model, feature_extractor=feature_extractor, pad_to_multiple_of=args.pad_to_multiple_of, mask_time_prob=mask_time_prob, mask_time_length=mask_time_length, ) train_dataloader = DataLoader( vectorized_datasets["train"], shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size, ) eval_dataloader = DataLoader( vectorized_datasets["validation"], collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer optimizer = AdamW( list(model.parameters()), lr=args.learning_rate, betas=[args.adam_beta1, args.adam_beta2], eps=args.adam_epsilon, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Scheduler and math around the number of training steps.
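# (For the default "linear" schedule, the learning rate warms up from 0 to args.learning_rate over
# num_warmup_steps updates and then decays linearly to 0 at max_train_steps; after warmup this is
# roughly lr(t) = learning_rate * (max_train_steps - t) / (max_train_steps - num_warmup_steps).)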
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # 5. Train total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(vectorized_datasets['train'])}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 for epoch in range(starting_epoch, args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): # compute num of losses num_losses = batch["mask_time_indices"].sum() sub_attention_mask = batch.pop("sub_attention_mask", None) sub_attention_mask = ( sub_attention_mask if sub_attention_mask is not None else torch.ones_like(batch["mask_time_indices"]) ) percent_masked = num_losses / sub_attention_mask.sum() # forward outputs = model(**batch) # divide loss by gradient accumulation steps since gradients # are accumulated for multiple backward passes in PyTorch loss = outputs.loss / args.gradient_accumulation_steps accelerator.backward(loss) # make sure that `num_losses` is summed for distributed training # and average gradients over losses of all devices if accelerator.state.num_processes > 1: num_losses = accelerator.gather_for_metrics(num_losses).sum() gradient_multiplier = accelerator.state.num_processes / num_losses multiply_grads(model.module.parameters(), gradient_multiplier) else: multiply_grads(model.parameters(), 1 / num_losses) # update step if (step + 1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: # compute grad norm for monitoring scale = ( accelerator.scaler._scale.item() if hasattr(accelerator, "scaler") and accelerator.scaler is not None else 1 ) if accelerator.state.num_processes > 1: grad_norm = get_grad_norm(model.module.parameters(), scale) else: grad_norm = get_grad_norm(model.parameters(), scale) # update parameters optimizer.step() optimizer.zero_grad() if not accelerator.optimizer_step_was_skipped: lr_scheduler.step() elif accelerator.is_local_main_process: progress_bar.write( f"Gradients have overflowed - skipping update step... Updating gradient scale to {scale}..." ) # update gumbel temperature gumbel_temperature = max( args.max_gumbel_temperature * args.gumbel_temperature_decay**completed_steps, args.min_gumbel_temperature, ) if hasattr(model, "module"): model.module.set_gumbel_temperature(gumbel_temperature) else: model.set_gumbel_temperature(gumbel_temperature) progress_bar.update(1) completed_steps += 1 # 6.
Log all results if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0: loss = loss.detach() outputs.contrastive_loss = outputs.contrastive_loss.detach() outputs.diversity_loss = outputs.diversity_loss.detach() if accelerator.state.num_processes > 1: loss = accelerator.gather_for_metrics(loss).sum() outputs.contrastive_loss = accelerator.gather_for_metrics(outputs.contrastive_loss).sum() outputs.diversity_loss = accelerator.gather_for_metrics(outputs.diversity_loss).sum() percent_masked = accelerator.gather_for_metrics(percent_masked).sum() train_logs = { "loss": (loss * args.gradient_accumulation_steps) / num_losses, "contrast_loss": outputs.contrastive_loss / num_losses, "div_loss": outputs.diversity_loss / num_losses, "%_mask_idx": percent_masked / accelerator.num_processes, "ppl": outputs.codevector_perplexity, "lr": torch.tensor(optimizer.param_groups[0]["lr"]), "temp": torch.tensor(gumbel_temperature), "grad_norm": torch.tensor(grad_norm), } log_str = "" for k, v in train_logs.items(): log_str += "| {}: {:.3e}".format(k, v.item()) if accelerator.is_local_main_process: progress_bar.write(log_str) if is_wandb_available(): wandb.log(train_logs) # save model every `args.saving_steps` steps if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0: if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process: api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) # if completed steps > `args.max_train_steps` stop if completed_steps >= args.max_train_steps: break # 7. Validate! model.eval() # init logs val_logs = { "val_loss": 0, "val_contrastive_loss": 0, "val_diversity_loss": 0, "val_num_losses": 0, } for step, batch in enumerate(eval_dataloader): with torch.no_grad(): batch.pop("sub_attention_mask", None) outputs = model(**batch) val_logs["val_loss"] += outputs.loss val_logs["val_contrastive_loss"] += outputs.contrastive_loss val_logs["val_diversity_loss"] += outputs.diversity_loss val_logs["val_num_losses"] += batch["mask_time_indices"].sum() # sum over devices in multi-processing if accelerator.num_processes > 1: val_logs = {k: accelerator.gather_for_metrics(v).sum() for k, v in val_logs.items()} val_logs = {k: v / val_logs["val_num_losses"] for k, v in val_logs.items()} log_str = "" for k, v in val_logs.items(): log_str += "| {}: {:.3e}".format(k, v.item()) if accelerator.is_local_main_process: progress_bar.write(log_str) if is_wandb_available(): wandb.log(val_logs) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if __name__ == "__main__": main()
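# Example invocation (a sketch with illustrative values — the config must pass the
# do_stable_layer_norm / feat_extract_norm == "layer" checks above, so a plain "base"-style
# group-norm config will be rejected; see `parse_args` for all options):
#
#   accelerate launch run_wav2vec2_pretraining_no_trainer.py \
#       --model_name_or_path <path-or-id-of-a-layer-norm-wav2vec2-config> \
#       --dataset_name librispeech_asr \
#       --dataset_config_names clean \
#       --dataset_split_names train.100 \
#       --output_dir ./wav2vec2-pretrained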
transformers/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py", "repo_id": "transformers", "token_count": 14055 }
356
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Finetuning a 🤗 Transformers model for sequence classification on GLUE.""" import argparse import json import logging import math import os import random from pathlib import Path import datasets import evaluate import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset from huggingface_hub import HfApi from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, PretrainedConfig, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.45.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") parser.add_argument( "--task_name", type=str, default=None, help="The name of the glue task to train on.", choices=list(task_to_keys.keys()), ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. 
Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--trust_remote_code", type=bool, default=False, help=( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ), ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' "Only applicable when `--with_tracking` is passed." 
), ) parser.add_argument( "--ignore_mismatched_sizes", action="store_true", help="Whether or not to enable loading a pretrained model whose head dimensions are different.", ) args = parser.parse_args() # Sanity checks if args.task_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_glue_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator = ( Accelerator(log_with=args.report_to, project_dir=args.output_dir) if args.with_tracking else Accelerator() ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: # Retrieve or infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset.
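# (Illustrative layout for a custom CSV — any two non-label columns work, e.g.:
#     label,sentence1,sentence2
#     1,A man is playing a guitar.,Someone plays an instrument.
# )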
if args.task_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset("nyu-mll/glue", args.task_name) else: # Loading the dataset from local csv or json file. data_files = {} if args.train_file is not None: data_files["train"] = args.train_file if args.validation_file is not None: data_files["validation"] = args.validation_file extension = (args.train_file if args.train_file is not None else args.validation_file).split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. # Labels if args.task_name is not None: is_regression = args.task_name == "stsb" if not is_regression: label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, trust_remote_code=args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token config.pad_token_id = tokenizer.pad_token_id model = AutoModelForSequenceClassification.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ignore_mismatched_sizes=args.ignore_mismatched_sizes, trust_remote_code=args.trust_remote_code, ) # Preprocessing the datasets if args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): logger.info( f"The configuration of the model provided the following label correspondence: {label_name_to_id}. " "Using it!" 
) label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: " f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." "\nIgnoring the model labels as a result.", ) elif args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if label_to_id is not None: model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in config.label2id.items()} elif args.task_name is not None and not is_regression: model.config.label2id = {l: i for i, l in enumerate(label_list)} model.config.id2label = {id: label for label, id in config.label2id.items()} padding = "max_length" if args.pad_to_max_length else False def preprocess_function(examples): # Tokenize the texts texts = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True) if "label" in examples: if label_to_id is not None: # Map labels to IDs (not necessary for GLUE tasks) result["labels"] = [label_to_id[l] for l in examples["label"]] else: # In all cases, rename the column to labels because the model will expect that. result["labels"] = examples["label"] return result with accelerator.main_process_first(): processed_datasets = raw_datasets.map( preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: if args.pad_to_max_length: # If padding was already done to max length, we use the default data collator that will just convert everything # to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps.
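# (Illustrative: with len(train_dataloader)=1,000 and gradient_accumulation_steps=2 there are 500
# update steps per epoch, so num_train_epochs=3 yields max_train_steps=1,500 unless
# --max_train_steps is passed explicitly.)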
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("glue_no_trainer", experiment_config) # Get the metric function if args.task_name is not None: metric = evaluate.load("glue", args.task_name) else: metric = evaluate.load("accuracy") # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": checkpoint_path = args.resume_from_checkpoint path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last checkpoint_path = path path = os.path.basename(checkpoint_path) accelerator.print(f"Resumed from checkpoint: {checkpoint_path}") accelerator.load_state(checkpoint_path) # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if load from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() samples_seen = 0 for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze() predictions, references = accelerator.gather((predictions, batch["labels"])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.num_processes > 1: if step == len(eval_dataloader) - 1: predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() logger.info(f"epoch {epoch}: {eval_metric}") if args.with_tracking: accelerator.log( { "accuracy" if args.task_name is not None else 
"glue": eval_metric, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.with_tracking: accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.task_name == "mnli": # Final evaluation on mismatched validation set eval_dataset = processed_datasets["validation_mismatched"] eval_dataloader = DataLoader( eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) eval_dataloader = accelerator.prepare(eval_dataloader) model.eval() for step, batch in enumerate(eval_dataloader): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) metric.add_batch( predictions=accelerator.gather(predictions), references=accelerator.gather(batch["labels"]), ) eval_metric = metric.compute() logger.info(f"mnli-mm: {eval_metric}") if args.output_dir is not None: all_results = {f"eval_{k}": v for k, v in eval_metric.items()} with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump(all_results, f) if __name__ == "__main__": main()
transformers/examples/pytorch/text-classification/run_glue_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/text-classification/run_glue_no_trainer.py", "repo_id": "transformers", "token_count": 12239 }
357
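The choice between default_data_collator and DataCollatorWithPadding in the script above is what switches between static and dynamic padding. A minimal sketch of the dynamic case, using an illustrative tokenizer checkpoint (the script itself takes the checkpoint from --model_name_or_path, so the name below is only an assumption for the example):

from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)

# Two features of different lengths, shaped like the output of preprocess_function above.
features = [
    {"input_ids": tokenizer("short example")["input_ids"], "labels": 0},
    {"input_ids": tokenizer("a noticeably longer example sentence for padding")["input_ids"], "labels": 1},
]
batch = collator(features)
# Both rows are padded to the same length, rounded up to a multiple of 8.
print(batch["input_ids"].shape)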
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A simple launcher script for TPU training Inspired by https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py :: >>> python xla_spawn.py --num_cores=NUM_CORES_YOU_HAVE YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) """ import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def parse_args(): """ Helper function parsing the command line options @retval ArgumentParser """ parser = ArgumentParser( description=( "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).") # positional parser.add_argument( "training_script", type=str, help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ), ) # rest from the training program parser.add_argument("training_script_args", nargs=REMAINDER) return parser.parse_args() def main(): args = parse_args() # Import training_script as a module. script_fpath = Path(args.training_script) sys.path.append(str(script_fpath.parent.resolve())) mod_name = script_fpath.stem mod = importlib.import_module(mod_name) # Patch sys.argv sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)] xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores) if __name__ == "__main__": main()
transformers/examples/pytorch/xla_spawn.py/0
{ "file_path": "transformers/examples/pytorch/xla_spawn.py", "repo_id": "transformers", "token_count": 887 }
358
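The launcher above imports the target script as a module, patches sys.argv, and calls its _mp_fn once per TPU core. A hedged sketch of the minimal contract a training script must satisfy to be launched this way (the file name and function bodies are illustrative, not something the launcher prescribes beyond the _mp_fn entry point):

# my_training_script.py -- illustrative target for xla_spawn.py
import sys

def main():
    # By the time this runs, sys.argv is [script, original args..., "--tpu_num_cores", "<N>"].
    print("worker argv:", sys.argv)

def _mp_fn(index):
    # xmp.spawn calls this once per core; `index` is the local ordinal.
    main()

if __name__ == "__main__":
    main()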
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BertExtAbs's checkpoints. The script looks like it is doing something trivial but it is not. The "weights" proposed by the authors are actually the entire model pickled. We need to load the model within the original codebase to be able to only save its `state_dict`. """ import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) SAMPLE_TEXT = "Hello world! cécé herlolip" BertAbsConfig = namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def convert_bertabs_checkpoints(path_to_checkpoints, dump_path): """Copy/paste and tweak the pre-trained weights provided by the creators of BertAbs for the internal architecture. """ # Instantiate the authors' model with the pre-trained weights config = BertAbsConfig( temp_dir=".", finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder="bert", max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, ) checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage) original = AbsSummarizer(config, torch.device("cpu"), checkpoints) original.eval() new_model = BertAbsSummarizer(config, torch.device("cpu")) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("convert the model") new_model.bert.load_state_dict(original.bert.state_dict()) new_model.decoder.load_state_dict(original.decoder.state_dict()) new_model.generator.load_state_dict(original.generator.state_dict()) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("Make sure that the models' outputs are identical") tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") # prepare the model inputs encoder_input_ids = tokenizer.encode("This is sample éàalj'-.") encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids))) encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0) decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.") decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids))) decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0) # failsafe to make sure the weights reset does not affect the # loaded weights. 
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
transformers/examples/research_projects/bertabs/convert_bertabs_original_pytorch_checkpoint.py/0
{ "file_path": "transformers/examples/research_projects/bertabs/convert_bertabs_original_pytorch_checkpoint.py", "repo_id": "transformers", "token_count": 2417 }
359
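The core of the conversion check above is a simple pattern: copy the weights, run identical inputs through both models, and compare outputs under a small tolerance. A self-contained toy sketch of that pattern, using plain linear layers rather than the BertAbs models themselves:

import torch
from torch import nn

original = nn.Linear(8, 4)
converted = nn.Linear(8, 4)
converted.load_state_dict(original.state_dict())  # stand-in for the weight copy above

x = torch.randn(2, 8)
with torch.no_grad():
    out_a, out_b = original(x), converted(x)

max_diff = torch.max(torch.abs(out_a - out_b)).item()
print(f"Maximum absolute difference between outputs: {max_diff:.2e}")
assert torch.allclose(out_a, out_b, atol=1e-3), "converted model diverges from the original"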
import logging import os import time from argparse import Namespace from pathlib import Path import datasets import torch from accelerate import Accelerator, DistributedType from accelerate.utils import ProjectConfiguration from arguments import TrainingArguments from datasets import load_dataset from huggingface_hub import Repository from torch.optim import AdamW from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, get_scheduler, set_seed class ConstantLengthDataset(IterableDataset): """ Iterable dataset that returns constant length chunks of tokens from stream of text files. Args: tokenizer (Tokenizer): The processor used for proccessing the data. dataset (dataset.Dataset): Dataset with text files. infinite (bool): If True the iterator is reset after dataset reaches end else stops. seq_length (int): Length of token sequences to return. num_of_sequences (int): Number of token sequences to keep in buffer. chars_per_token (int): Number of characters per token used to estimate number of tokens in text buffer. tokenized (bool): If true we use a pretokenized dataset. """ def __init__( self, tokenizer, dataset, infinite=False, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6, tokenized=False, ): self.tokenizer = tokenizer self.concat_token_id = tokenizer.bos_token_id self.dataset = dataset self.seq_length = seq_length self.epoch = 0 self.infinite = infinite self.current_size = 0 self.tokenized = tokenized if self.tokenized: self.max_buffer_size = seq_length * num_of_sequences self.content_field = "input_ids" else: self.max_buffer_size = seq_length * chars_per_token * num_of_sequences self.content_field = "content" def __iter__(self): iterator = iter(self.dataset) more_examples = True while more_examples: buffer, buffer_len = [], 0 while True: if buffer_len >= self.max_buffer_size: break try: buffer.append(next(iterator)[self.content_field]) buffer_len += len(buffer[-1]) except StopIteration: if self.infinite: iterator = iter(self.dataset) self.epoch += 1 logger.info(f"Dataset epoch: {self.epoch}") else: more_examples = False break if self.tokenized: tokenized_inputs = buffer else: tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"] all_token_ids = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id]) for i in range(0, len(all_token_ids), self.seq_length): input_ids = all_token_ids[i : i + self.seq_length] if len(input_ids) == self.seq_length: self.current_size += 1 yield torch.tensor(input_ids) def shuffle(self, buffer_size=1000): return ShufflerIterDataPipe(self, buffer_size=buffer_size) def setup_logging(args): project_name = args.model_ckpt.split("/")[-1] logger = logging.getLogger(__name__) log_dir = Path(args.save_dir) / "log/" log_dir.mkdir(exist_ok=True) filename = f"debug_{accelerator.process_index}.log" logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, handlers=[logging.FileHandler(log_dir / filename), logging.StreamHandler()], ) if accelerator.is_main_process: # we only want to setup logging once accelerator.init_trackers(project_name, vars(args)) run_name = accelerator.trackers[0].run.name logger.setLevel(logging.INFO) datasets.utils.logging.set_verbosity_info() 
transformers.utils.logging.set_verbosity_info() else: run_name = "" logger.setLevel(logging.ERROR) datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() return logger, run_name def create_dataloaders(args): ds_kwargs = {"streaming": True} train_data = load_dataset(args.dataset_name_train, split="train", **ds_kwargs) train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=args.seed) valid_data = load_dataset(args.dataset_name_valid, split="train", **ds_kwargs) train_dataset = ConstantLengthDataset( tokenizer, train_data, infinite=True, seq_length=args.seq_length, tokenized=args.tokenized ) valid_dataset = ConstantLengthDataset( tokenizer, valid_data, infinite=False, seq_length=args.seq_length, tokenized=args.tokenized ) train_dataset = train_dataset.shuffle(buffer_size=args.shuffle_buffer) train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True) eval_dataloader = DataLoader(valid_dataset, batch_size=args.valid_batch_size) return train_dataloader, eval_dataloader def get_grouped_params(model, args, no_decay=["bias", "ln_1.weight", "ln_2.weight", "ln_f.weight"]): params_with_wd, params_without_wd = [], [] for n, p in model.named_parameters(): if any(nd in n for nd in no_decay): params_without_wd.append(p) else: params_with_wd.append(p) return [ {"params": params_with_wd, "weight_decay": args.weight_decay}, {"params": params_without_wd, "weight_decay": 0.0}, ] def log_metrics(step, metrics): logger.info(f"Step {step}: {metrics}") if accelerator.is_main_process: accelerator.log(metrics, step) def compute_tflops(elapsed_time, accelerator, args): # TFLOPs formula (from Equation 3 in Section 5.1 of https://arxiv.org/pdf/2104.04473.pdf). config_model = accelerator.unwrap_model(model).config checkpoint_factor = 4 if args.gradient_checkpointing else 3 batch_size = args.train_batch_size * accelerator.state.num_processes * args.gradient_accumulation_steps factor = 24 * checkpoint_factor * batch_size * args.seq_length * config_model.n_layer * (config_model.n_embd**2) flops_per_iteration = factor * ( 1.0 + (args.seq_length / (6.0 * config_model.n_embd)) + (tokenizer.vocab_size / (16.0 * config_model.n_layer * config_model.n_embd)) ) tflops = flops_per_iteration / (elapsed_time * accelerator.state.num_processes * (10**12)) return tflops def evaluate(args): model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(batch, labels=batch) loss = outputs.loss.repeat(args.valid_batch_size) losses.append(accelerator.gather(loss)) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break losses = torch.cat(losses) loss = losses[: eval_dataloader.dataset.current_size].mean() try: perplexity = torch.exp(loss) except OverflowError: perplexity = float("inf") return loss.item(), perplexity.item() # Settings parser = HfArgumentParser(TrainingArguments) args = parser.parse_args() # Accelerator config = ProjectConfiguration(project_dir=args.save_dir, logging_dir="log") accelerator = Accelerator(log_with=["wandb", "tensorboard"], project_config=config) acc_state = {str(k): str(v) for k, v in accelerator.state.__dict__.items()} args = Namespace(**vars(args), **acc_state) samples_per_step = accelerator.state.num_processes * args.train_batch_size set_seed(args.seed) # Clone model repository if accelerator.is_main_process: hf_repo = Repository(args.save_dir, clone_from=args.model_ckpt) # Logging logger, run_name = setup_logging(args) logger.info(accelerator.state) # Checkout new 
branch on repo if accelerator.is_main_process: hf_repo.git_checkout(run_name, create_branch_ok=True) # Load model and tokenizer model = AutoModelForCausalLM.from_pretrained(args.save_dir) if args.gradient_checkpointing: model.gradient_checkpointing_enable() tokenizer = AutoTokenizer.from_pretrained(args.save_dir) # Load dataset and dataloader train_dataloader, eval_dataloader = create_dataloaders(args) # Prepare the optimizer and learning rate scheduler optimizer = AdamW(get_grouped_params(model, args), lr=args.learning_rate) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) accelerator.register_for_checkpointing(lr_scheduler) def get_lr(): return optimizer.param_groups[0]["lr"] # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(args.save_dir) if f.is_dir() and "step" in str(f)] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract the step of the checkpoint to continue from there training_difference = os.path.splitext(path)[0] resume_step = int(training_difference.replace("step_", "")) # Train model model.train() completed_steps = 0 t_start = time.time() loss_tracking = 0 for step, batch in enumerate(train_dataloader, start=1): if args.resume_from_checkpoint and step < resume_step: continue # we need to skip steps until we reach the resumed step loss = model(batch, labels=batch, use_cache=False).loss avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() loss_tracking += avg_loss.item() / args.gradient_accumulation_steps log_metrics(step, {"samples": step * samples_per_step, "loss_per_step/train": loss.item()}) loss = loss / args.gradient_accumulation_steps if step % args.gradient_accumulation_steps != 0: # Prevent backward from doing gradient all_reduce in every step if accelerator.distributed_type == DistributedType.MULTI_GPU: with model.no_sync(): accelerator.backward(loss) else: accelerator.backward(loss) else: lr = get_lr() accelerator.backward(loss) accelerator.clip_grad_norm_(model.parameters(), 1.0) optimizer.step() lr_scheduler.step() optimizer.zero_grad() elapsed_time = time.time() - t_start tflops = compute_tflops(elapsed_time, accelerator, args) log_metrics( step, { "steps": completed_steps, "loss/train": loss_tracking, "lr": lr, "tflops": tflops, "time_per_iteration": elapsed_time, }, ) t_start = time.time() loss_tracking = 0 completed_steps += 1 if step % args.save_checkpoint_steps == 0: logger.info("Evaluating and saving model checkpoint") eval_loss, perplexity = evaluate(args) log_metrics(step, {"loss/eval": eval_loss, "perplexity": perplexity}) accelerator.wait_for_everyone() save_dir = os.path.join(args.save_dir, f"step_{step}") accelerator.save_state(save_dir) if accelerator.is_main_process: hf_repo.push_to_hub(commit_message=f"step {step}") model.train() if completed_steps >= args.max_train_steps: break # Evaluate and save the last 
checkpoint logger.info("Evaluating and saving model after training") eval_loss, perplexity = evaluate(args) log_metrics(step, {"loss/eval": eval_loss, "perplexity": perplexity}) accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.save_dir, save_function=accelerator.save) save_dir = os.path.join(args.save_dir, f"step_{step}") accelerator.save_state(save_dir) if accelerator.is_main_process: hf_repo.push_to_hub(commit_message="final model")
transformers/examples/research_projects/codeparrot/scripts/codeparrot_training.py/0
{ "file_path": "transformers/examples/research_projects/codeparrot/scripts/codeparrot_training.py", "repo_id": "transformers", "token_count": 5418 }
360
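ConstantLengthDataset, defined at the top of the training script above, turns a stream of text records into fixed-length blocks of token ids separated by the BOS token. A hedged usage sketch with a tiny in-memory list standing in for the streamed corpus (the gpt2 checkpoint is only an illustration; the real script uses the checkpoint from its arguments):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
toy_stream = [{"content": "def add(a, b):\n    return a + b"} for _ in range(64)]

dataset = ConstantLengthDataset(tokenizer, toy_stream, infinite=False, seq_length=128)
for i, chunk in enumerate(dataset):
    print(chunk.shape)  # torch.Size([128]): one constant-length block of token ids
    if i == 2:
        break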
{ "activation": "gelu", "attention_dropout": 0.1, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "n_heads": 12, "n_layers": 6, "sinusoidal_pos_embds": true, "tie_weights_": true, "vocab_size": 28996 }
transformers/examples/research_projects/distillation/training_configs/distilbert-base-cased.json/0
{ "file_path": "transformers/examples/research_projects/distillation/training_configs/distilbert-base-cased.json", "repo_id": "transformers", "token_count": 134 }
361
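The fields in this JSON map one-to-one onto DistilBertConfig attributes, so the file can be loaded directly through the config API. A short sketch (the relative path is simply where the file sits in this folder):

from transformers import DistilBertConfig

config = DistilBertConfig.from_json_file("training_configs/distilbert-base-cased.json")
print(config.n_layers, config.dim, config.vocab_size)  # 6 768 28996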
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from configuration_hybrid_clip import HybridCLIPConfig from flax.core.frozen_dict import FrozenDict from transformers import FLAX_MODEL_MAPPING, FlaxCLIPVisionModel from transformers.modeling_flax_utils import FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPOutput from transformers.utils import logging logger = logging.get_logger(__name__) class FlaxHybridCLIPModule(nn.Module): config: HybridCLIPConfig dtype: jnp.dtype = jnp.float32 def setup(self): text_config = self.config.text_config vision_config = self.config.vision_config self.projection_dim = self.config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size text_module = FLAX_MODEL_MAPPING[self.config.text_config.__class__].module_class vision_module = FLAX_MODEL_MAPPING.get(self.config.vision_config.__class__, FlaxCLIPVisionModel).module_class self.text_model = text_module(text_config, dtype=self.dtype) self.vision_model = vision_module(vision_config, dtype=self.dtype) self.visual_projection = nn.Dense( self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False, ) self.text_projection = nn.Dense( self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False, ) self.logit_scale = self.param("logit_scale", jax.nn.initializers.ones, []) def __call__( self, input_ids=None, pixel_values=None, attention_mask=None, position_ids=None, token_type_ids=None, deterministic: bool = True, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True) text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True) # cosine similarity as logits logit_scale = jnp.exp(self.logit_scale) logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale logits_per_image = logits_per_text.T if not return_dict: return (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return FlaxCLIPOutput( 
logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) class FlaxHybridCLIP(FlaxPreTrainedModel): config_class = HybridCLIPConfig module_class = FlaxHybridCLIPModule def __init__( self, config: HybridCLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs, ): if input_shape is None: input_shape = ((1, 1), (1, config.vision_config.image_size, config.vision_config.image_size, 3)) module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensor input_ids = jnp.zeros(input_shape[0], dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape[0]) token_type_ids = jnp.ones_like(input_ids) attention_mask = jnp.ones_like(input_ids) pixel_values = jax.random.normal(rng, input_shape[1]) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} return self.module.init(rngs, input_ids, pixel_values, attention_mask, position_ids, token_type_ids)["params"] def __call__( self, input_ids, pixel_values, attention_mask=None, position_ids=None, token_type_ids=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(pixel_values, dtype=jnp.float32), jnp.array(attention_mask, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs, ) def get_text_features( self, input_ids, attention_mask=None, position_ids=None, token_type_ids=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train=False, ): r""" Args: input_ids (:obj:`numpy.ndarray` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ Returns: text_features (:obj:`jnp.ndarray` of shape :obj:`(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of text model. 
""" if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _get_features(module, input_ids, attention_mask, position_ids, token_type_ids, deterministic): text_outputs = module.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, deterministic=deterministic, ) pooled_output = text_outputs[1] text_features = module.text_projection(pooled_output) return text_features return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), not train, method=_get_features, rngs=rngs, ) def get_image_features( self, pixel_values, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train=False ): r""" Args: pixel_values (:obj:`numpy.ndarray` of shape :obj:`(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using :class:`~transformers.ImageFeatureExtractionMixin`. See :meth:`transformers.ImageFeatureExtractionMixin.__call__` for details. Returns: image_features (:obj:`jnp.ndarray` of shape :obj:`(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of vision model. """ # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _get_features(module, pixel_values, deterministic): vision_outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic) pooled_output = vision_outputs[1] # pooled_output image_features = module.visual_projection(pooled_output) return image_features return self.module.apply( {"params": params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), not train, method=_get_features, rngs=rngs, ) @classmethod def from_text_vision_pretrained( cls, text_model_name_or_path: str = None, vision_model_name_or_path: str = None, *model_args, **kwargs, ) -> FlaxPreTrainedModel: """ Params: text_model_name_or_path (:obj: `str`, `optional`): Information necessary to initiate the text model. Can be either: - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. - A path to a `directory` containing model weights saved using :func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `PyTorch checkpoint folder` (e.g, ``./pt_model``). In this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in a Flax model using the provided conversion scripts and loading the Flax model afterwards. vision_model_name_or_path (:obj: `str`, `optional`, defaults to `None`): Information necessary to initiate the vision model. Can be either: - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. - A path to a `directory` containing model weights saved using :func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `PyTorch checkpoint folder` (e.g, ``./pt_model``). 
In this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in a Flax model using the provided conversion scripts and loading the Flax model afterwards. model_args (remaining positional arguments, `optional`): All remaning positional arguments will be passed to the underlying model's ``__init__`` method. kwargs (remaining dictionary of keyword arguments, `optional`): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., :obj:`output_attentions=True`). - To update the text configuration, use the prefix `text_` for each configuration parameter. - To update the vision configuration, use the prefix `vision_` for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a :obj:`config` is provided or automatically loaded. Example:: >>> from transformers import FlaxHybridCLIP >>> # initialize a model from pretrained BERT and CLIP models. Note that the projection layers will be randomly initialized. >>> # If using CLIP's vision model the vision projection layer will be initialized using pre-trained weights >>> model = FlaxHybridCLIP.from_text_vision_pretrained('google-bert/bert-base-uncased', 'openai/clip-vit-base-patch32') >>> # saving model after fine-tuning >>> model.save_pretrained("./bert-clip") >>> # load fine-tuned model >>> model = FlaxHybridCLIP.from_pretrained("./bert-clip") """ kwargs_text = { argument[len("text_") :]: value for argument, value in kwargs.items() if argument.startswith("text_") } kwargs_vision = { argument[len("vision_") :]: value for argument, value in kwargs.items() if argument.startswith("vision_") } # remove text, vision kwargs from kwargs for key in kwargs_text.keys(): del kwargs["text_" + key] for key in kwargs_vision.keys(): del kwargs["vision_" + key] # Load and initialize the text and vision model text_model = kwargs_text.pop("model", None) if text_model is None: assert ( text_model_name_or_path is not None ), "If `model` is not defined as an argument, a `text_model_name_or_path` has to be defined" from transformers import FlaxAutoModel if "config" not in kwargs_text: from transformers import AutoConfig text_config = AutoConfig.from_pretrained(text_model_name_or_path) kwargs_text["config"] = text_config text_model = FlaxAutoModel.from_pretrained(text_model_name_or_path, *model_args, **kwargs_text) vision_model = kwargs_vision.pop("model", None) if vision_model is None: assert ( vision_model_name_or_path is not None ), "If `model` is not defined as an argument, a `vision_model_name_or_path` has to be defined" from transformers import FlaxAutoModel if "config" not in kwargs_vision: from transformers import AutoConfig vision_config = AutoConfig.from_pretrained(vision_model_name_or_path) kwargs_vision["config"] = vision_config vision_model = FlaxAutoModel.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision) # instantiate config with corresponding kwargs dtype = kwargs.pop("dtype", jnp.float32) config = HybridCLIPConfig.from_text_vision_configs(text_model.config, vision_model.config, **kwargs) # init model model = cls(config, *model_args, dtype=dtype, **kwargs) if vision_config.model_type == "clip": model.params["vision_model"]["vision_model"] = vision_model.params["vision_model"] model.params["visual_projection"]["kernel"] = 
vision_model.params["visual_projection"]["kernel"] else: model.params["vision_model"] = vision_model.params model.params["text_model"] = text_model.params return model
transformers/examples/research_projects/jax-projects/hybrid_clip/modeling_hybrid_clip.py/0
{ "file_path": "transformers/examples/research_projects/jax-projects/hybrid_clip/modeling_hybrid_clip.py", "repo_id": "transformers", "token_count": 7791 }
362
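Besides the joint __call__ shown above, the model exposes get_text_features and get_image_features for computing each projection separately. A hedged usage sketch; the checkpoints are the ones from the docstring example, and the channels-last pixel shape follows init_weights above, so treat the exact values as illustrative:

import jax.numpy as jnp
from modeling_hybrid_clip import FlaxHybridCLIP

model = FlaxHybridCLIP.from_text_vision_pretrained(
    "google-bert/bert-base-uncased", "openai/clip-vit-base-patch32"
)

input_ids = jnp.ones((1, 16), dtype="i4")
pixel_values = jnp.zeros((1, 224, 224, 3), dtype=jnp.float32)

text_embeds = model.get_text_features(input_ids)
image_embeds = model.get_image_features(pixel_values)
print(text_embeds.shape, image_embeds.shape)  # (1, projection_dim) each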
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def padding_tensor(sequences, padding_value, padding_side, sequence_length): if isinstance(padding_value, tuple): out_tensor = np.full((len(sequences), sequence_length, 2), padding_value) else: out_tensor = np.full((len(sequences), sequence_length), padding_value) for i, tensor in enumerate(sequences): if padding_side == "right": if isinstance(padding_value, tuple): out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length] else: out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length] else: if isinstance(padding_value, tuple): out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length] else: out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length] return out_tensor.tolist() def is_punctuation(char): cp = ord(char) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False @dataclass class DataCollatorForLukeTokenClassification(DataCollatorMixin): """ Data collator that will dynamically pad the inputs received, as well as the labels. Args: tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): The tokenizer used for encoding the data. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). label_pad_token_id (`int`, *optional*, defaults to -100): The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions). return_tensors (`str`): The type of Tensor to return. Allowable values are "np", "pt" and "tf". """ tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None label_pad_token_id: int = -100 return_tensors: str = "pt" def torch_call(self, features): import torch label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None batch = self.tokenizer.pad( features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, # Conversion to tensors will fail if we have labels as they are not of the same length yet. 
return_tensors="pt" if labels is None else None, ) if labels is None: return batch sequence_length = torch.tensor(batch["entity_ids"]).shape[1] padding_side = self.tokenizer.padding_side if padding_side == "right": batch[label_name] = [ list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels ] else: batch[label_name] = [ [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels ] ner_tags = [feature["ner_tags"] for feature in features] batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length) original_entity_spans = [feature["original_entity_spans"] for feature in features] batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length) batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()} return batch
transformers/examples/research_projects/luke/luke_utils.py/0
{ "file_path": "transformers/examples/research_projects/luke/luke_utils.py", "repo_id": "transformers", "token_count": 2049 }
363
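padding_tensor, defined above, handles both scalar label sequences and (start, end) span tuples, depending on whether padding_value is a tuple. A small demonstration of the two modes:

from luke_utils import padding_tensor

# Scalar mode: pad ner_tag sequences on the right with -1 up to length 5.
print(padding_tensor([[1, 2], [3]], -1, "right", 5))
# [[1, 2, -1, -1, -1], [3, -1, -1, -1, -1]]

# Tuple mode: pad (start, end) spans with the (-1, -1) sentinel up to length 3.
print(padding_tensor([[(0, 3)], [(0, 2), (4, 7)]], (-1, -1), "right", 3))
# [[[0, 3], [-1, -1], [-1, -1]], [[0, 2], [4, 7], [-1, -1]]]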
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class ImageEncoder(nn.Module): def __init__(self, args): super().__init__() model = torchvision.models.resnet152(pretrained=True) modules = list(model.children())[:-2] self.model = nn.Sequential(*modules) self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds]) def forward(self, x): # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 out = self.pool(self.model(x)) out = torch.flatten(out, start_dim=2) out = out.transpose(1, 2).contiguous() return out # BxNx2048 class JsonlDataset(Dataset): def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length): self.data = [json.loads(l) for l in open(data_path)] self.data_dir = os.path.dirname(data_path) self.tokenizer = tokenizer self.labels = labels self.n_classes = len(labels) self.max_seq_length = max_seq_length self.transforms = transforms def __len__(self): return len(self.data) def __getitem__(self, index): sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True)) start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1] sentence = sentence[: self.max_seq_length] label = torch.zeros(self.n_classes) label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1 image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB") image = self.transforms(image) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def get_label_frequencies(self): label_freqs = Counter() for row in self.data: label_freqs.update(row["label"]) return label_freqs def collate_fn(batch): lens = [len(row["sentence"]) for row in batch] bsz, max_seq_len = len(batch), max(lens) mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long) text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long) for i_batch, (input_row, length) in enumerate(zip(batch, lens)): text_tensor[i_batch, :length] = input_row["sentence"] mask_tensor[i_batch, :length] = 1 img_tensor = torch.stack([row["image"] for row in batch]) tgt_tensor = torch.stack([row["label"] for row in batch]) img_start_token = torch.stack([row["image_start_token"] for row in batch]) img_end_token = torch.stack([row["image_end_token"] for row in batch]) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def get_mmimdb_labels(): return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", 
"Music", "Musical", "Animation", "Biography", "Film-Noir", ] def get_image_transforms(): return transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=[0.46777044, 0.44531429, 0.40661017], std=[0.12221994, 0.12145835, 0.14380469], ), ] )
transformers/examples/research_projects/mm-imdb/utils_mmimdb.py/0
{ "file_path": "transformers/examples/research_projects/mm-imdb/utils_mmimdb.py", "repo_id": "transformers", "token_count": 2030 }
364
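JsonlDataset, collate_fn and the helpers above are meant to be wired together through a DataLoader: the dataset pairs tokenized text with an image tensor and a multi-hot genre label, and the collator pads the text side per batch. A hedged sketch; the jsonl path and tokenizer checkpoint are placeholders, not values this project fixes:

from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from utils_mmimdb import JsonlDataset, collate_fn, get_image_transforms, get_mmimdb_labels

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
labels = get_mmimdb_labels()
dataset = JsonlDataset("mmimdb/train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)

text, mask, image, img_start, img_end, target = next(iter(loader))
print(text.shape, image.shape, target.shape)  # (8, max_len_in_batch), (8, 3, 224, 224), (8, 23)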
""" Code to remove duplicate initializers to reduce ONNX model size. """ import os import numpy import onnx def _is_equal_tensor_proto(a, b): name_a = a.name name_b = b.name a.name = "" b.name = "" res = a == b a.name = name_a b.name = name_b return res def _node_replace_input_with(node_proto, name, new_name): for i, input_name in enumerate(node_proto.input): if input_name == name: node_proto.input.insert(i, new_name) node_proto.input.pop(i + 1) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g, name, new_name) _graph_replace_input_with(node_proto.attribute[1].g, name, new_name) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g, name, new_name) def _graph_replace_input_with(graph_proto, name, new_name): for n in graph_proto.node: _node_replace_input_with(n, name, new_name) def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace): inits_with_data = list(model.graph.initializer) inits = list(model_without_ext.graph.initializer) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i name_i = inits[i].name name_ref = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i]) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph, name_i, name_ref) def remove_dup_initializers(onnx_file_path): """ Removes duplicate initializers from the model to reduce its size. Writes a new file in the same directory as onnx_file_path and returns the path to that file. """ model_file_folder = os.path.dirname(onnx_file_path) model_file_name = os.path.basename(onnx_file_path) model = onnx.load(os.path.join(model_file_folder, model_file_name)) inits = list(model.graph.initializer) dup_set = set() dup_map = {} ind_to_replace = [] total_reduced_size = 0 for i in range(len(inits)): if i in dup_set: continue for j in range(i + 1, len(inits)): if j in dup_set: continue if _is_equal_tensor_proto(inits[i], inits[j]): dup_set.add(i) dup_set.add(j) dtype = inits[j].data_type mem_size = numpy.prod(inits[j].dims) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: ", dtype) total_reduced_size += mem_size name_i = inits[i].name name_j = inits[j].name if name_i in dup_map: dup_map[name_i].append(name_j) else: dup_map[name_i] = [name_j] ind_to_replace.append((j, i)) print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB") ind_to_replace = sorted(ind_to_replace) _remove_dup_initializers_from_model(model, model, ind_to_replace) optimized_model_file_name = "optimized_" + model_file_name new_model = os.path.join(model_file_folder, optimized_model_file_name) onnx.save(model, new_model) return new_model
transformers/examples/research_projects/onnx/summarization/bart_onnx/reduce_onnx_size.py/0
{ "file_path": "transformers/examples/research_projects/onnx/summarization/bart_onnx/reduce_onnx_size.py", "repo_id": "transformers", "token_count": 1732 }
365
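remove_dup_initializers takes the path to an exported ONNX file, writes an optimized_<name> copy next to it, and returns the new path. A short usage sketch; the input file name is a placeholder and the import simply mirrors this module's file name:

import onnx
from reduce_onnx_size import remove_dup_initializers

optimized_path = remove_dup_initializers("bart_summarization.onnx")
model = onnx.load(optimized_path)
print(optimized_path, len(model.graph.initializer))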