Columns:
text: string (lengths 7 to 1.24M)
id: string (lengths 14 to 166)
metadata: dict
__index_level_0__: int64 (values 0 to 519)
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 3735, "logprob": -8.5625, "text": "Test" }, { "id": 2159, "logprob": -10.78125, "text": "request" } ], "seed": 0, "tokens": [ { "id": 288, "logprob": -0.2854004, "special": false, "text": "ing" }, { "id": 264, "logprob": -0.38061523, "special": false, "text": " a" }, { "id": 633, "logprob": -0.09301758, "special": false, "text": " new" }, { "id": 4480, "logprob": -0.26782227, "special": false, "text": " feature" }, { "id": 297, "logprob": -0.8510742, "special": false, "text": " in" }, { "id": 272, "logprob": -0.13464355, "special": false, "text": " the" }, { "id": 2039, "logprob": 0.0, "special": false, "text": " game" }, { "id": 28723, "logprob": -0.89990234, "special": false, "text": "." }, { "id": 13, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.10632324, "special": false, "text": "\n" } ], "top_tokens": null }, "generated_text": "Test requesting a new feature in the game.\n\n" }
text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_all_params.json", "repo_id": "text-generation-inference", "token_count": 1042 }
207
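The snapshot above is a TGI generation record: the prefill tokens, the sampled tokens with their log-probabilities, and the final text. A minimal Python sketch of how such a snapshot can be sanity-checked; the helper name and the consistency rules are mine, inferred from the fields above:

```python
import json

def check_snapshot(snapshot: dict) -> None:
    """Basic consistency checks for a generation snapshot like the one above."""
    details = snapshot["details"]
    tokens = details["tokens"]
    # The reported token count must match the decoded token list.
    assert details["generated_tokens"] == len(tokens)
    # Concatenating the per-token texts reproduces the generated suffix;
    # with return_full_text the prompt is prepended to generated_text.
    suffix = "".join(t["text"] for t in tokens)
    assert snapshot["generated_text"].endswith(suffix)
    assert details["finish_reason"] in {"length", "eos_token", "stop_sequence"}

# check_snapshot(json.loads(raw_record_text))  # raw_record_text: the JSON above
```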
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 5, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": 0, "tokens": [ { "id": 926, "logprob": -4.3554688, "special": false, "text": " To" }, { "id": 18295, "logprob": -7.7734375, "special": false, "text": " sell" }, { "id": 7868, "logprob": -3.9257812, "special": false, "text": " things" }, { "id": 260, "logprob": -2.4179688, "special": false, "text": "." }, { "id": 1, "logprob": 0.0, "special": true, "text": "</s>" } ] }, "generated_text": "To sell things." }
text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json", "repo_id": "text-generation-inference", "token_count": 532 }
208
{ "choices": [ { "delta": { "content": null, "role": "assistant", "tool_calls": { "function": { "arguments": "</s>", "name": null }, "id": "", "index": 0, "type": "function" } }, "finish_reason": "eos_token", "index": 0, "logprobs": null } ], "created": 1712788218, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }
text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_stream.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_stream.json", "repo_id": "text-generation-inference", "token_count": 318 }
209
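The record above is a single streamed chat-completion chunk carrying a tool-call delta. A hedged sketch of how a client might fold such chunks back into one arguments string, assuming every chunk has the same shape as this snapshot (real streams may also carry plain content deltas):

```python
def accumulate_tool_arguments(chunks: list) -> str:
    """Concatenate the streamed `function.arguments` fragments of a tool call."""
    arguments = ""
    for chunk in chunks:
        delta = chunk["choices"][0]["delta"]
        tool_call = delta.get("tool_calls")
        if tool_call and tool_call["function"]["arguments"] is not None:
            arguments += tool_call["function"]["arguments"]
    return arguments
```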
import pytest @pytest.fixture(scope="module") def flash_llama_fp8_handle(launcher): with launcher("meta-llama/Meta-Llama-3-8B", num_shard=2, quantize="fp8") as handle: yield handle @pytest.fixture(scope="module") async def flash_llama_fp8(flash_llama_fp8_handle): await flash_llama_fp8_handle.health(300) return flash_llama_fp8_handle.client @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_fp8(flash_llama_fp8, response_snapshot): response = await flash_llama_fp8.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response.generated_text == " for the 2019-2020 school year" assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_fp8_all_params(flash_llama_fp8, response_snapshot): response = await flash_llama_fp8.generate( "Test request", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_fp8_load(flash_llama_fp8, generate_load, response_snapshot): responses = await generate_load( flash_llama_fp8, "Test request", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert responses[0].generated_text == " for the 2019-2020 school year" assert all( [r.generated_text == responses[0].generated_text for r in responses] ), f"Different messages : {[r.generated_text for r in responses]}" assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_llama_fp8.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_llama_fp8.py", "repo_id": "text-generation-inference", "token_count": 802 }
210
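test_flash_llama_fp8_load above relies on a `generate_load` fixture provided by the test harness. A hypothetical sketch of what such a helper could look like (the real fixture lives in conftest.py and may differ): fire n identical requests concurrently and collect the responses.

```python
import asyncio

async def generate_load(client, prompt: str, max_new_tokens: int, n: int):
    """Send `n` identical generate requests concurrently and return all responses."""
    futures = [
        client.generate(prompt, max_new_tokens=max_new_tokens)
        for _ in range(n)
    ]
    return await asyncio.gather(*futures)
```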
import pytest import requests from pydantic import BaseModel from typing import List @pytest.fixture(scope="module") def llama_grammar_handle(launcher): with launcher( "TinyLlama/TinyLlama-1.1B-Chat-v1.0", num_shard=1, disable_grammar_support=False, use_flash_attention=False, max_batch_prefill_tokens=3000, ) as handle: yield handle @pytest.fixture(scope="module") async def llama_grammar(llama_grammar_handle): await llama_grammar_handle.health(300) return llama_grammar_handle.client @pytest.mark.release @pytest.mark.asyncio async def test_grammar_response_format_llama_json(llama_grammar, response_snapshot): class Weather(BaseModel): unit: str temperature: List[int] # send the request response = requests.post( f"{llama_grammar.base_url}/v1/chat/completions", headers=llama_grammar.headers, json={ "model": "tgi", "messages": [ { "role": "system", "content": f"Respond to the users questions and answer them in the following format: {Weather.schema()}", }, { "role": "user", "content": "What's the weather like the next 3 days in San Francisco, CA?", }, ], "seed": 42, "max_tokens": 500, "response_format": {"type": "json_object", "value": Weather.schema()}, }, ) chat_completion = response.json() called = chat_completion["choices"][0]["message"]["content"] assert response.status_code == 200 assert ( called == '{\n "temperature": [\n 35,\n 34,\n 36\n ],\n "unit": "°c"\n}' ) assert chat_completion == response_snapshot @pytest.mark.release @pytest.mark.asyncio async def test_grammar_response_format_llama_error_if_tools_not_installed( llama_grammar, ): class Weather(BaseModel): unit: str temperature: List[int] # send the request response = requests.post( f"{llama_grammar.base_url}/v1/chat/completions", headers=llama_grammar.headers, json={ "model": "tgi", "messages": [ { "role": "system", "content": f"Respond to the users questions and answer them in the following format: {Weather.schema()}", }, { "role": "user", "content": "What's the weather like the next 3 days in San Francisco, CA?", }, ], "seed": 42, "max_tokens": 500, "tools": [], "response_format": {"type": "json_object", "value": Weather.schema()}, }, ) # 422 means the server was unable to process the request because it contains invalid data. assert response.status_code == 422 assert response.json() == { "error": "Tool error: Grammar and tools are mutually exclusive", "error_type": "tool_error", }
text-generation-inference/integration-tests/models/test_grammar_response_format_llama.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_grammar_response_format_llama.py", "repo_id": "text-generation-inference", "token_count": 1493 }
211
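The grammar test above constrains the model output with a JSON schema derived from a pydantic model. A small sketch of the payload fragment it builds (pydantic v1-style `.schema()`, as in the test; the helper name is mine):

```python
from typing import List
from pydantic import BaseModel

class Weather(BaseModel):
    unit: str
    temperature: List[int]

def json_response_format(model):
    """Build the grammar-constrained response_format used in the request above."""
    return {"type": "json_object", "value": model.schema()}

# e.g. {"type": "json_object", "value": {"title": "Weather", "type": "object", ...}}
payload_fragment = json_response_format(Weather)
```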
use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(tag = "model_type")] #[serde(rename_all = "snake_case")] pub struct LlavaNext { pub(crate) text_config: TextConfig, pub(crate) vision_config: VisionConfig, pub(crate) image_grid_pinpoints: Vec<(usize, usize)>, } fn get_anyres_image_grid_shape( height: usize, width: usize, grid_pinpoints: &[(usize, usize)], patch_size: usize, ) -> (usize, usize) { let (height, width) = select_best_resolution(height, width, grid_pinpoints); (height / patch_size, width / patch_size) } /// Selects the best resolution from a list of possible resolutions based on the original size. /// This is done by calculating the effective and wasted resolution for each possible resolution. /// The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution. fn select_best_resolution( original_height: usize, original_width: usize, possible_resolutions: &[(usize, usize)], ) -> (usize, usize) { let mut best_fit = None; let mut max_effective_resolution = 0; let mut min_wasted_resolution = f32::NEG_INFINITY; for (height, width) in possible_resolutions { let wscale = *width as f32 / original_width as f32; let hscale = *height as f32 / original_height as f32; // f32 partial ord. let scale = if wscale > hscale { hscale } else { wscale }; let downscaled_width = (*width as f32 * scale) as usize; let downscaled_height = (*height as f32 * scale) as usize; let effective_resolution = std::cmp::min( downscaled_width * downscaled_height, original_width * original_height, ); let wasted_resolution = (width * height) - effective_resolution; if effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && (wasted_resolution as f32) < min_wasted_resolution) { max_effective_resolution = effective_resolution; min_wasted_resolution = wasted_resolution as f32; best_fit = Some((*height, *width)); } } best_fit.unwrap_or((original_height, original_width)) } fn get_unpadded_features( height: usize, width: usize, npatches: usize, num_patch_height: usize, num_patch_width: usize, ) -> (usize, usize) { let current_height = npatches * num_patch_height; let current_width = npatches * num_patch_width; let aspect_ratio: f64 = width as f64 / height as f64; let current_aspect_ratio: f64 = current_width as f64 / current_height as f64; let (current_height, current_width) = if aspect_ratio > current_aspect_ratio { let new_height = (height * current_width) / width; let padding = (current_height - new_height) / 2; (current_height - (2 * padding), current_width) } else { let new_width = (width * current_height) / height; let padding = (current_width - new_width) / 2; (current_height, current_width - (2 * padding)) }; let unpadded_features = current_height * current_width; let newline_features = current_height; (unpadded_features, newline_features) } impl LlavaNext { pub fn get_number_of_features(&self, height: usize, width: usize) -> usize { let image_size = self.vision_config.image_size; let patch_size = self.vision_config.patch_size; assert!(image_size % patch_size == 0); let npatches = image_size / patch_size; // Dimensions are intentionally swapped to be bug-compatible with // upstream: https://github.com/LLaVA-VL/LLaVA-NeXT/issues/59 let (num_patch_width, num_patch_height) = get_anyres_image_grid_shape(height, width, &self.image_grid_pinpoints, image_size); let (unpadded_features, newline_features) = get_unpadded_features(height, width, npatches, num_patch_height, num_patch_width); // 
The base patch covers the entire image let base_features = npatches.pow(2); unpadded_features + newline_features + base_features } } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub struct ClipVisionModel { image_size: usize, patch_size: usize, } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub struct Idefics2 {} impl Idefics2 { pub fn get_number_of_features(&self, _height: usize, _width: usize) -> usize { 64 } } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub struct PaliTextConfig { pub(crate) num_image_tokens: usize, } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub struct Paligemma { pub(crate) text_config: PaliTextConfig, } impl Paligemma { pub fn get_number_of_features(&self, _height: usize, _width: usize) -> usize { self.text_config.num_image_tokens } } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(tag = "model_type")] #[serde(rename_all = "snake_case")] pub enum Config { LlavaNext(LlavaNext), ClipVisionModel(ClipVisionModel), Mistral, Idefics, Idefics2(Idefics2), Ssm, GptBigcode, Santacoder, Bloom, Mpt, Gpt2, Gptj, GptNeox, Phi, #[serde(rename = "phi-msft")] PhiMsft, Phi3, Llama, Baichuan, Paligemma(Paligemma), Gemma, Gemma2, Cohere, Drbx, Falcon, Mixtral, Starcoder2, Qwen2, Opt, T5, } #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub struct TextConfig {} #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub struct VisionConfig { pub(crate) image_size: usize, pub(crate) patch_size: usize, } #[cfg(test)] mod test { use super::*; #[test] fn test_llava_next_features() { let config = LlavaNext { text_config: TextConfig {}, vision_config: VisionConfig { image_size: 336, patch_size: 14, }, image_grid_pinpoints: vec![ (336, 672), (672, 336), (672, 672), (1008, 336), (336, 1008), ], }; let slots = config.get_number_of_features(20, 20); assert_eq!(slots, 1176); let slots = config.get_number_of_features(640, 640); assert_eq!(slots, 2928); let slots = config.get_number_of_features(480, 640); assert_eq!(slots, 2340); let slots = config.get_number_of_features(899, 1024); assert_eq!(slots, 2634); let slots = config.get_number_of_features(1024, 899); assert_eq!(slots, 2640); let slots = config.get_number_of_features(1067, 1600); assert_eq!(slots, 2144); } }
text-generation-inference/router/src/config.rs/0
{ "file_path": "text-generation-inference/router/src/config.rs", "repo_id": "text-generation-inference", "token_count": 3017 }
212
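The Rust config above computes how many image slots a LLaVA-NeXT image occupies. A loose Python port, useful for checking the numbers in the unit test: it keeps the deliberate width/height swap, but seeds the wasted-resolution tie-break with +infinity where the Rust uses NEG_INFINITY, and the default arguments are the values from the test.

```python
def select_best_resolution(orig_h, orig_w, pinpoints):
    """Maximize effective resolution, then minimize wasted resolution."""
    best, max_eff, min_wasted = None, 0, float("inf")
    for h, w in pinpoints:
        scale = min(w / orig_w, h / orig_h)
        eff = min(int(w * scale) * int(h * scale), orig_w * orig_h)
        wasted = h * w - eff
        if eff > max_eff or (eff == max_eff and wasted < min_wasted):
            max_eff, min_wasted, best = eff, wasted, (h, w)
    return best or (orig_h, orig_w)

def llava_next_features(height, width, image_size=336, patch_size=14,
                        pinpoints=((336, 672), (672, 336), (672, 672),
                                   (1008, 336), (336, 1008))):
    """Feature count for one image, following get_number_of_features above."""
    npatches = image_size // patch_size
    best_h, best_w = select_best_resolution(height, width, pinpoints)
    # Width/height are deliberately swapped, as in the Rust, to stay
    # bug-compatible with upstream LLaVA-NeXT.
    num_patch_w, num_patch_h = best_h // image_size, best_w // image_size
    cur_h, cur_w = npatches * num_patch_h, npatches * num_patch_w
    if width / height > cur_w / cur_h:
        new_h = (height * cur_w) // width
        cur_h -= 2 * ((cur_h - new_h) // 2)
    else:
        new_w = (width * cur_h) // height
        cur_w -= 2 * ((cur_w - new_w) // 2)
    return cur_h * cur_w + cur_h + npatches ** 2  # unpadded + newline + base

assert llava_next_features(640, 640) == 2928  # matches the Rust test above
```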
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _cuda_compat_cuh #define _cuda_compat_cuh // atomicAdd for half types, to support CC < 7.x __device__ __forceinline__ void atomicAdd_half(half* address, half val) { unsigned int * address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2)); unsigned int old = *address_as_ui; unsigned int assumed; do { assumed = old; __half_raw hsum; hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); half tmpres = __hadd(hsum, val); hsum = __half_raw(tmpres); old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x; old = atomicCAS(address_as_ui, assumed, old); } while (assumed != old); } // atomicAdd for half2 types __device__ __forceinline__ void atomicAdd_half2(half2* address, half2 val) { unsigned int* address_as_ui = (unsigned int*)address; unsigned int old = *address_as_ui; unsigned int assumed; do { assumed = old; half2 old_val = *((half2*)&old); half2 new_val = __hadd2(old_val, val); old = atomicCAS(address_as_ui, assumed, *((unsigned int*)&new_val)); } while (assumed != old); } // #if defined(__CUDA_ARCH__) || defined(USE_ROCM) #if __CUDA_ARCH__ < 700 || defined(USE_ROCM) __device__ __forceinline__ void atomicAdd(half* address, half val) { atomicAdd_half(address, val); } #if __CUDA_ARCH__ < 600 || defined(USE_ROCM) __device__ __forceinline__ void atomicAdd(half2* address, half2 val) { atomicAdd_half2(address, val); } #endif #endif #endif #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh", "repo_id": "text-generation-inference", "token_count": 692 }
213
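The CUDA compat shim above emulates atomicAdd on half precision by CAS-ing the aligned 32-bit word that contains the half. A rough Python illustration of just the bit selection and reinsertion (no atomicity; numpy is used only for the fp16 bit patterns):

```python
import numpy as np

def add_half_in_word(word: int, byte_offset: int, val: float) -> int:
    """Add `val` to the fp16 stored in the high or low half of `word`,
    selected by `byte_offset & 2` exactly like `(size_t)address & 2` above."""
    hi = byte_offset & 2
    bits = (word >> 16) if hi else (word & 0xFFFF)
    old_half = np.uint16(bits).view(np.float16)
    new_bits = int(np.float16(float(old_half) + val).view(np.uint16))
    if hi:
        return (word & 0x0000FFFF) | (new_bits << 16)
    return (word & 0xFFFF0000) | new_bits
```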
#ifndef _util_h #define _util_h #define DBGS(__x) printf("%s\n", __x) #define DBGI(__x) printf("%s: %i\n", #__x, __x) #define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y) #define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z) #define DBGF(__x) printf("%s: %f\n", #__x, __x) #define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y) #define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z) #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h", "repo_id": "text-generation-inference", "token_count": 296 }
214
#ifndef _util_cuh #define _util_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> #include <ATen/cuda/CUDAContext.h> #define DIVIDE(x, size) (((x) + (size) - 1) / (size)) #define DBGS(__x) printf("%s\n", __x) #define DBGI(__x) printf("%s: %i\n", #__x, __x) #define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y) #define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z) #define DBGX(__x) printf("%s: %x\n", #__x, __x) #define DBGX2(__x, __y) printf("%s, %s: %x, %x\n", #__x, #__y, __x, __y) #define DBGX3(__x, __y, __z) printf("%s, %s, %s: %x, %x, %x\n", #__x, #__y, #__z, __x, __y, __z) #define DBGF(__x) printf("%s: %f\n", #__x, __x) #define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y) #define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z) #define DBGH(__x) printf("%s: %f\n", #__x, __half2float(__x)) #define DBGH2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __half2float(__x), __half2float(__y)) #define DBGH3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __half2float(__x), __half2float(__y), __half2float(__z)) #define DBGIH(__x, __y) printf("%s, %s: %i, %f\n", #__x, #__y, __x, __half2float(__y)) #define DBGIH2(__x, __y, __z) printf("%s, %s, %s: %i, %f, %f\n", #__x, #__y, #__z, __x, __half2float(__y), __half2float(__z)) __forceinline__ __device__ half dq_scale_(const int qs, const half max_scale) { half qs_h = __hmul(__int2half_rn(qs + 1), __float2half_rn(1.0f / 16.0f)); qs_h = __hmul(qs_h, qs_h); qs_h = __hmul(qs_h, max_scale); return qs_h; } __forceinline__ __device__ float clamp(float x, float a, float b) { return fmaxf(a, fminf(b, x)); } #define cuda_check(ans) { gpu_assert((ans), __FILE__, __LINE__); } inline void gpu_assert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"CUDA error: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void print_global_mem(const half* ptr, int rows, int columns, int stride); #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh", "repo_id": "text-generation-inference", "token_count": 1115 }
215
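dq_scale_ above decodes a stored 4-bit scale code into a half-precision scale. The same arithmetic in Python, ignoring fp16 rounding (a sketch, not the kernel):

```python
def dq_scale(qs: int, max_scale: float) -> float:
    """Map the stored scale code to ((qs + 1) / 16)^2 * max_scale."""
    qs_h = (qs + 1) / 16.0
    return qs_h * qs_h * max_scale
```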
import os import tempfile import pytest import huggingface_hub.constants import text_generation_server.utils.hub from text_generation_server.utils.hub import ( weight_hub_files, download_weights, weight_files, EntryNotFoundError, LocalEntryNotFoundError, RevisionNotFoundError, ) @pytest.fixture() def offline(): current_value = text_generation_server.utils.hub.HF_HUB_OFFLINE text_generation_server.utils.hub.HF_HUB_OFFLINE = True yield "offline" text_generation_server.utils.hub.HF_HUB_OFFLINE = current_value @pytest.fixture() def fresh_cache(): with tempfile.TemporaryDirectory() as d: current_value = huggingface_hub.constants.HUGGINGFACE_HUB_CACHE huggingface_hub.constants.HUGGINGFACE_HUB_CACHE = d text_generation_server.utils.hub.HUGGINGFACE_HUB_CACHE = d os.environ["HUGGINGFACE_HUB_CACHE"] = d yield huggingface_hub.constants.HUGGINGFACE_HUB_CACHE = current_value os.environ["HUGGINGFACE_HUB_CACHE"] = current_value text_generation_server.utils.hub.HUGGINGFACE_HUB_CACHE = current_value @pytest.fixture() def prefetched(): model_id = "bert-base-uncased" huggingface_hub.snapshot_download( repo_id=model_id, revision="main", local_files_only=False, repo_type="model", allow_patterns=["*.safetensors"], ) yield model_id def test_weight_hub_files_offline_error(offline, fresh_cache): # If the model is not prefetched then it will raise an error with pytest.raises(EntryNotFoundError): weight_hub_files("gpt2") def test_weight_hub_files_offline_ok(prefetched, offline): # If the model is prefetched then we should be able to get the weight files from local cache filenames = weight_hub_files(prefetched) root = None assert len(filenames) == 1 for f in filenames: curroot, filename = os.path.split(f) if root is None: root = curroot else: assert root == curroot assert filename == "model.safetensors" def test_weight_hub_files(): filenames = weight_hub_files("bigscience/bloom-560m") assert filenames == ["model.safetensors"] def test_weight_hub_files_llm(): filenames = weight_hub_files("bigscience/bloom") assert filenames == [f"model_{i:05d}-of-00072.safetensors" for i in range(1, 73)] def test_weight_hub_files_empty(): with pytest.raises(EntryNotFoundError): weight_hub_files("bigscience/bloom", extension=".errors") def test_download_weights(): model_id = "bigscience/bloom-560m" filenames = weight_hub_files(model_id) files = download_weights(filenames, model_id) local_files = weight_files("bigscience/bloom-560m") assert files == local_files def test_weight_files_revision_error(): with pytest.raises(RevisionNotFoundError): weight_files("bigscience/bloom-560m", revision="error") def test_weight_files_not_cached_error(fresh_cache): with pytest.raises(LocalEntryNotFoundError): weight_files("bert-base-uncased")
text-generation-inference/server/tests/utils/test_hub.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_hub.py", "repo_id": "text-generation-inference", "token_count": 1250 }
216
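test_weight_hub_files_llm above expects the sharded safetensors naming scheme used on the Hub. A tiny helper reproducing that expectation (the function name is mine):

```python
def sharded_safetensors_names(num_shards: int) -> list:
    """Filenames for a checkpoint split into `num_shards` safetensors files."""
    return [
        f"model_{i:05d}-of-{num_shards:05d}.safetensors"
        for i in range(1, num_shards + 1)
    ]

assert sharded_safetensors_names(72)[0] == "model_00001-of-00072.safetensors"
assert len(sharded_safetensors_names(72)) == 72
```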
import torch from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.models.globals import ( ATTENTION, BLOCK_SIZE, ) from text_generation_server.layers.attention import Seqlen from typing import Optional major, minor = torch.cuda.get_device_capability() is_sm75 = major == 7 and minor == 5 _PARTITION_SIZE = 512 try: from vllm._C import cache_ops except Exception as e: raise ImportError( f"Could not import vllm paged attention. Make sure your installation is correct. Complete error: {e}" ) def reshape_and_cache( key: torch.Tensor, value: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, slots: torch.Tensor, ): if ATTENTION in {"flashdecoding", "flashinfer"}: shape = key_cache.shape key_cache.view(-1, shape[-2], shape[-1])[slots] = key value_cache.view(-1, shape[-2], shape[-1])[slots] = value else: cache_ops.reshape_and_cache( key, value, key_cache, value_cache, slots, "auto", 1.0 ) def paged_attention( query: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, kv_head_mapping: torch.Tensor, softmax_scale: float, block_tables: torch.Tensor, seqlen: Seqlen, max_s: int, softcap: Optional[float] = None, ): # Adapted from: https://github.com/vllm-project/vllm/blob/f8a1e39fae05ca610be8d5a78be9d40f5274e5fc/vllm/model_executor/layers/attention.py # Copyright 2023 The vLLM team. All rights # reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # value_cache => [num_blocks, num_heads, head_size, block_size] # block_size = value_cache.shape[3] block_size = BLOCK_SIZE num_seqs, num_heads, head_size = query.shape max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE # NOTE(woosuk): We use a simple heuristic to decide whether to use # PagedAttention V1 or V2. If the number of partitions is 1, we use # V1 to avoid the overhead of reduction. Also, if the number of # sequences or heads is large, we use V1 since there is enough work # to parallelize. if ATTENTION == "flashinfer": from text_generation_server.layers.attention.flashinfer import decode_state return decode_state.get().forward( query.contiguous(), paged_kv_cache=(key_cache, value_cache), logits_soft_cap=softcap, sm_scale=softmax_scale, ) elif ATTENTION == "flashdecoding": max_q = 1 max_k = max_s import flash_attn_2_cuda # TODO fixme when flash contains the fix. # Number of splits is not correctly handled # by the current path # https://github.com/Dao-AILab/flash-attention/blob/320fb59487658f033f56711efd3d61b7c7a6f8f3/csrc/flash_attn/flash_api.cpp#L577 # This fails becuase we're using causal, therefore window_right is set to 0 and the split logic is never applied. 
if softcap is None: softcap = 0.0 out = flash_attn_2_cuda.varlen_fwd( query, key_cache, value_cache, None, seqlen.cu_seqlen_q, seqlen.cu_seqlen_k, None, # pad_k None, block_tables, None, max_q, max_k, 0.0, # dropout softmax_scale, False, # zero_tensors True, # causal -1, # Window_left -1, # Window right softcap, False, # return softmax None, # generator ) return out[0] else: if softcap is not None: raise RuntimeError("Paged attention doesn't support softcapping") input_lengths = seqlen.input_lengths from vllm._C import ops out = torch.empty_like(query) use_v1 = max_s <= 8192 and ( max_num_partitions == 1 or num_seqs * num_heads > 512 ) if use_v1: ops.paged_attention_v1( out, query, key_cache, value_cache, kv_head_mapping, softmax_scale, block_tables, input_lengths, block_size, max_s, None, "auto", 1.0, ) else: # Run PagedAttention V2. assert _PARTITION_SIZE % block_size == 0 tmp_output = torch.empty( size=(num_seqs, num_heads, max_num_partitions, head_size), dtype=out.dtype, device=out.device, ) exp_sums = torch.empty( size=(num_seqs, num_heads, max_num_partitions), dtype=torch.float32, device=out.device, ) max_logits = torch.empty_like(exp_sums) ops.paged_attention_v2( out, exp_sums, max_logits, tmp_output, query, key_cache, value_cache, kv_head_mapping, softmax_scale, block_tables, input_lengths, block_size, max_s, None, "auto", 1.0, ) return out try: is_ampere_or_newer = major >= 8 and minor >= 0 if not is_ampere_or_newer: raise ImportError("FlashAttention only supports Ampere GPUs or newer.") import flash_attn_2_cuda V2 = True except ImportError: try: import flash_attn_cuda V2 = False except ImportError as e: if major >= 8: architecture_suffix = f"-{SYSTEM}" raise ImportError( "Flash Attention V2 is not installed.\n" "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) " f"or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`" ) elif is_sm75: raise ImportError( "Flash Attention is not installed.\n" "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) " "or install flash attention with `cd server && make install install-flash-attention`" ) from e else: raise ImportError( f"GPU with CUDA capability {major} {minor} is not supported" ) from e SUPPORTS_WINDOWING = V2 if ATTENTION == "flashinfer": def attention( q: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, seqlen: Seqlen, block_tables: torch.Tensor, softmax_scale, window_size_left=-1, causal=True, softcap=0.0, ): from text_generation_server.layers.attention.flashinfer import ( prefill_with_paged_kv_state, ) return prefill_with_paged_kv_state.get().forward( q.contiguous(), causal=causal, paged_kv_cache=(key_cache, value_cache), logits_soft_cap=softcap, sm_scale=softmax_scale, window_left=window_size_left, ) elif V2: def attention( q, key_cache: torch.Tensor, value_cache: torch.Tensor, seqlen: Seqlen, block_tables: torch.Tensor, softmax_scale, window_size_left=-1, causal=True, softcap=0.0, ): out = torch.empty_like(q) if window_size_left <= 0 and window_size_left != -1: raise ValueError("`window_size_left` must be > 0 or -1") return flash_attn_2_cuda.varlen_fwd( q, key_cache, value_cache, out, seqlen.cu_seqlen_q, seqlen.cu_seqlen_k, None, None, block_tables, None, seqlen.max_q, seqlen.max_k, 0.0, softmax_scale, False, causal, window_size_left, 0, softcap, False, None, )[0] else: def attention( q, k, v, key_cache: torch.Tensor, value_cache: torch.Tensor, cu_seqlens, max_s, softmax_scale, 
window_size_left=-1, causal=None, softcap=None, ): if window_size_left != -1: raise NotImplementedError( "window_size_left is only available with flash attn v2" ) if softcap is not None: raise NotImplementedError("softcap is only available with flash attn v2") # Flash attention v1 requires q, k and v to have the same number of heads if k.shape[1] != q.shape[1]: # MQA expand if k.shape[1] == 1: k = k.expand(-1, q.shape[1], -1) # Grouped attention reshape else: original_shape = k.shape k = ( k.unsqueeze(2) .expand(-1, -1, q.shape[1] // k.shape[1], -1) .reshape(original_shape[0], -1, original_shape[2]) ) if v.shape[1] != q.shape[1]: # MQA expand if v.shape[1] == 1: v = v.expand(-1, q.shape[1], -1) # Grouped attention reshape else: original_shape = v.shape v = ( v.unsqueeze(2) .expand(-1, -1, q.shape[1] // v.shape[1], -1) .reshape(original_shape[0], -1, original_shape[2]) ) out = torch.empty_like(q) flash_attn_cuda.fwd( q, k, v, out, cu_seqlens, cu_seqlens, max_s, max_s, 0.0, softmax_scale, False, True, False, 0, None, ) return out
text-generation-inference/server/text_generation_server/layers/attention/cuda.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/attention/cuda.py", "repo_id": "text-generation-inference", "token_count": 5731 }
217
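The vLLM fallback path above chooses between PagedAttention V1 and V2 with a small heuristic. Extracted here as a standalone function for clarity, using the same constants as the module above:

```python
_PARTITION_SIZE = 512

def use_paged_attention_v1(max_s: int, num_seqs: int, num_heads: int) -> bool:
    """V1 when the context is short and either there is a single partition
    or there is enough sequence*head parallelism; otherwise V2."""
    max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE
    return max_s <= 8192 and (max_num_partitions == 1 or num_seqs * num_heads > 512)
```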
import math import numpy as np import torch import torch.nn as nn from torch.cuda.amp import custom_fwd import triton import triton.language as tl from . import custom_autotune # code based https://github.com/fpgaminer/GPTQ-triton @custom_autotune.autotune( configs=[ triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 32, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 64, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=2, num_warps=8, ), triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 64, "BLOCK_SIZE_K": 64, "GROUP_SIZE_M": 8, }, num_stages=3, num_warps=8, ), triton.Config( { "BLOCK_SIZE_M": 32, "BLOCK_SIZE_N": 32, "BLOCK_SIZE_K": 128, "GROUP_SIZE_M": 8, }, num_stages=2, num_warps=4, ), ], key=["M", "N", "K"], nearest_power_of_two=True, prune_configs_by={ "early_config_prune": custom_autotune.matmul248_kernel_config_pruner, "perf_model": None, "top_k": None, }, ) @triton.jit def matmul_248_kernel( a_ptr, b_ptr, c_ptr, scales_ptr, zeros_ptr, g_ptr, M, N, K, bits, maxq, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, stride_scales, stride_zeros, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, ): """ Compute the matrix multiplication C = A x B. 
A is of shape (M, K) float16 B is of shape (K//8, N) int32 C is of shape (M, N) float16 scales is of shape (G, N) float16 zeros is of shape (G, N) float16 g_ptr is of shape (K) int32 """ infearure_per_bits = 32 // bits pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_k = tl.cdiv(K, BLOCK_SIZE_K) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + (pid % group_size_m) pid_n = (pid % num_pid_in_group) // group_size_m offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + ( offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak ) # (BLOCK_SIZE_M, BLOCK_SIZE_K) a_mask = offs_am[:, None] < M # b_ptrs is set up such that it repeats elements along the K axis 8 times b_ptrs = b_ptr + ( (offs_k[:, None] // infearure_per_bits) * stride_bk + offs_bn[None, :] * stride_bn ) # (BLOCK_SIZE_K, BLOCK_SIZE_N) g_ptrs = g_ptr + offs_k # shifter is used to extract the N bits of each element in the 32-bit word from B scales_ptrs = scales_ptr + offs_bn[None, :] zeros_ptrs = zeros_ptr + (offs_bn[None, :] // infearure_per_bits) shifter = (offs_k % infearure_per_bits) * bits zeros_shifter = (offs_bn % infearure_per_bits) * bits accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, num_pid_k): g_idx = tl.load(g_ptrs) # Fetch scales and zeros; these are per-outfeature and thus reused in the inner loop scales = tl.load( scales_ptrs + g_idx[:, None] * stride_scales ) # (BLOCK_SIZE_K, BLOCK_SIZE_N,) zeros = tl.load( zeros_ptrs + g_idx[:, None] * stride_zeros ) # (BLOCK_SIZE_K, BLOCK_SIZE_N,) zeros = (zeros >> zeros_shifter[None, :]) & maxq zeros = (zeros + 1) & maxq # eventually avoid overflow a = tl.load(a_ptrs, mask=a_mask, other=0.0) # (BLOCK_SIZE_M, BLOCK_SIZE_K) b = tl.load(b_ptrs) # (BLOCK_SIZE_K, BLOCK_SIZE_N), but repeated # Now we need to unpack b (which is N-bit values) into 32-bit values b = (b >> shifter[:, None]) & maxq # Extract the N-bit values b = (b - zeros) * scales # Scale and shift accumulator += tl.dot(a, b) a_ptrs += BLOCK_SIZE_K b_ptrs += (BLOCK_SIZE_K // infearure_per_bits) * stride_bk g_ptrs += BLOCK_SIZE_K c_ptrs = c_ptr + stride_cm * offs_am[:, None] + stride_cn * offs_bn[None, :] c_mask = (offs_am[:, None] < M) & (offs_bn[None, :] < N) tl.store(c_ptrs, accumulator, mask=c_mask) def matmul248(input, qweight, scales, qzeros, g_idx, bits, maxq): with torch.cuda.device(input.device): output = torch.empty( (input.shape[0], qweight.shape[1]), device=input.device, dtype=torch.float16 ) def grid(META): return ( triton.cdiv(input.shape[0], META["BLOCK_SIZE_M"]) * triton.cdiv(qweight.shape[1], META["BLOCK_SIZE_N"]), ) matmul_248_kernel[grid]( input, qweight, output, scales, qzeros, g_idx, input.shape[0], qweight.shape[1], input.shape[1], bits, maxq, input.stride(0), input.stride(1), qweight.stride(0), qweight.stride(1), output.stride(0), output.stride(1), scales.stride(0), qzeros.stride(0), ) return output class QuantLinearFunction(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward(ctx, input, qweight, scales, qzeros, g_idx, bits, maxq): output = matmul248(input, qweight, scales, qzeros, g_idx, bits, maxq) return output class QuantLinear(nn.Module): def __init__(self, qweight, qzeros, scales, g_idx, bias, 
bits, groupsize): super().__init__() self.register_buffer("qweight", qweight) self.register_buffer("qzeros", qzeros) self.register_buffer("scales", scales) self.register_buffer("g_idx", g_idx) if bias is not None: self.register_buffer("bias", bias) else: self.bias = None if bits not in [2, 4, 8]: raise NotImplementedError("Only 2,4,8 bits are supported.") self.bits = bits self.maxq = 2**self.bits - 1 self.groupsize = groupsize self.outfeatures = qweight.shape[1] self.infeatures = qweight.shape[0] * 32 // bits @classmethod def new(cls, bits, groupsize, infeatures, outfeatures, bias): if bits not in [2, 4, 8]: raise NotImplementedError("Only 2,4,8 bits are supported.") qweight = torch.zeros((infeatures // 32 * bits, outfeatures), dtype=torch.int32) qzeros = torch.zeros( (math.ceil(infeatures / groupsize), outfeatures // 32 * bits), dtype=torch.int32, ) scales = torch.zeros( (math.ceil(infeatures / groupsize), outfeatures), dtype=torch.float16 ) g_idx = torch.tensor( [i // groupsize for i in range(infeatures)], dtype=torch.int32 ) if bias: bias = torch.zeros((outfeatures), dtype=torch.float16) else: bias = None return cls(qweight, qzeros, scales, g_idx, bias, bits, groupsize) def pack(self, linear, scales, zeros, g_idx=None): self.g_idx = g_idx.clone() if g_idx is not None else self.g_idx scales = scales.t().contiguous() zeros = zeros.t().contiguous() scale_zeros = zeros * scales self.scales = scales.clone().half() if linear.bias is not None: self.bias = linear.bias.clone().half() intweight = [] for idx in range(self.infeatures): intweight.append( torch.round( (linear.weight.data[:, idx] + scale_zeros[self.g_idx[idx]]) / self.scales[self.g_idx[idx]] ).to(torch.int)[:, None] ) intweight = torch.cat(intweight, dim=1) intweight = intweight.t().contiguous() intweight = intweight.numpy().astype(np.uint32) qweight = np.zeros( (intweight.shape[0] // 32 * self.bits, intweight.shape[1]), dtype=np.uint32 ) i = 0 row = 0 while row < qweight.shape[0]: if self.bits in [2, 4, 8]: for j in range(i, i + (32 // self.bits)): qweight[row] |= intweight[j] << (self.bits * (j - i)) i += 32 // self.bits row += 1 else: raise NotImplementedError("Only 2,4,8 bits are supported.") qweight = qweight.astype(np.int32) self.qweight = torch.from_numpy(qweight) zeros -= 1 zeros = zeros.numpy().astype(np.uint32) qzeros = np.zeros( (zeros.shape[0], zeros.shape[1] // 32 * self.bits), dtype=np.uint32 ) i = 0 col = 0 while col < qzeros.shape[1]: if self.bits in [2, 4, 8]: for j in range(i, i + (32 // self.bits)): qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i)) i += 32 // self.bits col += 1 else: raise NotImplementedError("Only 2,4,8 bits are supported.") qzeros = qzeros.astype(np.int32) self.qzeros = torch.from_numpy(qzeros) def forward(self, x): out_shape = x.shape[:-1] + (self.outfeatures,) out = QuantLinearFunction.apply( x.reshape(-1, x.shape[-1]), self.qweight, self.scales, self.qzeros, self.g_idx, self.bits, self.maxq, ) out = out + self.bias if self.bias is not None else out return out.reshape(out_shape)
text-generation-inference/server/text_generation_server/layers/gptq/quant_linear.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/gptq/quant_linear.py", "repo_id": "text-generation-inference", "token_count": 6287 }
218
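QuantLinear.pack() above packs 32 // bits quantized values into each int32 word, and matmul_248_kernel unpacks them with the same shifts. A NumPy sketch of that round trip (kept unsigned for simplicity; helper names are mine):

```python
import numpy as np

def pack_rows(intweight: np.ndarray, bits: int = 4) -> np.ndarray:
    """Pack unsigned `bits`-wide values along axis 0 into 32-bit words,
    mirroring the qweight layout produced by QuantLinear.pack() above."""
    per_word = 32 // bits  # "infearure_per_bits" in the kernel
    rows, cols = intweight.shape
    assert rows % per_word == 0
    packed = np.zeros((rows // per_word, cols), dtype=np.uint32)
    for j in range(rows):
        packed[j // per_word] |= intweight[j].astype(np.uint32) << np.uint32(bits * (j % per_word))
    return packed

def unpack_rows(packed: np.ndarray, rows: int, bits: int = 4) -> np.ndarray:
    """Invert pack_rows with the shift/mask used inside matmul_248_kernel."""
    per_word, maxq = 32 // bits, np.uint32((1 << bits) - 1)
    out = np.empty((rows, packed.shape[1]), dtype=np.uint32)
    for j in range(rows):
        out[j] = (packed[j // per_word] >> np.uint32(bits * (j % per_word))) & maxq
    return out

w = np.random.randint(0, 16, size=(64, 8), dtype=np.uint32)
assert np.array_equal(unpack_rows(pack_rows(w), rows=64), w)
```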
# ruff: noqa: F821 # the above line disables the `undefined-name` rule for the model type variables import torch import enum import os from loguru import logger from transformers.configuration_utils import PretrainedConfig from transformers.models.auto import modeling_auto from huggingface_hub import hf_hub_download, HfApi from typing import Optional, List, Dict from pathlib import Path from text_generation_server.utils.speculate import get_speculate, set_speculate from text_generation_server.models.model import Model from text_generation_server.models.causal_lm import CausalLM, CausalLMBatchKeysLast from text_generation_server.models.custom_modeling.opt_modeling import OPTForCausalLM from text_generation_server.models.custom_modeling.mpt_modeling import ( MPTForCausalLM, ) from text_generation_server.models.bloom import BloomCausalLMBatch from text_generation_server.models.custom_modeling.bloom_modeling import ( BloomForCausalLM, ) from text_generation_server.models.seq2seq_lm import Seq2SeqLM from text_generation_server.models.galactica import GalacticaCausalLMBatch from text_generation_server.models.custom_modeling.neox_modeling import ( GPTNeoxForCausalLM, ) from text_generation_server.models.custom_modeling.phi_modeling import ( PhiConfig, PhiForCausalLM, ) from text_generation_server.models.custom_modeling.t5_modeling import ( T5ForConditionalGeneration, ) from text_generation_server.utils.adapter import ( AdapterParameters, build_layer_weight_lookup, load_and_merge_adapters, AdapterInfo, ) from text_generation_server.adapters.lora import LoraWeights from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.log import log_master # The flag below controls whether to allow TF32 on matmul. This flag defaults to False # in PyTorch 1.12 and later. torch.backends.cuda.matmul.allow_tf32 = True # The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True. torch.backends.cudnn.allow_tf32 = True # Disable gradients torch.set_grad_enabled(False) __all__ = [ "Model", "CausalLM", "Seq2SeqLM", "get_model_with_lora_adapters", ] FLASH_ATT_ERROR_MESSAGE = "{} requires Flash Attention enabled models." 
FLASH_ATTENTION = True try: from text_generation_server.models.flash_causal_lm import FlashCausalLM from text_generation_server.models.vlm_causal_lm import VlmCausalLM from text_generation_server.models.custom_modeling.flash_deepseek_v2_modeling import ( FlashDeepseekV2ForCausalLM, DeepseekV2Config, ) from text_generation_server.models.custom_modeling.flash_llama_modeling import ( FlashLlamaForCausalLM, ) from text_generation_server.models.custom_modeling.flash_cohere_modeling import ( FlashCohereForCausalLM, ) from text_generation_server.models.custom_modeling.flash_gemma_modeling import ( FlashGemmaForCausalLM, ) from text_generation_server.models.custom_modeling.flash_gemma2_modeling import ( FlashGemma2ForCausalLM, ) from text_generation_server.models.custom_modeling.flash_dbrx_modeling import ( FlashDbrxForCausalLM, DbrxConfig, ) from text_generation_server.models.custom_modeling.flash_rw_modeling import ( RWConfig, FlashRWForCausalLM, ) from text_generation_server.models.custom_modeling.flash_neox_modeling import ( FlashGPTNeoXForCausalLM, ) from text_generation_server.models.pali_gemma import ( PaliGemmaBatch, ) from text_generation_server.models.custom_modeling.flash_pali_gemma_modeling import ( PaliGemmaForConditionalGeneration, ) from text_generation_server.models.custom_modeling.flash_phi_modeling import ( FlashPhiForCausalLM, ) from text_generation_server.models.idefics import IDEFICSSharded from text_generation_server.models.custom_modeling.llava_next import ( LlavaNextForConditionalGeneration, ) from text_generation_server.models.custom_modeling.flash_santacoder_modeling import ( FlashSantacoderForCausalLM, ) from text_generation_server.models.custom_modeling.flash_starcoder2_modeling import ( FlashStarcoder2ForCausalLM, ) from text_generation_server.models.custom_modeling.flash_qwen2_modeling import ( Qwen2ForCausalLM, ) from text_generation_server.models.custom_modeling.flash_mistral_modeling import ( FlashMistralForCausalLM, ) from text_generation_server.models.custom_modeling.flash_mixtral_modeling import ( FlashMixtralForCausalLM, ) from text_generation_server.models.custom_modeling.flash_gpt2_modeling import ( FlashGPT2ForCausalLM, ) from text_generation_server.models.custom_modeling.flash_gptj_modeling import ( FlashGPTJForCausalLM, ) from text_generation_server.models.custom_modeling.idefics2 import ( Idefics2ForConditionalGeneration, ) from text_generation_server.layers.attention import SUPPORTS_WINDOWING except ImportError as e: log_master(logger.warning, f"Could not import Flash Attention enabled models: {e}") SUPPORTS_WINDOWING = False FLASH_ATTENTION = False if FLASH_ATTENTION: __all__.append(FlashCausalLM) __all__.append(IDEFICSSharded) MAMBA_AVAILABLE = True try: from text_generation_server.models.mamba import Mamba except ImportError as e: log_master(logger.warning, f"Could not import Mamba: {e}") MAMBA_AVAILABLE = False if MAMBA_AVAILABLE: __all__.append(Mamba) class ModelType(enum.Enum): DEEPSEEK_V2 = { "type": "deepseek_v2", "name": "Deepseek V2", "url": "https://huggingface.co/deepseek-ai/DeepSeek-V2", } IDEFICS2 = { "type": "idefics2", "name": "Idefics 2", "url": "https://huggingface.co/HuggingFaceM4/idefics2-8b", "multimodal": True, } LLAVA_NEXT = { "type": "llava_next", "name": "Llava Next (1.6)", "url": "https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf", "multimodal": True, } LLAMA = { "type": "llama", "name": "Llama", "url": "https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f", } PHI3 = { "type": "phi3", "name": 
"Phi 3", "url": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", } GEMMA = { "type": "gemma", "name": "Gemma", "url": "https://huggingface.co/google/gemma-7b", } PALIGEMMA = { "type": "paligemma", "name": "PaliGemma", "url": "https://huggingface.co/google/paligemma-3b-pt-224", } GEMMA2 = { "type": "gemma2", "name": "Gemma2", "url": "https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315", } COHERE = { "type": "cohere", "name": "Cohere", "url": "https://huggingface.co/CohereForAI/c4ai-command-r-plus", } DBRX = { "type": "dbrx", "name": "Dbrx", "url": "https://huggingface.co/databricks/dbrx-instruct", } MAMBA = { "type": "ssm", "name": "Mamba", "url": "https://huggingface.co/state-spaces/mamba-2.8b-slimpj", } MISTRAL = { "type": "mistral", "name": "Mistral", "url": "https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407", } MIXTRAL = { "type": "mixtral", "name": "Mixtral", "url": "https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1", } GPT_BIGCODE = { "type": "gpt_bigcode", "name": "Gpt Bigcode", "url": "https://huggingface.co/bigcode/gpt_bigcode-santacoder", } PHI = { "type": "phi", "name": "Phi", "url": "https://huggingface.co/microsoft/phi-1_5", } BAICHUAN = { "type": "baichuan", "name": "Baichuan", "url": "https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat", } FALCON = { "type": "falcon", "name": "Falcon", "url": "https://huggingface.co/tiiuae/falcon-7b-instruct", } STARCODER2 = { "type": "starcoder2", "name": "StarCoder 2", "url": "https://huggingface.co/bigcode/starcoder2-15b-instruct-v0.1", } QWEN2 = { "type": "qwen2", "name": "Qwen 2", "url": "https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f", } OPT = { "type": "opt", "name": "Opt", "url": "https://huggingface.co/facebook/opt-6.7b", } T5 = { "type": "t5", "name": "T5", "url": "https://huggingface.co/google/flan-t5-xxl", } GALACTICA = { "type": "galactica", "name": "Galactica", "url": "https://huggingface.co/facebook/galactica-120b", } SANTACODER = { "type": "santacoder", "name": "SantaCoder", "url": "https://huggingface.co/bigcode/santacoder", } BLOOM = { "type": "bloom", "name": "Bloom", "url": "https://huggingface.co/bigscience/bloom-560m", } MPT = { "type": "mpt", "name": "Mpt", "url": "https://huggingface.co/mosaicml/mpt-7b-instruct", } GPT2 = { "type": "gpt2", "name": "Gpt2", "url": "https://huggingface.co/openai-community/gpt2", } GPT_NEOX = { "type": "gpt_neox", "name": "Gpt Neox", "url": "https://huggingface.co/EleutherAI/gpt-neox-20b", } GPTJ = { "type": "gptj", "name": "Gptj", "url": "https://huggingface.co/EleutherAI/gpt-j-6b", } IDEFICS = { "type": "idefics", "name": "Idefics", "url": "https://huggingface.co/HuggingFaceM4/idefics-9b", "multimodal": True, } __GLOBALS = locals() for data in ModelType: __GLOBALS[data.name] = data.value["type"] def get_model( model_id: str, lora_adapter_ids: Optional[List[str]], revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], trust_remote_code: bool, max_input_tokens: int, ) -> Model: global FLASH_ATTENTION config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) model_type = config_dict.get("model_type", None) quantization_config = config_dict.get("quantization_config", None) if quantization_config is not None and quantize is None: method = quantization_config.get("quant_method", None) if method in {"gptq", "awq", "exl2"}: log_master(logger.info, f"Auto selecting quantization method 
{method}") quantize = method elif method == "fbgemm_fp8": log_master(logger.info, "Auto selecting quantization method fp8") quantize = "fp8" else: log_master(logger.warning, f"Unknown quantization method {method}") if dtype is None: if quantize in ["awq", "exl2", "gptq", "marlin"]: # These quantizers only work with float16 params. dtype = torch.float16 elif quantize == "fp8": from text_generation_server.layers.fp8 import FBGEMM_DYN_AVAILABLE if FBGEMM_DYN_AVAILABLE: # fbgemm kernels are fp8xfp8->bf16 dtype = torch.bfloat16 else: # Keep it as default for now and let # every model resolve their own default dtype. dtype = None elif dtype == "float16": dtype = torch.float16 elif dtype == "bfloat16": dtype = torch.bfloat16 else: raise RuntimeError(f"Unknown dtype {dtype}") if speculate is not None: set_speculate(speculate) else: set_speculate(0) speculator = None if "medusa_num_heads" in config_dict: medusa_model_id = model_id medusa_revision = revision model_id = config_dict["base_model_name_or_path"] revision = "main" speculate_medusa = config_dict["medusa_num_heads"] if speculate is not None: if speculate > speculate_medusa: raise RuntimeError( f"Speculate is set to `{speculate}` but this medusa models only has `{speculate_medusa}` heads, please make them match" ) else: set_speculate(speculate) else: set_speculate(speculate_medusa) config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) # Reload model type from parent. model_type = config_dict.get("model_type", None) is_local = Path(medusa_model_id).exists() if not is_local: medusa_config = hf_hub_download( medusa_model_id, revision=medusa_revision, filename="config.json" ) hf_hub_download( medusa_model_id, revision=medusa_revision, filename="medusa_lm_head.safetensors", ) speculator = { "path": Path(medusa_config).parent, "model_paths": ["medusa_lm_head.safetensors"], } else: speculator = { "path": Path(medusa_model_id), "model_paths": ["medusa_lm_head.safetensors"], } method = "medusa" elif model_type == "mlp_speculator": mlp_model_id = model_id mlp_revision = revision model_id = config_dict["base_model_name_or_path"] revision = "main" speculate_mlp = config_dict["n_predict"] if speculate is not None: if speculate > speculate_mlp: raise RuntimeError( f"Speculate is set to `{speculate}` but this mlp_speculator models only has `{speculate_mlp}` heads, please make them match" ) else: set_speculate(speculate) else: set_speculate(speculate_mlp) config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) # Reload model type from parent. 
model_type = config_dict.get("model_type", None) is_local = Path(mlp_model_id).exists() extension = ".safetensors" if not is_local: mlp_speculator_config = hf_hub_download( mlp_model_id, revision=mlp_revision, filename="config.json" ) api = HfApi() info = api.model_info(mlp_model_id, revision=mlp_revision) filenames = [ s.rfilename for s in info.siblings if s.rfilename.endswith(extension) and len(s.rfilename.split("/")) == 1 and "arguments" not in s.rfilename and "args" not in s.rfilename and "training" not in s.rfilename ] for filename in filenames: hf_hub_download( mlp_model_id, revision=mlp_revision, filename=filename, ) speculator_dir_path = Path(mlp_speculator_config).parent # if these are downloaded, they get converted to safetensors filenames.extend( [p for p in os.listdir(speculator_dir_path) if p.endswith(extension)] ) speculator = { "path": Path(mlp_speculator_config).parent, "model_paths": filenames, } else: speculator = Path(mlp_model_id) filenames = [p for p in os.listdir(speculator) if p.endswith(extension)] speculator = {"path": speculator, "model_paths": filenames} method = "mlp_speculator" else: method = "n-gram" speculate = get_speculate() if speculate > 0: log_master( logger.info, f"Using speculation {method} with {speculate} input ids." ) if model_type is None: # TODO: fix how we determine model type for Mamba if "ssm_cfg" in config_dict: # *only happens in Mamba case model_type = "ssm" else: raise RuntimeError( f"Could not determine model type for {model_id} revision {revision}" ) if quantize == "exl2" and sharded: raise RuntimeError( "Sharding is currently not supported with `exl2` quantization" ) sliding_window = ( config_dict.get("sliding_window") if config_dict.get("sliding_window") is not None else -1 ) use_sliding_window = sliding_window is not None and sliding_window != -1 needs_sliding_window = ( max_input_tokens is not None and max_input_tokens > sliding_window ) if use_sliding_window and needs_sliding_window and not SUPPORTS_WINDOWING: raise ValueError( f"The backend {SYSTEM} does not support sliding window attention that is used by the model type {model_type}. To use this model nonetheless with the {SYSTEM} backend, please launch TGI with the argument `--max-input-tokens` smaller than sliding_window={sliding_window} (got here max_input_tokens={max_input_tokens})." ) if model_type == DEEPSEEK_V2: if FLASH_ATTENTION: head_size = max( config_dict.get("qk_nope_dim", 128) + config_dict.get("qk_rope_dim", 64), config_dict.get("v_head_dim", 128), ) return FlashCausalLM( model_id=model_id, model_class=FlashDeepseekV2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, default_dtype=torch.bfloat16, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=DeepseekV2Config, head_size=head_size, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Deepseek V2") ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == MAMBA: return Mamba( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_id.startswith("facebook/galactica"): return CausalLM( model_id=model_id, # Yes galactica is just an OPT model. 
model_class=OPTForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=GalacticaCausalLMBatch, ) if ( model_type == GPT_BIGCODE or model_type == GPT2 and model_id.startswith("bigcode/") ): if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashSantacoderForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, aliases={"transformer.wte.weight": ["lm_head.weight"]}, num_kv_heads=1, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Santacoder") ) else: return CausalLM.fallback( model_id=model_id, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == BLOOM: return CausalLM( model_id=model_id, model_class=BloomForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=BloomCausalLMBatch, ) elif model_type == MPT: return CausalLM( model_id=model_id, model_class=MPTForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=CausalLMBatchKeysLast, ) elif model_type == GPT2: if FLASH_ATTENTION: try: return FlashCausalLM( model_id=model_id, model_class=FlashGPT2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) except RuntimeError as e: # Lots of legacy models with various weight names. log_master(logger.warning, f"Couldn't load flash gpt2 variant: {e}") return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded GPT-2")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == GPTJ: if FLASH_ATTENTION: try: return FlashCausalLM( model_id=model_id, model_class=FlashGPTJForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) except RuntimeError as e: # Lots of legacy models with various weight names. 
log_master(logger.warning, f"Couldn't load flash gptj variant: {e}") return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded GPT-J")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == GPT_NEOX: if FLASH_ATTENTION: from text_generation_server.models.custom_modeling.flash_neox_modeling import ( GPTNeoXConfig, ) return FlashCausalLM( model_id=model_id, model_class=FlashGPTNeoXForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=GPTNeoXConfig, ) elif sharded: return CausalLM( model_id=model_id, model_class=GPTNeoxForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == PHI: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashPhiForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "phi-msft": if FLASH_ATTENTION: raise NotImplementedError( "Legacy phi-msft is not supported with Flash Attention" ) else: return CausalLM( model_id=model_id, model_class=PhiForCausalLM, config_class=PhiConfig, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == LLAMA or model_type == BAICHUAN or model_type == PHI3: print(f">>> model_type: {model_type}") if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashLlamaForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Llama")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == GEMMA: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashGemmaForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, # Works better for these models default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == GEMMA2: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashGemma2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, # Works better for these models default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma2")) else: return CausalLM.fallback( 
model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == COHERE: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashCohereForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Cohere")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == DBRX: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashDbrxForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, # Dbrx works better in bfloat16. default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=DbrxConfig, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded DBRX")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type in ["RefinedWeb", "RefinedWebModel", FALCON]: if sharded: if FLASH_ATTENTION: if config_dict.get("alibi", False): raise NotImplementedError("sharded is not supported for this model") return FlashCausalLM( model_id=model_id, model_class=FlashRWForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, aliases={ "lm_head.weight": ["transformer.word_embeddings.weight"], "transformer.word_embeddings.weight": ["lm_head.weight"], }, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=RWConfig, ) raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Falcon")) else: if FLASH_ATTENTION and not config_dict.get("alibi", False): return FlashCausalLM( model_id=model_id, model_class=FlashRWForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, aliases={ "lm_head.weight": ["transformer.word_embeddings.weight"], "transformer.word_embeddings.weight": ["lm_head.weight"], }, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=RWConfig, ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == MISTRAL: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashMistralForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mistral")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == MIXTRAL: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashMixtralForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mixtral")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == STARCODER2: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, 
model_class=FlashStarcoder2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Starcoder2") ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == QWEN2: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=Qwen2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Qwen2")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == OPT: return CausalLM( model_id=model_id, model_class=OPTForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == T5: return Seq2SeqLM( model_id=model_id, model_class=T5ForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, aliases={ "shared.weight": [ "encoder.embed_tokens.weight", "decoder.embed_tokens.weight", ] }, ) if model_type == IDEFICS: if FLASH_ATTENTION: return IDEFICSSharded( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics")) if model_type == IDEFICS2: if FLASH_ATTENTION: return VlmCausalLM( model_id=model_id, model_class=Idefics2ForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, # XXX: Extremely important to cap resolution in order to limit # VRAM usage. 
processor_kwargs={"size": {"longest_edge": 448, "shortest_edge": 378}}, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics")) if model_type == PALIGEMMA: if FLASH_ATTENTION: return VlmCausalLM( model_id=model_id, model_class=PaliGemmaForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, # Works better for these models default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, batch_class=PaliGemmaBatch, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics")) if model_type == LLAVA_NEXT: if FLASH_ATTENTION: return VlmCausalLM( model_class=LlavaNextForConditionalGeneration, model_id=model_id, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("LlavaNext")) if sharded: raise NotImplementedError("sharded is not supported for AutoModel") if quantize == "gptq": raise NotImplementedError( "gptq quantization is not supported for AutoModel, you can try to quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`" ) if quantize == "awq": raise NotImplementedError("awq quantization is not supported for AutoModel") elif (quantize == "bitsandbytes-fp4") or (quantize == "bitsandbytes-nf4"): raise NotImplementedError("4bit quantization is not supported for AutoModel") elif quantize == "eetq": raise NotImplementedError("Eetq quantization is not supported for AutoModel") elif quantize == "exl2": raise NotImplementedError("exl2 quantization is not supported for AutoModel") if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type in modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES: return Seq2SeqLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) auto_map = config_dict.get("auto_map", None) if trust_remote_code and auto_map is not None: if "AutoModelForCausalLM" in auto_map.keys(): return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if "AutoModelForSeq2SeqLM" in auto_map.keys(): return Seq2SeqLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) raise ValueError(f"Unsupported model type {model_type}") # get_model_with_lora_adapters wraps the internal get_model function and adds support for loading adapters # this provides a post model loading hook to load adapters into the model after the model has been loaded def get_model_with_lora_adapters( model_id: str, lora_adapters: Optional[List[AdapterInfo]], revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], trust_remote_code: bool, max_input_tokens: int, adapter_to_index: Dict[str, int], ): lora_adapter_ids = [adapter.id for adapter in lora_adapters] model = get_model( model_id, lora_adapter_ids, revision, sharded, quantize, speculate, dtype, trust_remote_code, max_input_tokens, ) if len(lora_adapters) > 0: target_to_layer = build_layer_weight_lookup(model.model) for index, adapter in enumerate(lora_adapters): # The AdapterParameters object allows for merging multiple adapters into a single adapter. 
# At the moment, we only support loading a single adapter into the model, but we keep the # AdapterParameters object for easier extension in the future. adapter_parameters = AdapterParameters( adapter_info=[adapter], # when merging multiple adapters we can weight them differently # if this is not set, all adapters will be weighted equally # see: text_generation_server.utils.merges.strategies for impl weights=None, merge_strategy=0, density=1.0, majority_sign_method=0, ) adapter_index = index + 1 adapter_to_index[adapter.id] = adapter_index logger.info( f"Loading adapter weights into model: {','.join([adapter.id for adapter in adapter_parameters.adapter_info])}" ) weight_names = tuple([v[0] for v in target_to_layer.values()]) ( module_map, adapter_config, adapter_weight_names, adapter_tokenizer, ) = load_and_merge_adapters( model.model_id, adapter_parameters, adapter_index, weight_names, False, ) unused_weight_names = adapter_weight_names.copy() adapter_layers = [ "q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj", ] for layer_name in adapter_layers: nlayers = ( 1 if layer_name == "lm_head" else len(model.model.model.layers) ) adapter_weights = LoraWeights.prepare_weights( config=adapter_config, module_map=module_map, layer_type=layer_name, unused_weight_names=unused_weight_names, nlayers=nlayers, dtype=model.dtype, world_size=model.world_size, process_group=model.process_group, target_to_layer=target_to_layer, ) if adapter_weights is None: continue model.layer_to_adapter_weights[layer_name].add_adapter( adapter_index, adapter_weights ) if len(unused_weight_names) > 0: logger.warning( f"{','.join(adapter_parameters.adapter_ids)} unused adapter weights: {unused_weight_names}" ) if adapter_tokenizer is not None: model.tokenizers.add_tokenizer(adapter_index, adapter_tokenizer) model.loaded_adapters.add(adapter_index) return model
text-generation-inference/server/text_generation_server/models/__init__.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/__init__.py", "repo_id": "text-generation-inference", "token_count": 24348 }
219
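The file above is the model dispatcher: for most architectures, get_model repeats the same three-way decision (prefer the flash implementation, refuse sharding when flash attention is unavailable, otherwise fall back to the eager CausalLM path). The snippet below is a minimal, illustrative sketch of that pattern only; the names and factories are placeholders, not part of the repository.

from typing import Callable

FLASH_ATT_ERROR = "{} requires flash attention"  # stand-in for FLASH_ATT_ERROR_MESSAGE


def pick_implementation(
    model_type: str,
    flash_available: bool,
    sharded: bool,
    flash_factory: Callable[[], object],
    fallback_factory: Callable[[], object],
) -> object:
    # Prefer the flash code path when the kernels are usable.
    if flash_available:
        return flash_factory()
    # Sharding is only implemented for the flash code path.
    if sharded:
        raise NotImplementedError(FLASH_ATT_ERROR.format(f"Sharded {model_type}"))
    # Otherwise fall back to the slower eager implementation.
    return fallback_factory()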
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.modeling_utils import PreTrainedModel from transformers.models.gpt_neox import GPTNeoXConfig as TransformersGPTNeoXConfig from typing import Optional, List, Tuple from text_generation_server.layers.attention import ( paged_attention, attention, reshape_and_cache, Seqlen, ) from text_generation_server.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear, ) from text_generation_server.layers.layernorm import ( FastLayerNorm, ) from text_generation_server.layers.rotary import ( PositionRotaryEmbedding, ) from text_generation_server.utils.weights import UnquantizedWeight class GPTNeoXConfig(TransformersGPTNeoXConfig): attribute_map = { "num_key_value_heads": "num_attention_heads", } def load_row(config, prefix: str, weights, bias: bool): weight = weights.get_weights_row(prefix) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None linear = get_linear(weight, bias) if config.use_parallel_residual: return linear else: return TensorParallelRowLinear(linear, process_group=weights.process_group) def load_qkv(config, prefix: str, weights, num_heads, head_size, hidden_size): weight = weights.get_multi_weights_col([prefix], dim=0) if isinstance(weight, UnquantizedWeight): # Only on non quantized versions weight.weight = ( weight.weight.view( num_heads, 3, head_size, hidden_size, ) .permute(1, 0, 2, 3) .reshape(-1, hidden_size) ) bias = weights.get_sharded(f"{prefix}.bias", dim=0) bias = bias.view(num_heads, 3, head_size).permute(1, 0, 2).reshape(-1) linear = get_linear(weight, bias) if config.use_parallel_residual: return linear else: return TensorParallelColumnLinear(linear) class FlashNeoxAttention(torch.nn.Module): def __init__(self, config, prefix, weights): super().__init__() num_heads = config.num_attention_heads hidden_size = config.hidden_size self.num_heads = num_heads self.hidden_size = hidden_size self.head_size = hidden_size // num_heads self.rotary_dim = int(config.rotary_pct * self.head_size) if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=self.rotary_dim, base=config.rotary_emb_base, device=weights.device, 
) self.softmax_scale = self.head_size ** (-0.5) self.query_key_value = load_qkv( config, prefix=f"{prefix}.query_key_value", weights=weights, num_heads=self.num_heads, head_size=self.head_size, hidden_size=self.hidden_size, ) self.dense = load_row( config, prefix=f"{prefix}.dense", weights=weights, bias=True ) self.kv_head_mapping = torch.arange( 0, self.num_heads, dtype=torch.int32, device=weights.device ) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ): qkv = self.query_key_value(hidden_states) qkv = qkv.view(-1, 3, self.num_heads, self.head_size) # Compute rotary embeddings on rotary_ndims query_rot = qkv[:, 0][..., : self.rotary_dim] query_pass = qkv[:, 0][..., self.rotary_dim :] key_rot = qkv[:, 1][..., : self.rotary_dim] key_pass = qkv[:, 1][..., self.rotary_dim :] # Inplace rotary self.rotary_emb(query_rot, key_rot, cos, sin) qkv[:, 0] = torch.cat((query_rot, query_pass), dim=-1) qkv[:, 1] = torch.cat((key_rot, key_pass), dim=-1) reshape_and_cache(qkv[:, 1], qkv[:, 2], kv_cache[0], kv_cache[1], slots) # Prefill if cu_seqlen_prefill is not None: # flash attention attn_output = attention( qkv[:, 0], kv_cache[0], kv_cache[1], seqlen, block_tables, self.softmax_scale, ) # Decode else: attn_output = paged_attention( qkv[:, 0], kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s, ) return self.dense(attn_output.view(-1, self.num_heads * self.head_size)) class FlashMLP(nn.Module): def __init__(self, config, prefix, weights): super().__init__() act = config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) self.dense_h_to_4h = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.dense_h_to_4h", weights=weights, bias=True ) self.dense_4h_to_h = load_row( config, prefix=f"{prefix}.dense_4h_to_h", weights=weights, bias=True ) def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states class FlashNeoXLayer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() layer_norm_eps = config.layer_norm_eps prefix = f"gpt_neox.layers.{layer_id}" self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = FastLayerNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=layer_norm_eps ) self.post_attention_layernorm = FastLayerNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=layer_norm_eps, ) self.attention = FlashNeoxAttention( config, prefix=f"{prefix}.attention", weights=weights ) self.mlp = FlashMLP(config, prefix=f"{prefix}.mlp", weights=weights) self.process_group = weights.process_group def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ): if self.use_parallel_residual: ln1_hidden_states, _ = self.input_layernorm(hidden_states) attn_output = self.attention( ln1_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ) ln2_hidden_states, _ = self.post_attention_layernorm(hidden_states) mlp_output = self.mlp(ln2_hidden_states) intermediate = mlp_output + attn_output if self.process_group.size() > 1: torch.distributed.all_reduce(intermediate, group=self.process_group) return intermediate + hidden_states, None else: hidden_states, residual = 
self.input_layernorm(hidden_states, residual) hidden_states = self.attention( hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ) hidden_states, residual = self.post_attention_layernorm( hidden_states, residual ) mlp_output = self.mlp(hidden_states) return mlp_output, residual class FlashGPTNeoXPreTrainedModel(PreTrainedModel): config_class = GPTNeoXConfig base_model_prefix = "gpt_neox" supports_gradient_checkpointing = False _no_split_modules = None class FlashGPTNeoXModel(FlashGPTNeoXPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) self.config = config self.embed_in = TensorParallelEmbedding( prefix=f"{prefix}.embed_in", weights=weights ) self.layers = nn.ModuleList( [ FlashNeoXLayer(layer_id, config, weights) for layer_id in range(config.num_hidden_layers) ] ) self.final_layer_norm = FastLayerNorm.load( prefix=f"{prefix}.final_layer_norm", weights=weights, eps=config.layer_norm_eps, ) self.gradient_checkpointing = False self.head_size = self.layers[0].attention.head_size self.num_heads = self.layers[0].attention.num_heads def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, ) -> torch.Tensor: hidden_states = self.embed_in(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].attention.rotary_emb.get_cos_sin( position_ids, max_s, hidden_states.dtype ) residual = None for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s, ) hidden_states, _ = self.final_layer_norm(hidden_states, residual) return hidden_states class FlashGPTNeoXForCausalLM(FlashGPTNeoXPreTrainedModel): def __init__(self, prefix, config, weights): super().__init__(config) if not prefix: prefix = "gpt_neox" else: prefix = f"{prefix}.gpt_neox" self.gpt_neox = FlashGPTNeoXModel(prefix, config, weights) self.embed_out = SpeculativeHead.load( config, prefix="embed_out", weights=weights ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, adapter_data: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.gpt_neox( input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.embed_out(hidden_states) return logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_neox_modeling.py", "repo_id": "text-generation-inference", "token_count": 6480 }
220
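FlashNeoxAttention above applies rotary embeddings only to the first rotary_dim = int(rotary_pct * head_size) channels of each head and passes the remaining channels through unchanged. The helper below is a plain-PyTorch sketch of that partial-rotary split in rotate-half form, for illustration only; it is not the fused kernel the server actually calls, and the cos/sin tensors are assumed to broadcast against the rotary slice.

import torch


def apply_partial_rotary(
    q: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, rotary_dim: int
) -> torch.Tensor:
    # q: [..., head_size]; only the first `rotary_dim` channels are rotated.
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    x1, x2 = q_rot.chunk(2, dim=-1)
    # rotate-half formulation: out = x * cos + rotate_half(x) * sin
    q_rot = q_rot * cos + torch.cat((-x2, x1), dim=-1) * sin
    return torch.cat((q_rot, q_pass), dim=-1)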
"""A simple, flexible implementation of a GPT model. Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py """ import math import warnings from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from einops import rearrange from packaging import version from text_generation_server.layers import ( TensorParallelEmbedding, TensorParallelColumnLinear, TensorParallelRowLinear, SpeculativeHead, get_linear, ) EPS = 1e-5 def load_col(config, prefix, weights, bias): assert config.quantize != "gptq", NotImplementedError slice_ = weights._get_slice(f"{prefix}.weight") rank = weights.process_group.rank() size = weights.process_group.size() h3, h = slice_.get_shape() block_size = h // size q_part = slice_[rank * block_size : (rank + 1) * block_size] k_part = slice_[h + rank * block_size : h + (rank + 1) * block_size] v_part = slice_[2 * h + rank * block_size : 2 * h + (rank + 1) * block_size] weight = torch.cat([q_part, k_part, v_part], dim=0) if weight.dtype != torch.int32: weight = weight.to(dtype=weights.dtype) weight = weight.to(device=weights.device) if bias: bias_slice_ = weights._get_slice(f"{prefix}.bias") bias_rank = weights.process_group.rank() bias_size = weights.process_group.size() bias_h = bias_slice_.get_shape() bias_h = bias_h[0] bias_block_size = bias_h // bias_size bias_q_part = bias_slice_[ bias_rank * bias_block_size : (bias_rank + 1) * bias_block_size ] bias_k_part = bias_slice_[ bias_h + bias_rank * bias_block_size : bias_h + (bias_rank + 1) * bias_block_size ] bias_v_part = bias_slice_[ 2 * bias_h + bias_rank * bias_block_size : 2 * bias_h + (bias_rank + 1) * bias_block_size ] bias = torch.cat([bias_q_part, bias_k_part, bias_v_part], dim=0) if bias.dtype != torch.int32: bias = bias.to(dtype=weights.dtype) bias = bias.to(device=weights.device) else: bias = None linear = get_linear(weight, bias) return TensorParallelColumnLinear(linear) def _reset_is_causal( num_query_tokens: int, num_key_tokens: int, original_is_causal: bool ): if original_is_causal and num_query_tokens != num_key_tokens: if num_query_tokens != 1: raise NotImplementedError( "MPT does not support query and key with different number of tokens, unless number of query tokens is 1." 
) else: return False return original_is_causal def scaled_multihead_dot_product_attention( query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False, ): q = rearrange(query, "b s (h d) -> b h s d", h=n_heads) kv_n_heads = 1 if multiquery else n_heads k = rearrange(key, "b s (h d) -> b h d s", h=kv_n_heads) v = rearrange(value, "b s (h d) -> b h s d", h=kv_n_heads) if past_key_value is not None: if len(past_key_value) != 0: k = torch.cat([past_key_value[0], k], dim=3) v = torch.cat([past_key_value[1], v], dim=2) past_key_value = (k, v) (b, _, s_q, d) = q.shape s_k = k.size(-1) attn_weight = q.matmul(k) * softmax_scale if attn_bias is not None: _s_q = max(0, attn_bias.size(2) - s_q) _s_k = max(0, attn_bias.size(3) - s_k) attn_bias = attn_bias[:, :, _s_q:, _s_k:] if ( attn_bias.size(-1) != 1 and attn_bias.size(-1) != s_k or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q) ): raise RuntimeError( f"attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}." ) attn_weight = attn_weight + attn_bias min_val = torch.finfo(q.dtype).min if key_padding_mask is not None: if attn_bias is not None: warnings.warn( "Propogating key_padding_mask to the attention module " + "and applying it within the attention module can cause " + "unneccessary computation/memory usage. Consider integrating " + "into attn_bias once and passing that to each attention " + "module instead." ) attn_weight = attn_weight.masked_fill( ~key_padding_mask.view((b, 1, 1, s_k)), min_val ) if is_causal and (not q.size(2) == 1): s = max(s_q, s_k) causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16) causal_mask = causal_mask.tril() causal_mask = causal_mask.to(torch.bool) causal_mask = ~causal_mask causal_mask = causal_mask[-s_q:, -s_k:] attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val) attn_weight = torch.softmax(attn_weight, dim=-1) if dropout_p: attn_weight = torch.nn.functional.dropout( attn_weight, p=dropout_p, training=training, inplace=True ) out = attn_weight.to(v.dtype).matmul(v) out = rearrange(out, "b h s d -> b s (h d)") if needs_weights: return (out, attn_weight, past_key_value) return (out, None, past_key_value) def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]): for tensor in tensors: if tensor.dtype not in valid_dtypes: raise TypeError( f"tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}." ) if not tensor.is_cuda: raise TypeError( f"Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r})." 
) def flash_attn_fn( query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False, ): try: from flash_attn import bert_padding, flash_attn_interface except Exception: raise RuntimeError("Please install flash-attn==1.0.3.post0") check_valid_inputs(query, key, value) if past_key_value is not None: if len(past_key_value) != 0: key = torch.cat([past_key_value[0], key], dim=1) value = torch.cat([past_key_value[1], value], dim=1) past_key_value = (key, value) if attn_bias is not None: _s_q = max(0, attn_bias.size(2) - query.size(1)) _s_k = max(0, attn_bias.size(3) - key.size(1)) attn_bias = attn_bias[:, :, _s_q:, _s_k:] if attn_bias is not None: raise NotImplementedError("attn_bias not implemented for flash attn.") (batch_size, seqlen) = query.shape[:2] if key_padding_mask is None: key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool) query_padding_mask = key_padding_mask[:, -query.size(1) :] (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input( query, query_padding_mask ) query_unpad = rearrange(query_unpad, "nnz (h d) -> nnz h d", h=n_heads) (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input( key, key_padding_mask ) key_unpad = rearrange( key_unpad, "nnz (h d) -> nnz h d", h=1 if multiquery else n_heads ) (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask) value_unpad = rearrange( value_unpad, "nnz (h d) -> nnz h d", h=1 if multiquery else n_heads ) if multiquery: key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1)) value_unpad = value_unpad.expand( value_unpad.size(0), n_heads, value_unpad.size(-1) ) dropout_p = dropout_p if training else 0.0 reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal) output_unpad = flash_attn_interface.flash_attn_unpadded_func( query_unpad, key_unpad, value_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights, ) output = bert_padding.pad_input( rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices_q, batch_size, seqlen ) return (output, None, past_key_value) def triton_flash_attn_fn( query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False, ): try: from .flash_attn_triton import flash_attn_func except Exception: _installed = False if version.parse(torch.__version__) < version.parse("2.0.0"): _installed = True try: from flash_attn.flash_attn_triton import flash_attn_func except Exception: _installed = False if not _installed: raise RuntimeError( "Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]` if installing from llm-foundry source or `pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). Note: (1) requires you have CMake and PyTorch already installed." 
) check_valid_inputs(query, key, value) if past_key_value is not None: if len(past_key_value) != 0: key = torch.cat([past_key_value[0], key], dim=1) value = torch.cat([past_key_value[1], value], dim=1) past_key_value = (key, value) if attn_bias is not None: _s_q = max(0, attn_bias.size(2) - query.size(1)) _s_k = max(0, attn_bias.size(3) - key.size(1)) attn_bias = attn_bias[:, :, _s_q:, _s_k:] if dropout_p: raise NotImplementedError("Dropout not implemented for attn_impl: triton.") if needs_weights: raise NotImplementedError("attn_impl: triton cannot return attn weights.") if key_padding_mask is not None: warnings.warn( "Propagating key_padding_mask to the attention module " + "and applying it within the attention module can cause " + "unnecessary computation/memory usage. Consider integrating " + "into attn_bias once and passing that to each attention " + "module instead." ) (b_size, s_k) = key_padding_mask.shape[:2] if attn_bias is None: attn_bias = query.new_zeros(b_size, 1, 1, s_k) attn_bias = attn_bias.masked_fill( ~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min ) query = rearrange(query, "b s (h d) -> b s h d", h=n_heads) key = rearrange(key, "b s (h d) -> b s h d", h=1 if multiquery else n_heads) value = rearrange(value, "b s (h d) -> b s h d", h=1 if multiquery else n_heads) if multiquery: key = key.expand(*key.shape[:2], n_heads, key.size(-1)) value = value.expand(*value.shape[:2], n_heads, value.size(-1)) reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal) attn_output = flash_attn_func( query, key, value, attn_bias, reset_is_causal, softmax_scale ) output = attn_output.view(*attn_output.shape[:2], -1) return (output, None, past_key_value) class MultiheadAttention(nn.Module): """Multi-head self attention. Using torch or triton attention implementation enables user to also use additive bias. 
""" def __init__( self, config, prefix, weights, ): super().__init__() attn_impl = config.attn_config.attn_impl self.attn_impl = config.attn_config.attn_impl self.clip_qkv = config.attn_config.clip_qkv self.qk_ln = config.attn_config.qk_ln self.d_model = config.d_model d_model = config.d_model self.n_heads = config.n_heads self.softmax_scale = config.attn_config.softmax_scale if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads) self.attn_dropout_p = config.attn_config.attn_pdrop if self.n_heads % weights.process_group.size() != 0: raise ValueError( f"`n_heads` must be divisible by `num_shards` (got `n_heads`: {self.n_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.n_heads = self.n_heads // weights.process_group.size() self.Wqkv = load_col( config, prefix=f"{prefix}.Wqkv", weights=weights, bias=not config.no_bias ) if self.qk_ln: bias = not config.no_bias hidden_size = config.d_model head_dim = hidden_size // self.n_heads self.q_ln = LPLayerNorm( d_model, bias=bias, prefix=f"{prefix}.q_ln", weights=weights ) self.k_ln = LPLayerNorm( self.n_heads * head_dim, prefix=f"{prefix}.k_ln", weights=weights ) if self.attn_impl == "flash": self.attn_fn = flash_attn_fn elif self.attn_impl == "triton": self.attn_fn = triton_flash_attn_fn elif self.attn_impl == "torch": self.attn_fn = scaled_multihead_dot_product_attention else: raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.") self.out_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=not config.no_bias, ) def forward( self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False, ): qkv = self.Wqkv(x) if self.clip_qkv: qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv) (query, key, value) = qkv.chunk(3, dim=2) key_padding_mask = attention_mask if self.qk_ln: dtype = query.dtype query = self.q_ln(query).to(dtype) key = self.k_ln(key).to(dtype) (context, attn_weights, past_key_value) = self.attn_fn( query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, ) out = self.out_proj(context) return (out, attn_weights, past_key_value) class MultiQueryAttention(nn.Module): """Multi-Query self attention. Using torch or triton attention implementation enables user to also use additive bias. 
""" def __init__(self, config, prefix, weights, verbose=False): super().__init__() attn_impl = config.attn_config.attn_impl self.attn_impl = config.attn_config.attn_impl self.clip_qkv = config.attn_config.clip_qkv self.qk_ln = config.attn_config.qk_ln self.d_model = config.d_model d_model = config.d_model self.n_heads = config.n_heads self.softmax_scale = config.attn_config.softmax_scale if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.head_dim) self.attn_dropout_p = config.attn_config.attn_pdrop # self.Wqkv = nn.Linear(d_model, d_model + 2 * self.head_dim, device=device) self.Wqkv = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.Wqkv", weights=weights, bias=not config.no_bias ) (d_model, d_model + self.head_dim) if self.qk_ln: raise NotImplementedError("qk_ln not supported") if self.attn_impl == "flash": self.attn_fn = flash_attn_fn elif self.attn_impl == "triton": self.attn_fn = triton_flash_attn_fn if verbose: warnings.warn( "While `attn_impl: triton` can be faster than `attn_impl: flash` " + "it uses more memory. When training larger models this can trigger " + "alloc retries which hurts performance. If encountered, we recommend " + "using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`." ) elif self.attn_impl == "torch": self.attn_fn = scaled_multihead_dot_product_attention if torch.cuda.is_available() and verbose: warnings.warn( "Using `attn_impl: torch`. If your model does not use `alibi` or " + "`prefix_lm` we recommend using `attn_impl: flash` otherwise " + "we recommend using `attn_impl: triton`." ) else: raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.") self.out_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=not config.no_bias, ) # self.out_proj._is_residual = True def forward( self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False, ): qkv = self.Wqkv(x) if self.clip_qkv: qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv) (query, key, value) = qkv.split( [self.d_model, self.head_dim, self.head_dim], dim=2 ) key_padding_mask = attention_mask if self.qk_ln: dtype = query.dtype query = self.q_ln(query).to(dtype) key = self.k_ln(key).to(dtype) (context, attn_weights, past_key_value) = self.attn_fn( query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, multiquery=True, ) return (self.out_proj(context), attn_weights, past_key_value) def attn_bias_shape( attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id ): if attn_impl == "flash": return None elif attn_impl in ["torch", "triton"]: if alibi: if (prefix_lm or not causal) or use_sequence_id: return (1, n_heads, seq_len, seq_len) return (1, n_heads, 1, seq_len) elif prefix_lm or use_sequence_id: return (1, 1, seq_len, seq_len) return None else: raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.") def build_attn_bias( attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8 ): if attn_impl == "flash": return None elif attn_impl in ["torch", "triton"]: if alibi: (device, dtype) = (attn_bias.device, attn_bias.dtype) attn_bias = attn_bias.add( build_alibi_bias( n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype, ) ) return attn_bias else: raise 
ValueError(f"attn_impl={attn_impl!r} is an invalid setting.") def gen_slopes(n_heads, alibi_bias_max=8, device=None): _n_heads = 2 ** math.ceil(math.log2(n_heads)) m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device) m = m.mul(alibi_bias_max / _n_heads) slopes = 1.0 / torch.pow(2, m) if _n_heads != n_heads: slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads] return slopes.view(1, n_heads, 1, 1) def build_alibi_bias( n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None ): alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view( 1, 1, 1, seq_len ) if full: alibi_bias = alibi_bias - torch.arange( 1 - seq_len, 1, dtype=torch.int32, device=device ).view(1, 1, seq_len, 1) alibi_bias = alibi_bias.abs().mul(-1) slopes = gen_slopes(n_heads, alibi_bias_max, device=device) alibi_bias = alibi_bias * slopes return alibi_bias.to(dtype=dtype) ATTN_CLASS_REGISTRY = { "multihead_attention": MultiheadAttention, "multiquery_attention": MultiQueryAttention, } """GPT Blocks used for the GPT Model.""" class MPTMLP(nn.Module): def __init__(self, config, prefix, weights): super().__init__() # self.up_proj = nn.Linear(d_model, expansion_ratio * d_model, device=device) self.up_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.up_proj", weights=weights, bias=not config.no_bias ) self.act = nn.GELU(approximate="none") # self.down_proj = nn.Linear(expansion_ratio * d_model, d_model, device=device) self.down_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.down_proj", weights=weights, bias=not config.no_bias, ) # self.down_proj._is_residual = True def forward(self, x): return self.down_proj(self.act(self.up_proj(x))) class MPTBlock(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.prefix = prefix if config.attn_config.attn_type != "multihead_attention": raise NotImplementedError( f"""Not implemented attn {config.attn_config.attn_type}""" ) resid_pdrop = config.resid_pdrop if config.no_bias: self.norm_1 = nn.LayerNorm.load_no_bias( prefix=f"{prefix}.norm_1", weights=weights, eps=EPS ) self.norm_2 = nn.LayerNorm.load_no_bias( prefix=f"{prefix}.norm_2", weights=weights, eps=EPS ) else: self.norm_1 = nn.LayerNorm.load( prefix=f"{prefix}.norm_1", weights=weights, eps=EPS ) self.norm_2 = nn.LayerNorm.load( prefix=f"{prefix}.norm_2", weights=weights, eps=EPS ) self.attn = MultiheadAttention(config, prefix=f"{prefix}.attn", weights=weights) self.ffn = MPTMLP(config, prefix=f"{prefix}.ffn", weights=weights) self.resid_attn_dropout = nn.Dropout(resid_pdrop) self.resid_ffn_dropout = nn.Dropout(resid_pdrop) def forward( self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]] = None, attn_bias: Optional[torch.Tensor] = None, attention_mask: Optional[torch.ByteTensor] = None, is_causal: bool = True, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: a = self.norm_1(x) (b, attn_weights, past_key_value) = self.attn( a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal, ) x = x + self.resid_attn_dropout(b) m = self.norm_2(x) n = self.ffn(m) x = x + self.resid_ffn_dropout(n) return (x, attn_weights, past_key_value) def _cast_if_autocast_enabled(tensor): if torch.is_autocast_enabled(): if tensor.device.type == "cuda": dtype = torch.get_autocast_gpu_dtype() elif tensor.device.type == "cpu": dtype = torch.get_autocast_cpu_dtype() else: raise NotImplementedError() return tensor.to(dtype=dtype) return tensor class 
LPLayerNorm(torch.nn.LayerNorm): def __init__( self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None, bias: Optional[bool] = True, prefix=None, weights=None, ): super().__init__( normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype, bias=bias, ) if weights is not None: self.weight = nn.Parameter(weights.get_sharded(f"{prefix}.weight", dim=0)) if bias: self.bias = nn.Parameter(weights.get_sharded(f"{prefix}.bias", dim=0)) self.normalized_shape = self.weight.shape def forward(self, x): module_device = x.device downcast_x = _cast_if_autocast_enabled(x) downcast_weight = ( _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight ) downcast_bias = ( _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias ) with torch.autocast(enabled=False, device_type=module_device.type): return torch.nn.functional.layer_norm( downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps, ) def rms_norm(x, weight=None, eps=1e-05): output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps) if weight is not None: return output * weight return output class RMSNorm(torch.nn.Module): def __init__( self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None ): super().__init__() self.eps = eps if weight: self.weight = torch.nn.Parameter( torch.ones(normalized_shape, dtype=dtype, device=device) ) else: self.register_parameter("weight", None) def forward(self, x): return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype) class LPRMSNorm(RMSNorm): def __init__( self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None ): super().__init__( normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device, ) def forward(self, x): downcast_x = _cast_if_autocast_enabled(x) downcast_weight = ( _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight ) with torch.autocast(enabled=False, device_type=x.device.type): return rms_norm(downcast_x, downcast_weight, self.eps).to(dtype=x.dtype) NORM_CLASS_REGISTRY = { "layernorm": torch.nn.LayerNorm, "low_precision_layernorm": LPLayerNorm, "rmsnorm": RMSNorm, "low_precision_rmsnorm": LPRMSNorm, } Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] class MPTPreTrainedModel(PreTrainedModel): base_model_prefix = "model" _no_split_modules = ["MPTBlock"] class MPTModel(MPTPreTrainedModel): def __init__(self, prefix: str, config, weights): # config._validate_config() super().__init__(config) self.world_size = weights.process_group.size() self.rank = weights.process_group.rank() self.n_heads = config.n_heads self.attn_impl = config.attn_config.attn_impl self.prefix_lm = config.attn_config.prefix_lm self.attn_uses_sequence_id = config.attn_config.attn_uses_sequence_id self.alibi = config.attn_config.alibi self.alibi_bias_max = config.attn_config.alibi_bias_max if config.init_device == "mixed": # TODO: reimplement mixed device initialization # dist.get_local_rank() == 0: if True: config.init_device = "cpu" else: config.init_device = "meta" if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys(): norm_options = " | ".join(NORM_CLASS_REGISTRY.keys()) raise NotImplementedError( f"Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options})." ) if config.norm_type.lower() != "low_precision_layernorm": raise NotImplementedError( f"Requested norm type ({config.norm_type}) is not implemented within this repo." 
) self.wte = TensorParallelEmbedding(f"{prefix}.wte", weights) if not self.alibi: self.wpe = TensorParallelEmbedding(f"{prefix}.wpe", weights) self.blocks = nn.ModuleList( [ MPTBlock(config, prefix=f"{prefix}.blocks.{i}", weights=weights) for i in range(config.n_layers) ] ) if config.no_bias: self.norm_f = nn.LayerNorm.load_no_bias( prefix="transformer.norm_f", weights=weights, eps=EPS ) else: self.norm_f = nn.LayerNorm.load( prefix="transformer.norm_f", weights=weights, eps=EPS ) self.is_causal = not self.prefix_lm self._attn_bias_initialized = False self.attn_bias = None self.attn_bias_shape = attn_bias_shape( self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id, ) if config.no_bias: for module in self.modules(): if hasattr(module, "bias") and isinstance(module.bias, nn.Parameter): if config.verbose: warnings.warn(f"Removing bias ({module.bias}) from {module}.") module.register_parameter("bias", None) if hasattr(self.config, "verbose"): if config.verbose and config.verbose > 2: print(self) if "verbose" not in self.config.init_config: self.config.init_config["verbose"] = self.config.verbose if self.config.init_config["verbose"] > 1: init_fn_name = self.config.init_config["name"] warnings.warn(f"Using {init_fn_name} initialization.") @torch.no_grad() def _attn_bias( self, device, dtype, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, ): if not self._attn_bias_initialized: if self.attn_bias_shape: self.attn_bias = torch.zeros( self.attn_bias_shape, device=device, dtype=dtype ) self.attn_bias = build_attn_bias( self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max, ) assert self.n_heads % self.world_size == 0 block_size = self.n_heads // self.world_size self.attn_bias = self.attn_bias[ :, self.rank * block_size : (self.rank + 1) * block_size ] self._attn_bias_initialized = True if self.attn_impl == "flash": return (self.attn_bias, attention_mask) if self.attn_bias is not None: self.attn_bias = self.attn_bias.to(dtype=dtype, device=device) attn_bias = self.attn_bias if self.prefix_lm: assert isinstance(attn_bias, torch.Tensor) assert isinstance(prefix_mask, torch.Tensor) attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask) if self.attn_uses_sequence_id and sequence_id is not None: assert isinstance(attn_bias, torch.Tensor) attn_bias = self._apply_sequence_id(attn_bias, sequence_id) if attention_mask is not None: s_k = attention_mask.shape[-1] if attn_bias is None: attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype) else: _s_k = max(0, attn_bias.size(-1) - s_k) attn_bias = attn_bias[:, :, :, _s_k:] if prefix_mask is not None and attention_mask.shape != prefix_mask.shape: raise ValueError( f"attention_mask shape={attention_mask.shape} " + f"and prefix_mask shape={prefix_mask.shape} are not equal." ) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill( ~attention_mask.view(-1, 1, 1, s_k), min_val ) return (attn_bias, None) def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor): (s_k, s_q) = attn_bias.shape[-2:] if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len: raise ValueError( "attn_bias does not match the expected shape. 
" + f"The last two dimensions should both be {self.config.max_length} " + f"but are {s_k} and {s_q}." ) seq_len = prefix_mask.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError( f"prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}" ) attn_bias = attn_bias[..., :seq_len, :seq_len] causal = torch.tril( torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device) ).view(1, 1, seq_len, seq_len) prefix = prefix_mask.view(-1, 1, 1, seq_len) cannot_attend = ~torch.logical_or(causal, prefix.bool()) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def _apply_sequence_id( self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor ): seq_len = sequence_id.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError( f"sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}" ) attn_bias = attn_bias[..., :seq_len, :seq_len] cannot_attend = torch.logical_not( torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len)) ).unsqueeze(1) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def forward( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, use_cache: Optional[bool] = None, ): return_dict = ( return_dict if return_dict is not None else self.config.return_dict ) use_cache = use_cache if use_cache is not None else self.config.use_cache if attention_mask is not None: attention_mask = attention_mask.bool() if prefix_mask is not None: prefix_mask = prefix_mask.bool() if not return_dict: raise NotImplementedError( "return_dict False is not implemented yet for MPT" ) if output_attentions: if self.attn_impl != "torch": raise NotImplementedError( "output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`." ) if ( attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training ): raise NotImplementedError( "MPT does not support training with left padding." ) if self.prefix_lm and prefix_mask is None: raise ValueError( "prefix_mask is a required argument when MPT is configured with prefix_lm=True." ) if self.training: if self.attn_uses_sequence_id and sequence_id is None: raise ValueError( "sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True " + "and the model is in train mode." ) elif self.attn_uses_sequence_id is False and sequence_id is not None: warnings.warn( "MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. " + "This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True." 
) S = input_ids.size(1) assert ( S <= self.config.max_seq_len ), f"Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}" tok_emb = self.wte(input_ids) if self.alibi: x = tok_emb else: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError( "past_key_values must provide a past_key_value for each attention " + f"layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r})." ) past_position = past_key_values[0][0].size(1) if self.attn_impl == "torch": past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError( f"Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}." ) pos = torch.arange( past_position, S + past_position, dtype=torch.long, device=input_ids.device, ).unsqueeze(0) if attention_mask is not None: pos = torch.clamp( pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[ :, past_position: ], min=0, ) pos_emb = self.wpe(pos) x = tok_emb + pos_emb (attn_bias, attention_mask) = self._attn_bias( device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, ) if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers)] all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for b_idx, block in enumerate(self.blocks): if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) past_key_value = ( past_key_values[b_idx] if past_key_values is not None else None ) (x, attn_weights, past_key_value) = block( x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal, ) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast( last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) class MPTForCausalLM(MPTPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) if not prefix: prefix = "transformer" else: prefix = f"{prefix}.transformer" if not config.tie_word_embeddings: raise ValueError("MPTForCausalLM only supports tied word embeddings") self.transformer = MPTModel(prefix, config, weights) self.lm_head = SpeculativeHead.load( config, prefix=f"{prefix}.wte", weights=weights ) self.logit_scale = None if config.logit_scale is not None: logit_scale = config.logit_scale if isinstance(logit_scale, str): if logit_scale == "inv_sqrt_d_model": logit_scale = 1 / math.sqrt(config.d_model) else: raise ValueError( f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'." 
) self.logit_scale = logit_scale def forward( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, use_cache: Optional[bool] = None, ): return_dict = ( return_dict if return_dict is not None else self.config.return_dict ) use_cache = use_cache if use_cache is not None else self.config.use_cache outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, ) logits, speculative_logits = self.lm_head(outputs.last_hidden_state) if self.logit_scale is not None: if self.logit_scale == 0: warnings.warn( f"Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs." ) logits *= self.logit_scale loss = None if labels is not None: labels = torch.roll(labels, shifts=-1) labels[:, -1] = -100 loss = F.cross_entropy( logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1) ) return ( CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ), speculative_logits, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs ): if inputs_embeds is not None: raise NotImplementedError("inputs_embeds is not implemented for MPT yet") attention_mask = kwargs["attention_mask"].bool() if attention_mask[:, -1].sum() != attention_mask.shape[0]: raise NotImplementedError( "MPT does not support generation with right padding." ) if self.transformer.attn_uses_sequence_id and self.training: sequence_id = torch.zeros_like(input_ids[:1]) else: sequence_id = None if past_key_values is not None: input_ids = input_ids[:, -1].unsqueeze(-1) if self.transformer.prefix_lm: prefix_mask = torch.ones_like(attention_mask) if kwargs.get("use_cache") is False: raise NotImplementedError( "MPT with prefix_lm=True does not support use_cache=False." ) else: prefix_mask = None return { "input_ids": input_ids, "attention_mask": attention_mask, "prefix_mask": prefix_mask, "sequence_id": sequence_id, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache", True), } @staticmethod def _reorder_cache(past_key_values, beam_idx): """Used by HuggingFace generate when using beam search with kv-caching. See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133 for an example in transformers. """ reordered_past = [] for layer_past in past_key_values: reordered_past += [ tuple( (past_state.index_select(0, beam_idx) for past_state in layer_past) ) ] return reordered_past
text-generation-inference/server/text_generation_server/models/custom_modeling/mpt_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/mpt_modeling.py", "repo_id": "text-generation-inference", "token_count": 23706 }
221
import torch from abc import ABC, abstractmethod from dataclasses import dataclass from typing import List, Optional from transformers import PreTrainedTokenizerBase from text_generation_server.pb import generate_pb2 from text_generation_server.pb.generate_pb2 import FinishReason class Batch(ABC): @abstractmethod def to_pb(self) -> generate_pb2.CachedBatch: raise NotImplementedError @classmethod @abstractmethod def from_pb( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device, ) -> "Batch": raise NotImplementedError @abstractmethod def filter(self, request_ids: List[int]) -> "Batch": raise NotImplementedError @classmethod @abstractmethod def concatenate(cls, batches: List["Batch"]) -> "Batch": raise NotImplementedError @abstractmethod def __len__(self): raise NotImplementedError @dataclass class GeneratedText: text: str generated_tokens: int finish_reason: FinishReason seed: Optional[int] def to_pb(self) -> generate_pb2.GeneratedText: return generate_pb2.GeneratedText( text=self.text, generated_tokens=self.generated_tokens, finish_reason=self.finish_reason, seed=self.seed, ) @dataclass class Tokens: token_ids: List[int] logprobs: List[float] texts: List[str] is_special: List[bool] def to_pb(self) -> generate_pb2.Tokens: return generate_pb2.Tokens( ids=self.token_ids, logprobs=self.logprobs, texts=self.texts, is_special=self.is_special, ) def __len__(self): return len(self.token_ids) @dataclass class Generation: request_id: int prefill_tokens: Optional[Tokens] tokens: Tokens generated_text: Optional[GeneratedText] # Optional for now, since it's not yet supported for every model. top_tokens: Optional[List[Tokens]] def to_pb(self) -> generate_pb2.Generation: return generate_pb2.Generation( request_id=self.request_id, prefill_tokens=( self.prefill_tokens.to_pb() if self.prefill_tokens is not None else None ), tokens=self.tokens.to_pb(), generated_text=( self.generated_text.to_pb() if self.generated_text is not None else None ), top_tokens=( [top_tokens.to_pb() for top_tokens in self.top_tokens] if self.top_tokens is not None else None ), )
text-generation-inference/server/text_generation_server/models/types.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/types.py", "repo_id": "text-generation-inference", "token_count": 1219 }
222
import os from typing import Union from loguru import logger import torch from transformers import AutoTokenizer from peft import AutoPeftModelForCausalLM, AutoPeftModelForSeq2SeqLM def download_and_unload_peft(model_id, revision, trust_remote_code): torch_dtype = torch.float16 logger.info("Trying to load a Peft model. It might take a while without feedback") try: model = AutoPeftModelForCausalLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) except Exception: model = AutoPeftModelForSeq2SeqLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) logger.info("Peft model detected.") logger.info("Merging the lora weights.") base_model_id = model.peft_config["default"].base_model_name_or_path model = model.merge_and_unload() os.makedirs(model_id, exist_ok=True) cache_dir = model_id logger.info(f"Saving the newly created merged model to {cache_dir}") tokenizer = AutoTokenizer.from_pretrained( base_model_id, trust_remote_code=trust_remote_code ) model.save_pretrained(cache_dir, safe_serialization=True) model.config.save_pretrained(cache_dir) tokenizer.save_pretrained(cache_dir) def download_peft( model_id: Union[str, os.PathLike], revision: str, trust_remote_code: bool ): torch_dtype = torch.float16 try: _model = AutoPeftModelForCausalLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) except Exception: _model = AutoPeftModelForSeq2SeqLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) logger.info("Peft model downloaded.")
text-generation-inference/server/text_generation_server/utils/peft.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/peft.py", "repo_id": "text-generation-inference", "token_count": 981 }
223
# `tokenizers-darwin-x64` This is the **x86_64-apple-darwin** binary for `tokenizers`
tokenizers/bindings/node/npm/darwin-x64/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/darwin-x64/README.md", "repo_id": "tokenizers", "token_count": 34 }
224
# `tokenizers-win32-ia32-msvc` This is the **i686-pc-windows-msvc** binary for `tokenizers`
tokenizers/bindings/node/npm/win32-ia32-msvc/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/win32-ia32-msvc/README.md", "repo_id": "tokenizers", "token_count": 37 }
225
extern crate tokenizers as tk; use crate::encoding::*; use crate::tokenizer::Tokenizer; use napi::bindgen_prelude::*; use tk::tokenizer::{EncodeInput, Encoding}; pub struct EncodeTask<'s> { pub tokenizer: Tokenizer, pub input: Option<EncodeInput<'s>>, pub add_special_tokens: bool, } impl Task for EncodeTask<'static> { type Output = Encoding; type JsValue = JsEncoding; fn compute(&mut self) -> Result<Self::Output> { self .tokenizer .tokenizer .read() .unwrap() .encode_char_offsets( self .input .take() .ok_or(Error::from_reason("No provided input"))?, self.add_special_tokens, ) .map_err(|e| Error::from_reason(format!("{}", e))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok(JsEncoding { encoding: Some(output), }) } } pub struct DecodeTask { pub tokenizer: Tokenizer, pub ids: Vec<u32>, pub skip_special_tokens: bool, } impl Task for DecodeTask { type Output = String; type JsValue = String; fn compute(&mut self) -> Result<Self::Output> { self .tokenizer .tokenizer .read() .unwrap() .decode(&self.ids, self.skip_special_tokens) .map_err(|e| Error::from_reason(format!("{}", e))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok(output) } } pub struct EncodeBatchTask<'s> { pub tokenizer: Tokenizer, pub inputs: Option<Vec<EncodeInput<'s>>>, pub add_special_tokens: bool, } impl Task for EncodeBatchTask<'static> { type Output = Vec<Encoding>; type JsValue = Vec<JsEncoding>; fn compute(&mut self) -> Result<Self::Output> { self .tokenizer .tokenizer .read() .unwrap() .encode_batch_char_offsets( self .inputs .take() .ok_or(Error::from_reason("No provided input"))?, self.add_special_tokens, ) .map_err(|e| Error::from_reason(format!("{}", e))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok( output .into_iter() .map(|encoding| JsEncoding { encoding: Some(encoding), }) .collect(), ) } } pub struct DecodeBatchTask { pub tokenizer: Tokenizer, pub ids: Vec<Vec<u32>>, pub skip_special_tokens: bool, } impl Task for DecodeBatchTask { type Output = Vec<String>; type JsValue = Vec<String>; fn compute(&mut self) -> Result<Self::Output> { let ids: Vec<_> = self.ids.iter().map(|s| s.as_slice()).collect(); self .tokenizer .tokenizer .read() .unwrap() .decode_batch(&ids, self.skip_special_tokens) .map_err(|e| Error::from_reason(format!("{}", e))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok(output) } }
tokenizers/bindings/node/src/tasks/tokenizer.rs/0
{ "file_path": "tokenizers/bindings/node/src/tasks/tokenizer.rs", "repo_id": "tokenizers", "token_count": 1295 }
226
from typing import List import jieba from tokenizers import NormalizedString, PreTokenizedString, Regex, Tokenizer from tokenizers.decoders import Decoder from tokenizers.models import BPE from tokenizers.normalizers import Normalizer from tokenizers.pre_tokenizers import PreTokenizer class JiebaPreTokenizer: def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]: splits = [] # we need to call `str(normalized_string)` because jieba expects a str, # not a NormalizedString for token, start, stop in jieba.tokenize(str(normalized_string)): splits.append(normalized_string[start:stop]) return splits # We can also easily do it in one line: # return [normalized_string[w[1] : w[2]] for w in jieba.tokenize(str(normalized_string))] def odd_number_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]: # Just an odd example... splits = [] last = 0 for i, char in enumerate(str(normalized_string)): if char.isnumeric() and int(char) % 2 == 1: splits.append(normalized_string[last:i]) last = i # Don't forget the last one splits.append(normalized_string[last:]) return splits def pre_tokenize(self, pretok: PreTokenizedString): # Let's call split on the PreTokenizedString to split using `self.jieba_split` pretok.split(self.jieba_split) # Here we can call `pretok.split` multiple times if we want to apply # different algorithm, but we generally just need to call it once. pretok.split(self.odd_number_split) class CustomDecoder: def decode(self, tokens: List[str]) -> str: return "".join(tokens) class CustomNormalizer: def normalize(self, normalized: NormalizedString): # Most of these can be replaced by a `Sequence` combining some provided Normalizer, # (ie Sequence([ NFKC(), Replace(Regex("\s+"), " "), Lowercase() ]) # and it should be the prefered way. That being said, here is an example of the kind # of things that can be done here: normalized.nfkc() normalized.filter(lambda char: not char.isnumeric()) normalized.replace(Regex("\s+"), " ") normalized.lowercase() # This section shows how to attach these custom components to the Tokenizer tok = Tokenizer(BPE()) tok.normalizer = Normalizer.custom(CustomNormalizer()) tok.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer()) tok.decoder = Decoder.custom(CustomDecoder()) input = "永和服装饰品有限公司" print("PreTokenize:", input) print(tok.pre_tokenizer.pre_tokenize_str(input)) # [('永和', (0, 2)), ('服装', (2, 4)), ('饰品', (4, 6)), ('有限公司', (6, 10))] input = "112233" print("PreTokenize:", input) print(tok.pre_tokenizer.pre_tokenize_str(input)) # [('1', (0, 1)), ('122', (1, 4)), ('3', (4, 5)), ('3', (5, 6))] input = "1234 ℌ𝔢𝔩𝔩𝔬 𝔱𝔥𝔢𝔯𝔢 𝓂𝓎 𝒹ℯ𝒶𝓇 𝕕𝕖𝕒𝕣 𝕗𝕣𝕚𝕖𝕟𝕕!" print("Normalize:", input) print(tok.normalizer.normalize_str(input)) # " hello there my dear dear friend!"
tokenizers/bindings/python/examples/custom_components.py/0
{ "file_path": "tokenizers/bindings/python/examples/custom_components.py", "repo_id": "tokenizers", "token_count": 1293 }
227
import json import os from typing import Iterator, List, Optional, Union, Tuple from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.models import Unigram from .base_tokenizer import BaseTokenizer class SentencePieceUnigramTokenizer(BaseTokenizer): """SentencePiece Unigram Tokenizer Represents the Unigram algorithm, with the pretokenization used by SentencePiece """ def __init__( self, vocab: Optional[List[Tuple[str, float]]] = None, replacement: str = "▁", add_prefix_space: bool = True, ): if vocab is not None: # Let Unigram(..) fail if only one of them is None tokenizer = Tokenizer(Unigram(vocab)) else: tokenizer = Tokenizer(Unigram()) tokenizer.normalizer = normalizers.Sequence( [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")] ) prepend_scheme = "always" if add_prefix_space else "never" tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) parameters = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(tokenizer, parameters) def train( self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, special_tokens: Optional[List[Union[str, AddedToken]]] = None, initial_alphabet: Optional[List[str]] = None, unk_token: Optional[str] = None, ): """ Train the model using the given files Args: files (:obj:`List[str]`): A list of path to the files that we should use for training vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. """ if special_tokens is None: special_tokens = [] if initial_alphabet is None: initial_alphabet = [] trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, special_tokens: Optional[List[Union[str, AddedToken]]] = None, initial_alphabet: Optional[List[str]] = None, unk_token: Optional[str] = None, length: Optional[int] = None, ): """ Train the model using the given iterator Args: iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`): Any iterator over strings or list of strings vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. 
If the strings contain more than one character, only the first one is kept. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. length (:obj:`int`, `optional`): The total number of sequences in the iterator. This is used to provide meaningful progress tracking """ if special_tokens is None: special_tokens = [] if initial_alphabet is None: initial_alphabet = [] trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, ) @staticmethod def from_spm(filename: str): try: import sys sys.path.append(".") import sentencepiece_model_pb2 as model except Exception: raise Exception( "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." ) m = model.ModelProto() m.ParseFromString(open(filename, "rb").read()) precompiled_charsmap = m.normalizer_spec.precompiled_charsmap vocab = [(piece.piece, piece.score) for piece in m.pieces] unk_id = m.trainer_spec.unk_id model_type = m.trainer_spec.model_type byte_fallback = m.trainer_spec.byte_fallback if model_type != 1: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) replacement = "▁" add_prefix_space = True tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback)) if precompiled_charsmap: tokenizer.normalizer = normalizers.Sequence( [ normalizers.Precompiled(precompiled_charsmap), normalizers.Replace(Regex(" {2,}"), " "), ] ) else: tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")]) prepend_scheme = "always" if add_prefix_space else "never" tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) parameters = { "model": "SentencePieceUnigram", } obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters) BaseTokenizer.__init__(obj, tokenizer, parameters) return obj
tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py", "repo_id": "tokenizers", "token_count": 3405 }
228
import transformers from tokenizers.implementations import SentencePieceUnigramTokenizer, BaseTokenizer from tokenizers.processors import TemplateProcessing from tokenizers.models import Unigram, BPE from tokenizers import decoders from tokenizers import Tokenizer, Regex from tokenizers.normalizers import ( StripAccents, NFKD, Lowercase, Sequence, BertNormalizer, Precompiled, Replace, ) from tokenizers.pre_tokenizers import ( Digits, WhitespaceSplit, Metaspace, Sequence as PSequence, ) import json import unicodedata import sys import os import datetime import argparse sys.path.append(".") from spm_parity_check import check_details from sentencepiece_extractor import SentencePieceExtractor def check_number_comma(piece: str) -> bool: return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit() def get_proto(filename: str): try: import sys sys.path.append(".") import sentencepiece_model_pb2 as model except Exception: raise Exception( "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." ) m = model.ModelProto() m.ParseFromString(open(filename, "rb").read()) return m class Converter: def __init__(self, original_tokenizer): self.original_tokenizer = original_tokenizer def converted(self) -> Tokenizer: raise NotImplementedError() class SpmConverter(Converter): def __init__(self, *args): super().__init__(*args) self.proto = get_proto(self.original_tokenizer.vocab_file) def vocab(self, proto): return [(piece.piece, piece.score) for piece in proto.pieces] def unk_id(self, proto): return proto.trainer_spec.unk_id def tokenizer(self, proto): model_type = proto.trainer_spec.model_type vocab = self.vocab(proto) unk_id = self.unk_id(proto) if model_type == 1: tokenizer = Tokenizer(Unigram(vocab, unk_id)) elif model_type == 2: vocab, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract() tokenizer = Tokenizer(BPE(vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True)) else: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) return tokenizer def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap return Sequence([Precompiled(precompiled_charsmap), Replace(Regex(" {2,}"), " ")]) def post_processor(self, tokenizer): return None def converted(self): tokenizer = self.tokenizer(self.proto) # Tokenizer assemble tokenizer.normalizer = self.normalizer(self.proto) replacement = "▁" prepend_scheme = "always" tokenizer.pre_tokenizer = Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) post_processor = self.post_processor(tokenizer) if post_processor: tokenizer.post_processor = post_processor # TODO what parameters should we give ? 
parameters = {} return BaseTokenizer(tokenizer, parameters) class AlbertConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["[CLS]", "$0", "[SEP]"], seq_b=["$1", "[SEP]"], special_tokens=[ ("[CLS]", tokenizer.get_vocab()["[CLS]"]), ("[SEP]", tokenizer.get_vocab()["[SEP]"]), ], ) class CamembertConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class MBartConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [ ("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ] return vocab def unk_id(self, proto): return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>", "en_XX"], seq_b=["$1", "</s>"], special_tokens=[ ("en_XX", tokenizer.get_vocab()["en_XX"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLMRobertaConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLNetConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( 
seq_a=["$0", "<sep>", "<cls>"], seq_b=["$1", "<sep>"], special_tokens=[ ("<sep>", tokenizer.get_vocab()["<sep>"]), ("<cls>", tokenizer.get_vocab()["<cls>"]), ], ) class ReformerConverter(SpmConverter): pass class PegasusConverter(SpmConverter): offset = 103 def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0), (self.original_tokenizer.eos_token, 0), ] vocab += [(f"unk_{i}", -100) for i in range(2, 2 + self.offset)] vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] return vocab def unk_id(self, proto): return proto.trainer_spec.unk_id + self.offset def post_processor(self, tokenizer): eos = self.original_tokenizer.eos_token return TemplateProcessing( seq_a=["$0", eos], seq_b=["$1", eos], special_tokens=[(eos, tokenizer.get_vocab()[eos])], ) class T5Converter(SpmConverter): def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[("</s>", tokenizer.get_vocab()["</s>"])], ) CONVERTERS = { "AlbertTokenizer": AlbertConverter, "CamembertTokenizer": CamembertConverter, "XLMRobertaTokenizer": XLMRobertaConverter, "MBartTokenizer": MBartConverter, "XLNetTokenizer": XLNetConverter, "ReformerTokenizer": ReformerConverter, "PegasusTokenizer": PegasusConverter, "T5Tokenizer": T5Converter, } def check(pretrained, filename): transformer_tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained) converter_class = CONVERTERS[transformer_tokenizer.__class__.__name__] tokenizer = converter_class(transformer_tokenizer).converted() now = datetime.datetime.now trans_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(filename, "r") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = transformer_tokenizer.encode(line) trans = now() tok_ids = tokenizer.encode(line).ids tok = now() trans_total_time += trans - start tok_total_time += tok - trans if ids != tok_ids: if check_details(line, ids, tok_ids, transformer_tokenizer, tokenizer): continue assert ids == tok_ids, f"Error in line {i}: {line} {ids} != {tok_ids}" tokenizer.save(f"{pretrained.replace('/', '-')}.json") return ("OK", trans_total_time / tok_total_time) def main(): pretraineds = [ "albert-base-v1", "albert-large-v1", "albert-xlarge-v1", "albert-xxlarge-v1", "albert-base-v2", "albert-large-v2", "albert-xlarge-v2", "albert-xxlarge-v2", "camembert-base", "xlm-roberta-base", "xlm-roberta-large", "xlm-roberta-large-finetuned-conll02-dutch", "xlm-roberta-large-finetuned-conll02-spanish", "xlm-roberta-large-finetuned-conll03-english", "xlm-roberta-large-finetuned-conll03-german", "facebook/mbart-large-en-ro", "facebook/mbart-large-cc25", "xlnet-base-cased", "xlnet-large-cased", "google/reformer-crime-and-punishment", "t5-small", "google/pegasus-large", ] parser = argparse.ArgumentParser() parser.add_argument( "--filename", required=True, type=str, help="The filename that we are going to encode in both versions to check that conversion worked", ) parser.add_argument( "--models", type=lambda s: s.split(","), default=pretraineds, help=f"The pretrained tokenizers you want to test agains, (default: {pretraineds})", ) args = parser.parse_args() print(args.filename) model_len = 50 status_len = 6 speedup_len = 8 print(f"|{'Model':^{model_len}}|{'Status':^{status_len}}|{'Speedup':^{speedup_len}}|") print(f"|{'-'*model_len}|{'-'*status_len}|{'-'*speedup_len}|") for pretrained in args.models: status, speedup = check(pretrained, args.filename) 
print(f"|{pretrained:<{model_len}}|{status:^{status_len}}|{speedup:^{speedup_len - 1}.2f}x|") if __name__ == "__main__": main()
tokenizers/bindings/python/scripts/convert.py/0
{ "file_path": "tokenizers/bindings/python/scripts/convert.py", "repo_id": "tokenizers", "token_count": 6304 }
229
use std::marker::PhantomData; use std::sync::{Arc, Mutex}; mod iterators; mod normalization; mod pretokenization; mod regex; pub mod serde_pyo3; pub use iterators::*; pub use normalization::*; pub use pretokenization::*; pub use regex::*; // RefMut utils pub trait DestroyPtr { fn destroy(&mut self); } pub struct RefMutGuard<'r, T: DestroyPtr + Clone> { content: T, r: PhantomData<&'r mut T>, } impl<T: DestroyPtr + Clone> RefMutGuard<'_, T> { pub fn new(content: T) -> Self { Self { content, r: PhantomData, } } pub fn get(&self) -> T { self.content.clone() } } impl<T: DestroyPtr + Clone> Drop for RefMutGuard<'_, T> { fn drop(&mut self) { self.content.destroy() } } #[derive(Clone)] pub struct RefMutContainer<T> { inner: Arc<Mutex<Option<*mut T>>>, } impl<T> RefMutContainer<T> { pub fn new(content: &mut T) -> Self { Self { inner: Arc::new(Mutex::new(Some(content))), } } pub fn map<F: FnOnce(&T) -> U, U>(&self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_ref().unwrap() })) } pub fn map_mut<F: FnOnce(&mut T) -> U, U>(&mut self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_mut().unwrap() })) } } impl<T> DestroyPtr for RefMutContainer<T> { fn destroy(&mut self) { self.inner.lock().unwrap().take(); } } unsafe impl<T: Send> Send for RefMutContainer<T> {} unsafe impl<T: Sync> Sync for RefMutContainer<T> {}
tokenizers/bindings/python/src/utils/mod.rs/0
{ "file_path": "tokenizers/bindings/python/src/utils/mod.rs", "repo_id": "tokenizers", "token_count": 759 }
230
import copy import os import pickle import pytest from tokenizers import ( AddedToken, SentencePieceUnigramTokenizer, Tokenizer, models, normalizers, pre_tokenizers, trainers, ) from ..utils import data_dir, train_files class TestBpeTrainer: def test_can_modify(self): trainer = trainers.BpeTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"], limit_alphabet=13, initial_alphabet=["a", "b", "c"], continuing_subword_prefix="pref", end_of_word_suffix="suf", ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] assert trainer.limit_alphabet == 13 assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] assert trainer.continuing_subword_prefix == "pref" assert trainer.end_of_word_suffix == "suf" # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.limit_alphabet = None assert trainer.limit_alphabet == None trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] trainer.continuing_subword_prefix = None assert trainer.continuing_subword_prefix == None trainer.end_of_word_suffix = None assert trainer.continuing_subword_prefix == None def test_can_pickle(self): assert ( trainers.BpeTrainer(min_frequency=12).__getstate__() == b"""{"BpeTrainer":{"min_frequency":12,"vocab_size":30000,"show_progress":true,"special_tokens":[],"limit_alphabet":null,"initial_alphabet":[],"continuing_subword_prefix":null,"end_of_word_suffix":null,"max_token_length":null,"words":{}}}""" ) assert isinstance(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12))), trainers.BpeTrainer) assert isinstance(copy.deepcopy(trainers.BpeTrainer(min_frequency=12)), trainers.BpeTrainer) # Make sure everything is correct assert pickle.dumps(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12)))) == pickle.dumps( trainers.BpeTrainer(min_frequency=12) ) class TestWordPieceTrainer: def test_can_modify(self): trainer = trainers.WordPieceTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"], limit_alphabet=13, initial_alphabet=["a", "b", "c"], continuing_subword_prefix="pref", end_of_word_suffix="suf", ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] assert trainer.limit_alphabet == 13 assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] assert trainer.continuing_subword_prefix == "pref" assert trainer.end_of_word_suffix == "suf" # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.limit_alphabet = None assert trainer.limit_alphabet == None trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] trainer.continuing_subword_prefix = None assert trainer.continuing_subword_prefix == None trainer.end_of_word_suffix = None assert trainer.continuing_subword_prefix == None def test_can_pickle(self): assert 
isinstance(pickle.loads(pickle.dumps(trainers.WordPieceTrainer())), trainers.WordPieceTrainer) class TestWordLevelTrainer: def test_can_modify(self): trainer = trainers.WordLevelTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"] ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] def test_can_pickle(self): assert isinstance(pickle.loads(pickle.dumps(trainers.WordLevelTrainer())), trainers.WordLevelTrainer) class TestUnigram: def test_train(self, train_files): tokenizer = SentencePieceUnigramTokenizer() tokenizer.train(train_files["small"], show_progress=False) filename = "tests/data/unigram_trained.json" tokenizer.save(filename) os.remove(filename) def test_train_parallelism_with_custom_pretokenizer(self, train_files): class GoodCustomPretok: def split(self, n, normalized): # Here we just test that we can return a List[NormalizedString], it # does not really make sense to return twice the same otherwise return [normalized, normalized] def pre_tokenize(self, pretok): pretok.split(self.split) custom = pre_tokenizers.PreTokenizer.custom(GoodCustomPretok()) bpe_tokenizer = Tokenizer(models.BPE()) bpe_tokenizer.normalizer = normalizers.Lowercase() bpe_tokenizer.pre_tokenizer = custom if "TOKENIZERS_PARALLELISM" in os.environ: del os.environ["TOKENIZERS_PARALLELISM"] trainer = trainers.BpeTrainer(special_tokens=["<unk>"], show_progress=False) bpe_tokenizer.train([train_files["small"]], trainer=trainer) def test_can_pickle(self): assert isinstance(pickle.loads(pickle.dumps(trainers.UnigramTrainer())), trainers.UnigramTrainer) def test_train_with_special_tokens(self): filename = "tests/data/dummy-unigram-special_tokens-train.txt" with open(filename, "w") as f: f.write( """ [CLS] The Zen of Python, by Tim Peters [SEP] [CLS] Beautiful is better than ugly. [SEP] [CLS] Explicit is better than implicit. [SEP] [CLS] Simple is better than complex. [SEP] [CLS] Complex is better than complicated. [SEP] [CLS] Flat is better than nested. [SEP] [CLS] Sparse is better than dense. [SEP] [CLS] Readability counts. [SEP] [CLS] Special cases aren't special enough to break the rules. [SEP] [CLS] Although practicality beats purity. [SEP] [CLS] Errors should never pass silently. [SEP] [CLS] Unless explicitly silenced. [SEP] [CLS] In the face of ambiguity, refuse the temptation to guess. [SEP] [CLS] There should be one-- and preferably only one --obvious way to do it. [SEP] [CLS] Although that way may not be obvious at first unless you're Dutch. [SEP] [CLS] Now is better than never. [SEP] [CLS] Although never is often better than *right* now. [SEP] [CLS] If the implementation is hard to explain, it's a bad idea. [SEP] [CLS] If the implementation is easy to explain, it may be a good idea. [SEP] [CLS] Namespaces are one honking great idea -- let's do more of those! 
[SEP] """ ) tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]" ) tokenizer.train([filename], trainer=trainer) assert tokenizer.encode("[CLS] This is a test [SEP]").tokens == [ "[CLS]", " T", "h", "i", "s", " is ", "a", " ", "te", "s", "t ", "[SEP]", ] tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]", vocab_size=100, ) tokenizer.train([filename], trainer=trainer) assert tokenizer.get_vocab_size() == 100 tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]", "[UNK]"], unk_token="[UNK]", vocab_size=100, ) tokenizer.train([filename], trainer=trainer) assert tokenizer.get_vocab_size() == 100 def test_cannot_train_different_model(self): tokenizer = Tokenizer(models.BPE()) trainer = trainers.UnigramTrainer(show_progress=False) with pytest.raises(Exception, match="UnigramTrainer can only train a Unigram"): tokenizer.train([], trainer) def test_can_modify(self): trainer = trainers.UnigramTrainer( vocab_size=12345, show_progress=False, special_tokens=["1", AddedToken("2", lstrip=True)], initial_alphabet=["a", "b", "c"], ) assert trainer.vocab_size == 12345 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", normalized=False, special=True), AddedToken("2", lstrip=True, normalized=False, special=True), ] assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] def test_continuing_prefix_trainer_mistmatch(self): UNK = "[UNK]" special_tokens = [UNK] tokenizer = Tokenizer(models.BPE(unk_token=UNK, continuing_subword_prefix="##")) trainer = trainers.BpeTrainer(special_tokens=special_tokens) tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [pre_tokenizers.Whitespace(), pre_tokenizers.Digits(individual_digits=True)] ) tokenizer.train(files=["data/big.txt"], trainer=trainer) tokenizer.save("data/tokenizer.json") tokenizer.from_file("data/tokenizer.json")
tokenizers/bindings/python/tests/bindings/test_trainers.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_trainers.py", "repo_id": "tokenizers", "token_count": 4957 }
231
# Added Tokens <tokenizerslangcontent> <python> ## AddedToken [[autodoc]] tokenizers.AddedToken - content - lstrip - normalized - rstrip - single_word </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/added-tokens.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/added-tokens.mdx", "repo_id": "tokenizers", "token_count": 134 }
232
# Quicktour Let's have a quick look at the 🤗 Tokenizers library features. The library provides an implementation of today's most used tokenizers that is both easy to use and blazing fast. ## Build a tokenizer from scratch To illustrate how fast the 🤗 Tokenizers library is, let's train a new tokenizer on [wikitext-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) (516M of text) in just a few seconds. First things first, you will need to download this dataset and unzip it with: ``` bash wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip unzip wikitext-103-raw-v1.zip ``` ### Training the tokenizer In this tour, we will build and train a Byte-Pair Encoding (BPE) tokenizer. For more information about the different type of tokenizers, check out this [guide](https://huggingface.co/transformers/tokenizer_summary.html) in the 🤗 Transformers documentation. Here, training the tokenizer means it will learn merge rules by: - Start with all the characters present in the training corpus as tokens. - Identify the most common pair of tokens and merge it into one token. - Repeat until the vocabulary (e.g., the number of tokens) has reached the size we want. The main API of the library is the `class` `Tokenizer`, here is how we instantiate one with a BPE model: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_tokenizer", "end-before": "END init_tokenizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_tokenizer", "end-before": "END quicktour_init_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_tokenizer", "end-before": "END init_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> To train our tokenizer on the wikitext files, we will need to instantiate a [trainer]{.title-ref}, in this case a `BpeTrainer` <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_trainer", "end-before": "END init_trainer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_trainer", "end-before": "END quicktour_init_trainer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_trainer", "end-before": "END init_trainer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> We can set the training arguments like `vocab_size` or `min_frequency` (here left at their default values of 30,000 and 0) but the most important part is to give the `special_tokens` we plan to use later on (they are not used at all during training) so that they get inserted in the vocabulary. <Tip> The order in which you write the special tokens list matters: here `"[UNK]"` will get the ID 0, `"[CLS]"` will get the ID 1 and so forth. </Tip> We could train our tokenizer right now, but it wouldn't be optimal. 
Without a pre-tokenizer that will split our inputs into words, we might get tokens that overlap several words: for instance we could get an `"it is"` token since those two words often appear next to each other. Using a pre-tokenizer will ensure no token is bigger than a word returned by the pre-tokenizer. Here we want to train a subword BPE tokenizer, and we will use the easiest pre-tokenizer possible by splitting on whitespace. <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_pretok", "end-before": "END init_pretok", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_pretok", "end-before": "END quicktour_init_pretok", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_pretok", "end-before": "END init_pretok", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Now, we can just call the `Tokenizer.train` method with any list of files we want to use: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START train", "end-before": "END train", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_train", "end-before": "END quicktour_train", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START train", "end-before": "END train", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> This should only take a few seconds to train our tokenizer on the full wikitext dataset! 
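For readers who want a single consolidated snippet, the whole flow above boils down to a few lines of Python (the Rust and Node bindings expose equivalent calls). This is only a sketch: it assumes the unzipped wikitext files live in a `wikitext-103-raw/` folder next to the script, and that the special-token list matches the one used earlier.

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

# Build a BPE tokenizer with an unknown token, split on whitespace, then train.
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
tokenizer.train(files, trainer)
```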
To save the tokenizer in one file that contains all its configuration and vocabulary, just use the `Tokenizer.save` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START save", "end-before": "END save", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_save", "end-before": "END quicktour_save", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START save", "end-before": "END save", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> and you can reload your tokenizer from that file with the `Tokenizer.from_file` `classmethod`: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START reload_tokenizer", "end-before": "END reload_tokenizer", "dedent": 12} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_reload_tokenizer", "end-before": "END quicktour_reload_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START reload_tokenizer", "end-before": "END reload_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ### Using the tokenizer Now that we have trained a tokenizer, we can use it on any text we want with the `Tokenizer.encode` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START encode", "end-before": "END encode", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_encode", "end-before": "END quicktour_encode", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START encode", "end-before": "END encode", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> This applied the full pipeline of the tokenizer on the text, returning an `Encoding` object. To learn more about this pipeline, and how to apply (or customize) parts of it, check out [this page](pipeline). This `Encoding` object then has all the attributes you need for your deep learning model (or other). 
The `tokens` attribute contains the segmentation of your text in tokens: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_tokens", "end-before": "END print_tokens", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_tokens", "end-before": "END quicktour_print_tokens", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_tokens", "end-before": "END print_tokens", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Similarly, the `ids` attribute will contain the index of each of those tokens in the tokenizer's vocabulary: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_ids", "end-before": "END print_ids", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_ids", "end-before": "END quicktour_print_ids", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_ids", "end-before": "END print_ids", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> An important feature of the 🤗 Tokenizers library is that it comes with full alignment tracking, meaning you can always get the part of your original sentence that corresponds to a given token. Those are stored in the `offsets` attribute of our `Encoding` object. 
For instance, let's assume we want to find out what caused the `"[UNK]"` token to appear. It is the token at index 9 in the list, so we can simply ask for the offset at that index:

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_offsets", "end-before": "END print_offsets", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_offsets", "end-before": "END quicktour_print_offsets", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_offsets", "end-before": "END print_offsets", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

and those are the indices that correspond to the emoji in the original sentence:

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START use_offsets", "end-before": "END use_offsets", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_use_offsets", "end-before": "END quicktour_use_offsets", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START use_offsets", "end-before": "END use_offsets", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

### Post-processing

We might want our tokenizer to automatically add special tokens, like `"[CLS]"` or `"[SEP]"`. To do this, we use a post-processor. `TemplateProcessing` is the most commonly used: you just have to specify a template for the processing of single sentences and pairs of sentences, along with the special tokens and their IDs.

When we built our tokenizer, we set `"[CLS]"` and `"[SEP]"` in positions 1 and 2 of our list of special tokens, so this should be their IDs.
To double-check, we can use the `Tokenizer.token_to_id` method:

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START check_sep", "end-before": "END check_sep", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_check_sep", "end-before": "END quicktour_check_sep", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START check_sep", "end-before": "END check_sep", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

Here is how we can set the post-processing to give us the traditional BERT inputs:

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_template_processing", "end-before": "END init_template_processing", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_template_processing", "end-before": "END quicktour_init_template_processing", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_template_processing", "end-before": "END init_template_processing", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

Let's go over this snippet of code in more detail. First we specify the template for single sentences: those should have the form `"[CLS] $A [SEP]"` where `$A` represents our sentence.

Then, we specify the template for sentence pairs, which should have the form `"[CLS] $A [SEP] $B:1 [SEP]:1"` where `$A` represents the first sentence and `$B` the second one. The `:1` markers added in the template represent the `type IDs` we want for each part of our input: they default to 0 for everything (which is why we don't have `$A:0`) and here we set them to 1 for the tokens of the second sentence and the last `"[SEP]"` token.

Lastly, we specify the special tokens we used and their IDs in our tokenizer's vocabulary.
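For reference, the Python version of that configuration looks roughly like the sketch below. The IDs 1 and 2 are placeholders: they are only correct if `"[CLS]"` and `"[SEP]"` actually received those IDs in your trained vocabulary, which is exactly what the `token_to_id` check above verifies.

```python
from tokenizers.processors import TemplateProcessing

tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    # Placeholder IDs: look them up with tokenizer.token_to_id(...) first.
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)
```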
To check that this worked properly, let's try to encode the same sentence as before:

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_special_tokens", "end-before": "END print_special_tokens", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_special_tokens", "end-before": "END quicktour_print_special_tokens", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_special_tokens", "end-before": "END print_special_tokens", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

To check the results on a pair of sentences, we just pass the two sentences to `Tokenizer.encode`:

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_special_tokens_pair", "end-before": "END print_special_tokens_pair", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_special_tokens_pair", "end-before": "END quicktour_print_special_tokens_pair", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_special_tokens_pair", "end-before": "END print_special_tokens_pair", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

You can then check that the type IDs attributed to each token are correct with

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_type_ids", "end-before": "END print_type_ids", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_type_ids", "end-before": "END quicktour_print_type_ids", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_type_ids", "end-before": "END print_type_ids", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

If you save your tokenizer with `Tokenizer.save`, the post-processor will be saved along with it.
### Encoding multiple sentences in a batch

To get the full speed of the 🤗 Tokenizers library, it's best to process your texts in batches, using the `Tokenizer.encode_batch` method:

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START encode_batch", "end-before": "END encode_batch", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_encode_batch", "end-before": "END quicktour_encode_batch", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START encode_batch", "end-before": "END encode_batch", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

The output is then a list of `Encoding` objects like the ones we saw before. You can process together as many texts as you like, as long as they fit in memory.

To process a batch of sentence pairs, pass two lists to the `Tokenizer.encode_batch` method: the list of sentences A and the list of sentences B:

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START encode_batch_pair", "end-before": "END encode_batch_pair", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_encode_batch_pair", "end-before": "END quicktour_encode_batch_pair", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START encode_batch_pair", "end-before": "END encode_batch_pair", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

When encoding multiple sentences, you can automatically pad the outputs to the longest sentence present by using `Tokenizer.enable_padding`, with the `pad_token` and its ID (we can double-check the ID of the padding token with `Tokenizer.token_to_id` like before):

<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START enable_padding", "end-before": "END enable_padding", "dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_enable_padding", "end-before": "END quicktour_enable_padding", "dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START enable_padding", "end-before": "END enable_padding", "dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>

We can set the `direction` of the padding (defaults to the right) or a given `length` if we want to pad every sample to that specific number (here we leave it unset to pad to the size of the longest text).
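As a quick Python illustration of the padding setup (a sketch, not the canonical snippet): the `pad_id` of 3 is a placeholder to be looked up with `Tokenizer.token_to_id`, and `"[PAD]"` assumes that token was part of the trainer's special tokens.

```python
# Pad every sequence in the batch to the length of the longest one.
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
print([encoding.tokens for encoding in output])
```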
<tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_batch_tokens", "end-before": "END print_batch_tokens", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_batch_tokens", "end-before": "END quicktour_print_batch_tokens", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_batch_tokens", "end-before": "END print_batch_tokens", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> In this case, the `attention mask` generated by the tokenizer takes the padding into account: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_attention_mask", "end-before": "END print_attention_mask", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_attention_mask", "end-before": "END quicktour_print_attention_mask", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_attention_mask", "end-before": "END print_attention_mask", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ## Pretrained <tokenizerslangcontent> <python> ### Using a pretrained tokenizer You can load any tokenizer from the Hugging Face Hub as long as a `tokenizer.json` file is available in the repository. ```python from tokenizers import Tokenizer tokenizer = Tokenizer.from_pretrained("bert-base-uncased") ``` ### Importing a pretrained tokenizer from legacy vocabulary files You can also import a pretrained tokenizer directly in, as long as you have its vocabulary file. For instance, here is how to import the classic pretrained BERT tokenizer: ```python from tokenizers import BertWordPieceTokenizer tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True) ``` as long as you have downloaded the file `bert-base-uncased-vocab.txt` with ```bash wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt ``` </python> </tokenizerslangcontent>
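Putting the last two ideas together, here is a minimal end-to-end sketch (the model name is only an example; any repository that ships a `tokenizer.json` works the same way):

```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
tokenizer.enable_padding(pad_id=tokenizer.token_to_id("[PAD]"), pad_token="[PAD]")

outputs = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
for encoding in outputs:
    print(encoding.attention_mask)  # 1 for real tokens, 0 wherever padding was added
```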
tokenizers/docs/source-doc-builder/quicktour.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/quicktour.mdx", "repo_id": "tokenizers", "token_count": 7936 }
233
Components ==================================================================================================== When building a Tokenizer, you can attach various types of components to this Tokenizer in order to customize its behavior. This page lists most provided components. .. _normalizers: .. entities:: python BertNormalizer.clean_text clean_text BertNormalizer.handle_chinese_chars handle_chinese_chars BertNormalizer.strip_accents strip_accents BertNormalizer.lowercase lowercase Normalizer.Sequence ``Sequence([NFKC(), Lowercase()])`` PreTokenizer.Sequence ``Sequence([Punctuation(), WhitespaceSplit()])`` SplitDelimiterBehavior.removed :obj:`removed` SplitDelimiterBehavior.isolated :obj:`isolated` SplitDelimiterBehavior.merged_with_previous :obj:`merged_with_previous` SplitDelimiterBehavior.merged_with_next :obj:`merged_with_next` SplitDelimiterBehavior.contiguous :obj:`contiguous` .. entities:: rust BertNormalizer.clean_text clean_text BertNormalizer.handle_chinese_chars handle_chinese_chars BertNormalizer.strip_accents strip_accents BertNormalizer.lowercase lowercase Normalizer.Sequence ``Sequence::new(vec![NFKC, Lowercase])`` PreTokenizer.Sequence ``Sequence::new(vec![Punctuation, WhitespaceSplit])`` SplitDelimiterBehavior.removed :obj:`Removed` SplitDelimiterBehavior.isolated :obj:`Isolated` SplitDelimiterBehavior.merged_with_previous :obj:`MergedWithPrevious` SplitDelimiterBehavior.merged_with_next :obj:`MergedWithNext` SplitDelimiterBehavior.contiguous :obj:`Contiguous` .. entities:: node BertNormalizer.clean_text cleanText BertNormalizer.handle_chinese_chars handleChineseChars BertNormalizer.strip_accents stripAccents BertNormalizer.lowercase lowercase Normalizer.Sequence .. PreTokenizer.Sequence .. SplitDelimiterBehavior.removed :obj:`removed` SplitDelimiterBehavior.isolated :obj:`isolated` SplitDelimiterBehavior.merged_with_previous :obj:`mergedWithPrevious` SplitDelimiterBehavior.merged_with_next :obj:`mergedWithNext` SplitDelimiterBehavior.contiguous :obj:`contiguous` Normalizers ---------------------------------------------------------------------------------------------------- A ``Normalizer`` is in charge of pre-processing the input string in order to normalize it as relevant for a given use case. Some common examples of normalization are the Unicode normalization algorithms (NFD, NFKD, NFC & NFKC), lowercasing etc... The specificity of ``tokenizers`` is that we keep track of the alignment while normalizing. This is essential to allow mapping from the generated tokens back to the input text. The ``Normalizer`` is optional. .. list-table:: :header-rows: 1 * - Name - Description - Example * - NFD - NFD unicode normalization - * - NFKD - NFKD unicode normalization - * - NFC - NFC unicode normalization - * - NFKC - NFKC unicode normalization - * - Lowercase - Replaces all uppercase characters with lowercase - Input: ``HELLO ὈΔΥΣΣΕΎΣ`` Output: ``hello ὀδυσσεύς`` * - Strip - Removes all whitespace characters on the specified sides (left, right or both) of the input - Input: ``" hi "`` Output: ``"hi"`` * - StripAccents - Removes all accent symbols in unicode (to be used with NFD for consistency) - Input: ``é`` Output: ``e`` * - Replace - Replaces a custom string or regexp with the given content - ``Replace("a", "e")`` will behave like this: Input: ``"banana"`` Output: ``"benene"`` * - BertNormalizer - Provides an implementation of the Normalizer used in the original BERT. 
Options that can be set are: - :entity:`BertNormalizer.clean_text` - :entity:`BertNormalizer.handle_chinese_chars` - :entity:`BertNormalizer.strip_accents` - :entity:`BertNormalizer.lowercase` - * - Sequence - Composes multiple normalizers that will run in the provided order - :entity:`Normalizer.Sequence` .. _pre-tokenizers: Pre tokenizers ---------------------------------------------------------------------------------------------------- The ``PreTokenizer`` takes care of splitting the input according to a set of rules. This pre-processing lets you ensure that the underlying ``Model`` does not build tokens across multiple "splits". For example if you don't want to have whitespaces inside a token, then you can have a ``PreTokenizer`` that splits on these whitespaces. You can easily combine multiple ``PreTokenizer`` together using a ``Sequence`` (see below). The ``PreTokenizer`` is also allowed to modify the string, just like a ``Normalizer`` does. This is necessary to allow some complicated algorithms that require to split before normalizing (e.g. the ByteLevel) .. list-table:: :header-rows: 1 * - Name - Description - Example * - ByteLevel - Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: - Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters. - A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉) - For non-ASCII characters, it gets completely unreadable, but it works nonetheless! - Input: ``"Hello my friend, how are you?"`` Output: ``"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"`` * - Whitespace - Splits on word boundaries (using the following regular expression: ``\w+|[^\w\s]+``) - Input: ``"Hello there!"`` Output: ``"Hello", "there", "!"`` * - WhitespaceSplit - Splits on any whitespace character - Input: ``"Hello there!"`` Output: ``"Hello", "there!"`` * - Punctuation - Will isolate all punctuation characters - Input: ``"Hello?"`` Output: ``"Hello", "?"`` * - Metaspace - Splits on whitespaces and replaces them with a special char "▁" (U+2581) - Input: ``"Hello there"`` Output: ``"Hello", "▁there"`` * - CharDelimiterSplit - Splits on a given character - Example with ``x``: Input: ``"Helloxthere"`` Output: ``"Hello", "there"`` * - Digits - Splits the numbers from any other characters. - Input: ``"Hello123there"`` Output: ```"Hello", "123", "there"``` * - Split - Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. - pattern should be either a custom string or regexp. - behavior should be one of: * :entity:`SplitDelimiterBehavior.removed` * :entity:`SplitDelimiterBehavior.isolated` * :entity:`SplitDelimiterBehavior.merged_with_previous` * :entity:`SplitDelimiterBehavior.merged_with_next` * :entity:`SplitDelimiterBehavior.contiguous` - invert should be a boolean flag. - Example with `pattern` = :obj:`" "`, `behavior` = :obj:`"isolated"`, `invert` = :obj:`False`: Input: ``"Hello, how are you?"`` Output: ```"Hello,", " ", "how", " ", "are", " ", "you?"``` * - Sequence - Lets you compose multiple ``PreTokenizer`` that will be run in the given order - :entity:`PreTokenizer.Sequence` .. 
_models: Models ---------------------------------------------------------------------------------------------------- Models are the core algorithms used to actually tokenize, and therefore, they are the only mandatory component of a Tokenizer. .. list-table:: :header-rows: 1 * - Name - Description * - WordLevel - This is the "classic" tokenization algorithm. It lets you simply map words to IDs without anything fancy. This has the advantage of being really simple to use and understand, but it requires extremely large vocabularies for a good coverage. *Using this* ``Model`` *requires the use of a* ``PreTokenizer``. *No choice will be made by this model directly, it simply maps input tokens to IDs* * - BPE - One of the most popular subword tokenization algorithms. The Byte-Pair-Encoding works by starting with characters, while merging those that are the most frequently seen together, thus creating new tokens. It then works iteratively to build new tokens out of the most frequent pairs it sees in a corpus. BPE is able to build words it has never seen by using multiple subword tokens, and thus requires smaller vocabularies, with less chances of having "unk" (unknown) tokens. * - WordPiece - This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in models like BERT. It uses a greedy algorithm that tries to build long words first, splitting into multiple tokens when entire words don't exist in the vocabulary. This is different from BPE, which starts from characters and builds bigger tokens when possible. It uses the famous ``##`` prefix to identify tokens that are part of a word (i.e. not starting a word). * - Unigram - Unigram is also a subword tokenization algorithm, and works by trying to identify the best set of subword tokens to maximize the probability for a given sentence. This is different from BPE in the way that this is not deterministic based on a set of rules applied sequentially. Instead Unigram will be able to compute multiple ways of tokenizing, while choosing the most probable one. .. _post-processors: PostProcessor ---------------------------------------------------------------------------------------------------- After the whole pipeline, we sometimes want to insert some special tokens before feeding a tokenized string into a model, like "[CLS] My horse is amazing [SEP]". The ``PostProcessor`` is the component doing just that. .. list-table:: :header-rows: 1 * - Name - Description - Example * - TemplateProcessing - Lets you easily template the post processing, adding special tokens, and specifying the ``type_id`` for each sequence/special token. The template is given two strings representing the single sequence and the pair of sequences, as well as a set of special tokens to use. - Example, when specifying a template with these values: - single: ``"[CLS] $A [SEP]"`` - pair: ``"[CLS] $A [SEP] $B [SEP]"`` - special tokens: - ``"[CLS]"`` - ``"[SEP]"`` Input: ``("I like this", "but not this")`` Output: ``"[CLS] I like this [SEP] but not this [SEP]"`` .. _decoders: Decoders ---------------------------------------------------------------------------------------------------- The Decoder knows how to go from the IDs used by the Tokenizer, back to a readable piece of text. Some ``Normalizer`` and ``PreTokenizer`` use special characters or identifiers that need to be reverted, for example. .. list-table:: :header-rows: 1 * - Name - Description * - ByteLevel - Reverts the ByteLevel PreTokenizer. 
This PreTokenizer encodes at the byte-level, using a set of visible Unicode characters to represent each byte, so we need a Decoder to revert this process and get something readable again. * - Metaspace - Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier ``▁`` to identify whitespaces, and so this Decoder helps with decoding these. * - WordPiece - Reverts the WordPiece Model. This model uses a special identifier ``##`` for continuing subwords, and so this Decoder helps with decoding these.
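As an illustrative recap (not part of the original reference tables), here is one way the components described on this page can be wired together through the Python bindings; the special-token IDs are placeholders and would come from your trained vocabulary:

.. code-block:: python

    from tokenizers import Tokenizer, normalizers, decoders
    from tokenizers.models import WordPiece
    from tokenizers.normalizers import NFD, Lowercase, StripAccents
    from tokenizers.pre_tokenizers import Whitespace
    from tokenizers.processors import TemplateProcessing

    # One component of each kind: Normalizer, PreTokenizer, Model,
    # PostProcessor and Decoder.
    tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
    tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
    tokenizer.pre_tokenizer = Whitespace()
    tokenizer.post_processor = TemplateProcessing(
        single="[CLS] $A [SEP]",
        pair="[CLS] $A [SEP] $B [SEP]",
        special_tokens=[("[CLS]", 1), ("[SEP]", 2)],  # placeholder IDs
    )
    tokenizer.decoder = decoders.WordPiece()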
tokenizers/docs/source/components.rst/0
{ "file_path": "tokenizers/docs/source/components.rst", "repo_id": "tokenizers", "token_count": 4236 }
234
<p align="center"> <br> <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/> <br> <p> <p align="center"> <img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg"> <a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE"> <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue"> </a> <a href="https://docs.rs/tokenizers/"> <img alt="Doc" src="https://docs.rs/tokenizers/badge.svg"> </a> </p> <br> {{readme}}
tokenizers/tokenizers/README.tpl/0
{ "file_path": "tokenizers/tokenizers/README.tpl", "repo_id": "tokenizers", "token_count": 259 }
235
pub mod bpe; pub mod byte_fallback; pub mod ctc; pub mod fuse; pub mod sequence; pub mod strip; pub mod wordpiece; // Re-export these as decoders pub use super::pre_tokenizers::byte_level; pub use super::pre_tokenizers::metaspace; use serde::{Deserialize, Deserializer, Serialize}; use crate::decoders::bpe::BPEDecoder; use crate::decoders::byte_fallback::ByteFallback; use crate::decoders::ctc::CTC; use crate::decoders::fuse::Fuse; use crate::decoders::sequence::Sequence; use crate::decoders::strip::Strip; use crate::decoders::wordpiece::WordPiece; use crate::normalizers::replace::Replace; use crate::pre_tokenizers::byte_level::ByteLevel; use crate::pre_tokenizers::metaspace::Metaspace; use crate::{Decoder, Result}; #[derive(Serialize, Clone, Debug)] #[serde(untagged)] pub enum DecoderWrapper { BPE(BPEDecoder), ByteLevel(ByteLevel), WordPiece(WordPiece), Metaspace(Metaspace), CTC(CTC), Sequence(Sequence), Replace(Replace), Fuse(Fuse), Strip(Strip), ByteFallback(ByteFallback), } impl<'de> Deserialize<'de> for DecoderWrapper { fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] pub struct Tagged { #[serde(rename = "type")] variant: EnumType, #[serde(flatten)] rest: serde_json::Value, } #[derive(Serialize, Deserialize)] pub enum EnumType { BPEDecoder, ByteLevel, WordPiece, Metaspace, CTC, Sequence, Replace, Fuse, Strip, ByteFallback, } #[derive(Deserialize)] #[serde(untagged)] pub enum DecoderHelper { Tagged(Tagged), Legacy(serde_json::Value), } #[derive(Deserialize)] #[serde(untagged)] pub enum DecoderUntagged { BPE(BPEDecoder), ByteLevel(ByteLevel), WordPiece(WordPiece), Metaspace(Metaspace), CTC(CTC), Sequence(Sequence), Replace(Replace), Fuse(Fuse), Strip(Strip), ByteFallback(ByteFallback), } let helper = DecoderHelper::deserialize(deserializer).expect("Helper"); Ok(match helper { DecoderHelper::Tagged(model) => { let mut values: serde_json::Map<String, serde_json::Value> = serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?; values.insert( "type".to_string(), serde_json::to_value(&model.variant).map_err(serde::de::Error::custom)?, ); let values = serde_json::Value::Object(values); match model.variant { EnumType::BPEDecoder => DecoderWrapper::BPE( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::ByteLevel => DecoderWrapper::ByteLevel( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::WordPiece => DecoderWrapper::WordPiece( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Metaspace => DecoderWrapper::Metaspace( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::CTC => DecoderWrapper::CTC( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Sequence => DecoderWrapper::Sequence( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Replace => DecoderWrapper::Replace( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Fuse => DecoderWrapper::Fuse( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Strip => DecoderWrapper::Strip( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::ByteFallback => DecoderWrapper::ByteFallback( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), } } DecoderHelper::Legacy(value) => { let untagged = serde_json::from_value(value).map_err(serde::de::Error::custom)?; match untagged { 
DecoderUntagged::BPE(dec) => DecoderWrapper::BPE(dec), DecoderUntagged::ByteLevel(dec) => DecoderWrapper::ByteLevel(dec), DecoderUntagged::WordPiece(dec) => DecoderWrapper::WordPiece(dec), DecoderUntagged::Metaspace(dec) => DecoderWrapper::Metaspace(dec), DecoderUntagged::CTC(dec) => DecoderWrapper::CTC(dec), DecoderUntagged::Sequence(dec) => DecoderWrapper::Sequence(dec), DecoderUntagged::Replace(dec) => DecoderWrapper::Replace(dec), DecoderUntagged::Fuse(dec) => DecoderWrapper::Fuse(dec), DecoderUntagged::Strip(dec) => DecoderWrapper::Strip(dec), DecoderUntagged::ByteFallback(dec) => DecoderWrapper::ByteFallback(dec), } } }) } } impl Decoder for DecoderWrapper { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { match self { Self::BPE(bpe) => bpe.decode_chain(tokens), Self::ByteLevel(bl) => bl.decode_chain(tokens), Self::Metaspace(ms) => ms.decode_chain(tokens), Self::WordPiece(wp) => wp.decode_chain(tokens), Self::CTC(ctc) => ctc.decode_chain(tokens), Self::Sequence(seq) => seq.decode_chain(tokens), Self::Replace(seq) => seq.decode_chain(tokens), Self::ByteFallback(bf) => bf.decode_chain(tokens), Self::Strip(bf) => bf.decode_chain(tokens), Self::Fuse(bf) => bf.decode_chain(tokens), } } } impl_enum_from!(BPEDecoder, DecoderWrapper, BPE); impl_enum_from!(ByteLevel, DecoderWrapper, ByteLevel); impl_enum_from!(ByteFallback, DecoderWrapper, ByteFallback); impl_enum_from!(Fuse, DecoderWrapper, Fuse); impl_enum_from!(Strip, DecoderWrapper, Strip); impl_enum_from!(Metaspace, DecoderWrapper, Metaspace); impl_enum_from!(WordPiece, DecoderWrapper, WordPiece); impl_enum_from!(CTC, DecoderWrapper, CTC); impl_enum_from!(Sequence, DecoderWrapper, Sequence); impl_enum_from!(Replace, DecoderWrapper, Replace); #[cfg(test)] mod tests { use super::*; #[test] fn decoder_serialization() { let oldjson = r#"{"type":"Sequence","decoders":[{"type":"ByteFallback"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true,"prepend_scheme":"always"}]}"#; let olddecoder: DecoderWrapper = serde_json::from_str(oldjson).unwrap(); let oldserialized = serde_json::to_string(&olddecoder).unwrap(); let json = r#"{"type":"Sequence","decoders":[{"type":"ByteFallback"},{"type":"Metaspace","replacement":"▁","prepend_scheme":"always","split":true}]}"#; assert_eq!(oldserialized, json); let decoder: DecoderWrapper = serde_json::from_str(json).unwrap(); let serialized = serde_json::to_string(&decoder).unwrap(); assert_eq!(serialized, json); } #[test] fn decoder_serialization_other_no_arg() { let json = r#"{"type":"Sequence","decoders":[{"type":"Fuse"},{"type":"Metaspace","replacement":"▁","prepend_scheme":"always","split":true}]}"#; let decoder: DecoderWrapper = serde_json::from_str(json).unwrap(); let serialized = serde_json::to_string(&decoder).unwrap(); assert_eq!(serialized, json); } #[test] fn decoder_serialization_no_decode() { let json = r#"{"type":"Sequence","decoders":[{},{"type":"Metaspace","replacement":"▁","prepend_scheme":"always"}]}"#; let parse = serde_json::from_str::<DecoderWrapper>(json); match parse { Err(err) => assert_eq!( format!("{err}"), "data did not match any variant of untagged enum DecoderUntagged" ), _ => panic!("Expected error"), } let json = r#"{"replacement":"▁","prepend_scheme":"always"}"#; let parse = serde_json::from_str::<DecoderWrapper>(json); match parse { Err(err) => assert_eq!( format!("{err}"), "data did not match any variant of untagged enum DecoderUntagged" ), _ => panic!("Expected error"), } let json = r#"{"type":"Sequence","prepend_scheme":"always"}"#; let parse = 
serde_json::from_str::<DecoderWrapper>(json); match parse { Err(err) => assert_eq!(format!("{err}"), "missing field `decoders`"), _ => panic!("Expected error"), } } }
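For context, the decoders wrapped by this enum are the ones exposed to Python users under `tokenizers.decoders`. A rough usage sketch of the `Sequence` decoder exercised by the serialization tests above (illustrative only; the printed value is approximate):

```python
from tokenizers import decoders

# Mirrors the ByteFallback + Metaspace chain used in the tests above.
decoder = decoders.Sequence([decoders.ByteFallback(), decoders.Metaspace()])
print(decoder.decode(["▁Hey", "▁friend!"]))  # roughly "Hey friend!"
```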
tokenizers/tokenizers/src/decoders/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/decoders/mod.rs", "repo_id": "tokenizers", "token_count": 4660 }
236
use std::collections::HashMap; use std::hash::Hash; #[derive(Default)] pub struct TrieBuilder<Label> { trie: Trie<Label>, } impl<Label: Eq + Hash + Copy> TrieBuilder<Label> { pub fn push(&mut self, element: &[Label]) { self.trie.push(element); } pub fn build(self) -> Trie<Label> { self.trie } } #[derive(Clone)] pub struct Trie<Label> { root: Node<Label>, } impl<Label: Eq + Hash + Copy> Trie<Label> { pub fn push(&mut self, element: &[Label]) { let mut node = &mut self.root; for label in element.iter() { node = node.children.entry(*label).or_default(); } node.is_leaf = true; } pub fn common_prefix_search<T>(&self, iterator: T) -> TrieIterator<Label, T> where T: Iterator<Item = Label>, { TrieIterator { node: &self.root, prefix: vec![], iterator, } } } pub struct TrieIterator<'a, Label, T> { node: &'a Node<Label>, prefix: Vec<Label>, iterator: T, } impl<Label, T> Iterator for TrieIterator<'_, Label, T> where Label: Eq + Hash + Copy, T: Iterator<Item = Label>, { type Item = Vec<Label>; fn next(&mut self) -> Option<Self::Item> { loop { let label = self.iterator.next()?; self.prefix.push(label); let child = self.node.children.get(&label)?; self.node = child; if self.node.is_leaf { return Some(self.prefix.clone()); } } } } impl<Label> Default for Trie<Label> { fn default() -> Self { Self { root: Node::default(), } } } #[derive(Clone)] pub struct Node<Label> { is_leaf: bool, children: HashMap<Label, Node<Label>>, } impl<Label> Default for Node<Label> { fn default() -> Self { Self { is_leaf: false, children: HashMap::new(), } } }
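To make the behaviour of `common_prefix_search` concrete, here is a rough Python transcription of the same idea (purely illustrative; it is not part of the crate):

```python
def common_prefix_search(root, sequence):
    """Yield every prefix of `sequence` that was pushed into the trie as a word."""
    node, prefix = root, []
    for label in sequence:
        if label not in node:
            return            # no child for this label: stop searching
        node = node[label]
        prefix.append(label)
        if None in node:      # leaf marker: a stored word ends here
            yield list(prefix)


# Build a tiny trie containing "ab" and "abc", then search "abcd".
trie = {}
for word in ("ab", "abc"):
    node = trie
    for ch in word:
        node = node.setdefault(ch, {})
    node[None] = True         # mark the end of a stored word

print(list(common_prefix_search(trie, "abcd")))  # [['a', 'b'], ['a', 'b', 'c']]
```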
tokenizers/tokenizers/src/models/unigram/trie.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/trie.rs", "repo_id": "tokenizers", "token_count": 944 }
237
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; use unicode_categories::UnicodeCategories; fn is_bert_punc(x: char) -> bool { char::is_ascii_punctuation(&x) || x.is_punctuation() } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct BertPreTokenizer; impl PreTokenizer for BertPreTokenizer { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, s| s.split(char::is_whitespace, SplitDelimiterBehavior::Removed))?; pretokenized.split(|_, s| s.split(is_bert_punc, SplitDelimiterBehavior::Isolated)) } } #[cfg(test)] mod tests { use super::*; use crate::{NormalizedString, OffsetReferential, OffsetType}; #[test] fn basic() { let pretok = BertPreTokenizer; let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into(); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hey", (0, 3)), ("friend", (4, 10)), ("!", (10, 11)), ("How", (16, 19)), ("are", (20, 23)), ("you", (24, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] ); } #[test] fn chinese_chars() { let mut n = NormalizedString::from("野口里佳 Noguchi Rika"); n.transform( n.get().to_owned().chars().flat_map(|c| { if (c as usize) > 0x4E00 { vec![(' ', 0), (c, 1), (' ', 1)] } else { vec![(c, 0)] } }), 0, ); let mut pretokenized = n.into(); let pretok = BertPreTokenizer; pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("野", (0, 3)), ("口", (3, 6)), ("里", (6, 9)), ("佳", (9, 12)), ("Noguchi", (13, 20)), ("Rika", (21, 25)) ] ); } }
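The same splitting rules are reachable from the Python bindings, which can be handy for experimenting with the behaviour covered by the tests above (a sketch; the offsets shown are indicative):

```python
from tokenizers.pre_tokenizers import BertPreTokenizer

pre_tok = BertPreTokenizer()
print(pre_tok.pre_tokenize_str("Hey friend!     How are you?!?"))
# e.g. [('Hey', (0, 3)), ('friend', (4, 10)), ('!', (10, 11)), ('How', (16, 19)), ...]
```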
tokenizers/tokenizers/src/pre_tokenizers/bert.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/bert.rs", "repo_id": "tokenizers", "token_count": 1460 }
238
use crate::processors::PostProcessorWrapper; use crate::tokenizer::{Encoding, PostProcessor, Result}; use crate::utils::macro_rules_attribute; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Sequence { processors: Vec<PostProcessorWrapper>, } impl Sequence { pub fn new(processors: Vec<PostProcessorWrapper>) -> Self { Self { processors } } } impl PostProcessor for Sequence { fn added_tokens(&self, is_pair: bool) -> usize { self.processors .iter() .map(|p| p.added_tokens(is_pair)) .sum::<usize>() } fn process_encodings( &self, mut encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { for processor in &self.processors { encodings = processor.process_encodings(encodings, add_special_tokens)?; } Ok(encodings) } } #[cfg(test)] mod tests { use super::*; use crate::processors::{ByteLevel, PostProcessorWrapper}; use crate::tokenizer::{Encoding, PostProcessor}; use std::collections::HashMap; use std::iter::FromIterator; #[test] fn process_chain() { let start = Encoding::new( vec![0; 5], vec![0; 5], vec![ "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), ], vec![], vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)], vec![], vec![], vec![], HashMap::new(), ); let bytelevel = ByteLevel::default().trim_offsets(true); let sequence = Sequence::new(vec![PostProcessorWrapper::ByteLevel(bytelevel)]); let expected = Encoding::new( vec![0; 5], vec![0; 5], vec![ "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), ], vec![], vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)], vec![], vec![], vec![], HashMap::from_iter(vec![(0, 0..5)]), ); assert_eq!( expected, bytelevel.process(start.clone(), None, false).unwrap() ); assert_eq!( expected, sequence.process(start.clone(), None, false).unwrap() ); let pair_expected = Encoding::new( vec![0; 10], vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1], vec![ "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), "Ġ".into(), "ĠĠĠĠHelloĠĠ".into(), "ĠĠHello".into(), "HelloĠĠ".into(), "ĠĠĠĠ".into(), ], vec![], vec![ (0, 0), (4, 9), (13, 18), (18, 23), (29, 29), (0, 0), (4, 9), (13, 18), (18, 23), (29, 29), ], vec![], vec![], vec![], HashMap::from_iter(vec![(0, 0..5), (1, 5..10)]), ); assert_eq!( pair_expected, bytelevel .process(start.clone(), Some(start.clone()), false) .unwrap() ); assert_eq!( pair_expected, sequence.process(start.clone(), Some(start), false).unwrap() ); } }
tokenizers/tokenizers/src/processors/sequence.rs/0
{ "file_path": "tokenizers/tokenizers/src/processors/sequence.rs", "repo_id": "tokenizers", "token_count": 2313 }
239
//! //! This module defines helpers to allow optional Rayon usage. //! use rayon::iter::IterBridge; use rayon::prelude::*; use rayon_cond::CondIterator; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; // Re-export rayon current_num_threads pub use rayon::current_num_threads; pub const ENV_VARIABLE: &str = "TOKENIZERS_PARALLELISM"; static USED_PARALLELISM: AtomicBool = AtomicBool::new(false); /// Check if the TOKENIZERS_PARALLELISM env variable has been explicitly set pub fn is_parallelism_configured() -> bool { std::env::var(ENV_VARIABLE).is_ok() } /// Check if at some point we used a parallel iterator pub fn has_parallelism_been_used() -> bool { USED_PARALLELISM.load(Ordering::SeqCst) } /// Get the currently set value for `TOKENIZERS_PARALLELISM` env variable pub fn get_parallelism() -> bool { match std::env::var(ENV_VARIABLE) { Ok(mut v) => { v.make_ascii_lowercase(); !matches!(v.as_ref(), "" | "off" | "false" | "f" | "no" | "n" | "0") } Err(_) => true, // If we couldn't get the variable, we use the default } } /// Set the value for `TOKENIZERS_PARALLELISM` for the current process pub fn set_parallelism(val: bool) { std::env::set_var(ENV_VARIABLE, if val { "true" } else { "false" }) } /// Allows to convert into an iterator that can be executed either parallelly or serially. /// /// The choice is made according to the currently set `TOKENIZERS_PARALLELISM` environment variable. /// This variable can have one of the following values /// - False => "" (empty value), "false", "f", "off", "no", "n", "0" /// - True => Any other value /// pub trait MaybeParallelIterator<P, S> where P: ParallelIterator, S: Iterator<Item = P::Item>, { /// Convert ourself in a CondIterator, that will be executed either in parallel or serially, /// based solely on the `TOKENIZERS_PARALLELISM` environment variable fn into_maybe_par_iter(self) -> CondIterator<P, S>; /// Convert ourself in a CondIterator, that will be executed either in parallel or serially, /// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool. /// Both must be true to run with parallelism activated. 
fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S>; } impl<P, S, I> MaybeParallelIterator<P, S> for I where I: IntoParallelIterator<Iter = P, Item = P::Item> + IntoIterator<IntoIter = S, Item = S::Item>, P: ParallelIterator, S: Iterator<Item = P::Item>, { fn into_maybe_par_iter(self) -> CondIterator<P, S> { let parallelism = get_parallelism(); if parallelism { USED_PARALLELISM.store(true, Ordering::SeqCst); } CondIterator::new(self, parallelism) } fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S> { if cond { self.into_maybe_par_iter() } else { CondIterator::from_serial(self) } } } /// Shared reference version of MaybeParallelIterator, works the same but returns an iterator /// over references, does not consume self pub trait MaybeParallelRefIterator<'data, P, S> where P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter(&'data self) -> CondIterator<P, S>; fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S>; } impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefIterator<'data, P, S> for I where &'data I: MaybeParallelIterator<P, S>, P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter(&'data self) -> CondIterator<P, S> { self.into_maybe_par_iter() } fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S> { self.into_maybe_par_iter_cond(cond) } } /// Exclusive reference version of MaybeParallelIterator, works the same but returns an iterator /// over mutable references, does not consume self pub trait MaybeParallelRefMutIterator<'data, P, S> where P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S>; fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S>; } impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefMutIterator<'data, P, S> for I where &'data mut I: MaybeParallelIterator<P, S>, P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S> { self.into_maybe_par_iter() } fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S> { self.into_maybe_par_iter_cond(cond) } } /// Converts any serial iterator into a CondIterator, that can either run parallelly or serially. pub trait MaybeParallelBridge<T, S> where S: Iterator<Item = T> + Send, T: Send, { fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S>; fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S>; } impl<T, S> MaybeParallelBridge<T, S> for S where S: Iterator<Item = T> + Send, T: Send, { fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S> { let iter = CondIterator::from_serial(self); if get_parallelism() { USED_PARALLELISM.store(true, Ordering::SeqCst); CondIterator::from_parallel(iter.into_parallel().right().unwrap()) } else { iter } } fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S> { if cond { self.maybe_par_bridge() } else { CondIterator::from_serial(self) } } } /// Allows to convert into `chunks` that can be executed either parallelly or serially. 
pub trait MaybeParallelSlice<'data, T> where T: Sync, { /// Create a CondIterator, that will be executed either in parallel or serially, /// based solely on the `TOKENIZERS_PARALLELISM` environment variable fn maybe_par_chunks( &'_ self, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>; /// Create a CondIterator, that will be executed either in parallel or serially, /// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool. /// Both must be true to run with parallelism activated. fn maybe_par_chunks_cond( &'_ self, cond: bool, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>; } impl<T> MaybeParallelSlice<'_, T> for [T] where T: Sync, { fn maybe_par_chunks( &'_ self, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> { let parallelism = get_parallelism(); if parallelism { CondIterator::from_parallel(self.par_chunks(chunk_size)) } else { CondIterator::from_serial(self.chunks(chunk_size)) } } fn maybe_par_chunks_cond( &'_ self, cond: bool, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> { if cond { self.maybe_par_chunks(chunk_size) } else { CondIterator::from_serial(self.chunks(chunk_size)) } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_maybe_parallel_iterator() { let mut v = vec![1u32, 2, 3, 4, 5, 6]; assert_eq!(v.maybe_par_iter().sum::<u32>(), 21); assert_eq!( v.maybe_par_iter_mut() .map(|v| { *v *= 2; *v }) .sum::<u32>(), 42 ); assert_eq!(v.maybe_par_iter().sum::<u32>(), 42); assert_eq!(v.into_maybe_par_iter().sum::<u32>(), 42); } #[test] fn test_maybe_parallel_slice() { let v = [1, 2, 3, 4, 5]; let chunks: Vec<_> = v.maybe_par_chunks(2).collect(); assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]); } }
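On the Python side, this switch corresponds to the `TOKENIZERS_PARALLELISM` environment variable that users often see mentioned in warnings. A hedged usage sketch (the model name is illustrative):

```python
import os

# Set this before the tokenizer does any parallel work; otherwise forking the
# process afterwards (e.g. in DataLoader worker processes) can trigger a
# warning and parallelism is disabled in the children to avoid deadlocks.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
encodings = tokenizer.encode_batch(["first text", "second text"])
```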
tokenizers/tokenizers/src/utils/parallelism.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/parallelism.rs", "repo_id": "tokenizers", "token_count": 3472 }
240
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples benchmark # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) export PYTHONPATH = src check_dirs := examples tests src utils exclude_folders := "" modified_only_fixup: $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs))) @if test -n "$(modified_py_files)"; then \ echo "Checking/fixing $(modified_py_files)"; \ ruff check $(modified_py_files) --fix --exclude $(exclude_folders); \ ruff format $(modified_py_files) --exclude $(exclude_folders);\ else \ echo "No library .py files were modified"; \ fi # Update src/transformers/dependency_versions_table.py deps_table_update: @python setup.py deps_table_update deps_table_check_updated: @md5sum src/transformers/dependency_versions_table.py > md5sum.saved @python setup.py deps_table_update @md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1) @rm md5sum.saved # autogenerating code autogenerate_code: deps_table_update # Check that the repo is in a good state repo-consistency: python utils/check_copies.py python utils/check_table.py python utils/check_dummies.py python utils/check_repo.py python utils/check_inits.py python utils/check_config_docstrings.py python utils/check_config_attributes.py python utils/check_doctest_list.py python utils/update_metadata.py --check-only python utils/check_docstrings.py python utils/check_support_list.py # this target runs checks on all files quality: @python -c "from transformers import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 
🚨'; exit 1) ruff check $(check_dirs) setup.py conftest.py ruff format --check $(check_dirs) setup.py conftest.py python utils/custom_init_isort.py --check_only python utils/sort_auto_mappings.py --check_only python utils/check_doc_toc.py python utils/check_docstrings.py --check_all # Format source code automatically and check is there are any problems left that need manual fixing extra_style_checks: python utils/custom_init_isort.py python utils/sort_auto_mappings.py python utils/check_doc_toc.py --fix_and_overwrite # this target runs checks on all files and potentially modifies some of them style: ruff check $(check_dirs) setup.py conftest.py --fix --exclude $(exclude_folders) ruff format $(check_dirs) setup.py conftest.py --exclude $(exclude_folders) ${MAKE} autogenerate_code ${MAKE} extra_style_checks # Super fast fix and check target that only works on relevant modified files since the branch was made fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency # Make marked copies of snippets of codes conform to the original fix-copies: python utils/check_copies.py --fix_and_overwrite python utils/check_table.py --fix_and_overwrite python utils/check_dummies.py --fix_and_overwrite python utils/check_doctest_list.py --fix_and_overwrite python utils/check_docstrings.py --fix_and_overwrite # Run tests for the library test: python -m pytest -n auto --dist=loadfile -s -v ./tests/ # Run tests for examples test-examples: python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/ # Run benchmark benchmark: python3 benchmark/benchmark.py --config-dir benchmark/config --config-name generation --commit=diff backend.model=google/gemma-2b backend.cache_implementation=null,static backend.torch_compile=false,true --multirun # Run tests for SageMaker DLC release test-sagemaker: # install sagemaker dependencies in advance with pip install .[sagemaker] TEST_SAGEMAKER=True python -m pytest -n auto -s -v ./tests/sagemaker # Release stuff pre-release: python utils/release.py pre-patch: python utils/release.py --patch post-release: python utils/release.py --post_release post-patch: python utils/release.py --post_release --patch build-release: rm -rf dist rm -rf build python setup.py bdist_wheel python setup.py sdist python utils/check_build.py
transformers/Makefile/0
{ "file_path": "transformers/Makefile", "repo_id": "transformers", "token_count": 1413 }
241
local base = import 'templates/base.libsonnet'; local tpus = import 'templates/tpus.libsonnet'; local utils = import "templates/utils.libsonnet"; local volumes = import "templates/volumes.libsonnet"; local bertBaseCased = base.BaseTest { frameworkPrefix: "hf", modelName: "bert-base-cased", mode: "example", configMaps: [], timeout: 3600, # 1 hour, in seconds image: std.extVar('image'), imageTag: std.extVar('image-tag'), tpuSettings+: { softwareVersion: "pytorch-nightly", }, accelerator: tpus.v3_8, volumeMap+: { datasets: volumes.PersistentVolumeSpec { name: "huggingface-cluster-disk", mountPath: "/datasets", }, }, command: utils.scriptCommand( ||| python -m pytest -s transformers/examples/pytorch/test_xla_examples.py -v test_exit_code=$? echo "\nFinished running commands.\n" test $test_exit_code -eq 0 ||| ), }; bertBaseCased.oneshotJob
transformers/docker/transformers-pytorch-tpu/bert-base-cased.jsonnet/0
{ "file_path": "transformers/docker/transformers-pytorch-tpu/bert-base-cased.jsonnet", "repo_id": "transformers", "token_count": 371 }
242
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Installation Installieren Sie 🤗 Transformers für die Deep-Learning-Bibliothek, mit der Sie arbeiten, richten Sie Ihren Cache ein und konfigurieren Sie 🤗 Transformers optional für den Offline-Betrieb. 🤗 Transformers wurde unter Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, und Flax getestet. Folgen Sie den Installationsanweisungen unten für die von Ihnen verwendete Deep-Learning-Bibliothek: * [PyTorch](https://pytorch.org/get-started/locally/) installation instructions. * [TensorFlow 2.0](https://www.tensorflow.org/install/pip) installation instructions. * [Flax](https://flax.readthedocs.io/en/latest/) installation instructions. ## Installation mit pip Sie sollten 🤗 Transformers in einer [virtuellen Umgebung](https://docs.python.org/3/library/venv.html) installieren. Wenn Sie mit virtuellen Python-Umgebungen nicht vertraut sind, werfen Sie einen Blick auf diese [Anleitung](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Eine virtuelle Umgebung macht es einfacher, verschiedene Projekte zu verwalten und Kompatibilitätsprobleme zwischen Abhängigkeiten zu vermeiden. Beginnen wir mit der Erstellung einer virtuellen Umgebung in Ihrem Projektverzeichnis: ```bash python -m venv .env ``` Aktivieren wir die virtuelle Umgebung. Unter Linux und MacOs: ```bash source .env/bin/activate ``` Aktivieren wir die virtuelle Umgebung unter Windows ```bash .env/Scripts/activate ``` Jetzt können wir die 🤗 Transformers mit dem folgenden Befehl installieren: ```bash pip install transformers ``` Bei reiner CPU-Unterstützung können wir 🤗 Transformers und eine Deep-Learning-Bibliothek bequem in einer Zeile installieren. Installieren wir zum Beispiel 🤗 Transformers und PyTorch mit: ```bash pip install transformers[torch] ``` 🤗 Transformers und TensorFlow 2.0: ```bash pip install transformers[tf-cpu] ``` 🤗 Transformers und Flax: ```bash pip install transformers[flax] ``` Überprüfen wir abschließend, ob 🤗 Transformers ordnungsgemäß installiert wurde, indem wir den folgenden Befehl ausführen. Es wird ein vortrainiertes Modell heruntergeladen: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` Dann wird die Kategorie und die Wahrscheinlichkeit ausgegeben: ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## Installation aus dem Code Installieren wir 🤗 Transformers aus dem Quellcode mit dem folgenden Befehl: ```bash pip install git+https://github.com/huggingface/transformers ``` Dieser Befehl installiert die aktuelle `main` Version und nicht die neueste `stable` Version. Die `main`-Version ist nützlich, um mit den neuesten Entwicklungen Schritt zu halten. 
Zum Beispiel, wenn ein Fehler seit der letzten offiziellen Version behoben wurde, aber eine neue Version noch nicht veröffentlicht wurde. Das bedeutet jedoch, dass die "Hauptversion" nicht immer stabil ist. Wir bemühen uns, die Hauptversion einsatzbereit zu halten, und die meisten Probleme werden normalerweise innerhalb weniger Stunden oder eines Tages behoben. Wenn Sie auf ein Problem stoßen, öffnen Sie bitte ein [Issue](https://github.com/huggingface/transformers/issues), damit wir es noch schneller beheben können! Überprüfen wir, ob 🤗 Transformers richtig installiert wurde, indem Sie den folgenden Befehl ausführen: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## Editierbare Installation Sie benötigen eine bearbeitbare Installation, wenn Sie: * die "Haupt"-Version des Quellcodes verwenden möchten. * Zu 🤗 Transformers beitragen und Änderungen am Code testen wollen. Klonen Sie das Repository und installieren 🤗 Transformers mit den folgenden Befehlen: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` Diese Befehle verknüpfen den Ordner, in den Sie das Repository geklont haben, mit den Pfaden Ihrer Python-Bibliotheken. Python wird nun in dem Ordner suchen, in den Sie geklont haben, zusätzlich zu den normalen Bibliothekspfaden. Wenn zum Beispiel Ihre Python-Pakete normalerweise in `~/anaconda3/envs/main/lib/python3.7/site-packages/` installiert sind, wird Python auch den Ordner durchsuchen, in den Sie geklont haben: `~/transformers/`. <Tip warning={true}> Sie müssen den Ordner `transformers` behalten, wenn Sie die Bibliothek weiter verwenden wollen. </Tip> Jetzt können Sie Ihren Klon mit dem folgenden Befehl ganz einfach auf die neueste Version von 🤗 Transformers aktualisieren: ```bash cd ~/transformers/ git pull ``` Ihre Python-Umgebung wird beim nächsten Ausführen die `main`-Version von 🤗 Transformers finden. ## Installation mit conda Installation von dem conda Kanal `conda-forge`: ```bash conda install conda-forge::transformers ``` ## Cache Einrichtung Vorgefertigte Modelle werden heruntergeladen und lokal zwischengespeichert unter: `~/.cache/huggingface/hub`. Dies ist das Standardverzeichnis, das durch die Shell-Umgebungsvariable "TRANSFORMERS_CACHE" vorgegeben ist. Unter Windows wird das Standardverzeichnis durch `C:\Benutzer\Benutzername\.cache\huggingface\hub` angegeben. Sie können die unten aufgeführten Shell-Umgebungsvariablen - in der Reihenfolge ihrer Priorität - ändern, um ein anderes Cache-Verzeichnis anzugeben: 1. Shell-Umgebungsvariable (Standard): `HUGGINGFACE_HUB_CACHE` oder `TRANSFORMERS_CACHE`. 2. Shell-Umgebungsvariable: `HF_HOME`. 3. Shell-Umgebungsvariable: `XDG_CACHE_HOME` + `/huggingface`. <Tip> Transformers verwendet die Shell-Umgebungsvariablen `PYTORCH_TRANSFORMERS_CACHE` oder `PYTORCH_PRETRAINED_BERT_CACHE`, wenn Sie von einer früheren Iteration dieser Bibliothek kommen und diese Umgebungsvariablen gesetzt haben, sofern Sie nicht die Shell-Umgebungsvariable `TRANSFORMERS_CACHE` angeben. </Tip> ## Offline Modus Transformers ist in der Lage, in einer Firewall- oder Offline-Umgebung zu laufen, indem es nur lokale Dateien verwendet. Setzen Sie die Umgebungsvariable `HF_HUB_OFFLINE=1`, um dieses Verhalten zu aktivieren. <Tip> Fügen sie [🤗 Datasets](https://huggingface.co/docs/datasets/) zu Ihrem Offline-Trainingsworkflow hinzufügen, indem Sie die Umgebungsvariable `HF_DATASETS_OFFLINE=1` setzen. 
</Tip> So würden Sie beispielsweise ein Programm in einem normalen Netzwerk mit einer Firewall für externe Instanzen mit dem folgenden Befehl ausführen: ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` Führen Sie das gleiche Programm in einer Offline-Instanz mit aus: ```bash HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` Das Skript sollte nun laufen, ohne sich aufzuhängen oder eine Zeitüberschreitung abzuwarten, da es weiß, dass es nur nach lokalen Dateien suchen soll. ### Abrufen von Modellen und Tokenizern zur Offline-Verwendung Eine andere Möglichkeit, 🤗 Transformers offline zu verwenden, besteht darin, die Dateien im Voraus herunterzuladen und dann auf ihren lokalen Pfad zu verweisen, wenn Sie sie offline verwenden müssen. Es gibt drei Möglichkeiten, dies zu tun: * Laden Sie eine Datei über die Benutzeroberfläche des [Model Hub](https://huggingface.co/models) herunter, indem Sie auf das ↓-Symbol klicken. ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * Verwenden Sie den [PreTrainedModel.from_pretrained] und [PreTrainedModel.save_pretrained] Workflow: 1. Laden Sie Ihre Dateien im Voraus mit [`PreTrainedModel.from_pretrained`] herunter: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. Speichern Sie Ihre Dateien in einem bestimmten Verzeichnis mit [`PreTrainedModel.save_pretrained`]: ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. Wenn Sie nun offline sind, laden Sie Ihre Dateien mit [`PreTrainedModel.from_pretrained`] aus dem bestimmten Verzeichnis: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * Programmatisches Herunterladen von Dateien mit der [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) Bibliothek: 1. Installieren Sie die "huggingface_hub"-Bibliothek in Ihrer virtuellen Umgebung: ```bash python -m pip install huggingface_hub ``` 2. Verwenden Sie die Funktion [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub), um eine Datei in einen bestimmten Pfad herunterzuladen. Der folgende Befehl lädt zum Beispiel die Datei "config.json" aus dem Modell [T0](https://huggingface.co/bigscience/T0_3B) in den gewünschten Pfad herunter: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` Sobald Ihre Datei heruntergeladen und lokal zwischengespeichert ist, geben Sie den lokalen Pfad an, um sie zu laden und zu verwenden: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> Weitere Informationen zum Herunterladen von Dateien, die auf dem Hub gespeichert sind, finden Sie im Abschnitt [Wie man Dateien vom Hub herunterlädt](https://huggingface.co/docs/hub/how-to-downstream). </Tip>
transformers/docs/source/de/installation.md/0
{ "file_path": "transformers/docs/source/de/installation.md", "repo_id": "transformers", "token_count": 4000 }
243
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # How to add a model to 🤗 Transformers? The 🤗 Transformers library is often able to offer new models thanks to community contributors. But this can be a challenging project and requires an in-depth knowledge of the 🤗 Transformers library and the model to implement. At Hugging Face, we're trying to empower more of the community to actively add models and we've put together this guide to walk you through the process of adding a PyTorch model (make sure you have [PyTorch installed](https://pytorch.org/get-started/locally/)). Along the way, you'll: - get insights into open-source best practices - understand the design principles behind one of the most popular deep learning libraries - learn how to efficiently test large models - learn how to integrate Python utilities like `black`, `ruff`, and `make fix-copies` to ensure clean and readable code A Hugging Face team member will be available to help you along the way so you'll never be alone. 🤗 ❤️ To get started, open a [New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml) issue for the model you want to see in 🤗 Transformers. If you're not especially picky about contributing a specific model, you can filter by the [New model label](https://github.com/huggingface/transformers/labels/New%20model) to see if there are any unclaimed model requests and work on it. Once you've opened a new model request, the first step is to get familiar with 🤗 Transformers if you aren't already! ## General overview of 🤗 Transformers First, you should get a general overview of 🤗 Transformers. 🤗 Transformers is a very opinionated library, so there is a chance that you don't agree with some of the library's philosophies or design choices. From our experience, however, we found that the fundamental design choices and philosophies of the library are crucial to efficiently scale 🤗 Transformers while keeping maintenance costs at a reasonable level. A good first starting point to better understand the library is to read the [documentation of our philosophy](philosophy). As a result of our way of working, there are some choices that we try to apply to all models: - Composition is generally favored over-abstraction - Duplicating code is not always bad if it strongly improves the readability or accessibility of a model - Model files are as self-contained as possible so that when you read the code of a specific model, you ideally only have to look into the respective `modeling_....py` file. In our opinion, the library's code is not just a means to provide a product, *e.g.* the ability to use BERT for inference, but also as the very product that we want to improve. Hence, when adding a model, the user is not only the person who will use your model, but also everybody who will read, try to understand, and possibly tweak your code. 
With this in mind, let's go a bit deeper into the general library design. ### Overview of models To successfully add a model, it is important to understand the interaction between your model and its config, [`PreTrainedModel`], and [`PretrainedConfig`]. For exemplary purposes, we will call the model to be added to 🤗 Transformers `BrandNewBert`. Let's take a look: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/> As you can see, we do make use of inheritance in 🤗 Transformers, but we keep the level of abstraction to an absolute minimum. There are never more than two levels of abstraction for any model in the library. `BrandNewBertModel` inherits from `BrandNewBertPreTrainedModel` which in turn inherits from [`PreTrainedModel`] and that's it. As a general rule, we want to make sure that a new model only depends on [`PreTrainedModel`]. The important functionalities that are automatically provided to every new model are [`~PreTrainedModel.from_pretrained`] and [`~PreTrainedModel.save_pretrained`], which are used for serialization and deserialization. All of the other important functionalities, such as `BrandNewBertModel.forward` should be completely defined in the new `modeling_brand_new_bert.py` script. Next, we want to make sure that a model with a specific head layer, such as `BrandNewBertForMaskedLM` does not inherit from `BrandNewBertModel`, but rather uses `BrandNewBertModel` as a component that can be called in its forward pass to keep the level of abstraction low. Every new model requires a configuration class, called `BrandNewBertConfig`. This configuration is always stored as an attribute in [`PreTrainedModel`], and thus can be accessed via the `config` attribute for all classes inheriting from `BrandNewBertPreTrainedModel`: ```python model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert") model.config # model has access to its config ``` Similar to the model, the configuration inherits basic serialization and deserialization functionalities from [`PretrainedConfig`]. Note that the configuration and the model are always serialized into two different formats - the model to a *pytorch_model.bin* file and the configuration to a *config.json* file. Calling the model's [`~PreTrainedModel.save_pretrained`] will automatically call the config's [`~PretrainedConfig.save_pretrained`], so that both model and configuration are saved. ### Code style When coding your new model, keep in mind that Transformers is an opinionated library and we have a few quirks of our own regarding how code should be written :-) 1. The forward pass of your model should be fully written in the modeling file while being fully independent of other models in the library. If you want to reuse a block from another model, copy the code and paste it with a `# Copied from` comment on top (see [here](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160) for a good example and [there](pr_checks#check-copies) for more documentation on Copied from). 2. The code should be fully understandable, even by a non-native English speaker. This means you should pick descriptive variable names and avoid abbreviations. As an example, `activation` is preferred to `act`. One-letter variable names are strongly discouraged unless it's an index in a for loop. 3. More generally we prefer longer explicit code to short magical one. 4. 
Avoid subclassing `nn.Sequential` in PyTorch but subclass `nn.Module` and write the forward pass, so that anyone using your code can quickly debug it by adding print statements or breaking points. 5. Your function signature should be type-annotated. For the rest, good variable names are way more readable and understandable than type annotations. ### Overview of tokenizers Not quite ready yet :-( This section will be added soon! ## Step-by-step recipe to add a model to 🤗 Transformers Everyone has different preferences of how to port a model so it can be very helpful for you to take a look at summaries of how other contributors ported models to Hugging Face. Here is a list of community blog posts on how to port a model: 1. [Porting GPT2 Model](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) by [Thomas](https://huggingface.co/thomwolf) 2. [Porting WMT19 MT Model](https://huggingface.co/blog/porting-fsmt) by [Stas](https://huggingface.co/stas) From experience, we can tell you that the most important things to keep in mind when adding a model are: - Don't reinvent the wheel! Most parts of the code you will add for the new 🤗 Transformers model already exist somewhere in 🤗 Transformers. Take some time to find similar, already existing models and tokenizers you can copy from. [grep](https://www.gnu.org/software/grep/) and [rg](https://github.com/BurntSushi/ripgrep) are your friends. Note that it might very well happen that your model's tokenizer is based on one model implementation, and your model's modeling code on another one. *E.g.* FSMT's modeling code is based on BART, while FSMT's tokenizer code is based on XLM. - It's more of an engineering challenge than a scientific challenge. You should spend more time creating an efficient debugging environment rather than trying to understand all theoretical aspects of the model in the paper. - Ask for help, when you're stuck! Models are the core component of 🤗 Transformers so we at Hugging Face are more than happy to help you at every step to add your model. Don't hesitate to ask if you notice you are not making progress. In the following, we try to give you a general recipe that we found most useful when porting a model to 🤗 Transformers. The following list is a summary of everything that has to be done to add a model and can be used by you as a To-Do List: ☐ (Optional) Understood the model's theoretical aspects<br> ☐ Prepared 🤗 Transformers dev environment<br> ☐ Set up debugging environment of the original repository<br> ☐ Created script that successfully runs the `forward()` pass using the original repository and checkpoint<br> ☐ Successfully added the model skeleton to 🤗 Transformers<br> ☐ Successfully converted original checkpoint to 🤗 Transformers checkpoint<br> ☐ Successfully ran `forward()` pass in 🤗 Transformers that gives identical output to original checkpoint<br> ☐ Finished model tests in 🤗 Transformers<br> ☐ Successfully added tokenizer in 🤗 Transformers<br> ☐ Run end-to-end integration tests<br> ☐ Finished docs<br> ☐ Uploaded model weights to the Hub<br> ☐ Submitted the pull request<br> ☐ (Optional) Added a demo notebook To begin with, we usually recommend starting by getting a good theoretical understanding of `BrandNewBert`. However, if you prefer to understand the theoretical aspects of the model *on-the-job*, then it is totally fine to directly dive into the `BrandNewBert`'s code-base. 
This option might suit you better if your engineering skills are better than your theoretical skill, if you have trouble understanding `BrandNewBert`'s paper, or if you just enjoy programming much more than reading scientific papers. ### 1. (Optional) Theoretical aspects of BrandNewBert You should take some time to read *BrandNewBert's* paper, if such descriptive work exists. There might be large sections of the paper that are difficult to understand. If this is the case, this is fine - don't worry! The goal is not to get a deep theoretical understanding of the paper, but to extract the necessary information required to effectively re-implement the model in 🤗 Transformers. That being said, you don't have to spend too much time on the theoretical aspects, but rather focus on the practical ones, namely: - What type of model is *brand_new_bert*? BERT-like encoder-only model? GPT2-like decoder-only model? BART-like encoder-decoder model? Look at the [model_summary](model_summary) if you're not familiar with the differences between those. - What are the applications of *brand_new_bert*? Text classification? Text generation? Seq2Seq tasks, *e.g.,* summarization? - What is the novel feature of the model that makes it different from BERT/GPT-2/BART? - Which of the already existing [🤗 Transformers models](https://huggingface.co/transformers/#contents) is most similar to *brand_new_bert*? - What type of tokenizer is used? A sentencepiece tokenizer? Word piece tokenizer? Is it the same tokenizer as used for BERT or BART? After you feel like you have gotten a good overview of the architecture of the model, you might want to write to the Hugging Face team with any questions you might have. This might include questions regarding the model's architecture, its attention layer, etc. We will be more than happy to help you. ### 2. Next prepare your environment 1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the ‘Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your `transformers` fork to your local disk, and add the base repository as a remote: ```bash git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. Set up a development environment, for instance by running the following command: ```bash python -m venv .env source .env/bin/activate pip install -e ".[dev]" ``` Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a failure with this command. If that's the case make sure to install the Deep Learning framework you are working with (PyTorch, TensorFlow and/or Flax) then do: ```bash pip install -e ".[quality]" ``` which should be enough for most use cases. You can then return to the parent directory ```bash cd .. ``` 4. We recommend adding the PyTorch version of *brand_new_bert* to Transformers. To install PyTorch, please follow the instructions on https://pytorch.org/get-started/locally/. **Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient. 5. To port *brand_new_bert*, you will also need access to its original repository: ```bash git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git cd brand_new_bert pip install -e . ``` Now you have set up a development environment to port *brand_new_bert* to 🤗 Transformers. ### 3.-4. 
Run a pretrained checkpoint using the original repository At first, you will work on the original *brand_new_bert* repository. Often, the original implementation is very “researchy”. Meaning that documentation might be lacking and the code can be difficult to understand. But this should be exactly your motivation to reimplement *brand_new_bert*. At Hugging Face, one of our main goals is to *make people stand on the shoulders of giants* which translates here very well into taking a working model and rewriting it to make it as **accessible, user-friendly, and beautiful** as possible. This is the number-one motivation to re-implement models into 🤗 Transformers - trying to make complex new NLP technology accessible to **everybody**. You should start thereby by diving into the original repository. Successfully running the official pretrained model in the original repository is often **the most difficult** step. From our experience, it is very important to spend some time getting familiar with the original code-base. You need to figure out the following: - Where to find the pretrained weights? - How to load the pretrained weights into the corresponding model? - How to run the tokenizer independently from the model? - Trace one forward pass so that you know which classes and functions are required for a simple forward pass. Usually, you only have to reimplement those functions. - Be able to locate the important components of the model: Where is the model's class? Are there model sub-classes, *e.g.* EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers, *e.g.* *self-attention*, *cross-attention*...? - How can you debug the model in the original environment of the repo? Do you have to add *print* statements, can you work with an interactive debugger like *ipdb*, or should you use an efficient IDE to debug the model, like PyCharm? It is very important that before you start the porting process, you can **efficiently** debug code in the original repository! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or even a pull request in the original repository. The maintainers of this repository are most likely very happy about someone looking into their code! At this point, it is really up to you which debugging environment and strategy you prefer to use to debug the original model. We strongly advise against setting up a costly GPU environment, but simply work on a CPU both when starting to dive into the original repository and also when starting to write the 🤗 Transformers implementation of the model. Only at the very end, when the model has already been successfully ported to 🤗 Transformers, one should verify that the model also works as expected on GPU. In general, there are two possible debugging environments for running the original model - [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb) - Local python scripts. Jupyter notebooks have the advantage that they allow for cell-by-cell execution which can be helpful to better split logical components from one another and to have faster debugging cycles as intermediate results can be stored. Also, notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging Face team for help. If you are familiar with Jupyter notebooks, we strongly recommend you work with them. 
The obvious disadvantage of Jupyter notebooks is that if you are not used to working with them you will have to spend some time adjusting to the new programming environment and you might not be able to use your known debugging tools anymore, like `ipdb`. For each code-base, a good first step is always to load a **small** pretrained checkpoint and to be able to reproduce a single forward pass using a dummy integer vector of input IDs as an input. Such a script could look like this (in pseudocode): ```python model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids original_output = model.predict(input_ids) ``` Next, regarding the debugging strategy, there are generally a few from which to choose from: - Decompose the original model into many small testable components and run a forward pass on each of those for verification - Decompose the original model only into the original *tokenizer* and the original *model*, run a forward pass on those, and use intermediate print statements or breakpoints for verification Again, it is up to you which strategy to choose. Often, one or the other is advantageous depending on the original code base. If the original code-base allows you to decompose the model into smaller sub-components, *e.g.* if the original code-base can easily be run in eager mode, it is usually worth the effort to do so. There are some important advantages to taking the more difficult road in the beginning: - at a later stage when comparing the original model to the Hugging Face implementation, you can verify automatically for each component individually that the corresponding component of the 🤗 Transformers implementation matches instead of relying on visual comparison via print statements - it can give you some rope to decompose the big problem of porting a model into smaller problems of just porting individual components and thus structure your work better - separating the model into logical meaningful components will help you to get a better overview of the model's design and thus to better understand the model - at a later stage those component-by-component tests help you to ensure that no regression occurs as you continue changing your code [Lysandre's](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) integration checks for ELECTRA gives a nice example of how this can be done. However, if the original code-base is very complex or only allows intermediate components to be run in a compiled mode, it might be too time-consuming or even impossible to separate the model into smaller testable sub-components. A good example is [T5's MeshTensorFlow](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow) library which is very complex and does not offer a simple way to decompose the model into its sub-components. For such libraries, one often relies on verifying print statements. No matter which strategy you choose, the recommended procedure is often the same that you should start to debug the starting layers first and the ending layers last. It is recommended that you retrieve the output, either by print statements or sub-component functions, of the following layers in the following order: 1. Retrieve the input IDs passed to the model 2. Retrieve the word embeddings 3. Retrieve the input of the first Transformer layer 4. Retrieve the output of the first Transformer layer 5. Retrieve the output of the following n - 1 Transformer layers 6. 
Retrieve the output of the whole BrandNewBert Model Input IDs should thereby consists of an array of integers, *e.g.* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]` The outputs of the following layers often consist of multi-dimensional float arrays and can look like this: ``` [[ [-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024], [-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132], [-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648], ..., [-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288], [-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191], [-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]], ``` We expect that every model added to 🤗 Transformers passes a couple of integration tests, meaning that the original model and the reimplemented version in 🤗 Transformers have to give the exact same output up to a precision of 0.001! Since it is normal that the exact same model written in different libraries can give a slightly different output depending on the library framework, we accept an error tolerance of 1e-3 (0.001). It is not enough if the model gives nearly the same output, they have to be almost identical. Therefore, you will certainly compare the intermediate outputs of the 🤗 Transformers version multiple times against the intermediate outputs of the original implementation of *brand_new_bert* in which case an **efficient** debugging environment of the original repository is absolutely important. Here is some advice to make your debugging environment as efficient as possible. - Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should probably take the time to write a longer script that decomposes the original model into smaller sub-components to retrieve intermediate values. Is the original repository written in Tensorflow 1? Then you might have to rely on TensorFlow print operations like [tf.print](https://www.tensorflow.org/api_docs/python/tf/print) to output intermediate values. Is the original repository written in Jax? Then make sure that the model is **not jitted** when running the forward pass, *e.g.* check-out [this link](https://github.com/google/jax/issues/196). - Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debug cycle becomes. It is not efficient if your pretrained model is so big that your forward pass takes more than 10 seconds. In case only very large checkpoints are available, it might make more sense to create a dummy model in the new environment with randomly initialized weights and save those weights for comparison with the 🤗 Transformers version of your model - Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to find the function in the original repository that **only** calls a single forward pass, *i.e.* that is often called `predict`, `evaluate`, `forward` or `__call__`. You don't want to debug a function that calls `forward` multiple times, *e.g.* to generate text, like `autoregressive_sample`, `generate`. - Try to separate the tokenization from the model's *forward* pass. If the original repository shows examples where you have to input a string, then try to find out where in the forward call the string input is changed to input ids and start from this point. This might mean that you have to possibly write a small script yourself or change the original code so that you can directly input the ids instead of an input string. 
- Make sure that the model in your debugging setup is **not** in training mode, which often causes the model to yield random outputs due to multiple dropout layers in the model. Make sure that the forward pass in your debugging environment is **deterministic** so that the dropout layers are not used. Or use *transformers.utils.set_seed* if the old and new implementations are in the same framework. The following section gives you more specific details/tips on how you can do this for *brand_new_bert*. ### 5.-14. Port BrandNewBert to 🤗 Transformers Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers' fork: ```bash cd transformers ``` In the special case that you are adding a model whose architecture exactly matches the model architecture of an existing model you only have to add a conversion script as described in [this section](#write-a-conversion-script). In this case, you can just re-use the whole model architecture of the already existing model. Otherwise, let's start generating a new model. We recommend using the following script to add a model starting from an existing model: ```bash transformers-cli add-new-model-like ``` You will be prompted with a questionnaire to fill in the basic information of your model. **Open a Pull Request on the main huggingface/transformers repo** Before starting to adapt the automatically generated code, now is the time to open a “Work in progress (WIP)” pull request, *e.g.* “[WIP] Add *brand_new_bert*”, in 🤗 Transformers so that you and the Hugging Face team can work side-by-side on integrating the model into 🤗 Transformers. You should do the following: 1. Create a branch with a descriptive name from your main branch ```bash git checkout -b add_brand_new_bert ``` 2. Commit the automatically generated code: ```bash git add . git commit ``` 3. Fetch and rebase to current main ```bash git fetch upstream git rebase upstream/main ``` 4. Push the changes to your account using: ```bash git push -u origin a-descriptive-name-for-my-changes ``` 5. Once you are satisfied, go to the webpage of your fork on GitHub. Click on “Pull request”. Make sure to add the GitHub handle of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified for future changes. 6. Change the PR into a draft by clicking on “Convert to draft” on the right of the GitHub pull request web page. In the following, whenever you have made some progress, don't forget to commit your work and push it to your account so that it shows in the pull request. Additionally, you should make sure to update your work with the current main from time to time by doing: ```bash git fetch upstream git merge upstream/main ``` In general, all questions you might have regarding the model or your implementation should be asked in your PR and discussed/solved in the PR. This way, the Hugging Face team will always be notified when you are committing new code or if you have a question. It is often very helpful to point the Hugging Face team to your added code so that the Hugging Face team can efficiently understand your problem or question. To do so, you can go to the “Files changed” tab where you see all of your changes, go to a line regarding which you want to ask a question, and click on the “+” symbol to add a comment. Whenever a question or problem has been solved, you can click on the “Resolve” button of the created comment. In the same way, the Hugging Face team will open comments when reviewing your code. 
We recommend asking most questions on GitHub on your PR. For some very general questions that are not very useful for the public, feel free to ping the Hugging Face team by Slack or email. **5. Adapt the generated models code for brand_new_bert** At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in the generated files `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` and `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`. Now you can finally start coding :). The generated code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` will either have the same architecture as BERT if it's an encoder-only model or BART if it's an encoder-decoder model. At this point, you should remind yourself what you've learned in the beginning about the theoretical aspects of the model: *How is the model different from BERT or BART?*". Implement those changes which often means changing the *self-attention* layer, the order of the normalization layer, etc… Again, it is often useful to look at the similar architecture of already existing models in Transformers to get a better feeling of how your model should be implemented. **Note** that at this point, you don't have to be very sure that your code is fully correct or clean. Rather, it is advised to add a first *unclean*, copy-pasted version of the original code to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` until you feel like all the necessary code is added. From our experience, it is much more efficient to quickly add a first version of the required code and improve/correct the code iteratively with the conversion script as described in the next section. The only thing that has to work at this point is that you can instantiate the 🤗 Transformers implementation of *brand_new_bert*, *i.e.* the following command should work: ```python from transformers import BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig()) ``` The above command will create a model according to the default parameters as defined in `BrandNewBertConfig()` with random weights, thus making sure that the `init()` methods of all components works. Note that all random initialization should happen in the `_init_weights` method of your `BrandnewBertPreTrainedModel` class. It should initialize all leaf modules depending on the variables of the config. Here is an example with the BERT `_init_weights` method: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ``` You can have some more custom schemes if you need a special initialization for some modules. For instance, in `Wav2Vec2ForPreTraining`, the last two linear layers need to have the initialization of the regular PyTorch `nn.Linear` but all the other ones should use an initialization as above. 
This is coded like this: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Wav2Vec2ForPreTraining): module.project_hid.reset_parameters() module.project_q.reset_parameters() module.project_hid._is_hf_initialized = True module.project_q._is_hf_initialized = True elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() ``` The `_is_hf_initialized` flag is internally used to make sure we only initialize a submodule once. By setting it to `True` for `module.project_q` and `module.project_hid`, we make sure the custom initialization we did is not overridden later on, the `_init_weights` function won't be applied to them. **6. Write a conversion script** Next, you should write a conversion script that lets you convert the checkpoint you used to debug *brand_new_bert* in the original repository to a checkpoint compatible with your just created 🤗 Transformers implementation of *brand_new_bert*. It is not advised to write the conversion script from scratch, but rather to look through already existing conversion scripts in 🤗 Transformers for one that has been used to convert a similar model that was written in the same framework as *brand_new_bert*. Usually, it is enough to copy an already existing conversion script and slightly adapt it for your use case. Don't hesitate to ask the Hugging Face team to point you to a similar already existing conversion script for your model. - If you are porting a model from TensorFlow to PyTorch, a good starting point might be BERT's conversion script [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91) - If you are porting a model from PyTorch to PyTorch, a good starting point might be BART's conversion script [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py) In the following, we'll quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let's define a dummy model in PyTorch, called `SimpleModel` as follows: ```python from torch import nn class SimpleModel(nn.Module): def __init__(self): super().__init__() self.dense = nn.Linear(10, 10) self.intermediate = nn.Linear(10, 10) self.layer_norm = nn.LayerNorm(10) ``` Now we can create an instance of this model definition which will fill all weights: `dense`, `intermediate`, `layer_norm` with random weights. We can print the model to see its architecture ```python model = SimpleModel() print(model) ``` This will print out the following: ``` SimpleModel( (dense): Linear(in_features=10, out_features=10, bias=True) (intermediate): Linear(in_features=10, out_features=10, bias=True) (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) ) ``` We can see that the layer names are defined by the name of the class attribute in PyTorch. 
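If you want to see every layer name and shape at once, which also comes in handy when writing the conversion script in the next step, you can iterate over the model's parameters. This is plain PyTorch and works for any `nn.Module`, including the `SimpleModel` defined above:

```python
# Print every parameter name together with its shape.
# The names follow the class attribute names used in the module definition.
for name, parameter in model.named_parameters():
    print(name, tuple(parameter.shape))
```

For `SimpleModel`, this lists `dense.weight`, `dense.bias`, `intermediate.weight`, `intermediate.bias`, `layer_norm.weight` and `layer_norm.bias` together with their respective shapes.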
You can print out the weight values of a specific layer: ```python print(model.dense.weight.data) ``` to see that the weights were randomly initialized ``` tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, -0.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, 0.2166, -0.0212], [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, -0.1023, -0.0447], [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, -0.1876, -0.2467], [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, 0.2132, 0.1680], [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, 0.2707, -0.2509], [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, 0.1829, -0.1568], [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, -0.0536], [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, 0.2220, 0.2358]]). ``` In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. *E.g.* ```python # retrieve matching layer weights, e.g. by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight) ``` While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding pretrained checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add assert statements for the shape and print out the names of the checkpoints weights. E.g. you should add statements like: ```python assert ( model_pointer.weight.shape == pretrained_weight.shape ), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched" ``` Besides, you should also print out the names of both weights to make sure they match, *e.g.* ```python logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}") ``` If either the shape or the name doesn't match, you probably assigned the wrong checkpoint weight to a randomly initialized layer of the 🤗 Transformers implementation. An incorrect shape is most likely due to an incorrect setting of the config parameters in `BrandNewBertConfig()` that do not exactly match those that were used for the checkpoint you want to convert. However, it could also be that PyTorch's implementation of a layer requires the weight to be transposed beforehand. Finally, you should also check that **all** required weights are initialized and print out all checkpoint weights that were not used for initialization to make sure the model is correctly converted. It is completely normal, that the conversion trials fail with either a wrong shape statement or a wrong name assignment. This is most likely because either you used incorrect parameters in `BrandNewBertConfig()`, have a wrong architecture in the 🤗 Transformers implementation, you have a bug in the `init()` functions of one of the components of the 🤗 Transformers implementation or you need to transpose one of the checkpoint weights. This step should be iterated with the previous step until all weights of the checkpoint are correctly loaded in the Transformers model. 
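To make this iteration easier, it can help to structure the conversion as a single loop over an explicit name mapping and to track which checkpoint weights were actually consumed. The following is only a sketch: `original_state_dict` (a name-to-NumPy-array dictionary loaded from the original checkpoint) and the entries of `name_mapping` are hypothetical placeholders that have to be adapted to *brand_new_bert*:

```python
import torch

# Hypothetical mapping from original checkpoint names to 🤗 Transformers parameter names.
name_mapping = {
    "encoder/layer_0/attention/dense/kernel": "encoder.layer.0.attention.dense.weight",
    # ... one entry per checkpoint weight ...
}

hf_state_dict = model.state_dict()
unused_weights = set(original_state_dict.keys())

for original_name, hf_name in name_mapping.items():
    pretrained_weight = torch.from_numpy(original_state_dict[original_name])
    assert (
        hf_state_dict[hf_name].shape == pretrained_weight.shape
    ), f"Shape mismatch for {hf_name}: {hf_state_dict[hf_name].shape} vs {pretrained_weight.shape}"
    print(f"Initialize PyTorch weight {hf_name} from {original_name}")
    hf_state_dict[hf_name] = pretrained_weight
    unused_weights.discard(original_name)

model.load_state_dict(hf_state_dict)
# Anything left here was never used and usually points to a conversion bug.
print("Checkpoint weights that were not used:", unused_weights)
```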
Having correctly loaded the checkpoint into the 🤗 Transformers implementation, you can then save the model under a folder of your choice `/path/to/converted/checkpoint/folder` that should then contain both a `pytorch_model.bin` file and a `config.json` file:

```python
model.save_pretrained("/path/to/converted/checkpoint/folder")
```

**7. Implement the forward pass**

Having managed to correctly load the pretrained weights into the 🤗 Transformers implementation, you should now make sure that the forward pass is correctly implemented. In [Get familiar with the original repository](#3-4-run-a-pretrained-checkpoint-using-the-original-repository), you have already created a script that runs a forward pass of the model using the original repository. Now you should write an analogous script using the 🤗 Transformers implementation instead of the original one. It should look as follows:

```python
model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]
output = model(input_ids).last_hidden_state
```

It is very likely that the 🤗 Transformers implementation and the original model implementation don't give the exact same output the very first time or that the forward pass throws an error. Don't be disappointed - it's expected! First, you should make sure that the forward pass doesn't throw any errors. It often happens that the wrong dimensions are used, leading to a *Dimensionality mismatch* error, or that the wrong data type object is used, *e.g.* `torch.long` instead of `torch.float32`. Don't hesitate to ask the Hugging Face team for help if you don't manage to solve certain errors.

The final part to make sure the 🤗 Transformers implementation works correctly is to ensure that the outputs are equivalent to a precision of `1e-3`. First, you should ensure that the output shapes are identical, *i.e.* `outputs.shape` should yield the same value for the script of the 🤗 Transformers implementation and the original implementation. Next, you should make sure that the output values are identical as well. This is one of the most difficult parts of adding a new model. Common reasons why the outputs are not identical are:

- Some layers were not added, *i.e.* an *activation* layer was not added, or the residual connection was forgotten
- The word embedding matrix was not tied
- The wrong positional embeddings are used because the original implementation uses an offset
- Dropout is applied during the forward pass. To fix this, make sure *model.training* is *False* and that no dropout layer is falsely activated during the forward pass, *i.e.* pass *self.training* to [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)

The best way to fix the problem is usually to look at the forward pass of the original implementation and the 🤗 Transformers implementation side-by-side and check if there are any differences. Ideally, you should debug/print out intermediate outputs of both implementations of the forward pass to find the exact position in the network where the 🤗 Transformers implementation shows a different output than the original implementation. First, make sure that the hard-coded `input_ids` in both scripts are identical. Next, verify that the outputs of the first transformation of the `input_ids` (usually the word embeddings) are identical. And then work your way up to the very last layer of the network.
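One way to inspect those intermediate outputs without permanently adding print statements to the modeling code is to register forward hooks on the sub-modules you care about. The snippet below is only a sketch; the sub-module names (`"embeddings"`, `"encoder.layer.0"`) are placeholders and have to be replaced by the actual attribute names of your implementation:

```python
import torch

captured = {}


def make_hook(name):
    def hook(module, inputs, output):
        # Keep the first tensor of the output for later comparison.
        captured[name] = output[0] if isinstance(output, tuple) else output

    return hook


# Placeholder sub-module names - adapt these to the actual architecture.
for name in ["embeddings", "encoder.layer.0"]:
    model.get_submodule(name).register_forward_hook(make_hook(name))

model.eval()
with torch.no_grad():
    model(torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]]))

for name, tensor in captured.items():
    print(name, tensor.shape)
```

The same hooks can be mirrored in the original implementation (or replaced by print statements there) so that the captured tensors can be compared pair by pair with `torch.allclose`.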
At some point, you will notice a difference between the two implementations, which should point you to the bug in the 🤗 Transformers implementation. From our experience, a simple and efficient way is to add many print statements in both the original implementation and the 🤗 Transformers implementation, at the same positions in the network respectively, and to successively remove the print statements that show the same values for intermediate representations.

When you're confident that both implementations yield the same output, verified with `torch.allclose(original_output, output, atol=1e-3)`, you're done with the most difficult part! Congratulations - the work left to be done should be a cakewalk 😊.

**8. Adding all necessary model tests**

At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure the implementation is fully compatible with 🤗 Transformers, all common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`. Run this test file to verify that all common tests pass:

```bash
pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py
```

Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that

- a) The community can easily understand your work by looking at specific tests of *brand_new_bert*
- b) Future changes to your model will not break any important feature of the model.

At first, integration tests should be added. Those integration tests essentially do the same as the debugging scripts you used earlier to port the model to 🤗 Transformers. A template of those model tests, called `BrandNewBertModelIntegrationTests`, has already been added by the Cookiecutter and only has to be filled out by you. To ensure that those tests are passing, run

```bash
RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests
```

<Tip>

In case you are using Windows, you should replace `RUN_SLOW=1` with `SET RUN_SLOW=1`

</Tip>

Second, all features that are special to *brand_new_bert* should additionally be tested in a separate test under `BrandNewBertModelTester`/`BrandNewBertModelTest`. This part is often forgotten but is extremely useful in two ways:

- It helps to transfer the knowledge you have acquired during the model addition to the community by showing how the special features of *brand_new_bert* should work.
- Future contributors can quickly test changes to the model by running those special tests.

**9. Implement the tokenizer**

Next, we should add the tokenizer of *brand_new_bert*. Usually, the tokenizer is equivalent to, or very similar to, an already existing tokenizer of 🤗 Transformers. It is very important to find/extract the original tokenizer file and to manage to load this file into the 🤗 Transformers implementation of the tokenizer.

To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository that inputs a string and returns the `input_ids`. It could look similar to this (in pseudo-code):

```python
input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str) ``` You might have to take a deeper look again into the original repository to find the correct tokenizer function or you might even have to do changes to your clone of the original repository to only output the `input_ids`. Having written a functional tokenization script that uses the original repository, an analogous script for 🤗 Transformers should be created. It should look similar to this: ```python from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") input_ids = tokenizer(input_str).input_ids ``` When both `input_ids` yield the same values, as a final step a tokenizer test file should also be added. Analogous to the modeling test files of *brand_new_bert*, the tokenization test files of *brand_new_bert* should contain a couple of hard-coded integration tests. **10. Run End-to-end integration tests** Having added the tokenizer, you should also add a couple of end-to-end integration tests using both the model and the tokenizer to `tests/models/brand_new_bert/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such a test should show on a meaningful text-to-text sample that the 🤗 Transformers implementation works as expected. A meaningful text-to-text sample can include *e.g.* a source-to-target-translation pair, an article-to-summary pair, a question-to-answer pair, etc… If none of the ported checkpoints has been fine-tuned on a downstream task it is enough to simply rely on the model tests. In a final step to ensure that the model is fully functional, it is advised that you also run all tests on GPU. It can happen that you forgot to add some `.to(self.device)` statements to internal tensors of the model, which in such a test would show in an error. In case you have no access to a GPU, the Hugging Face team can take care of running those tests for you. **11. Add Docstring** Now, all the necessary functionality for *brand_new_bert* is added - you're almost done! The only thing left to add is a nice docstring and a doc page. The Cookiecutter should have added a template file called `docs/source/model_doc/brand_new_bert.md` that you should fill out. Users of your model will usually first look at this page before using your model. Hence, the documentation must be understandable and concise. It is very useful for the community to add some *Tips* to show how the model should be used. Don't hesitate to ping the Hugging Face team regarding the docstrings. Next, make sure that the docstring added to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` is correct and included all necessary inputs and outputs. We have a detailed guide about writing documentation and our docstring format [here](writing-documentation). It is always good to remind oneself that documentation should be treated at least as carefully as the code in 🤗 Transformers since the documentation is usually the first contact point of the community with the model. **Code refactor** Great, now you have added all the necessary code for *brand_new_bert*. 
At this point, you should correct some potential incorrect code style by running: ```bash make style ``` and verify that your coding style passes the quality check: ```bash make quality ``` There are a couple of other very strict design tests in 🤗 Transformers that might still be failing, which shows up in the tests of your pull request. This is often because of some missing information in the docstring or some incorrect naming. The Hugging Face team will surely help you if you're stuck here. Lastly, it is always a good idea to refactor one's code after having ensured that the code works correctly. With all tests passing, now it's a good time to go over the added code again and do some refactoring. You have now finished the coding part, congratulation! 🎉 You are Awesome! 😎 **12. Upload the models to the model hub** In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each uploaded model checkpoint. You can get familiar with the hub functionalities by reading our [Model sharing and uploading Page](model_sharing). You should work alongside the Hugging Face team here to decide on a fitting name for each checkpoint and to get the required access rights to be able to upload the model under the author's organization of *brand_new_bert*. The `push_to_hub` method, present in all models in `transformers`, is a quick and efficient way to push your checkpoint to the hub. A little snippet is pasted below: ```python brand_new_bert.push_to_hub("brand_new_bert") # Uncomment the following line to push to an organization. # brand_new_bert.push_to_hub("<organization>/brand_new_bert") ``` It is worth spending some time to create fitting model cards for each checkpoint. The model cards should highlight the specific characteristics of this particular checkpoint, *e.g.* On which dataset was the checkpoint pretrained/fine-tuned on? On what down-stream task should the model be used? And also include some code on how to correctly use the model. **13. (Optional) Add notebook** It is very helpful to add a notebook that showcases in-detail how *brand_new_bert* can be used for inference and/or fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community. **14. Submit your finished PR** You're done programming now and can move to the last step, which is getting your PR merged into main. Usually, the Hugging Face team should have helped you already at this point, but it is worth taking some time to give your finished PR a nice description and eventually add comments to your code, if you want to point out certain design choices to your reviewer. ### Share your work!! Now, it's time to get some credit from the community for your work! Having completed a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share your achievements with the community. **You have made another model that is super easy to access for everyone in the community! 🤯**
transformers/docs/source/en/add_new_model.md/0
{ "file_path": "transformers/docs/source/en/add_new_model.md", "repo_id": "transformers", "token_count": 14006 }
244
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Use tokenizers from 🤗 Tokenizers The [`PreTrainedTokenizerFast`] depends on the [🤗 Tokenizers](https://huggingface.co/docs/tokenizers) library. The tokenizers obtained from the 🤗 Tokenizers library can be loaded very simply into 🤗 Transformers. Before getting in the specifics, let's first start by creating a dummy tokenizer in a few lines: ```python >>> from tokenizers import Tokenizer >>> from tokenizers.models import BPE >>> from tokenizers.trainers import BpeTrainer >>> from tokenizers.pre_tokenizers import Whitespace >>> tokenizer = Tokenizer(BPE(unk_token="[UNK]")) >>> trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) >>> tokenizer.pre_tokenizer = Whitespace() >>> files = [...] >>> tokenizer.train(files, trainer) ``` We now have a tokenizer trained on the files we defined. We can either continue using it in that runtime, or save it to a JSON file for future re-use. ## Loading directly from the tokenizer object Let's see how to leverage this tokenizer object in the 🤗 Transformers library. The [`PreTrainedTokenizerFast`] class allows for easy instantiation, by accepting the instantiated *tokenizer* object as an argument: ```python >>> from transformers import PreTrainedTokenizerFast >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) ``` This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to [the tokenizer page](main_classes/tokenizer) for more information. ## Loading from a JSON file In order to load a tokenizer from a JSON file, let's first start by saving our tokenizer: ```python >>> tokenizer.save("tokenizer.json") ``` The path to which we saved this file can be passed to the [`PreTrainedTokenizerFast`] initialization method using the `tokenizer_file` parameter: ```python >>> from transformers import PreTrainedTokenizerFast >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json") ``` This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to [the tokenizer page](main_classes/tokenizer) for more information.
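As a quick sanity check, you can call the wrapped tokenizer like any other 🤗 Transformers tokenizer. The exact ids depend on the files the tokenizer was trained on above, so no output is shown here:

```python
>>> encoding = fast_tokenizer("Let's test this tokenizer!")
>>> encoding.input_ids  # the ids depend on the training files used above
>>> fast_tokenizer.decode(encoding.input_ids)
```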
transformers/docs/source/en/fast_tokenizers.md/0
{ "file_path": "transformers/docs/source/en/fast_tokenizers.md", "repo_id": "transformers", "token_count": 792 }
245
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Exporting 🤗 Transformers models to ONNX 🤗 Transformers provides a `transformers.onnx` package that enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects. See the [guide](../serialization) on exporting 🤗 Transformers models for more details. ## ONNX Configurations We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export: * Encoder-based models inherit from [`~onnx.config.OnnxConfig`] * Decoder-based models inherit from [`~onnx.config.OnnxConfigWithPast`] * Encoder-decoder models inherit from [`~onnx.config.OnnxSeq2SeqConfigWithPast`] ### OnnxConfig [[autodoc]] onnx.config.OnnxConfig ### OnnxConfigWithPast [[autodoc]] onnx.config.OnnxConfigWithPast ### OnnxSeq2SeqConfigWithPast [[autodoc]] onnx.config.OnnxSeq2SeqConfigWithPast ## ONNX Features Each ONNX configuration is associated with a set of _features_ that enable you to export models for different types of topologies or tasks. ### FeaturesManager [[autodoc]] onnx.features.FeaturesManager
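As a rough illustration of how these classes are meant to be used, a custom configuration for an encoder-based model typically subclasses [`~onnx.config.OnnxConfig`] and declares its inputs with dynamic axes. The class below and the model it would target are hypothetical; the property pattern follows the exporting guide:

```python
from collections import OrderedDict
from typing import Mapping

from transformers.onnx import OnnxConfig


class CustomEncoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes: batch size and sequence length may vary at runtime.
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )
```

See the [exporting guide](../serialization) for the full, authoritative walkthrough of exporting a checkpoint with such a configuration.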
transformers/docs/source/en/main_classes/onnx.md/0
{ "file_path": "transformers/docs/source/en/main_classes/onnx.md", "repo_id": "transformers", "token_count": 523 }
246
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BART <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=bart"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-bart-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/bart-large-mnli"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The Bart model was proposed in [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer on 29 Oct, 2019. According to the abstract, - Bart uses a standard seq2seq/machine translation architecture with a bidirectional encoder (like BERT) and a left-to-right decoder (like GPT). - The pretraining task involves randomly shuffling the order of the original sentences and a novel in-filling scheme, where spans of text are replaced with a single mask token. - BART is particularly effective when fine tuned for text generation but also works well for comprehension tasks. It matches the performance of RoBERTa with comparable training resources on GLUE and SQuAD, achieves new state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains of up to 6 ROUGE. This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The authors' code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/bart). ## Usage tips: - BART is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - Sequence-to-sequence model with an encoder and a decoder. Encoder is fed a corrupted version of the tokens, decoder is fed the original tokens (but has a mask to hide the future words like a regular transformers decoder). A composition of the following transformations are applied on the pretraining tasks for the encoder: * mask random tokens (like in BERT) * delete random tokens * mask a span of k tokens with a single mask token (a span of 0 tokens is an insertion of a mask token) * permute sentences * rotate the document to make it start at a specific token ## Implementation Notes - Bart doesn't use `token_type_ids` for sequence classification. Use [`BartTokenizer`] or [`~BartTokenizer.encode`] to get the proper splitting. - The forward pass of [`BartModel`] will create the `decoder_input_ids` if they are not passed. This is different than some other modeling APIs. A typical use case of this feature is mask filling. - Model predictions are intended to be identical to the original implementation when `forced_bos_token_id=0`. 
This only works, however, if the string you pass to [`fairseq.encode`] starts with a space. - [`~generation.GenerationMixin.generate`] should be used for conditional generation tasks like summarization, see the example in that docstrings. - Models that load the *facebook/bart-large-cnn* weights will not have a `mask_token_id`, or be able to perform mask-filling tasks. ## Mask Filling The `facebook/bart-base` and `facebook/bart-large` checkpoints can be used to fill multi-token masks. ```python from transformers import BartForConditionalGeneration, BartTokenizer model = BartForConditionalGeneration.from_pretrained("facebook/bart-large", forced_bos_token_id=0) tok = BartTokenizer.from_pretrained("facebook/bart-large") example_english_phrase = "UN Chief Says There Is No <mask> in Syria" batch = tok(example_english_phrase, return_tensors="pt") generated_ids = model.generate(batch["input_ids"]) assert tok.batch_decode(generated_ids, skip_special_tokens=True) == [ "UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria" ] ``` ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BART. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="summarization"/> - A blog post on [Distributed Training: Train BART/T5 for Summarization using 🤗 Transformers and Amazon SageMaker](https://huggingface.co/blog/sagemaker-distributed-training-seq2seq). - A notebook on how to [finetune BART for summarization with fastai using blurr](https://colab.research.google.com/github/ohmeow/ohmeow_website/blob/master/posts/2021-05-25-mbart-sequence-classification-with-blurr.ipynb). 🌎 - A notebook on how to [finetune BART for summarization in two languages with Trainer class](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb). 🌎 - [`BartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb). - [`TFBartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb). - [`FlaxBartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/summarization). - An example of how to train [`BartForConditionalGeneration`] with a Hugging Face `datasets` object can be found in this [forum discussion](https://discuss.huggingface.co/t/train-bart-for-conditional-generation-e-g-summarization/1904) - [Summarization](https://huggingface.co/course/chapter7/5?fw=pt#summarization) chapter of the 🤗 Hugging Face course. 
- [Summarization task guide](../tasks/summarization) <PipelineTag pipeline="fill-mask"/> - [`BartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [`TFBartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). - [`FlaxBartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb). - [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course. - [Masked language modeling task guide](../tasks/masked_language_modeling) <PipelineTag pipeline="translation"/> - A notebook on how to [finetune mBART using Seq2SeqTrainer for Hindi to English translation](https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb). 🌎 - [`BartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/translation) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb). - [`TFBartForConditionalGeneration`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/translation) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb). - [Translation task guide](../tasks/translation) See also: - [Text classification task guide](../tasks/sequence_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Distilled checkpoints](https://huggingface.co/models?search=distilbart) are described in this [paper](https://arxiv.org/abs/2010.13002). 
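As a minimal illustration of the summarization use case listed above, the CNN/DailyMail checkpoint can be used directly through the `pipeline` API. The generated summary depends on the checkpoint and the generation parameters, so treat the output as indicative only:

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
article = (
    "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, "
    "and is the tallest structure in Paris."
)
print(summarizer(article, max_length=50, min_length=10, do_sample=False))
```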
## BartConfig [[autodoc]] BartConfig - all ## BartTokenizer [[autodoc]] BartTokenizer - all ## BartTokenizerFast [[autodoc]] BartTokenizerFast - all <frameworkcontent> <pt> ## BartModel [[autodoc]] BartModel - forward ## BartForConditionalGeneration [[autodoc]] BartForConditionalGeneration - forward ## BartForSequenceClassification [[autodoc]] BartForSequenceClassification - forward ## BartForQuestionAnswering [[autodoc]] BartForQuestionAnswering - forward ## BartForCausalLM [[autodoc]] BartForCausalLM - forward </pt> <tf> ## TFBartModel [[autodoc]] TFBartModel - call ## TFBartForConditionalGeneration [[autodoc]] TFBartForConditionalGeneration - call ## TFBartForSequenceClassification [[autodoc]] TFBartForSequenceClassification - call </tf> <jax> ## FlaxBartModel [[autodoc]] FlaxBartModel - __call__ - encode - decode ## FlaxBartForConditionalGeneration [[autodoc]] FlaxBartForConditionalGeneration - __call__ - encode - decode ## FlaxBartForSequenceClassification [[autodoc]] FlaxBartForSequenceClassification - __call__ - encode - decode ## FlaxBartForQuestionAnswering [[autodoc]] FlaxBartForQuestionAnswering - __call__ - encode - decode ## FlaxBartForCausalLM [[autodoc]] FlaxBartForCausalLM - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/bart.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bart.md", "repo_id": "transformers", "token_count": 3297 }
247
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BLOOM ## Overview The BLOOM model has been proposed with its various versions through the [BigScience Workshop](https://bigscience.huggingface.co/). BigScience is inspired by other open science initiatives where researchers have pooled their time and resources to collectively achieve a higher impact. The architecture of BLOOM is essentially similar to GPT3 (auto-regressive model for next token prediction), but has been trained on 46 different languages and 13 programming languages. Several smaller versions of the models have been trained on the same dataset. BLOOM is available in the following versions: - [bloom-560m](https://huggingface.co/bigscience/bloom-560m) - [bloom-1b1](https://huggingface.co/bigscience/bloom-1b1) - [bloom-1b7](https://huggingface.co/bigscience/bloom-1b7) - [bloom-3b](https://huggingface.co/bigscience/bloom-3b) - [bloom-7b1](https://huggingface.co/bigscience/bloom-7b1) - [bloom](https://huggingface.co/bigscience/bloom) (176B parameters) ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BLOOM. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-generation"/> - [`BloomForCausalLM`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#gpt-2gpt-and-causal-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). See also: - [Causal language modeling task guide](../tasks/language_modeling) - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) ⚡️ Inference - A blog on [Optimization story: Bloom inference](https://huggingface.co/blog/bloom-inference-optimization). - A blog on [Incredibly Fast BLOOM Inference with DeepSpeed and Accelerate](https://huggingface.co/blog/bloom-inference-pytorch-scripts). ⚙️ Training - A blog on [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed). 
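As a minimal illustration of the causal language modeling use case listed above, BLOOM checkpoints can be used directly with the auto classes. The smallest `bigscience/bloom-560m` checkpoint and the prompt below are chosen purely for illustration:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")

inputs = tokenizer("BLOOM is a multilingual language model that", return_tensors="pt")
# Greedy decoding of a short continuation
outputs = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```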
## BloomConfig [[autodoc]] BloomConfig - all ## BloomTokenizerFast [[autodoc]] BloomTokenizerFast - all <frameworkcontent> <pt> ## BloomModel [[autodoc]] BloomModel - forward ## BloomForCausalLM [[autodoc]] BloomForCausalLM - forward ## BloomForSequenceClassification [[autodoc]] BloomForSequenceClassification - forward ## BloomForTokenClassification [[autodoc]] BloomForTokenClassification - forward ## BloomForQuestionAnswering [[autodoc]] BloomForQuestionAnswering - forward </pt> <jax> ## FlaxBloomModel [[autodoc]] FlaxBloomModel - __call__ ## FlaxBloomForCausalLM [[autodoc]] FlaxBloomForCausalLM - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/bloom.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bloom.md", "repo_id": "transformers", "token_count": 1158 }
248
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Fuyu

## Overview

The Fuyu model was created by [ADEPT](https://www.adept.ai/blog/fuyu-8b), and authored by Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, Sağnak Taşırlar.

The authors introduced Fuyu-8B, a decoder-only multimodal model based on the classic transformers architecture, with query and key normalization. A linear encoder is added to create multimodal embeddings from image inputs.

By treating image tokens like text tokens and using a special image-newline character, the model knows when an image line ends. Image positional embeddings are removed. This avoids the need for different training phases for various image resolutions. With 8 billion parameters and licensed under CC-BY-NC, Fuyu-8B is notable for its ability to handle both text and images, its impressive context size of 16K, and its overall performance.

<Tip warning={true}>

The `Fuyu` models were trained using `bfloat16`, but the original inference uses `float16`. The checkpoints uploaded on the hub use `torch_dtype = 'float16'`, which will be used by the `AutoModel` API to cast the checkpoints from `torch.float32` to `torch.float16`.

The `dtype` of the online weights is mostly irrelevant unless you are using `torch_dtype="auto"` when initializing a model with `model = AutoModelForCausalLM.from_pretrained("path", torch_dtype="auto")`. The reason is that the model will first be downloaded (using the `dtype` of the checkpoints online) and then cast to the default `dtype` of `torch` (which becomes `torch.float32`). Users should specify the `torch_dtype` they want; if they don't, it will be `torch.float32`.

Fine-tuning the model in `float16` is not recommended and is known to produce `nan`; the model should therefore be fine-tuned in `bfloat16`.
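As a minimal sketch of the above (assuming the `adept-hf-collab/fuyu-8b` checkpoint used elsewhere on this page), loading the model explicitly in `bfloat16` looks like this:

```py
import torch
from transformers import FuyuForCausalLM

# Request bfloat16 explicitly instead of relying on the default float32 upcast
model = FuyuForCausalLM.from_pretrained("adept-hf-collab/fuyu-8b", torch_dtype=torch.bfloat16)
```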
</Tip>

Tips:

- To convert the model, you need to clone the original repository using `git clone https://github.com/persimmon-ai-labs/adept-inference`, then get the checkpoints:

```bash
git clone https://github.com/persimmon-ai-labs/adept-inference
wget path/to/fuyu-8b-model-weights.tar
tar -xvf fuyu-8b-model-weights.tar
python src/transformers/models/fuyu/convert_fuyu_weights_to_hf.py --input_dir /path/to/downloaded/fuyu/weights/ --output_dir /output/path \
    --pt_model_path /path/to/fuyu_8b_release/iter_0001251/mp_rank_00/model_optim_rng.pt --ada_lib_path /path/to/adept-inference
```

For the chat model:

```bash
wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_chat_model_release.tar
tar -xvf 8b_chat_model_release.tar
```

Then, the model can be loaded via:

```py
from transformers import FuyuForCausalLM

model = FuyuForCausalLM.from_pretrained('/output/path')
```

Inputs need to be passed through a specific Processor to have the correct formats. A processor requires an image_processor and a tokenizer. Hence, inputs can be loaded via:

```py
import io

import requests
from PIL import Image

from transformers import AutoTokenizer
from transformers.models.fuyu.processing_fuyu import FuyuProcessor
from transformers.models.fuyu.image_processing_fuyu import FuyuImageProcessor

tokenizer = AutoTokenizer.from_pretrained('adept-hf-collab/fuyu-8b')
image_processor = FuyuImageProcessor()

processor = FuyuProcessor(image_processor=image_processor, tokenizer=tokenizer)
text_prompt = "Generate a coco-style caption.\\n"

bus_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
bus_image_pil = Image.open(io.BytesIO(requests.get(bus_image_url).content))
inputs_to_model = processor(text=text_prompt, images=bus_image_pil)
```

This model was contributed by [Molbap](https://huggingface.co/Molbap).
The original code can be found [here](https://github.com/persimmon-ai-labs/adept-inference).

- Fuyu uses a `sentencepiece` based tokenizer, with a `Unigram` model. It supports bytefallback, which is only available in `tokenizers==0.14.0` for the fast tokenizer. The `LlamaTokenizer` is used as it is a standard wrapper around sentencepiece.

- The authors suggest using the following prompt for image captioning: `f"Generate a coco-style caption.\\n"`

## FuyuConfig

[[autodoc]] FuyuConfig

## FuyuForCausalLM

[[autodoc]] FuyuForCausalLM
    - forward

## FuyuImageProcessor

[[autodoc]] FuyuImageProcessor
    - __call__

## FuyuProcessor

[[autodoc]] FuyuProcessor
    - __call__
transformers/docs/source/en/model_doc/fuyu.md/0
{ "file_path": "transformers/docs/source/en/model_doc/fuyu.md", "repo_id": "transformers", "token_count": 1657 }
249
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LayoutLMV2 ## Overview The LayoutLMV2 model was proposed in [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. LayoutLMV2 improves [LayoutLM](layoutlm) to obtain state-of-the-art results across several document image understanding benchmarks: - information extraction from scanned documents: the [FUNSD](https://guillaumejaume.github.io/FUNSD/) dataset (a collection of 199 annotated forms comprising more than 30,000 words), the [CORD](https://github.com/clovaai/cord) dataset (a collection of 800 receipts for training, 100 for validation and 100 for testing), the [SROIE](https://rrc.cvc.uab.es/?ch=13) dataset (a collection of 626 receipts for training and 347 receipts for testing) and the [Kleister-NDA](https://github.com/applicaai/kleister-nda) dataset (a collection of non-disclosure agreements from the EDGAR database, including 254 documents for training, 83 documents for validation, and 203 documents for testing). - document image classification: the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset (a collection of 400,000 images belonging to one of 16 classes). - document visual question answering: the [DocVQA](https://arxiv.org/abs/2007.00398) dataset (a collection of 50,000 questions defined on 12,000+ document images). The abstract from the paper is the following: *Pre-training of text and layout has proved effective in a variety of visually-rich document understanding tasks due to its effective model architecture and the advantage of large-scale unlabeled scanned/digital-born documents. In this paper, we present LayoutLMv2 by pre-training text, layout and image in a multi-modal framework, where new model architectures and pre-training tasks are leveraged. Specifically, LayoutLMv2 not only uses the existing masked visual-language modeling task but also the new text-image alignment and text-image matching tasks in the pre-training stage, where cross-modality interaction is better learned. Meanwhile, it also integrates a spatial-aware self-attention mechanism into the Transformer architecture, so that the model can fully understand the relative positional relationship among different text blocks. Experiment results show that LayoutLMv2 outperforms strong baselines and achieves new state-of-the-art results on a wide variety of downstream visually-rich document understanding tasks, including FUNSD (0.7895 -> 0.8420), CORD (0.9493 -> 0.9601), SROIE (0.9524 -> 0.9781), Kleister-NDA (0.834 -> 0.852), RVL-CDIP (0.9443 -> 0.9564), and DocVQA (0.7295 -> 0.8672). 
The pre-trained LayoutLMv2 model is publicly available at this https URL.* LayoutLMv2 depends on `detectron2`, `torchvision` and `tesseract`. Run the following to install them: ```bash python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' python -m pip install torchvision tesseract ``` (If you are developing for LayoutLMv2, note that passing the doctests also requires the installation of these packages.) ## Usage tips - The main difference between LayoutLMv1 and LayoutLMv2 is that the latter incorporates visual embeddings during pre-training (while LayoutLMv1 only adds visual embeddings during fine-tuning). - LayoutLMv2 adds both a relative 1D attention bias as well as a spatial 2D attention bias to the attention scores in the self-attention layers. Details can be found on page 5 of the [paper](https://arxiv.org/abs/2012.14740). - Demo notebooks on how to use the LayoutLMv2 model on RVL-CDIP, FUNSD, DocVQA, CORD can be found [here](https://github.com/NielsRogge/Transformers-Tutorials). - LayoutLMv2 uses Facebook AI's [Detectron2](https://github.com/facebookresearch/detectron2/) package for its visual backbone. See [this link](https://detectron2.readthedocs.io/en/latest/tutorials/install.html) for installation instructions. - In addition to `input_ids`, [`~LayoutLMv2Model.forward`] expects 2 additional inputs, namely `image` and `bbox`. The `image` input corresponds to the original document image in which the text tokens occur. The model expects each document image to be of size 224x224. This means that if you have a batch of document images, `image` should be a tensor of shape (batch_size, 3, 224, 224). This can be either a `torch.Tensor` or a `Detectron2.structures.ImageList`. You don't need to normalize the channels, as this is done by the model. Important to note is that the visual backbone expects BGR channels instead of RGB, as all models in Detectron2 are pre-trained using the BGR format. The `bbox` input are the bounding boxes (i.e. 2D-positions) of the input text tokens. This is identical to [`LayoutLMModel`]. These can be obtained using an external OCR engine such as Google's [Tesseract](https://github.com/tesseract-ocr/tesseract) (there's a [Python wrapper](https://pypi.org/project/pytesseract/) available). Each bounding box should be in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on a 0-1000 scale. To normalize, you can use the following function: ```python def normalize_bbox(bbox, width, height): return [ int(1000 * (bbox[0] / width)), int(1000 * (bbox[1] / height)), int(1000 * (bbox[2] / width)), int(1000 * (bbox[3] / height)), ] ``` Here, `width` and `height` correspond to the width and height of the original document in which the token occurs (before resizing the image). Those can be obtained using the Python Image Library (PIL) library for example, as follows: ```python from PIL import Image image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ) width, height = image.size ``` However, this model includes a brand new [`~transformers.LayoutLMv2Processor`] which can be used to directly prepare data for the model (including applying OCR under the hood). More information can be found in the "Usage" section below. 
- Internally, [`~transformers.LayoutLMv2Model`] will send the `image` input through its visual backbone to obtain a lower-resolution feature map, whose shape is equal to the `image_feature_pool_shape` attribute of [`~transformers.LayoutLMv2Config`]. This feature map is then flattened to obtain a sequence of image tokens. As the size of the feature map is 7x7 by default, one obtains 49 image tokens. These are then concatenated with the text tokens, and send through the Transformer encoder. This means that the last hidden states of the model will have a length of 512 + 49 = 561, if you pad the text tokens up to the max length. More generally, the last hidden states will have a shape of `seq_length` + `image_feature_pool_shape[0]` * `config.image_feature_pool_shape[1]`. - When calling [`~transformers.LayoutLMv2Model.from_pretrained`], a warning will be printed with a long list of parameter names that are not initialized. This is not a problem, as these parameters are batch normalization statistics, which are going to have values when fine-tuning on a custom dataset. - If you want to train the model in a distributed environment, make sure to call [`synchronize_batch_norm`] on the model in order to properly synchronize the batch normalization layers of the visual backbone. In addition, there's LayoutXLM, which is a multilingual version of LayoutLMv2. More information can be found on [LayoutXLM's documentation page](layoutxlm). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LayoutLMv2. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-classification"/> - A notebook on how to [finetune LayoutLMv2 for text-classification on RVL-CDIP dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/RVL-CDIP/Fine_tuning_LayoutLMv2ForSequenceClassification_on_RVL_CDIP.ipynb). - See also: [Text classification task guide](../tasks/sequence_classification) <PipelineTag pipeline="question-answering"/> - A notebook on how to [finetune LayoutLMv2 for question-answering on DocVQA dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/DocVQA/Fine_tuning_LayoutLMv2ForQuestionAnswering_on_DocVQA.ipynb). - See also: [Question answering task guide](../tasks/question_answering) - See also: [Document question answering task guide](../tasks/document_question_answering) <PipelineTag pipeline="token-classification"/> - A notebook on how to [finetune LayoutLMv2 for token-classification on CORD dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/CORD/Fine_tuning_LayoutLMv2ForTokenClassification_on_CORD.ipynb). - A notebook on how to [finetune LayoutLMv2 for token-classification on FUNSD dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/Fine_tuning_LayoutLMv2ForTokenClassification_on_FUNSD_using_HuggingFace_Trainer.ipynb). 
- See also: [Token classification task guide](../tasks/token_classification) ## Usage: LayoutLMv2Processor The easiest way to prepare data for the model is to use [`LayoutLMv2Processor`], which internally combines a image processor ([`LayoutLMv2ImageProcessor`]) and a tokenizer ([`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`]). The image processor handles the image modality, while the tokenizer handles the text modality. A processor combines both, which is ideal for a multi-modal model like LayoutLMv2. Note that you can still use both separately, if you only want to handle one modality. ```python from transformers import LayoutLMv2ImageProcessor, LayoutLMv2TokenizerFast, LayoutLMv2Processor image_processor = LayoutLMv2ImageProcessor() # apply_ocr is set to True by default tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased") processor = LayoutLMv2Processor(image_processor, tokenizer) ``` In short, one can provide a document image (and possibly additional data) to [`LayoutLMv2Processor`], and it will create the inputs expected by the model. Internally, the processor first uses [`LayoutLMv2ImageProcessor`] to apply OCR on the image to get a list of words and normalized bounding boxes, as well to resize the image to a given size in order to get the `image` input. The words and normalized bounding boxes are then provided to [`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`], which converts them to token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide word labels to the processor, which are turned into token-level `labels`. [`LayoutLMv2Processor`] uses [PyTesseract](https://pypi.org/project/pytesseract/), a Python wrapper around Google's Tesseract OCR engine, under the hood. Note that you can still use your own OCR engine of choice, and provide the words and normalized boxes yourself. This requires initializing [`LayoutLMv2ImageProcessor`] with `apply_ocr` set to `False`. In total, there are 5 use cases that are supported by the processor. Below, we list them all. Note that each of these use cases work for both batched and non-batched inputs (we illustrate them for non-batched inputs). **Use case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True** This is the simplest case, in which the processor (actually the image processor) will perform OCR on the image to get the words and normalized bounding boxes. ```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") encoding = processor( image, return_tensors="pt" ) # you can also add all tokenizer parameters here such as padding, truncation print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']) ``` **Use case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False** In case one wants to do OCR themselves, one can initialize the image processor with `apply_ocr` set to `False`. In that case, one should provide the words and corresponding (normalized) bounding boxes themselves to the processor. 
```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes encoding = processor(image, words, boxes=boxes, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']) ``` **Use case 3: token classification (training), apply_ocr=False** For token classification tasks (such as FUNSD, CORD, SROIE, Kleister-NDA), one can also provide the corresponding word labels in order to train a model. The processor will then convert these into token-level `labels`. By default, it will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. In case you want all wordpieces of a word to be labeled, you can initialize the tokenizer with `only_label_first_subword` set to `False`. ```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes word_labels = [1, 2] encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'labels', 'image']) ``` **Use case 4: visual question answering (inference), apply_ocr=True** For visual question answering tasks (such as DocVQA), you can provide a question to the processor. By default, the processor will apply OCR on the image, and create [CLS] question tokens [SEP] word tokens [SEP]. ```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") question = "What's his name?" encoding = processor(image, question, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']) ``` **Use case 5: visual question answering (inference), apply_ocr=False** For visual question answering tasks (such as DocVQA), you can provide a question to the processor. If you want to perform OCR yourself, you can provide your own words and (normalized) bounding boxes to the processor. ```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") question = "What's his name?" 
words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes encoding = processor(image, question, words, boxes=boxes, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']) ``` ## LayoutLMv2Config [[autodoc]] LayoutLMv2Config ## LayoutLMv2FeatureExtractor [[autodoc]] LayoutLMv2FeatureExtractor - __call__ ## LayoutLMv2ImageProcessor [[autodoc]] LayoutLMv2ImageProcessor - preprocess ## LayoutLMv2Tokenizer [[autodoc]] LayoutLMv2Tokenizer - __call__ - save_vocabulary ## LayoutLMv2TokenizerFast [[autodoc]] LayoutLMv2TokenizerFast - __call__ ## LayoutLMv2Processor [[autodoc]] LayoutLMv2Processor - __call__ ## LayoutLMv2Model [[autodoc]] LayoutLMv2Model - forward ## LayoutLMv2ForSequenceClassification [[autodoc]] LayoutLMv2ForSequenceClassification ## LayoutLMv2ForTokenClassification [[autodoc]] LayoutLMv2ForTokenClassification ## LayoutLMv2ForQuestionAnswering [[autodoc]] LayoutLMv2ForQuestionAnswering
transformers/docs/source/en/model_doc/layoutlmv2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/layoutlmv2.md", "repo_id": "transformers", "token_count": 5361 }
250
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # M2M100 ## Overview The M2M100 model was proposed in [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. The abstract from the paper is the following: *Existing work in translation demonstrated the potential of massively multilingual machine translation by training a single model able to translate between any pair of languages. However, much of this work is English-Centric by training only on data which was translated from or to English. While this is supported by large sources of training data, it does not reflect translation needs worldwide. In this work, we create a true Many-to-Many multilingual translation model that can translate directly between any pair of 100 languages. We build and open source a training dataset that covers thousands of language directions with supervised data, created through large-scale mining. Then, we explore how to effectively increase model capacity through a combination of dense scaling and language-specific sparse parameters to create high quality models. Our focus on non-English-Centric models brings gains of more than 10 BLEU when directly translating between non-English directions while performing competitively to the best single systems of WMT. We open-source our scripts so that others may reproduce the data, evaluation, and final M2M-100 model.* This model was contributed by [valhalla](https://huggingface.co/valhalla). ## Usage tips and examples M2M100 is a multilingual encoder-decoder (seq-to-seq) model primarily intended for translation tasks. As the model is multilingual it expects the sequences in a certain format: A special language id token is used as prefix in both the source and target text. The source text format is `[lang_code] X [eos]`, where `lang_code` is source language id for source text and target language id for target text, with `X` being the source or target text. The [`M2M100Tokenizer`] depends on `sentencepiece` so be sure to install it before running the examples. To install `sentencepiece` run `pip install sentencepiece`. **Supervised Training** ```python from transformers import M2M100Config, M2M100ForConditionalGeneration, M2M100Tokenizer model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr") src_text = "Life is like a box of chocolates." tgt_text = "La vie est comme une boîte de chocolat." 
model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt") loss = model(**model_inputs).loss # forward pass ``` **Generation** M2M100 uses the `eos_token_id` as the `decoder_start_token_id` for generation with the target language id being forced as the first generated token. To force the target language id as the first generated token, pass the *forced_bos_token_id* parameter to the *generate* method. The following example shows how to translate between Hindi to French and Chinese to English using the *facebook/m2m100_418M* checkpoint. ```python >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer >>> hi_text = "जीवन एक चॉकलेट बॉक्स की तरह है।" >>> chinese_text = "生活就像一盒巧克力。" >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M") >>> # translate Hindi to French >>> tokenizer.src_lang = "hi" >>> encoded_hi = tokenizer(hi_text, return_tensors="pt") >>> generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.get_lang_id("fr")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) "La vie est comme une boîte de chocolat." >>> # translate Chinese to English >>> tokenizer.src_lang = "zh" >>> encoded_zh = tokenizer(chinese_text, return_tensors="pt") >>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) "Life is like a box of chocolate." ``` ## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## M2M100Config [[autodoc]] M2M100Config ## M2M100Tokenizer [[autodoc]] M2M100Tokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## M2M100Model [[autodoc]] M2M100Model - forward ## M2M100ForConditionalGeneration [[autodoc]] M2M100ForConditionalGeneration - forward ## Using Flash Attention 2 Flash Attention 2 is a faster, optimized version of the attention scores computation which relies on `cuda` kernels. ### Installation First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2: ```bash pip install -U flash-attn --no-build-isolation ``` ### Usage To load a model using Flash Attention 2, we can pass the argument `attn_implementation="flash_attention_2"` to [`.from_pretrained`](https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel.from_pretrained). You can use either `torch.float16` or `torch.bfloat16` precision. 
```python >>> import torch >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M", torch_dtype=torch.float16, attn_implementation="flash_attention_2").to("cuda").eval() >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M") >>> # translate Hindi to French >>> hi_text = "जीवन एक चॉकलेट बॉक्स की तरह है।" >>> tokenizer.src_lang = "hi" >>> encoded_hi = tokenizer(hi_text, return_tensors="pt").to("cuda") >>> generated_tokens = model.generate(**encoded_hi, forced_bos_token_id=tokenizer.get_lang_id("fr")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) "La vie est comme une boîte de chocolat." ``` ### Expected speedups Below is an expected speedup diagram that compares pure inference time between the native implementation and the Flash Attention 2. <div style="text-align: center"> <img src="https://huggingface.co/datasets/visheratin/documentation-images/resolve/main/nllb-speedup.webp"> </div>
transformers/docs/source/en/model_doc/m2m_100.md/0
{ "file_path": "transformers/docs/source/en/model_doc/m2m_100.md", "repo_id": "transformers", "token_count": 2341 }
251
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Nemotron ## Nemotron ### License The use of this model is governed by the [NVIDIA AI Foundation Models Community License Agreement](https://developer.nvidia.com/downloads/nv-ai-foundation-models-license). ### Description Nemotron-4 is a family of enterprise ready generative text models compatible with [NVIDIA NeMo Framework](https://www.nvidia.com/en-us/ai-data-science/generative-ai/nemo-framework/). NVIDIA NeMo is an end-to-end, cloud-native platform to build, customize, and deploy generative AI models anywhere. It includes training and inferencing frameworks, guardrailing toolkits, data curation tools, and pretrained models, offering enterprises an easy, cost-effective, and fast way to adopt generative AI. To get access to NeMo Framework, please sign up at [this link](https://developer.nvidia.com/nemo-framework/join). ### References [Announcement Blog](https://developer.nvidia.com/blog/nvidia-ai-foundation-models-build-custom-enterprise-chatbots-and-co-pilots-with-production-ready-llms/) ### Model Architecture **Architecture Type:** Transformer **Network Architecture:** Transformer Decoder (auto-regressive language model). ## Minitron ### Minitron 4B Base Minitron is a family of small language models (SLMs) obtained by pruning NVIDIA's [Nemotron-4 15B](https://arxiv.org/abs/2402.16819) model. We prune model embedding size, attention heads, and MLP intermediate dimension, following which, we perform continued training with distillation to arrive at the final models. Deriving the Minitron 8B and 4B models from the base 15B model using our approach requires up to **40x fewer training tokens** per model compared to training from scratch; this results in **compute cost savings of 1.8x** for training the full model family (15B, 8B, and 4B). Minitron models exhibit up to a 16% improvement in MMLU scores compared to training from scratch, perform comparably to other community models such as Mistral 7B, Gemma 7B and Llama-3 8B, and outperform state-of-the-art compression techniques from the literature. Please refer to our [arXiv paper](https://arxiv.org/abs/2407.14679) for more details. Minitron models are for research and development only. ### HuggingFace Quickstart The following code provides an example of how to load the Minitron-4B model and use it to perform text generation. 
```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM # Load the tokenizer and model model_path = 'nvidia/Minitron-4B-Base' tokenizer = AutoTokenizer.from_pretrained(model_path) device = 'cuda' dtype = torch.bfloat16 model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=dtype, device_map=device) # Prepare the input text prompt = 'Complete the paragraph: our solar system is' inputs = tokenizer.encode(prompt, return_tensors='pt').to(model.device) # Generate the output outputs = model.generate(inputs, max_length=20) # Decode and print the output output_text = tokenizer.decode(outputs[0]) print(output_text) ``` ### License Minitron is released under the [NVIDIA Open Model License Agreement](https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf). ### Evaluation Results *5-shot performance.* Language Understanding evaluated using [Massive Multitask Language Understanding](https://arxiv.org/abs/2009.03300): | Average | | :---- | | 58.6 | *Zero-shot performance.* Evaluated using select datasets from the [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) with additions: | HellaSwag | Winogrande | GSM8K| ARC-C | XLSum | | :------------- | :------------- | :------------- | :------------- | :------------- | | 75.0 | 74.0 | 24.1 | 50.9 | 29.5 *Code generation performance*. Evaluated using [HumanEval](https://github.com/openai/human-eval): | p@1, 0-Shot | | :------------- | | 23.3 | Please refer to our [paper](https://arxiv.org/abs/2407.14679) for the full set of results. ### Citation If you find our work helpful, please consider citing our paper: ``` @article{minitron2024, title={Compact Language Models via Pruning and Knowledge Distillation}, author={Saurav Muralidharan and Sharath Turuvekere Sreenivas and Raviraj Joshi and Marcin Chochowski and Mostofa Patwary and Mohammad Shoeybi and Bryan Catanzaro and Jan Kautz and Pavlo Molchanov}, journal={arXiv preprint arXiv:2407.14679}, year={2024}, url={https://arxiv.org/abs/2407.14679}, } ``` ## NemotronConfig [[autodoc]] NemotronConfig ## NemotronModel [[autodoc]] NemotronModel - forward ## NemotronForCausalLM [[autodoc]] NemotronForCausalLM - forward ## NemotronForSequenceClassification [[autodoc]] NemotronForSequenceClassification - forward ## NemotronForQuestionAnswering [[autodoc]] NemotronForQuestionAnswering - forward ## NemotronForTokenClassification [[autodoc]] NemotronForTokenClassification - forward
transformers/docs/source/en/model_doc/nemotron.md/0
{ "file_path": "transformers/docs/source/en/model_doc/nemotron.md", "repo_id": "transformers", "token_count": 1671 }
252
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pegasus <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=pegasus"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-pegasus-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/pegasus_paraphrase"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The Pegasus model was proposed in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu on Dec 18, 2019. According to the abstract, - Pegasus' pretraining task is intentionally similar to summarization: important sentences are removed/masked from an input document and are generated together as one output sequence from the remaining sentences, similar to an extractive summary. - Pegasus achieves SOTA summarization performance on all 12 downstream tasks, as measured by ROUGE and human eval. This model was contributed by [sshleifer](https://huggingface.co/sshleifer). The Authors' code can be found [here](https://github.com/google-research/pegasus). ## Usage tips - Sequence-to-sequence model with the same encoder-decoder model architecture as BART. Pegasus is pre-trained jointly on two self-supervised objective functions: Masked Language Modeling (MLM) and a novel summarization specific pretraining objective, called Gap Sentence Generation (GSG). * MLM: encoder input tokens are randomly replaced by a mask tokens and have to be predicted by the encoder (like in BERT) * GSG: whole encoder input sentences are replaced by a second mask token and fed to the decoder, but which has a causal mask to hide the future words like a regular auto-regressive transformer decoder. - FP16 is not supported (help/ideas on this appreciated!). - The adafactor optimizer is recommended for pegasus fine-tuning. ## Checkpoints All the [checkpoints](https://huggingface.co/models?search=pegasus) are fine-tuned for summarization, besides *pegasus-large*, whence the other checkpoints are fine-tuned: - Each checkpoint is 2.2 GB on disk and 568M parameters. - FP16 is not supported (help/ideas on this appreciated!). - Summarizing xsum in fp32 takes about 400ms/sample, with default parameters on a v100 GPU. - Full replication results and correctly pre-processed data can be found in this [Issue](https://github.com/huggingface/transformers/issues/6844#issue-689259666). - [Distilled checkpoints](https://huggingface.co/models?search=distill-pegasus) are described in this [paper](https://arxiv.org/abs/2010.13002). ## Implementation Notes - All models are transformer encoder-decoders with 16 layers in each component. 
- The implementation is completely inherited from [`BartForConditionalGeneration`] - Some key configuration differences: - static, sinusoidal position embeddings - the model starts generating with pad_token_id (which has 0 token_embedding) as the prefix. - more beams are used (`num_beams=8`) - All pretrained pegasus checkpoints are the same besides three attributes: `tokenizer.model_max_length` (maximum input size), `max_length` (the maximum number of tokens to generate) and `length_penalty`. - The code to convert checkpoints trained in the author's [repo](https://github.com/google-research/pegasus) can be found in `convert_pegasus_tf_to_pytorch.py`. ## Usage Example ```python >>> from transformers import PegasusForConditionalGeneration, PegasusTokenizer >>> import torch >>> src_text = [ ... """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""" ... ] ... model_name = "google/pegasus-xsum" ... device = "cuda" if torch.cuda.is_available() else "cpu" ... tokenizer = PegasusTokenizer.from_pretrained(model_name) ... model = PegasusForConditionalGeneration.from_pretrained(model_name).to(device) ... batch = tokenizer(src_text, truncation=True, padding="longest", return_tensors="pt").to(device) ... translated = model.generate(**batch) ... tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True) ... assert ( ... tgt_text[0] ... == "California's largest electricity provider has turned off power to hundreds of thousands of customers." ... ) ``` ## Resources - [Script](https://github.com/huggingface/transformers/tree/main/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh) to fine-tune pegasus on the XSUM dataset. Data download instructions at [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md). - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## PegasusConfig [[autodoc]] PegasusConfig ## PegasusTokenizer warning: `add_tokens` does not work at the moment. [[autodoc]] PegasusTokenizer ## PegasusTokenizerFast [[autodoc]] PegasusTokenizerFast <frameworkcontent> <pt> ## PegasusModel [[autodoc]] PegasusModel - forward ## PegasusForConditionalGeneration [[autodoc]] PegasusForConditionalGeneration - forward ## PegasusForCausalLM [[autodoc]] PegasusForCausalLM - forward </pt> <tf> ## TFPegasusModel [[autodoc]] TFPegasusModel - call ## TFPegasusForConditionalGeneration [[autodoc]] TFPegasusForConditionalGeneration - call </tf> <jax> ## FlaxPegasusModel [[autodoc]] FlaxPegasusModel - __call__ - encode - decode ## FlaxPegasusForConditionalGeneration [[autodoc]] FlaxPegasusForConditionalGeneration - __call__ - encode - decode </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/pegasus.md/0
{ "file_path": "transformers/docs/source/en/model_doc/pegasus.md", "repo_id": "transformers", "token_count": 1966 }
253
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # RWKV ## Overview The RWKV model was proposed in [this repo](https://github.com/BlinkDL/RWKV-LM) It suggests a tweak in the traditional Transformer attention to make it linear. This way, the model can be used as recurrent network: passing inputs for timestamp 0 and timestamp 1 together is the same as passing inputs at timestamp 0, then inputs at timestamp 1 along with the state of timestamp 0 (see example below). This can be more efficient than a regular Transformer and can deal with sentence of any length (even if the model uses a fixed context length for training). This model was contributed by [sgugger](https://huggingface.co/sgugger). The original code can be found [here](https://github.com/BlinkDL/RWKV-LM). ## Usage example ```py import torch from transformers import AutoTokenizer, RwkvConfig, RwkvModel model = RwkvModel.from_pretrained("sgugger/rwkv-430M-pile") tokenizer = AutoTokenizer.from_pretrained("sgugger/rwkv-430M-pile") inputs = tokenizer("This is an example.", return_tensors="pt") # Feed everything to the model outputs = model(inputs["input_ids"]) output_whole = outputs.last_hidden_state outputs = model(inputs["input_ids"][:, :2]) output_one = outputs.last_hidden_state # Using the state computed on the first inputs, we will get the same output outputs = model(inputs["input_ids"][:, 2:], state=outputs.state) output_two = outputs.last_hidden_state torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-5) ``` If you want to make sure the model stops generating when `'\n\n'` is detected, we recommend using the following stopping criteria: ```python from transformers import StoppingCriteria class RwkvStoppingCriteria(StoppingCriteria): def __init__(self, eos_sequence = [187,187], eos_token_id = 537): self.eos_sequence = eos_sequence self.eos_token_id = eos_token_id def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: last_2_ids = input_ids[:,-2:].tolist() return self.eos_sequence in last_2_ids output = model.generate(inputs["input_ids"], max_new_tokens=64, stopping_criteria = [RwkvStoppingCriteria()]) ``` ## RwkvConfig [[autodoc]] RwkvConfig ## RwkvModel [[autodoc]] RwkvModel - forward ## RwkvLMHeadModel [[autodoc]] RwkvForCausalLM - forward ## Rwkv attention and the recurrent formulas In a traditional auto-regressive Transformer, attention is written as $$O = \hbox{softmax}(QK^{T} / \sqrt{d}) V$$ with \\(Q\\), \\(K\\) and \\(V\\) are matrices of shape `seq_len x hidden_size` named query, key and value (they are actually bigger matrices with a batch dimension and an attention head dimension but we're only interested in the last two, which is where the matrix product is taken, so for the sake of simplicity we only consider those two). 
The product \\(QK^{T}\\) then has shape `seq_len x seq_len` and we can take the matrix product with \\(V\\) to get the output \\(O\\) of the same shape as the others.

Replacing the softmax by its value gives:

$$O_{i} = \frac{\sum_{j=1}^{i} e^{Q_{i} K_{j}^{T} / \sqrt{d}} V_{j}}{\sum_{j=1}^{i} e^{Q_{i} K_{j}^{T} / \sqrt{d}}}$$

Note that the entries in \\(QK^{T}\\) corresponding to \\(j > i\\) are masked (the sum stops at \\(j = i\\)) because the attention is not allowed to look at future tokens (only past ones).

In comparison, the RWKV attention is given by

$$O_{i} = \sigma(R_{i}) \frac{\sum_{j=1}^{i} e^{W_{i-j} + K_{j}} V_{j}}{\sum_{j=1}^{i} e^{W_{i-j} + K_{j}}}$$

where \\(R\\) is a new matrix called receptance by the author, \\(K\\) and \\(V\\) are still the key and value (\\(\sigma\\) here is the sigmoid function). \\(W\\) is a new vector that represents the position of the token and is given by

$$W_{0} = u \hbox{ and } W_{k} = (k-1)w \hbox{ for } k \geq 1$$

with \\(u\\) and \\(w\\) learnable parameters called in the code `time_first` and `time_decay` respectively. The numerator and denominator can both be expressed recursively. Naming them \\(N_{i}\\) and \\(D_{i}\\) we have:

$$N_{i} = e^{u + K_{i}} V_{i} + \hat{N}_{i} \hbox{ where } \hat{N}_{i} = e^{K_{i-1}} V_{i-1} + e^{w + K_{i-2}} V_{i-2} + \cdots + e^{(i-2)w + K_{1}} V_{1}$$

so \\(\hat{N}_{i}\\) (called `numerator_state` in the code) satisfies

$$\hat{N}_{0} = 0 \hbox{ and } \hat{N}_{j+1} = e^{K_{j}} V_{j} + e^{w} \hat{N}_{j}$$

and

$$D_{i} = e^{u + K_{i}} + \hat{D}_{i} \hbox{ where } \hat{D}_{i} = e^{K_{i-1}} + e^{w + K_{i-2}} + \cdots + e^{(i-2)w + K_{1}}$$

so \\(\hat{D}_{i}\\) (called `denominator_state` in the code) satisfies

$$\hat{D}_{0} = 0 \hbox{ and } \hat{D}_{j+1} = e^{K_{j}} + e^{w} \hat{D}_{j}$$

The actual recurrent formulas used are a tiny bit more complex, as for numerical stability we don't want to compute exponentials of big numbers. Usually the softmax is not computed as is: instead, the exponential of the maximum term is divided out of the numerator and denominator:

$$\frac{e^{x_{i}}}{\sum_{j=1}^{n} e^{x_{j}}} = \frac{e^{x_{i} - M}}{\sum_{j=1}^{n} e^{x_{j} - M}}$$

with \\(M\\) the maximum of all \\(x_{j}\\). So here, on top of saving the numerator state (\\(\hat{N}\\)) and the denominator state (\\(\hat{D}\\)), we also keep track of the maximum of all terms encountered in the exponentials. So we actually use

$$\tilde{N}_{i} = e^{-M_{i}} \hat{N}_{i} \hbox{ and } \tilde{D}_{i} = e^{-M_{i}} \hat{D}_{i}$$

defined by the following recurrent formulas:

$$\tilde{N}_{0} = 0 \hbox{ and } \tilde{N}_{j+1} = e^{K_{j} - q} V_{j} + e^{w + M_{j} - q} \tilde{N}_{j} \hbox{ where } q = \max(K_{j}, w + M_{j})$$

and

$$\tilde{D}_{0} = 0 \hbox{ and } \tilde{D}_{j+1} = e^{K_{j} - q} + e^{w + M_{j} - q} \tilde{D}_{j} \hbox{ where } q = \max(K_{j}, w + M_{j})$$

and \\(M_{j+1} = q\\). With those, we can then compute the rescaled quantities

$$N_{i} = e^{u + K_{i} - q} V_{i} + e^{M_{i} - q} \tilde{N}_{i} \hbox{ where } q = \max(u + K_{i}, M_{i})$$

and

$$D_{i} = e^{u + K_{i} - q} + e^{M_{i} - q} \tilde{D}_{i} \hbox{ where } q = \max(u + K_{i}, M_{i})$$

Both are rescaled by the same factor \\(e^{-q}\\), which cancels in the ratio, so this finally gives us

$$O_{i} = \sigma(R_{i}) \frac{N_{i}}{D_{i}}$$
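To make the recurrence concrete, here is a simplified, non-optimized PyTorch sketch of a single recurrent step for one token, following the stabilized formulas above. The function name, tensor shapes and state layout are illustrative; the actual modeling code vectorizes this over batch and hidden dimensions and may fuse it into a CUDA kernel:

```python
import torch


def rwkv_attention_step(r, k, v, time_first, time_decay, state):
    """One stabilized RWKV attention step for a single token.

    r, k, v: tensors of shape (hidden_size,) for the current token.
    time_first: the learnable u vector; time_decay: the learnable w vector (a decay, typically negative).
    state: (num_state, den_state, max_state), i.e. the rescaled numerator/denominator
    and the running maximum M carried over from the previous token.
    """
    num_state, den_state, max_state = state

    # Output for the current token: N_i / D_i with the shared maximum q subtracted in the exponents
    q = torch.maximum(max_state, time_first + k)
    e1 = torch.exp(max_state - q)        # rescales the carried state
    e2 = torch.exp(time_first + k - q)   # weight of the current token
    output = torch.sigmoid(r) * (e1 * num_state + e2 * v) / (e1 * den_state + e2)

    # State update: the recurrences for the rescaled numerator, denominator and maximum
    q = torch.maximum(max_state + time_decay, k)
    e1 = torch.exp(max_state + time_decay - q)
    e2 = torch.exp(k - q)
    new_state = (e1 * num_state + e2 * v, e1 * den_state + e2, q)
    return output, new_state
```

Scanning this step over the time dimension reproduces the attention output token by token, which is what allows RWKV to be run as a recurrent network at inference time.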
transformers/docs/source/en/model_doc/rwkv.md/0
{ "file_path": "transformers/docs/source/en/model_doc/rwkv.md", "repo_id": "transformers", "token_count": 2548 }
254
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. specific language governing permissions and limitations under the License. --> # TrOCR ## Overview The TrOCR model was proposed in [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. TrOCR consists of an image Transformer encoder and an autoregressive text Transformer decoder to perform [optical character recognition (OCR)](https://en.wikipedia.org/wiki/Optical_character_recognition). The abstract from the paper is the following: *Text recognition is a long-standing research problem for document digitalization. Existing approaches for text recognition are usually built based on CNN for image understanding and RNN for char-level text generation. In addition, another language model is usually needed to improve the overall accuracy as a post-processing step. In this paper, we propose an end-to-end text recognition approach with pre-trained image Transformer and text Transformer models, namely TrOCR, which leverages the Transformer architecture for both image understanding and wordpiece-level text generation. The TrOCR model is simple but effective, and can be pre-trained with large-scale synthetic data and fine-tuned with human-labeled datasets. Experiments show that the TrOCR model outperforms the current state-of-the-art models on both printed and handwritten text recognition tasks.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/trocr_architecture.jpg" alt="drawing" width="600"/> <small> TrOCR architecture. Taken from the <a href="https://arxiv.org/abs/2109.10282">original paper</a>. </small> Please refer to the [`VisionEncoderDecoder`] class on how to use this model. This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/6f60612e7cc86a2a1ae85c47231507a587ab4e01/trocr). ## Usage tips - The quickest way to get started with TrOCR is by checking the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/TrOCR), which show how to use the model at inference time as well as fine-tuning on custom data. - TrOCR is pre-trained in 2 stages before being fine-tuned on downstream datasets. It achieves state-of-the-art results on both printed (e.g. the [SROIE dataset](https://paperswithcode.com/dataset/sroie) and handwritten (e.g. the [IAM Handwriting dataset](https://fki.tic.heia-fr.ch/databases/iam-handwriting-database>) text recognition tasks. For more information, see the [official models](https://huggingface.co/models?other=trocr>). - TrOCR is always used within the [VisionEncoderDecoder](vision-encoder-decoder) framework. 
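For quick experiments, TrOCR checkpoints can also be driven through the [`pipeline`] API. This is a minimal sketch; the handwritten checkpoint and the IAM image URL (used again in the Inference section below) are just examples:

```py
from transformers import pipeline

ocr = pipeline("image-to-text", model="microsoft/trocr-base-handwritten")
ocr("https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg")
# [{'generated_text': '...'}]
```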
## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with TrOCR. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-classification"/> - A blog post on [Accelerating Document AI](https://huggingface.co/blog/document-ai) with TrOCR. - A blog post on how to do [Document AI](https://github.com/philschmid/document-ai-transformers) with TrOCR. - A notebook on how to [fine-tune TrOCR on the IAM Handwriting Database using Seq2SeqTrainer](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TrOCR/Fine_tune_TrOCR_on_IAM_Handwriting_Database_using_Seq2SeqTrainer.ipynb). - A notebook on [inference with TrOCR](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TrOCR/Inference_with_TrOCR_%2B_Gradio_demo.ipynb) and a Gradio demo. - A notebook on how to [fine-tune TrOCR on the IAM Handwriting Database](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TrOCR/Fine_tune_TrOCR_on_IAM_Handwriting_Database_using_native_PyTorch.ipynb) using native PyTorch. - A notebook on [evaluating TrOCR on the IAM test set](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TrOCR/Evaluating_TrOCR_base_handwritten_on_the_IAM_test_set.ipynb). <PipelineTag pipeline="text-generation"/> - [Causal language modeling](https://huggingface.co/docs/transformers/tasks/language_modeling) task guide. ⚡️ Inference - An interactive demo on [TrOCR handwritten character recognition](https://huggingface.co/spaces/nielsr/TrOCR-handwritten). ## Inference TrOCR's [`VisionEncoderDecoder`] model accepts images as input and makes use of [`~generation.GenerationMixin.generate`] to autoregressively generate text given the input image. The [`ViTImageProcessor`/`DeiTImageProcessor`] class is responsible for preprocessing the input image and [`RobertaTokenizer`/`XLMRobertaTokenizer`] decodes the generated target tokens to the target string. The [`TrOCRProcessor`] wraps [`ViTImageProcessor`/`DeiTImageProcessor`] and [`RobertaTokenizer`/`XLMRobertaTokenizer`] into a single instance to both extract the input features and decode the predicted token ids. - Step-by-step Optical Character Recognition (OCR) ``` py >>> from transformers import TrOCRProcessor, VisionEncoderDecoderModel >>> import requests >>> from PIL import Image >>> processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") >>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten") >>> # load image from the IAM dataset >>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> pixel_values = processor(image, return_tensors="pt").pixel_values >>> generated_ids = model.generate(pixel_values) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] ``` See the [model hub](https://huggingface.co/models?filter=trocr) to look for TrOCR checkpoints. ## TrOCRConfig [[autodoc]] TrOCRConfig ## TrOCRProcessor [[autodoc]] TrOCRProcessor - __call__ - from_pretrained - save_pretrained - batch_decode - decode ## TrOCRForCausalLM [[autodoc]] TrOCRForCausalLM - forward
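As a complement to the step-by-step example above, TrOCR checkpoints can also be run through the high-level `image-to-text` pipeline. A minimal sketch is shown below; it reuses the same checkpoint and sample IAM image as the example above, and the printed output is elided rather than guessed:

```py
>>> from transformers import pipeline

>>> # the pipeline wraps the processor and model shown in the step-by-step example
>>> ocr = pipeline("image-to-text", model="microsoft/trocr-base-handwritten")
>>> ocr("https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg")
[{'generated_text': '...'}]
```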
transformers/docs/source/en/model_doc/trocr.md/0
{ "file_path": "transformers/docs/source/en/model_doc/trocr.md", "repo_id": "transformers", "token_count": 2132 }
255
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # VisionTextDualEncoder ## Overview The [`VisionTextDualEncoderModel`] can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder (*e.g.* [ViT](vit), [BEiT](beit), [DeiT](deit)) and any pretrained text autoencoding model as the text encoder (*e.g.* [RoBERTa](roberta), [BERT](bert)). Two projection layers are added on top of both the vision and text encoders to project the output embeddings to a shared latent space. The projection layers are randomly initialized, so the model should be fine-tuned on a downstream task. This model can be used to align the vision-text embeddings using CLIP-like contrastive image-text training, and can then be used for zero-shot vision tasks such as image classification or retrieval. In [LiT: Zero-Shot Transfer with Locked-image Text Tuning](https://arxiv.org/abs/2111.07991) it is shown how leveraging pre-trained (locked/frozen) image and text models for contrastive learning yields significant improvement on new zero-shot vision tasks such as image classification or retrieval. ## VisionTextDualEncoderConfig [[autodoc]] VisionTextDualEncoderConfig ## VisionTextDualEncoderProcessor [[autodoc]] VisionTextDualEncoderProcessor <frameworkcontent> <pt> ## VisionTextDualEncoderModel [[autodoc]] VisionTextDualEncoderModel - forward </pt> <tf> ## TFVisionTextDualEncoderModel [[autodoc]] TFVisionTextDualEncoderModel - call </tf> <jax> ## FlaxVisionTextDualEncoderModel [[autodoc]] FlaxVisionTextDualEncoderModel - __call__ </jax> </frameworkcontent>
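To make the overview above concrete, here is a minimal sketch of pairing a pretrained ViT vision encoder with a pretrained BERT text encoder. The checkpoint names are illustrative choices, and remember that the projection layers start out randomly initialized, so the model needs contrastive fine-tuning before its similarity scores become meaningful:

```py
from transformers import (
    AutoImageProcessor,
    AutoTokenizer,
    VisionTextDualEncoderModel,
    VisionTextDualEncoderProcessor,
)

# glue a pretrained vision encoder and a pretrained text encoder together;
# the two projection layers on top are randomly initialized
model = VisionTextDualEncoderModel.from_vision_text_pretrained(
    "google/vit-base-patch16-224", "google-bert/bert-base-uncased"
)

# wrap the matching preprocessors into a single processor
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
processor = VisionTextDualEncoderProcessor(image_processor, tokenizer)
```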
transformers/docs/source/en/model_doc/vision-text-dual-encoder.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vision-text-dual-encoder.md", "repo_id": "transformers", "token_count": 652 }
256
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Perplexity of fixed-length models [[open-in-colab]] Perplexity (PPL) is one of the most common metrics for evaluating language models. Before diving in, we should note that the metric applies specifically to classical language models (sometimes called autoregressive or causal language models) and is not well defined for masked language models like BERT (see [summary of the models](model_summary)). Perplexity is defined as the exponentiated average negative log-likelihood of a sequence. If we have a tokenized sequence \\(X = (x_0, x_1, \dots, x_t)\\), then the perplexity of \\(X\\) is, $$\text{PPL}(X) = \exp \left\{ {-\frac{1}{t}\sum_i^t \log p_\theta (x_i|x_{<i}) } \right\}$$ where \\(\log p_\theta (x_i|x_{<i})\\) is the log-likelihood of the ith token conditioned on the preceding tokens \\(x_{<i}\\) according to our model. Intuitively, it can be thought of as an evaluation of the model's ability to predict uniformly among the set of specified tokens in a corpus. Importantly, this means that the tokenization procedure has a direct impact on a model's perplexity which should always be taken into consideration when comparing different models. This is also equivalent to the exponentiation of the cross-entropy between the data and model predictions. For more intuition about perplexity and its relationship to Bits Per Character (BPC) and data compression, check out this [fantastic blog post on The Gradient](https://thegradient.pub/understanding-evaluation-metrics-for-language-models/). ## Calculating PPL with fixed-length models If we weren't limited by a model's context size, we would evaluate the model's perplexity by autoregressively factorizing a sequence and conditioning on the entire preceding subsequence at each step, as shown below. <img width="600" alt="Full decomposition of a sequence with unlimited context length" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_full.gif"/> When working with approximate models, however, we typically have a constraint on the number of tokens the model can process. The largest version of [GPT-2](model_doc/gpt2), for example, has a fixed length of 1024 tokens, so we cannot calculate \\(p_\theta(x_t|x_{<t})\\) directly when \\(t\\) is greater than 1024. Instead, the sequence is typically broken into subsequences equal to the model's maximum input size. If a model's max input size is \\(k\\), we then approximate the likelihood of a token \\(x_t\\) by conditioning only on the \\(k-1\\) tokens that precede it rather than the entire context. When evaluating the model's perplexity of a sequence, a tempting but suboptimal approach is to break the sequence into disjoint chunks and add up the decomposed log-likelihoods of each segment independently. 
<img width="600" alt="Suboptimal PPL not taking advantage of full available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_chunked.gif"/> This is quick to compute since the perplexity of each segment can be computed in one forward pass, but serves as a poor approximation of the fully-factorized perplexity and will typically yield a higher (worse) PPL because the model will have less context at most of the prediction steps. Instead, the PPL of fixed-length models should be evaluated with a sliding-window strategy. This involves repeatedly sliding the context window so that the model has more context when making each prediction. <img width="600" alt="Sliding window PPL taking advantage of all available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_sliding.gif"/> This is a closer approximation to the true decomposition of the sequence probability and will typically yield a more favorable score. The downside is that it requires a separate forward pass for each token in the corpus. A good practical compromise is to employ a strided sliding window, moving the context by larger strides rather than sliding by 1 token a time. This allows computation to proceed much faster while still giving the model a large context to make predictions at each step. ## Example: Calculating perplexity with GPT-2 in 🤗 Transformers Let's demonstrate this process with GPT-2. ```python from transformers import GPT2LMHeadModel, GPT2TokenizerFast device = "cuda" model_id = "openai-community/gpt2-large" model = GPT2LMHeadModel.from_pretrained(model_id).to(device) tokenizer = GPT2TokenizerFast.from_pretrained(model_id) ``` We'll load in the WikiText-2 dataset and evaluate the perplexity using a few different sliding-window strategies. Since this dataset is small and we're just doing one forward pass over the set, we can just load and encode the entire dataset in memory. ```python from datasets import load_dataset test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test") encodings = tokenizer("\n\n".join(test["text"]), return_tensors="pt") ``` With 🤗 Transformers, we can simply pass the `input_ids` as the `labels` to our model, and the average negative log-likelihood for each token is returned as the loss. With our sliding window approach, however, there is overlap in the tokens we pass to the model at each iteration. We don't want the log-likelihood for the tokens we're just treating as context to be included in our loss, so we can set these targets to `-100` so that they are ignored. The following is an example of how we could do this with a stride of `512`. This means that the model will have at least 512 tokens for context when calculating the conditional likelihood of any one token (provided there are 512 preceding tokens available to condition on). ```python import torch from tqdm import tqdm max_length = model.config.n_positions stride = 512 seq_len = encodings.input_ids.size(1) nlls = [] prev_end_loc = 0 for begin_loc in tqdm(range(0, seq_len, stride)): end_loc = min(begin_loc + max_length, seq_len) trg_len = end_loc - prev_end_loc # may be different from stride on last loop input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device) target_ids = input_ids.clone() target_ids[:, :-trg_len] = -100 with torch.no_grad(): outputs = model(input_ids, labels=target_ids) # loss is calculated using CrossEntropyLoss which averages over valid labels # N.B. 
the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels # to the left by 1. neg_log_likelihood = outputs.loss nlls.append(neg_log_likelihood) prev_end_loc = end_loc if end_loc == seq_len: break ppl = torch.exp(torch.stack(nlls).mean()) ``` Running this with the stride length equal to the max input length is equivalent to the suboptimal, non-sliding-window strategy we discussed above. The smaller the stride, the more context the model will have in making each prediction, and the better the reported perplexity will typically be. When we run the above with `stride = 1024`, i.e. no overlap, the resulting PPL is `19.44`, which is about the same as the `19.93` reported in the GPT-2 paper. By using `stride = 512` and thereby employing our sliding-window strategy, this jumps down to `16.45`. This is not only a more favorable score, but is calculated in a way that is closer to the true autoregressive decomposition of a sequence likelihood.
transformers/docs/source/en/perplexity.md/0
{ "file_path": "transformers/docs/source/en/perplexity.md", "repo_id": "transformers", "token_count": 2263 }
257
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quanto <Tip> Try Quanto + transformers with this [notebook](https://colab.research.google.com/drive/16CXfVmtdQvciSh9BopZUDYcmXCDpvgrT?usp=sharing)! </Tip> The [🤗 Quanto](https://github.com/huggingface/quanto) library is a versatile pytorch quantization toolkit. The quantization method used is linear quantization. Quanto provides several unique features such as: - weights quantization (`float8`,`int8`,`int4`,`int2`) - activation quantization (`float8`,`int8`) - modality agnostic (e.g. CV, LLM) - device agnostic (e.g. CUDA, MPS, CPU) - compatibility with `torch.compile` - easy to add custom kernels for a specific device - supports quantization aware training <!-- Add link to the blogpost --> Before you begin, make sure the following libraries are installed: ```bash pip install quanto accelerate transformers ``` Now you can quantize a model by passing a [`QuantoConfig`] object to the [`~PreTrainedModel.from_pretrained`] method. This works for any model in any modality, as long as it contains `torch.nn.Linear` layers. The integration with transformers only supports weights quantization. For more complex use cases such as activation quantization, calibration and quantization aware training, you should use the [quanto](https://github.com/huggingface/quanto) library instead. ```py from transformers import AutoModelForCausalLM, AutoTokenizer, QuantoConfig model_id = "facebook/opt-125m" tokenizer = AutoTokenizer.from_pretrained(model_id) quantization_config = QuantoConfig(weights="int8") quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda:0", quantization_config=quantization_config) ``` Note that serialization is not supported yet with transformers but it is coming soon! If you want to save the model, you can use the quanto library instead. The quanto library uses a linear quantization algorithm. Even though this is a basic quantization technique, we get very good results! Have a look at the following benchmark (llama-2-7b on the perplexity metric). You can find more benchmarks [here](https://github.com/huggingface/quanto/tree/main/bench/generation) <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/NousResearch-Llama-2-7b-hf_Perplexity.png" alt="llama-2-7b-quanto-perplexity" /> </div> </div> The library is versatile enough to be compatible with most PTQ optimization algorithms. The plan in the future is to integrate the most popular algorithms in the most seamless possible way (AWQ, Smoothquant).
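Once loaded, the quantized model can be used like any other transformers model. A minimal sketch of a generation sanity check, continuing from the snippet above (the prompt and generation length are arbitrary choices):

```py
# run a quick generation with the int8-quantized model from above
inputs = tokenizer("Hello my name is", return_tensors="pt").to(quantized_model.device)
outputs = quantized_model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```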
transformers/docs/source/en/quantization/quanto.md/0
{ "file_path": "transformers/docs/source/en/quantization/quanto.md", "repo_id": "transformers", "token_count": 958 }
258
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Knowledge Distillation for Computer Vision [[open-in-colab]] Knowledge distillation is a technique used to transfer knowledge from a larger, more complex model (teacher) to a smaller, simpler model (student). To distill knowledge from one model to another, we take a pre-trained teacher model trained on a certain task (image classification for this case) and randomly initialize a student model to be trained on image classification. Next, we train the student model to minimize the difference between its outputs and the teacher's outputs, thus making it mimic the teacher's behavior. It was first introduced in [Distilling the Knowledge in a Neural Network by Hinton et al](https://arxiv.org/abs/1503.02531). In this guide, we will do task-specific knowledge distillation. We will use the [beans dataset](https://huggingface.co/datasets/beans) for this. This guide demonstrates how you can distill a [fine-tuned ViT model](https://huggingface.co/merve/vit-mobilenet-beans-224) (teacher model) to a [MobileNet](https://huggingface.co/google/mobilenet_v2_1.4_224) (student model) using the [Trainer API](https://huggingface.co/docs/transformers/en/main_classes/trainer#trainer) of 🤗 Transformers. Let's install the libraries needed for distillation and evaluating the process. ```bash pip install transformers datasets accelerate tensorboard evaluate --upgrade ``` In this example, we are using the `merve/beans-vit-224` model as the teacher model. It's an image classification model, based on `google/vit-base-patch16-224-in21k`, fine-tuned on the beans dataset. We will distill this model to a randomly initialized MobileNetV2. We will now load the dataset. ```python from datasets import load_dataset dataset = load_dataset("beans") ``` We can use an image processor from either of the models, as in this case they return the same output with the same resolution. We will use the `map()` method of `dataset` to apply the preprocessing to every split of the dataset. ```python from transformers import AutoImageProcessor teacher_processor = AutoImageProcessor.from_pretrained("merve/beans-vit-224") def process(examples): processed_inputs = teacher_processor(examples["image"]) return processed_inputs processed_datasets = dataset.map(process, batched=True) ``` Essentially, we want the student model (a randomly initialized MobileNet) to mimic the teacher model (fine-tuned vision transformer). To achieve this, we first get the logits output from the teacher and the student. Then, we divide each of them by the parameter `temperature` which controls the importance of each soft target. A parameter called `lambda` weighs the importance of the distillation loss. In this example, we will use `temperature=5` and `lambda=0.5`. 
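Before wiring `temperature` into the trainer below, here is a small self-contained illustration of why dividing by a temperature greater than 1 helps (the logits are made-up numbers): it softens the teacher's distribution, so the student also learns from the relative scores the teacher assigns to the wrong classes.

```python
import torch
import torch.nn.functional as F

teacher_logits = torch.tensor([4.0, 1.0, -2.0])  # hypothetical teacher logits for 3 classes
print(F.softmax(teacher_logits, dim=-1))        # sharp: ~[0.95, 0.05, 0.00]
print(F.softmax(teacher_logits / 5.0, dim=-1))  # temperature=5 softens it: ~[0.54, 0.30, 0.16]
```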
We will use the Kullback-Leibler Divergence loss to compute the divergence between the student and teacher. Given two distributions P and Q, KL Divergence measures how much extra information we need to represent P using Q. If the two are identical, their KL divergence is zero, as no extra information is needed to explain P from Q. Thus, in the context of knowledge distillation, KL divergence is useful. ```python from transformers import TrainingArguments, Trainer import torch import torch.nn as nn import torch.nn.functional as F class ImageDistilTrainer(Trainer): def __init__(self, teacher_model=None, student_model=None, temperature=None, lambda_param=None, *args, **kwargs): super().__init__(model=student_model, *args, **kwargs) self.teacher = teacher_model self.student = student_model self.loss_function = nn.KLDivLoss(reduction="batchmean") device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.teacher.to(device) self.teacher.eval() self.temperature = temperature self.lambda_param = lambda_param def compute_loss(self, student, inputs, return_outputs=False): student_output = self.student(**inputs) with torch.no_grad(): teacher_output = self.teacher(**inputs) # Compute soft targets for teacher and student soft_teacher = F.softmax(teacher_output.logits / self.temperature, dim=-1) soft_student = F.log_softmax(student_output.logits / self.temperature, dim=-1) # Compute the loss distillation_loss = self.loss_function(soft_student, soft_teacher) * (self.temperature ** 2) # Compute the true label loss student_target_loss = student_output.loss # Calculate final loss loss = (1. - self.lambda_param) * student_target_loss + self.lambda_param * distillation_loss return (loss, student_output) if return_outputs else loss ``` We will now log in to the Hugging Face Hub so we can push our model to the Hugging Face Hub through the `Trainer`. ```python from huggingface_hub import notebook_login notebook_login() ``` Let's set the `TrainingArguments`, the teacher model and the student model. ```python from transformers import AutoModelForImageClassification, MobileNetV2Config, MobileNetV2ForImageClassification repo_name = "my-awesome-model" # used for the output directory, logging and the Hub repo id training_args = TrainingArguments( output_dir=repo_name, num_train_epochs=30, fp16=True, logging_dir=f"{repo_name}/logs", logging_strategy="epoch", eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, metric_for_best_model="accuracy", report_to="tensorboard", push_to_hub=True, hub_strategy="every_save", hub_model_id=repo_name, ) num_labels = len(processed_datasets["train"].features["labels"].names) # initialize models teacher_model = AutoModelForImageClassification.from_pretrained( "merve/beans-vit-224", num_labels=num_labels, ignore_mismatched_sizes=True ) # training MobileNetV2 from scratch student_config = MobileNetV2Config() student_config.num_labels = num_labels student_model = MobileNetV2ForImageClassification(student_config) ``` We can use the `compute_metrics` function to evaluate our model on the test set. This function will be used during the training process to compute the `accuracy` of our model. ```python import evaluate import numpy as np accuracy = evaluate.load("accuracy") def compute_metrics(eval_pred): predictions, labels = eval_pred acc = accuracy.compute(references=labels, predictions=np.argmax(predictions, axis=1)) return {"accuracy": acc["accuracy"]} ``` Let's initialize the `Trainer` with the training arguments we defined. We will also initialize our data collator. 
```python from transformers import DefaultDataCollator data_collator = DefaultDataCollator() trainer = ImageDistilTrainer( student_model=student_model, teacher_model=teacher_model, args=training_args, train_dataset=processed_datasets["train"], eval_dataset=processed_datasets["validation"], data_collator=data_collator, tokenizer=teacher_processor, compute_metrics=compute_metrics, temperature=5, lambda_param=0.5 ) ``` We can now train our model. ```python trainer.train() ``` We can evaluate the model on the test set. ```python trainer.evaluate(processed_datasets["test"]) ``` On the test set, our model reaches 72 percent accuracy. As a sanity check on the efficiency of distillation, we also trained MobileNet on the beans dataset from scratch with the same hyperparameters and observed 63 percent accuracy on the test set. We invite the readers to try different pre-trained teacher models, student architectures, distillation parameters and report their findings. The training logs and checkpoints for the distilled model can be found in [this repository](https://huggingface.co/merve/vit-mobilenet-beans-224), and MobileNetV2 trained from scratch can be found in this [repository](https://huggingface.co/merve/resnet-mobilenet-beans-5).
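If you only want to try out a distilled model rather than retrain it, a pipeline-based check along these lines should work, assuming the linked repository hosts a standard image-classification checkpoint (the image path below is a placeholder):

```python
from transformers import pipeline

# assumes the distilled checkpoint linked above loads as a standard model
classifier = pipeline("image-classification", model="merve/vit-mobilenet-beans-224")
print(classifier("path/to/bean_leaf.jpg"))  # placeholder image path
```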
transformers/docs/source/en/tasks/knowledge_distillation_for_image_classification.md/0
{ "file_path": "transformers/docs/source/en/tasks/knowledge_distillation_for_image_classification.md", "repo_id": "transformers", "token_count": 2621 }
259
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Video-text-to-text [[open-in-colab]] Video-text-to-text models, also known as video language models or vision language models with video input, are language models that take a video input. These models can tackle various tasks, from video question answering to video captioning. These models have nearly the same architecture as [image-text-to-text](../image_text_to_text.md) models except for some changes to accept video data, since video data is essentially image frames with temporal dependencies. Some image-text-to-text models take in multiple images, but this alone is inadequate for a model to accept videos. Moreover, video-text-to-text models are often trained with all vision modalities. Each example might contain a single video, multiple videos, a single image or multiple images. Some of these models can also take interleaved inputs. For example, you can refer to a specific video inside a string of text by adding a video token in text like "What is happening in this video? `<video>`". In this guide, we provide a brief overview of video LMs and show how to use them with Transformers for inference. To begin with, there are multiple types of video LMs: - base models used for fine-tuning - chat fine-tuned models for conversation - instruction fine-tuned models This guide focuses on inference with an instruction-tuned model, [llava-hf/llava-interleave-qwen-7b-hf](https://huggingface.co/llava-hf/llava-interleave-qwen-7b-hf) which can take in interleaved data. Alternatively, you can try [llava-interleave-qwen-0.5b-hf](https://huggingface.co/llava-hf/llava-interleave-qwen-0.5b-hf) if your hardware doesn't allow running a 7B model. Let's begin by installing the dependencies. ```bash pip install -q transformers accelerate flash_attn ``` Let's initialize the model and the processor. ```python from transformers import LlavaProcessor, LlavaForConditionalGeneration import torch model_id = "llava-hf/llava-interleave-qwen-0.5b-hf" processor = LlavaProcessor.from_pretrained(model_id) model = LlavaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16) model.to("cuda") ``` Some models directly consume the `<video>` token, and others accept `<image>` tokens equal to the number of sampled frames. This model handles videos in the latter fashion. We will write a simple utility to handle image tokens, and another utility to get a video from a url and sample frames from it. 
```python import uuid import requests import cv2 from PIL import Image def replace_video_with_images(text, frames): return text.replace("<video>", "<image>" * frames) def sample_frames(url, num_frames): response = requests.get(url) path_id = str(uuid.uuid4()) path = f"./{path_id}.mp4" with open(path, "wb") as f: f.write(response.content) video = cv2.VideoCapture(path) total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) interval = total_frames // num_frames frames = [] for i in range(total_frames): ret, frame = video.read() if not ret: continue # skip frames that could not be read before converting them if i % interval == 0: pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) frames.append(pil_img) video.release() return frames ``` Let's get our inputs. We will sample frames and concatenate them. ```python video_1 = "https://huggingface.co/spaces/merve/llava-interleave/resolve/main/cats_1.mp4" video_2 = "https://huggingface.co/spaces/merve/llava-interleave/resolve/main/cats_2.mp4" video_1 = sample_frames(video_1, 6) video_2 = sample_frames(video_2, 6) videos = video_1 + video_2 videos # [<PIL.Image.Image image mode=RGB size=1920x1080>, # <PIL.Image.Image image mode=RGB size=1920x1080>, # <PIL.Image.Image image mode=RGB size=1920x1080>, ...] ``` Both videos have cats. <div class="container"> <div class="video-container"> <video width="400" controls> <source src="https://huggingface.co/spaces/merve/llava-interleave/resolve/main/cats_1.mp4" type="video/mp4"> </video> </div> <div class="video-container"> <video width="400" controls> <source src="https://huggingface.co/spaces/merve/llava-interleave/resolve/main/cats_2.mp4" type="video/mp4"> </video> </div> </div> Now we can preprocess the inputs. This model has a prompt template that looks like the following. First, we'll put all the sampled frames into one list. Since we sampled six frames from each video (12 in total), we will insert 12 `<image>` tokens into our prompt. Add `assistant` at the end of the prompt to trigger the model to give answers. Then we can preprocess. 
transformers/docs/source/en/tasks/video_text_to_text.md/0
{ "file_path": "transformers/docs/source/en/tasks/video_text_to_text.md", "repo_id": "transformers", "token_count": 2016 }
260
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # How can I create a custom pipeline? In this guide, we will see how to create a custom pipeline and how to share it on the [Hub](https://hf.co/models) or add it to the 🤗 Transformers library. First of all, you must decide the inputs your pipeline will be able to take. They can be strings, bytes, dictionaries or whatever seems to be the most appropriate input. Try to keep these inputs in a format that is as pure Python as possible, since this makes compatibility easier (even with other programming languages via JSON). These will be the `inputs` of the pipeline (`preprocess`). Now you must define the `outputs`. Just like with the `inputs`, the simpler the format, the better. These will be the outputs of the `postprocess` method. Start by inheriting the base class `Pipeline` with the 4 methods we must implement: `preprocess`, `_forward`, `postprocess` and `_sanitize_parameters`. ```python from transformers import Pipeline class MyPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, inputs, maybe_arg=2): model_input = Tensor(inputs["input_ids"]) return {"model_input": model_input} def _forward(self, model_inputs): # model_inputs == {"model_input": model_input} outputs = self.model(**model_inputs) # Maybe {"logits": Tensor(...)} return outputs def postprocess(self, model_outputs): best_class = model_outputs["logits"].softmax(-1) return best_class ``` The structure of this breakdown is designed to guarantee relatively seamless CPU/GPU compatibility, along with CPU pre/postprocessing on multiple threads. `preprocess` will take the originally defined inputs and turn them into something that can be fed to the model. It might contain more information and is usually a `Dict` object. `_forward` contains the implementation details and should not be called directly. `forward` is the preferred method to use, since it contains safeguards to make sure everything works on the correct device. Anything related to an actual model should go in the `_forward` method; everything else belongs in the preprocess and postprocess methods. The `postprocess` methods take the output of `_forward` and turn it into the final output we decided on earlier. `_sanitize_parameters` exists to allow users to pass any parameters whenever they wish, whether at pipeline initialization time `pipeline(...., maybe_arg=4)` or at call time `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`. 
The `_sanitize_parameters` method returns 3 dictionaries of kwargs that will be passed directly to `preprocess`, `_forward` and `postprocess`. Don't fill anything in if the caller is not going to be invoked with extra parameters. This keeps the default parameters in the function definition, which is more "natural". A classic example would be a `top_k` argument in the postprocessing of a classification task. ```python >>> pipe = pipeline("my-new-task") >>> pipe("This is a test") [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05} {"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}] >>> pipe("This is a test", top_k=2) [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}] ``` To achieve this, we will update our `postprocess` method with a default value of `5` and modify `_sanitize_parameters` to allow this new parameter. ```python def postprocess(self, model_outputs, top_k=5): best_class = model_outputs["logits"].softmax(-1) # Add logic to handle the top_k return best_class def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] postprocess_kwargs = {} if "top_k" in kwargs: postprocess_kwargs["top_k"] = kwargs["top_k"] return preprocess_kwargs, {}, postprocess_kwargs ``` Try to keep the inputs and outputs very simple and, ideally, JSON-serializable, since this makes the pipeline very easy to use without users having to learn new kinds of objects. It is also relatively common to support many different types of arguments for ease of use (for example, audio files can be filenames, URLs or bytes). ## Adding it to the list of tasks To register your `new-task` in the list of tasks, you must add it to the `PIPELINE_REGISTRY`: ```python from transformers.pipelines import PIPELINE_REGISTRY PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, ) ``` You can specify a default model if you want, in which case it must come with a specific revision (which can be a branch name or a commit hash, here we use `"abcdef"`), as well as the type: ```python PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, default={"pt": ("user/awesome_model", "abcdef")}, type="text", # type of data it handles: text, audio, image, multimodal ) ``` ## Share your pipeline on the Hub To share your custom pipeline on the Hub, you just have to save the custom code of your `Pipeline` subclass in a Python file. 
For example, let's say we want to use a custom pipeline for sentence pair classification like this: ```py import numpy as np from transformers import Pipeline def softmax(outputs): maxes = np.max(outputs, axis=-1, keepdims=True) shifted_exp = np.exp(outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class PairClassificationPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "second_text" in kwargs: preprocess_kwargs["second_text"] = kwargs["second_text"] return preprocess_kwargs, {}, {} def preprocess(self, text, second_text=None): return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs): logits = model_outputs.logits[0].numpy() probabilities = softmax(logits) best_class = np.argmax(probabilities) label = self.model.config.id2label[best_class] score = probabilities[best_class].item() logits = logits.tolist() return {"label": label, "score": score, "logits": logits} ``` The implementation is framework-agnostic and will work with PyTorch and TensorFlow models. If we save this in a file named `pair_classification.py`, we can then import it and register it as follows: ```py from pair_classification import PairClassificationPipeline from transformers.pipelines import PIPELINE_REGISTRY from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, tf_model=TFAutoModelForSequenceClassification, ) ``` Once this is done, we can use it with a pretrained model. For example, the `sgugger/finetuned-bert-mrpc` model was fine-tuned on the MRPC dataset, which classifies sentence pairs as paraphrases or not. ```py from transformers import pipeline classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc") ``` We can now share it on the Hub using the `push_to_hub` method: ```py classifier.push_to_hub("test-dynamic-pipeline") ``` This will copy the file where you defined `PairClassificationPipeline` into the `"test-dynamic-pipeline"` folder, and will also save the pipeline's model and tokenizer, before pushing everything to the `{your_username}/test-dynamic-pipeline` repository. After that, anyone can use it as long as they pass the option `trust_remote_code=True`: ```py from transformers import pipeline classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True) ``` ## Adding the pipeline to 🤗 Transformers If you want to contribute your pipeline to the 🤗 Transformers library, you will have to add it to a new module in the `pipelines` submodule with the code of your pipeline. Then, you must add it to the list of tasks defined in `pipelines/__init__.py`. Next, you have to add the tests. Create a new file named `tests/test_pipelines_MY_PIPELINE.py` based on the existing tests. The `run_pipeline_test` function will be very generic and will run on small random models over all the possible architectures defined in `model_mapping` and `tf_model_mapping`. 
This is very important for testing future compatibility, meaning that if someone adds a new model for `XXXForQuestionAnswering` then the pipeline test will attempt to run with that model. Since the models are random, it is impossible to check for actual values, which is why there is an `ANY` helper that will simply attempt to match the type of the pipeline's expected output. You also *must* implement 2 (preferably 4) tests: - `test_small_model_pt`: Define one (1) small model for this pipeline (it doesn't matter if the results don't make sense) and test the pipeline outputs. The results should be the same as in `test_small_model_tf`. - `test_small_model_tf`: Define one (1) small model for this pipeline (it doesn't matter if the results don't make sense) and test the pipeline outputs. The results should be the same as in `test_small_model_pt`. - `test_large_model_pt` (`optional`): Tests the pipeline on a real task where the results must make sense. These tests are slow and should be marked as such. The goal here is to showcase the pipeline and make sure there is no drift in future releases. - `test_large_model_tf` (`optional`): Tests the pipeline on a real task where the results must make sense. These tests are slow and should be marked as such. The goal here is to showcase the pipeline and make sure there is no drift in future releases.
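To make the test layout concrete, here is a rough, hypothetical sketch of what `test_small_model_pt` could look like for the pair-classification pipeline above; the tiny checkpoint name and the asserted keys are illustrative assumptions, not part of the official test suite:

```python
from transformers import pipeline

def test_small_model_pt(self):
    # hypothetical tiny checkpoint; any small sequence-classification model works
    classifier = pipeline(
        "pair-classification", model="hf-internal-testing/tiny-random-BertForSequenceClassification"
    )
    output = classifier("This is a test", second_text="Another test")
    # with a random model we can only check the output structure, not the values
    self.assertEqual(set(output.keys()), {"label", "score", "logits"})
```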
transformers/docs/source/es/add_new_pipeline.md/0
{ "file_path": "transformers/docs/source/es/add_new_pipeline.md", "repo_id": "transformers", "token_count": 4249 }
261
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Multilingual models for inference [[open-in-colab]] There are several multilingual models in 🤗 Transformers, and their inference usage differs from that of monolingual models. However, not *all* multilingual model usage is different. Some models, like [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased), can be used just like a monolingual model. This guide will show you how to use multilingual models whose usage differs for inference. ## XLM XLM has ten different checkpoints, only one of which is monolingual. The nine remaining model checkpoints can be split into two categories: the checkpoints that use language embeddings and those that don't. ### XLM with language embeddings The following XLM models use language embeddings to specify the language used at inference: - `FacebookAI/xlm-mlm-ende-1024` (Masked language modeling, English-German) - `FacebookAI/xlm-mlm-enfr-1024` (Masked language modeling, English-French) - `FacebookAI/xlm-mlm-enro-1024` (Masked language modeling, English-Romanian) - `FacebookAI/xlm-mlm-xnli15-1024` (Masked language modeling, XNLI languages) - `FacebookAI/xlm-mlm-tlm-xnli15-1024` (Masked language modeling + translation, XNLI languages) - `FacebookAI/xlm-clm-enfr-1024` (Causal language modeling, English-French) - `FacebookAI/xlm-clm-ende-1024` (Causal language modeling, English-German) Language embeddings are represented as a tensor of the same shape as the `input_ids` passed to the model. The values in these tensors depend on the language used and are identified by the tokenizer's `lang2id` and `id2lang` attributes. In this example, load the `FacebookAI/xlm-clm-enfr-1024` checkpoint (Causal language modeling, English-French): ```py >>> import torch >>> from transformers import XLMTokenizer, XLMWithLMHeadModel >>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-clm-enfr-1024") >>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-clm-enfr-1024") ``` The `lang2id` attribute of the tokenizer displays this model's languages and their ids: ```py >>> print(tokenizer.lang2id) {'en': 0, 'fr': 1} ``` Next, create an example input: ```py >>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1 ``` Set the language id, for example `"en"`, and use it to define the language embedding. The language embedding is a tensor filled with `0` since that is the language id for English. This tensor should be the same size as `input_ids`. 
```py >>> language_id = tokenizer.lang2id["en"] # 0 >>> langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0]) >>> # We reshape it to be of size (batch_size, sequence_length) >>> langs = langs.view(1, -1) # is now of shape [1, sequence_length] (we have a batch size of 1) ``` Now you can pass the `input_ids` and the language embedding to the model: ```py >>> outputs = model(input_ids, langs=langs) ``` The [run_generation.py](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-generation/run_generation.py) script can generate text with language embeddings using the `xlm-clm` checkpoints. ### XLM without language embeddings The following XLM models do not require language embeddings during inference: - `FacebookAI/xlm-mlm-17-1280` (Masked language modeling, 17 languages) - `FacebookAI/xlm-mlm-100-1280` (Masked language modeling, 100 languages) These models are used for generic sentence representations, unlike the previous XLM checkpoints. ## BERT The following BERT models can be used for multilingual tasks: - `google-bert/bert-base-multilingual-uncased` (Masked language modeling + next sentence prediction, 102 languages) - `google-bert/bert-base-multilingual-cased` (Masked language modeling + next sentence prediction, 104 languages) These models do not require language embeddings during inference. They should identify the language from the context and infer accordingly. ## XLM-RoBERTa The following XLM-RoBERTa models can be used for multilingual tasks: - `FacebookAI/xlm-roberta-base` (Masked language modeling, 100 languages) - `FacebookAI/xlm-roberta-large` (Masked language modeling, 100 languages) XLM-RoBERTa was trained on 2.5 TB of newly created and cleaned CommonCrawl data in 100 languages. It provides strong gains over previously released multilingual models like mBERT or XLM on downstream tasks like classification, sequence labeling, and question answering. ## M2M100 The following M2M100 models can be used for multilingual translation: - `facebook/m2m100_418M` (Translation) - `facebook/m2m100_1.2B` (Translation) In this example, load the `facebook/m2m100_418M` checkpoint to translate from Chinese to English. You can set the source language in the tokenizer: ```py >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> chinese_text = "不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒." >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh") >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") ``` Tokenize the text: ```py >>> encoded_zh = tokenizer(chinese_text, return_tensors="pt") ``` M2M100 forces the target language id as the first generated token in order to translate into the target language. Set the `forced_bos_token_id` to `en` in the `generate` method to translate to English: ```py >>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) 'Do not interfere with the matters of the witches, because they are delicate and will soon be angry.' 
``` ## MBart The following MBart models can be used for multilingual translation: - `facebook/mbart-large-50-one-to-many-mmt` (One-to-many multilingual machine translation, 50 languages) - `facebook/mbart-large-50-many-to-many-mmt` (Many-to-many multilingual machine translation, 50 languages) - `facebook/mbart-large-50-many-to-one-mmt` (Many-to-one multilingual machine translation, 50 languages) - `facebook/mbart-large-50` (Multilingual translation, 50 languages) - `facebook/mbart-large-cc25` In this example, load the `facebook/mbart-large-50-many-to-many-mmt` checkpoint to translate from Finnish to English. You can set the source language in the tokenizer: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia." >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI") >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") ``` Tokenize the Finnish text: ```py >>> encoded_fi = tokenizer(fi_text, return_tensors="pt") ``` MBart forces the target language id as the first generated token in order to translate into the target language. Set the `forced_bos_token_id` to `en` in the `generate` method to translate to English: ```py >>> generated_tokens = model.generate(**encoded_fi, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"]) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) "Don't interfere with the wizard's affairs, because they are subtle, will soon get angry." ``` If you are using the `facebook/mbart-large-50-many-to-one-mmt` checkpoint, you don't need to force the target language id as the first generated token; otherwise the usage is the same.
transformers/docs/source/es/multilingual.md/0
{ "file_path": "transformers/docs/source/es/multilingual.md", "repo_id": "transformers", "token_count": 3094 }
262
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Image classification <Youtube id="tjAIM7BOYhw"/> Image classification assigns a label or class to an image. Unlike text or audio classification, the inputs are the pixel values that represent an image. Image classification has many uses, such as detecting damage after a disaster, monitoring crop health, or looking for signs of disease in medical images. This guide will show you how to fine-tune [ViT](https://huggingface.co/docs/transformers/v4.16.2/en/model_doc/vit) on the [Food-101](https://huggingface.co/datasets/food101) dataset to classify a food item in an image. <Tip> See the image classification [task page](https://huggingface.co/tasks/image-classification) for more information about its associated models, datasets, and metrics. </Tip> ## Load the Food-101 dataset Load only the first 5000 images of the Food-101 dataset from the 🤗 Datasets library, since it is quite large: ```py >>> from datasets import load_dataset >>> food = load_dataset("food101", split="train[:5000]") ``` Split the dataset into a train and a test set: ```py >>> food = food.train_test_split(test_size=0.2) ``` Then take a look at an example: ```py >>> food["train"][0] {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F52AFC8AC50>, 'label': 79} ``` The `image` field contains a PIL image, and each `label` is an integer that represents a class. Create a dictionary that maps a label name to an integer and vice versa. The mapping will help the model recover the label name from the label number: ```py >>> labels = food["train"].features["label"].names >>> label2id, id2label = dict(), dict() >>> for i, label in enumerate(labels): ... label2id[label] = str(i) ... id2label[str(i)] = label ``` Now you can convert the label number into a label name for more information: ```py >>> id2label[str(79)] 'prime_rib' ``` Each food class - or label - corresponds to a number; `79` indicates a prime rib in the example above. ## Preprocess Load the ViT image processor to process the image into a tensor: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") ``` Apply several image transformations to the dataset to make the model more robust against overfitting. Here we will use torchvision's [`transforms`](https://pytorch.org/vision/stable/transforms.html) module. 
Crop a random part of the image, resize it, and normalize it with the image mean and standard deviation: ```py >>> from torchvision.transforms import RandomResizedCrop, Compose, Normalize, ToTensor >>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) >>> _transforms = Compose([RandomResizedCrop(image_processor.size["height"]), ToTensor(), normalize]) ``` Create a preprocessing function that applies the transformations and returns the `pixel_values` - the inputs to the model - of the image: ```py >>> def transforms(examples): ... examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]] ... del examples["image"] ... return examples ``` Use 🤗 Datasets' [`with_transform`](https://huggingface.co/docs/datasets/package_reference/main_classes?#datasets.Dataset.with_transform) method to apply the transformations over the entire dataset. The transformations are applied on the fly when an element of the dataset is loaded: ```py >>> food = food.with_transform(transforms) ``` Use [`DefaultDataCollator`] to create a batch of examples. Unlike other data collators in 🤗 Transformers, the DefaultDataCollator does not apply additional preprocessing such as padding. ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator() ``` ## Train Load ViT with [`AutoModelForImageClassification`]. Specify the number of labels, and pass the model the mapping between label number and label class: ```py >>> from transformers import AutoModelForImageClassification, TrainingArguments, Trainer >>> model = AutoModelForImageClassification.from_pretrained( ... "google/vit-base-patch16-224-in21k", ... num_labels=len(labels), ... id2label=id2label, ... label2id=label2id, ... ) ``` <Tip> If you aren't familiar with fine-tuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#finetune-with-trainer)! </Tip> At this point, only three steps remain: 1. Define your training hyperparameters in [`TrainingArguments`]. It is important that you do not remove unused columns, as this would drop the `image` column. Without the `image` column you can't create `pixel_values`. Set `remove_unused_columns=False` to prevent this behavior. 2. Pass the training arguments to [`Trainer`] along with the model, datasets, tokenizer, and data collator. 3. Call [`~Trainer.train`] to fine-tune your model. ```py >>> training_args = TrainingArguments( ... output_dir="./results", ... per_device_train_batch_size=16, ... eval_strategy="steps", ... num_train_epochs=4, ... fp16=True, ... save_steps=100, ... eval_steps=100, ... logging_steps=10, ... learning_rate=2e-4, ... save_total_limit=2, ... remove_unused_columns=False, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... data_collator=data_collator, ... train_dataset=food["train"], ... eval_dataset=food["test"], ... tokenizer=image_processor, ... ) >>> trainer.train() ``` <Tip> For a more in-depth example of how to fine-tune a model for image classification, take a look at the corresponding [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). </Tip>
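Once training finishes, here is a hedged sketch of running inference with the freshly fine-tuned checkpoint; it assumes the final model is persisted to the `output_dir` used above, and the image path is a placeholder:

```py
>>> from transformers import pipeline

>>> trainer.save_model("./results")  # persist the final model and image processor
>>> classifier = pipeline("image-classification", model="./results")
>>> classifier("path/to/food_image.jpg")  # placeholder image path
```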
transformers/docs/source/es/tasks/image_classification.md/0
{ "file_path": "transformers/docs/source/es/tasks/image_classification.md", "repo_id": "transformers", "token_count": 2441 }
263
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Visite rapide [[open-in-colab]] Soyez opérationnel avec 🤗 Transformers ! Que vous soyez un développeur ou un utilisateur lambda, cette visite rapide vous aidera à démarrer et vous montrera comment utiliser le [`pipeline`] pour l'inférence, charger un modèle pré-entraîné et un préprocesseur avec une [AutoClass](./model_doc/auto), et entraîner rapidement un modèle avec PyTorch ou TensorFlow. Si vous êtes un débutant, nous vous recommandons de consulter nos tutoriels ou notre [cours](https://huggingface.co/course/chapter1/1) suivant pour des explications plus approfondies des concepts présentés ici. Avant de commencer, assurez-vous que vous avez installé toutes les bibliothèques nécessaires : ```bash !pip install transformers datasets evaluate accelerate ``` Vous aurez aussi besoin d'installer votre bibliothèque d'apprentissage profond favorite : <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## Pipeline <Youtube id="tiZFewofSLM"/> Le [`pipeline`] est le moyen le plus simple d'utiliser un modèle pré-entraîné pour l'inférence. Vous pouvez utiliser le [`pipeline`] prêt à l'emploi pour de nombreuses tâches dans différentes modalités. Consultez le tableau ci-dessous pour connaître les tâches prises en charge : | **Tâche** | **Description** | **Modalité** | **Identifiant du pipeline** | |------------------------------|--------------------------------------------------------------------------------------------------------------|----------------------|-----------------------------------------------| | Classification de texte | Attribue une catégorie à une séquence de texte donnée | Texte | pipeline(task="sentiment-analysis") | | Génération de texte | Génère du texte à partir d'une consigne donnée | Texte | pipeline(task="text-generation") | | Reconnaissance de token nommé | Attribue une catégorie à chaque token dans une séquence (personnes, organisation, localisation, etc.) 
| Texte | pipeline(task="ner") | | Question réponse | Extrait une réponse du texte en fonction du contexte et d'une question | Texte | pipeline(task="question-answering") | | Prédiction de token masqué | Prédit correctement le token masqué dans une séquence | Texte | pipeline(task="fill-mask") | | Génération de résumé | Génère un résumé d'une séquence de texte donnée ou d'un document | Texte | pipeline(task="summarization") | | Traduction | Traduit du texte d'un langage à un autre | Texte | pipeline(task="translation") | | Classification d'image | Attribue une catégorie à une image | Image | pipeline(task="image-classification") | | Segmentation d'image | Attribue une catégorie à chaque pixel d'une image (supporte la segmentation sémantique, panoptique et d'instance) | Image | pipeline(task="image-segmentation") | | Détection d'objets | Prédit les délimitations et catégories d'objets dans une image | Image | pipeline(task="object-detection") | | Classification d'audio | Attribue une catégorie à un fichier audio | Audio | pipeline(task="audio-classification") | | Reconnaissance automatique de la parole | Extrait le discours d'un fichier audio en texte | Audio | pipeline(task="automatic-speech-recognition") | | Question réponse visuels | Etant données une image et une question, répond correctement à une question sur l'image | Modalités multiples | pipeline(task="vqa") | Commencez par créer une instance de [`pipeline`] et spécifiez la tâche pour laquelle vous souhaitez l'utiliser. Vous pouvez utiliser le [`pipeline`] pour n'importe laquelle des tâches mentionnées dans le tableau précédent. Pour obtenir une liste complète des tâches prises en charge, consultez la documentation de l'[API pipeline](./main_classes/pipelines). Dans ce guide, nous utiliserons le [`pipeline`] pour l'analyse des sentiments à titre d'exemple : ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` Le [`pipeline`] télécharge et stocke en cache un [modèle pré-entraîné](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) et un tokenizer par défaut pour l'analyse des sentiments. Vous pouvez maintenant utiliser le `classifier` sur le texte de votre choix : ```py >>> classifier("We are very happy to show you the 🤗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` Si vous voulez classifier plus qu'un texte, donnez une liste de textes au [`pipeline`] pour obtenir une liste de dictionnaires en retour : ```py >>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) >>> for result in results: ... print(f"label: {result['label']}, avec le score de: {round(result['score'], 4)}") label: POSITIVE, avec le score de: 0.9998 label: NEGATIVE, avec le score de: 0.5309 ``` Le [`pipeline`] peut aussi itérer sur un jeu de données entier pour n'importe quelle tâche. Prenons par exemple la reconnaissance automatique de la parole : ```py >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` Chargez un jeu de données audio (voir le 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart#audio) pour plus de détails) sur lequel vous souhaitez itérer. 
Pour cet exemple, nous chargeons le jeu de données [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) : ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` Vous devez vous assurer que le taux d'échantillonnage de l'ensemble de données correspond au taux d'échantillonnage sur lequel [`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h) a été entraîné : ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` Les fichiers audio sont automatiquement chargés et rééchantillonnés lors de l'appel de la colonne `"audio"`. Extrayez les tableaux de formes d'ondes brutes des quatre premiers échantillons et passez-les comme une liste au pipeline : ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FODING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I THURN A JOIN A COUNT'] ``` Pour les ensembles de données plus importants où les entrées sont volumineuses (comme dans les domaines de la parole ou de la vision), utilisez plutôt un générateur au lieu d'une liste pour charger toutes les entrées en mémoire. Pour plus d'informations, consultez la documentation de l'[API pipeline](./main_classes/pipelines). ### Utiliser une autre modèle et tokenizer dans le pipeline Le [`pipeline`] peut être utilisé avec n'importe quel modèle du [Hub](https://huggingface.co/models), ce qui permet d'adapter facilement le [`pipeline`] à d'autres cas d'utilisation. Par exemple, si vous souhaitez un modèle capable de traiter du texte français, utilisez les filtres du Hub pour trouver un modèle approprié. 
Le premier résultat renvoie un [modèle BERT](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) multilingue finetuné pour l'analyse des sentiments que vous pouvez utiliser pour le texte français : ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Utilisez [`AutoModelForSequenceClassification`] et [`AutoTokenizer`] pour charger le modèle pré-entraîné et le tokenizer adapté (plus de détails sur une `AutoClass` dans la section suivante) : ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Utilisez [`TFAutoModelForSequenceClassification`] et [`AutoTokenizer`] pour charger le modèle pré-entraîné et le tokenizer adapté (plus de détails sur une `TFAutoClass` dans la section suivante) : ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Spécifiez le modèle et le tokenizer dans le [`pipeline`], et utilisez le `classifier` sur le texte en français : ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` Si vous ne parvenez pas à trouver un modèle adapté à votre cas d'utilisation, vous devrez finetuner un modèle pré-entraîné sur vos données. Jetez un coup d'œil à notre [tutoriel sur le finetuning](./training) pour apprendre comment faire. Enfin, après avoir finetuné votre modèle pré-entraîné, pensez à [partager](./model_sharing) le modèle avec la communauté sur le Hub afin de démocratiser l'apprentissage automatique pour tous ! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> Les classes [`AutoModelForSequenceClassification`] et [`AutoTokenizer`] fonctionnent ensemble pour créer un [`pipeline`] comme celui que vous avez utilisé ci-dessus. Une [AutoClass](./model_doc/auto) est un raccourci qui récupère automatiquement l'architecture d'un modèle pré-entraîné à partir de son nom ou de son emplacement. Il vous suffit de sélectionner l'`AutoClass` appropriée à votre tâche et la classe de prétraitement qui lui est associée. Reprenons l'exemple de la section précédente et voyons comment vous pouvez utiliser l'`AutoClass` pour reproduire les résultats du [`pipeline`]. ### AutoTokenizer Un tokenizer est chargé de prétraiter le texte pour en faire un tableau de chiffres qui servira d'entrée à un modèle. De nombreuses règles régissent le processus de tokenisation, notamment la manière de diviser un mot et le niveau auquel les mots doivent être divisés (pour en savoir plus sur la tokenisation, consultez le [résumé](./tokenizer_summary)). La chose la plus importante à retenir est que vous devez instancier un tokenizer avec le même nom de modèle pour vous assurer que vous utilisez les mêmes règles de tokenisation que celles avec lesquelles un modèle a été pré-entraîné. 
Chargez un tokenizer avec [`AutoTokenizer`] : ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Passez votre texte au tokenizer : ```py >>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` Le tokenizer retourne un dictionnaire contenant : * [input_ids](./glossary#input-ids): la représentation numérique des tokens. * [attention_mask](.glossary#attention-mask): indique quels tokens doivent faire l'objet d'une attention particulière (plus particulièrement les tokens de remplissage). Un tokenizer peut également accepter une liste de textes, et remplir et tronquer le texte pour retourner un échantillon de longueur uniforme : <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> <Tip> Consultez le tutoriel [prétraitement](./preprocessing) pour plus de détails sur la tokenisation, et sur la manière d'utiliser un [`AutoImageProcessor`], un [`AutoFeatureExtractor`] et un [`AutoProcessor`] pour prétraiter les images, l'audio et les contenus multimodaux. </Tip> ### AutoModel <frameworkcontent> <pt> 🤗 Transformers fournit un moyen simple et unifié de charger des instances pré-entraînées. Cela signifie que vous pouvez charger un [`AutoModel`] comme vous chargeriez un [`AutoTokenizer`]. La seule différence est de sélectionner l'[`AutoModel`] approprié pour la tâche. Pour une classification de texte (ou de séquence de textes), vous devez charger [`AutoModelForSequenceClassification`] : ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Voir le [résumé de la tâche](./task_summary) pour vérifier si elle est prise en charge par une classe [`AutoModel`]. </Tip> Maintenant, passez votre échantillon d'entrées prétraitées directement au modèle. Il vous suffit de décompresser le dictionnaire en ajoutant `**` : ```py >>> pt_outputs = pt_model(**pt_batch) ``` Le modèle produit les activations finales dans l'attribut `logits`. Appliquez la fonction softmax aux `logits` pour récupérer les probabilités : ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformers fournit un moyen simple et unifié de charger des instances pré-entraînés. Cela signifie que vous pouvez charger un [`TFAutoModel`] comme vous chargeriez un [`AutoTokenizer`]. La seule différence est de sélectionner le [`TFAutoModel`] approprié pour la tâche. 
Pour une classification de texte (ou de séquence de textes), vous devez charger [`TFAutoModelForSequenceClassification`] : ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Voir le [résumé de la tâche](./task_summary) pour vérifier si elle est prise en charge par une classe [`AutoModel`]. </Tip> Passez maintenant votre échantillon d'entrées prétraitées directement au modèle en passant les clés du dictionnaire directement aux tensors : ```py >>> tf_outputs = tf_model(tf_batch) ``` Le modèle produit les activations finales dans l'attribut `logits`. Appliquez la fonction softmax aux `logits` pour récupérer les probabilités : ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> Tous les modèles 🤗 Transformers (PyTorch ou TensorFlow) produisent les tensors *avant* la fonction d'activation finale (comme softmax) car la fonction d'activation finale est souvent fusionnée avec le calcul de la perte. Les structures produites par le modèle sont des classes de données spéciales, de sorte que leurs attributs sont autocomplétés dans un environnement de développement. Les structures produites par le modèle se comportent comme un tuple ou un dictionnaire (vous pouvez les indexer avec un entier, une tranche ou une chaîne), auquel cas les attributs qui sont None sont ignorés. </Tip> ### Sauvegarder un modèle <frameworkcontent> <pt> Une fois que votre modèle est finetuné, vous pouvez le sauvegarder avec son tokenizer en utilisant [`PreTrainedModel.save_pretrained`] : ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` Lorsque vous voulez réutiliser le modèle, rechargez-le avec [`PreTrainedModel.from_pretrained`] : ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> Une fois que votre modèle est finetuné, vous pouvez le sauvegarder avec son tokenizer en utilisant [`TFPreTrainedModel.save_pretrained`] : ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` Lorsque vous voulez réutiliser le modèle, rechargez-le avec [`TFPreTrainedModel.from_pretrained`] : ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> Une fonctionnalité particulièrement cool 🤗 Transformers est la possibilité d'enregistrer un modèle et de le recharger en tant que modèle PyTorch ou TensorFlow. 
Le paramètre `from_pt` ou `from_tf` permet de convertir le modèle d'un framework à l'autre : <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </tf> </frameworkcontent> ## Constructions de modèles personnalisés Vous pouvez modifier la configuration du modèle pour changer la façon dont un modèle est construit. La configuration spécifie les attributs d'un modèle, tels que le nombre de couches ou de têtes d'attention. Vous partez de zéro lorsque vous initialisez un modèle à partir d'une configuration personnalisée. Les attributs du modèle sont initialisés de manière aléatoire et vous devrez entraîner le modèle avant de pouvoir l'utiliser pour obtenir des résultats significatifs. Commencez par importer [`AutoConfig`], puis chargez le modèle pré-entraîné que vous voulez modifier. Dans [`AutoConfig.from_pretrained`], vous pouvez spécifier l'attribut que vous souhaitez modifier, tel que le nombre de têtes d'attention : ```py >>> from transformers import AutoConfig >>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12) ``` <frameworkcontent> <pt> Créez un modèle personnalisé à partir de votre configuration avec [`AutoModel.from_config`] : ```py >>> from transformers import AutoModel >>> my_model = AutoModel.from_config(my_config) ``` </pt> <tf> Créez un modèle personnalisé à partir de votre configuration avec [`TFAutoModel.from_config`] : ```py >>> from transformers import TFAutoModel >>> my_model = TFAutoModel.from_config(my_config) ``` </tf> </frameworkcontent> Consultez le guide [Créer une architecture personnalisée](./create_a_model) pour plus d'informations sur la création de configurations personnalisées. ## Trainer - une boucle d'entraînement optimisée par PyTorch Tous les modèles sont des [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) standard, vous pouvez donc les utiliser dans n'importe quelle boucle d'entraînement typique. Bien que vous puissiez écrire votre propre boucle d'entraînement, 🤗 Transformers fournit une classe [`Trainer`] pour PyTorch, qui contient la boucle d'entraînement de base et ajoute des fonctionnalités supplémentaires comme l'entraînement distribué, la précision mixte, et plus encore. En fonction de votre tâche, vous passerez généralement les paramètres suivants à [`Trainer`] : 1. Un [`PreTrainedModel`] ou un [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module): ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. [`TrainingArguments`] contient les hyperparamètres du modèle que vous pouvez changer comme le taux d'apprentissage, la taille de l'échantillon, et le nombre d'époques pour s'entraîner. Les valeurs par défaut sont utilisées si vous ne spécifiez pas d'hyperparamètres d'apprentissage : ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="path/to/save/folder/", ... learning_rate=2e-5, ... per_device_train_batch_size=8, ... per_device_eval_batch_size=8, ... num_train_epochs=2, ... ) ``` 3. 
Une classe de prétraitement comme un tokenizer, un processeur d'images ou un extracteur de caractéristiques : ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 4. Chargez un jeu de données : ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT ``` 5. Créez une fonction qui transforme le texte du jeu de données en token : ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) ``` Puis appliquez-la à l'intégralité du jeu de données avec [`~datasets.Dataset.map`]: ```py >>> dataset = dataset.map(tokenize_dataset, batched=True) ``` 6. Un [`DataCollatorWithPadding`] pour créer un échantillon d'exemples à partir de votre jeu de données : ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` Maintenant, rassemblez tous ces éléments dans un [`Trainer`] : ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... ) # doctest: +SKIP ``` Une fois que vous êtes prêt, appelez la fonction [`~Trainer.train`] pour commencer l'entraînement : ```py >>> trainer.train() # doctest: +SKIP ``` <Tip> Pour les tâches - comme la traduction ou la génération de résumé - qui utilisent un modèle séquence à séquence, utilisez plutôt les classes [`Seq2SeqTrainer`] et [`Seq2SeqTrainingArguments`]. </Tip> Vous pouvez personnaliser le comportement de la boucle d'apprentissage en redéfinissant les méthodes à l'intérieur de [`Trainer`]. Cela vous permet de personnaliser des caractéristiques telles que la fonction de perte, l'optimiseur et le planificateur. Consultez la documentation de [`Trainer`] pour savoir quelles méthodes peuvent être redéfinies. L'autre moyen de personnaliser la boucle d'apprentissage est d'utiliser les [Callbacks](./main_classes/callback). Vous pouvez utiliser les callbacks pour intégrer d'autres bibliothèques et inspecter la boucle d'apprentissage afin de suivre la progression ou d'arrêter l'apprentissage plus tôt. Les callbacks ne modifient rien dans la boucle d'apprentissage elle-même. Pour personnaliser quelque chose comme la fonction de perte, vous devez redéfinir le [`Trainer`] à la place. ## Entraînement avec TensorFlow Tous les modèles sont des modèles standard [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) afin qu'ils puissent être entraînés avec TensorFlow avec l'API [Keras](https://keras.io/). 🤗 Transformers fournit la fonction [`~TFPreTrainedModel.prepare_tf_dataset`] pour charger facilement votre jeu de données comme un `tf.data.Dataset` afin que vous puissiez commencer l'entraînement immédiatement avec les fonctions [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) et [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) de Keras. 1. Vous commencez avec un modèle [`TFPreTrainedModel`] ou [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) : ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. 
Une classe de prétraitement comme un tokenizer, un processeur d'images ou un extracteur de caractéristiques : ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 3. Créez une fonction qui transforme le texte du jeu de données en token : ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) # doctest: +SKIP ``` 4. Appliquez le tokenizer à l'ensemble du jeu de données avec [`~datasets.Dataset.map`] et passez ensuite le jeu de données et le tokenizer à [`~TFPreTrainedModel.prepare_tf_dataset`]. Vous pouvez également modifier la taille de l'échantillon et mélanger le jeu de données ici si vous le souhaitez : ```py >>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP >>> tf_dataset = model.prepare_tf_dataset( ... dataset, batch_size=16, shuffle=True, tokenizer=tokenizer ... ) # doctest: +SKIP ``` 5. Une fois que vous êtes prêt, appelez les fonctions `compile` et `fit` pour commencer l'entraînement : ```py >>> from tensorflow.keras.optimizers import Adam >>> model.compile(optimizer=Adam(3e-5)) >>> model.fit(dataset) # doctest: +SKIP ``` ## Et après ? Maintenant que vous avez terminé la visite rapide de 🤗 Transformers, consultez nos guides et apprenez à faire des choses plus spécifiques comme créer un modèle personnalisé, finetuner un modèle pour une tâche, et comment entraîner un modèle avec un script. Si vous souhaitez en savoir plus sur les concepts fondamentaux de 🤗 Transformers, jetez un œil à nos guides conceptuels !
transformers/docs/source/fr/quicktour.md/0
{ "file_path": "transformers/docs/source/fr/quicktour.md", "repo_id": "transformers", "token_count": 10740 }
264
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Debugging ## Debug dei problemi di rete multi-GPU Quando addestri o fai inferenza con `DistributedDataParallel` e GPU multiple, se si verificano problemi di intercomunicazione tra processi e/o nodi, puoi utilizzare il seguente script per diagnosticare i problemi della rete. ```bash wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py ``` Per esempio per testare come 2 GPU interagiscono fai: ```bash python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` Se entrambi i processi sono in grado di comunicare tra loro e di allocare la memoria della GPU, ciascuno di essi stamperà lo stato OK. Per più GPU o nodi adatta gli argumenti nello script. All'interno dello script di diagnostica troverai molti altri dettagli e anche una guida per eseguirlo in ambiente SLURM. Un livello di debug superiore è aggiungere la variabile d'ambiente `NCCL_DEBUG=INFO` come di seguito: ```bash NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` In questo modo si scaricano molte informazioni di debug relative a NCCL, che puoi cercare online in caso di problemi. Oppure, se non hai la sicurezza di come interpretare l'output, puoi condividere il file di log in una Issue. ## Rilevamento di Underflow e Overflow <Tip> Questa funzionalità al momento è disponibile solo per PyTorch. </Tip> <Tip> Per addestramento multi-GPU richiede DDP (`torch.distributed.launch`). </Tip> <Tip> Questa funzionalità può essere usata con modelli basati su `nn.Module`. </Tip> Se inizi a ottenere `loss=NaN` o il modello presenta qualche altro comportamento anomalo a causa di valori `inf` o `nan` in attivazioni o nei pesi, è necessario scoprire dove si verifica il primo underflow o overflow e cosa lo ha determinato. Fortunatamente è possibile farlo facilmente attivando un modulo speciale che effettuerà il rilevamento automaticamente. Se stai usando [`Trainer`], hai bisogno di aggiungere solo: ```bash --debug underflow_overflow ``` ai normali argomenti della riga di comando, o passa `debug="underflow_overflow"` quando viene creato l'oggetto [`TrainingArguments`]. Se stai usando il tuo ciclo di allenamento o un altro trainer, puoi ottenere lo stesso risultato con: ```python from .debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model) ``` [`~debug_utils.DebugUnderflowOverflow`] inserisce dei ganci nel modello che dopo ogni chiamata testeranno le variabili di ingresso e di uscita e anche i pesi del modulo corrispondente. 
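Concettualmente il meccanismo è simile alla registrazione di un *forward hook* di PyTorch su ogni sotto-modulo. Il seguente è solo uno schizzo minimale a scopo illustrativo (non è l'implementazione reale di [`~debug_utils.DebugUnderflowOverflow`]), con un modello giocattolo definito apposta per rendere l'esempio eseguibile:

```python
import torch
from torch import nn


def controlla_inf_nan(module, inputs, output):
    # Controlla l'output di ogni modulo subito dopo la chiamata a forward
    if isinstance(output, torch.Tensor) and not torch.isfinite(output).all():
        print(f"inf/nan rilevato nell'output di {module.__class__.__name__}")


# Modello giocattolo, usato qui solo a scopo illustrativo
model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))

for _, module in model.named_modules():
    module.register_forward_hook(controlla_inf_nan)

_ = model(torch.randn(8, 4))  # con input finiti non è atteso alcun avviso
```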
Non appena viene rilevato `inf` o o `nan` in almeno un elemento delle attivazioni o dei pesi, il programma lo notifica e stampa un rapporto come il seguente (questo è stato rilevato con `google/mt5-small` sotto fp16 mixed precision): ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata encoder.block.1.layer.1.DenseReluDense.dropout Dropout 0.00e+00 2.57e+02 input[0] 0.00e+00 2.85e+02 output [...] encoder.block.2.layer.0 T5LayerSelfAttention 6.78e-04 3.15e+03 input[0] 2.65e-04 3.42e+03 output[0] None output[1] 2.25e-01 1.00e+04 output[2] encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.dropout Dropout 0.00e+00 8.76e+03 input[0] 0.00e+00 9.74e+03 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` L'output di esempio è stato tagliato al centro per brevità. La seconda colonna mostra il valore dell'elemento più grande in assoluto,così se osserviamo da vicino gli ultimi istanti, input e output sono nel range di `1e4`. Questo addestramento è stato eseguito con una mixed precision fp16 e l'ultimo passo usciva fuori (sotto `fp16` il valore più grande prima di `inf` è `64e3`). Per evitare overflows sotto `fp16` le attivazionioni devono rimanere molto al di sotto di `1e4`, perché `1e4 * 1e4 = 1e8` quindi qualsiasi moltiplicazione di matrice con grandi attivazioni porterà a una condizione di overflow numerico. All'inizio della traccia è possibile scoprire a quale lotto si è verificato il problema (questo `Detected inf/nan during batch_number=0` significa che il problema si è verificato nel primo lotto). Ogni frame segnalato inizia dichiarando la voce completamente qualificata per il modulo corrispondente per il quale il frame è stato segnalato. Se osserviamo il seguente frame: ``` encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output ``` Questo, `encoder.block.2.layer.1.layer_norm` indica che si tratta di un layer norm nel primo layer, del secondo blocco dell'encoder. E le chiamata specifica di `forward` è `T5LayerNorm`. Osserviamo gli ultimi frame del report: ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] 
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` L'ultimo frame report per la funzione `Dropout.forward` con la prima voce per l'unico input e la seconda per l'unico output. Si può notare che è stato richiamato da un attibuto `dropout` dentro la classe `DenseReluDense`. Si può notare che ciò è avvenuto durante il primo strato, del 2° blocco, durante il primissimo lotto. Infine, gli elementi di input più grandi in assoluto sono stati `6.27e+04` e l'equivalente per l'output era `inf`. Puoi vedere qui, che `T5DenseGatedGeluDense.forward` risulta in output activations, il cui valore massimo assoluto era circa 62,7K, che è molto vicino al limite massimo di 64K di fp16. Nel prossimo frame abbiamo `Dropout` che rinormalizza i pesi, dopo aver azzerato alcuni elementi, il che spinge il valore massimo assoluto a più di 64K e si verifica un overflow.(`inf`). Come puoi notare, è nei frames precedenti che occorre esaminare quando i numeri iniziano a diventare molto grandi per i valori fp16. Confrontiamo il report al codice `models/t5/modeling_t5.py`: ```python class T5DenseGatedGeluDense(nn.Module): def __init__(self, config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.gelu_act = ACT2FN["gelu_new"] def forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states ``` Ora è facile vedere la chiamata `dropout`, e tutte le chiamate precedenti. Poiché il rilevamento avviene in un avanzamento (forward hook in eng.), i rapporti vengono creati immeditamente dopo ogni rientro da `forward` (forward returns in eng.). Tornando al rapporto completo, per agire e risolvere il problema, dobbiamo andare qualche frame più in alto, dove i numeri hanno iniziato a salire, e probabilmente passare alla modalità `fp32`, in modo che i numeri non trabocchino quando vengono moltiplicati o sommati. Naturalmente, potrebbero esserci altre soluzioni. 
Per esempio, potremmo spegnere temporanemante `amp` se è abilitato, successivamente spostare `forward` in un helper wrapper, come: ```python def _forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states import torch def forward(self, hidden_states): if torch.is_autocast_enabled(): with torch.cuda.amp.autocast(enabled=False): return self._forward(hidden_states) else: return self._forward(hidden_states) ``` Poiché il rilevatore automatico riporta solo gli ingressi e le uscite di fotogrammi completi, una volta che si sa dove cercare, si può analizzare anche le fasi intermedie di una specifica funzione `forward`. In alcuni casi puoi usare la funzione di supporto `detect_overflow` per indirizzare il rilevatore dove preferisci, ad esempio: ```python from debug_utils import detect_overflow class T5LayerFF(nn.Module): [...] def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) detect_overflow(forwarded_states, "after layer_norm") forwarded_states = self.DenseReluDense(forwarded_states) detect_overflow(forwarded_states, "after DenseReluDense") return hidden_states + self.dropout(forwarded_states) ``` Si può vedere che abbiamo aggiunto 2 di questi e ora teniamo traccia se `inf` o `nan` per `forwarded_states` è stato rilevato da qualche parte. In realtà, il rilevatore li riporta già, perché ciascuna delle chiamate nell'esempio precedente è un `nn.Module`, ma diciamo che se avessimo dei calcoli diretti locali, questo è il modo in cui lo faremmo. Inoltre, se si istanzia il debugger nel proprio codice, è possibile modificare il numero di fotogrammi stampati rispetto a predefinito, ad esempio.: ```python from .debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) ``` ### Tracciamento della mistura assoluta del lotto specifico e del valore massimo La stessa classe di debug può essere utilizzata per il tracciamento per-batch con la funzione di rilevamento di underflow/overflow disattivata. Supponiamo di voler osservare i valori minimi e massimi assoluti per tutti gli ingredienti di ogni chiamata `forward` di un dato lotto. lotto, e che lo si voglia fare solo per i lotti 1 e 3. Si istanzia questa classe come: ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3]) ``` Ora i batch completi 1 e 3 saranno tracciati utilizzando lo stesso formato del rilevatore di underflow/overflow. I batches sono 0-indexed. Questo è utile se si sa che il programma inizia a comportarsi male dopo un certo numero di batch, in modo da poter avanzare velocemente fino a quell'area. direttamente a quell'area. Ecco un esempio di output troncato per questa configurazione: ``` *** Starting batch number=1 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.47e+04 input[0] 5.36e-05 7.92e+02 output [...] decoder.dropout Dropout 1.60e-07 2.27e+01 input[0] 0.00e+00 2.52e+01 output decoder T5Stack not a tensor output lm_head Linear 1.01e-06 7.92e+02 weight 0.00e+00 1.11e+00 input[0] 6.06e-02 8.39e+01 output T5ForConditionalGeneration not a tensor output *** Starting batch number=3 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.78e+04 input[0] 5.36e-05 7.92e+02 output [...] 
```

Qui verrà scaricato un numero enorme di fotogrammi, tanti quante sono le chiamate in avanti nel modello: può quindi essere o meno ciò che volete, ma a volte può essere più utile di un classico debugger. Per esempio, se il problema inizia a manifestarsi a partire dal lotto numero 150, potete scaricare le tracce dei lotti 149 e 150 e confrontare i punti in cui i numeri hanno iniziato a divergere.

È inoltre possibile specificare il numero di batch dopo il quale interrompere l'addestramento, con:

```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
```
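Ad esempio, riprendendo lo scenario appena descritto in cui il problema compare intorno al lotto 150, una configurazione puramente indicativa (i numeri di lotto sono ipotetici) potrebbe essere:

```python
from .debug_utils import DebugUnderflowOverflow

# Traccia i lotti 149 e 150 e interrompe l'addestramento subito dopo
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[149, 150], abort_after_batch_num=150)
```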
transformers/docs/source/it/debugging.md/0
{ "file_path": "transformers/docs/source/it/debugging.md", "repo_id": "transformers", "token_count": 5635 }
265
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Controlli su una Pull Request Quando apri una pull request sui 🤗 Transformers, vengono eseguiti un discreto numero di controlli per assicurarsi che la patch che stai aggiungendo non stia rompendo qualcosa di esistente. Questi controlli sono di quattro tipi: - test regolari - costruzione della documentazione - stile del codice e della documentazione - coerenza generale del repository In questo documento, cercheremo di spiegare quali sono i vari controlli e le loro ragioni, oltre a spiegare come eseguire il debug locale se uno di essi fallisce sulla tua PR. Nota che tutti richiedono un'installazione dev: ```bash pip install transformers[dev] ``` o un'installazione modificabile: ```bash pip install -e .[dev] ``` all'interno del repo Transformers. ## Tests Tutti i job che iniziano con `ci/circleci: run_tests_` eseguono parti della suite di test dei Transformers. Ognuno di questi job si concentra su una parte della libreria in un determinato ambiente: per esempio `ci/circleci: run_tests_pipelines_tf` esegue il test delle pipeline in un ambiente in cui è installato solo TensorFlow. Nota che per evitare di eseguire i test quando non ci sono cambiamenti reali nei moduli che si stanno testando, ogni volta viene eseguita solo una parte della suite di test: viene eseguita una utility per determinare le differenze nella libreria tra prima e dopo la PR (ciò che GitHub mostra nella scheda "Files changes") e sceglie i test che sono stati impattati dalla diff. Questa utility può essere eseguita localmente con: ```bash python utils/tests_fetcher.py ``` dalla root del repo Transformers. Di seguito ciò che farà: 1. Controlla per ogni file nel diff se le modifiche sono nel codice o solo nei commenti o nelle docstrings. Vengono mantenuti solo i file con modifiche reali al codice. 2. Costruisce una mappa interna che fornisce per ogni file del codice sorgente della libreria tutti i file su cui ha un impatto ricorsivo. Si dice che il modulo A ha un impatto sul modulo B se il modulo B importa il modulo A. Per l'impatto ricorsivo, abbiamo bisogno di una catena di moduli che va dal modulo A al modulo B in cui ogni modulo importa il precedente. 3. Applica questa mappa ai file raccolti nel passaggio 1, si ottiene l'elenco dei file del modello interessati dalla PR. 4. Mappa ciascuno di questi file con i corrispondenti file di test e ottiene l'elenco dei test da eseguire. Quando esegui lo script in locale, dovresti ottenere la stampa dei risultati dei passi 1, 3 e 4 e quindi sapere quali test sono stati eseguiti. 
Lo script creerà anche un file chiamato `test_list.txt` che contiene l'elenco dei test da eseguire e che puoi eseguire localmente con il seguente comando: ```bash python -m pytest -n 8 --dist=loadfile -rA -s $(cat test_list.txt) ``` Nel caso in cui qualcosa sia sfuggito, l'intera suite di test viene eseguita quotidianamente. ## Build della documentazione Il job `ci/circleci: build_doc` esegue una build della documentazione per assicurarsi che tutto sia a posto una volta che la PR è stata unita. Se questo passaggio fallisce, puoi controllare localmente entrando nella cartella `docs` del repo Transformers e digitare ```bash make html ``` Sphinx non è noto per i suoi messaggi di errore chiari, quindi potrebbe essere necessario che provi alcune cose per trovare davvero la fonte dell'errore. ## Stile del codice e della documentazione La formattazione del codice viene applicata a tutti i file sorgenti, agli esempi e ai test usando `black` e `isort`. Abbiamo anche uno strumento personalizzato che si occupa della formattazione delle docstring e dei file `rst` (`utils/style_doc.py`), così come dell'ordine dei lazy imports eseguiti nei file `__init__.py` dei Transformers (`utils/custom_init_isort.py`). Tutto questo può essere lanciato eseguendo ```bash make style ``` I controlli della CI sono applicati all'interno del controllo `ci/circleci: check_code_quality`. Esegue anche `flake8`, che dà un'occhiata di base al codice e si lamenta se trova una variabile non definita o non utilizzata. Per eseguire questo controllo localmente, usare ```bash make quality ``` Questa operazione può richiedere molto tempo, quindi per eseguire la stessa operazione solo sui file modificati nel branch corrente, eseguire ```bash make fixup ``` Quest'ultimo comando eseguirà anche tutti i controlli aggiuntivi per la consistenza del repository. Diamogli un'occhiata. ## Coerenza del repository All'interno sono raggruppati tutti i test per assicurarsi che la tua PR lasci il repository in un buono stato ed è eseguito dal controllo `ci/circleci: check_repository_consistency`. 
Puoi eseguire localmente questo controllo eseguendo quanto segue: ```bash make repo-consistency ``` Questo verifica che: - Tutti gli oggetti aggiunti all'init sono documentati (eseguito da `utils/check_repo.py`) - Tutti i file `__init__.py` hanno lo stesso contenuto nelle loro due sezioni (eseguito da `utils/check_inits.py`) - Tutto il codice identificato come copia da un altro modulo è coerente con l'originale (eseguito da `utils/check_copies.py`) - Le traduzioni dei README e l'indice della documentazione hanno lo stesso elenco di modelli del README principale (eseguito da `utils/check_copies.py`) - Le tabelle autogenerate nella documentazione sono aggiornate (eseguito da `utils/check_table.py`) - La libreria ha tutti gli oggetti disponibili anche se non tutte le dipendenze opzionali sono installate (eseguito da `utils/check_dummies.py`) Se questo controllo fallisce, le prime due voci richiedono una correzione manuale, mentre le ultime quattro possono essere corrette automaticamente per te eseguendo il comando ```bash make fix-copies ``` Ulteriori controlli riguardano le PR che aggiungono nuovi modelli, principalmente che: - Tutti i modelli aggiunti sono in un Auto-mapping (eseguita da `utils/check_repo.py`) <!-- TODO Sylvain, add a check that makes sure the common tests are implemented.--> - Tutti i modelli sono testati correttamente (eseguito da `utils/check_repo.py`) <!-- TODO Sylvain, add the following - All models are added to the main README, inside the main doc - All checkpoints used actually exist on the Hub -->
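Se uno di questi controlli fallisce e vuoi rieseguirlo singolarmente, invece dell'intero `make repo-consistency`, puoi lanciare direttamente lo script corrispondente dalla radice del repo, ad esempio (comando indicativo):

```bash
python utils/check_copies.py
```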
transformers/docs/source/it/pr_checks.md/0
{ "file_path": "transformers/docs/source/it/pr_checks.md", "repo_id": "transformers", "token_count": 2370 }
266
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Create a custom architecture [`AutoClass`](model_doc/auto)は、モデルのアーキテクチャを自動的に推論し、事前学習済みの設定と重みをダウンロードします。一般的には、チェックポイントに依存しないコードを生成するために`AutoClass`を使用することをお勧めします。ただし、特定のモデルパラメータに対する制御をより詳細に行いたいユーザーは、いくつかの基本クラスからカスタム🤗 Transformersモデルを作成できます。これは、🤗 Transformersモデルを研究、トレーニング、または実験する興味があるユーザーに特に役立つかもしれません。このガイドでは、`AutoClass`を使用しないカスタムモデルの作成について詳しく説明します。次の方法を学びます: - モデルの設定をロードおよびカスタマイズする。 - モデルアーキテクチャを作成する。 - テキスト用の遅いトークナイザと高速トークナイザを作成する。 - ビジョンタスク用の画像プロセッサを作成する。 - オーディオタスク用の特徴抽出器を作成する。 - マルチモーダルタスク用のプロセッサを作成する。 ## Configuration [設定](main_classes/configuration)は、モデルの特定の属性を指します。各モデルの設定には異なる属性があります。たとえば、すべてのNLPモデルには、`hidden_size`、`num_attention_heads`、`num_hidden_layers`、および`vocab_size`属性が共通してあります。これらの属性は、モデルを構築するための注意ヘッドの数や隠れ層の数を指定します。 [DistilBERT](model_doc/distilbert)をより詳しく調べるために、[`DistilBertConfig`]にアクセスしてその属性を調べてみましょう: ```py >>> from transformers import DistilBertConfig >>> config = DistilBertConfig() >>> print(config) DistilBertConfig { "activation": "gelu", "attention_dropout": 0.1, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` [`DistilBertConfig`]は、基本の[`DistilBertModel`]を構築するために使用されるすべてのデフォルト属性を表示します。 すべての属性はカスタマイズ可能で、実験のためのスペースを提供します。例えば、デフォルトのモデルをカスタマイズして以下のようなことができます: - `activation`パラメータで異なる活性化関数を試す。 - `attention_dropout`パラメータで注意確率の高いドロップアウト率を使用する。 ```py >>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4) >>> print(my_config) DistilBertConfig { "activation": "relu", "attention_dropout": 0.4, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` 事前学習済みモデルの属性は、[`~PretrainedConfig.from_pretrained`] 関数で変更できます: ```py >>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4) ``` Once you are satisfied with your model configuration, you can save it with [`PretrainedConfig.save_pretrained`]. Your configuration file is stored as a JSON file in the specified save directory. 
```py >>> my_config.save_pretrained(save_directory="./your_model_save_path") ``` 設定ファイルを再利用するには、[`~PretrainedConfig.from_pretrained`]を使用してそれをロードします: ```py >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") ``` <Tip> カスタム構成ファイルを辞書として保存することも、カスタム構成属性とデフォルトの構成属性の違いだけを保存することもできます!詳細については[configuration](main_classes/configuration)のドキュメンテーションをご覧ください。 </Tip> ## Model 次のステップは、[モデル](main_classes/models)を作成することです。モデル(アーキテクチャとも緩く言われることがあります)は、各レイヤーが何をしているか、どの操作が行われているかを定義します。構成からの `num_hidden_layers` のような属性はアーキテクチャを定義するために使用されます。 すべてのモデルは [`PreTrainedModel`] をベースクラスとし、入力埋め込みのリサイズやセルフアテンションヘッドのプルーニングなど、共通のメソッドがいくつかあります。 さらに、すべてのモデルは [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)、[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)、または [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) のいずれかのサブクラスでもあります。つまり、モデルはそれぞれのフレームワークの使用法と互換性があります。 <frameworkcontent> <pt> モデルにカスタム構成属性をロードします: ```py >>> from transformers import DistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") >>> model = DistilBertModel(my_config) ``` これにより、事前トレーニング済みの重みではなくランダムな値を持つモデルが作成されます。 これは、トレーニングが行われるまで、まだ有用なものとして使用することはできません。 トレーニングはコストと時間がかかるプロセスです。 通常、トレーニングに必要なリソースの一部しか使用せず、より速くより良い結果を得るために事前学習済みモデルを使用することが良いでしょう。 [`~PreTrainedModel.from_pretrained`]を使用して事前学習済みモデルを作成します: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` 事前学習済みの重みをロードする際、モデルが🤗 Transformersによって提供されている場合、デフォルトのモデル設定が自動的にロードされます。ただし、必要に応じてデフォルトのモデル設定属性の一部またはすべてを独自のもので置き換えることができます。 ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </pt> <tf> モデルにカスタム設定属性をロードしてください: ```py >>> from transformers import TFDistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") >>> tf_model = TFDistilBertModel(my_config) ``` これにより、事前学習済みの重みではなくランダムな値を持つモデルが作成されます。 このモデルを有用な目的にはまだ使用することはできません。トレーニングはコストがかかり、時間がかかるプロセスです。 一般的には、トレーニングに必要なリソースの一部しか使用せずに、より速く優れた結果を得るために事前学習済みモデルを使用することが良いでしょう。 [`~TFPreTrainedModel.from_pretrained`]を使用して事前学習済みモデルを作成します: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` 事前学習済みの重みをロードする際、モデルが🤗 Transformersによって提供されている場合、デフォルトのモデル構成が自動的にロードされます。ただし、必要であればデフォルトのモデル構成属性の一部またはすべてを独自のもので置き換えることもできます: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </tf> </frameworkcontent> ### Model heads この時点で、ベースのDistilBERTモデルがあり、これは隠れた状態を出力します。隠れた状態はモデルのヘッドへの入力として渡され、最終的な出力を生成します。🤗 Transformersは、モデルがそのタスクをサポートしている限り、各タスクに対応する異なるモデルヘッドを提供します(つまり、DistilBERTを翻訳のようなシーケンス対シーケンスタスクに使用することはできません)。 <frameworkcontent> <pt> たとえば、[`DistilBertForSequenceClassification`]は、シーケンス分類ヘッドを持つベースのDistilBERTモデルです。シーケンス分類ヘッドは、プールされた出力の上にある線形層です。 ```py >>> from transformers import DistilBertForSequenceClassification >>> model = DistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 新しいタスクにこのチェックポイントを簡単に再利用するには、異なるモデルヘッドに切り替えます。 質問応答タスクの場合、[`DistilBertForQuestionAnswering`] モデルヘッドを使用します。 質問応答ヘッドはシーケンス分類ヘッドと類似していますが、隠れ状態の出力の上に線形層があります。 ```py >>> from transformers import DistilBertForQuestionAnswering >>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </pt> <tf> 
例えば、[`TFDistilBertForSequenceClassification`]は、シーケンス分類ヘッドを持つベースのDistilBERTモデルです。シーケンス分類ヘッドは、プールされた出力の上にある線形層です。 ```py >>> from transformers import TFDistilBertForSequenceClassification >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 別のタスクにこのチェックポイントを簡単に再利用することができ、異なるモデルヘッドに切り替えるだけです。 質問応答タスクの場合、[`TFDistilBertForQuestionAnswering`]モデルヘッドを使用します。 質問応答ヘッドはシーケンス分類ヘッドと似ていますが、隠れ状態の出力の上に線形層があるだけです。 ```py >>> from transformers import TFDistilBertForQuestionAnswering >>> tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </tf> </frameworkcontent> ## Tokenizer テキストデータをモデルで使用する前に必要な最後のベースクラスは、生のテキストをテンソルに変換するための[トークナイザ](main_classes/tokenizer)です。 🤗 Transformersで使用できる2つのタイプのトークナイザがあります: - [`PreTrainedTokenizer`]: トークナイザのPython実装です。 - [`PreTrainedTokenizerFast`]: Rustベースの[🤗 Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/)ライブラリからのトークナイザです。 このトークナイザのタイプは、そのRust実装により、特にバッチトークナイゼーション中に高速です。 高速なトークナイザは、トークンを元の単語または文字にマッピングする*オフセットマッピング*などの追加メソッドも提供します。 両方のトークナイザは、エンコードとデコード、新しいトークンの追加、特別なトークンの管理など、共通のメソッドをサポートしています。 <Tip warning={true}> すべてのモデルが高速なトークナイザをサポートしているわけではありません。 モデルが高速なトークナイザをサポートしているかどうかを確認するには、この[表](index#supported-frameworks)をご覧ください。 </Tip> 独自のトークナイザをトレーニングした場合、*ボキャブラリー*ファイルからトークナイザを作成できます。 ```py >>> from transformers import DistilBertTokenizer >>> my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left") ``` カスタムトークナイザーから生成される語彙は、事前学習済みモデルのトークナイザーが生成する語彙とは異なることを覚えておくことは重要です。 事前学習済みモデルを使用する場合は、事前学習済みモデルの語彙を使用する必要があります。そうしないと、入力が意味をなさなくなります。 [`DistilBertTokenizer`]クラスを使用して、事前学習済みモデルの語彙を持つトークナイザーを作成します: ```py >>> from transformers import DistilBertTokenizer >>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` [`DistilBertTokenizerFast`]クラスを使用して高速なトークナイザを作成します: ```py >>> from transformers import DistilBertTokenizerFast >>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip> デフォルトでは、[`AutoTokenizer`]は高速なトークナイザを読み込もうとします。`from_pretrained`内で`use_fast=False`を設定することで、この動作を無効にすることができます。 </Tip> ## Image Processor 画像プロセッサはビジョン入力を処理します。これは基本クラス [`~image_processing_utils.ImageProcessingMixin`] を継承しています。 使用するには、使用しているモデルに関連付けられた画像プロセッサを作成します。 たとえば、画像分類に[ViT](model_doc/vit)を使用する場合、デフォルトの [`ViTImageProcessor`] を作成します。 ```py >>> from transformers import ViTImageProcessor >>> vit_extractor = ViTImageProcessor() >>> print(vit_extractor) ViTImageProcessor { "do_normalize": true, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.5, 0.5, 0.5 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": 2, "size": 224 } ``` <Tip> カスタマイズを必要としない場合、モデルのデフォルトの画像プロセッサパラメータをロードするには、単純に`from_pretrained`メソッドを使用してください。 </Tip> [`ViTImageProcessor`]のパラメータを変更して、カスタムの画像プロセッサを作成できます: ```py >>> from transformers import ViTImageProcessor >>> my_vit_extractor = ViTImageProcessor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3]) >>> print(my_vit_extractor) ViTImageProcessor { "do_normalize": false, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.3, 0.3, 0.3 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": "PIL.Image.BOX", "size": 224 } ``` ## Feature Extractor フィーチャー抽出器は音声入力を処理します。これは基本的な [`~feature_extraction_utils.FeatureExtractionMixin`] クラスから継承され、音声入力を処理するための [`SequenceFeatureExtractor`] クラスからも継承されることがあります。 使用するには、モデルに関連付けられたフィーチャー抽出器を作成します。たとえば、音声分類に [Wav2Vec2](model_doc/wav2vec2) 
を使用する場合、デフォルトの [`Wav2Vec2FeatureExtractor`] を作成します。 ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor() >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": true, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 16000 } ``` <Tip> カスタマイズを行わない場合、モデルのデフォルトの特徴抽出器パラメーターをロードするには、単に `from_pretrained` メソッドを使用してください。 </Tip> [`Wav2Vec2FeatureExtractor`] のパラメーターを変更して、カスタム特徴抽出器を作成できます: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor(sampling_rate=8000, do_normalize=False) >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": false, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 8000 } ``` ## Processor マルチモーダルタスクをサポートするモデルに対して、🤗 Transformersは便利なプロセッサクラスを提供しています。 このプロセッサクラスは、特徴量抽出器やトークナイザなどの処理クラスを便利にラップし、単一のオブジェクトに結合します。 たとえば、自動音声認識タスク(ASR)用に[`Wav2Vec2Processor`]を使用してみましょう。 ASRは音声をテキストに転写するタスクであり、音声入力を処理するために特徴量抽出器とトークナイザが必要です。 音声入力を処理する特徴量抽出器を作成します: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True) ``` テキスト入力を処理するトークナイザを作成します: ```py >>> from transformers import Wav2Vec2CTCTokenizer >>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt") ``` [`Wav2Vec2Processor`]で特徴量抽出器とトークナイザを組み合わせます: ```py >>> from transformers import Wav2Vec2Processor >>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) ``` 二つの基本クラス - 設定とモデル - および追加の前処理クラス(トークナイザ、画像プロセッサ、特徴抽出器、またはプロセッサ)を使用することで、🤗 Transformers がサポートするモデルのいずれかを作成できます。これらの基本クラスは設定可能で、必要な特性を使用できます。モデルをトレーニング用に簡単にセットアップしたり、既存の事前学習済みモデルを微調整することができます。
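補足として、作成した `processor` が音声入力をどのように変換するかを軽く確認するための最小スケッチを示します(16kHz のダミー波形を仮定した説明用の例であり、実際の音声データに置き換えてください)。

```py
>>> import numpy as np

>>> raw_audio = np.zeros(16000, dtype=np.float32)  # 16kHz を仮定した 1 秒ぶんのダミー波形
>>> inputs = processor(raw_audio, sampling_rate=16000, return_tensors="pt")
>>> inputs["input_values"].shape  # おおよそ (1, 16000) の形状になるはずです
```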
transformers/docs/source/ja/create_a_model.md/0
{ "file_path": "transformers/docs/source/ja/create_a_model.md", "repo_id": "transformers", "token_count": 8236 }
267
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Pipelines

パイプラインは、推論にモデルを使うための簡単で優れた方法です。パイプラインは、ライブラリの複雑なコードのほとんどを抽象化したオブジェクトで、固有表現認識、マスク言語モデリング、感情分析、特徴抽出、質問応答などのタスクに特化したシンプルなAPIを提供します。使用例については[タスク概要](../task_summary)を参照してください。

パイプラインの抽象化には2つのカテゴリーがあります:

- [`pipeline`] は、他のすべてのパイプラインをカプセル化する最も強力なオブジェクトです。
- タスク固有のパイプラインは、[オーディオ](#audio)、[コンピューター ビジョン](#computer-vision)、[自然言語処理](#natural-language-processing)、および [マルチモーダル](#multimodal) タスクで使用できます。

## The pipeline abstraction

*パイプライン* 抽象化は、他のすべての利用可能なパイプラインのラッパーです。他のパイプラインと同様にインスタンス化できますが、さらに使い勝手を良くする機能を提供します。

1 つの項目に対する単純な呼び出し:

```python
>>> pipe = pipeline("text-classification")
>>> pipe("This restaurant is awesome")
[{'label': 'POSITIVE', 'score': 0.9998743534088135}]
```

[ハブ](https://huggingface.co) の特定のモデルを使用したい場合、そのモデルがハブ上で既にタスクを定義していれば、タスクの指定を省略できます。

```python
>>> pipe = pipeline(model="FacebookAI/roberta-large-mnli")
>>> pipe("This restaurant is awesome")
[{'label': 'NEUTRAL', 'score': 0.7313136458396912}]
```

多くの項目に対してパイプラインを呼び出すには、*list* を渡してパイプラインを呼び出すことができます。

```python
>>> pipe = pipeline("text-classification")
>>> pipe(["This restaurant is awesome", "This restaurant is awful"])
[{'label': 'POSITIVE', 'score': 0.9998743534088135},
 {'label': 'NEGATIVE', 'score': 0.9996669292449951}]
```

完全なデータセットを反復処理するには、`Dataset` を直接使用することをお勧めします。これなら、データセット全体を一度にメモリに載せる必要も、自分でバッチ処理を行う必要もありません。GPU 上のカスタムループと同じくらい高速に動作するはずです。もしそうでなければ、ためらわずに issue を作成してください。

```python
import datasets
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset
from tqdm.auto import tqdm

pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0)
dataset = datasets.load_dataset("superb", name="asr", split="test")

# KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item
# as we're not interested in the *target* part of the dataset. For sentence pair use KeyPairDataset
for out in tqdm(pipe(KeyDataset(dataset, "file"))):
    print(out)
    # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"}
    # {"text": ....}
    # ....
```

使いやすくするために、ジェネレーターを使用することもできます。

```python
from transformers import pipeline

pipe = pipeline("text-classification")


def data():
    while True:
        # This could come from a dataset, a database, a queue or HTTP request
        # in a server
        # Caveat: because this is iterative, you cannot use `num_workers > 1` variable
        # to use multiple threads to preprocess data.
You can still have 1 thread that # does the preprocessing while the main runs the big inference yield "This is a test" for out in pipe(data()): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # .... ``` [[autodoc]] pipeline ## Pipeline batching すべてのパイプラインでバッチ処理を使用できます。これはうまくいきます パイプラインがストリーミング機能を使用するときは常に (つまり、リスト、`dataset`、または `generator`を渡すとき)。 ```python from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset import datasets dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised") pipe = pipeline("text-classification", device=0) for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"): print(out) # [{'label': 'POSITIVE', 'score': 0.9998743534088135}] # Exactly the same output as before, but the content are passed # as batches to the model ``` <Tip warning={true}> ただし、これによってパフォーマンスが自動的に向上するわけではありません。状況に応じて、10 倍の高速化または 5 倍の低速化のいずれかになります。 ハードウェア、データ、使用されている実際のモデルについて。 主に高速化である例: </Tip> ```python from transformers import pipeline from torch.utils.data import Dataset from tqdm.auto import tqdm pipe = pipeline("text-classification", device=0) class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): return "This is a test" dataset = MyDataset() for batch_size in [1, 8, 64, 256]: print("-" * 30) print(f"Streaming batch_size={batch_size}") for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)): pass ``` ``` # On GTX 970 ------------------------------ Streaming no batching 100%|██████████████████████████████████████████████████████████████████████| 5000/5000 [00:26<00:00, 187.52it/s] ------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:04<00:00, 1205.95it/s] ------------------------------ Streaming batch_size=64 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:02<00:00, 2478.24it/s] ------------------------------ Streaming batch_size=256 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:01<00:00, 2554.43it/s] (diminishing returns, saturated the GPU) ``` 最も速度が低下する例: ```python class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): if i % 64 == 0: n = 100 else: n = 1 return "This is a test" * n ``` これは、他の文に比べて非常に長い文が時折あります。その場合、**全体**のバッチは 400 である必要があります。 トークンが長いため、バッチ全体が [64, 4] ではなく [64, 400] になり、速度が大幅に低下します。さらに悪いことに、 バッチが大きくなると、プログラムは単純にクラッシュします。 ``` ------------------------------ Streaming no batching 100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:05<00:00, 183.69it/s] ------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:03<00:00, 265.74it/s] ------------------------------ Streaming batch_size=64 100%|██████████████████████████████████████████████████████████████████████| 1000/1000 [00:26<00:00, 37.80it/s] ------------------------------ Streaming batch_size=256 0%| | 0/1000 [00:00<?, ?it/s] Traceback (most recent call last): File "/home/nicolas/src/transformers/test.py", line 42, in <module> for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)): .... q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) RuntimeError: CUDA out of memory. 
Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch)
```

この問題に対する適切な(一般的な)解決策はなく、有効な緩和策はユースケースによって異なります。ユーザーにとっての経験則は次のとおりです。

- **ハードウェア上で、実際の負荷に対するパフォーマンスを測定します。測って、測って、測り続けてください。実測値だけが進むべき道を教えてくれます。**
- レイテンシに制約がある場合(実際の製品が推論を実行している場合)、バッチ処理を行わないでください。
- CPU を使用している場合は、バッチ処理を行わないでください。
- GPU でスループットを重視する場合(大量の静的データに対してモデルを実行したい場合)は、次のようにします。
  - sequence_length(「自然な」データ)のサイズがまったく見当つかない場合は、デフォルトではバッチ処理せず、測定しながら暫定的にバッチ処理を追加してみてください。失敗したときに回復できるよう OOM チェックを追加します(sequence_length を制御しない限り、いずれどこかで失敗します)。
  - sequence_length が非常に規則的な場合、バッチ処理は非常に有効である可能性が高いので、測定しながら OOM が発生するまでバッチサイズを上げてみてください。
  - GPU が大きいほど、バッチ処理が有効になる可能性が高くなります。
- バッチ処理を有効にしたらすぐに、OOM を適切に処理できることを確認してください。

## Pipeline chunk batching

`zero-shot-classification` と `question-answering` は、単一の入力からモデルの複数のフォワードパスが発生し得るという意味で、少し特殊です。通常の状況では、これにより `batch_size` 引数に関する問題が発生します。

この問題を回避するために、これらのパイプラインはどちらも少し特殊で、通常の `Pipeline` ではなく `ChunkPipeline` になっています。要するに:

```python
preprocessed = pipe.preprocess(inputs)
model_outputs = pipe.forward(preprocessed)
outputs = pipe.postprocess(model_outputs)
```

今は次のようになります:

```python
all_model_outputs = []
for preprocessed in pipe.preprocess(inputs):
    model_outputs = pipe.forward(preprocessed)
    all_model_outputs.append(model_outputs)
outputs = pipe.postprocess(all_model_outputs)
```

パイプラインは同じ方法で使用されるため、これはコードに対して非常に透過的であるはずです。

パイプラインはバッチを自動的に処理できるため、これは簡略化されたビューです。つまり、入力が実際にいくつのフォワードパスをトリガーするかを気にする必要はなく、入力とは独立して `batch_size` を最適化できます。前のセクションの注意事項は引き続き適用されます。

## Pipeline custom code

特定のパイプラインをオーバーライドしたい場合は、目の前のタスクについて issue を作成することをためらわないでください。パイプラインの目標は、使いやすく、ほとんどのユースケースをサポートすることなので、`transformers` があなたのユースケースをサポートできるかもしれません。

単純に試してみたい場合は、次のことができます。

- 選択したパイプラインをサブクラス化します

```python
class MyPipeline(TextClassificationPipeline):
    def postprocess():
        # Your code goes here
        scores = scores * 100
        # And here


my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...)
# or if you use *pipeline* function, then: my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline) ``` これにより、必要なカスタム コードをすべて実行できるようになります。 ## Implementing a pipeline [Implementing a new pipeline](../add_new_pipeline) ## Audio オーディオ タスクに使用できるパイプラインには次のものがあります。 ### AudioClassificationPipeline [[autodoc]] AudioClassificationPipeline - __call__ - all ### AutomaticSpeechRecognitionPipeline [[autodoc]] AutomaticSpeechRecognitionPipeline - __call__ - all ### TextToAudioPipeline [[autodoc]] TextToAudioPipeline - __call__ - all ### ZeroShotAudioClassificationPipeline [[autodoc]] ZeroShotAudioClassificationPipeline - __call__ - all ## Computer vision コンピューター ビジョン タスクに使用できるパイプラインには次のものがあります。 ### DepthEstimationPipeline [[autodoc]] DepthEstimationPipeline - __call__ - all ### ImageClassificationPipeline [[autodoc]] ImageClassificationPipeline - __call__ - all ### ImageSegmentationPipeline [[autodoc]] ImageSegmentationPipeline - __call__ - all ### ImageToImagePipeline [[autodoc]] ImageToImagePipeline - __call__ - all ### ObjectDetectionPipeline [[autodoc]] ObjectDetectionPipeline - __call__ - all ### VideoClassificationPipeline [[autodoc]] VideoClassificationPipeline - __call__ - all ### ZeroShotImageClassificationPipeline [[autodoc]] ZeroShotImageClassificationPipeline - __call__ - all ### ZeroShotObjectDetectionPipeline [[autodoc]] ZeroShotObjectDetectionPipeline - __call__ - all ## Natural Language Processing 自然言語処理タスクに使用できるパイプラインには次のものがあります。 ### FillMaskPipeline [[autodoc]] FillMaskPipeline - __call__ - all ### NerPipeline [[autodoc]] NerPipeline 詳細については、[`TokenClassificationPipeline`] を参照してください。 ### QuestionAnsweringPipeline [[autodoc]] QuestionAnsweringPipeline - __call__ - all ### SummarizationPipeline [[autodoc]] SummarizationPipeline - __call__ - all ### TableQuestionAnsweringPipeline [[autodoc]] TableQuestionAnsweringPipeline - __call__ ### TextClassificationPipeline [[autodoc]] TextClassificationPipeline - __call__ - all ### TextGenerationPipeline [[autodoc]] TextGenerationPipeline - __call__ - all ### Text2TextGenerationPipeline [[autodoc]] Text2TextGenerationPipeline - __call__ - all ### TokenClassificationPipeline [[autodoc]] TokenClassificationPipeline - __call__ - all ### TranslationPipeline [[autodoc]] TranslationPipeline - __call__ - all ### ZeroShotClassificationPipeline [[autodoc]] ZeroShotClassificationPipeline - __call__ - all ## Multimodal マルチモーダル タスクに使用できるパイプラインには次のものがあります。 ### DocumentQuestionAnsweringPipeline [[autodoc]] DocumentQuestionAnsweringPipeline - __call__ - all ### FeatureExtractionPipeline [[autodoc]] FeatureExtractionPipeline - __call__ - all ### ImageFeatureExtractionPipeline [[autodoc]] ImageFeatureExtractionPipeline - __call__ - all ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline - __call__ - all ### VisualQuestionAnsweringPipeline [[autodoc]] VisualQuestionAnsweringPipeline - __call__ - all ## Parent class: `Pipeline` [[autodoc]] Pipeline
transformers/docs/source/ja/main_classes/pipelines.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/pipelines.md", "repo_id": "transformers", "token_count": 6647 }
268
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BEiT ## Overview BEiT モデルは、[BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) で提案されました。 ハンボ・バオ、リー・ドン、フル・ウェイ。 BERT に触発された BEiT は、自己教師ありの事前トレーニングを作成した最初の論文です。 ビジョン トランスフォーマー (ViT) は、教師付き事前トレーニングよりも優れたパフォーマンスを発揮します。クラスを予測するためにモデルを事前トレーニングするのではなく ([オリジナルの ViT 論文](https://arxiv.org/abs/2010.11929) で行われたように) 画像の BEiT モデルは、次のように事前トレーニングされています。 マスクされた OpenAI の [DALL-E モデル](https://arxiv.org/abs/2102.12092) のコードブックからビジュアル トークンを予測します パッチ。 論文の要約は次のとおりです。 *自己教師あり視覚表現モデル BEiT (Bidirectional Encoderpresentation) を導入します。 イメージトランスフォーマーより。自然言語処理分野で開発されたBERTに倣い、マスク画像を提案します。 ビジョントランスフォーマーを事前にトレーニングするためのモデリングタスク。具体的には、事前トレーニングでは各画像に 2 つのビューがあります。 パッチ (16x16 ピクセルなど)、およびビジュアル トークン (つまり、個別のトークン)。まず、元の画像を「トークン化」して、 ビジュアルトークン。次に、いくつかの画像パッチをランダムにマスクし、それらをバックボーンの Transformer に供給します。事前トレーニング 目的は、破損したイメージ パッチに基づいて元のビジュアル トークンを回復することです。 BEiTの事前トレーニング後、 事前トレーニングされたエンコーダーにタスク レイヤーを追加することで、ダウンストリーム タスクのモデル パラメーターを直接微調整します。 画像分類とセマンティックセグメンテーションに関する実験結果は、私たちのモデルが競争力のある結果を達成することを示しています 以前の事前トレーニング方法を使用して。たとえば、基本サイズの BEiT は、ImageNet-1K で 83.2% のトップ 1 精度を達成します。 同じ設定でゼロからの DeiT トレーニング (81.8%) を大幅に上回りました。また、大型BEiTは 86.3% は ImageNet-1K のみを使用しており、ImageNet-22K での教師付き事前トレーニングを使用した ViT-L (85.2%) を上回っています。* ## Usage tips - BEiT モデルは通常のビジョン トランスフォーマーですが、教師ありではなく自己教師ありの方法で事前トレーニングされています。彼らは ImageNet-1K および CIFAR-100 で微調整すると、[オリジナル モデル (ViT)](vit) と [データ効率の高いイメージ トランスフォーマー (DeiT)](deit) の両方を上回るパフォーマンスを発揮します。推論に関するデモノートブックもチェックできます。 カスタム データの微調整は [こちら](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) (置き換えるだけで済みます) [`BeitImageProcessor`] による [`ViTFeatureExtractor`] と [`ViTForImageClassification`] by [`BeitForImageClassification`])。 - DALL-E の画像トークナイザーと BEiT を組み合わせる方法を紹介するデモ ノートブックも利用可能です。 マスクされた画像モデリングを実行します。 [ここ](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/BEiT) で見つけることができます。 - BEiT モデルは各画像が同じサイズ (解像度) であることを期待しているため、次のように使用できます。 [`BeitImageProcessor`] を使用して、モデルの画像のサイズを変更 (または再スケール) し、正規化します。 - 事前トレーニングまたは微調整中に使用されるパッチ解像度と画像解像度の両方が名前に反映されます。 各チェックポイント。たとえば、`microsoft/beit-base-patch16-224`は、パッチ付きの基本サイズのアーキテクチャを指します。 解像度は 16x16、微調整解像度は 224x224 です。すべてのチェックポイントは [ハブ](https://huggingface.co/models?search=microsoft/beit) で見つけることができます。 - 利用可能なチェックポイントは、(1) [ImageNet-22k](http://www.image-net.org/) で事前トレーニングされています ( 1,400 万の画像と 22,000 のクラス) のみ、(2) ImageNet-22k でも微調整、または (3) [ImageNet-1k](http://www.image-net.org/challenges/LSVRC)でも微調整/2012/) (ILSVRC 2012 とも呼ばれ、130 万件のコレクション) 画像と 1,000 クラス)。 - BEiT は、T5 モデルからインスピレーションを得た相対位置埋め込みを使用します。事前トレーニング中に、著者は次のことを共有しました。 いくつかの自己注意層間の相対的な位置の偏り。微調整中、各レイヤーの相対位置 バイアスは、事前トレーニング後に取得された共有相対位置バイアスで初期化されます。ご希望の場合は、 モデルを最初から事前トレーニングするには、`use_relative_position_bias` または 追加するには、[`BeitConfig`] の `use_relative_position_bias` 属性を `True` に設定します。 位置の埋め込み。 <img 
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/beit_architecture.jpg" alt="drawing" width="600"/> <small> BEiT の事前トレーニング。 <a href="https://arxiv.org/abs/2106.08254">元の論文から抜粋。</a> </small> このモデルは、[nielsr](https://huggingface.co/nielsr) によって提供されました。このモデルの JAX/FLAX バージョンは、 [kamalkraj](https://huggingface.co/kamalkraj) による投稿。元のコードは [ここ](https://github.com/microsoft/unilm/tree/master/beit) にあります。 ## Resources BEiT の使用を開始するのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示されている) リソースのリスト。 <PipelineTag pipeline="image-classification"/> - [`BeitForImageClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)。 - 参照: [画像分類タスク ガイド](../tasks/image_classification) **セマンティック セグメンテーション** - [セマンティック セグメンテーション タスク ガイド](../tasks/semantic_segmentation) ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 ## BEiT specific outputs [[autodoc]] models.beit.modeling_beit.BeitModelOutputWithPooling [[autodoc]] models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling ## BeitConfig [[autodoc]] BeitConfig ## BeitFeatureExtractor [[autodoc]] BeitFeatureExtractor - __call__ - post_process_semantic_segmentation ## BeitImageProcessor [[autodoc]] BeitImageProcessor - preprocess - post_process_semantic_segmentation ## BeitModel [[autodoc]] BeitModel - forward ## BeitForMaskedImageModeling [[autodoc]] BeitForMaskedImageModeling - forward ## BeitForImageClassification [[autodoc]] BeitForImageClassification - forward ## BeitForSemanticSegmentation [[autodoc]] BeitForSemanticSegmentation - forward ## FlaxBeitModel [[autodoc]] FlaxBeitModel - __call__ ## FlaxBeitForMaskedImageModeling [[autodoc]] FlaxBeitForMaskedImageModeling - __call__ ## FlaxBeitForImageClassification [[autodoc]] FlaxBeitForImageClassification - __call__
transformers/docs/source/ja/model_doc/beit.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/beit.md", "repo_id": "transformers", "token_count": 3840 }
269
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # BROS ## Overview BROS モデルは、Teakgyu Hon、Donghyun Kim、Mingi Ji, Wonseok Hwang, Daehyun Nam, Sungrae Park によって [BROS: A Pre-trained Language Model Focusing on Text and Layout for Better Key Information Extraction from Documents](https://arxiv.org/abs/2108.04539) で提案されました。 BROS は *BERT Relying On Spatality* の略です。これは、一連のトークンとその境界ボックスを入力として受け取り、一連の隠れ状態を出力するエンコーダー専用の Transformer モデルです。 BROS は、絶対的な空間情報を使用する代わりに、相対的な空間情報をエンコードします。 BERT で使用されるトークンマスク言語モデリング目標 (TMLM) と新しいエリアマスク言語モデリング目標 (AMLM) の 2 つの目標で事前トレーニングされています。 TMLM では、トークンはランダムにマスクされ、モデルは空間情報と他のマスクされていないトークンを使用してマスクされたトークンを予測します。 AMLM は TMLM の 2D バージョンです。テキスト トークンをランダムにマスクし、TMLM と同じ情報で予測しますが、テキスト ブロック (領域) をマスクします。 `BrosForTokenClassification`には、BrosModel の上に単純な線形層があります。各トークンのラベルを予測します。 `BrosSpadeEEForTokenClassification`には、BrosModel の上に`initial_token_classifier`と`subsequent_token_classifier`があります。 `initial_token_classifier` は各エンティティの最初のトークンを予測するために使用され、`subsequent_token_classifier` はエンティティ内の次のトークンを予測するために使用されます。 `BrosSpadeELForTokenClassification`には BrosModel の上に`entity_linker`があります。 `entity_linker` は 2 つのエンティティ間の関係を予測するために使用されます。 `BrosForTokenClassification`と`BrosSpadeEEForTokenClassification`は基本的に同じジョブを実行します。ただし、`BrosForTokenClassification`は入力トークンが完全にシリアル化されていることを前提としています (トークンは 2D 空間に存在するため、これは非常に困難な作業です)。一方、`BrosSpadeEEForTokenClassification`は 1 つのトークンから次の接続トークンを予測するため、シリアル化エラーの処理をより柔軟に行うことができます。 `BrosSpadeELForTokenClassification` はエンティティ内のリンク タスクを実行します。これら 2 つのエンティティが何らかの関係を共有する場合、(あるエンティティの) 1 つのトークンから (別のエンティティの) 別のトークンへの関係を予測します。 BROS は、明示的な視覚機能に依存せずに、FUNSD、SROIE、CORD、SciTSR などの Key Information Extraction (KIE) ベンチマークで同等以上の結果を達成します。 論文の要約は次のとおりです。 *文書画像からの重要情報抽出 (KIE) には、2 次元 (2D) 空間におけるテキストの文脈的および空間的意味論を理解する必要があります。最近の研究の多くは、文書画像の視覚的特徴とテキストおよびそのレイアウトを組み合わせることに重点を置いた事前トレーニング済み言語モデルを開発することで、この課題を解決しようとしています。一方、このペーパーでは、テキストとレイアウトの効果的な組み合わせという基本に立ち返ってこの問題に取り組みます。具体的には、BROS (BERT Relying On Spatality) という名前の事前トレーニング済み言語モデルを提案します。この言語モデルは、2D 空間内のテキストの相対位置をエンコードし、エリア マスキング戦略を使用してラベルのないドキュメントから学習します。 2D 空間内のテキストを理解するためのこの最適化されたトレーニング スキームにより、BROS は、視覚的な特徴に依存することなく、4 つの KIE ベンチマーク (FUNSD、SROIE*、CORD、および SciTSR) で以前の方法と比較して同等以上のパフォーマンスを示しました。また、この論文では、KIE タスクにおける 2 つの現実世界の課題 ((1) 間違ったテキスト順序によるエラーの最小化、および (2) 少数の下流例からの効率的な学習) を明らかにし、以前の方法に対する BROS の優位性を実証します。* このモデルは [jinho8345](https://huggingface.co/jinho8345) によって寄稿されました。元のコードは [ここ](https://github.com/clovaai/bros) にあります。 ## Usage tips and examples - [`~transformers.BrosModel.forward`] には、`input_ids` と `bbox` (バウンディング ボックス) が必要です。各境界ボックスは、(x0、y0、x1、y1) 形式 (左上隅、右下隅) である必要があります。境界ボックスの取得は外部 OCR システムに依存します。 「x」座標はドキュメント画像の幅で正規化する必要があり、「y」座標はドキュメント画像の高さで正規化する必要があります。 ```python def expand_and_normalize_bbox(bboxes, doc_width, doc_height): # here, bboxes are numpy array # Normalize bbox -> 0 ~ 1 bboxes[:, [0, 2]] = bboxes[:, [0, 2]] / width bboxes[:, [1, 3]] = bboxes[:, [1, 3]] / height ``` - 
[`~transformers.BrosForTokenClassification.forward`、`~transformers.BrosSpadeEEForTokenClassification.forward`、`~transformers.BrosSpadeEEForTokenClassification.forward`] では、損失計算に `input_ids` と `bbox` だけでなく `box_first_token_mask` も必要です。これは、各ボックスの先頭以外のトークンを除外するためのマスクです。このマスクは、単語から `input_ids` を作成するときに境界ボックスの開始トークン インデックスを保存することで取得できます。次のコードで`box_first_token_mask`を作成できます。 ```python def make_box_first_token_mask(bboxes, words, tokenizer, max_seq_length=512): box_first_token_mask = np.zeros(max_seq_length, dtype=np.bool_) # encode(tokenize) each word from words (List[str]) input_ids_list: List[List[int]] = [tokenizer.encode(e, add_special_tokens=False) for e in words] # get the length of each box tokens_length_list: List[int] = [len(l) for l in input_ids_list] box_end_token_indices = np.array(list(itertools.accumulate(tokens_length_list))) box_start_token_indices = box_end_token_indices - np.array(tokens_length_list) # filter out the indices that are out of max_seq_length box_end_token_indices = box_end_token_indices[box_end_token_indices < max_seq_length - 1] if len(box_start_token_indices) > len(box_end_token_indices): box_start_token_indices = box_start_token_indices[: len(box_end_token_indices)] # set box_start_token_indices to True box_first_token_mask[box_start_token_indices] = True return box_first_token_mask ``` ## Resources - デモ スクリプトは [こちら](https://github.com/clovaai/bros) にあります。 ## BrosConfig [[autodoc]] BrosConfig ## BrosProcessor [[autodoc]] BrosProcessor - __call__ ## BrosModel [[autodoc]] BrosModel - forward ## BrosForTokenClassification [[autodoc]] BrosForTokenClassification - forward ## BrosSpadeEEForTokenClassification [[autodoc]] BrosSpadeEEForTokenClassification - forward ## BrosSpadeELForTokenClassification [[autodoc]] BrosSpadeELForTokenClassification - forward
transformers/docs/source/ja/model_doc/bros.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bros.md", "repo_id": "transformers", "token_count": 3458 }
270
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # The Transformer model family 2017年に導入されて以来、[元のTransformer](https://arxiv.org/abs/1706.03762)モデルは、自然言語処理(NLP)のタスクを超える多くの新しいエキサイティングなモデルをインスパイアしました。[タンパク質の折りたたまれた構造を予測](https://huggingface.co/blog/deep-learning-with-proteins)するモデル、[チーターを走らせるためのトレーニング](https://huggingface.co/blog/train-decision-transformers)するモデル、そして[時系列予測](https://huggingface.co/blog/time-series-transformers)のためのモデルなどがあります。Transformerのさまざまなバリアントが利用可能ですが、大局を見落とすことがあります。これらのすべてのモデルに共通するのは、元のTransformerアーキテクチャに基づいていることです。一部のモデルはエンコーダまたはデコーダのみを使用し、他のモデルは両方を使用します。これは、Transformerファミリー内のモデルの高レベルの違いをカテゴライズし、調査するための有用な分類法を提供し、以前に出会ったことのないTransformerを理解するのに役立ちます。 元のTransformerモデルに慣れていないか、リフレッシュが必要な場合は、Hugging Faceコースの[Transformerの動作原理](https://huggingface.co/course/chapter1/4?fw=pt)章をチェックしてください。 <div align="center"> <iframe width="560" height="315" src="https://www.youtube.com/embed/H39Z_720T5s" title="YouTubeビデオプレーヤー" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> </div> ## Computer vision <iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2FacQBpeFBVvrDUlzFlkejoz%2FModelscape-timeline%3Fnode-id%3D0%253A1%26t%3Dm0zJ7m2BQ9oe0WtO-1" allowfullscreen></iframe> ### Convolutional network 長い間、畳み込みネットワーク(CNN)はコンピュータビジョンのタスクにおいて支配的なパラダイムでしたが、[ビジョンTransformer](https://arxiv.org/abs/2010.11929)はそのスケーラビリティと効率性を示しました。それでも、一部のCNNの最高の特性、特に特定のタスクにとっては非常に強力な翻訳不変性など、一部のTransformerはアーキテクチャに畳み込みを組み込んでいます。[ConvNeXt](model_doc/convnext)は、畳み込みを現代化するためにTransformerから設計の選択肢を取り入れ、例えば、ConvNeXtは画像をパッチに分割するために重なり合わないスライディングウィンドウと、グローバル受容野を増加させるための大きなカーネルを使用します。ConvNeXtは、メモリ効率を向上させ、パフォーマンスを向上させるためにいくつかのレイヤーデザインの選択肢も提供し、Transformerと競合的になります! 
### Encoder[[cv-encoder]] [ビジョン トランスフォーマー(ViT)](model_doc/vit) は、畳み込みを使用しないコンピュータビジョンタスクの扉を開けました。ViT は標準のトランスフォーマーエンコーダーを使用しますが、画像を扱う方法が主要なブレークスルーでした。画像を固定サイズのパッチに分割し、それらをトークンのように使用して埋め込みを作成します。ViT は、当時のCNNと競争力のある結果を示すためにトランスフォーマーの効率的なアーキテクチャを活用しましたが、トレーニングに必要なリソースが少なくて済みました。ViT に続いて、セグメンテーションや検出などの密なビジョンタスクを処理できる他のビジョンモデルも登場しました。 これらのモデルの1つが[Swin](model_doc/swin) トランスフォーマーです。Swin トランスフォーマーは、より小さなサイズのパッチから階層的な特徴マップ(CNNのようで ViT とは異なります)を構築し、深層のパッチと隣接するパッチとマージします。注意はローカルウィンドウ内でのみ計算され、ウィンドウは注意のレイヤー間でシフトされ、モデルがより良く学習するのをサポートする接続を作成します。Swin トランスフォーマーは階層的な特徴マップを生成できるため、セグメンテーションや検出などの密な予測タスクに適しています。[SegFormer](model_doc/segformer) も階層的な特徴マップを構築するためにトランスフォーマーエンコーダーを使用しますが、すべての特徴マップを組み合わせて予測するためにシンプルなマルチレイヤーパーセプトロン(MLP)デコーダーを追加します。 BeIT および ViTMAE などの他のビジョンモデルは、BERTの事前トレーニング目標からインスピレーションを得ました。[BeIT](model_doc/beit) は *masked image modeling (MIM)* によって事前トレーニングされています。画像パッチはランダムにマスクされ、画像も視覚トークンにトークン化されます。BeIT はマスクされたパッチに対応する視覚トークンを予測するようにトレーニングされます。[ViTMAE](model_doc/vitmae) も似たような事前トレーニング目標を持っており、視覚トークンの代わりにピクセルを予測する必要があります。異例なのは画像パッチの75%がマスクされていることです!デコーダーはマスクされたトークンとエンコードされたパッチからピクセルを再構築します。事前トレーニングの後、デコーダーは捨てられ、エンコーダーはダウンストリームのタスクで使用できる状態です。 ### Decoder[[cv-decoder]] デコーダーのみのビジョンモデルは珍しいです。なぜなら、ほとんどのビジョンモデルは画像表現を学ぶためにエンコーダーを使用するからです。しかし、画像生成などのユースケースでは、デコーダーは自然な適応です。これは、GPT-2などのテキスト生成モデルから見てきたように、[ImageGPT](model_doc/imagegpt) でも同様のアーキテクチャを使用しますが、シーケンス内の次のトークンを予測する代わりに、画像内の次のピクセルを予測します。画像生成に加えて、ImageGPT は画像分類のためにもファインチューニングできます。 ### Encoder-decoder[[cv-encoder-decoder]] ビジョンモデルは一般的にエンコーダー(バックボーンとも呼ばれます)を使用して重要な画像特徴を抽出し、それをトランスフォーマーデコーダーに渡すために使用します。[DETR](model_doc/detr) は事前トレーニング済みのバックボーンを持っていますが、オブジェクト検出のために完全なトランスフォーマーエンコーダーデコーダーアーキテクチャも使用しています。エンコーダーは画像表現を学び、デコーダー内のオブジェクトクエリ(各オブジェクトクエリは画像内の領域またはオブジェクトに焦点を当てた学習された埋め込みです)と組み合わせます。DETR は各オブジェクトクエリに対する境界ボックスの座標とクラスラベルを予測します。 ## Natural lanaguage processing <iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2FUhbQAZDlpYW5XEpdFy6GoG%2Fnlp-model-timeline%3Fnode-id%3D0%253A1%26t%3D4mZMr4r1vDEYGJ50-1" allowfullscreen></iframe> ### Encoder[[nlp-encoder]] [BERT](model_doc/bert) はエンコーダー専用のTransformerで、入力の一部のトークンをランダムにマスクして他のトークンを見ないようにしています。これにより、トークンをマスクした文脈に基づいてマスクされたトークンを予測することが事前トレーニングの目標です。これにより、BERTは入力のより深いかつ豊かな表現を学習するのに左右の文脈を完全に活用できます。しかし、BERTの事前トレーニング戦略にはまだ改善の余地がありました。[RoBERTa](model_doc/roberta) は、トレーニングを長時間行い、より大きなバッチでトレーニングし、事前処理中に一度だけでなく各エポックでトークンをランダムにマスクし、次文予測の目標を削除する新しい事前トレーニングレシピを導入することでこれを改善しました。 性能を向上させる主要な戦略はモデルのサイズを増やすことですが、大規模なモデルのトレーニングは計算コストがかかります。計算コストを削減する方法の1つは、[DistilBERT](model_doc/distilbert) のような小さなモデルを使用することです。DistilBERTは[知識蒸留](https://arxiv.org/abs/1503.02531) - 圧縮技術 - を使用して、BERTのほぼすべての言語理解機能を保持しながら、より小さなバージョンを作成します。 しかし、ほとんどのTransformerモデルは引き続きより多くのパラメータに焦点を当て、トレーニング効率を向上させる新しいモデルが登場しています。[ALBERT](model_doc/albert) は、2つの方法でパラメータの数を減らすことによってメモリ消費量を削減します。大きな語彙埋め込みを2つの小さな行列に分割し、レイヤーがパラメータを共有できるようにします。[DeBERTa](model_doc/deberta) は、単語とその位置を2つのベクトルで別々にエンコードする解かれた注意機構を追加しました。注意はこれらの別々のベクトルから計算されます。単語と位置の埋め込みが含まれる単一のベクトルではなく、[Longformer](model_doc/longformer) は、特に長いシーケンス長のドキュメントを処理するために注意をより効率的にすることに焦点を当てました。固定されたウィンドウサイズの周りの各トークンから計算されるローカルウィンドウ付き注意(特定のタスクトークン(分類のための `[CLS]` など)のみのためのグローバルな注意を含む)の組み合わせを使用して、完全な注意行列ではなく疎な注意行列を作成します。 ### Decoder[[nlp-decoder]] 
[GPT-2](model_doc/gpt2)は、シーケンス内の次の単語を予測するデコーダー専用のTransformerです。モデルは先を見ることができないようにトークンを右にマスクし、"のぞき見"を防ぎます。大量のテキストを事前トレーニングしたことにより、GPT-2はテキスト生成が非常に得意で、テキストが正確であることがあるにしても、時折正確ではないことがあります。しかし、GPT-2にはBERTの事前トレーニングからの双方向コンテキストが不足しており、特定のタスクには適していませんでした。[XLNET](model_doc/xlnet)は、双方向に学習できる順列言語モデリング目標(PLM)を使用することで、BERTとGPT-2の事前トレーニング目標のベストを組み合わせています。 GPT-2の後、言語モデルはさらに大きく成長し、今では*大規模言語モデル(LLM)*として知られています。大規模なデータセットで事前トレーニングされれば、LLMはほぼゼロショット学習を示すことがあります。[GPT-J](model_doc/gptj)は、6Bのパラメータを持つLLMで、400Bのトークンでトレーニングされています。GPT-Jには[OPT](model_doc/opt)が続き、そのうち最大のモデルは175Bで、180Bのトークンでトレーニングされています。同じ時期に[BLOOM](model_doc/bloom)がリリースされ、このファミリーの最大のモデルは176Bのパラメータを持ち、46の言語と13のプログラミング言語で366Bのトークンでトレーニングされています。 ### Encoder-decoder[[nlp-encoder-decoder]] [BART](model_doc/bart)は、元のTransformerアーキテクチャを保持していますが、事前トレーニング目標を*テキスト補完*の破損に変更しています。一部のテキストスパンは単一の`mask`トークンで置換されます。デコーダーは破損していないトークンを予測し(未来のトークンはマスクされます)、エンコーダーの隠れた状態を使用して予測を補助します。[Pegasus](model_doc/pegasus)はBARTに似ていますが、Pegasusはテキストスパンの代わりに文全体をマスクします。マスクされた言語モデリングに加えて、Pegasusはギャップ文生成(GSG)によって事前トレーニングされています。GSGの目標は、文書に重要な文をマスクし、それらを`mask`トークンで置換することです。デコーダーは残りの文から出力を生成しなければなりません。[T5](model_doc/t5)は、すべてのNLPタスクを特定のプレフィックスを使用してテキスト対テキストの問題に変換するよりユニークなモデルです。たとえば、プレフィックス`Summarize:`は要約タスクを示します。T5は教師ありトレーニング(GLUEとSuperGLUE)と自己教師ありトレーニング(トークンの15%をランダムにサンプルしドロップアウト)によって事前トレーニングされています。 ## Audio <iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2Fvrchl8jDV9YwNVPWu2W0kK%2Fspeech-and-audio-model-timeline%3Fnode-id%3D0%253A1%26t%3DmM4H8pPMuK23rClL-1" allowfullscreen></iframe> ### Encoder[[audio-encoder]] [Wav2Vec2](model_doc/wav2vec2) は、生のオーディオ波形から直接音声表現を学習するためのTransformerエンコーダーを使用します。これは、対照的なタスクで事前学習され、一連の偽の表現から真の音声表現を特定します。 [HuBERT](model_doc/hubert) はWav2Vec2に似ていますが、異なるトレーニングプロセスを持っています。ターゲットラベルは、類似したオーディオセグメントがクラスタに割り当てられ、これが隠れユニットになるクラスタリングステップによって作成されます。隠れユニットは埋め込みにマップされ、予測を行います。 ### Encoder-decoder[[audio-encoder-decoder]] [Speech2Text](model_doc/speech_to_text) は、自動音声認識(ASR)および音声翻訳のために設計された音声モデルです。このモデルは、オーディオ波形から抽出されたログメルフィルターバンクフィーチャーを受け入れ、事前トレーニングされた自己回帰的にトランスクリプトまたは翻訳を生成します。 [Whisper](model_doc/whisper) もASRモデルですが、他の多くの音声モデルとは異なり、✨ ラベル付き ✨ オーディオトランスクリプションデータを大量に事前に学習して、ゼロショットパフォーマンスを実現します。データセットの大部分には非英語の言語も含まれており、Whisperは低リソース言語にも使用できます。構造的には、WhisperはSpeech2Textに似ています。オーディオ信号はエンコーダーによってエンコードされたログメルスペクトログラムに変換されます。デコーダーはエンコーダーの隠れ状態と前のトークンからトランスクリプトを自己回帰的に生成します。 ## Multimodal <iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2FcX125FQHXJS2gxeICiY93p%2Fmultimodal%3Fnode-id%3D0%253A1%26t%3DhPQwdx3HFPWJWnVf-1" allowfullscreen></iframe> ### Encoder[[mm-encoder]] [VisualBERT](model_doc/visual_bert) は、BERTの後にリリースされたビジョン言語タスク向けのマルチモーダルモデルです。これはBERTと事前トレーニングされた物体検出システムを組み合わせ、画像特徴をビジュアル埋め込みに抽出し、テキスト埋め込みと一緒にBERTに渡します。VisualBERTは非マスクテキストを基にしたマスクテキストを予測し、テキストが画像と整合しているかどうかも予測する必要があります。ViTがリリースされた際、[ViLT](model_doc/vilt) は画像埋め込みを取得するためにこの方法を採用しました。画像埋め込みはテキスト埋め込みと共に共同で処理されます。それから、ViLTは画像テキストマッチング、マスク言語モデリング、および全単語マスキングによる事前トレーニングが行われます。 [CLIP](model_doc/clip) は異なるアプローチを取り、(`画像`、`テキスト`) のペア予測を行います。画像エンコーダー(ViT)とテキストエンコーダー(Transformer)は、(`画像`、`テキスト`) ペアデータセット上で共同トレーニングされ、(`画像`、`テキスト`) ペアの画像とテキストの埋め込みの類似性を最大化します。事前トレーニング後、CLIPを使用して画像からテキストを予測したり、その逆を行うことができます。[OWL-ViT](model_doc/owlvit) は、ゼロショット物体検出のバックボーンとしてCLIPを使用しています。事前トレーニング後、物体検出ヘッドが追加され、(`クラス`、`バウンディングボックス`) ペアに対するセット予測が行われます。 ### Encoder-decoder[[mm-encoder-decoder]] 
光学文字認識(OCR)は、通常、画像を理解しテキストを生成するために複数のコンポーネントが関与するテキスト認識タスクです。 [TrOCR](model_doc/trocr) は、エンドツーエンドのTransformerを使用してこのプロセスを簡略化します。エンコーダーは画像を固定サイズのパッチとして処理するためのViTスタイルのモデルであり、デコーダーはエンコーダーの隠れ状態を受け入れ、テキストを自己回帰的に生成します。[Donut](model_doc/donut) はOCRベースのアプローチに依存しないより一般的なビジュアルドキュメント理解モデルで、エンコーダーとしてSwin Transformer、デコーダーとして多言語BARTを使用します。 Donutは画像とテキストの注釈に基づいて次の単語を予測することにより、テキストを読むために事前トレーニングされます。デコーダーはプロンプトを与えられたトークンシーケンスを生成します。プロンプトは各ダウンストリームタスクごとに特別なトークンを使用して表現されます。例えば、ドキュメントの解析には`解析`トークンがあり、エンコーダーの隠れ状態と組み合わされてドキュメントを構造化された出力フォーマット(JSON)に解析します。 ## Reinforcement learning <iframe style="border: 1px solid rgba(0, 0, 0, 0.1);" width="1000" height="450" src="https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Ffile%2FiB3Y6RvWYki7ZuKO6tNgZq%2Freinforcement-learning%3Fnode-id%3D0%253A1%26t%3DhPQwdx3HFPWJWnVf-1" allowfullscreen></iframe> ### Decoder[[rl-decoder]] 意思決定と軌跡トランスフォーマーは、状態、アクション、報酬をシーケンスモデリングの問題として捉えます。 [Decision Transformer](model_doc/decision_transformer) は、リターン・トゥ・ゴー、過去の状態、およびアクションに基づいて将来の希望リターンにつながるアクションの系列を生成します。最後の *K* タイムステップでは、3つのモダリティそれぞれがトークン埋め込みに変換され、将来のアクショントークンを予測するためにGPTのようなモデルによって処理されます。[Trajectory Transformer](model_doc/trajectory_transformer) も状態、アクション、報酬をトークン化し、GPTアーキテクチャで処理します。報酬調整に焦点を当てたDecision Transformerとは異なり、Trajectory Transformerはビームサーチを使用して将来のアクションを生成します。
transformers/docs/source/ja/model_summary.md/0
{ "file_path": "transformers/docs/source/ja/model_summary.md", "repo_id": "transformers", "token_count": 9488 }
271
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Training on TPU with TensorFlow <Tip> 詳細な説明が不要で、単にTPUのコードサンプルを入手してトレーニングを開始したい場合は、[私たちのTPUの例のノートブックをチェックしてください!](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb) </Tip> ### What is a TPU? TPUは**Tensor Processing Unit(テンソル処理ユニット)**の略です。これらはGoogleが設計したハードウェアで、ニューラルネットワーク内のテンソル計算を大幅に高速化するために使用されます。これはGPUのようなものです。ネットワークのトレーニングと推論の両方に使用できます。一般的にはGoogleのクラウドサービスを介してアクセスされますが、Google ColabとKaggle Kernelsを通じても無料で小規模のTPUに直接アクセスできます。 [🤗 TransformersのすべてのTensorFlowモデルはKerasモデルです](https://huggingface.co/blog/tensorflow-philosophy)ので、この文書のほとんどの方法は一般的にKerasモデル用のTPUトレーニングに適用できます!ただし、TransformersとDatasetsのHuggingFaceエコシステム(hug-o-system?)に固有のポイントもいくつかあり、それについては適用するときにそれを示します。 ### What kinds of TPU are available? 新しいユーザーは、さまざまなTPUとそのアクセス方法に関する幅広い情報によく混乱します。理解するための最初の重要な違いは、**TPUノード**と**TPU VM**の違いです。 **TPUノード**を使用すると、事実上リモートのTPUに間接的にアクセスします。別個のVMが必要で、ネットワークとデータパイプラインを初期化し、それらをリモートノードに転送します。Google ColabでTPUを使用すると、**TPUノード**スタイルでアクセスしています。 TPUノードを使用すると、それに慣れていない人々にはかなり予期しない動作が発生することがあります!特に、TPUはPythonコードを実行しているマシンと物理的に異なるシステムに配置されているため、データはローカルマシンにローカルで格納されているデータパイプラインが完全に失敗します。代わりに、データはGoogle Cloud Storageに格納する必要があります。ここでデータパイプラインはリモートのTPUノードで実行されている場合でも、データにアクセスできます。 <Tip> すべてのデータを`np.ndarray`または`tf.Tensor`としてメモリに収めることができる場合、ColabまたはTPUノードを使用している場合でも、データをGoogle Cloud Storageにアップロードせずに`fit()`でトレーニングできます。 </Tip> <Tip> **🤗 Hugging Face固有のヒント🤗:** TFコードの例でよく見るであろう`Dataset.to_tf_dataset()`とその高レベルのラッパーである`model.prepare_tf_dataset()`は、TPUノードで失敗します。これは、`tf.data.Dataset`を作成しているにもかかわらず、それが「純粋な」`tf.data`パイプラインではなく、`tf.numpy_function`または`Dataset.from_generator()`を使用して基盤となるHuggingFace `Dataset`からデータをストリームで読み込むことからです。このHuggingFace `Dataset`はローカルディスク上のデータをバックアップしており、リモートTPUノードが読み取ることができないためです。 </Tip> TPUにアクセスする第二の方法は、**TPU VM**を介してです。TPU VMを使用する場合、TPUが接続されているマシンに直接接続します。これはGPU VMでトレーニングを行うのと同様です。TPU VMは一般的にデータパイプラインに関しては特に作業がしやすく、上記のすべての警告はTPU VMには適用されません! これは主観的な文書ですので、こちらの意見です:**可能な限りTPUノードの使用を避けてください。** TPU VMよりも混乱しやすく、デバッグが難しいです。将来的にはサポートされなくなる可能性もあります - Googleの最新のTPUであるTPUv4は、TPU VMとしてのみアクセスできるため、TPUノードは将来的には「レガシー」のアクセス方法になる可能性が高いです。ただし、無料でTPUにアクセスできるのはColabとKaggle Kernelsの場合があります。その場合、どうしても使用しなければならない場合の取り扱い方法を説明しようとします!詳細は[TPUの例のノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb)で詳細な説明を確認してください。 ### What sizes of TPU are available? 単一のTPU(v2-8/v3-8/v4-8)は8つのレプリカを実行します。TPUは数百から数千のレプリカを同時に実行できる**ポッド**に存在します。単一のTPUよりも多くのTPUを使用するが、ポッド全体ではない場合(たとえばv3-32)、TPUフリートは**ポッドスライス**として参照されます。 Colabを介して無料のTPUにアクセスする場合、通常は単一のv2-8 TPUが提供されます。 ### I keep hearing about this XLA thing. What’s XLA, and how does it relate to TPUs? XLAは、TensorFlowとJAXの両方で使用される最適化コンパイラです。JAXでは唯一のコンパイラであり、TensorFlowではオプションですが(しかしTPUでは必須です!)、Kerasモデルをトレーニングする際に`model.compile()`に引数`jit_compile=True`を渡すことで最も簡単に有効にできます。エラーが発生せず、パフォーマンスが良好であれば、それはTPUに移行する準備が整った良い兆候です! 
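例えば、Keras モデルで XLA コンパイルを有効にする最小の例は次のようになります(ここで使っているチェックポイント名と optimizer は説明用の仮の選択です)。

```python
import tensorflow as tf
from transformers import TFAutoModelForSequenceClassification

model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")

# jit_compile=True を渡すだけで XLA コンパイルが有効になります
model.compile(optimizer="adam", jit_compile=True)
```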
TPU上でのデバッグは一般的にCPU/GPUよりも少し難しいため、TPUで試す前にまずCPU/GPUでXLAを使用してコードを実行することをお勧めします。もちろん、長時間トレーニングする必要はありません。モデルとデータパイプラインが期待通りに動作するかを確認するための数ステップだけです。 <Tip> XLAコンパイルされたコードは通常高速です。したがって、TPUで実行する予定がない場合でも、`jit_compile=True`を追加することでパフォーマンスを向上させることができます。ただし、以下のXLA互換性に関する注意事項に注意してください! </Tip> <Tip warning={true}> **苦い経験から生まれたヒント:** `jit_compile=True`を使用することは、CPU/GPUコードがXLA互換であることを確認し、速度を向上させる良い方法ですが、実際にTPUでコードを実行する際には多くの問題を引き起こす可能性があります。 XLAコンパイルはTPU上で暗黙的に行われるため、実際にコードをTPUで実行する前にその行を削除することを忘れないでください! </Tip> ### How do I make my model XLA compatible? 多くの場合、コードはすでにXLA互換かもしれません!ただし、XLAでは動作する通常のTensorFlowでも動作しないいくつかの要素があります。以下に、3つの主要なルールにまとめています: <Tip> **🤗 HuggingFace固有のヒント🤗:** TensorFlowモデルと損失関数をXLA互換に書き直すために多くの努力を払っています。通常、モデルと損失関数はデフォルトでルール#1と#2に従っているため、`transformers`モデルを使用している場合はこれらをスキップできます。ただし、独自のモデルと損失関数を記述する場合は、これらのルールを忘れないでください! </Tip> #### XLA Rule #1: Your code cannot have “data-dependent conditionals” これは、任意の`if`ステートメントが`tf.Tensor`内の値に依存していない必要があることを意味します。例えば、次のコードブロックはXLAでコンパイルできません! ```python if tf.reduce_sum(tensor) > 10: tensor = tensor / 2.0 ``` これは最初は非常に制限的に思えるかもしれませんが、ほとんどのニューラルネットコードはこれを行う必要はありません。通常、この制約を回避するために`tf.cond`を使用するか(ドキュメントはこちらを参照)、条件を削除して代わりに指示変数を使用したりすることができます。次のように: ```python sum_over_10 = tf.cast(tf.reduce_sum(tensor) > 10, tf.float32) tensor = tensor / (1.0 + sum_over_10) ``` このコードは、上記のコードとまったく同じ効果を持っていますが、条件を回避することで、XLAで問題なくコンパイルできることを確認します! #### XLA Rule #2: Your code cannot have “data-dependent shapes” これは、コード内のすべての `tf.Tensor` オブジェクトの形状が、その値に依存しないことを意味します。たとえば、`tf.unique` 関数はXLAでコンパイルできないので、このルールに違反します。なぜなら、これは入力 `Tensor` の一意の値の各インスタンスを含む `tensor` を返すためです。この出力の形状は、入力 `Tensor` の重複具合によって異なるため、XLAはそれを処理しないことになります! 一般的に、ほとんどのニューラルネットワークコードはデフォルトでルール#2に従います。ただし、いくつかの一般的なケースでは問題が発生することがあります。非常に一般的なケースの1つは、**ラベルマスキング**を使用する場合です。ラベルを無視して損失を計算する場所を示すために、ラベルを負の値に設定する方法です。NumPyまたはPyTorchのラベルマスキングをサポートする損失関数を見ると、次のような[ブールインデックス](https://numpy.org/doc/stable/user/basics.indexing.html#boolean-array-indexing)を使用したコードがよく見られます: ```python label_mask = labels >= 0 masked_outputs = outputs[label_mask] masked_labels = labels[label_mask] loss = compute_loss(masked_outputs, masked_labels) mean_loss = torch.mean(loss) ``` このコードはNumPyやPyTorchでは完全に機能しますが、XLAでは動作しません!なぜなら、`masked_outputs`と`masked_labels`の形状はマスクされた位置の数に依存するため、これは**データ依存の形状**になります。ただし、ルール#1と同様に、このコードを書き直して、データ依存の形状なしでまったく同じ出力を生成できることがあります。 ```python label_mask = tf.cast(labels >= 0, tf.float32) loss = compute_loss(outputs, labels) loss = loss * label_mask # Set negative label positions to 0 mean_loss = tf.reduce_sum(loss) / tf.reduce_sum(label_mask) ``` ここでは、データ依存の形状を避けるために、各位置で損失を計算してから、平均を計算する際に分子と分母の両方でマスクされた位置をゼロ化する方法を紹介します。これにより、最初のアプローチとまったく同じ結果が得られますが、XLA互換性を維持します。注意点として、ルール#1と同じトリックを使用します - `tf.bool`を`tf.float32`に変換して指標変数として使用します。これは非常に便利なトリックですので、自分のコードをXLAに変換する必要がある場合には覚えておいてください! #### XLA Rule #3: XLA will need to recompile your model for every different input shape it sees これは重要なルールです。これはつまり、入力形状が非常に変動的な場合、XLA はモデルを何度も再コンパイルする必要があるため、大きなパフォーマンスの問題が発生する可能性があるということです。これは NLP モデルで一般的に発生し、トークナイズ後の入力テキストの長さが異なる場合があります。他のモダリティでは、静的な形状が一般的であり、このルールはほとんど問題になりません。 ルール#3を回避する方法は何でしょうか?鍵は「パディング」です - すべての入力を同じ長さにパディングし、次に「attention_mask」を使用することで、可変形状と同じ結果を得ることができますが、XLA の問題は発生しません。ただし、過度のパディングも深刻な遅延を引き起こす可能性があります - データセット全体で最大の長さにすべてのサンプルをパディングすると、多くの計算とメモリを無駄にする可能性があります! この問題には完璧な解決策はありませんが、いくつかのトリックを試すことができます。非常に便利なトリックの1つは、**バッチのサンプルを32または64トークンの倍数までパディングする**ことです。これにより、トークン数がわずかに増加するだけで、すべての入力形状が32または64の倍数である必要があるため、一意の入力形状の数が大幅に減少します。一意の入力形状が少ないと、XLA の再コンパイルが少なくなります! 
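例えば、トークナイズ時に系列長を 64 の倍数に揃えるだけで、ユニークな入力形状の数を大きく減らせます(以下はチェックポイント名を仮置きした小さなスケッチです)。

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

batch = tokenizer(
    ["a short sentence", "a noticeably longer example sentence for padding"],
    padding="longest",
    pad_to_multiple_of=64,  # 系列長が常に 64 の倍数になる
    return_tensors="tf",
)
print(batch["input_ids"].shape)  # 例: (2, 64)
```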
<Tip> **🤗 HuggingFace に関する具体的なヒント🤗:** 弊社のトークナイザーとデータコレクターには、ここで役立つメソッドがあります。トークナイザーを呼び出す際に `padding="max_length"` または `padding="longest"` を使用して、パディングされたデータを出力するように設定できます。トークナイザーとデータコレクターには、一意の入力形状の数を減らすのに役立つ `pad_to_multiple_of` 引数もあります! </Tip> ### How do I actually train my model on TPU? 一度トレーニングが XLA 互換性があることを確認し、(TPU Node/Colab を使用する場合は)データセットが適切に準備されている場合、TPU 上で実行することは驚くほど簡単です!コードを変更する必要があるのは、いくつかの行を追加して TPU を初期化し、モデルとデータセットが `TPUStrategy` スコープ内で作成されるようにすることだけです。これを実際に見るには、[TPU のサンプルノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb)をご覧ください! ### Summary ここでは多くの情報が提供されましたので、TPU でモデルをトレーニングする際に以下のチェックリストを使用できます: - コードが XLA の三つのルールに従っていることを確認します。 - CPU/GPU で `jit_compile=True` を使用してモデルをコンパイルし、XLA でトレーニングできることを確認します。 - データセットをメモリに読み込むか、TPU 互換のデータセット読み込みアプローチを使用します([ノートブックを参照](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb))。 - コードを Colab(アクセラレータを「TPU」に設定)または Google Cloud の TPU VM に移行します。 - TPU 初期化コードを追加します([ノートブックを参照](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb))。 - `TPUStrategy` を作成し、データセットの読み込みとモデルの作成が `strategy.scope()` 内で行われることを確認します([ノートブックを参照](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb))。 - TPU に移行する際に `jit_compile=True` を外すのを忘れないでください! - 🙏🙏🙏🥺🥺🥺 - `model.fit()` を呼び出します。 - おめでとうございます!
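チェックリストの「TPU 初期化コード」と「`TPUStrategy`」の部分は、概ね次のようなスケッチになります(`create_model()` はここでの説明用の仮の関数で、実際のモデル構築コードに置き換えてください)。

```python
import tensorflow as tf

# TPU を検出して初期化します(Colab / TPU VM で一般的なパターン)
resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)

# モデル(と optimizer)の作成は strategy.scope() 内で行います
with strategy.scope():
    model = create_model()  # 仮の関数:実際のモデル構築コードに置き換えてください
    model.compile(optimizer="adam")

# fit() 自体は scope の外から呼び出せます
# model.fit(train_dataset, epochs=3)
```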
transformers/docs/source/ja/perf_train_tpu_tf.md/0
{ "file_path": "transformers/docs/source/ja/perf_train_tpu_tf.md", "repo_id": "transformers", "token_count": 7360 }
272
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Image captioning

[[open-in-colab]]

画像キャプショニングは、与えられた画像に対するキャプションを予測するタスクです。現実世界での一般的な用途としては、視覚障害者がさまざまな状況を乗り越えられるよう支援することが挙げられます。画像キャプショニングは、画像を説明することで、人々のコンテンツへのアクセシビリティを向上させるのに役立ちます。

このガイドでは、次の方法を説明します。

* 画像キャプション モデルを微調整します。
* 微調整されたモデルを推論に使用します。

始める前に、必要なライブラリがすべてインストールされていることを確認してください。

```bash
pip install transformers datasets evaluate -q
pip install jiwer -q
```

モデルをアップロードしてコミュニティと共有できるように、Hugging Face アカウントにログインすることをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。

```python
from huggingface_hub import notebook_login

notebook_login()
```

## Load the Pokémon BLIP captions dataset

🤗 データセット ライブラリを使用して、画像とキャプションのペアで構成されるデータセットを読み込みます。PyTorch で独自の画像キャプション データセットを作成する方法については、[このノートブック](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/GIT/Fine_tune_GIT_on_an_image_captioning_dataset.ipynb) を参照してください。

```py
from datasets import load_dataset

ds = load_dataset("lambdalabs/pokemon-blip-captions")
ds
```
```bash
DatasetDict({
    train: Dataset({
        features: ['image', 'text'],
        num_rows: 833
    })
})
```

データセットには `image` と `text` の 2 つの特徴 (features) があります。

<Tip>

多くの画像キャプション データセットには、画像ごとに複数のキャプションが含まれています。このような場合、一般的な戦略は、トレーニング中に利用可能なキャプションの中からランダムにキャプションをサンプリングすることです。

</Tip>

[`~datasets.Dataset.train_test_split`] メソッドを使用して、データセットのトレイン スプリットをトレイン セットとテスト セットに分割します。

```python
ds = ds["train"].train_test_split(test_size=0.1)
train_ds = ds["train"]
test_ds = ds["test"]
```

トレーニング セットからのいくつかのサンプルを視覚化してみましょう。

```python
from textwrap import wrap
import matplotlib.pyplot as plt
import numpy as np


def plot_images(images, captions):
    plt.figure(figsize=(20, 20))
    for i in range(len(images)):
        ax = plt.subplot(1, len(images), i + 1)
        caption = captions[i]
        caption = "\n".join(wrap(caption, 12))
        plt.title(caption)
        plt.imshow(images[i])
        plt.axis("off")


sample_images_to_visualize = [np.array(train_ds[i]["image"]) for i in range(5)]
sample_captions = [train_ds[i]["text"] for i in range(5)]
plot_images(sample_images_to_visualize, sample_captions)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sample_training_images_image_cap.png" alt="Sample training images"/>
</div>

## Preprocess the dataset

データセットには 2 つのモダリティ (画像とテキスト) があるため、前処理パイプラインは画像とキャプションを前処理します。

これを行うには、微調整しようとしているモデルに関連付けられたプロセッサ クラスをロードします。

```python
from transformers import AutoProcessor

checkpoint = "microsoft/git-base"
processor = AutoProcessor.from_pretrained(checkpoint)
```

プロセッサは内部で画像を前処理し (サイズ変更やピクセル スケーリングを含む)、キャプションをトークン化します。

```python
def transforms(example_batch):
    images = [x for x in example_batch["image"]]
    captions = [x for x in example_batch["text"]]
    inputs = processor(images=images, text=captions, padding="max_length")
    inputs.update({"labels": inputs["input_ids"]})
    return inputs


train_ds.set_transform(transforms)
test_ds.set_transform(transforms)
```
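前処理が意図どおりに適用されているかを軽く確認したい場合は、変換後のサンプルの中身を覗いてみるとよいでしょう(以下は単純な確認用スケッチです)。

```python
sample = train_ds[0]
print(sample.keys())             # 例: input_ids, attention_mask, pixel_values, labels など
print(len(sample["input_ids"]))  # max_length までパディングされたトークン列の長さ
```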
データセットの準備ができたら、微調整用にモデルをセットアップできます。

## Load a base model

["microsoft/git-base"](https://huggingface.co/microsoft/git-base) を [`AutoModelForCausalLM`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM) オブジェクトとしてロードします。

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(checkpoint)
```

## Evaluate

画像キャプション モデルは通常、[Rouge Score](https://huggingface.co/spaces/evaluate-metric/rouge) または [Word Error Rate](https://huggingface.co/spaces/evaluate-metric/wer) で評価されます。このガイドでは、Word Error Rate (WER) を使用します。

これを行うには 🤗 Evaluate ライブラリを使用します。 WER の潜在的な制限やその他の問題点については、[このガイド](https://huggingface.co/spaces/evaluate-metric/wer) を参照してください。

```python
from evaluate import load
import torch

wer = load("wer")


def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predicted = logits.argmax(-1)
    decoded_labels = processor.batch_decode(labels, skip_special_tokens=True)
    decoded_predictions = processor.batch_decode(predicted, skip_special_tokens=True)
    wer_score = wer.compute(predictions=decoded_predictions, references=decoded_labels)
    return {"wer_score": wer_score}
```

## Train!

これで、モデルの微調整を開始する準備が整いました。これには 🤗 [`Trainer`] を使用します。

まず、[`TrainingArguments`] を使用してトレーニング引数を定義します。

```python
from transformers import TrainingArguments, Trainer

model_name = checkpoint.split("/")[1]

training_args = TrainingArguments(
    output_dir=f"{model_name}-pokemon",
    learning_rate=5e-5,
    num_train_epochs=50,
    fp16=True,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    gradient_accumulation_steps=2,
    save_total_limit=3,
    eval_strategy="steps",
    eval_steps=50,
    save_strategy="steps",
    save_steps=50,
    logging_steps=50,
    remove_unused_columns=False,
    push_to_hub=True,
    label_names=["labels"],
    load_best_model_at_end=True,
)
```

次に、トレーニング引数を、モデルおよびデータセットとともに 🤗 [`Trainer`] に渡します。

```python
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_ds,
    eval_dataset=test_ds,
    compute_metrics=compute_metrics,
)
```

トレーニングを開始するには、[`Trainer`] オブジェクトの [`~Trainer.train`] を呼び出すだけです。

```python
trainer.train()
```

トレーニングが進むにつれて、トレーニングの損失がスムーズに減少することがわかります。

トレーニングが完了したら、 [`~Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。

```python
trainer.push_to_hub()
```

## Inference

`test_ds` からサンプル画像を取得してモデルをテストします。

```python
from PIL import Image
import requests

url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png"
image = Image.open(requests.get(url, stream=True).raw)
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/test_image_image_cap.png" alt="Test image"/>
</div>

モデル用の画像を準備します。

```python
device = "cuda" if torch.cuda.is_available() else "cpu"
inputs = processor(images=image, return_tensors="pt").to(device)
pixel_values = inputs.pixel_values
```
transformers/docs/source/ja/tasks/image_captioning.md/0
{ "file_path": "transformers/docs/source/ja/tasks/image_captioning.md", "repo_id": "transformers", "token_count": 3779 }
273
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Translation [[open-in-colab]] <Youtube id="1JvfrvZgi6c"/> 翻訳では、一連のテキストをある言語から別の言語に変換します。これは、シーケンス間問題として定式化できるいくつかのタスクの 1 つであり、翻訳や要約など、入力から何らかの出力を返すための強力なフレームワークです。翻訳システムは通常、異なる言語のテキスト間の翻訳に使用されますが、音声、またはテキストから音声への変換や音声からテキストへの変換など、音声間の組み合わせにも使用できます。 このガイドでは、次の方法を説明します。 1. [OPUS Books](https://huggingface.co/datasets/opus_books) データセットの英語-フランス語サブセットの [T5](https://huggingface.co/google-t5/t5-small) を微調整して、英語のテキストを次の形式に翻訳します。フランス語。 2. 微調整されたモデルを推論に使用します。 <Tip> このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/translation) を確認することをお勧めします。 </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install transformers datasets evaluate sacrebleu ``` モデルをアップロードしてコミュニティと共有できるように、Hugging Face アカウントにログインすることをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load OPUS Books dataset まず、🤗 データセット ライブラリから [OPUS Books](https://huggingface.co/datasets/opus_books) データセットの英語とフランス語のサブセットを読み込みます。 ```py >>> from datasets import load_dataset >>> books = load_dataset("opus_books", "en-fr") ``` [`~datasets.Dataset.train_test_split`] メソッドを使用して、データセットをトレイン セットとテスト セットに分割します。 ```py >>> books = books["train"].train_test_split(test_size=0.2) ``` 次に、例を見てみましょう。 ```py >>> books["train"][0] {'id': '90560', 'translation': {'en': 'But this lofty plateau measured only a few fathoms, and soon we reentered Our Element.', 'fr': 'Mais ce plateau élevé ne mesurait que quelques toises, et bientôt nous fûmes rentrés dans notre élément.'}} ``` `translation`: テキストの英語とフランス語の翻訳。 ## Preprocess <Youtube id="XAR8jnZZuUs"/> 次のステップでは、T5 トークナイザーをロードして英語とフランス語の言語ペアを処理します。 ```py >>> from transformers import AutoTokenizer >>> checkpoint = "google-t5/t5-small" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) ``` 作成する前処理関数は次のことを行う必要があります。 1. T5 がこれが翻訳タスクであることを認識できるように、入力の前にプロンプ​​トを付けます。複数の NLP タスクが可能な一部のモデルでは、特定のタスクのプロンプトが必要です。 2. 英語の語彙で事前トレーニングされたトークナイザーを使用してフランス語のテキストをトークン化することはできないため、入力 (英語) とターゲット (フランス語) を別々にトークン化します。 3. `max_length`パラメータで設定された最大長を超えないようにシーケンスを切り詰めます。 ```py >>> source_lang = "en" >>> target_lang = "fr" >>> prefix = "translate English to French: " >>> def preprocess_function(examples): ... inputs = [prefix + example[source_lang] for example in examples["translation"]] ... targets = [example[target_lang] for example in examples["translation"]] ... model_inputs = tokenizer(inputs, text_target=targets, max_length=128, truncation=True) ... 
return model_inputs ``` データセット全体に前処理関数を適用するには、🤗 Datasets [`~datasets.Dataset.map`] メソッドを使用します。 `batched=True` を設定してデータセットの複数の要素を一度に処理することで、`map` 関数を高速化できます。 ```py >>> tokenized_books = books.map(preprocess_function, batched=True) ``` 次に、[`DataCollat​​orForSeq2Seq`] を使用してサンプルのバッチを作成します。データセット全体を最大長までパディングするのではなく、照合中にバッチ内の最長の長さまで文を *動的にパディング* する方が効率的です。 <frameworkcontent> <pt> ```py >>> from transformers import DataCollatorForSeq2Seq >>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint) ``` </pt> <tf> ```py >>> from transformers import DataCollatorForSeq2Seq >>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint, return_tensors="tf") ``` </tf> </frameworkcontent> ## Evaluate トレーニング中にメトリクスを含めると、多くの場合、モデルのパフォーマンスを評価するのに役立ちます。 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) ライブラリを使用して、評価メソッドをすばやくロードできます。このタスクでは、[SacreBLEU](https://huggingface.co/spaces/evaluate-metric/sacrebleu) メトリクスをロードします (🤗 Evaluate [クイック ツアー](https://huggingface.co/docs/evaluate/a_quick_tour) を参照してください) ) メトリクスの読み込みと計算方法の詳細については、次を参照してください)。 ```py >>> import evaluate >>> metric = evaluate.load("sacrebleu") ``` 次に、予測とラベルを [`~evaluate.EvaluationModule.compute`] に渡して SacreBLEU スコアを計算する関数を作成します。 ```py >>> import numpy as np >>> def postprocess_text(preds, labels): ... preds = [pred.strip() for pred in preds] ... labels = [[label.strip()] for label in labels] ... return preds, labels >>> def compute_metrics(eval_preds): ... preds, labels = eval_preds ... if isinstance(preds, tuple): ... preds = preds[0] ... decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) ... labels = np.where(labels != -100, labels, tokenizer.pad_token_id) ... decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) ... decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) ... result = metric.compute(predictions=decoded_preds, references=decoded_labels) ... result = {"bleu": result["score"]} ... prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] ... result["gen_len"] = np.mean(prediction_lens) ... result = {k: round(v, 4) for k, v in result.items()} ... return result ``` これで`compute_metrics`関数の準備が整いました。トレーニングをセットアップするときにこの関数に戻ります。 ## Train <frameworkcontent> <pt> <Tip> [`Trainer`] を使用したモデルの微調整に慣れていない場合は、[ここ](../training#train-with-pytorch-trainer) の基本的なチュートリアルをご覧ください。 </Tip> これでモデルのトレーニングを開始する準備が整いました。 [`AutoModelForSeq2SeqLM`] を使用して T5 をロードします。 ```py >>> from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer >>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint) ``` この時点で残っているステップは 3 つだけです。 1. [`Seq2SeqTrainingArguments`] でトレーニング ハイパーパラメータを定義します。唯一の必須パラメータは、モデルの保存場所を指定する `output_dir` です。 `push_to_hub=True`を設定して、このモデルをハブにプッシュします (モデルをアップロードするには、Hugging Face にサインインする必要があります)。各エポックの終了時に、[`Trainer`] は SacreBLEU メトリクスを評価し、トレーニング チェックポイントを保存します。 2. トレーニング引数をモデル、データセット、トークナイザー、データ照合器、および `compute_metrics` 関数とともに [`Seq2SeqTrainer`] に渡します。 3. [`~Trainer.train`] を呼び出してモデルを微調整します。 ```py >>> training_args = Seq2SeqTrainingArguments( ... output_dir="my_awesome_opus_books_model", ... eval_strategy="epoch", ... learning_rate=2e-5, ... per_device_train_batch_size=16, ... per_device_eval_batch_size=16, ... weight_decay=0.01, ... save_total_limit=3, ... num_train_epochs=2, ... predict_with_generate=True, ... fp16=True, ... push_to_hub=True, ... ) >>> trainer = Seq2SeqTrainer( ... model=model, ... args=training_args, ... train_dataset=tokenized_books["train"], ... 
eval_dataset=tokenized_books["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` トレーニングが完了したら、 [`~transformers.Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。 ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> Keras を使用したモデルの微調整に慣れていない場合は、[こちら](../training#train-a-tensorflow-model-with-keras) の基本的なチュートリアルをご覧ください。 </Tip> TensorFlow でモデルを微調整するには、オプティマイザー関数、学習率スケジュール、およびいくつかのトレーニング ハイパーパラメーターをセットアップすることから始めます。 ```py >>> from transformers import AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` 次に、[`TFAutoModelForSeq2SeqLM`] を使用して T5 をロードできます。 ```py >>> from transformers import TFAutoModelForSeq2SeqLM >>> model = TFAutoModelForSeq2SeqLM.from_pretrained(checkpoint) ``` [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] を使用して、データセットを `tf.data.Dataset` 形式に変換します。 ```py >>> tf_train_set = model.prepare_tf_dataset( ... tokenized_books["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = model.prepare_tf_dataset( ... tokenized_books["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) を使用してトレーニング用のモデルを設定します。 Transformers モデルにはすべてデフォルトのタスク関連の損失関数があるため、次の場合を除き、損失関数を指定する必要はないことに注意してください。 ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) # No loss argument! ``` トレーニングを開始する前にセットアップする最後の 2 つのことは、予測から SacreBLEU メトリクスを計算し、モデルをハブにプッシュする方法を提供することです。どちらも [Keras コールバック](../main_classes/keras_callbacks) を使用して行われます。 `compute_metrics` 関数を [`~transformers.KerasMetricCallback`] に渡します。 ```py >>> from transformers.keras_callbacks import KerasMetricCallback >>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set) ``` [`~transformers.PushToHubCallback`] でモデルとトークナイザーをプッシュする場所を指定します。 ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="my_awesome_opus_books_model", ... tokenizer=tokenizer, ... ) ``` 次に、コールバックをまとめてバンドルします。 ```py >>> callbacks = [metric_callback, push_to_hub_callback] ``` ついに、モデルのトレーニングを開始する準備が整いました。トレーニングおよび検証データセット、エポック数、コールバックを指定して [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) を呼び出し、モデルを微調整します。 ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=callbacks) ``` トレーニングが完了すると、モデルは自動的にハブにアップロードされ、誰でも使用できるようになります。 </tf> </frameworkcontent> <Tip> 翻訳用にモデルを微調整する方法の詳細な例については、対応するドキュメントを参照してください。 [PyTorch ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb) または [TensorFlow ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)。 </Tip> ## Inference モデルを微調整したので、それを推論に使用できるようになりました。 別の言語に翻訳したいテキストを考え出します。 T5 の場合、作業中のタスクに応じて入力に接頭辞を付ける必要があります。英語からフランス語に翻訳する場合は、以下に示すように入力に接頭辞を付ける必要があります。 ```py >>> text = "translate English to French: Legumes share resources with nitrogen-fixing bacteria." ``` 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。モデルを使用して翻訳用の`pipeline`をインスタンス化し、テキストをそれに渡します。 ```py >>> from transformers import pipeline # Change `xx` to the language of the input and `yy` to the language of the desired output. 
# Examples: "en" for English, "fr" for French, "de" for German, "es" for Spanish, "zh" for Chinese, etc; translation_en_to_fr translates English to French # You can view all the lists of languages here - https://huggingface.co/languages >>> translator = pipeline("translation_xx_to_yy", model="my_awesome_opus_books_model") >>> translator(text) [{'translation_text': 'Legumes partagent des ressources avec des bactéries azotantes.'}] ``` 必要に応じて、`pipeline`の結果を手動で複製することもできます。 <frameworkcontent> <pt> テキストをトークン化し、`input_ids` を PyTorch テンソルとして返します。 ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_opus_books_model") >>> inputs = tokenizer(text, return_tensors="pt").input_ids ``` [`~generation.GenerationMixin.generate`] メソッドを使用して翻訳を作成します。さまざまなテキスト生成戦略と生成を制御するためのパラメーターの詳細については、[Text Generation](../main_classes/text_generation) API を確認してください。 ```py >>> from transformers import AutoModelForSeq2SeqLM >>> model = AutoModelForSeq2SeqLM.from_pretrained("my_awesome_opus_books_model") >>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95) ``` 生成されたトークン ID をデコードしてテキストに戻します。 ```py >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'Les lignées partagent des ressources avec des bactéries enfixant l'azote.' ``` </pt> <tf> `input_ids`を TensorFlow テンソルとして返します。 tensors: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_opus_books_model") >>> inputs = tokenizer(text, return_tensors="tf").input_ids ``` [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] メソッドを使用して翻訳を作成します。さまざまなテキスト生成戦略と生成を制御するためのパラメーターの詳細については、[Text Generation](../main_classes/text_generation) API を確認してください。 ```py >>> from transformers import TFAutoModelForSeq2SeqLM >>> model = TFAutoModelForSeq2SeqLM.from_pretrained("my_awesome_opus_books_model") >>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95) ``` 生成されたトークン ID をデコードしてテキストに戻します。 ```py >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'Les lugumes partagent les ressources avec des bactéries fixatrices d'azote.' ``` </tf> </frameworkcontent>
transformers/docs/source/ja/tasks/translation.md/0
{ "file_path": "transformers/docs/source/ja/tasks/translation.md", "repo_id": "transformers", "token_count": 7208 }
274
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 🤗 Accelerate를 활용한 분산 학습[[distributed-training-with-accelerate]] 모델이 커지면서 병렬 처리는 제한된 하드웨어에서 더 큰 모델을 훈련하고 훈련 속도를 몇 배로 가속화하기 위한 전략으로 등장했습니다. Hugging Face에서는 사용자가 하나의 머신에 여러 개의 GPU를 사용하든 여러 머신에 여러 개의 GPU를 사용하든 모든 유형의 분산 설정에서 🤗 Transformers 모델을 쉽게 훈련할 수 있도록 돕기 위해 [🤗 Accelerate](https://huggingface.co/docs/accelerate) 라이브러리를 만들었습니다. 이 튜토리얼에서는 분산 환경에서 훈련할 수 있도록 기본 PyTorch 훈련 루프를 커스터마이즈하는 방법을 알아봅시다. ## 설정[[setup]] 🤗 Accelerate 설치 시작하기: ```bash pip install accelerate ``` 그 다음, [`~accelerate.Accelerator`] 객체를 불러오고 생성합니다. [`~accelerate.Accelerator`]는 자동으로 분산 설정 유형을 감지하고 훈련에 필요한 모든 구성 요소를 초기화합니다. 장치에 모델을 명시적으로 배치할 필요는 없습니다. ```py >>> from accelerate import Accelerator >>> accelerator = Accelerator() ``` ## 가속화를 위한 준비[[prepare-to-accelerate]] 다음 단계는 관련된 모든 훈련 객체를 [`~accelerate.Accelerator.prepare`] 메소드에 전달하는 것입니다. 여기에는 훈련 및 평가 데이터로더, 모델 및 옵티마이저가 포함됩니다: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( ... train_dataloader, eval_dataloader, model, optimizer ... ) ``` ## 백워드(Backward)[[backward]] 마지막으로 훈련 루프의 일반적인 `loss.backward()`를 🤗 Accelerate의 [`~accelerate.Accelerator.backward`] 메소드로 대체하기만 하면 됩니다: ```py >>> for epoch in range(num_epochs): ... for batch in train_dataloader: ... outputs = model(**batch) ... loss = outputs.loss ... accelerator.backward(loss) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ``` 다음 코드에서 볼 수 있듯이, 훈련 루프에 코드 네 줄만 추가하면 분산 학습을 활성화할 수 있습니다! ```diff + from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) ``` ## 학습[[train]] 관련 코드를 추가한 후에는 스크립트나 Colaboratory와 같은 노트북에서 훈련을 시작하세요. 
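참고로, 분산 환경에서는 여러 프로세스가 동시에 실행되므로 훈련 루프가 끝난 뒤 모델을 저장할 때는 모든 프로세스를 동기화한 다음, 래핑을 해제한 모델을 메인 프로세스에서만 저장하는 것이 일반적입니다. 아래는 이러한 패턴을 보여주는 간단한 예시입니다(저장 경로 `"my_model"`은 임의의 예시 값입니다):

```py
>>> # 모든 프로세스가 훈련을 마칠 때까지 기다립니다
>>> accelerator.wait_for_everyone()

>>> # Accelerate가 감싼 래퍼를 제거해 원본 모델을 가져옵니다
>>> unwrapped_model = accelerator.unwrap_model(model)

>>> # 메인 프로세스에서만 저장하도록 지정합니다
>>> unwrapped_model.save_pretrained(
...     "my_model",
...     is_main_process=accelerator.is_main_process,
...     save_function=accelerator.save,
... )
```

이제 아래 두 가지 방법 중 하나로 훈련을 실행할 수 있습니다.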
### 스크립트로 학습하기[[train-with-a-script]] 스크립트에서 훈련을 실행하는 경우, 다음 명령을 실행하여 구성 파일을 생성하고 저장합니다: ```bash accelerate config ``` 그런 다음, 다음 명령으로 훈련을 시작하세요: ```bash accelerate launch train.py ``` ### 노트북으로 학습하기[[train-with-a-notebook]] Colaboratory의 TPU를 사용하려는 경우, 노트북에서도 🤗 Accelerate를 실행할 수 있습니다. 훈련을 담당하는 모든 코드를 함수로 감싸서 [`~accelerate.notebook_launcher`]에 전달하세요: ```py >>> from accelerate import notebook_launcher >>> notebook_launcher(training_function) ``` 🤗 Accelerate 및 다양한 기능에 대한 자세한 내용은 [documentation](https://huggingface.co/docs/accelerate)를 참조하세요.
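`notebook_launcher`에는 함수에 전달할 인수와 사용할 프로세스 수를 함께 지정할 수도 있습니다. 예를 들어 TPU 코어 8개를 사용하는 경우를 가정하면 다음과 같이 호출할 수 있습니다(`training_function(num_epochs, lr)`와 같은 시그니처는 설명을 위한 가정입니다):

```py
>>> from accelerate import notebook_launcher

>>> args = (3, 3e-5)  # training_function(num_epochs, lr) 시그니처를 가정한 예시 인수
>>> notebook_launcher(training_function, args, num_processes=8)
```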
transformers/docs/source/ko/accelerate.md/0
{ "file_path": "transformers/docs/source/ko/accelerate.md", "repo_id": "transformers", "token_count": 2885 }
275
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 완전 분할 데이터 병렬 처리(FSDP) [[fully-sharded-data-parallel]] [Fully Sharded Data Parallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/)은 모델의 매개변수, 그레이디언트 및 옵티마이저 상태를 사용 가능한 GPU(작업자 또는 *랭크*라고도 함) 수에 따라 분할하는 데이터 병렬 처리 방식입니다. [DistributedDataParallel (DDP)](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html)와 달리, FSDP는 각 GPU에 모델을 복제하기 때문에 메모리 사용량을 줄입니다. 이는 GPU 메모리 효율성을 향상시키며 적은 수의 GPU로 훨씬 더 큰 모델을 훈련할 수 있게 합니다. FSDP는 분산 환경에서의 훈련을 쉽게 관리할 수 있는 라이브러리인 Accelerate와 통합되어 있으며, 따라서 [`Trainer`] 클래스에서 사용할 수 있습니다. 시작하기 전에 Accelerate가 설치되어 있고 최소 PyTorch 2.1.0 이상의 버전이 설치되어 있는지 확인하세요. ```bash pip install accelerate ``` ## FSDP 구성 [[fsdp-configuration]] 시작하려면 [`accelerate config`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-config) 명령을 실행하여 훈련 환경에 대한 구성 파일을 생성하세요. Accelerate는 이 구성 파일을 사용하여 `accelerate config`에서 선택한 훈련 옵션에 따라 자동으로 올바른 훈련 환경을 설정합니다. ```bash accelerate config ``` `accelerate config`를 실행하면 훈련 환경을 구성하기 위한 일련의 옵션들이 나타납니다. 이 섹션에서는 가장 중요한 FSDP 옵션 중 일부를 다룹니다. 다른 사용 가능한 FSDP 옵션에 대해 더 알아보고 싶다면 [fsdp_config](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.fsdp_config) 매개변수를 참조하세요. ### 분할 전략 [[sharding-strategy]] FSDP는 여러 가지 분할 전략을 제공합니다: * `FULL_SHARD` - 모델 매개변수, 그레이디언트 및 옵티마이저 상태를 작업자 간에 분할; 이 옵션을 선택하려면 `1`을 선택하세요 * `SHARD_GRAD_OP` - 그레이디언트 및 옵티마이저 상태를 작업자 간에 분할; 이 옵션을 선택하려면 `2`를 선택하세요 * `NO_SHARD` - 아무 것도 분할하지 않음 (DDP와 동일); 이 옵션을 선택하려면 `3`을 선택하세요 * `HYBRID_SHARD` - 각 작업자가 전체 복사본을 가지고 있는 상태에서 모델 매개변수, 그레이디언트 및 옵티마이저 상태를 작업자 내에서 분할; 이 옵션을 선택하려면 `4`를 선택하세요 * `HYBRID_SHARD_ZERO2` - 각 작업자가 전체 복사본을 가지고 있는 상태에서 그레이디언트 및 옵티마이저 상태를 작업자 내에서 분할; 이 옵션을 선택하려면 `5`를 선택하세요 이것은 `fsdp_sharding_strategy` 플래그로 활성화됩니다. ### CPU 오프로드 [[cpu-offload]] 사용하지 않는 매개변수와 그레이디언트를 CPU로 오프로드하여 더 많은 GPU 메모리를 절약하고 FSDP로도 충분하지 않은 큰 모델을 GPU에 적재할 수 있도록 할 수 있습니다. 이는 `accelerate config`를 실행할 때 `fsdp_offload_params: true`로 설정하여 활성화됩니다. ### 래핑 정책 [[wrapping-policy]] FSDP는 네트워크의 각 레이어를 래핑하여 적용됩니다. 래핑은 일반적으로 중첩 방식으로 적용되며 각각 순방향으로 지나간 후 전체 가중치를 삭제하여 다음 레이어에서 사용할 메모리를 절약합니다. *자동 래핑* 정책은 이를 구현하는 가장 간단한 방법이며 코드를 변경할 필요가 없습니다. Transformer 레이어를 래핑하려면 `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP`를 선택하고 래핑할 레이어를 지정하려면 `fsdp_transformer_layer_cls_to_wrap`를 선택하세요 (예: `BertLayer`). 또는 특정 매개변수 수를 초과할 경우 FSDP가 레이어에 적용되는 크기 기반 래핑 정책을 선택할 수 있습니다. 이는 `fsdp_wrap_policy: SIZE_BASED_WRAP` 및 `min_num_param`을 원하는 크기의 임계값으로 설정하여 활성화됩니다. ### 체크포인트 [[checkpointing]] 중간 체크포인트는 `fsdp_state_dict_type: SHARDED_STATE_DICT`로 저장해야 합니다. CPU 오프로드가 활성화된 랭크 0에서 전체 상태 딕셔너리를 저장하는 데 시간이 많이 걸리고, 브로드캐스팅 중 무기한 대기하여 `NCCL Timeout` 오류가 발생할 수 있기 때문입니다. [`~accelerate.Accelerator.load_state`] 메서드를 사용하여 분할된 상태 딕셔너리로 훈련을 재개할 수 있습니다. 
```py # 경로가 내재된 체크포인트 accelerator.load_state("ckpt") ``` 그러나 훈련이 끝나면 전체 상태 딕셔너리를 저장해야 합니다. 분할된 상태 딕셔너리는 FSDP와만 호환되기 때문입니다. ```py if trainer.is_fsdp_enabled: trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT") trainer.save_model(script_args.output_dir) ``` ### TPU [[tpu]] [PyTorch XLA](https://pytorch.org/xla/release/2.1/index.html)는 TPU에 대한 FSDP 훈련을 지원하며 `accelerate config`로 생성된 FSDP 구성 파일을 수정하여 활성화할 수 있습니다. 위에서 지정한 분할 전략 및 래핑 옵션 외에도 아래에 표시된 매개변수를 파일에 추가할 수 있습니다. ```yaml xla: True # PyTorch/XLA를 활성화하려면 True로 설정해야 합니다 xla_fsdp_settings: # XLA 특정 FSDP 매개변수 xla_fsdp_grad_ckpt: True # gradient checkpointing을 사용합니다 ``` [`xla_fsdp_settings`](https://github.com/pytorch/xla/blob/2e6e183e0724818f137c8135b34ef273dea33318/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py#L128)는 FSDP에 대한 추가적인 XLA 특정 매개변수를 구성할 수 있게 합니다. ## 훈련 시작 [[launch-training]] 예시 FSDP 구성 파일은 다음과 같을 수 있습니다: ```yaml compute_environment: LOCAL_MACHINE debug: false distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_cpu_ram_efficient_loading: true fsdp_forward_prefetch: false fsdp_offload_params: true fsdp_sharding_strategy: 1 fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_transformer_layer_cls_to_wrap: BertLayer fsdp_use_orig_params: true machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` 훈련을 시작하려면 [`accelerate launch`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-launch) 명령을 실행하세요. 이 때 전에 `accelerate config`로 생성한 구성 파일을 자동으로 사용합니다. ```bash accelerate launch my-trainer-script.py ``` ```bash accelerate launch --fsdp="full shard" --fsdp_config="path/to/fsdp_config/ my-trainer-script.py ``` ## 다음 단계 [[next-steps]] FSDP는 매우 큰 모델을 훈련할 때 강력한 도구가 될 수 있으며, 여러 개의 GPU나 TPU를 사용할 수 있습니다. 모델 매개변수, 옵티마이저 및 그레이디언트 상태를 분할하고 비활성 상태일 때, CPU로 오프로드하면 FSDP는 대규모 훈련의 높은 연산 비용을 줄일 수 있습니다. 더 알아보고 싶다면 다음 자료가 도움이 될 수 있습니다: * [FSDP](https://huggingface.co/docs/accelerate/usage_guides/fsdp)에 대한 더 깊이 있는 Accelerate 가이드를 따라가 보세요. * [PyTorch의 완전 분할 데이터 병렬 처리 (FSDP) API를 소개합니다](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) 블로그 글을 읽어보세요. * [FSDP를 사용하여 클라우드 TPU에서 PyTorch 모델 크기 조절하기](https://pytorch.org/blog/scaling-pytorch-models-on-cloud-tpus-with-fsdp/) 블로그 글을 읽어보세요.
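참고로, 위의 `accelerate launch` 명령으로 실행하는 `my-trainer-script.py` 자체에는 FSDP 전용 코드가 필요하지 않습니다. 분할 관련 설정은 구성 파일이 처리하므로, 스크립트는 일반적인 [`Trainer`] 코드로 충분합니다. 아래는 이러한 스크립트가 어떤 형태일 수 있는지 보여주는 최소한의 스케치입니다(모델, 데이터 세트, 하이퍼파라미터는 모두 설명을 위한 예시입니다):

```py
# my-trainer-script.py — 설명을 위한 최소 예시
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

# 예시 체크포인트: 위 구성 파일의 fsdp_transformer_layer_cls_to_wrap(BertLayer)와 맞는 모델
model_name = "google-bert/bert-base-cased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# 예시 데이터 세트
dataset = load_dataset("glue", "mrpc")


def tokenize(batch):
    return tokenizer(batch["sentence1"], batch["sentence2"], truncation=True, padding="max_length")


dataset = dataset.map(tokenize, batched=True)

training_args = TrainingArguments(
    output_dir="fsdp-output",
    per_device_train_batch_size=8,
    num_train_epochs=1,
    bf16=True,  # 구성 파일의 mixed_precision: bf16에 맞춘 예시 설정
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["validation"],
    tokenizer=tokenizer,
)
trainer.train()
```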
transformers/docs/source/ko/fsdp.md/0
{ "file_path": "transformers/docs/source/ko/fsdp.md", "repo_id": "transformers", "token_count": 5834 }
276
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 다국어 모델 추론하기[[multilingual-models-for-inference]] [[open-in-colab]] 🤗 Transformers에는 여러 종류의 다국어(multilingual) 모델이 있으며, 단일 언어(monolingual) 모델과 추론 시 사용법이 다릅니다. 그렇다고 해서 *모든* 다국어 모델의 사용법이 다른 것은 아닙니다. [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased)와 같은 몇몇 모델은 단일 언어 모델처럼 사용할 수 있습니다. 이번 가이드에서 다국어 모델의 추론 시 사용 방법을 알아볼 것입니다. ## XLM[[xlm]] XLM에는 10가지 체크포인트(checkpoint)가 있는데, 이 중 하나만 단일 언어입니다. 나머지 체크포인트 9개는 언어 임베딩을 사용하는 체크포인트와 그렇지 않은 체크포인트의 두 가지 범주로 나눌 수 있습니다. ### 언어 임베딩을 사용하는 XLM[[xlm-with-language-embeddings]] 다음 XLM 모델은 추론 시에 언어 임베딩을 사용합니다: - `FacebookAI/xlm-mlm-ende-1024` (마스킹된 언어 모델링, 영어-독일어) - `FacebookAI/xlm-mlm-enfr-1024` (마스킹된 언어 모델링, 영어-프랑스어) - `FacebookAI/xlm-mlm-enro-1024` (마스킹된 언어 모델링, 영어-루마니아어) - `FacebookAI/xlm-mlm-xnli15-1024` (마스킹된 언어 모델링, XNLI 데이터 세트에서 제공하는 15개 국어) - `FacebookAI/xlm-mlm-tlm-xnli15-1024` (마스킹된 언어 모델링 + 번역, XNLI 데이터 세트에서 제공하는 15개 국어) - `FacebookAI/xlm-clm-enfr-1024` (Causal language modeling, 영어-프랑스어) - `FacebookAI/xlm-clm-ende-1024` (Causal language modeling, 영어-독일어) 언어 임베딩은 모델에 전달된 `input_ids`와 동일한 shape의 텐서로 표현됩니다. 이러한 텐서의 값은 사용된 언어에 따라 다르며 토크나이저의 `lang2id` 및 `id2lang` 속성에 의해 식별됩니다. 다음 예제에서는 `FacebookAI/xlm-clm-enfr-1024` 체크포인트(코잘 언어 모델링(causal language modeling), 영어-프랑스어)를 가져옵니다: ```py >>> import torch >>> from transformers import XLMTokenizer, XLMWithLMHeadModel >>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-clm-enfr-1024") >>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-clm-enfr-1024") ``` 토크나이저의 `lang2id` 속성은 모델의 언어와 해당 ID를 표시합니다: ```py >>> print(tokenizer.lang2id) {'en': 0, 'fr': 1} ``` 다음으로, 예제 입력을 만듭니다: ```py >>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # 배치 크기는 1입니다 ``` 언어 ID를 `"en"`으로 설정해 언어 임베딩을 정의합니다. 언어 임베딩은 영어의 언어 ID인 `0`으로 채워진 텐서입니다. 이 텐서는 `input_ids`와 같은 크기여야 합니다. ```py >>> language_id = tokenizer.lang2id["en"] # 0 >>> langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0]) >>> # (batch_size, sequence_length) shape의 텐서가 되도록 만듭니다. >>> langs = langs.view(1, -1) # 이제 [1, sequence_length] shape이 되었습니다(배치 크기는 1입니다) ``` 이제 `input_ids`와 언어 임베딩을 모델로 전달합니다: ```py >>> outputs = model(input_ids, langs=langs) ``` [run_generation.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation/run_generation.py) 스크립트로 `xlm-clm` 체크포인트를 사용해 텍스트와 언어 임베딩을 생성할 수 있습니다. ### 언어 임베딩을 사용하지 않는 XLM[[xlm-without-language-embeddings]] 다음 XLM 모델은 추론 시에 언어 임베딩이 필요하지 않습니다: - `FacebookAI/xlm-mlm-17-1280` (마스킹된 언어 모델링, 17개 국어) - `FacebookAI/xlm-mlm-100-1280` (마스킹된 언어 모델링, 100개 국어) 이전의 XLM 체크포인트와 달리 이 모델은 일반 문장 표현에 사용됩니다. 
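이러한 체크포인트는 언어 임베딩 없이 일반적인 모델과 같은 방식으로 사용할 수 있습니다. 예를 들어 아래와 같이 `langs` 인수 없이 바로 추론할 수 있습니다(입력 문장은 임의의 예시입니다):

```py
>>> import torch
>>> from transformers import XLMTokenizer, XLMWithLMHeadModel

>>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-mlm-17-1280")
>>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-mlm-17-1280")

>>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")])  # 배치 크기는 1입니다
>>> outputs = model(input_ids)  # langs 인수를 전달할 필요가 없습니다
```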
## BERT[[bert]] 다음 BERT 모델은 다국어 태스크에 사용할 수 있습니다: - `google-bert/bert-base-multilingual-uncased` (마스킹된 언어 모델링 + 다음 문장 예측, 102개 국어) - `google-bert/bert-base-multilingual-cased` (마스킹된 언어 모델링 + 다음 문장 예측, 104개 국어) 이러한 모델은 추론 시에 언어 임베딩이 필요하지 않습니다. 문맥에서 언어를 식별하고, 식별된 언어로 추론합니다. ## XLM-RoBERTa[[xlmroberta]] 다음 XLM-RoBERTa 또한 다국어 다국어 태스크에 사용할 수 있습니다: - `FacebookAI/xlm-roberta-base` (마스킹된 언어 모델링, 100개 국어) - `FacebookAI/xlm-roberta-large` (마스킹된 언어 모델링, 100개 국어) XLM-RoBERTa는 100개 국어에 대해 새로 생성되고 정제된 2.5TB 규모의 CommonCrawl 데이터로 학습되었습니다. 이전에 공개된 mBERT나 XLM과 같은 다국어 모델에 비해 분류, 시퀀스 라벨링, 질의 응답과 같은 다운스트림(downstream) 작업에서 이점이 있습니다. ## M2M100[[m2m100]] 다음 M2M100 모델 또한 다국어 다국어 태스크에 사용할 수 있습니다: - `facebook/m2m100_418M` (번역) - `facebook/m2m100_1.2B` (번역) 이 예제에서는 `facebook/m2m100_418M` 체크포인트를 가져와서 중국어를 영어로 번역합니다. 토크나이저에서 번역 대상 언어(source language)를 설정할 수 있습니다: ```py >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> chinese_text = "不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒." >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh") >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") ``` 문장을 토큰화합니다: ```py >>> encoded_zh = tokenizer(chinese_text, return_tensors="pt") ``` M2M100은 번역을 진행하기 위해 첫 번째로 생성되는 토큰은 번역할 언어(target language) ID로 강제 지정합니다. 영어로 번역하기 위해 `generate` 메소드에서 `forced_bos_token_id`를 `en`으로 설정합니다: ```py >>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) 'Do not interfere with the matters of the witches, because they are delicate and will soon be angry.' ``` ## MBart[[mbart]] 다음 MBart 모델 또한 다국어 태스크에 사용할 수 있습니다: - `facebook/mbart-large-50-one-to-many-mmt` (일대다 다국어 번역, 50개 국어) - `facebook/mbart-large-50-many-to-many-mmt` (다대다 다국어 번역, 50개 국어) - `facebook/mbart-large-50-many-to-one-mmt` (다대일 다국어 번역, 50개 국어) - `facebook/mbart-large-50` (다국어 번역, 50개 국어) - `facebook/mbart-large-cc25` 이 예제에서는 핀란드어를 영어로 번역하기 위해 `facebook/mbart-large-50-many-to-many-mmt` 체크포인트를 가져옵니다. 토크나이저에서 번역 대상 언어(source language)를 설정할 수 있습니다: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia." >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI") >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") ``` 문장을 토큰화합니다: ```py >>> encoded_en = tokenizer(en_text, return_tensors="pt") ``` MBart는 번역을 진행하기 위해 첫 번째로 생성되는 토큰은 번역할 언어(target language) ID로 강제 지정합니다. 영어로 번역하기 위해 `generate` 메소드에서 `forced_bos_token_id`를 `en`으로 설정합니다: ```py >>> generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id("en_XX")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) "Don't interfere with the wizard's affairs, because they are subtle, will soon get angry." ``` `facebook/mbart-large-50-many-to-one-mmt` 체크포인트를 사용하고 있다면, 첫 번째로 생성되는 토큰을 번역할 언어(target language) ID로 강제 지정할 필요는 없습니다.
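예를 들어 `facebook/mbart-large-50-many-to-one-mmt` 체크포인트로 위의 핀란드어 문장을 영어로 번역한다면, 다음과 같이 `forced_bos_token_id` 없이 `generate`를 호출하면 됩니다(실제 출력 문장은 실행 환경과 생성 설정에 따라 달라질 수 있습니다):

```py
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-one-mmt", src_lang="fi_FI")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")

>>> encoded_fi = tokenizer(fi_text, return_tensors="pt")
>>> generated_tokens = model.generate(**encoded_fi)
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
```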
transformers/docs/source/ko/multilingual.md/0
{ "file_path": "transformers/docs/source/ko/multilingual.md", "repo_id": "transformers", "token_count": 5686 }
277
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 전처리[[preprocess]] [[open-in-colab]] 모델을 훈련하려면 데이터 세트를 모델에 맞는 입력 형식으로 전처리해야 합니다. 텍스트, 이미지 또는 오디오인지 관계없이 데이터를 텐서 배치로 변환하고 조립할 필요가 있습니다. 🤗 Transformers는 모델에 대한 데이터를 준비하는 데 도움이 되는 일련의 전처리 클래스를 제공합니다. 이 튜토리얼에서는 다음 내용을 배울 수 있습니다: * 텍스트는 [Tokenizer](./main_classes/tokenizer)를 사용하여 토큰 시퀀스로 변환하고 토큰의 숫자 표현을 만든 후 텐서로 조립합니다. * 음성 및 오디오는 [Feature extractor](./main_classes/feature_extractor)를 사용하여 오디오 파형에서 시퀀스 특성을 파악하여 텐서로 변환합니다. * 이미지 입력은 [ImageProcessor](./main_classes/image)을 사용하여 이미지를 텐서로 변환합니다. * 멀티모달 입력은 [Processor](./main_classes/processors)을 사용하여 토크나이저와 특성 추출기 또는 이미지 프로세서를 결합합니다. <Tip> `AutoProcessor`는 **언제나** 작동하여 토크나이저, 이미지 프로세서, 특성 추출기 또는 프로세서 등 사용 중인 모델에 맞는 클래스를 자동으로 선택합니다. </Tip> 시작하기 전에 🤗 Datasets를 설치하여 실험에 사용할 데이터를 불러올 수 있습니다: ```bash pip install datasets ``` ## 자연어처리[[natural-language-processing]] <Youtube id="Yffk5aydLzg"/> 텍스트 데이터를 전처리하기 위한 기본 도구는 [tokenizer](main_classes/tokenizer)입니다. 토크나이저는 일련의 규칙에 따라 텍스트를 *토큰*으로 나눕니다. 토큰은 숫자로 변환되고 텐서는 모델 입력이 됩니다. 모델에 필요한 추가 입력은 토크나이저에 의해 추가됩니다. <Tip> 사전훈련된 모델을 사용할 계획이라면 모델과 함께 사전훈련된 토크나이저를 사용하는 것이 중요합니다. 이렇게 하면 텍스트가 사전훈련 말뭉치와 동일한 방식으로 분할되고 사전훈련 중에 동일한 해당 토큰-인덱스 쌍(일반적으로 *vocab*이라고 함)을 사용합니다. </Tip> 시작하려면 [`AutoTokenizer.from_pretrained`] 메소드를 사용하여 사전훈련된 토크나이저를 불러오세요. 모델과 함께 사전훈련된 *vocab*을 다운로드합니다: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") ``` 그 다음으로 텍스트를 토크나이저에 넣어주세요: ```py >>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.") >>> print(encoded_input) {'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` 토크나이저는 세 가지 중요한 항목을 포함한 딕셔너리를 반환합니다: * [input_ids](glossary#input-ids)는 문장의 각 토큰에 해당하는 인덱스입니다. * [attention_mask](glossary#attention-mask)는 토큰을 처리해야 하는지 여부를 나타냅니다. * [token_type_ids](glossary#token-type-ids)는 두 개 이상의 시퀀스가 있을 때 토큰이 속한 시퀀스를 식별합니다. `input_ids`를 디코딩하여 입력을 반환합니다: ```py >>> tokenizer.decode(encoded_input["input_ids"]) '[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]' ``` 토크나이저가 두 개의 특수한 토큰(분류 토큰 `CLS`와 분할 토큰 `SEP`)을 문장에 추가했습니다. 모든 모델에 특수한 토큰이 필요한 것은 아니지만, 필요하다면 토크나이저가 자동으로 추가합니다. 전처리할 문장이 여러 개 있는 경우에는 리스트로 토크나이저에 전달합니다: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... 
] >>> encoded_inputs = tokenizer(batch_sentences) >>> print(encoded_inputs) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]]} ``` ### 패딩[[pad]] 모델 입력인 텐서는 모양이 균일해야 하지만, 문장의 길이가 항상 같지는 않기 때문에 문제가 될 수 있습니다. 패딩은 짧은 문장에 특수한 *패딩 토큰*을 추가하여 텐서를 직사각형 모양이 되도록 하는 전략입니다. `padding` 매개변수를 `True`로 설정하여 배치 내의 짧은 시퀀스를 가장 긴 시퀀스에 맞춰 패딩합니다. ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` 길이가 짧은 첫 문장과 세 번째 문장이 이제 `0`으로 채워졌습니다. ### 잘라내기[[truncation]] 한편, 때로는 시퀀스가 모델에서 처리하기에 너무 길 수도 있습니다. 이 경우, 시퀀스를 더 짧게 줄일 필요가 있습니다. 모델에서 허용하는 최대 길이로 시퀀스를 자르려면 `truncation` 매개변수를 `True`로 설정하세요: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` <Tip> 다양한 패딩과 잘라내기 인수에 대해 더 알아보려면 [패딩과 잘라내기](./pad_truncation) 개념 가이드를 확인해보세요. </Tip> ### 텐서 만들기[[build-tensors]] 마지막으로, 토크나이저가 모델에 공급되는 실제 텐서를 반환하도록 합니다. `return_tensors` 매개변수를 PyTorch의 경우 `pt`, TensorFlow의 경우 `tf`로 설정하세요: <frameworkcontent> <pt> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... 
] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt") >>> print(encoded_input) {'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` </pt> <tf> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf") >>> print(encoded_input) {'input_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>} ``` </tf> </frameworkcontent> ## 오디오[[audio]] 오디오 작업은 모델에 맞는 데이터 세트를 준비하기 위해 [특성 추출기](main_classes/feature_extractor)가 필요합니다. 특성 추출기는 원시 오디오 데이터에서 특성를 추출하고 이를 텐서로 변환하는 것이 목적입니다. 오디오 데이터 세트에 특성 추출기를 사용하는 방법을 보기 위해 [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터 세트를 가져오세요. (데이터 세트를 가져오는 방법은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub)에서 자세히 설명하고 있습니다.) ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` `audio` 열의 첫 번째 요소에 접근하여 입력을 살펴보세요. `audio` 열을 호출하면 오디오 파일을 자동으로 가져오고 리샘플링합니다. ```py >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` 이렇게 하면 세 가지 항목이 반환됩니다: * `array`는 1D 배열로 가져와서 (필요한 경우) 리샘플링된 음성 신호입니다. * `path`는 오디오 파일의 위치를 가리킵니다. * `sampling_rate`는 음성 신호에서 초당 측정되는 데이터 포인트 수를 나타냅니다. 이 튜토리얼에서는 [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) 모델을 사용합니다. 모델 카드를 보면 Wav2Vec2가 16kHz 샘플링된 음성 오디오를 기반으로 사전훈련된 것을 알 수 있습니다. 모델을 사전훈련하는 데 사용된 데이터 세트의 샘플링 레이트와 오디오 데이터의 샘플링 레이트가 일치해야 합니다. 데이터의 샘플링 레이트가 다르면 데이터를 리샘플링해야 합니다. 1. 🤗 Datasets의 [`~datasets.Dataset.cast_column`] 메소드를 사용하여 샘플링 레이트를 16kHz로 업샘플링하세요: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) ``` 2. 
오디오 파일을 리샘플링하기 위해 `audio` 열을 다시 호출합니다: ```py >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` 다음으로, 입력을 정규화하고 패딩할 특성 추출기를 가져오세요. 텍스트 데이터의 경우, 더 짧은 시퀀스에 대해 `0`이 추가됩니다. 오디오 데이터에도 같은 개념이 적용됩니다. 특성 추출기는 배열에 `0`(묵음으로 해석)을 추가합니다. [`AutoFeatureExtractor.from_pretrained`]를 사용하여 특성 추출기를 가져오세요: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` 오디오 `array`를 특성 추출기에 전달하세요. 또한, 발생할 수 있는 조용한 오류(silent errors)를 더 잘 디버깅할 수 있도록 특성 추출기에 `sampling_rate` 인수를 추가하는 것을 권장합니다. ```py >>> audio_input = [dataset[0]["audio"]["array"]] >>> feature_extractor(audio_input, sampling_rate=16000) {'input_values': [array([ 3.8106556e-04, 2.7506407e-03, 2.8015103e-03, ..., 5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]} ``` 토크나이저와 마찬가지로 배치 내에서 가변적인 시퀀스를 처리하기 위해 패딩 또는 잘라내기를 적용할 수 있습니다. 이 두 개의 오디오 샘플의 시퀀스 길이를 확인해보세요: ```py >>> dataset[0]["audio"]["array"].shape (173398,) >>> dataset[1]["audio"]["array"].shape (106496,) ``` 오디오 샘플의 길이가 동일하도록 데이터 세트를 전처리하는 함수를 만드세요. 최대 샘플 길이를 지정하면 특성 추출기가 해당 길이에 맞춰 시퀀스를 패딩하거나 잘라냅니다: ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, ... sampling_rate=16000, ... padding=True, ... max_length=100000, ... truncation=True, ... ) ... return inputs ``` `preprocess_function`을 데이터 세트의 처음 예시 몇 개에 적용해보세요: ```py >>> processed_dataset = preprocess_function(dataset[:5]) ``` 이제 샘플 길이가 모두 같고 지정된 최대 길이에 맞게 되었습니다. 드디어 전처리된 데이터 세트를 모델에 전달할 수 있습니다! ```py >>> processed_dataset["input_values"][0].shape (100000,) >>> processed_dataset["input_values"][1].shape (100000,) ``` ## 컴퓨터 비전[[computer-vision]] 컴퓨터 비전 작업의 경우, 모델에 대한 데이터 세트를 준비하기 위해 [이미지 프로세서](main_classes/image_processor)가 필요합니다. 이미지 전처리는 이미지를 모델이 예상하는 입력으로 변환하는 여러 단계로 이루어집니다. 이러한 단계에는 크기 조정, 정규화, 색상 채널 보정, 이미지의 텐서 변환 등이 포함됩니다. <Tip> 이미지 전처리는 이미지 증강 기법을 몇 가지 적용한 뒤에 할 수도 있습니다. 이미지 전처리 및 이미지 증강은 모두 이미지 데이터를 변형하지만, 서로 다른 목적을 가지고 있습니다: * 이미지 증강은 과적합(over-fitting)을 방지하고 모델의 견고함(resiliency)을 높이는 데 도움이 되는 방식으로 이미지를 수정합니다. 밝기와 색상 조정, 자르기, 회전, 크기 조정, 확대/축소 등 다양한 방법으로 데이터를 증강할 수 있습니다. 그러나 증강으로 이미지의 의미가 바뀌지 않도록 주의해야 합니다. * 이미지 전처리는 이미지가 모델이 예상하는 입력 형식과 일치하도록 보장합니다. 컴퓨터 비전 모델을 미세 조정할 때 이미지는 모델이 초기에 훈련될 때와 정확히 같은 방식으로 전처리되어야 합니다. 이미지 증강에는 원하는 라이브러리를 무엇이든 사용할 수 있습니다. 이미지 전처리에는 모델과 연결된 `ImageProcessor`를 사용합니다. </Tip> [food101](https://huggingface.co/datasets/food101) 데이터 세트를 가져와서 컴퓨터 비전 데이터 세트에서 이미지 프로세서를 어떻게 사용하는지 알아보세요. 데이터 세트를 불러오는 방법은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub)을 참고하세요. <Tip> 데이터 세트가 상당히 크기 때문에 🤗 Datasets의 `split` 매개변수를 사용하여 훈련 세트에서 작은 샘플만 가져오세요! 
</Tip> ```py >>> from datasets import load_dataset >>> dataset = load_dataset("food101", split="train[:100]") ``` 다음으로, 🤗 Datasets의 [`image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image)로 이미지를 확인해보세요: ```py >>> dataset[0]["image"] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png"/> </div> [`AutoImageProcessor.from_pretrained`]로 이미지 프로세서를 가져오세요: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` 먼저 이미지 증강 단계를 추가해 봅시다. 아무 라이브러리나 사용해도 괜찮지만, 이번 튜토리얼에서는 torchvision의 [`transforms`](https://pytorch.org/vision/stable/transforms.html) 모듈을 사용하겠습니다. 다른 데이터 증강 라이브러리를 사용해보고 싶다면, [Albumentations](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) 또는 [Kornia notebooks](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb)에서 어떻게 사용하는지 배울 수 있습니다. 1. [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html)로 [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html)와 [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html) 등 변환을 몇 가지 연결하세요. 참고로 크기 조정에 필요한 이미지의 크기 요구사항은 `image_processor`에서 가져올 수 있습니다. 일부 모델은 정확한 높이와 너비를 요구하지만, 제일 짧은 변의 길이(`shortest_edge`)만 정의된 모델도 있습니다. ```py >>> from torchvision.transforms import RandomResizedCrop, ColorJitter, Compose >>> size = ( ... image_processor.size["shortest_edge"] ... if "shortest_edge" in image_processor.size ... else (image_processor.size["height"], image_processor.size["width"]) ... ) >>> _transforms = Compose([RandomResizedCrop(size), ColorJitter(brightness=0.5, hue=0.5)]) ``` 2. 모델은 입력으로 [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values)를 받습니다. `ImageProcessor`는 이미지 정규화 및 적절한 텐서 생성을 처리할 수 있습니다. 배치 이미지에 대한 이미지 증강 및 이미지 전처리를 결합하고 `pixel_values`를 생성하는 함수를 만듭니다: ```py >>> def transforms(examples): ... images = [_transforms(img.convert("RGB")) for img in examples["image"]] ... examples["pixel_values"] = image_processor(images, do_resize=False, return_tensors="pt")["pixel_values"] ... return examples ``` <Tip> 위의 예에서는 이미지 증강 중에 이미지 크기를 조정했기 때문에 `do_resize=False`로 설정하고, 해당 `image_processor`에서 `size` 속성을 활용했습니다. 이미지 증강 중에 이미지 크기를 조정하지 않은 경우 이 매개변수를 생략하세요. 기본적으로는 `ImageProcessor`가 크기 조정을 처리합니다. 증강 변환 과정에서 이미지를 정규화하려면 `image_processor.image_mean` 및 `image_processor.image_std` 값을 사용하세요. </Tip> 3. 🤗 Datasets의 [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform)를 사용하여 실시간으로 변환을 적용합니다: ```py >>> dataset.set_transform(transforms) ``` 4. 이제 이미지에 접근하면 이미지 프로세서가 `pixel_values`를 추가한 것을 알 수 있습니다. 드디어 처리된 데이터 세트를 모델에 전달할 수 있습니다! ```py >>> dataset[0].keys() ``` 다음은 변형이 적용된 후의 이미지입니다. 이미지가 무작위로 잘려나갔고 색상 속성이 다릅니다. ```py >>> import numpy as np >>> import matplotlib.pyplot as plt >>> img = dataset[0]["pixel_values"] >>> plt.imshow(img.permute(1, 2, 0)) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png"/> </div> <Tip> `ImageProcessor`는 객체 감지, 시맨틱 세그멘테이션(semantic segmentation), 인스턴스 세그멘테이션(instance segmentation), 파놉틱 세그멘테이션(panoptic segmentation)과 같은 작업에 대한 후처리 방법을 제공합니다. 
이러한 방법은 모델의 원시 출력을 경계 상자나 세그멘테이션 맵과 같은 의미 있는 예측으로 변환해줍니다. </Tip> ### 패딩[[pad]] 예를 들어, [DETR](./model_doc/detr)와 같은 경우에는 모델이 훈련할 때 크기 조정 증강을 적용합니다. 이로 인해 배치 내 이미지 크기가 달라질 수 있습니다. [`DetrImageProcessor`]의 [`DetrImageProcessor.pad`]를 사용하고 사용자 정의 `collate_fn`을 정의해서 배치 이미지를 처리할 수 있습니다. ```py >>> def collate_fn(batch): ... pixel_values = [item["pixel_values"] for item in batch] ... encoding = image_processor.pad(pixel_values, return_tensors="pt") ... labels = [item["labels"] for item in batch] ... batch = {} ... batch["pixel_values"] = encoding["pixel_values"] ... batch["pixel_mask"] = encoding["pixel_mask"] ... batch["labels"] = labels ... return batch ``` ## 멀티모달[[multimodal]] 멀티모달 입력이 필요한 작업의 경우, 모델에 데이터 세트를 준비하기 위한 [프로세서](main_classes/processors)가 필요합니다. 프로세서는 토크나이저와 특성 추출기와 같은 두 가지 처리 객체를 결합합니다. [LJ Speech](https://huggingface.co/datasets/lj_speech) 데이터 세트를 가져와서 자동 음성 인식(ASR)을 위한 프로세서를 사용하는 방법을 확인하세요. (데이터 세트를 가져오는 방법에 대한 자세한 내용은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub)에서 볼 수 있습니다.) ```py >>> from datasets import load_dataset >>> lj_speech = load_dataset("lj_speech", split="train") ``` 자동 음성 인식(ASR)에서는 `audio`와 `text`에만 집중하면 되므로, 다른 열들은 제거할 수 있습니다: ```py >>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"]) ``` 이제 `audio`와 `text`열을 살펴보세요: ```py >>> lj_speech[0]["audio"] {'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ..., 7.3242188e-04, 2.1362305e-04, 6.1035156e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav', 'sampling_rate': 22050} >>> lj_speech[0]["text"] 'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition' ``` 기존에 사전훈련된 모델에서 사용된 데이터 세트와 새로운 오디오 데이터 세트의 샘플링 레이트를 일치시키기 위해 오디오 데이터 세트의 샘플링 레이트를 [리샘플링](preprocessing#audio)해야 합니다! ```py >>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000)) ``` [`AutoProcessor.from_pretrained`]로 프로세서를 가져오세요: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") ``` 1. `array`에 들어 있는 오디오 데이터를 `input_values`로 변환하고 `text`를 토큰화하여 `labels`로 변환하는 함수를 만듭니다. 모델의 입력은 다음과 같습니다: ```py >>> def prepare_dataset(example): ... audio = example["audio"] ... example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000)) ... return example ``` 2. 샘플을 `prepare_dataset` 함수에 적용하세요: ```py >>> prepare_dataset(lj_speech[0]) ``` 이제 프로세서가 `input_values`와 `labels`를 추가하고, 샘플링 레이트도 올바르게 16kHz로 다운샘플링했습니다. 드디어 처리된 데이터 세트를 모델에 전달할 수 있습니다!
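전체 데이터 세트에 적용하려면, 앞서 텍스트와 오디오를 전처리할 때처럼 🤗 Datasets의 [`~datasets.Dataset.map`] 메소드를 사용할 수 있습니다. 아래는 그 방법을 보여주는 간단한 예시입니다(전처리 후 불필요해진 원본 열은 제거한다고 가정했습니다):

```py
>>> processed_lj_speech = lj_speech.map(prepare_dataset, remove_columns=["audio", "text"])
>>> processed_lj_speech[0].keys()
```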
transformers/docs/source/ko/preprocessing.md/0
{ "file_path": "transformers/docs/source/ko/preprocessing.md", "repo_id": "transformers", "token_count": 17108 }
278
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 이미지 분류[[image-classification]] [[open-in-colab]] <Youtube id="tjAIM7BOYhw"/> 이미지 분류는 이미지에 레이블 또는 클래스를 할당합니다. 텍스트 또는 오디오 분류와 달리 입력은 이미지를 구성하는 픽셀 값입니다. 이미지 분류에는 자연재해 후 피해 감지, 농작물 건강 모니터링, 의료 이미지에서 질병의 징후 검사 지원 등 다양한 응용 사례가 있습니다. 이 가이드에서는 다음을 설명합니다: 1. [Food-101](https://huggingface.co/datasets/food101) 데이터 세트에서 [ViT](model_doc/vit)를 미세 조정하여 이미지에서 식품 항목을 분류합니다. 2. 추론을 위해 미세 조정 모델을 사용합니다. <Tip> 이 작업과 호환되는 모든 아키텍처와 체크포인트를 보려면 [작업 페이지](https://huggingface.co/tasks/image-classification)를 확인하는 것이 좋습니다. </Tip> 시작하기 전에, 필요한 모든 라이브러리가 설치되어 있는지 확인하세요: ```bash pip install transformers datasets evaluate ``` Hugging Face 계정에 로그인하여 모델을 업로드하고 커뮤니티에 공유하는 것을 권장합니다. 메시지가 표시되면, 토큰을 입력하여 로그인하세요: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Food-101 데이터 세트 가져오기[[load-food101-dataset]] 🤗 Datasets 라이브러리에서 Food-101 데이터 세트의 더 작은 부분 집합을 가져오는 것으로 시작합니다. 이렇게 하면 전체 데이터 세트에 대한 훈련에 많은 시간을 할애하기 전에 실험을 통해 모든 것이 제대로 작동하는지 확인할 수 있습니다. ```py >>> from datasets import load_dataset >>> food = load_dataset("food101", split="train[:5000]") ``` 데이터 세트의 `train`을 [`~datasets.Dataset.train_test_split`] 메소드를 사용하여 훈련 및 테스트 세트로 분할하세요: ```py >>> food = food.train_test_split(test_size=0.2) ``` 그리고 예시를 살펴보세요: ```py >>> food["train"][0] {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F52AFC8AC50>, 'label': 79} ``` 데이터 세트의 각 예제에는 두 개의 필드가 있습니다: - `image`: 식품 항목의 PIL 이미지 - `label`: 식품 항목의 레이블 클래스 모델이 레이블 ID에서 레이블 이름을 쉽게 가져올 수 있도록 레이블 이름을 정수로 매핑하고, 정수를 레이블 이름으로 매핑하는 사전을 만드세요: ```py >>> labels = food["train"].features["label"].names >>> label2id, id2label = dict(), dict() >>> for i, label in enumerate(labels): ... label2id[label] = str(i) ... id2label[str(i)] = label ``` 이제 레이블 ID를 레이블 이름으로 변환할 수 있습니다: ```py >>> id2label[str(79)] 'prime_rib' ``` ## 전처리[[preprocess]] 다음 단계는 이미지를 텐서로 처리하기 위해 ViT 이미지 프로세서를 가져오는 것입니다: ```py >>> from transformers import AutoImageProcessor >>> checkpoint = "google/vit-base-patch16-224-in21k" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) ``` <frameworkcontent> <pt> 이미지에 몇 가지 이미지 변환을 적용하여 과적합에 대해 모델을 더 견고하게 만듭니다. 여기서 Torchvision의 [`transforms`](https://pytorch.org/vision/stable/transforms.html) 모듈을 사용하지만, 원하는 이미지 라이브러리를 사용할 수도 있습니다. 이미지의 임의 부분을 크롭하고 크기를 조정한 다음, 이미지 평균과 표준 편차로 정규화하세요: ```py >>> from torchvision.transforms import RandomResizedCrop, Compose, Normalize, ToTensor >>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) >>> size = ( ... image_processor.size["shortest_edge"] ... if "shortest_edge" in image_processor.size ... else (image_processor.size["height"], image_processor.size["width"]) ... 
) >>> _transforms = Compose([RandomResizedCrop(size), ToTensor(), normalize]) ``` 그런 다음 전처리 함수를 만들어 변환을 적용하고 이미지의 `pixel_values`(모델에 대한 입력)를 반환하세요: ```py >>> def transforms(examples): ... examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]] ... del examples["image"] ... return examples ``` 전체 데이터 세트에 전처리 기능을 적용하려면 🤗 Datasets [`~datasets.Dataset.with_transform`]을 사용합니다. 데이터 세트의 요소를 가져올 때 변환이 즉시 적용됩니다: ```py >>> food = food.with_transform(transforms) ``` 이제 [`DefaultDataCollator`]를 사용하여 예제 배치를 만듭니다. 🤗 Transformers의 다른 데이터 콜레이터와 달리, `DefaultDataCollator`는 패딩과 같은 추가적인 전처리를 적용하지 않습니다. ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator() ``` </pt> </frameworkcontent> <frameworkcontent> <tf> 과적합을 방지하고 모델을 보다 견고하게 만들기 위해 데이터 세트의 훈련 부분에 데이터 증강을 추가합니다. 여기서 Keras 전처리 레이어로 훈련 데이터에 대한 변환(데이터 증강 포함)과 검증 데이터에 대한 변환(중앙 크로핑, 크기 조정, 정규화만)을 정의합니다. `tf.image` 또는 다른 원하는 라이브러리를 사용할 수 있습니다. ```py >>> from tensorflow import keras >>> from tensorflow.keras import layers >>> size = (image_processor.size["height"], image_processor.size["width"]) >>> train_data_augmentation = keras.Sequential( ... [ ... layers.RandomCrop(size[0], size[1]), ... layers.Rescaling(scale=1.0 / 127.5, offset=-1), ... layers.RandomFlip("horizontal"), ... layers.RandomRotation(factor=0.02), ... layers.RandomZoom(height_factor=0.2, width_factor=0.2), ... ], ... name="train_data_augmentation", ... ) >>> val_data_augmentation = keras.Sequential( ... [ ... layers.CenterCrop(size[0], size[1]), ... layers.Rescaling(scale=1.0 / 127.5, offset=-1), ... ], ... name="val_data_augmentation", ... ) ``` 다음으로 한 번에 하나의 이미지가 아니라 이미지 배치에 적절한 변환을 적용하는 함수를 만듭니다. ```py >>> import numpy as np >>> import tensorflow as tf >>> from PIL import Image >>> def convert_to_tf_tensor(image: Image): ... np_image = np.array(image) ... tf_image = tf.convert_to_tensor(np_image) ... # `expand_dims()` is used to add a batch dimension since ... # the TF augmentation layers operates on batched inputs. ... return tf.expand_dims(tf_image, 0) >>> def preprocess_train(example_batch): ... """Apply train_transforms across a batch.""" ... images = [ ... train_data_augmentation(convert_to_tf_tensor(image.convert("RGB"))) for image in example_batch["image"] ... ] ... example_batch["pixel_values"] = [tf.transpose(tf.squeeze(image)) for image in images] ... return example_batch ... def preprocess_val(example_batch): ... """Apply val_transforms across a batch.""" ... images = [ ... val_data_augmentation(convert_to_tf_tensor(image.convert("RGB"))) for image in example_batch["image"] ... ] ... example_batch["pixel_values"] = [tf.transpose(tf.squeeze(image)) for image in images] ... return example_batch ``` 🤗 Datasets [`~datasets.Dataset.set_transform`]를 사용하여 즉시 변환을 적용하세요: ```py food["train"].set_transform(preprocess_train) food["test"].set_transform(preprocess_val) ``` 최종 전처리 단계로 `DefaultDataCollator`를 사용하여 예제 배치를 만듭니다. 🤗 Transformers의 다른 데이터 콜레이터와 달리 `DefaultDataCollator`는 패딩과 같은 추가 전처리를 적용하지 않습니다. ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator(return_tensors="tf") ``` </tf> </frameworkcontent> ## 평가[[evaluate]] 훈련 중에 평가 지표를 포함하면 모델의 성능을 평가하는 데 도움이 되는 경우가 많습니다. 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 라이브러리로 평가 방법을 빠르게 가져올 수 있습니다. 이 작업에서는 [accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) 평가 지표를 가져옵니다. 
(🤗 Evaluate [빠른 둘러보기](https://huggingface.co/docs/evaluate/a_quick_tour)를 참조하여 평가 지표를 가져오고 계산하는 방법에 대해 자세히 알아보세요): ```py >>> import evaluate >>> accuracy = evaluate.load("accuracy") ``` 그런 다음 예측과 레이블을 [`~evaluate.EvaluationModule.compute`]에 전달하여 정확도를 계산하는 함수를 만듭니다: ```py >>> import numpy as np >>> def compute_metrics(eval_pred): ... predictions, labels = eval_pred ... predictions = np.argmax(predictions, axis=1) ... return accuracy.compute(predictions=predictions, references=labels) ``` 이제 `compute_metrics` 함수를 사용할 준비가 되었으며, 훈련을 설정하면 이 함수로 되돌아올 것입니다. ## 훈련[[train]] <frameworkcontent> <pt> <Tip> [`Trainer`]를 사용하여 모델을 미세 조정하는 방법에 익숙하지 않은 경우, [여기](../training#train-with-pytorch-trainer)에서 기본 튜토리얼을 확인하세요! </Tip> 이제 모델을 훈련시킬 준비가 되었습니다! [`AutoModelForImageClassification`]로 ViT를 가져옵니다. 예상되는 레이블 수, 레이블 매핑 및 레이블 수를 지정하세요: ```py >>> from transformers import AutoModelForImageClassification, TrainingArguments, Trainer >>> model = AutoModelForImageClassification.from_pretrained( ... checkpoint, ... num_labels=len(labels), ... id2label=id2label, ... label2id=label2id, ... ) ``` 이제 세 단계만 거치면 끝입니다: 1. [`TrainingArguments`]에서 훈련 하이퍼파라미터를 정의하세요. `image` 열이 삭제되기 때문에 미사용 열을 제거하지 않는 것이 중요합니다. `image` 열이 없으면 `pixel_values`을 생성할 수 없습니다. 이 동작을 방지하려면 `remove_unused_columns=False`로 설정하세요! 다른 유일한 필수 매개변수는 모델 저장 위치를 지정하는 `output_dir`입니다. `push_to_hub=True`로 설정하면 이 모델을 허브에 푸시합니다(모델을 업로드하려면 Hugging Face에 로그인해야 합니다). 각 에폭이 끝날 때마다, [`Trainer`]가 정확도를 평가하고 훈련 체크포인트를 저장합니다. 2. [`Trainer`]에 모델, 데이터 세트, 토크나이저, 데이터 콜레이터 및 `compute_metrics` 함수와 함께 훈련 인수를 전달하세요. 3. [`~Trainer.train`]을 호출하여 모델을 미세 조정하세요. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_food_model", ... remove_unused_columns=False, ... eval_strategy="epoch", ... save_strategy="epoch", ... learning_rate=5e-5, ... per_device_train_batch_size=16, ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=16, ... num_train_epochs=3, ... warmup_ratio=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... data_collator=data_collator, ... train_dataset=food["train"], ... eval_dataset=food["test"], ... tokenizer=image_processor, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` 훈련이 완료되면, 모든 사람이 모델을 사용할 수 있도록 [`~transformers.Trainer.push_to_hub`] 메소드로 모델을 허브에 공유하세요: ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <frameworkcontent> <tf> <Tip> Keras를 사용하여 모델을 미세 조정하는 방법에 익숙하지 않은 경우, 먼저 [기본 튜토리얼](./training#train-a-tensorflow-model-with-keras)을 확인하세요! </Tip> TensorFlow에서 모델을 미세 조정하려면 다음 단계를 따르세요: 1. 훈련 하이퍼파라미터를 정의하고 옵티마이저와 학습률 스케쥴을 설정합니다. 2. 사전 훈련된 모델을 인스턴스화합니다. 3. 🤗 Dataset을 `tf.data.Dataset`으로 변환합니다. 4. 모델을 컴파일합니다. 5. 콜백을 추가하고 훈련을 수행하기 위해 `fit()` 메소드를 사용합니다. 6. 커뮤니티와 공유하기 위해 모델을 🤗 Hub에 업로드합니다. 하이퍼파라미터, 옵티마이저 및 학습률 스케쥴을 정의하는 것으로 시작합니다: ```py >>> from transformers import create_optimizer >>> batch_size = 16 >>> num_epochs = 5 >>> num_train_steps = len(food["train"]) * num_epochs >>> learning_rate = 3e-5 >>> weight_decay_rate = 0.01 >>> optimizer, lr_schedule = create_optimizer( ... init_lr=learning_rate, ... num_train_steps=num_train_steps, ... weight_decay_rate=weight_decay_rate, ... num_warmup_steps=0, ... ) ``` 그런 다음 레이블 매핑과 함께 [`TFAuto ModelForImageClassification`]으로 ViT를 가져옵니다: ```py >>> from transformers import TFAutoModelForImageClassification >>> model = TFAutoModelForImageClassification.from_pretrained( ... checkpoint, ... id2label=id2label, ... label2id=label2id, ... 
) ``` 데이터 세트를 [`~datasets.Dataset.to_tf_dataset`]와 `data_collator`를 사용하여 `tf.data.Dataset` 형식으로 변환하세요: ```py >>> # converting our train dataset to tf.data.Dataset >>> tf_train_dataset = food["train"].to_tf_dataset( ... columns="pixel_values", label_cols="label", shuffle=True, batch_size=batch_size, collate_fn=data_collator ... ) >>> # converting our test dataset to tf.data.Dataset >>> tf_eval_dataset = food["test"].to_tf_dataset( ... columns="pixel_values", label_cols="label", shuffle=True, batch_size=batch_size, collate_fn=data_collator ... ) ``` `compile()`를 사용하여 훈련 모델을 구성하세요: ```py >>> from tensorflow.keras.losses import SparseCategoricalCrossentropy >>> loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) >>> model.compile(optimizer=optimizer, loss=loss) ``` 예측에서 정확도를 계산하고 모델을 🤗 Hub로 푸시하려면 [Keras callbacks](../main_classes/keras_callbacks)를 사용하세요. `compute_metrics` 함수를 [KerasMetricCallback](../main_classes/keras_callbacks#transformers.KerasMetricCallback)에 전달하고, [PushToHubCallback](../main_classes/keras_callbacks#transformers.PushToHubCallback)을 사용하여 모델을 업로드합니다: ```py >>> from transformers.keras_callbacks import KerasMetricCallback, PushToHubCallback >>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_eval_dataset) >>> push_to_hub_callback = PushToHubCallback( ... output_dir="food_classifier", ... tokenizer=image_processor, ... save_strategy="no", ... ) >>> callbacks = [metric_callback, push_to_hub_callback] ``` 이제 모델을 훈련할 준비가 되었습니다! 훈련 및 검증 데이터 세트, 에폭 수와 함께 `fit()`을 호출하고, 콜백을 사용하여 모델을 미세 조정합니다: ```py >>> model.fit(tf_train_dataset, validation_data=tf_eval_dataset, epochs=num_epochs, callbacks=callbacks) Epoch 1/5 250/250 [==============================] - 313s 1s/step - loss: 2.5623 - val_loss: 1.4161 - accuracy: 0.9290 Epoch 2/5 250/250 [==============================] - 265s 1s/step - loss: 0.9181 - val_loss: 0.6808 - accuracy: 0.9690 Epoch 3/5 250/250 [==============================] - 252s 1s/step - loss: 0.3910 - val_loss: 0.4303 - accuracy: 0.9820 Epoch 4/5 250/250 [==============================] - 251s 1s/step - loss: 0.2028 - val_loss: 0.3191 - accuracy: 0.9900 Epoch 5/5 250/250 [==============================] - 238s 949ms/step - loss: 0.1232 - val_loss: 0.3259 - accuracy: 0.9890 ``` 축하합니다! 모델을 미세 조정하고 🤗 Hub에 공유했습니다. 이제 추론에 사용할 수 있습니다! </tf> </frameworkcontent> <Tip> 이미지 분류를 위한 모델을 미세 조정하는 자세한 예제는 다음 [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)을 참조하세요. </Tip> ## 추론[[inference]] 좋아요, 이제 모델을 미세 조정했으니 추론에 사용할 수 있습니다! 추론을 수행하고자 하는 이미지를 가져와봅시다: ```py >>> ds = load_dataset("food101", split="validation[:10]") >>> image = ds["image"][0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png" alt="image of beignets"/> </div> 미세 조정 모델로 추론을 시도하는 가장 간단한 방법은 [`pipeline`]을 사용하는 것입니다. 
모델로 이미지 분류를 위한 `pipeline`을 인스턴스화하고 이미지를 전달합니다: ```py >>> from transformers import pipeline >>> classifier = pipeline("image-classification", model="my_awesome_food_model") >>> classifier(image) [{'score': 0.31856709718704224, 'label': 'beignets'}, {'score': 0.015232225880026817, 'label': 'bruschetta'}, {'score': 0.01519392803311348, 'label': 'chicken_wings'}, {'score': 0.013022331520915031, 'label': 'pork_chop'}, {'score': 0.012728818692266941, 'label': 'prime_rib'}] ``` 원한다면, `pipeline`의 결과를 수동으로 복제할 수도 있습니다: <frameworkcontent> <pt> 이미지를 전처리하기 위해 이미지 프로세서를 가져오고 `input`을 PyTorch 텐서로 반환합니다: ```py >>> from transformers import AutoImageProcessor >>> import torch >>> image_processor = AutoImageProcessor.from_pretrained("my_awesome_food_model") >>> inputs = image_processor(image, return_tensors="pt") ``` 입력을 모델에 전달하고 logits을 반환합니다: ```py >>> from transformers import AutoModelForImageClassification >>> model = AutoModelForImageClassification.from_pretrained("my_awesome_food_model") >>> with torch.no_grad(): ... logits = model(**inputs).logits ``` 확률이 가장 높은 예측 레이블을 가져오고, 모델의 `id2label` 매핑을 사용하여 레이블로 변환합니다: ```py >>> predicted_label = logits.argmax(-1).item() >>> model.config.id2label[predicted_label] 'beignets' ``` </pt> </frameworkcontent> <frameworkcontent> <tf> 이미지를 전처리하기 위해 이미지 프로세서를 가져오고 `input`을 TensorFlow 텐서로 반환합니다: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("MariaK/food_classifier") >>> inputs = image_processor(image, return_tensors="tf") ``` 입력을 모델에 전달하고 logits을 반환합니다: ```py >>> from transformers import TFAutoModelForImageClassification >>> model = TFAutoModelForImageClassification.from_pretrained("MariaK/food_classifier") >>> logits = model(**inputs).logits ``` 확률이 가장 높은 예측 레이블을 가져오고, 모델의 `id2label` 매핑을 사용하여 레이블로 변환합니다: ```py >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0]) >>> model.config.id2label[predicted_class_id] 'beignets' ``` </tf> </frameworkcontent>
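`pipeline`처럼 상위 클래스 여러 개와 확률을 함께 확인하고 싶다면, logits에 softmax를 적용한 뒤 상위 값을 추출하면 됩니다. 아래는 위 PyTorch 예제의 `logits`와 `model`을 그대로 사용한다고 가정한 간단한 예시입니다(상위 5개 기준):

```py
>>> import torch

>>> probs = torch.softmax(logits, dim=-1)[0]
>>> top5 = torch.topk(probs, k=5)
>>> [(model.config.id2label[idx.item()], round(score.item(), 4)) for score, idx in zip(top5.values, top5.indices)]
```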
transformers/docs/source/ko/tasks/image_classification.md/0
{ "file_path": "transformers/docs/source/ko/tasks/image_classification.md", "repo_id": "transformers", "token_count": 11497 }
279
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 번역[[translation]] [[open-in-colab]] <Youtube id="1JvfrvZgi6c"/> 번역은 한 언어로 된 시퀀스를 다른 언어로 변환합니다. 번역이나 요약은 입력을 받아 일련의 출력을 반환하는 강력한 프레임워크인 시퀀스-투-시퀀스 문제로 구성할 수 있는 대표적인 태스크입니다. 번역 시스템은 일반적으로 다른 언어로 된 텍스트 간의 번역에 사용되지만, 음성 간의 통역이나 텍스트-음성 또는 음성-텍스트와 같은 조합에도 사용될 수 있습니다. 이 가이드에서 학습할 내용은: 1. 영어 텍스트를 프랑스어로 번역하기 위해 [T5](https://huggingface.co/google-t5/t5-small) 모델을 OPUS Books 데이터세트의 영어-프랑스어 하위 집합으로 파인튜닝하는 방법과 2. 파인튜닝된 모델을 추론에 사용하는 방법입니다. <Tip> 이 작업과 호환되는 모든 아키텍처와 체크포인트를 보려면 [작업 페이지](https://huggingface.co/tasks/translation)를 확인하는 것이 좋습니다. </Tip> 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```bash pip install transformers datasets evaluate sacrebleu ``` 모델을 업로드하고 커뮤니티와 공유할 수 있도록 Hugging Face 계정에 로그인하는 것이 좋습니다. 새로운 창이 표시되면 토큰을 입력하여 로그인하세요. ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## OPUS Books 데이터세트 가져오기[[load-opus-books-dataset]] 먼저 🤗 Datasets 라이브러리에서 [OPUS Books](https://huggingface.co/datasets/opus_books) 데이터세트의 영어-프랑스어 하위 집합을 가져오세요. ```py >>> from datasets import load_dataset >>> books = load_dataset("opus_books", "en-fr") ``` 데이터세트를 [`~datasets.Dataset.train_test_split`] 메서드를 사용하여 훈련 및 테스트 데이터로 분할하세요. ```py >>> books = books["train"].train_test_split(test_size=0.2) ``` 훈련 데이터에서 예시를 살펴볼까요? ```py >>> books["train"][0] {'id': '90560', 'translation': {'en': 'But this lofty plateau measured only a few fathoms, and soon we reentered Our Element.', 'fr': 'Mais ce plateau élevé ne mesurait que quelques toises, et bientôt nous fûmes rentrés dans notre élément.'}} ``` 반환된 딕셔너리의 `translation` 키가 텍스트의 영어, 프랑스어 버전을 포함하고 있는 것을 볼 수 있습니다. ## 전처리[[preprocess]] <Youtube id="XAR8jnZZuUs"/> 다음 단계로 영어-프랑스어 쌍을 처리하기 위해 T5 토크나이저를 가져오세요. ```py >>> from transformers import AutoTokenizer >>> checkpoint = "google-t5/t5-small" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) ``` 만들 전처리 함수는 아래 요구사항을 충족해야 합니다: 1. T5가 번역 태스크임을 인지할 수 있도록 입력 앞에 프롬프트를 추가하세요. 여러 NLP 태스크를 할 수 있는 모델 중 일부는 이렇게 태스크 프롬프트를 미리 줘야합니다. 2. 원어(영어)과 번역어(프랑스어)를 별도로 토큰화하세요. 영어 어휘로 사전 학습된 토크나이저로 프랑스어 텍스트를 토큰화할 수는 없기 때문입니다. 3. `max_length` 매개변수로 설정한 최대 길이보다 길지 않도록 시퀀스를 truncate하세요. ```py >>> source_lang = "en" >>> target_lang = "fr" >>> prefix = "translate English to French: " >>> def preprocess_function(examples): ... inputs = [prefix + example[source_lang] for example in examples["translation"]] ... targets = [example[target_lang] for example in examples["translation"]] ... model_inputs = tokenizer(inputs, text_target=targets, max_length=128, truncation=True) ... return model_inputs ``` 전체 데이터세트에 전처리 함수를 적용하려면 🤗 Datasets의 [`~datasets.Dataset.map`] 메서드를 사용하세요. `map` 함수의 속도를 높이려면 `batched=True`를 설정하여 데이터세트의 여러 요소를 한 번에 처리하는 방법이 있습니다. ```py >>> tokenized_books = books.map(preprocess_function, batched=True) ``` 이제 [`DataCollatorForSeq2Seq`]를 사용하여 예제 배치를 생성합니다. 
데이터세트의 최대 길이로 전부를 padding하는 대신, 데이터 정렬 중 각 배치의 최대 길이로 문장을 *동적으로 padding*하는 것이 더 효율적입니다. <frameworkcontent> <pt> ```py >>> from transformers import DataCollatorForSeq2Seq >>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint) ``` </pt> <tf> ```py >>> from transformers import DataCollatorForSeq2Seq >>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint, return_tensors="tf") ``` </tf> </frameworkcontent> ## 평가[[evalulate]] 훈련 중에 메트릭을 포함하면 모델의 성능을 평가하는 데 도움이 됩니다. 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 라이브러리로 평가 방법(evaluation method)을 빠르게 가져올 수 있습니다. 현재 태스크에 적합한 SacreBLEU 메트릭을 가져오세요. (메트릭을 가져오고 계산하는 방법에 대해 자세히 알아보려면 🤗 Evaluate [둘러보기](https://huggingface.co/docs/evaluate/a_quick_tour)를 참조하세요): ```py >>> import evaluate >>> metric = evaluate.load("sacrebleu") ``` 그런 다음 [`~evaluate.EvaluationModule.compute`]에 예측값과 레이블을 전달하여 SacreBLEU 점수를 계산하는 함수를 생성하세요: ```py >>> import numpy as np >>> def postprocess_text(preds, labels): ... preds = [pred.strip() for pred in preds] ... labels = [[label.strip()] for label in labels] ... return preds, labels >>> def compute_metrics(eval_preds): ... preds, labels = eval_preds ... if isinstance(preds, tuple): ... preds = preds[0] ... decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) ... labels = np.where(labels != -100, labels, tokenizer.pad_token_id) ... decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) ... decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) ... result = metric.compute(predictions=decoded_preds, references=decoded_labels) ... result = {"bleu": result["score"]} ... prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] ... result["gen_len"] = np.mean(prediction_lens) ... result = {k: round(v, 4) for k, v in result.items()} ... return result ``` 이제 `compute_metrics` 함수는 준비되었고, 훈련 과정을 설정할 때 다시 살펴볼 예정입니다. ## 훈련[[train]] <frameworkcontent> <pt> <Tip> [`Trainer`]로 모델을 파인튜닝하는 방법에 익숙하지 않다면 [여기](../training#train-with-pytorch-trainer)에서 기본 튜토리얼을 살펴보시기 바랍니다! </Tip> 모델을 훈련시킬 준비가 되었군요! [`AutoModelForSeq2SeqLM`]으로 T5를 로드하세요: ```py >>> from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer >>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint) ``` 이제 세 단계만 거치면 끝입니다: 1. [`Seq2SeqTrainingArguments`]에서 훈련 하이퍼파라미터를 정의하세요. 유일한 필수 매개변수는 모델을 저장할 위치인 `output_dir`입니다. 모델을 Hub에 푸시하기 위해 `push_to_hub=True`로 설정하세요. (모델을 업로드하려면 Hugging Face에 로그인해야 합니다.) [`Trainer`]는 에폭이 끝날때마다 SacreBLEU 메트릭을 평가하고 훈련 체크포인트를 저장합니다. 2. [`Seq2SeqTrainer`]에 훈련 인수를 전달하세요. 모델, 데이터 세트, 토크나이저, data collator 및 `compute_metrics` 함수도 덩달아 전달해야 합니다. 3. [`~Trainer.train`]을 호출하여 모델을 파인튜닝하세요. ```py >>> training_args = Seq2SeqTrainingArguments( ... output_dir="my_awesome_opus_books_model", ... eval_strategy="epoch", ... learning_rate=2e-5, ... per_device_train_batch_size=16, ... per_device_eval_batch_size=16, ... weight_decay=0.01, ... save_total_limit=3, ... num_train_epochs=2, ... predict_with_generate=True, ... fp16=True, ... push_to_hub=True, ... ) >>> trainer = Seq2SeqTrainer( ... model=model, ... args=training_args, ... train_dataset=tokenized_books["train"], ... eval_dataset=tokenized_books["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` 학습이 완료되면 [`~transformers.Trainer.push_to_hub`] 메서드로 모델을 Hub에 공유하세요. 
이러면 누구나 모델을 사용할 수 있게 됩니다:

```py
>>> trainer.push_to_hub()
```
</pt>
<tf>
<Tip>

Keras로 모델을 파인튜닝하는 방법이 익숙하지 않다면, [여기](../training#train-a-tensorflow-model-with-keras)에서 기본 튜토리얼을 살펴보시기 바랍니다!

</Tip>
TensorFlow에서 모델을 파인튜닝하려면 우선 optimizer 함수, 학습률 스케줄 등의 훈련 하이퍼파라미터를 설정하세요:

```py
>>> from transformers import AdamWeightDecay

>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
```

이제 [`TFAutoModelForSeq2SeqLM`]로 T5를 가져오세요:

```py
>>> from transformers import TFAutoModelForSeq2SeqLM

>>> model = TFAutoModelForSeq2SeqLM.from_pretrained(checkpoint)
```

[`~transformers.TFPreTrainedModel.prepare_tf_dataset`]로 데이터 세트를 `tf.data.Dataset` 형식으로 변환하세요:

```py
>>> tf_train_set = model.prepare_tf_dataset(
...     tokenized_books["train"],
...     shuffle=True,
...     batch_size=16,
...     collate_fn=data_collator,
... )

>>> tf_test_set = model.prepare_tf_dataset(
...     tokenized_books["test"],
...     shuffle=False,
...     batch_size=16,
...     collate_fn=data_collator,
... )
```

훈련하기 위해 [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) 메서드로 모델을 구성하세요:

```py
>>> import tensorflow as tf

>>> model.compile(optimizer=optimizer)
```

훈련을 시작하기 전에 예측값으로부터 SacreBLEU 메트릭을 계산하는 방법과 모델을 Hub에 업로드하는 방법 두 가지를 미리 설정해둬야 합니다. 둘 다 [Keras callbacks](../main_classes/keras_callbacks)로 구현하세요.

[`~transformers.KerasMetricCallback`]에 `compute_metrics` 함수를 전달하세요.

```py
>>> from transformers.keras_callbacks import KerasMetricCallback

>>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_test_set)
```

모델과 토크나이저를 업로드할 위치를 [`~transformers.PushToHubCallback`]에서 지정하세요:

```py
>>> from transformers.keras_callbacks import PushToHubCallback

>>> push_to_hub_callback = PushToHubCallback(
...     output_dir="my_awesome_opus_books_model",
...     tokenizer=tokenizer,
... )
```

이제 콜백들을 한데로 묶어주세요:

```py
>>> callbacks = [metric_callback, push_to_hub_callback]
```

드디어 모델을 훈련시킬 모든 준비를 마쳤군요! 이제 훈련 및 검증 데이터 세트에 [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) 메서드를 에폭 수와 만들어둔 콜백과 함께 호출하여 모델을 파인튜닝하세요:

```py
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=callbacks)
```

학습이 완료되면 모델이 자동으로 Hub에 업로드되고, 누구나 사용할 수 있게 됩니다!
</tf>
</frameworkcontent>

<Tip>

번역을 위해 모델을 파인튜닝하는 방법에 대한 보다 자세한 예제는 해당 [PyTorch 노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb) 또는 [TensorFlow 노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)을 참조하세요.

</Tip>

## 추론[[inference]]

좋아요, 이제 모델을 파인튜닝했으니 추론에 사용할 수 있습니다!

다른 언어로 번역하고 싶은 텍스트를 써보세요. T5의 경우 원하는 태스크를 입력의 접두사로 추가해야 합니다. 예를 들어 영어에서 프랑스어로 번역하는 경우, 아래와 같은 접두사가 추가됩니다:

```py
>>> text = "translate English to French: Legumes share resources with nitrogen-fixing bacteria."
```

파인튜닝된 모델로 추론하기에 제일 간단한 방법은 [`pipeline`]을 사용하는 것입니다. 해당 모델로 번역 `pipeline`을 만든 뒤, 텍스트를 전달하세요:

```py
>>> from transformers import pipeline

# Change `xx` to the language of the input and `yy` to the language of the desired output.
# Examples: "en" for English, "fr" for French, "de" for German, "es" for Spanish, "zh" for Chinese, etc; translation_en_to_fr translates English to French # You can view all the lists of languages here - https://huggingface.co/languages >>> translator = pipeline("translation_xx_to_yy", model="my_awesome_opus_books_model") >>> translator(text) [{'translation_text': 'Legumes partagent des ressources avec des bactéries azotantes.'}] ``` 원한다면 `pipeline`의 결과를 직접 복제할 수도 있습니다: <frameworkcontent> <pt> 텍스트를 토큰화하고 `input_ids`를 PyTorch 텐서로 반환하세요: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_opus_books_model") >>> inputs = tokenizer(text, return_tensors="pt").input_ids ``` [`~generation.GenerationMixin.generate`] 메서드로 번역을 생성하세요. 다양한 텍스트 생성 전략 및 생성을 제어하기 위한 매개변수에 대한 자세한 내용은 [Text Generation](../main_classes/text_generation) API를 살펴보시기 바랍니다. ```py >>> from transformers import AutoModelForSeq2SeqLM >>> model = AutoModelForSeq2SeqLM.from_pretrained("my_awesome_opus_books_model") >>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95) ``` 생성된 토큰 ID들을 다시 텍스트로 디코딩하세요: ```py >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'Les lignées partagent des ressources avec des bactéries enfixant l'azote.' ``` </pt> <tf> 텍스트를 토큰화하고 `input_ids`를 TensorFlow 텐서로 반환하세요: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_opus_books_model") >>> inputs = tokenizer(text, return_tensors="tf").input_ids ``` [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] 메서드로 번역을 생성하세요. 다양한 텍스트 생성 전략 및 생성을 제어하기 위한 매개변수에 대한 자세한 내용은 [Text Generation](../main_classes/text_generation) API를 살펴보시기 바랍니다. ```py >>> from transformers import TFAutoModelForSeq2SeqLM >>> model = TFAutoModelForSeq2SeqLM.from_pretrained("my_awesome_opus_books_model") >>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95) ``` 생성된 토큰 ID들을 다시 텍스트로 디코딩하세요: ```py >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'Les lugumes partagent les ressources avec des bactéries fixatrices d'azote.' ``` </tf> </frameworkcontent>
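샘플링 대신 재현 가능한 결정적 출력이 필요하다면 빔 서치를 쓸 수도 있습니다. 아래는 위 PyTorch 예제의 `model`, `inputs`, `tokenizer`를 그대로 재사용한다고 가정한 참고용 스케치입니다:

```py
>>> # 참고용 스케치: 샘플링을 끄고 빔 서치로 생성합니다 (위 예제의 model, inputs, tokenizer 재사용 가정)
>>> outputs = model.generate(inputs, max_new_tokens=40, num_beams=5, do_sample=False)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
```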
transformers/docs/source/ko/tasks/translation.md/0
{ "file_path": "transformers/docs/source/ko/tasks/translation.md", "repo_id": "transformers", "token_count": 9508 }
280
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Classificação de tokens <Youtube id="wVHdVlPScxA"/> A classificação de tokens atribui um rótulo a tokens individuais em uma frase. Uma das tarefas de classificação de tokens mais comuns é o Reconhecimento de Entidade Nomeada, também chamada de NER (sigla em inglês para Named Entity Recognition). O NER tenta encontrar um rótulo para cada entidade em uma frase, como uma pessoa, local ou organização. Este guia mostrará como realizar o fine-tuning do [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) no conjunto de dados [WNUT 17](https://huggingface.co/datasets/wnut_17) para detectar novas entidades. <Tip> Consulte a [página de tarefas de classificação de tokens](https://huggingface.co/tasks/token-classification) para obter mais informações sobre outras formas de classificação de tokens e seus modelos, conjuntos de dados e métricas associadas. </Tip> ## Carregando o conjunto de dados WNUT 17 Carregue o conjunto de dados WNUT 17 da biblioteca 🤗 Datasets: ```py >>> from datasets import load_dataset >>> wnut = load_dataset("wnut_17") ``` E dê uma olhada em um exemplo: ```py >>> wnut["train"][0] {'id': '0', 'ner_tags': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 8, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0], 'tokens': ['@paulwalk', 'It', "'s", 'the', 'view', 'from', 'where', 'I', "'m", 'living', 'for', 'two', 'weeks', '.', 'Empire', 'State', 'Building', '=', 'ESB', '.', 'Pretty', 'bad', 'storm', 'here', 'last', 'evening', '.'] } ``` Cada número em `ner_tags` representa uma entidade. Converta o número em um rótulo para obter mais informações: ```py >>> label_list = wnut["train"].features[f"ner_tags"].feature.names >>> label_list [ "O", "B-corporation", "I-corporation", "B-creative-work", "I-creative-work", "B-group", "I-group", "B-location", "I-location", "B-person", "I-person", "B-product", "I-product", ] ``` O `ner_tag` descreve uma entidade, como uma organização, local ou pessoa. A letra que prefixa cada `ner_tag` indica a posição do token da entidade: - `B-` indica o início de uma entidade. - `I-` indica que um token está contido dentro da mesma entidade (por exemplo, o token `State` pode fazer parte de uma entidade como `Empire State Building`). - `0` indica que o token não corresponde a nenhuma entidade. 
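Apenas para ilustrar, podemos converter os `ner_tags` numéricos do primeiro exemplo nos rótulos correspondentes usando a `label_list` carregada acima:

```py
>>> example = wnut["train"][0]
>>> [label_list[tag] for tag in example["ner_tags"]]
['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-location', 'I-location', 'I-location', 'O', 'B-location', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']
```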
## Pré-processamento

<Youtube id="iY2AZYdZAr0"/>

Carregue o tokenizer do DistilBERT para processar os `tokens`:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
```

Como a entrada já foi dividida em palavras, defina `is_split_into_words=True` para tokenizar as palavras em subpalavras:

```py
>>> example = wnut["train"][0]
>>> tokenized_input = tokenizer(example["tokens"], is_split_into_words=True)
>>> tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"])
>>> tokens
['[CLS]', '@', 'paul', '##walk', 'it', "'", 's', 'the', 'view', 'from', 'where', 'i', "'", 'm', 'living', 'for', 'two', 'weeks', '.', 'empire', 'state', 'building', '=', 'es', '##b', '.', 'pretty', 'bad', 'storm', 'here', 'last', 'evening', '.', '[SEP]']
```

Ao adicionar os tokens especiais `[CLS]` e `[SEP]` e a tokenização de subpalavras, uma incompatibilidade é gerada entre a entrada e os rótulos. Uma única palavra correspondente a um único rótulo pode ser dividida em duas subpalavras. Você precisará realinhar os tokens e os rótulos da seguinte forma:

1. Mapeie todos os tokens para a palavra correspondente com o método [`word_ids`](https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.word_ids).
2. Atribua o rótulo `-100` aos tokens especiais `[CLS]` e `[SEP]` para que a função de loss do PyTorch os ignore.
3. Rotule apenas o primeiro token de uma determinada palavra, atribuindo `-100` aos outros subtokens da mesma palavra.

Aqui está como você pode criar uma função para realinhar os tokens e rótulos e truncar sequências para não serem maiores que o comprimento máximo de entrada do DistilBERT:

```py
>>> def tokenize_and_align_labels(examples):
...     tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)

...     labels = []
...     for i, label in enumerate(examples[f"ner_tags"]):
...         word_ids = tokenized_inputs.word_ids(batch_index=i)  # Map tokens to their respective word.
...         previous_word_idx = None
...         label_ids = []
...         for word_idx in word_ids:  # Set the special tokens to -100.
...             if word_idx is None:
...                 label_ids.append(-100)
...             elif word_idx != previous_word_idx:  # Only label the first token of a given word.
...                 label_ids.append(label[word_idx])
...             else:
...                 label_ids.append(-100)
...             previous_word_idx = word_idx
...         labels.append(label_ids)

...     tokenized_inputs["labels"] = labels
...     return tokenized_inputs
```

Use a função [`map`](https://huggingface.co/docs/datasets/process#map) do 🤗 Datasets para tokenizar e alinhar os rótulos em todo o conjunto de dados. Você pode acelerar a função `map` configurando `batched=True` para processar vários elementos do conjunto de dados de uma só vez:

```py
>>> tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)
```

Use o [`DataCollatorForTokenClassification`] para criar um batch de exemplos. Ele também *preencherá dinamicamente* seu texto e rótulos para o comprimento do elemento mais longo em seu batch, para que tenham um comprimento uniforme. Embora seja possível preencher seu texto na função `tokenizer` configurando `padding=True`, o preenchimento dinâmico é mais eficiente.
<frameworkcontent>
<pt>

```py
>>> from transformers import DataCollatorForTokenClassification

>>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
```
</pt>
<tf>

```py
>>> from transformers import DataCollatorForTokenClassification

>>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf")
```
</tf>
</frameworkcontent>

## Treinamento

<frameworkcontent>
<pt>
Carregue o DistilBERT com o [`AutoModelForTokenClassification`] junto com o número de rótulos esperados (a `label_list` acima contém 13 rótulos):

```py
>>> from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer

>>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased", num_labels=13)
```

<Tip>

Se você não estiver familiarizado com o fine-tuning de um modelo com o [`Trainer`], dê uma olhada no tutorial básico [aqui](../training#finetune-with-trainer)!

</Tip>

Nesse ponto, restam apenas três passos:

1. Definir seus hiperparâmetros de treinamento em [`TrainingArguments`].
2. Passar os argumentos de treinamento para o [`Trainer`] junto com o modelo, o conjunto de dados, o tokenizer e o data collator.
3. Chamar a função [`~Trainer.train`] para executar o fine-tuning do seu modelo.

```py
>>> training_args = TrainingArguments(
...     output_dir="./results",
...     eval_strategy="epoch",
...     learning_rate=2e-5,
...     per_device_train_batch_size=16,
...     per_device_eval_batch_size=16,
...     num_train_epochs=3,
...     weight_decay=0.01,
... )

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=tokenized_wnut["train"],
...     eval_dataset=tokenized_wnut["test"],
...     tokenizer=tokenizer,
...     data_collator=data_collator,
... )

>>> trainer.train()
```
</pt>
<tf>
Para executar o fine-tuning de um modelo no TensorFlow, comece convertendo seu conjunto de dados para o formato `tf.data.Dataset` com [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Nessa execução você deverá especificar as entradas e rótulos (no parâmetro `columns`), se deseja embaralhar o conjunto de dados, o tamanho do batch e o data collator:

```py
>>> tf_train_set = tokenized_wnut["train"].to_tf_dataset(
...     columns=["attention_mask", "input_ids", "labels"],
...     shuffle=True,
...     batch_size=16,
...     collate_fn=data_collator,
... )

>>> tf_validation_set = tokenized_wnut["validation"].to_tf_dataset(
...     columns=["attention_mask", "input_ids", "labels"],
...     shuffle=False,
...     batch_size=16,
...     collate_fn=data_collator,
... )
```

<Tip>

Se você não estiver familiarizado com o fine-tuning de um modelo com o Keras, dê uma olhada no tutorial básico [aqui](../training#finetune-with-keras)!

</Tip>

Configure o otimizador e alguns hiperparâmetros de treinamento:

```py
>>> from transformers import create_optimizer

>>> batch_size = 16
>>> num_train_epochs = 3
>>> num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs
>>> optimizer, lr_schedule = create_optimizer(
...     init_lr=2e-5,
...     num_train_steps=num_train_steps,
...     weight_decay_rate=0.01,
...     num_warmup_steps=0,
... )
```

Carregue o DistilBERT com o [`TFAutoModelForTokenClassification`] junto com o número de rótulos esperados:

```py
>>> from transformers import TFAutoModelForTokenClassification

>>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased", num_labels=13)
```

Configure o modelo para treinamento com o método [`compile`](https://keras.io/api/models/model_training_apis/#compile-method):

```py
>>> import tensorflow as tf

>>> model.compile(optimizer=optimizer)
```

Chame o método [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) para executar o fine-tuning do modelo:

```py
>>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3)
```
</tf>
</frameworkcontent>

<Tip>

Para obter um exemplo mais aprofundado de como executar o fine-tuning de um modelo para classificação de tokens, dê uma olhada nesse [notebook utilizando PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb) ou nesse [notebook utilizando TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb).

</Tip>
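Depois do treinamento, você pode experimentar rapidamente o modelo ajustado com um [`pipeline`] de classificação de tokens. O esboço abaixo é apenas ilustrativo e assume que você salvou o modelo e o tokenizer em um diretório local hipotético chamado `meu_modelo_wnut` (por exemplo, com `trainer.save_model("meu_modelo_wnut")` e `tokenizer.save_pretrained("meu_modelo_wnut")`):

```py
>>> from transformers import pipeline

>>> # esboço ilustrativo; o caminho "meu_modelo_wnut" é apenas um exemplo hipotético
>>> classifier = pipeline("token-classification", model="meu_modelo_wnut")
>>> classifier("Empire State Building is in New York")
```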
transformers/docs/source/pt/tasks/token_classification.md/0
{ "file_path": "transformers/docs/source/pt/tasks/token_classification.md", "repo_id": "transformers", "token_count": 4235 }
281
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 调试 ## 多GPU网络问题调试 当使用`DistributedDataParallel`和多个GPU进行训练或推理时,如果遇到进程和(或)节点之间的互联问题,您可以使用以下脚本来诊断网络问题。 ```bash wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py ``` 例如,要测试两个GPU之间的互联,请执行以下操作: ```bash python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` 如果两个进程能够相互通信并分配GPU内存,它们各自将打印出 "OK" 状态。 对于更多的GPU或节点,可以根据脚本中的参数进行调整。 在诊断脚本内部,您将找到更多详细信息,甚至有关如何在SLURM环境中运行它的说明。 另一种级别的调试是添加 `NCCL_DEBUG=INFO` 环境变量,如下所示: ```bash NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` 这将产生大量与NCCL相关的调试信息,如果发现有问题报告,您可以在线搜索以获取相关信息。或者,如果您不确定如何解释输出,可以在`issue`中分享日志文件。 ## 下溢和上溢检测 <Tip> 目前,此功能仅适用于PyTorch。 </Tip> <Tip> 对于多GPU训练,它需要使用DDP(`torch.distributed.launch`)。 </Tip> <Tip> 此功能可以与任何基于`nn.Module`的模型一起使用。 </Tip> 如果您开始发现`loss=NaN`或模型因激活值或权重中的`inf`或`nan`而出现一些异常行为,就需要发现第一个下溢或上溢发生的地方以及导致它的原因。幸运的是,您可以通过激活一个特殊模块来自动进行检测。 如果您正在使用[`Trainer`],只需把以下内容: ```bash --debug underflow_overflow ``` 添加到常规命令行参数中,或在创建[`TrainingArguments`]对象时传递 `debug="underflow_overflow"`。 如果您正在使用自己的训练循环或其他Trainer,您可以通过以下方式实现相同的功能: ```python from transformers.debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model) ``` [`debug_utils.DebugUnderflowOverflow`] 将`hooks`插入模型,紧跟在每次前向调用之后,进而测试输入和输出变量,以及相应模块的权重。一旦在激活值或权重的至少一个元素中检测到`inf`或`nan`,程序将执行`assert`并打印报告,就像这样(这是在`google/mt5-small`下使用fp16混合精度捕获的): ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata encoder.block.1.layer.1.DenseReluDense.dropout Dropout 0.00e+00 2.57e+02 input[0] 0.00e+00 2.85e+02 output [...] 
encoder.block.2.layer.0 T5LayerSelfAttention 6.78e-04 3.15e+03 input[0] 2.65e-04 3.42e+03 output[0] None output[1] 2.25e-01 1.00e+04 output[2] encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.dropout Dropout 0.00e+00 8.76e+03 input[0] 0.00e+00 9.74e+03 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` 由于篇幅原因,示例输出中间的部分已经被缩减。 第二列显示了绝对最大元素的值,因此,如果您仔细查看最后`frame`,输入和输出都在`1e4`的范围内。因此,在使用fp16混合精度进行训练时,最后一步发生了溢出(因为在`fp16`下,在`inf`之前的最大数字是`64e3`)。为了避免在`fp16`下发生溢出,激活值必须保持低于`1e4`,因为`1e4 * 1e4 = 1e8`,因此任何具有大激活值的矩阵乘法都会导致数值溢出。 在跟踪的开始处,您可以发现问题发生在哪个批次(这里的`Detected inf/nan during batch_number=0`表示问题发生在第一个批次)。 每个报告的`frame`都以声明相应模块的层信息为开头,说明这一`frame`是为哪个模块报告的。如果只看这个`frame`: ``` encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output ``` 在这里,`encoder.block.2.layer.1.layer_norm` 表示它是编码器的第二个块中第一层的`layer norm`。而 `forward` 的具体调用是 `T5LayerNorm`。 让我们看看该报告的最后几个`frame`: ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` 最后一个`frame`报告了`Dropout.forward`函数,第一个条目是唯一的输入,第二个条目是唯一的输出。您可以看到,它是从`DenseReluDense`类内的属性`dropout`中调用的。我们可以看到它发生在第2个块的第1层,也就是在第一个批次期间。最后,绝对最大的输入元素值为`6.27e+04`,输出也是`inf`。 您可以在这里看到,`T5DenseGatedGeluDense.forward`产生了输出激活值,其绝对最大值约为62.7K,非常接近fp16的上限64K。在下一个`frame`中,我们有`Dropout`对权重进行重新归一化,之后将某些元素归零,将绝对最大值推到了64K以上,导致溢出(`inf`)。 正如你所看到的,我们需要查看前面的`frame`, 从那里fp16数字开始变得非常大。 让我们将报告与`models/t5/modeling_t5.py`中的代码匹配: ```python class T5DenseGatedGeluDense(nn.Module): def __init__(self, config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.gelu_act = ACT2FN["gelu_new"] def forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states ``` 现在很容易看到`dropout`调用,以及所有之前的调用。 由于检测是在前向`hook`中进行的,这些报告将立即在每个`forward`返回后打印出来。 回到完整的报告,要采取措施并解决问题,我们需要往回看几个`frame`,在那里数字开始上升,并且最有可能切换到fp32模式以便在乘法或求和时数字不会溢出。当然,可能还有其他解决方案。例如,如果启用了`amp`,我们可以在将原始`forward`移到`helper wrapper`中后,暂时关闭它,如下所示: 
```python def _forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states import torch def forward(self, hidden_states): if torch.is_autocast_enabled(): with torch.cuda.amp.autocast(enabled=False): return self._forward(hidden_states) else: return self._forward(hidden_states) ``` 由于自动检测器仅报告完整`frame`的输入和输出,一旦知道在哪里查找,您可能还希望分析特定`forward`函数的中间阶段。在这种情况下,您可以使用`detect_overflow`辅助函数将检测器放到希望的位置,例如: ```python from debug_utils import detect_overflow class T5LayerFF(nn.Module): [...] def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) detect_overflow(forwarded_states, "after layer_norm") forwarded_states = self.DenseReluDense(forwarded_states) detect_overflow(forwarded_states, "after DenseReluDense") return hidden_states + self.dropout(forwarded_states) ``` 可以看到,我们添加了2个检测器,现在我们可以跟踪是否在`forwarded_states`中间的某个地方检测到了`inf`或`nan`。 实际上,检测器已经报告了这些,因为上面示例中的每个调用都是一个`nn.Module`,但假设如果您有一些本地的直接计算,这就是您将如何执行的方式。 此外,如果您在自己的代码中实例化调试器,您可以调整从其默认打印的`frame`数,例如: ```python from transformers.debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) ``` ### 特定批次的绝对最小值和最大值跟踪 当关闭下溢/上溢检测功能, 同样的调试类可以用于批处理跟踪。 假设您想要监视给定批次的每个`forward`调用的所有成分的绝对最小值和最大值,并且仅对批次1和3执行此操作,您可以这样实例化这个类: ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3]) ``` 现在,完整的批次1和3将以与下溢/上溢检测器相同的格式进行跟踪。 批次从0开始计数。 如果您知道程序在某个批次编号之后开始出现问题,那么您可以直接快进到该区域。以下是一个截取的配置示例输出: ``` *** Starting batch number=1 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.47e+04 input[0] 5.36e-05 7.92e+02 output [...] decoder.dropout Dropout 1.60e-07 2.27e+01 input[0] 0.00e+00 2.52e+01 output decoder T5Stack not a tensor output lm_head Linear 1.01e-06 7.92e+02 weight 0.00e+00 1.11e+00 input[0] 6.06e-02 8.39e+01 output T5ForConditionalGeneration not a tensor output *** Starting batch number=3 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.78e+04 input[0] 5.36e-05 7.92e+02 output [...] ``` 在这里,您将获得大量的`frame`被`dump` - 与您的模型中的前向调用一样多,它有可能符合也可能不符合您的要求,但有时对于调试目的来说,它可能比正常的调试器更容易使用。例如,如果问题开始发生在批次号150上,您可以`dump`批次149和150的跟踪,并比较数字开始发散的地方。 你还可以使用以下命令指定停止训练的批次号: ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3) ```
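下面是一段极简的示意代码,用来说明这个调试器底层依赖的 PyTorch forward hook 机制;它只是一个原理草图,并不是 `DebugUnderflowOverflow` 的实际实现:

```python
import torch
from torch import nn


def make_detect_hook(name):
    # 每次 forward 结束后检查输出里是否出现 inf/nan
    def hook(module, inputs, output):
        if isinstance(output, torch.Tensor) and not torch.isfinite(output).all():
            raise RuntimeError(f"inf/nan detected in {name}")

    return hook


model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
for name, module in model.named_modules():
    module.register_forward_hook(make_detect_hook(name))

model(torch.randn(2, 4))  # 正常输入不会触发;若某层输出溢出则会抛出 RuntimeError
```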
transformers/docs/source/zh/debugging.md/0
{ "file_path": "transformers/docs/source/zh/debugging.md", "repo_id": "transformers", "token_count": 7278 }
282
<!-- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 自动语音识别 [[open-in-colab]] <Youtube id="TksaY_FDgnk"/> 自动语音识别(ASR)将语音信号转换为文本,将一系列音频输入映射到文本输出。 Siri 和 Alexa 这类虚拟助手使用 ASR 模型来帮助用户日常生活,还有许多其他面向用户的有用应用,如会议实时字幕和会议纪要。 本指南将向您展示如何: 1. 在 [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 数据集上对 [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) 进行微调,以将音频转录为文本。 2. 使用微调后的模型进行推断。 <Tip> 如果您想查看所有与本任务兼容的架构和检查点,最好查看[任务页](https://huggingface.co/tasks/automatic-speech-recognition)。 </Tip> 在开始之前,请确保您已安装所有必要的库: ```bash pip install transformers datasets evaluate jiwer ``` 我们鼓励您登录自己的 Hugging Face 账户,这样您就可以上传并与社区分享您的模型。 出现提示时,输入您的令牌登录: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## 加载 MInDS-14 数据集 首先从🤗 Datasets 库中加载 [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 数据集的一个较小子集。这将让您有机会先进行实验,确保一切正常,然后再花更多时间在完整数据集上进行训练。 ```py >>> from datasets import load_dataset, Audio >>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train[:100]") ``` 使用 [`~Dataset.train_test_split`] 方法将数据集的 `train` 拆分为训练集和测试集: ```py >>> minds = minds.train_test_split(test_size=0.2) ``` 然后看看数据集: ```py >>> minds DatasetDict({ train: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 16 }) test: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 4 }) }) ``` 虽然数据集包含 `lang_id `和 `english_transcription` 等许多有用的信息,但在本指南中, 您将专注于 `audio` 和 `transcription`。使用 [`~datasets.Dataset.remove_columns`] 方法删除其他列: ```py >>> minds = minds.remove_columns(["english_transcription", "intent_class", "lang_id"]) ``` 再看看示例: ```py >>> minds["train"][0] {'audio': {'array': array([-0.00024414, 0. , 0. 
, ..., 0.00024414, 0.00024414, 0.00024414], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 8000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"}
```

有 2 个字段:

- `audio`:由语音信号形成的一维 `array`,用于加载和重新采样音频文件。
- `transcription`:目标文本。

## 预处理

下一步是加载一个 Wav2Vec2 处理器来处理音频信号:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base")
```

MInDS-14 数据集的采样率为 8000 Hz(您可以在其[数据集卡片](https://huggingface.co/datasets/PolyAI/minds14)中找到此信息),这意味着您需要将数据集重新采样为 16000 Hz 以使用预训练的 Wav2Vec2 模型:

```py
>>> minds = minds.cast_column("audio", Audio(sampling_rate=16_000))
>>> minds["train"][0]
{'audio': {'array': array([-2.38064706e-04, -1.58618059e-04, -5.43987835e-06, ..., 2.78103951e-04, 2.38446111e-04, 1.18740834e-04], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 16000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"}
```

如您在上面的 `transcription` 中所看到的,文本包含大小写字符的混合。Wav2Vec2 分词器仅训练了大写字符,因此您需要确保文本与分词器的词汇表匹配:

```py
>>> def uppercase(example):
...     return {"transcription": example["transcription"].upper()}

>>> minds = minds.map(uppercase)
```

现在创建一个预处理函数,该函数应该:

1. 调用 `audio` 列以加载和重新采样音频文件。
2. 从音频文件中提取 `input_values` 并使用处理器对 `transcription` 列执行 tokenizer 操作。

```py
>>> def prepare_dataset(batch):
...     audio = batch["audio"]
...     batch = processor(audio["array"], sampling_rate=audio["sampling_rate"], text=batch["transcription"])
...     batch["input_length"] = len(batch["input_values"][0])
...     return batch
```

要在整个数据集上应用预处理函数,可以使用🤗 Datasets 的 [`~datasets.Dataset.map`] 函数。您可以通过增加 `num_proc` 参数来加速 `map` 的处理进程数量。使用 [`~datasets.Dataset.remove_columns`] 方法删除不需要的列:

```py
>>> encoded_minds = minds.map(prepare_dataset, remove_columns=minds.column_names["train"], num_proc=4)
```

🤗 Transformers 没有用于 ASR 的数据整理器,因此您需要调整 [`DataCollatorWithPadding`] 来创建一个示例批次。它还会动态地将您的文本和标签填充到其批次中最长元素的长度(而不是整个数据集),以使它们具有统一的长度。虽然可以通过在 `tokenizer` 函数中设置 `padding=True` 来填充文本,但动态填充更有效。

与其他数据整理器不同,这个特定的数据整理器需要对 `input_values` 和 `labels` 应用不同的填充方法:

```py
>>> import torch
>>> from dataclasses import dataclass, field
>>> from typing import Any, Dict, List, Optional, Union

>>> @dataclass
... class DataCollatorCTCWithPadding:
...     processor: AutoProcessor
...     padding: Union[bool, str] = "longest"

...     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
...         # split inputs and labels since they have to be of different lengths and need
...         # different padding methods
...         input_features = [{"input_values": feature["input_values"][0]} for feature in features]
...         label_features = [{"input_ids": feature["labels"]} for feature in features]
...         batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt")

...         labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt")

...         # replace padding with -100 to ignore loss correctly
...         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

...         batch["labels"] = labels

...         return batch
```

现在实例化您的 `DataCollatorCTCWithPadding`:

```py
>>> data_collator = DataCollatorCTCWithPadding(processor=processor, padding="longest")
```

## 评估

在训练过程中包含一个指标通常有助于评估模型的性能。您可以通过🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 库快速加载一个评估方法。对于这个任务,加载 [word error rate](https://huggingface.co/spaces/evaluate-metric/wer)(WER)指标(请参阅🤗 Evaluate [快速上手](https://huggingface.co/docs/evaluate/a_quick_tour)以了解如何加载和计算指标):

```py
>>> import evaluate

>>> wer = evaluate.load("wer")
```

然后创建一个函数,将您的预测和标签传递给 [`~evaluate.EvaluationModule.compute`] 来计算 WER:

```py
>>> import numpy as np

>>> def compute_metrics(pred):
...     pred_logits = pred.predictions
...     pred_ids = np.argmax(pred_logits, axis=-1)

...     pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

...     pred_str = processor.batch_decode(pred_ids)
...     label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

...     wer_score = wer.compute(predictions=pred_str, references=label_str)

...     return {"wer": wer_score}
```

您的 `compute_metrics` 函数现在已经准备就绪,当您设置好训练时将返回给此函数。

## 训练

<frameworkcontent>
<pt>
<Tip>

如果您不熟悉使用[`Trainer`]微调模型,请查看这里的基本教程[here](../training#train-with-pytorch-trainer)!

</Tip>

现在您已经准备好开始训练您的模型了!使用 [`AutoModelForCTC`] 加载 Wav2Vec2。使用 `ctc_loss_reduction` 参数指定要应用的减少方式。通常最好使用平均值而不是默认的求和:

```py
>>> from transformers import AutoModelForCTC, TrainingArguments, Trainer

>>> model = AutoModelForCTC.from_pretrained(
...     "facebook/wav2vec2-base",
...     ctc_loss_reduction="mean",
...     pad_token_id=processor.tokenizer.pad_token_id,
... )
```

此时,只剩下 3 个步骤:

1. 在 [`TrainingArguments`] 中定义您的训练参数。唯一必需的参数是 `output_dir`,用于指定保存模型的位置。您可以通过设置 `push_to_hub=True` 将此模型推送到 Hub(您需要登录到 Hugging Face 才能上传您的模型)。在每个 epoch 结束时,[`Trainer`] 将评估 WER 并保存训练检查点。
2. 将训练参数与模型、数据集、分词器、数据整理器和 `compute_metrics` 函数一起传递给 [`Trainer`]。
3. 调用 [`~Trainer.train`] 来微调您的模型。

```py
>>> training_args = TrainingArguments(
...     output_dir="my_awesome_asr_mind_model",
...     per_device_train_batch_size=8,
...     gradient_accumulation_steps=2,
...     learning_rate=1e-5,
...     warmup_steps=500,
...     max_steps=2000,
...     gradient_checkpointing=True,
...     fp16=True,
...     group_by_length=True,
...     eval_strategy="steps",
...     per_device_eval_batch_size=8,
...     save_steps=1000,
...     eval_steps=1000,
...     logging_steps=25,
...     load_best_model_at_end=True,
...     metric_for_best_model="wer",
...     greater_is_better=False,
...     push_to_hub=True,
... )

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=encoded_minds["train"],
...     eval_dataset=encoded_minds["test"],
...     tokenizer=processor,
...     data_collator=data_collator,
...     compute_metrics=compute_metrics,
... )

>>> trainer.train()
```

训练完成后,使用 [`~transformers.Trainer.push_to_hub`] 方法将您的模型分享到 Hub,方便大家使用您的模型:

```py
>>> trainer.push_to_hub()
```
</pt>
</frameworkcontent>

<Tip>

要深入了解如何微调模型进行自动语音识别,请查看这篇博客[文章](https://huggingface.co/blog/fine-tune-wav2vec2-english)以了解英语 ASR,还可以参阅[这篇文章](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2)以了解多语言 ASR。

</Tip>

## 推断

很好,现在您已经微调了一个模型,您可以用它进行推断了!

加载您想要运行推断的音频文件。请记住,如果需要,将音频文件的采样率重新采样为与模型匹配的采样率!
```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> audio_file = dataset[0]["audio"]["path"]
```

尝试使用微调后的模型进行推断的最简单方法是使用 [`pipeline`]。使用您的模型实例化一个用于自动语音识别的 `pipeline`,并将您的音频文件传递给它:

```py
>>> from transformers import pipeline

>>> transcriber = pipeline("automatic-speech-recognition", model="stevhliu/my_awesome_asr_minds_model")
>>> transcriber(audio_file)
{'text': 'I WOUD LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'}
```

<Tip>

转录结果还不错,但可以更好!尝试用更多示例微调您的模型,以获得更好的结果!

</Tip>

如果您愿意,您也可以手动复制 `pipeline` 的结果:

<frameworkcontent>
<pt>
加载一个处理器来预处理音频文件和转录,并将 `input` 返回为 PyTorch 张量:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("stevhliu/my_awesome_asr_mind_model")
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
```

将您的输入传递给模型并返回 logits:

```py
>>> import torch
>>> from transformers import AutoModelForCTC

>>> model = AutoModelForCTC.from_pretrained("stevhliu/my_awesome_asr_mind_model")
>>> with torch.no_grad():
...     logits = model(**inputs).logits
```

获取具有最高概率的预测 `input_ids`,并使用处理器将预测的 `input_ids` 解码回文本:

```py
>>> predicted_ids = torch.argmax(logits, dim=-1)
>>> transcription = processor.batch_decode(predicted_ids)
>>> transcription
['I WOUL LIKE O SET UP JOINT ACOUNT WTH Y PARTNER']
```
</pt>
</frameworkcontent>
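如果想快速量化转录质量,还可以直接用前面加载的 WER 指标,把预测文本与数据集中的参考转录进行比较。下面是一个仅供参考的小示例(假设复用上面 PyTorch 代码块中得到的 `transcription` 列表):

```py
>>> import evaluate

>>> wer = evaluate.load("wer")
>>> reference = dataset[0]["transcription"].upper()  # 与训练时一致,先转成大写
>>> wer.compute(predictions=transcription, references=[reference])
```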
transformers/docs/source/zh/tasks/asr.md/0
{ "file_path": "transformers/docs/source/zh/tasks/asr.md", "repo_id": "transformers", "token_count": 7230 }
283
<!---
Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Automatic Speech Recognition - Flax Examples

## Sequence to Sequence

The script [`run_flax_speech_recognition_seq2seq.py`](https://github.com/huggingface/transformers/blob/main/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py)
can be used to fine-tune any [Flax Speech Sequence-to-Sequence Model](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.FlaxAutoModelForSpeechSeq2Seq)
for automatic speech recognition on one of the [official speech recognition datasets](https://huggingface.co/datasets?task_ids=task_ids:automatic-speech-recognition)
or a custom dataset. This includes the Whisper model from OpenAI as well as warm-started Speech-Encoder-Decoder Models;
an example for the former is included below.

### Whisper Model

We can load all components of the Whisper model directly from the pretrained checkpoint, including the pretrained model
weights, feature extractor and tokenizer. We simply have to specify the id of the fine-tuning dataset and the necessary
training hyperparameters.

The following example shows how to fine-tune the [Whisper small](https://huggingface.co/openai/whisper-small) checkpoint
on the Hindi subset of the [Common Voice 13](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0) dataset.
Note that before running this script you must accept the dataset's [terms of use](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0)
and register your Hugging Face Hub token on your device by running `huggingface-cli login`.

```bash
python run_flax_speech_recognition_seq2seq.py \
	--model_name_or_path="openai/whisper-small" \
	--dataset_name="mozilla-foundation/common_voice_13_0" \
	--dataset_config_name="hi" \
	--language="hindi" \
	--train_split_name="train+validation" \
	--eval_split_name="test" \
	--output_dir="./whisper-small-hi-flax" \
	--per_device_train_batch_size="16" \
	--per_device_eval_batch_size="16" \
	--num_train_epochs="10" \
	--learning_rate="1e-4" \
	--warmup_steps="500" \
	--logging_steps="25" \
	--generation_max_length="40" \
	--preprocessing_num_workers="32" \
	--dataloader_num_workers="32" \
	--max_duration_in_seconds="30" \
	--text_column_name="sentence" \
	--overwrite_output_dir \
	--do_train \
	--do_eval \
	--predict_with_generate \
	--push_to_hub \
	--use_auth_token
```

On a TPU v4-8, training should take approximately 25 minutes, with a final cross-entropy loss of 0.02 and word error
rate of **34%**. See the checkpoint [sanchit-gandhi/whisper-small-hi-flax](https://huggingface.co/sanchit-gandhi/whisper-small-hi-flax)
for an example training run.
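Once training is complete, the fine-tuned checkpoint can be used for inference. The following is an illustrative
sketch only (the generation arguments and dataset loading details may need adjusting for your setup):

```python
from datasets import Audio, load_dataset
from transformers import FlaxWhisperForConditionalGeneration, WhisperProcessor

# load the fine-tuned checkpoint referenced above
model = FlaxWhisperForConditionalGeneration.from_pretrained("sanchit-gandhi/whisper-small-hi-flax")
processor = WhisperProcessor.from_pretrained("sanchit-gandhi/whisper-small-hi-flax")

# stream one Hindi sample, resampled to the 16kHz expected by Whisper
ds = load_dataset("mozilla-foundation/common_voice_13_0", "hi", split="test", streaming=True)
sample = next(iter(ds.cast_column("audio", Audio(sampling_rate=16_000))))

input_features = processor(
    sample["audio"]["array"], sampling_rate=16_000, return_tensors="np"
).input_features
pred_ids = model.generate(input_features, max_length=128).sequences
print(processor.batch_decode(pred_ids, skip_special_tokens=True))
```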
transformers/examples/flax/speech-recognition/README.md/0
{ "file_path": "transformers/examples/flax/speech-recognition/README.md", "repo_id": "transformers", "token_count": 1039 }
284
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fine-tuning the library models for question-answering.""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import transformers from transformers import ( AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, DataCollatorWithPadding, HfArgumentParser, SquadDataset, Trainer, TrainingArguments, ) from transformers import SquadDataTrainingArguments as DataTrainingArguments from transformers.trainer_utils import is_main_process logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."}) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." 
    )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Prepare Question-Answering task
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=False,  # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handling
    )
    model = AutoModelForQuestionAnswering.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    is_language_sensitive = hasattr(model.config, "lang2id")
    train_dataset = (
        SquadDataset(
            data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        SquadDataset(
            data_args,
            tokenizer=tokenizer,
            mode="dev",
            is_language_sensitive=is_language_sensitive,
            cache_dir=model_args.cache_dir,
        )
        if training_args.do_eval
        else None
    )

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
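
# Illustrative usage (hypothetical paths and arguments - adapt to your setup):
#
#   python run_squad_trainer.py \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./squad_data \
#       --output_dir ./squad_finetuned \
#       --do_train \
#       --do_eval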
transformers/examples/legacy/question-answering/run_squad_trainer.py/0
{ "file_path": "transformers/examples/legacy/question-answering/run_squad_trainer.py", "repo_id": "transformers", "token_count": 2569 }
285
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset BERT_BASE_CASED = "google-bert/bert-base-cased" PEGASUS_XSUM = "google/pegasus-xsum" ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] T5_TINY = "patrickvonplaten/t5-tiny-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" MARIAN_TINY = "sshleifer/tiny-marian-en-de" def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) def make_test_data_dir(tmp_dir): for split in ["train", "val", "test"]: _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) return tmp_dir class TestAll(TestCasePlus): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) @slow def test_seq2seq_dataset_truncation(self, tok_name): tokenizer = AutoTokenizer.from_pretrained(tok_name) tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES) max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES) max_src_len = 4 max_tgt_len = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated src_lang, tgt_lang = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. train_dataset = Seq2SeqDataset( tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len, max_target_length=max_tgt_len, # ignored src_lang=src_lang, tgt_lang=tgt_lang, ) dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn) for batch in dataloader: assert isinstance(batch, dict) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED]) def test_legacy_dataset_truncation(self, tok): tokenizer = AutoTokenizer.from_pretrained(tok) tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES) max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES) trunc_target = 4 train_dataset = LegacySeq2SeqDataset( tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target, ) dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def test_pack_dataset(self): tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) orig_examples = tmp_dir.joinpath("train.source").open().readlines() save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) pack_data_dir(tokenizer, tmp_dir, 128, save_dir) orig_paths = {x.name for x in tmp_dir.iterdir()} new_paths = {x.name for x in save_dir.iterdir()} packed_examples = save_dir.joinpath("train.source").open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(packed_examples) < len(orig_examples) assert len(packed_examples) == 1 assert len(packed_examples[0]) == sum(len(x) for x in orig_examples) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq") def test_dynamic_batch_size(self): if not FAIRSEQ_AVAILABLE: return ds, max_tokens, tokenizer = self._get_dataset(max_len=64) required_batch_size_multiple = 64 batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple) batch_sizes = [len(x) for x in batch_sampler] assert len(set(batch_sizes)) > 1 # it's not dynamic batch size if every batch is the same length assert sum(batch_sizes) == len(ds) # no dropped or added examples data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2) failures = [] num_src_per_batch = [] for batch in data_loader: src_shape = batch["input_ids"].shape bs = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple num_src_tokens = np.product(batch["input_ids"].shape) num_src_per_batch.append(num_src_tokens) if num_src_tokens > (max_tokens * 1.1): failures.append(num_src_tokens) assert 
num_src_per_batch[0] == max(num_src_per_batch) if failures: raise AssertionError(f"too many tokens in {len(failures)} batches") def test_sortish_sampler_reduces_padding(self): ds, _, tokenizer = self._get_dataset(max_len=512) bs = 2 sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False) naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2) sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler) pad = tokenizer.pad_token_id def count_pad_tokens(data_loader, k="input_ids"): return [batch[k].eq(pad).sum().item() for batch in data_loader] assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels")) assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl)) assert len(sortish_dl) == len(naive_dl) def _get_dataset(self, n_obs=1000, max_len=128): if os.getenv("USE_REAL_DATA", False): data_dir = "examples/seq2seq/wmt_en_ro" max_tokens = max_len * 2 * 64 if not Path(data_dir).joinpath("train.len").exists(): save_len_file(MARIAN_TINY, data_dir) else: data_dir = "examples/seq2seq/test_data/wmt_en_ro" max_tokens = max_len * 4 save_len_file(MARIAN_TINY, data_dir) tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY) ds = Seq2SeqDataset( tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs, ) return ds, max_tokens, tokenizer def test_distributed_sortish_sampler_splits_indices_between_procs(self): ds, max_tokens, tokenizer = self._get_dataset() ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False)) ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False)) assert ids1.intersection(ids2) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) def test_dataset_kwargs(self, tok_name): tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False) if tok_name == MBART_TINY: train_dataset = Seq2SeqDataset( tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR", ) kwargs = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: train_dataset = Seq2SeqDataset( tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, ) kwargs = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
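
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original test suite).
# It shows, outside the test harness, the minimal pattern the tests above
# exercise: a data directory containing `{split}.source` / `{split}.target`
# files with one example per line (exactly what `make_test_data_dir` writes)
# is wrapped in a `Seq2SeqDataset` and batched with its own `collate_fn`.
# The helper is never called by the tests, so importing this module stays
# side-effect free; the length limits and batch size are arbitrary example values.
def _example_seq2seq_dataloader(data_dir: str, tok_name: str = MARIAN_TINY) -> DataLoader:
    tokenizer = AutoTokenizer.from_pretrained(tok_name)
    dataset = Seq2SeqDataset(
        tokenizer,
        data_dir=data_dir,
        type_path="train",
        max_source_length=32,
        max_target_length=32,
    )
    # The dataset's own collate_fn produces padded `input_ids`, `attention_mask`
    # and `labels` batches, as asserted in the tests above.
    return DataLoader(dataset, batch_size=2, collate_fn=dataset.collate_fn)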
transformers/examples/legacy/seq2seq/old_test_datasets.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/old_test_datasets.py", "repo_id": "transformers", "token_count": 5152 }
286
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Parameters:
        label_smoothing (:obj:`float`, `optional`, defaults to 0):
            The label smoothing epsilon to apply (if not zero).
        sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to use SortishSampler or not. It sorts the inputs according to lengths
            in order to minimize the padding size.
        predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to use generate to calculate generative metrics (ROUGE, BLEU).
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "Whether to use the Adafactor optimizer."})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
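
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). It shows how this dataclass is typically consumed: HfArgumentParser
# turns the fields above into command-line flags, e.g.
#   python seq2seq_training_args.py --output_dir ./out --sortish_sampler --label_smoothing 0.1
# The guard keeps the sketch from running on import; the flag values in the
# example invocation are arbitrary.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(Seq2SeqTrainingArguments)
    (training_args,) = parser.parse_args_into_dataclasses()
    logger.info("Parsed Seq2SeqTrainingArguments: %s", training_args)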
transformers/examples/legacy/seq2seq/seq2seq_training_args.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/seq2seq_training_args.py", "repo_id": "transformers", "token_count": 888 }
287
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Finetuning 🤗 Transformers model for object detection with Accelerate.""" import argparse import json import logging import math import os from functools import partial from pathlib import Path from typing import Any, List, Mapping, Tuple, Union import albumentations as A import datasets import numpy as np import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset from huggingface_hub import HfApi from torch.utils.data import DataLoader from torchmetrics.detection.mean_ap import MeanAveragePrecision from tqdm.auto import tqdm import transformers from transformers import ( AutoConfig, AutoImageProcessor, AutoModelForObjectDetection, SchedulerType, get_scheduler, ) from transformers.image_processing_utils import BatchFeature from transformers.image_transforms import center_to_corners_format from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.45.0.dev0") logging.basicConfig(level=logging.INFO) logger = get_logger(__name__) require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") # Copied from examples/pytorch/object-detection/run_object_detection.format_image_annotations_as_coco def format_image_annotations_as_coco( image_id: str, categories: List[int], areas: List[float], bboxes: List[Tuple[float]] ) -> dict: """Format one set of image annotations to the COCO format Args: image_id (str): image id. e.g. "0001" categories (List[int]): list of categories/class labels corresponding to provided bounding boxes areas (List[float]): list of corresponding areas to provided bounding boxes bboxes (List[Tuple[float]]): list of bounding boxes provided in COCO format ([center_x, center_y, width, height] in absolute coordinates) Returns: dict: { "image_id": image id, "annotations": list of formatted annotations } """ annotations = [] for category, area, bbox in zip(categories, areas, bboxes): formatted_annotation = { "image_id": image_id, "category_id": category, "iscrowd": 0, "area": area, "bbox": list(bbox), } annotations.append(formatted_annotation) return { "image_id": image_id, "annotations": annotations, } # Copied from examples/pytorch/object-detection/run_object_detection.convert_bbox_yolo_to_pascal def convert_bbox_yolo_to_pascal(boxes: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor: """ Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1] to Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates. 
Args: boxes (torch.Tensor): Bounding boxes in YOLO format image_size (Tuple[int, int]): Image size in format (height, width) Returns: torch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max) """ # convert center to corners format boxes = center_to_corners_format(boxes) # convert to absolute coordinates height, width = image_size boxes = boxes * torch.tensor([[width, height, width, height]]) return boxes # Copied from examples/pytorch/object-detection/run_object_detection.augment_and_transform_batch def augment_and_transform_batch( examples: Mapping[str, Any], transform: A.Compose, image_processor: AutoImageProcessor, return_pixel_mask: bool = False, ) -> BatchFeature: """Apply augmentations and format annotations in COCO format for object detection task""" images = [] annotations = [] for image_id, image, objects in zip(examples["image_id"], examples["image"], examples["objects"]): image = np.array(image.convert("RGB")) # apply augmentations output = transform(image=image, bboxes=objects["bbox"], category=objects["category"]) images.append(output["image"]) # format annotations in COCO format formatted_annotations = format_image_annotations_as_coco( image_id, output["category"], objects["area"], output["bboxes"] ) annotations.append(formatted_annotations) # Apply the image processor transformations: resizing, rescaling, normalization result = image_processor(images=images, annotations=annotations, return_tensors="pt") if not return_pixel_mask: result.pop("pixel_mask", None) return result # Copied from examples/pytorch/object-detection/run_object_detection.collate_fn def collate_fn(batch: List[BatchFeature]) -> Mapping[str, Union[torch.Tensor, List[Any]]]: data = {} data["pixel_values"] = torch.stack([x["pixel_values"] for x in batch]) data["labels"] = [x["labels"] for x in batch] if "pixel_mask" in batch[0]: data["pixel_mask"] = torch.stack([x["pixel_mask"] for x in batch]) return data def nested_to_cpu(objects): """Move nested tesnors in objects to CPU if they are on GPU""" if isinstance(objects, torch.Tensor): return objects.cpu() elif isinstance(objects, Mapping): return type(objects)({k: nested_to_cpu(v) for k, v in objects.items()}) elif isinstance(objects, (list, tuple)): return type(objects)([nested_to_cpu(v) for v in objects]) elif isinstance(objects, (np.ndarray, str, int, float, bool)): return objects raise ValueError(f"Unsupported type {type(objects)}") def evaluation_loop( model: torch.nn.Module, image_processor: AutoImageProcessor, accelerator: Accelerator, dataloader: DataLoader, id2label: Mapping[int, str], ) -> dict: model.eval() metric = MeanAveragePrecision(box_format="xyxy", class_metrics=True) for step, batch in enumerate(tqdm(dataloader, disable=not accelerator.is_local_main_process)): with torch.no_grad(): outputs = model(**batch) # For metric computation we need to collect ground truth and predicted boxes in the same format # 1. Collect predicted boxes, classes, scores # image_processor convert boxes from YOLO format to Pascal VOC format # ([x_min, y_min, x_max, y_max] in absolute coordinates) image_size = torch.stack([example["orig_size"] for example in batch["labels"]], dim=0) predictions = image_processor.post_process_object_detection(outputs, threshold=0.0, target_sizes=image_size) predictions = nested_to_cpu(predictions) # 2. 
Collect ground truth boxes in the same format for metric computation # Do the same, convert YOLO boxes to Pascal VOC format target = [] for label in batch["labels"]: label = nested_to_cpu(label) boxes = convert_bbox_yolo_to_pascal(label["boxes"], label["orig_size"]) labels = label["class_labels"] target.append({"boxes": boxes, "labels": labels}) metric.update(predictions, target) metrics = metric.compute() # Replace list of per class metrics with separate metric for each class classes = metrics.pop("classes") map_per_class = metrics.pop("map_per_class") mar_100_per_class = metrics.pop("mar_100_per_class") for class_id, class_map, class_mar in zip(classes, map_per_class, mar_100_per_class): class_name = id2label[class_id.item()] metrics[f"map_{class_name}"] = class_map metrics[f"mar_100_{class_name}"] = class_mar # Convert metrics to float metrics = {k: round(v.item(), 4) for k, v in metrics.items()} return metrics def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model for object detection task") parser.add_argument( "--model_name_or_path", type=str, help="Path to a pretrained model or model identifier from huggingface.co/models.", default="facebook/detr-resnet-50", ) parser.add_argument( "--dataset_name", type=str, help="Name of the dataset on the hub.", default="cppe-5", ) parser.add_argument( "--train_val_split", type=float, default=0.15, help="Fraction of the dataset to be used for validation.", ) parser.add_argument( "--ignore_mismatched_sizes", action="store_true", help="Ignore mismatched sizes between the model and the dataset.", ) parser.add_argument( "--image_square_size", type=int, default=1333, help="Image longest size will be resized to this value, then image will be padded to square.", ) parser.add_argument( "--cache_dir", type=str, help="Path to a folder in which the model and dataset will be cached.", ) parser.add_argument( "--use_auth_token", action="store_true", help="Whether to use an authentication token to access the model repository.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--dataloader_num_workers", type=int, default=4, help="Number of workers to use for the dataloaders.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="Beta1 for AdamW optimizer", ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="Beta2 for AdamW optimizer", ) parser.add_argument( "--adam_epsilon", type=float, default=1e-8, help="Epsilon for AdamW optimizer", ) parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--trust_remote_code", action="store_true", help=( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ), ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", required=False, action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.push_to_hub or args.with_tracking: if args.output_dir is None: raise ValueError( "Need an `output_dir` to create a repo when `--push_to_hub` or `with_tracking` is specified." ) if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_object_detection_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. 
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator_log_kwargs = {} if args.with_tracking: accelerator_log_kwargs["log_with"] = args.report_to accelerator_log_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. # We set device_specific to True as we want different data augmentation per device. if args.seed is not None: set_seed(args.seed, device_specific=True) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: # Retrieve of infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir, trust_remote_code=args.trust_remote_code) # If we don't have a validation split, split off a percentage of train as validation. 
args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset["train"].train_test_split(args.train_val_split, seed=args.seed) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Get dataset categories and prepare mappings for label_name <-> label_id categories = dataset["train"].features["objects"].feature["category"].names id2label = dict(enumerate(categories)) label2id = {v: k for k, v in id2label.items()} # ------------------------------------------------------------------------------------------------ # Load pretrained config, model and image processor # ------------------------------------------------------------------------------------------------ common_pretrained_args = { "cache_dir": args.cache_dir, "token": args.hub_token, "trust_remote_code": args.trust_remote_code, } config = AutoConfig.from_pretrained( args.model_name_or_path, label2id=label2id, id2label=id2label, **common_pretrained_args ) model = AutoModelForObjectDetection.from_pretrained( args.model_name_or_path, config=config, ignore_mismatched_sizes=args.ignore_mismatched_sizes, **common_pretrained_args, ) image_processor = AutoImageProcessor.from_pretrained( args.model_name_or_path, do_resize=True, size={"max_height": args.image_square_size, "max_width": args.image_square_size}, do_pad=True, pad_size={"height": args.image_square_size, "width": args.image_square_size}, **common_pretrained_args, ) # ------------------------------------------------------------------------------------------------ # Define image augmentations and dataset transforms # ------------------------------------------------------------------------------------------------ max_size = args.image_square_size train_augment_and_transform = A.Compose( [ A.Compose( [ A.SmallestMaxSize(max_size=max_size, p=1.0), A.RandomSizedBBoxSafeCrop(height=max_size, width=max_size, p=1.0), ], p=0.2, ), A.OneOf( [ A.Blur(blur_limit=7, p=0.5), A.MotionBlur(blur_limit=7, p=0.5), A.Defocus(radius=(1, 5), alias_blur=(0.1, 0.25), p=0.1), ], p=0.1, ), A.Perspective(p=0.1), A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.5), A.HueSaturationValue(p=0.1), ], bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True, min_area=25), ) validation_transform = A.Compose( [A.NoOp()], bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True), ) # Make transform functions for batch and apply for dataset splits train_transform_batch = partial( augment_and_transform_batch, transform=train_augment_and_transform, image_processor=image_processor ) validation_transform_batch = partial( augment_and_transform_batch, transform=validation_transform, image_processor=image_processor ) with accelerator.main_process_first(): train_dataset = dataset["train"].with_transform(train_transform_batch) valid_dataset = dataset["validation"].with_transform(validation_transform_batch) test_dataset = dataset["test"].with_transform(validation_transform_batch) dataloader_common_args = { "num_workers": args.dataloader_num_workers, "collate_fn": collate_fn, } train_dataloader = DataLoader( train_dataset, shuffle=True, batch_size=args.per_device_train_batch_size, **dataloader_common_args ) valid_dataloader = DataLoader( valid_dataset, shuffle=False, batch_size=args.per_device_eval_batch_size, **dataloader_common_args ) test_dataloader = DataLoader( test_dataset, shuffle=False, batch_size=args.per_device_eval_batch_size, 
**dataloader_common_args ) # ------------------------------------------------------------------------------------------------ # Define optimizer, scheduler and prepare everything with the accelerator # ------------------------------------------------------------------------------------------------ # Optimizer optimizer = torch.optim.AdamW( list(model.parameters()), lr=args.learning_rate, betas=[args.adam_beta1, args.adam_beta2], eps=args.adam_epsilon, ) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps if overrode_max_train_steps else args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, valid_dataloader, test_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, test_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("object_detection_no_trainer", experiment_config) # ------------------------------------------------------------------------------------------------ # Run training with evaluation on each epoch # ------------------------------------------------------------------------------------------------ total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": checkpoint_path = args.resume_from_checkpoint path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last checkpoint_path = path path = os.path.basename(checkpoint_path) accelerator.print(f"Resumed from checkpoint: {checkpoint_path}") accelerator.load_state(checkpoint_path) # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if load from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if completed_steps >= args.max_train_steps: break logger.info("***** Running evaluation *****") metrics = evaluation_loop(model, image_processor, accelerator, valid_dataloader, id2label) logger.info(f"epoch {epoch}: {metrics}") if args.with_tracking: accelerator.log( { "train_loss": total_loss.item() / len(train_dataloader), **metrics, "epoch": epoch, "step": completed_steps, 
}, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # ------------------------------------------------------------------------------------------------ # Run evaluation on test dataset and save the model # ------------------------------------------------------------------------------------------------ logger.info("***** Running evaluation on test dataset *****") metrics = evaluation_loop(model, image_processor, accelerator, test_dataloader, id2label) metrics = {f"test_{k}": v for k, v in metrics.items()} logger.info(f"Test metrics: {metrics}") if args.with_tracking: accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump(metrics, f, indent=2) image_processor.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ignore_patterns=["epoch_*"], ) if __name__ == "__main__": main()
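
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original script).
# A tiny, self-checking demonstration of the two annotation helpers defined
# above. It reuses the module-level `torch` import, is never called by
# `main()`, and all numbers are arbitrary example values.
def _illustrate_annotation_helpers() -> None:
    # One YOLO box (x_center, y_center, width, height), normalized to [0, 1],
    # on an image of height 100 and width 200.
    yolo_boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])
    pascal_boxes = convert_bbox_yolo_to_pascal(yolo_boxes, image_size=(100, 200))
    # Expected Pascal VOC corners in absolute pixels: x_min=80, y_min=30, x_max=120, y_max=70.
    assert torch.allclose(pascal_boxes, torch.tensor([[80.0, 30.0, 120.0, 70.0]]))

    # Wrapping a bounding box (absolute coordinates, as produced by the
    # albumentations pipeline) together with its category and area into the
    # COCO-style dict layout the image processor expects.
    coco = format_image_annotations_as_coco(
        image_id="0001", categories=[1], areas=[1600.0], bboxes=[(80.0, 30.0, 40.0, 40.0)]
    )
    assert coco["image_id"] == "0001"
    assert coco["annotations"][0]["category_id"] == 1
    assert coco["annotations"][0]["bbox"] == [80.0, 30.0, 40.0, 40.0]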
transformers/examples/pytorch/object-detection/run_object_detection_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/object-detection/run_object_detection_no_trainer.py", "repo_id": "transformers", "token_count": 13064 }
288
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Speech Recognition Pre-Training

## Wav2Vec2 Speech Pre-Training

The script [`run_wav2vec2_pretraining_no_trainer.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py) can be used to pre-train a [Wav2Vec2](https://huggingface.co/transformers/model_doc/wav2vec2.html?highlight=wav2vec2) model from scratch.

In the script [`run_wav2vec2_pretraining_no_trainer`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py), a Wav2Vec2 model is pre-trained on audio data alone using [Wav2Vec2's contrastive loss objective](https://arxiv.org/abs/2006.11477).

The following examples show how to pre-train a `"base"`-sized Wav2Vec2 model as well as a `"large"`-sized Wav2Vec2 model using [`accelerate`](https://github.com/huggingface/accelerate).

---

**NOTE 1**

Wav2Vec2's pre-training is known to be quite unstable.
It is advised to do a couple of test runs with a smaller dataset,
*i.e.* `--dataset_config_names clean clean`, `--dataset_split_names validation test`
to find good hyper-parameters for `learning_rate`, `batch_size`, `num_warmup_steps`,
and the optimizer.
A good metric to observe during training is the gradient norm, which should ideally be between 0.5 and 2.

---

---

**NOTE 2**

When training a model on large datasets it is recommended to run the data preprocessing
in a first run in a **non-distributed** mode via `--preprocessing_only` so that
when running the model in **distributed** mode in a second step the preprocessed data
can easily be loaded on each distributed device.

---

### Demo

In this demo run we pre-train a `"base-sized"` Wav2Vec2 model only on the validation and test data
of [librispeech_asr](https://huggingface.co/datasets/librispeech_asr).

The demo is run on two Titan RTX (24 GB RAM each).
In case you have less RAM available per device, consider reducing `--batch_size` and/or the `--max_duration_in_seconds`.
```bash
accelerate launch run_wav2vec2_pretraining_no_trainer.py \
	--dataset_name="librispeech_asr" \
	--dataset_config_names clean clean \
	--dataset_split_names validation test \
	--model_name_or_path="patrickvonplaten/wav2vec2-base-v2" \
	--output_dir="./wav2vec2-pretrained-demo" \
	--max_train_steps="20000" \
	--num_warmup_steps="32000" \
	--gradient_accumulation_steps="8" \
	--learning_rate="0.005" \
	--weight_decay="0.01" \
	--max_duration_in_seconds="20.0" \
	--min_duration_in_seconds="2.0" \
	--logging_steps="1" \
	--saving_steps="10000" \
	--per_device_train_batch_size="8" \
	--per_device_eval_batch_size="8" \
	--adam_beta1="0.9" \
	--adam_beta2="0.98" \
	--adam_epsilon="1e-06" \
	--gradient_checkpointing \
	--mask_time_prob="0.65" \
	--mask_time_length="10"
```

The results of this run can be seen [here](https://wandb.ai/patrickvonplaten/wav2vec2-pretrained-demo/reports/Wav2Vec2-PreTraining-Demo-Run--VmlldzoxMDk3MjAw?accessToken=oa05s1y57lizo2ocxy3k01g6db1u4pt8m6ur2n8nl4cb0ug02ms2cw313kb8ruch).

### Base

To pre-train a `"base-sized"` Wav2Vec2 model, *e.g.* [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on [librispeech_asr](https://huggingface.co/datasets/librispeech_asr), the following command can be run:

```bash
accelerate launch run_wav2vec2_pretraining_no_trainer.py \
	--dataset_name=librispeech_asr \
	--dataset_config_names clean clean other \
	--dataset_split_names train.100 train.360 train.500 \
	--model_name_or_path="patrickvonplaten/wav2vec2-base-v2" \
	--output_dir="./wav2vec2-pretrained-demo" \
	--max_train_steps="200000" \
	--num_warmup_steps="32000" \
	--gradient_accumulation_steps="4" \
	--learning_rate="0.001" \
	--weight_decay="0.01" \
	--max_duration_in_seconds="20.0" \
	--min_duration_in_seconds="2.0" \
	--logging_steps="1" \
	--saving_steps="10000" \
	--per_device_train_batch_size="8" \
	--per_device_eval_batch_size="8" \
	--adam_beta1="0.9" \
	--adam_beta2="0.98" \
	--adam_epsilon="1e-06" \
	--gradient_checkpointing \
	--mask_time_prob="0.65" \
	--mask_time_length="10"
```

The experiment was run on 8 V100 GPUs (16 GB RAM each) for 4 days.
In case you have more than 8 GPUs available for a higher effective `batch_size`,
it is recommended to increase the `learning_rate` to `0.005` for faster convergence.
The results of this run can be seen [here](https://wandb.ai/patrickvonplaten/test/reports/Wav2Vec2-Base--VmlldzoxMTUyODQ0?accessToken=rg6e8u9yizx964k8q47zctq1m4afpvtn1i3qi9exgdmzip6xwkfzvagfajpzj55n)
and the checkpoint pretrained for 85,000 steps can be accessed [here](https://huggingface.co/patrickvonplaten/wav2vec2-base-repro-960h-libri-85k-steps).

### Large

To pre-train a `"large-sized"` Wav2Vec2 model, *e.g.* [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60), on [librispeech_asr](https://huggingface.co/datasets/librispeech_asr), the following command can be run:

```bash
accelerate launch run_wav2vec2_pretraining_no_trainer.py \
	--dataset_name=librispeech_asr \
	--dataset_config_names clean clean other \
	--dataset_split_names train.100 train.360 train.500 \
	--output_dir=./test \
	--max_train_steps=200000 \
	--num_warmup_steps=32000 \
	--gradient_accumulation_steps=8 \
	--learning_rate=0.001 \
	--weight_decay=0.01 \
	--max_duration_in_seconds=20.0 \
	--min_duration_in_seconds=2.0 \
	--model_name_or_path=./ \
	--logging_steps=1 \
	--saving_steps=10000 \
	--per_device_train_batch_size=2 \
	--per_device_eval_batch_size=4 \
	--adam_beta1=0.9 \
	--adam_beta2=0.98 \
	--adam_epsilon=1e-06 \
	--gradient_checkpointing \
	--mask_time_prob=0.65 \
	--mask_time_length=10
```

The experiment was run on 8 V100 GPUs (16 GB RAM each) for 7 days.
In case you have more than 8 GPUs available for a higher effective `batch_size`,
it is recommended to increase the `learning_rate` to `0.005` for faster convergence.

The results of this run can be seen [here](https://wandb.ai/patrickvonplaten/pretraining-wav2vec2/reports/Wav2Vec2-Large--VmlldzoxMTAwODM4?accessToken=wm3qzcnldrwsa31tkvf2pdmilw3f63d4twtffs86ou016xjbyilh55uoi3mo1qzc)
and the checkpoint pretrained for 120,000 steps can be accessed [here](https://huggingface.co/patrickvonplaten/wav2vec2-large-repro-960h-libri-120k-steps).
transformers/examples/pytorch/speech-pretraining/README.md/0
{ "file_path": "transformers/examples/pytorch/speech-pretraining/README.md", "repo_id": "transformers", "token_count": 2600 }
289
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Finetuning the library models for text classification.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys from dataclasses import dataclass, field from typing import List, Optional import datasets import evaluate import numpy as np from datasets import Value, load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.45.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) do_regression: bool = field( default=None, metadata={ "help": "Whether to do regression instead of classification. If None, will be inferred from the dataset." }, ) text_column_names: Optional[str] = field( default=None, metadata={ "help": ( "The name of the text column in the input dataset or a CSV/JSON file. " 'If not specified, will use the "sentence" column for single/multi-label classification task.' ) }, ) text_column_delimiter: Optional[str] = field( default=" ", metadata={"help": "The delimiter to use to join text columns into a single sentence."} ) train_split_name: Optional[str] = field( default=None, metadata={ "help": 'The name of the train split in the input dataset. If not specified, will use the "train" split when do_train is enabled' }, ) validation_split_name: Optional[str] = field( default=None, metadata={ "help": 'The name of the validation split in the input dataset. If not specified, will use the "validation" split when do_eval is enabled' }, ) test_split_name: Optional[str] = field( default=None, metadata={ "help": 'The name of the test split in the input dataset. 
If not specified, will use the "test" split when do_predict is enabled' }, ) remove_splits: Optional[str] = field( default=None, metadata={"help": "The splits to remove from the dataset. Multiple splits should be separated by commas."}, ) remove_columns: Optional[str] = field( default=None, metadata={"help": "The columns to remove from the dataset. Multiple columns should be separated by commas."}, ) label_column_name: Optional[str] = field( default=None, metadata={ "help": ( "The name of the label column in the input dataset or a CSV/JSON file. " 'If not specified, will use the "label" column for single/multi-label classification task' ) }, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) shuffle_train_dataset: bool = field( default=False, metadata={"help": "Whether to shuffle the train dataset or not."} ) shuffle_seed: int = field( default=42, metadata={"help": "Random seed that will be used to shuffle the train dataset."} ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) metric_name: Optional[str] = field(default=None, metadata={"help": "The metric to use for evaluation."}) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.dataset_name is None: if self.train_file is None or self.validation_file is None: raise ValueError(" training/validation file or a dataset name.") train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
""" model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) ignore_mismatched_sizes: bool = field( default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, ) def get_label_list(raw_dataset, split="train") -> List[str]: """Get the list of labels from a multi-label dataset""" if isinstance(raw_dataset[split]["label"][0], list): label_list = [label for sample in raw_dataset[split]["label"] for label in sample] label_list = list(set(label_list)) else: label_list = raw_dataset[split].unique("label") # we will treat the label list as a list of string instead of int, consistent with model.config.label2id label_list = [str(label) for label in label_list] return label_list def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_classification", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files, or specify a dataset name # to load from huggingface/datasets. In ether case, you can specify a the key of the column(s) containing the text and # the key of the column containing the label. If multiple columns are specified for the text, they will be joined together # for the actual text value. # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # Try print some info about the dataset logger.info(f"Dataset loaded: {raw_datasets}") logger.info(raw_datasets) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." 
data_files["test"] = data_args.test_file else: raise ValueError("Need either a dataset name or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files raw_datasets = load_dataset( "csv", data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) else: # Loading a dataset from local json files raw_datasets = load_dataset( "json", data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. if data_args.remove_splits is not None: for split in data_args.remove_splits.split(","): logger.info(f"removing split {split}") raw_datasets.pop(split) if data_args.train_split_name is not None: logger.info(f"using {data_args.train_split_name} as train set") raw_datasets["train"] = raw_datasets[data_args.train_split_name] raw_datasets.pop(data_args.train_split_name) if data_args.validation_split_name is not None: logger.info(f"using {data_args.validation_split_name} as validation set") raw_datasets["validation"] = raw_datasets[data_args.validation_split_name] raw_datasets.pop(data_args.validation_split_name) if data_args.test_split_name is not None: logger.info(f"using {data_args.test_split_name} as test set") raw_datasets["test"] = raw_datasets[data_args.test_split_name] raw_datasets.pop(data_args.test_split_name) if data_args.remove_columns is not None: for split in raw_datasets.keys(): for column in data_args.remove_columns.split(","): logger.info(f"removing column {column} from split {split}") raw_datasets[split] = raw_datasets[split].remove_columns(column) if data_args.label_column_name is not None and data_args.label_column_name != "label": for key in raw_datasets.keys(): raw_datasets[key] = raw_datasets[key].rename_column(data_args.label_column_name, "label") # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = ( raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if data_args.do_regression is None else data_args.do_regression ) is_multi_label = False if is_regression: label_list = None num_labels = 1 # regession requires float as label type, let's cast it if needed for split in raw_datasets.keys(): if raw_datasets[split].features["label"].dtype not in ["float32", "float64"]: logger.warning( f"Label type for {split} set to float32, was {raw_datasets[split].features['label'].dtype}" ) features = raw_datasets[split].features features.update({"label": Value("float32")}) try: raw_datasets[split] = raw_datasets[split].cast(features) except TypeError as error: logger.error( f"Unable to cast {split} set to float32, please check the labels are correct, or maybe try with --do_regression=False" ) raise error else: # classification if raw_datasets["train"].features["label"].dtype == "list": # multi-label classification is_multi_label = True logger.info("Label type is list, doing multi-label classification") # Trying to find the number of labels in a multi-label classification task # We have to deal with common cases that labels appear in the training set but not in the validation/test set. # So we build the label list from the union of labels in train/val/test. 
label_list = get_label_list(raw_datasets, split="train") for split in ["validation", "test"]: if split in raw_datasets: val_or_test_labels = get_label_list(raw_datasets, split=split) diff = set(val_or_test_labels).difference(set(label_list)) if len(diff) > 0: # add the labels that appear in val/test but not in train, throw a warning logger.warning( f"Labels {diff} in {split} set but not in training set, adding them to the label list" ) label_list += list(diff) # if label is -1, we throw a warning and remove it from the label list for label in label_list: if label == -1: logger.warning("Label -1 found in label list, removing it.") label_list.remove(label) label_list.sort() num_labels = len(label_list) if num_labels <= 1: raise ValueError("You need more than one label to do classification.") # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task="text-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) if is_regression: config.problem_type = "regression" logger.info("setting problem type to regression") elif is_multi_label: config.problem_type = "multi_label_classification" logger.info("setting problem type to multi label classification") else: config.problem_type = "single_label_classification" logger.info("setting problem type to single label classification") tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # for training ,we will update the config with label infos, # if do_train is not set, we will use the label infos in the config if training_args.do_train and not is_regression: # classification, training label_to_id = {v: i for i, v in enumerate(label_list)} # update config with label infos if model.config.label2id != label_to_id: logger.warning( "The label2id key in the model config.json is not equal to the label2id key of this " "run. You can ignore this if you are doing finetuning." 
) model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in label_to_id.items()} elif not is_regression: # classification, but not training logger.info("using label infos in the model config") logger.info("label2id: {}".format(model.config.label2id)) label_to_id = model.config.label2id else: # regression label_to_id = None if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def multi_labels_to_ids(labels: List[str]) -> List[float]: ids = [0.0] * len(label_to_id) # BCELoss requires float as target type for label in labels: ids[label_to_id[label]] = 1.0 return ids def preprocess_function(examples): if data_args.text_column_names is not None: text_column_names = data_args.text_column_names.split(",") # join together text columns into "sentence" column examples["sentence"] = examples[text_column_names[0]] for column in text_column_names[1:]: for i in range(len(examples[column])): examples["sentence"][i] += data_args.text_column_delimiter + examples[column][i] # Tokenize the texts result = tokenizer(examples["sentence"], padding=padding, max_length=max_seq_length, truncation=True) if label_to_id is not None and "label" in examples: if is_multi_label: result["label"] = [multi_labels_to_ids(l) for l in examples["label"]] else: result["label"] = [(label_to_id[str(l)] if l != -1 else -1) for l in examples["label"]] return result # Running the preprocessing pipeline on all the datasets with training_args.main_process_first(desc="dataset map pre-processing"): raw_datasets = raw_datasets.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset.") train_dataset = raw_datasets["train"] if data_args.shuffle_train_dataset: logger.info("Shuffling the training dataset") train_dataset = train_dataset.shuffle(seed=data_args.shuffle_seed) if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation or test dataset if validation is not defined.") else: logger.warning("Validation dataset not found. 
Falling back to test dataset for validation.") eval_dataset = raw_datasets["test"] else: eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test"] # remove label column if it exists if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") if data_args.metric_name is not None: metric = ( evaluate.load(data_args.metric_name, config_name="multilabel", cache_dir=model_args.cache_dir) if is_multi_label else evaluate.load(data_args.metric_name, cache_dir=model_args.cache_dir) ) logger.info(f"Using metric {data_args.metric_name} for evaluation.") else: if is_regression: metric = evaluate.load("mse", cache_dir=model_args.cache_dir) logger.info("Using mean squared error (mse) as regression score, you can use --metric_name to overwrite.") else: if is_multi_label: metric = evaluate.load("f1", config_name="multilabel", cache_dir=model_args.cache_dir) logger.info( "Using multilabel F1 for multi-label classification task, you can use --metric_name to overwrite." ) else: metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) logger.info("Using accuracy as classification score, you can use --metric_name to overwrite.") def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions if is_regression: preds = np.squeeze(preds) result = metric.compute(predictions=preds, references=p.label_ids) elif is_multi_label: preds = np.array([np.where(p > 0, 1, 0) for p in preds]) # convert logits to multi-hot encoding # Micro F1 is commonly used in multi-label classification result = metric.compute(predictions=preds, references=p.label_ids, average="micro") else: preds = np.argmax(preds, axis=1) result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if # we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(eval_dataset=eval_dataset) max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Predict ***") # Removing the `label` columns if exists because it might contains -1 and Trainer won't like that. if "label" in predict_dataset.features: predict_dataset = predict_dataset.remove_columns("label") predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions if is_regression: predictions = np.squeeze(predictions) elif is_multi_label: # Convert logits to multi-hot encoding. We compare the logits to 0 instead of 0.5, because the sigmoid is not applied. # You can also pass `preprocess_logits_for_metrics=lambda logits, labels: nn.functional.sigmoid(logits)` to the Trainer # and set p > 0.5 below (less efficient in this case) predictions = np.array([np.where(p > 0, 1, 0) for p in predictions]) else: predictions = np.argmax(predictions, axis=1) output_predict_file = os.path.join(training_args.output_dir, "predict_results.txt") if trainer.is_world_process_zero(): with open(output_predict_file, "w") as writer: logger.info("***** Predict results *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") elif is_multi_label: # recover from multi-hot encoding item = [label_list[i] for i in range(len(item)) if item[i] == 1] writer.write(f"{index}\t{item}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") logger.info("Predict results saved at {}".format(output_predict_file)) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
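# ---------------------------------------------------------------------------
# Illustrative invocation (added for clarity; not part of the original script).
# The file names and hyperparameter values below are assumptions -- any CSV/JSON
# file with a text column and a label column works with the flags used above:
#
#   python run_classification.py \
#     --model_name_or_path google-bert/bert-base-uncased \
#     --train_file data/train.csv --validation_file data/valid.csv \
#     --text_column_names text --label_column_name label \
#     --do_train --do_eval --max_seq_length 128 \
#     --output_dir ./classification_output
# ---------------------------------------------------------------------------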
transformers/examples/pytorch/text-classification/run_classification.py/0
{ "file_path": "transformers/examples/pytorch/text-classification/run_classification.py", "repo_id": "transformers", "token_count": 13913 }
290
#!/usr/bin/env python # coding=utf-8 # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for sequence to sequence. """ # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, M2M100Tokenizer, MBart50Tokenizer, MBart50TokenizerFast, MBartTokenizer, MBartTokenizerFast, Seq2SeqTrainer, Seq2SeqTrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.45.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt") logger = logging.getLogger(__name__) # A list of all multilingual tokenizer which require src_lang and tgt_lang attributes. MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, M2M100Tokenizer] @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." 
) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ source_lang: str = field(default=None, metadata={"help": "Source language id for translation."}) target_lang: str = field(default=None, metadata={"help": "Target language id for translation."}) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a jsonlines)."}) validation_file: Optional[str] = field( default=None, metadata={ "help": "An optional input evaluation data file to evaluate the metrics (sacrebleu) on a jsonlines file." }, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to evaluate the metrics (sacrebleu) on a jsonlines file."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) num_beams: Optional[int] = field( default=1, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." 
}, ) source_prefix: Optional[str] = field( default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."} ) forced_bos_token: Optional[str] = field( default=None, metadata={ "help": ( "The token to force as the first generated token after the :obj:`decoder_start_token_id`.Useful for" " multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token needs to" " be the target language token.(Usually it is the target language token)" ) }, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") elif self.source_lang is None or self.target_lang is None: raise ValueError("Need to specify the source language and the target language.") # accepting both json and jsonl file extensions, as # many jsonlines files actually have a .json extension valid_extensions = ["json", "jsonl"] if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in valid_extensions, "`train_file` should be a jsonlines file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in valid_extensions, "`validation_file` should be a jsonlines file." if self.val_max_target_length is None: self.val_max_target_length = self.max_target_length def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_translation", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") if data_args.source_prefix is None and model_args.model_name_or_path in [ "google-t5/t5-small", "google-t5/t5-base", "google-t5/t5-large", "google-t5/t5-3b", "google-t5/t5-11b", ]: logger.warning( "You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with " "`--source_prefix 'translate English to German: ' `" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For translation, only JSON files are supported, with one field named "translation" containing two keys for the # source and target languages (unless you adapt what follows). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] if extension == "jsonl": builder_name = "json" # the "json" builder reads both .json and .jsonl files else: builder_name = extension # e.g. "parquet" raw_datasets = load_dataset( builder_name, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading. 
# Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # Set decoder_start_token_id if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): if isinstance(tokenizer, MBartTokenizer): model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang] else: model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") prefix = data_args.source_prefix if data_args.source_prefix is not None else "" # Preprocessing the datasets. # We need to tokenize inputs and targets. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names elif training_args.do_predict: column_names = raw_datasets["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # For translation we set the codes of our source and target languages (only useful for mBART, the others will # ignore those attributes). if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)): assert data_args.target_lang is not None and data_args.source_lang is not None, ( f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and " "--target_lang arguments." ) tokenizer.src_lang = data_args.source_lang tokenizer.tgt_lang = data_args.target_lang # For multilingual translation models like mBART-50 and M2M100 we need to force the target language token # as the first generated token. We ask the user to explicitly provide this as --forced_bos_token argument. forced_bos_token_id = ( tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None ) model.config.forced_bos_token_id = forced_bos_token_id # Get the language codes for input/target. 
source_lang = data_args.source_lang.split("_")[0] target_lang = data_args.target_lang.split("_")[0] # Check the whether the source target length fits in the model, if it has absolute positional embeddings if ( hasattr(model.config, "max_position_embeddings") and not hasattr(model.config, "relative_attention_max_distance") and model.config.max_position_embeddings < data_args.max_source_length ): raise ValueError( f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has" f" {model.config.max_position_embeddings} position encodings. Consider either reducing" f" `--max_source_length` to {model.config.max_position_embeddings} or using a model with larger position " "embeddings" ) # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): logger.warning( "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for " f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory" ) def preprocess_function(examples): inputs = [ex[source_lang] for ex in examples["translation"]] targets = [ex[target_lang] for ex in examples["translation"]] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. 
if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if training_args.do_predict: max_target_length = data_args.val_max_target_length if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) # Data collator label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id if data_args.pad_to_max_length: data_collator = default_data_collator else: data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) # Metric metric = evaluate.load("sacrebleu", cache_dir=model_args.cache_dir) def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [[label.strip()] for label in labels] return preds, labels def compute_metrics(eval_preds): preds, labels = eval_preds if isinstance(preds, tuple): preds = preds[0] # Replace -100s used for padding as we can't decode them preds = np.where(preds != -100, preds, tokenizer.pad_token_id) decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Some simple post-processing decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) result = metric.compute(predictions=decoded_preds, 
references=decoded_labels) result = {"bleu": result["score"]} prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] result["gen_len"] = np.mean(prediction_lens) result = {k: round(v, 4) for k, v in result.items()} return result # Initialize our Trainer trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} max_length = ( training_args.generation_max_length if training_args.generation_max_length is not None else data_args.val_max_target_length ) num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval") max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Predict ***") predict_results = trainer.predict( predict_dataset, metric_key_prefix="predict", max_length=max_length, num_beams=num_beams ) metrics = predict_results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) if trainer.is_world_process_zero(): if training_args.predict_with_generate: predictions = predict_results.predictions predictions = np.where(predictions != -100, predictions, tokenizer.pad_token_id) predictions = tokenizer.batch_decode( predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True ) predictions = [pred.strip() for pred in predictions] output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt") with open(output_prediction_file, "w", encoding="utf-8") as writer: writer.write("\n".join(predictions)) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None] if len(languages) > 0: kwargs["language"] = 
languages if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
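# ---------------------------------------------------------------------------
# Illustrative invocation (added for clarity; not part of the original script).
# The dataset name/config and hyperparameters are assumptions -- see the task
# README for maintained commands:
#
#   python run_translation.py \
#     --model_name_or_path google-t5/t5-small \
#     --source_lang en --target_lang ro \
#     --source_prefix "translate English to Romanian: " \
#     --dataset_name wmt16 --dataset_config_name ro-en \
#     --do_train --do_eval --predict_with_generate \
#     --output_dir ./translation_output
# ---------------------------------------------------------------------------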
transformers/examples/pytorch/translation/run_translation.py/0
{ "file_path": "transformers/examples/pytorch/translation/run_translation.py", "repo_id": "transformers", "token_count": 12306 }
291
from dataclasses import dataclass, field from typing import Optional @dataclass class TrainingArguments: """ Configuration for training model. """ model_ckpt: Optional[str] = field( default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."} ) save_dir: Optional[str] = field( default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} ) dataset_name_train: Optional[str] = field( default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."} ) dataset_name_valid: Optional[str] = field( default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} ) train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."}) valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."}) weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."}) shuffle_buffer: Optional[int] = field( default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."} ) learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate fo training."}) lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate."}) num_warmup_steps: Optional[int] = field( default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."} ) gradient_accumulation_steps: Optional[int] = field( default=16, metadata={"help": "Number of gradient accumulation steps."} ) gradient_checkpointing: Optional[bool] = field( default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."} ) max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."}) max_eval_steps: Optional[int] = field( default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} ) seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."}) seed: Optional[int] = field(default=1, metadata={"help": "Training seed."}) save_checkpoint_steps: Optional[int] = field( default=1024, metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."}, ) resume_from_checkpoint: Optional[str] = field( default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."} ) tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."}) @dataclass class EvaluationArguments: """ Configuration for evaluating model. """ model_ckpt: Optional[str] = field( default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} ) dataset_name: Optional[str] = field( default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} ) batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."}) max_eval_steps: Optional[int] = field( default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} ) seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."}) seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."}) @dataclass class HumanEvalArguments: """ Configuration for running evaluation on HumanEval dataset. 
""" model_ckpt: Optional[str] = field( default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} ) num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."}) num_tasks: Optional[int] = field( default=None, metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."}, ) do_sample: Optional[bool] = field( default=True, metadata={"help": "Sample from the language model's output distribution."} ) temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."}) max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."}) top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."}) top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."}) batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."}) n_samples: Optional[int] = field( default=200, metadata={"help": "Number of completions to generate for each sample."} ) seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."}) output_file: Optional[str] = field( default="eval_results.json", metadata={"help": "Random seed used for evaluation."} ) HF_ALLOW_CODE_EVAL: Optional[str] = field( default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"} ) device_int: Optional[int] = field( default=-1, metadata={ "help": ( "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive" " number corresponds to which GPU device id to run on." ) }, ) @dataclass class PreprocessingArguments: """ Configuration for preprocessing data. """ num_workers: Optional[int] = field( default=None, metadata={ "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available." 
}, ) dataset_name: Optional[str] = field( default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."} ) output_dir: Optional[str] = field( default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."} ) samples_per_file: Optional[int] = field( default=100_000, metadata={"help": "Number of files to save per JSON output file."} ) text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."}) line_max: Optional[float] = field( default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."} ) line_mean: Optional[float] = field( default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} ) alpha_frac: Optional[float] = field( default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} ) min_token_ratio: Optional[float] = field( default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} ) filter_proba: Optional[float] = field( default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."} ) tokenizer: Optional[str] = field( default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}, ) near_deduplication: Optional[bool] = field( default=False, metadata={"help": "If True, near-duplicate samples are removed."} ) jaccard_threshold: Optional[float] = field( default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."} ) @dataclass class TokenizerTrainingArguments: """ Configuration for tokenizer training. """ base_tokenizer: Optional[str] = field( default="openai-community/gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."} ) dataset_name: Optional[str] = field( default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."} ) text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."}) vocab_size: Optional[int] = field(default=200_000, metadata={"help": "Number of examples to train tokenizer on."}) n_examples: Optional[int] = field( default=32768, metadata={"help": "Number of examples to train the tokenizer on."} ) tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."}) push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."}) @dataclass class PretokenizationArguments: """ Configuration for data pretokenization. """ tokenizer_dir: Optional[str] = field( default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."} ) dataset_name: Optional[str] = field( default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."} ) tokenized_data_repo: Optional[str] = field( default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."} ) num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."}) @dataclass class InitializationArguments: """ Configuration for initializing new model. 
""" config_name: Optional[str] = field( default="openai-community/gpt2-large", metadata={"help": "Configuration to use for model initialization."} ) tokenizer_name: Optional[str] = field( default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."} ) model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."}) push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
transformers/examples/research_projects/codeparrot/scripts/arguments.py/0
{ "file_path": "transformers/examples/research_projects/codeparrot/scripts/arguments.py", "repo_id": "transformers", "token_count": 3565 }
292
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocessing script before training the distilled model. """ import argparse import logging import pickle from collections import Counter logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) logger = logging.getLogger(__name__) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)" ) parser.add_argument( "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset." ) parser.add_argument( "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file." ) parser.add_argument("--vocab_size", default=30522, type=int) args = parser.parse_args() logger.info(f"Loading data from {args.data_file}") with open(args.data_file, "rb") as fp: data = pickle.load(fp) logger.info("Counting occurrences for MLM.") counter = Counter() for tk_ids in data: counter.update(tk_ids) counts = [0] * args.vocab_size for k, v in counter.items(): counts[k] = v logger.info(f"Dump to {args.token_counts_dump}") with open(args.token_counts_dump, "wb") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
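# ---------------------------------------------------------------------------
# Downstream usage sketch (added for illustration; not part of the original
# script). The distillation training script turns these raw counts into
# smoothed MLM masking weights, in the spirit of the XLM/word2vec recipe
# mentioned in the docstring above. The exponent value is an assumption made
# here for illustration only; the actual value is a training hyperparameter.
# ---------------------------------------------------------------------------
def smooth_token_counts(counts, smoothing=0.7):
    """Turn raw token counts into unnormalized masking weights (rarer tokens get larger weights)."""
    import numpy as np

    counts = np.maximum(np.asarray(counts, dtype=np.float64), 1.0)
    return counts**-smoothing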
transformers/examples/research_projects/distillation/scripts/token_counts.py/0
{ "file_path": "transformers/examples/research_projects/distillation/scripts/token_counts.py", "repo_id": "transformers", "token_count": 726 }
293
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Vision-Text dual encoder model training examples

> Note: This example is experimental and might not give the best possible results

The following example showcases how to train a CLIP-like vision-text dual encoder model from a pre-trained vision and text encoder, using the JAX/Flax backend. Such a model can be used for natural language image search and potentially zero-shot image classification.

The model is inspired by the [CLIP](https://openai.com/blog/clip/) approach, introduced by Alec Radford et al. The idea is to train a vision encoder and a text encoder jointly to project the representation of images and their captions into the same embedding space, such that the caption embeddings are located near the embeddings of the images they describe.

JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. Models written in JAX/Flax are **immutable** and updated in a purely functional way, which enables simple and efficient model parallelism.

In this example we will use the vision model from [CLIP](https://huggingface.co/models?filter=clip) as the image encoder and [`FacebookAI/roberta-base`](https://huggingface.co/FacebookAI/roberta-base) as the text encoder. Note that one can also use the [ViT](https://huggingface.co/models?filter=vit) model as image encoder and any other BERT or RoBERTa model as text encoder. To train the model on languages other than English one should choose a text encoder trained on the desired language and an image-text dataset in that language. One such dataset is [WIT](https://github.com/google-research-datasets/wit).

Let's start by creating a model repository to save the trained model and logs. Here we call the model `"clip-roberta-base"`, but you can change the model name as you like.

You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that you are logged in) or via the command line:

```bash
huggingface-cli repo create clip-roberta-base
```

Next we clone the model repository to add the tokenizer and model files.

```bash
git clone https://huggingface.co/<your-username>/clip-roberta-base
```

To ensure that all tensorboard traces will be uploaded correctly, we need to track them. You can run the following command inside your model repo to do so.

```bash
cd clip-roberta-base
git lfs track "*tfevents*"
```

Great, we have set up our model repository. During training, we will automatically push the training logs and model weights to the repo. Next, let's add a symbolic link to the `run_hybrid_clip.py`.

```bash
export MODEL_DIR="./clip-roberta-base"
ln -s ~/transformers/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py run_hybrid_clip.py
```

## How to use the `FlaxHybridCLIP` model:

The `FlaxHybridCLIP` class lets you load any text and vision encoder model to create a dual encoder. Here is an example of how to load the model using pre-trained text and vision models.
```python
from modeling_hybrid_clip import FlaxHybridCLIP

model = FlaxHybridCLIP.from_text_vision_pretrained("google-bert/bert-base-uncased", "openai/clip-vit-base-patch32")

# save the model
model.save_pretrained("bert-clip")

# load the saved model
model = FlaxHybridCLIP.from_pretrained("bert-clip")
```

If the checkpoints are in PyTorch then one could pass `text_from_pt=True` and `vision_from_pt=True`. This will load the PyTorch checkpoints, convert them to Flax, and load the model.

```python
model = FlaxHybridCLIP.from_text_vision_pretrained("google-bert/bert-base-uncased", "openai/clip-vit-base-patch32", text_from_pt=True, vision_from_pt=True)
```

This loads both the text and vision encoders using pre-trained weights; the projection layers are randomly initialized, except for CLIP's vision model. If you use CLIP to initialize the vision model then the vision projection weights are also loaded using the pre-trained weights.

## Prepare the dataset

We will use the MS-COCO dataset to train our dual encoder model. MS-COCO contains over 82,000 images, each of which has at least 5 different caption annotations. The dataset is usually used for image captioning tasks, but we can repurpose the image-caption pairs to train our dual encoder model for image search.

### Download and extract the data.

It consists of two compressed folders: one with images, and the other with the associated image captions. Note that the compressed images folder is 13GB in size.

```bash
wget http://images.cocodataset.org/annotations/annotations_trainval2014.zip
wget http://images.cocodataset.org/zips/train2014.zip

unzip annotations_trainval2014.zip
unzip train2014.zip

mkdir coco_dataset
mv train2014 coco_dataset/
mv annotations coco_dataset/
```

### Prepare dataset files and split the dataset.

```python
import json
import collections

images_dir = "coco_dataset/train2014"
annotation_file = "coco_dataset/annotations/captions_train2014.json"
with open(annotation_file, "r") as f:
    annotations = json.load(f)["annotations"]

image_path_to_caption = collections.defaultdict(list)
for element in annotations:
    caption = f"{element['caption'].lower().rstrip('.')}"
    image_path = images_dir + "/COCO_train2014_" + "%012d.jpg" % (element["image_id"])
    image_path_to_caption[image_path].append(caption)

lines = []
for image_path, captions in image_path_to_caption.items():
    lines.append(json.dumps({"image_path": image_path, "captions": captions}))

train_lines = lines[:-8000]
valid_line = lines[-8000:]
with open("coco_dataset/train_dataset.json", "w") as f:
    f.write("\n".join(train_lines))

with open("coco_dataset/valid_dataset.json", "w") as f:
    f.write("\n".join(valid_line))
```

> Note: The data loading and processing part of this script can still be improved for maximum performance. In particular, one should decode the images beforehand and reuse the decoded arrays instead of decoding them each time (see the sketch below). If the dataset is small or if you have huge disk space, you could also pre-process the whole dataset beforehand and then use it.
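The pre-decoding idea from the note can look roughly like the following. This is a minimal sketch added for illustration: the cache location and the 224x224 target size are assumptions, the latter chosen to match the CLIP vision encoder used in this example.

```python
import json

import numpy as np
from PIL import Image

with open("coco_dataset/train_dataset.json") as f:
    examples = [json.loads(line) for line in f]

# Decode every image once and cache it as a fixed-size uint8 array so the
# training dataloader can load the arrays instead of re-decoding JPEGs.
for example in examples:
    image = Image.open(example["image_path"]).convert("RGB").resize((224, 224))
    np.save(example["image_path"] + ".npy", np.asarray(image, dtype=np.uint8))
```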
## Train the model

Next we can run the example script to train the model:

```bash
python run_hybrid_clip.py \
    --output_dir ${MODEL_DIR} \
    --text_model_name_or_path="FacebookAI/roberta-base" \
    --vision_model_name_or_path="openai/clip-vit-base-patch32" \
    --tokenizer_name="FacebookAI/roberta-base" \
    --train_file="coco_dataset/train_dataset.json" \
    --validation_file="coco_dataset/valid_dataset.json" \
    --do_train --do_eval \
    --num_train_epochs="40" --max_seq_length 96 \
    --per_device_train_batch_size="64" \
    --per_device_eval_batch_size="64" \
    --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \
    --overwrite_output_dir \
    --preprocessing_num_workers 32 \
    --push_to_hub
```

This should finish in ~1h50 mins with a minimum validation loss of 2.43. Training statistics can be accessed on [tensorboard.dev](https://tensorboard.dev/experiment/RUNPYd1yRgSD5kZSb9hDig/#scalars)
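After training, the pushed (or locally saved) checkpoint can be used for the natural language image search use case mentioned at the top. The snippet below is a rough sketch rather than a tested recipe: it assumes the `FlaxHybridCLIP` wrapper exposes CLIP-style `get_text_features`/`get_image_features` methods, and that images are preprocessed with the CLIP feature extractor while text goes through the RoBERTa tokenizer.

```python
import jax.numpy as jnp
from PIL import Image
from transformers import AutoTokenizer, CLIPFeatureExtractor

from modeling_hybrid_clip import FlaxHybridCLIP

model = FlaxHybridCLIP.from_pretrained("clip-roberta-base")
tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.open("coco_dataset/train2014/COCO_train2014_000000000009.jpg").convert("RGB")
pixel_values = feature_extractor(images=image, return_tensors="np").pixel_values
text = tokenizer(["a plate with food on it"], padding=True, return_tensors="np")

# Embed both modalities and compare them with cosine similarity.
image_embeds = model.get_image_features(pixel_values)
text_embeds = model.get_text_features(text.input_ids, text.attention_mask)
image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True)
text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True)
print(jnp.matmul(text_embeds, image_embeds.T))
```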
transformers/examples/research_projects/jax-projects/hybrid_clip/README.md/0
{ "file_path": "transformers/examples/research_projects/jax-projects/hybrid_clip/README.md", "repo_id": "transformers", "token_count": 2322 }
294
## MM-IMDb Based on the script [`run_mmimdb.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/mm-imdb/run_mmimdb.py). [MM-IMDb](http://lisi1.unal.edu.co/mmimdb/) is a Multimodal dataset with around 26,000 movies including images, plots and other metadata. ### Training on MM-IMDb ```bash python run_mmimdb.py \ --data_dir /path/to/mmimdb/dataset/ \ --model_type bert \ --model_name_or_path google-bert/bert-base-uncased \ --output_dir /path/to/save/dir/ \ --do_train \ --do_eval \ --max_seq_len 512 \ --gradient_accumulation_steps 20 \ --num_image_embeds 3 \ --num_train_epochs 100 \ --patience 5 ```
transformers/examples/research_projects/mm-imdb/README.md/0
{ "file_path": "transformers/examples/research_projects/mm-imdb/README.md", "repo_id": "transformers", "token_count": 287 }
295
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Bart + Beam Search to ONNX Author: [@fatcat-z](https://github.com/fatcat-z) This folder contains an example of exporting Bart + Beam Search generation (`BartForConditionalGeneration`) to ONNX. Beam Search contains a for-loop workflow, so we need to make them TorchScript-compatible for exporting to ONNX. This example shows how to make a Bart model be TorchScript-compatible by wrapping up it into a new model. In addition, some changes were made to the `beam_search()` function to make it TorchScript-compatible. ## How to run the example To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/transformers cd transformers pip install '.[onnxruntime]' ``` Then cd in this example folder and run ```bash pip install -r requirements.txt ``` Now you can run the example command below to get the example ONNX file: ```bash python run_onnx_exporter.py --model_name_or_path facebook/bart-base ```
transformers/examples/research_projects/onnx/summarization/README.md/0
{ "file_path": "transformers/examples/research_projects/onnx/summarization/README.md", "repo_id": "transformers", "token_count": 463 }
296
#! /usr/bin/env python3 # coding=utf-8 # Copyright (c) 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Example command with bag of words: python run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95 Example command with discriminator: python run_pplm.py -D sentiment --class_label 3 --cond_text "The lake" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95 """ import argparse import json from operator import add from typing import List, Optional, Tuple, Union import numpy as np import torch from pplm_classification_head import ClassificationHead from torch import nn from tqdm import trange from transformers import GPT2LMHeadModel, GPT2Tokenizer from transformers.file_utils import cached_path PPLM_BOW = 1 PPLM_DISCRIM = 2 PPLM_BOW_DISCRIM = 3 SMALL_CONST = 1e-15 BIG_CONST = 1e10 BAG_OF_WORDS_ARCHIVE_MAP = { "legal": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/legal.txt", "military": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/military.txt", "politics": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/politics.txt", "religion": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/religion.txt", "science": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/science.txt", "space": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/space.txt", "technology": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/technology.txt", } DISCRIMINATOR_MODELS_PARAMS = { "clickbait": { "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt", "class_size": 2, "embed_size": 1024, "class_vocab": {"non_clickbait": 0, "clickbait": 1}, "default_class": 1, "pretrained_model": "openai-community/gpt2-medium", }, "sentiment": { "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt", "class_size": 5, "embed_size": 1024, "class_vocab": {"very_positive": 2, "very_negative": 3}, "default_class": 3, "pretrained_model": "openai-community/gpt2-medium", }, } def top_k_filter(logits, k, probs=False): """ Masks everything but the k top entries as -infinity (1e10). Used to mask logits such that e^-infinity -> 0 won't contribute to the sum of the denominator. 
""" if k == 0: return logits else: values = torch.topk(logits, k)[0] batch_mins = values[:, -1].view(-1, 1).expand_as(logits) if probs: return torch.where(logits < batch_mins, torch.ones_like(logits) * 0.0, logits) return torch.where(logits < batch_mins, torch.ones_like(logits) * -BIG_CONST, logits) def perturb_past( past, model, last, unpert_past=None, unpert_logits=None, accumulated_hidden=None, grad_norms=None, stepsize=0.01, one_hot_bows_vectors=None, classifier=None, class_label=None, loss_type=0, num_iterations=3, horizon_length=1, window_length=0, decay=False, gamma=1.5, kl_scale=0.01, device="cuda", ): # Generate inital perturbed past grad_accumulator = [(np.zeros(p.shape).astype("float32")) for p in past] if accumulated_hidden is None: accumulated_hidden = 0 if decay: decay_mask = torch.arange(0.0, 1.0 + SMALL_CONST, 1.0 / (window_length))[1:] else: decay_mask = 1.0 # TODO fix this comment (SUMANTH) # Generate a mask is gradient perturbated is based on a past window _, _, _, curr_length, _ = past[0].shape if curr_length > window_length and window_length > 0: ones_key_val_shape = tuple(past[0].shape[:-2]) + (window_length,) + tuple(past[0].shape[-1:]) zeros_key_val_shape = tuple(past[0].shape[:-2]) + (curr_length - window_length,) + tuple(past[0].shape[-1:]) ones_mask = torch.ones(ones_key_val_shape) ones_mask = decay_mask * ones_mask.permute(0, 1, 2, 4, 3) ones_mask = ones_mask.permute(0, 1, 2, 4, 3) window_mask = torch.cat((ones_mask, torch.zeros(zeros_key_val_shape)), dim=-2).to(device) else: window_mask = torch.ones_like(past[0]).to(device) # accumulate perturbations for num_iterations loss_per_iter = [] new_accumulated_hidden = None for i in range(num_iterations): print("Iteration ", i + 1) curr_perturbation = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator] # make sure p_.grad is not None for p_ in curr_perturbation: p_.retain_grad() # Compute hidden using perturbed past perturbed_past = list(map(add, past, curr_perturbation)) _, _, _, curr_length, _ = curr_perturbation[0].shape lm_output = model(last, past_key_values=perturbed_past) all_logits, all_hidden = lm_output["logits"], lm_output["hidden_states"] hidden = all_hidden[-1] new_accumulated_hidden = accumulated_hidden + torch.sum(hidden, dim=1).detach() # TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth) logits = all_logits[:, -1, :] probs = nn.functional.softmax(logits, dim=-1) loss = 0.0 loss_list = [] if loss_type == PPLM_BOW or loss_type == PPLM_BOW_DISCRIM: for one_hot_bow in one_hot_bows_vectors: bow_logits = torch.mm(probs, torch.t(one_hot_bow)) bow_loss = -torch.log(torch.sum(bow_logits)) loss += bow_loss loss_list.append(bow_loss) print(" pplm_bow_loss:", loss.data.cpu().numpy()) if loss_type == 2 or loss_type == 3: ce_loss = nn.CrossEntropyLoss() # TODO why we need to do this assignment and not just using unpert_past? 
(Sumanth) curr_unpert_past = unpert_past curr_probs = torch.unsqueeze(probs, dim=1) wte = model.resize_token_embeddings() for _ in range(horizon_length): inputs_embeds = torch.matmul(curr_probs, wte.weight.data) lm_output = model(past_key_values=curr_unpert_past, inputs_embeds=inputs_embeds) curr_all_logits, curr_unpert_past, curr_all_hidden = ( lm_output["logits"], lm_output["past_key_values"], lm_output["hidden_states"], ) curr_logits = curr_all_logits[:, -1, :] curr_probs = nn.functional.softmax(curr_logits, dim=-1) curr_probs = torch.unsqueeze(curr_probs, dim=1) curr_hidden = curr_all_hidden[-1] new_accumulated_hidden = new_accumulated_hidden + torch.sum(curr_hidden, dim=1) prediction = classifier(new_accumulated_hidden / (curr_length + 1 + horizon_length)) label = torch.tensor(prediction.shape[0] * [class_label], device=device, dtype=torch.long) discrim_loss = ce_loss(prediction, label) print(" pplm_discrim_loss:", discrim_loss.data.cpu().numpy()) loss += discrim_loss loss_list.append(discrim_loss) kl_loss = 0.0 if kl_scale > 0.0: unpert_probs = nn.functional.softmax(unpert_logits[:, -1, :], dim=-1) unpert_probs = unpert_probs + SMALL_CONST * (unpert_probs <= SMALL_CONST).float().to(device).detach() correction = SMALL_CONST * (probs <= SMALL_CONST).float().to(device).detach() corrected_probs = probs + correction.detach() kl_loss = kl_scale * ((corrected_probs * (corrected_probs / unpert_probs).log()).sum()) print(" kl_loss", kl_loss.data.cpu().numpy()) loss += kl_loss loss_per_iter.append(loss.data.cpu().numpy()) print(" pplm_loss", (loss - kl_loss).data.cpu().numpy()) # compute gradients loss.backward() # calculate gradient norms if grad_norms is not None and loss_type == PPLM_BOW: grad_norms = [ torch.max(grad_norms[index], torch.norm(p_.grad * window_mask)) for index, p_ in enumerate(curr_perturbation) ] else: grad_norms = [ (torch.norm(p_.grad * window_mask) + SMALL_CONST) for index, p_ in enumerate(curr_perturbation) ] # normalize gradients grad = [ -stepsize * (p_.grad * window_mask / grad_norms[index] ** gamma).data.cpu().numpy() for index, p_ in enumerate(curr_perturbation) ] # accumulate gradient grad_accumulator = list(map(add, grad, grad_accumulator)) # reset gradients, just to make sure for p_ in curr_perturbation: p_.grad.data.zero_() # removing past from the graph new_past = [] for p_ in past: new_past.append(p_.detach()) past = new_past # apply the accumulated perturbations to the past grad_accumulator = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator] pert_past = list(map(add, past, grad_accumulator)) return pert_past, new_accumulated_hidden, grad_norms, loss_per_iter def get_classifier( name: Optional[str], class_label: Union[str, int], device: str ) -> Tuple[Optional[ClassificationHead], Optional[int]]: if name is None: return None, None params = DISCRIMINATOR_MODELS_PARAMS[name] classifier = ClassificationHead(class_size=params["class_size"], embed_size=params["embed_size"]).to(device) if "url" in params: resolved_archive_file = cached_path(params["url"]) elif "path" in params: resolved_archive_file = params["path"] else: raise ValueError("Either url or path have to be specified in the discriminator model parameters") classifier.load_state_dict(torch.load(resolved_archive_file, map_location=device)) classifier.eval() if isinstance(class_label, str): if class_label in params["class_vocab"]: label_id = params["class_vocab"][class_label] else: label_id = params["default_class"] print("class_label {} not in 
class_vocab".format(class_label)) print("available values are: {}".format(params["class_vocab"])) print("using default class {}".format(label_id)) elif isinstance(class_label, int): if class_label in set(params["class_vocab"].values()): label_id = class_label else: label_id = params["default_class"] print("class_label {} not in class_vocab".format(class_label)) print("available values are: {}".format(params["class_vocab"])) print("using default class {}".format(label_id)) else: label_id = params["default_class"] return classifier, label_id def get_bag_of_words_indices(bag_of_words_ids_or_paths: List[str], tokenizer) -> List[List[List[int]]]: bow_indices = [] for id_or_path in bag_of_words_ids_or_paths: if id_or_path in BAG_OF_WORDS_ARCHIVE_MAP: filepath = cached_path(BAG_OF_WORDS_ARCHIVE_MAP[id_or_path]) else: filepath = id_or_path with open(filepath, "r") as f: words = f.read().strip().split("\n") bow_indices.append([tokenizer.encode(word.strip(), add_prefix_space=True) for word in words]) return bow_indices def build_bows_one_hot_vectors(bow_indices, tokenizer, device="cuda"): if bow_indices is None: return None one_hot_bows_vectors = [] for single_bow in bow_indices: single_bow = list(filter(lambda x: len(x) <= 1, single_bow)) single_bow = torch.tensor(single_bow).to(device) num_words = single_bow.shape[0] one_hot_bow = torch.zeros(num_words, tokenizer.vocab_size).to(device) one_hot_bow.scatter_(1, single_bow, 1) one_hot_bows_vectors.append(one_hot_bow) return one_hot_bows_vectors def full_text_generation( model, tokenizer, context=None, num_samples=1, device="cuda", bag_of_words=None, discrim=None, class_label=None, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, repetition_penalty=1.0, **kwargs, ): classifier, class_id = get_classifier(discrim, class_label, device) bow_indices = [] if bag_of_words: bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer) if bag_of_words and classifier: print("Both PPLM-BoW and PPLM-Discrim are on. 
This is not optimized.") loss_type = PPLM_BOW_DISCRIM elif bag_of_words: loss_type = PPLM_BOW print("Using PPLM-BoW") elif classifier is not None: loss_type = PPLM_DISCRIM print("Using PPLM-Discrim") else: raise Exception("Specify either a bag of words or a discriminator") unpert_gen_tok_text, _, _ = generate_text_pplm( model=model, tokenizer=tokenizer, context=context, device=device, length=length, sample=sample, perturb=False, repetition_penalty=repetition_penalty, ) if device == "cuda": torch.cuda.empty_cache() pert_gen_tok_texts = [] discrim_losses = [] losses_in_time = [] for i in range(num_samples): pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm( model=model, tokenizer=tokenizer, context=context, device=device, perturb=True, bow_indices=bow_indices, classifier=classifier, class_label=class_id, loss_type=loss_type, length=length, stepsize=stepsize, temperature=temperature, top_k=top_k, sample=sample, num_iterations=num_iterations, grad_length=grad_length, horizon_length=horizon_length, window_length=window_length, decay=decay, gamma=gamma, gm_scale=gm_scale, kl_scale=kl_scale, repetition_penalty=repetition_penalty, ) pert_gen_tok_texts.append(pert_gen_tok_text) if classifier is not None: discrim_losses.append(discrim_loss.data.cpu().numpy()) losses_in_time.append(loss_in_time) if device == "cuda": torch.cuda.empty_cache() return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time def generate_text_pplm( model, tokenizer, context=None, past=None, device="cuda", perturb=True, bow_indices=None, classifier=None, class_label=None, loss_type=0, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, repetition_penalty=1.0, ): output_so_far = None if context: context_t = torch.tensor(context, device=device, dtype=torch.long) while len(context_t.shape) < 2: context_t = context_t.unsqueeze(0) output_so_far = context_t # collect one hot vectors for bags of words one_hot_bows_vectors = build_bows_one_hot_vectors(bow_indices, tokenizer, device) grad_norms = None last = None unpert_discrim_loss = 0 loss_in_time = [] for i in trange(length, ascii=True): # Get past/probs for current output, except for last word # Note that GPT takes 2 inputs: past + current_token # run model forward to obtain unperturbed if past is None and output_so_far is not None: last = output_so_far[:, -1:] if output_so_far.shape[1] > 1: past = model(output_so_far[:, :-1])["past_key_values"] lm_output = model(output_so_far) unpert_logits, unpert_past, unpert_all_hidden = ( lm_output["logits"], lm_output["past_key_values"], lm_output["hidden_states"], ) unpert_last_hidden = unpert_all_hidden[-1] # check if we are abowe grad max length if i >= grad_length: current_stepsize = stepsize * 0 else: current_stepsize = stepsize # modify the past if necessary if not perturb or num_iterations == 0: pert_past = past else: accumulated_hidden = unpert_last_hidden[:, :-1, :] accumulated_hidden = torch.sum(accumulated_hidden, dim=1) if past is not None: pert_past, _, grad_norms, loss_this_iter = perturb_past( past, model, last, unpert_past=unpert_past, unpert_logits=unpert_logits, accumulated_hidden=accumulated_hidden, grad_norms=grad_norms, stepsize=current_stepsize, one_hot_bows_vectors=one_hot_bows_vectors, classifier=classifier, class_label=class_label, loss_type=loss_type, num_iterations=num_iterations, horizon_length=horizon_length, window_length=window_length, 
decay=decay, gamma=gamma, kl_scale=kl_scale, device=device, ) loss_in_time.append(loss_this_iter) else: pert_past = past lm_output = model(last, past_key_values=pert_past) pert_logits, past = ( lm_output["logits"], lm_output["past_key_values"], ) pert_logits = pert_logits[:, -1, :] / temperature # + SMALL_CONST for token_idx in set(output_so_far[0].tolist()): if pert_logits[0, token_idx] < 0: pert_logits[0, token_idx] *= repetition_penalty else: pert_logits[0, token_idx] /= repetition_penalty pert_probs = nn.functional.softmax(pert_logits, dim=-1) if classifier is not None: ce_loss = nn.CrossEntropyLoss() prediction = classifier(torch.mean(unpert_last_hidden, dim=1)) label = torch.tensor([class_label], device=device, dtype=torch.long) unpert_discrim_loss = ce_loss(prediction, label) print("unperturbed discrim loss", unpert_discrim_loss.data.cpu().numpy()) else: unpert_discrim_loss = 0 # Fuse the modified model and original model if perturb: unpert_probs = nn.functional.softmax(unpert_logits[:, -1, :], dim=-1) pert_probs = (pert_probs**gm_scale) * (unpert_probs ** (1 - gm_scale)) # + SMALL_CONST pert_probs = top_k_filter(pert_probs, k=top_k, probs=True) # + SMALL_CONST # rescale if torch.sum(pert_probs) <= 1: pert_probs = pert_probs / torch.sum(pert_probs) else: pert_logits = top_k_filter(pert_logits, k=top_k) # + SMALL_CONST pert_probs = nn.functional.softmax(pert_logits, dim=-1) # sample or greedy if sample: last = torch.multinomial(pert_probs, num_samples=1) else: _, last = torch.topk(pert_probs, k=1, dim=-1) # update context/output_so_far appending the new token output_so_far = last if output_so_far is None else torch.cat((output_so_far, last), dim=1) print(tokenizer.decode(output_so_far.tolist()[0])) return output_so_far, unpert_discrim_loss, loss_in_time def set_generic_model_params(discrim_weights, discrim_meta): if discrim_weights is None: raise ValueError("When using a generic discriminator, discrim_weights need to be specified") if discrim_meta is None: raise ValueError("When using a generic discriminator, discrim_meta need to be specified") with open(discrim_meta, "r") as discrim_meta_file: meta = json.load(discrim_meta_file) meta["path"] = discrim_weights DISCRIMINATOR_MODELS_PARAMS["generic"] = meta def run_pplm_example( pretrained_model="openai-community/gpt2-medium", cond_text="", uncond=False, num_samples=1, bag_of_words=None, discrim=None, discrim_weights=None, discrim_meta=None, class_label=-1, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, seed=0, no_cuda=False, colorama=False, repetition_penalty=1.0, ): # set Random seed torch.manual_seed(seed) np.random.seed(seed) # set the device device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" if discrim == "generic": set_generic_model_params(discrim_weights, discrim_meta) if discrim is not None: pretrained_model = DISCRIMINATOR_MODELS_PARAMS[discrim]["pretrained_model"] print("discrim = {}, pretrained_model set to discriminator's = {}".format(discrim, pretrained_model)) # load pretrained model model = GPT2LMHeadModel.from_pretrained(pretrained_model, output_hidden_states=True) model.to(device) model.eval() # load tokenizer tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) # Freeze GPT-2 weights for param in model.parameters(): param.requires_grad = False # figure out conditioning text if uncond: tokenized_cond_text = tokenizer.encode([tokenizer.bos_token]) 
else: raw_text = cond_text while not raw_text: print("Did you forget to add `--cond_text`? ") raw_text = input("Model prompt >>> ") tokenized_cond_text = tokenizer.encode(tokenizer.bos_token + raw_text) print("= Prefix of sentence =") print(tokenizer.decode(tokenized_cond_text)) print() # generate unperturbed and perturbed texts # full_text_generation returns: # unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation( model=model, tokenizer=tokenizer, context=tokenized_cond_text, device=device, num_samples=num_samples, bag_of_words=bag_of_words, discrim=discrim, class_label=class_label, length=length, stepsize=stepsize, temperature=temperature, top_k=top_k, sample=sample, num_iterations=num_iterations, grad_length=grad_length, horizon_length=horizon_length, window_length=window_length, decay=decay, gamma=gamma, gm_scale=gm_scale, kl_scale=kl_scale, repetition_penalty=repetition_penalty, ) # untokenize unperturbed text unpert_gen_text = tokenizer.decode(unpert_gen_tok_text.tolist()[0]) print("=" * 80) print("= Unperturbed generated text =") print(unpert_gen_text) print() generated_texts = [] bow_word_ids = set() if bag_of_words and colorama: bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer) for single_bow_list in bow_indices: # filtering all words in the list composed of more than 1 token filtered = list(filter(lambda x: len(x) <= 1, single_bow_list)) # w[0] because we are sure w has only 1 item because previous fitler bow_word_ids.update(w[0] for w in filtered) # iterate through the perturbed texts for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts): try: # untokenize unperturbed text if colorama: import colorama pert_gen_text = "" for word_id in pert_gen_tok_text.tolist()[0]: if word_id in bow_word_ids: pert_gen_text += "{}{}{}".format( colorama.Fore.RED, tokenizer.decode([word_id]), colorama.Style.RESET_ALL, ) else: pert_gen_text += tokenizer.decode([word_id]) else: pert_gen_text = tokenizer.decode(pert_gen_tok_text.tolist()[0]) print("= Perturbed generated text {} =".format(i + 1)) print(pert_gen_text) print() except Exception as exc: print("Ignoring error while generating perturbed text:", exc) # keep the prefix, perturbed seq, original seq for each index generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text)) return if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--pretrained_model", "-M", type=str, default="openai-community/gpt2-medium", help="pretrained model name or path to local checkpoint", ) parser.add_argument("--cond_text", type=str, default="The lake", help="Prefix texts to condition on") parser.add_argument("--uncond", action="store_true", help="Generate from end-of-text as prefix") parser.add_argument( "--num_samples", type=int, default=1, help="Number of samples to generate from the modified latents", ) parser.add_argument( "--bag_of_words", "-B", type=str, default=None, help=( "Bags of words used for PPLM-BoW. " "Either a BOW id (see list in code) or a filepath. 
" "Multiple BoWs separated by ;" ), ) parser.add_argument( "--discrim", "-D", type=str, default=None, choices=("clickbait", "sentiment", "toxicity", "generic"), help="Discriminator to use", ) parser.add_argument( "--discrim_weights", type=str, default=None, help="Weights for the generic discriminator", ) parser.add_argument( "--discrim_meta", type=str, default=None, help="Meta information for the generic discriminator", ) parser.add_argument( "--class_label", type=int, default=-1, help="Class label used for the discriminator", ) parser.add_argument("--length", type=int, default=100) parser.add_argument("--stepsize", type=float, default=0.02) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--top_k", type=int, default=10) parser.add_argument("--sample", action="store_true", help="Generate from end-of-text as prefix") parser.add_argument("--num_iterations", type=int, default=3) parser.add_argument("--grad_length", type=int, default=10000) parser.add_argument( "--window_length", type=int, default=0, help="Length of past which is being optimized; 0 corresponds to infinite window length", ) parser.add_argument( "--horizon_length", type=int, default=1, help="Length of future to optimize over", ) parser.add_argument("--decay", action="store_true", help="whether to decay or not") parser.add_argument("--gamma", type=float, default=1.5) parser.add_argument("--gm_scale", type=float, default=0.9) parser.add_argument("--kl_scale", type=float, default=0.01) parser.add_argument("--seed", type=int, default=0) parser.add_argument("--no_cuda", action="store_true", help="no cuda") parser.add_argument("--colorama", action="store_true", help="colors keywords") parser.add_argument( "--repetition_penalty", type=float, default=1.0, help="Penalize repetition. More than 1.0 -> less repetition", ) args = parser.parse_args() run_pplm_example(**vars(args))
transformers/examples/research_projects/pplm/run_pplm.py/0
{ "file_path": "transformers/examples/research_projects/pplm/run_pplm.py", "repo_id": "transformers", "token_count": 13443 }
297
import os from functools import partial from glob import glob import faiss from datasets import Features, Sequence, Value, concatenate_datasets, load_dataset, load_from_disk from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast def split_text(text, n=100, character=" "): """Split the text every ``n``-th occurrence of ``character``""" text = text.split(character) return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)] def split_documents(documents): """Split documents into passages""" titles, texts = [], [] for title, text in zip(documents["title"], documents["text"]): if text is not None: for passage in split_text(text): titles.append(title if title is not None else "") texts.append(passage) return {"title": titles, "text": texts} def embed_update(ctx_encoder, total_processes, device, process_num, shard_dir, csv_path): kb_dataset = load_dataset( "csv", data_files=[csv_path], split="train", delimiter="\t", column_names=["title", "text"] ) kb_dataset = kb_dataset.map( split_documents, batched=True, num_proc=1 ) # if you want you can load already splitted csv. kb_list = [kb_dataset.shard(total_processes, i, contiguous=True) for i in range(total_processes)] data_shrad = kb_list[process_num] arrow_folder = "data_" + str(process_num) passages_path = os.path.join(shard_dir, arrow_folder) context_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained("facebook/dpr-ctx_encoder-multiset-base") ctx_encoder = ctx_encoder.to(device=device) def embed( documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast, device ) -> dict: """Compute the DPR embeddings of document passages""" input_ids = ctx_tokenizer( documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" )["input_ids"] embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} new_features = Features( {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} ) # optional, save as float32 instead of float64 to save space dataset = data_shrad.map( partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=context_tokenizer, device=device), batched=True, batch_size=16, features=new_features, ) dataset.save_to_disk(passages_path) def add_index(shard_dir, index_path): data_shard_list = [] for shard_address in glob(str(shard_dir) + "/*/"): data_shard_list.append(load_from_disk(shard_address)) concat = concatenate_datasets(data_shard_list) faiss.omp_set_num_threads(96) index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT) concat.add_faiss_index("embeddings", custom_index=index) concat.get_index("embeddings").save( index_path ) # since we load the index in to memory,we can directly update the index in the disk
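The two helpers above are meant to be driven from the end2end RAG fine-tuning scripts, but a small standalone driver makes their contract easier to see. The sketch below is hypothetical: the CSV path and shard directory are placeholders, and in the real project each shard would normally be embedded by a separate process/GPU rather than a sequential loop.

```python
# Hypothetical driver: embed a two-column (title \t text) CSV in two shards, then build the FAISS index.
import torch
from transformers import DPRContextEncoder

from kb_encode_utils import add_index, embed_update

ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-multiset-base")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

total_processes = 2  # number of shards; normally one per GPU/process
for process_num in range(total_processes):
    embed_update(
        ctx_encoder,
        total_processes=total_processes,
        device=device,
        process_num=process_num,
        shard_dir="kb_shards",                # placeholder output directory
        csv_path="my_knowledge_dataset.csv",  # placeholder tab-separated KB file
    )

add_index("kb_shards", "my_knowledge_dataset_index.faiss")
```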
transformers/examples/research_projects/rag-end2end-retriever/kb_encode_utils.py/0
{ "file_path": "transformers/examples/research_projects/rag-end2end-retriever/kb_encode_utils.py", "repo_id": "transformers", "token_count": 1231 }
298
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class RagFinetuneExampleTests(TestCasePlus): def _create_dummy_data(self, data_dir): os.makedirs(data_dir, exist_ok=True) contents = {"source": "What is love ?", "target": "life"} n_lines = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: content = "\n".join([contents[field]] * n_lines[split]) with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f: f.write(content) def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"): tmp_dir = self.get_auto_remove_tmp_dir() output_dir = os.path.join(tmp_dir, "output") data_dir = os.path.join(tmp_dir, "data") self._create_dummy_data(data_dir=data_dir) testargs = f""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(f"--gpus={gpus}") if is_apex_available(): testargs.append("--fp16") else: testargs.append("--gpus=0") testargs.append("--distributed_backend=ddp_cpu") testargs.append("--num_processes=2") cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs execute_subprocess_async(cmd, env=self.get_env()) metrics_save_path = os.path.join(output_dir, "metrics.json") with open(metrics_save_path) as f: result = json.load(f) return result @require_torch_gpu def test_finetune_gpu(self): result = self._run_finetune(gpus=1) self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) @require_torch_multi_gpu def test_finetune_multigpu(self): result = self._run_finetune(gpus=2) self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) @require_torch_gpu @require_ray def test_finetune_gpu_ray_retrieval(self): result = self._run_finetune(gpus=1, distributed_retriever="ray") self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) @require_torch_multi_gpu @require_ray def test_finetune_multigpu_ray_retrieval(self): result = self._run_finetune(gpus=1, distributed_retriever="ray") self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
transformers/examples/research_projects/rag/_test_finetune_rag.py/0
{ "file_path": "transformers/examples/research_projects/rag/_test_finetune_rag.py", "repo_id": "transformers", "token_count": 1994 }
299
# Robust Speech Challenge 🤗 Welcome to the robust speech recognition challenge 🎙️ ! The goal of this event is to build **robust**, **real-world** speech recognition (ASR) systems in as many languages as possible 🌏🌍🌎. If necessary and available, free access to a V100S 32 GB GPU will kindly be provided by the [OVHcloud team](https://www.ovhcloud.com/) 🚀. This document summarizes all the relevant information required for the speech community event 📋. To sign-up, please see [this forum post](https://discuss.huggingface.co/t/open-to-the-community-robust-speech-recognition-challenge/13614) 🤗. Please make sure to: - Read it in detail - Fill the google form - Join our Discord server in the #join-sprint channel. ## Table of Contents - [TLDR;](#tldr) - [Important dates](#important-dates) - [How to install pytorch, transformers, datasets](#how-to-install-relevant-libraries) - [Data and Preprocessing](#data-and-preprocessing) - [How to fine-tune an acoustic model](#how-to-finetune-an-acoustic-model) - [How to fine-tune with OVH could](#how-to-finetune-with-ovh-cloud) - [How to combine n-gram language models with acoustic model](#how-to-combine-n-gram-with-acoustic-model) - [Evaluation](#evaluation) - [Prizes](#prizes) - [Communication and Problems](#communication-and-problems) - [Talks](#talks) - [General Tips & Tricks](#general-tips-and-tricks) ## TLDR Participants are encouraged to leverage pre-trained speech recognition checkpoints, preferably [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53), to train a speech recognition system in a language of their choice. Speech recognition systems should be trained using **PyTorch**, **🤗 Transformers**, and, **🤗 Datasets**. For more information on how to install the above libraries, please read through [How to install pytorch, transformers, datasets](#how-to-install-relevant-libraries). Participants can make use of whatever data they think is useful to build a speech recognition system for **real-world** audio data - **except** the Common Voice `"test"` split of their chosen language. The section [Data and preprocessing](#data-and-preprocessing) explains in more detail what audio data can be used, how to find suitable audio data, and how the audio data can be processed. For training, it is recommended to use the [official training script](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py) or a modification thereof. A step-by-step guide on how to fine-tune an acoustic model for a speech recognition system can be found under [How to fine-tune an acoustic model](#how-to-finetune-an-acoustic-model). If possible it is encouraged to fine-tune the acoustic models on local GPU machines, but if those are not available, the OVH could team kindly provides a limited number of GPUs for the event. Simply fill out [this google form](https://forms.gle/GFZkMkKLiufi75g28) to get access to a GPU. For more information on how to train an acoustic model on one of OVH's GPU - see [How to fine-tune a speech recognition model with OVHcould](#how-to-fine-tune-with-ovh-cloud). The performance of speech recognition system can often significantly be improved by adding a language model for decoding. For more information on how to add a language model, please take a look at [How to combine n-gram language models with speech recognition models](#how-to-combine-n-gram-with-model). 
During the event, the speech recognition system will be evaluated on both the Common Voice `"test"` split of the participants' chosen language as well as the *real-world* `"dev"` data provided by the Hugging Face team. At the end of the robust speech recognition challenge, the speech recognition system will also be evaluated on the *real-world* `"test"` data provided by the Hugging Face team. Each participant should add an `eval.py` script to her/his model repository in a specific format that lets one easily evaluate the speech recognition system on both Common Voice's `"test"` data as well as the *real-world* audio data. Please read through the [Evaluation](#evaluation) section to make sure your evaluation script is in the correct format. Speech recognition systems with evaluation scripts in an incorrect format can sadly not be considered for the Challenge. At the end of the event, the best performing speech recognition system will receive a prize 🏆 - more information regarding the prizes can be found under [Prizes](#prizes). We believe that framing the event as a competition is more fun, but at the core, the event is about creating speech recognition systems in as many languages as possible as a community. This can be achieved by working together, helping each other to solve bugs, share important findings, etc...🤗 **Note**: Please, read through the section on [Communication & Problems](#communication-and-problems) to make sure you know how to ask for help, etc... All important announcements will be made on discord. Please make sure that you've joined [this discord channel](https://discord.gg/SHr5wC7m) Also, please make sure that you have been added to the [Speech Event Organization](https://huggingface.co/speech-recognition-community-v2). You should have received an invite by email. If you didn't receive an invite, please contact the organizers, *e.g.* Anton, Patrick, or Omar directly on discord. ## Important dates ![timeline](https://github.com/patrickvonplaten/scientific_images/raw/master/Robush%20Speech%20Challenge.png) ## Data and preprocessing In this section, we will quickly go over how to find suitable training data and how to preprocess it. To begin with, **all data except Common Voice's `"test"` data can be used as training data.** The exception includes all Common Voice versions as the test data split of later Common Voice versions often overlaps with the one of previous versions, *e.g.* the test data of Common Voice 7 in English is to a big part identical to the test data of Common Voice 6 in English: ```python load_dataset("mozilla-foundation/common_voice_7_0", "en", split="test") ``` includes more or less the same data as ```python load_dataset("mozilla-foundation/common_voice_6_1", "en", split="test") ``` However, we strongly encourage participants to make use of Common Voice's other splits, *e.g.* `"train"` and `"validation"`. For most languages, the Common Voice dataset offers already a decent amount of training data. It is usually always advantageous to collect additional data. To do so, the participants are in a first step encouraged to search the Hugging Face Hub for additional audio data, for example by selecting the category ["speech-processing"](https://huggingface.co/datasets?task_categories=task_categories:speech-processing&sort=downloads). All datasets that are available on the Hub can be downloaded via the 🤗 Datasets library in the same way Common Voice is downloaded. 
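To make the "use everything except `test`" rule concrete, the snippet below sketches how the usable Common Voice splits can be pulled and merged with 🤗 Datasets. The language config `"sv-SE"` is just an example; pick the config of your chosen language, and note that the dataset requires accepting the access conditions on the Hub first.

```python
from datasets import concatenate_datasets, load_dataset

# Allowed for training: "train" and "validation" (plus any extra non-test data you find).
common_voice_train = load_dataset(
    "mozilla-foundation/common_voice_7_0", "sv-SE", split="train", use_auth_token=True
)
common_voice_validation = load_dataset(
    "mozilla-foundation/common_voice_7_0", "sv-SE", split="validation", use_auth_token=True
)
train_data = concatenate_datasets([common_voice_train, common_voice_validation])

# The "test" split may only be used for evaluation, never for training.
common_voice_test = load_dataset(
    "mozilla-foundation/common_voice_7_0", "sv-SE", split="test", use_auth_token=True
)
```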
If one wants to combine multiple datasets for training, it might make sense to take a look at the [`interleave_datasets`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=interleave#datasets.interleave_datasets) function. In addition, participants can also make use of their audio data. Here, please make sure that you **are allowed to use the audio data**. E.g., if audio data is taken from media platforms, such as YouTube, it should be verified that the media platform and the owner of the data have given her/his approval to use the audio data in the context of machine learning research. If you are not sure whether the data you want to use has the appropriate licensing, please contact the Hugging Face team on discord. Next, let's talk about preprocessing. Audio data and transcriptions have to be brought into the correct format when training the acoustic model (example shown in [How to fine-tune an acoustic model](#how-to-finetune-an-acoustic-model)). It is recommended that this is done by using 🤗 Datasets `.map()` function as shown [here](https://github.com/huggingface/transformers/blob/9a2dabae7002258e41419491c73dd43ad61b5de7/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py#L444). As can be see we can pass some characters that will be removed from the transcriptions, *e.g.*: `--chars_to_ignore , ? . ! - \; \: \" “ % ‘ ” � \` on the official ["Single GPU Example"](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition#single-gpu-ctc). The participants are free to modify this preprocessing by removing more characters or even replacing characters as it is done in the [official blog post](https://github.com/huggingface/transformers/blob/9a2dabae7002258e41419491c73dd43ad61b5de7/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py#L444). **However**, there are some rules regarding what characters are allowed to be removed/replaced and which are not. These rules are not this straightforward and therefore often have to be evaluated case-by-case. It is allowed (and recommended) to normalize the data to only have lower-case characters. It is also allowed (and recommended) to remove typographical symbols and punctuation marks. A list of such symbols can *e.g.* be found [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks) - however here we already must be careful. We should **not** remove a symbol that would change the meaning of the words, *e.g.* in English, we should not remove the single quotation mark `'` since it would change the meaning of the word `"it's"` to `"its"` which would then be incorrect. So the golden rule here is to not remove any characters that could change the meaning of a word into another word. This is not always obvious and should be given some consideration. As another example, it is fine to remove the "Hyphen-minus" sign "`-`" since it doesn't change the meaning of a word to another one. *E.g.* "`fine-tuning`" would be changed to "`finetuning`" which has still the same meaning. Since those choices are not always obvious when in doubt feel free to ask on Discord or even better post your question on the forum, as was done, *e.g.* [here](https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586). ## How to install relevant libraries The following libraries are required to fine-tune a speech model with 🤗 Transformers and 🤗 Datasets in PyTorch. 
- [PyTorch](https://pytorch.org/) - [Transformers](https://github.com/huggingface/transformers) - [Datasets](https://github.com/huggingface/datasets) We recommend installing the above libraries in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going to use and activate it. You should be able to run the command: ```bash python3 -m venv <your-venv-name> ``` You can activate your venv by running ```bash source ~/<your-venv-name>/bin/activate ``` To begin with please make sure you have PyTorch and CUDA correctly installed. The following command should return ``True``: ```bash python -c "import torch; print(torch.cuda.is_available())" ``` If the above command doesn't print ``True``, in the first step, please follow the instructions [here](https://pytorch.org/) to install PyTorch with CUDA. We strongly recommend making use of the provided PyTorch examples scripts in [transformers/examples/pytorch/speech-recognition](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition) to train your speech recognition system. In all likelihood, you will adjust one of the example scripts, so we recommend forking and cloning the 🤗 Transformers repository as follows. 1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your fork to your local disk, and add the base repository as a remote: ```bash $ git clone https://github.com/<your Github handle>/transformers.git $ cd transformers $ git remote add upstream https://github.com/huggingface/transformers.git ``` 3. Create a new branch to hold your development changes. This is especially useful to share code changes with your team: ```bash $ git checkout -b a-descriptive-name-for-my-project ``` 4. Set up a PyTorch environment by running the following command your virtual environment: ```bash $ pip install -e ".[torch-speech]" ``` (If transformers was already installed in the virtual environment, remove it with `pip uninstall transformers` before reinstalling it in editable mode with the `-e` flag.) If you have already cloned that repo, you might need to `git pull` to get the most recent changes in the `transformers` library. Running this command will automatically install `torch` and the most relevant libraries required for fine-tuning a speech recognition system. Next, you should also install the 🤗 Datasets library. We strongly recommend installing the library from source to profit from the most current additions during the community week. Simply run the following steps: ```bash $ cd ~/ $ git clone https://github.com/huggingface/datasets.git $ cd datasets $ pip install -e ".[streaming]" ``` If you plan on contributing a specific dataset during the community week, please fork the datasets repository and follow the instructions [here](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-create-a-pull-request). To verify that all libraries are correctly installed, you can run the following command in a Python shell. It verifies that both `transformers` and `datasets` have been correclty installed. 
```python from transformers import AutoModelForCTC, AutoProcessor from datasets import load_dataset dummy_dataset = load_dataset("common_voice", "ab", split="test") model = AutoModelForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") model.to("cuda") processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") input_values = processor(dummy_dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=16_000).input_values input_values = input_values.to("cuda") logits = model(input_values).logits assert logits.shape[-1] == 32 ``` ## How to finetune an acoustic model In this section, we show you how to fine-tune a pre-trained [XLS-R Model](https://huggingface.co/docs/transformers/model_doc/xls_r) on the [Common Voice 7 dataset](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). We recommend fine-tuning one of the following pre-trained XLS-R checkpoints: - [300M parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-300m) - [1B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-1b) - [2B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-2b) To begin with, please note that to use the Common Voice dataset, you have to accept that **your email address** and **username** are shared with the mozilla-foundation. To get access to the dataset please click on "*Access repository*" [here](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). Next, we recommended that you get familiar with the XLS-R model and its capabilities. In collaboration with [Fairseq's Wav2Vec2 team](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec), we've written ["Fine-tuning XLS-R for Multi-Lingual ASR with 🤗 Transformers"](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) which gives an in-detail explanation of how XLS-R functions and how it can be fine-tuned. The blog can also be opened and directly fine-tuned in a google colab notebook. In this section, we will explain how to fine-tune the model on a local machine. 1. **Log in** To begin with, you should check that you are correctly logged in and that you have `git-lfs` installed so that your fine-tuned model can automatically be uploaded. Run: ```bash huggingface-cli login ``` to login. It is recommended to login with your access token that can be found under your hugging face profile (icon in the top right corner on [hf.co](http://hf.co/), then Settings -> Access Tokens -> User Access Tokens -> New Token (if haven't generated one already) You can then copy-paste this token to log in locally. 2. **Create your model repository** First, let's make sure that `git-lfs` is correctly installed. To so, simply run: ```bash git-lfs -v ``` The output should show something like `git-lfs/2.13.2 (GitHub; linux amd64; go 1.15.4)`. If your console states that the `git-lfs` command was not found, please make sure to install it [here](https://git-lfs.github.com/) or simply via: ```bash sudo apt-get install git-lfs ``` Now you can create your model repository which will contain all relevant files to reproduce your training. You can either directly create the model repository on the Hub (Settings -> New Model) or via the CLI. Here we choose to use the CLI instead. Assuming that we want to call our model repository *xls-r-ab-test*, we can run the following command: ```bash huggingface-cli repo create xls-r-ab-test ``` You can now see the model on the Hub, *e.g.* under https://huggingface.co/hf-test/xls-r-ab-test . 
Let's clone the repository so that we can define our training script inside. ```bash git lfs install git clone https://huggingface.co/hf-test/xls-r-ab-test ``` 3. **Add your training script and `run`-command to the repository** We encourage participants to add all relevant files for training directly to the directory so that everything is fully reproducible. Let's first copy-paste the official training script from our clone of `transformers` to our just created directory: ```bash cp ~/transformers/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py ./ ``` Next, we'll create a bash file to define the hyper-parameters and configurations for training. More detailed information on different settings (single-GPU vs. multi-GPU) can be found [here](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition#connectionist-temporal-classification). For demonstration purposes, we will use a dummy XLS-R model `model_name_or_path="hf-test/xls-r-dummy"` on the very low-resource language of "Abkhaz" of [Common Voice 7](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0): `dataset_config_name="ab"` for just a single epoch. Before starting to train, let's make sure we have installed all the required libraries. You might want to run: ```bash pip install -r ~/transformers/examples/pytorch/speech-recognition/requirements.txt ``` Alright, finally we can define the training script. We'll simply use some dummy hyper-parameters and configurations for demonstration purposes. Note that we add the flag `--use_auth_token` so that datasets requiring access, such as [Common Voice 7](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0) can be downloaded. In addition, we add the `--push_to_hub` flag to make use of the [Trainers `push_to-hub` functionality](https://huggingface.co/docs/transformers/main/en/main_classes/trainer#transformers.Trainer.push_to_hub) so that your model will be automatically uploaded to the Hub. Let's copy the following code snippet in a file called `run.sh` ```bash echo '''python run_speech_recognition_ctc.py \ --dataset_name="mozilla-foundation/common_voice_7_0" \ --model_name_or_path="hf-test/xls-r-dummy" \ --dataset_config_name="ab" \ --output_dir="./" \ --overwrite_output_dir \ --max_steps="10" \ --per_device_train_batch_size="2" \ --learning_rate="3e-4" \ --save_total_limit="1" \ --eval_strategy="steps" \ --text_column_name="sentence" \ --length_column_name="input_length" \ --save_steps="5" \ --layerdrop="0.0" \ --freeze_feature_encoder \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --push_to_hub \ --use_auth_token \ --do_train --do_eval''' > run.sh ``` 4. **Start training** Now all that is left to do is to start training the model by executing the run file. ```bash bash run.sh ``` The training should not take more than a couple of minutes. During the training intermediate saved checkpoints are automatically uploaded to your model repository as can be seen [on this commit](https://huggingface.co/hf-test/xls-r-ab-test/commit/0eb19a0fca4d7d163997b59663d98cd856022aa6) . At the end of the training, the [Trainer](https://huggingface.co/docs/transformers/main/en/main_classes/trainer) automatically creates a nice model card and all relevant files are uploaded. 5. **Tips for real model training** The above steps illustrate how a model can technically be fine-tuned. 
However as you can see on the model card [hf-test/xls-r-ab-test](https://huggingface.co/hf-test/xls-r-ab-test), our demonstration has a very poor performance which is not surprising given that we trained for just 10 steps on a randomly initialized model. For real model training, it is recommended to use one of the actual pre-trained XLS-R models: - [300M parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-300m) - [1B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-1b) - [2B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-2b) Also, the hyper-parameters should be carefully chosen depending on the dataset. As an example, we will fine-tune the 300M parameters model on Swedish on a single TITAN RTX 24GB GPU. The model will be called `"xls-r-300m-sv"`. Following the above steps we first create the model: ```bash huggingface-cli repo create xls-r-300m-sv ``` , clone it locally (assuming the `<username>` is `hf-test`) ```bash git clone hf-test/xls-r-300m-sv ``` , and, define the following hyperparameters for training ```bash echo '''python run_speech_recognition_ctc.py \ --dataset_name="mozilla-foundation/common_voice_7_0" \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ --dataset_config_name="sv-SE" \ --output_dir="./" \ --overwrite_output_dir \ --num_train_epochs="50" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="8" \ --gradient_accumulation_steps="4" \ --learning_rate="7.5e-5" \ --warmup_steps="2000" \ --length_column_name="input_length" \ --eval_strategy="steps" \ --text_column_name="sentence" \ --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \ --save_steps="500" \ --eval_steps="500" \ --logging_steps="100" \ --layerdrop="0.0" \ --activation_dropout="0.1" \ --save_total_limit="3" \ --freeze_feature_encoder \ --feat_proj_dropout="0.0" \ --mask_time_prob="0.75" \ --mask_time_length="10" \ --mask_feature_prob="0.25" \ --mask_feature_length="64" \ --gradient_checkpointing \ --use_auth_token \ --fp16 \ --group_by_length \ --do_train --do_eval \ --push_to_hub''' > run.sh ``` The training takes *ca.* 7 hours and yields a reasonable test word error rate of 27% as can be seen on the automatically generated [model card](https://huggingface.co/hf-test/xls-r-300m-sv). The above-chosen hyperparameters probably work quite well on a range of different datasets and languages but are by no means optimal. It is up to you to find a good set of hyperparameters. ## How to finetune with OVH cloud [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/XkMnYocAEO0) For a more detailed guide on setting up OVHcloud please watch this video: https://youtu.be/XkMnYocAEO0 ### Creating an OVHCloud account *TIP*: If you haven't created a project on OVHcloud yet, make sure you've received your GPU voucher code *beforehand*, so that you can skip entering the credit card information. 1. If you're a US citizen, create an account via [OVHcloud.CA](https://ovhcloud.ca/). If you're from anywhere else in the world, create an account via [OVHcloud.COM](https://ovhcloud.com/). 2. Once logged in, click `Public Cloud` from the top menu and then click `Create your first OVH Public Cloud project`. Then enter a project name (e.g. "huggingface"), enter your voucher code, and click `Continue` -> `Create my project`. *Note: if you see a request for credit card details during the last step, and you can't skip it, then your voucher code is invalid. 
Please report it to the [#ovh-support](https://discord.gg/p4qqDV3M) channel on Discord.* ### Setting up an AI notebook 1. Go to the `Public Cloud` page and select `Project Management` -> `Users & Roles` from the menu on the left. 2. Click `+ Add user`. Write a user description (e.g. `AI Trainer`), and select an `AI Training Operator` user role. Click `Confirm`. 3. Write down the *username* and *password* (at the top of the screen) somewhere. They will be needed during step 7. 4. Select `AI & Machine Learning` -> `AI Training` from the menu on the left. Click `+ Launch a new job` on the AI Training page. 5. On the `Launch a new job` page: * In `1. Choose a region` select a region closest to you. * In `2. Enter the Docker image` select `Custom image` -> `baaastijn/ovh_huggingface`. * You can skip steps `3.` and `4.` if you will be using the Hugging Face Hub to store the models after training. * In `5. Configure your job` select **1** `GPU`. * Validate the info and Create the job. 6. On the `AI Training Jobs` screen wait until the job's status changes from `Pending` to `Running`. 7. Click `HTTP Access` from the Job's details page and log in with the AI training user you've created earlier. Once logged in, you can close the page and click `HTTP Access` to launch a JupyterLab notebook. 8. Awesome, now you have a free GPU-enabled Jupyter instance! **Note**: If you're an experienced Docker user, feel free to create a custom docker image with all of the needed packages like the one in step 5. The Dockerfile for it is available here: [baaastijn/Dockerimages](https://github.com/baaastijn/Dockerimages/tree/main/Hugginface_challenge_speech). Once you've built your image, push it to https://hub.docker.com/ and select it during the OVHcloud job creation. For more quick tutorials about OVHcloud AI products, check out the showcase https://vimeo.com/showcase/8903300 ## How to combine n-gram with acoustic model Having trained a speech recognition model with CTC as shown in the section above, one can further improve the model's performance by adding an **n-gram language model** to the decoding process of the model. By doing so, we are replacing the naive greedy decoding with **n-gram-boosted** beam search decoding. N-gram language models can be built on CPU in just a few minutes. *N-gram-boosted* beam search decoding noticeably slows down the inference time, but also yields significant word error rates improvements - usually between 10-40 %. You can find an in-detail blog post on how to build an *n-gram* [here](https://huggingface.co/blog/wav2vec2-with-ngram). The blog post can be opened in a google colab and by adapting three lines of the example for your use case, one can directly create an *n-gram* in the google colab. The blog post gives in-detail instructions on how to build an n-gram and how to add it to your trained speech recognition model. - why one should add an *n-gram* to her/his speech recognition system, - how to build an *n-gram*, and, - how to add the built *n-gram* the speech recognition system for seamless decoding Our previously trained model - [xls-r-300m-sv](https://huggingface.co/hf-test/xls-r-300m-sv) - enjoys a 30% word error rate reduction after having added an n-gram. As shown in the example of the blog post, we strongly advise participants to upload all files required for combining the *n-gram* with a trained speech recognition model directly into the same model repository. 
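For orientation, here is a condensed sketch of the final step described in the blog post: wrapping a trained CTC model's tokenizer together with a KenLM n-gram into a `Wav2Vec2ProcessorWithLM`. The n-gram file name `5gram.arpa` is a placeholder for whatever language model you built, and the repository id is our Swedish example from above.

```python
from pyctcdecode import build_ctcdecoder
from transformers import AutoProcessor, Wav2Vec2ProcessorWithLM

processor = AutoProcessor.from_pretrained("hf-test/xls-r-300m-sv")

# pyctcdecode expects the CTC labels sorted by their vocabulary index.
vocab_dict = processor.tokenizer.get_vocab()
sorted_vocab = [token for token, _ in sorted(vocab_dict.items(), key=lambda item: item[1])]

decoder = build_ctcdecoder(
    labels=sorted_vocab,
    kenlm_model_path="5gram.arpa",  # placeholder: your KenLM n-gram
)

processor_with_lm = Wav2Vec2ProcessorWithLM(
    feature_extractor=processor.feature_extractor,
    tokenizer=processor.tokenizer,
    decoder=decoder,
)
# Saving into the model repository makes the n-gram available for boosted decoding.
processor_with_lm.save_pretrained("xls-r-300m-sv")
```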
## Evaluation Finally, we have arrived at the most fun part of the challenge - sitting back and watching the model transcribe audio. If possible, every participant should evaluate the speech recognition system on the test set of Common Voice 7 and ideally also on the real-world audio data (if available). For languages that have neither a Common Voice evaluation dataset nor a real world evaluation dataset, please contact the organizers on Discord so that we can work together to find some evaluation data. As a first step, one should copy the official `eval.py` script to her/his model repository. Let's use our previously trained [xls-r-300m-sv](https://huggingface.co/hf-test/xls-r-300m-sv) again as an example. Assuming that we have a clone of the model's repo under `~/xls-r-300m-sv`, we can copy the `eval.py` script to the repo. ```bash cp ~/transformers/examples/research_projects/robust-speech-event/eval.py ~/xls-r-300m-sv ``` Next, we should adapt `eval.py` so that it fits our evaluation data. Here it is important to keep the `eval.py` file in the following format: - 1. The following input arguments should not be changed and keep their original functionality/meaning (being to load the model and dataset): `"--model_id"`, `"--dataset"`, `"--config"`, `"--split"`. We recommend to not change any of the code written under `if __name__ == "__main__":`. - 2. The function `def log_results(result: Dataset, args: Dict[str, str])` should also not be changed. The function expects the above names attached to the `args` object as well as a `datasets.Dataset` object, called `result` which includes all predictions and target transcriptions under the names `"predictions"` and `"targets"` respectively. - 3. All other code can be changed and adapted. Participants are especially invited to change the `def normalize_text(text: str) -> str:` function as this might be a very language and model-training specific function. - 4. **Important**: It is not allowed to "cheat" in any way when in comes to pre-and postprocessing. In short, "cheating" refers to any of the following: - a. Somehow giving the model access to the target transcriptions to improve performance. The model is not allowed to use the target transcriptions to generate its predictions. - b. Pre-processing the target transcriptions in a way that makes the target transcriptions lose their original meaning. This corresponds to what has already been said in [Data and Preprocessing](#data-and-preprocessing) and is somewhat of a grey zone. It means that one should not remove characters that would make a word to lose its meaning. E.g., it is not allowed to replace all `e` in English with `i` and simply make the model learn that `e` and `i` are the same letter for a better word error rate. This would destroy the meaning of words such as `fell -> fill`. However, it is totally fine to normalize (*e.g.* lowercase) all letters, remove punctuation. There can be a lot of language-specific exceptions and in case you are not sure whether your target transcription pre-processing is allowed, please ask on the Discord channel. Uff, that was a lot of text describing how to make sure your `eval.py` script is in the correct format. If you have any questions, please ask openly in Discord. Great, now that we have adapted the `eval.py` script, we can lean back and run the evaluation. First, one should evaluate the model on Common Voice 7's test data. 
Evaluating on Common Voice 7's test data might already have been done for your acoustic model during training, but in case you added an *n-gram* language model after having fine-tuned the acoustic model, you should now see a nice improvement.

The command to evaluate our test model [xls-r-300m-sv](https://huggingface.co/hf-test/xls-r-300m-sv) on Common Voice 7's test data is the following:

```bash
cd xls-r-300m-sv
./eval.py --model_id ./ --dataset mozilla-foundation/common_voice_7_0 --config sv-SE --split test --log_outputs
```

To log each of the model's predictions with the target transcriptions, you can just add the `--log_outputs` flag. Running this command should automatically create the file `mozilla-foundation_common_voice_7_0_sv-SE_test_eval_results.txt`, which contains both the word error rate and the character error rate.

In a few days, we will give everybody access to some real-world audio data for as many languages as possible. If your language has real-world audio data, it will most likely have audio input of multiple minutes. 🤗 Transformers' [ASR pipeline](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) supports audio chunking out-of-the-box. You only need to specify how long each audio chunk should be (`chunk_length_s`) and how much audio stride (`stride_length_s`) each chunk should use. For more information on how the chunking works, please have a look at [this nice blog post](TODO: ). A short Python sketch of this chunked decoding is given at the end of this section.

In the case of `xls-r-300m-sv`, the following command can be run:

```bash
cd xls-r-300m-sv
./eval.py --model_id hf-test/xls-r-300m-sv --dataset <to-be-announced> --config sv --split validation --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs
```

Great, now you should have successfully evaluated your model. Finally, there is one **important** thing you should do so that your model is taken into account for the final evaluation: you should add two tags to your model, one being `robust-speech-event`, the other being the ISO code of your chosen language, *e.g.* `"sv"` for the exemplary model we used above. You can find a list of all available languages and their ISO codes [here](https://huggingface.co/languages).

To add the tags, simply edit the README.md of your model repository and add

```
- "sv"
- "robust-speech-event"
```

under `tags:` as done [here](https://huggingface.co/hf-test/xls-r-300m-sv/commit/a495fd70c96bb7d019729be9273a265c2557345e).

To verify that you've added the tags correctly, make sure that your model appears when clicking on [this link](https://huggingface.co/models?other=robust-speech-event).

Great, that's it! This should give you all the necessary information to evaluate your model. For the final evaluation, we will verify each evaluation result to determine the final score and thereby the winning models for each language. The final score is calculated as follows:

```bash
FINAL_SCORE = 1/3 * WER_Common_Voice_7_test + 1/3 * WER_REAL_AUDIO_DEV + 1/3 * WER_REAL_AUDIO_TEST
```

The test data behind `WER_REAL_AUDIO_TEST` is hidden and will only be published at the end of the robust speech challenge. If there is no real audio data for your language, the final score will be computed solely based on the Common Voice 7 test dataset. If there is also no Common Voice 7 test dataset for your language, we will see together how to score your model - if this is the case, please don't be discouraged. We are especially excited about speech recognition systems for such low-resource languages and will make sure that we decide on a good approach to evaluating your model.
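As a side note to the chunking discussion above, the same chunked decoding can also be driven directly from Python through the ASR pipeline. The snippet below is only a sketch - the model id mirrors the example above, the audio path is a placeholder, and the chunk/stride values simply copy the flags from the CLI command shown earlier.

```python
from transformers import pipeline

# The pipeline cuts long audio into 5 s chunks with 1 s of striding context on each
# side, transcribes each chunk, and stitches the chunk transcriptions back together.
asr = pipeline(
    "automatic-speech-recognition",
    model="hf-test/xls-r-300m-sv",
    chunk_length_s=5.0,
    stride_length_s=1.0,
)

prediction = asr("path/to/long_real_world_audio.wav")
print(prediction["text"])
```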
## Prizes

TODO(Patrick, Omar, ...)

## Communication and Problems

If you encounter any problems or have any questions, you should use one of the following platforms depending on your type of problem. Hugging Face is an "open-source-first" organization, meaning that we'll try to solve all problems in the most public and most transparent way possible so that everybody in the community profits.

The following list summarizes what platform to use for which problem.

- Problem/question/bug with the 🤗 Datasets library that you think is a general problem that also impacts other people, please open an [issue on Datasets](https://github.com/huggingface/datasets/issues/new?assignees=&labels=bug&template=bug-report.md&title=) and ping @anton-l and @patrickvonplaten.
- Problem/question/bug with the 🤗 Transformers library that you think is a general problem that also impacts other people, please open an [issue on Transformers](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title=) and ping @anton-l and @patrickvonplaten.
- Problem/question with a modified, customized training script that is less likely to impact other people, please post your problem/question [on the forum](https://discuss.huggingface.co/) and ping @anton-l and @patrickvonplaten.
- Questions regarding access to the OVHcloud GPU, please ask in the Discord channel **#ovh-support**.
- Other questions regarding the event, rules of the event, or if you are not sure where to post your question, please ask in the Discord channel **#sprint-discussions**.

## Talks

We are very excited to be hosting 2 days of talks from Kensho-Technologies, Mozilla's Common Voice, Meta AI Research and Hugging Face.

### Thursday, January 20th

Speaker | Topic | Time | Video |
|-------------|---------------------------------|------------------------|------------------------|
| Patrick von Platen, Hugging Face | Introduction to Robust Speech Challenge | 4h30pm - 5h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=X9e5Tto-Iuk) |
| Raymond Grossman and Jeremy Lopez, Kensho-Technologies | Pyctcdecode & Speech2text decoding | 5h30pm - 6h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=mp7fHMTnK9A) |

### Friday, January 21st

Speaker | Topic | Time | Video |
|-------------|---------------------------------|------------------------|------------------------|
| Gabriel Habayeb, Mozilla Common Voice | Unlocking global speech with Mozilla Common Voice | 4h30pm - 5h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=Vvn984QmAVg) |
| Changhan Wang, Meta AI Research | XLS-R: Large-Scale Cross-lingual Speech Representation Learning on 128 Languages | 5h30pm - 6h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=ic_J7ZCROBM) |

### Talks & Speakers

#### Patrick von Platen, Research Engineer, Hugging Face

- Talk: Introduction to Robust Speech Challenge
- Abstract: In this talk, Patrick outlines the Robust Speech Challenge and gives tips and tricks on how to train and evaluate speech recognition systems with 🤗 Transformers, 🤗 Datasets, and PyTorch.
- Speaker info: Patrick von Platen is a research engineer at Hugging Face and one of the core maintainers of the popular Transformers library.
  He specializes in speech recognition, encoder-decoder models, and long-range sequence modeling. Before joining Hugging Face, Patrick researched speech recognition at Uber AI, Cambridge University, and RWTH Aachen University.

#### Raymond Grossman and Jeremy Lopez, Machine Learning Engineers, Kensho Technologies

- Talk: PyCTCDecode & Speech2text decoding
- Abstract: PyCTCDecode is a fast and feature-rich CTC beam search decoder for speech recognition written in Python, providing n-gram (kenlm) language model support similar to PaddlePaddle's decoder, but incorporating many new features such as byte pair encoding and real-time decoding to support models like Nvidia's Conformer-CTC or Facebook's Wav2Vec2.
- Speaker info:
  - Raymond works as a machine learning engineer at Kensho Technologies, specializing in speech and natural language domains. Before coming to Kensho, he studied mathematics at Princeton and was an avid Kaggler under the moniker @ToTrainThemIsMyCause.
  - Jeremy is a machine learning engineer at Kensho Technologies and has worked on a variety of different topics including search and speech recognition. Before working at Kensho, he earned a PhD in experimental particle physics at MIT and continued doing physics research as a postdoc at the University of Colorado Boulder.

#### Gabriel Habayeb, Data Engineer, Common Voice @ Mozilla

- Talk: Unlocking global speech with Mozilla Common Voice
- Abstract: Hear from Common Voice Data Engineer Gabriel Habayeb (Mozilla Foundation) as he talks about how Common Voice makes it easy to crowdsource voice data in global languages, as well as getting key insights into the dataset itself, how we maintain quality, use metadata - and our plans for the future!
- Speaker info: Gabriel is a software developer with the Common Voice team at the Mozilla Foundation with a focus on data engineering. Before joining the Foundation, he spent six years working across different industries, including education, enterprise and not-for-profit organizations.

#### Changhan Wang, Main author of XLS-R and Research Engineer, Meta AI Research

- Talk: XLS-R: Large-Scale Cross-lingual Speech Representation Learning on 128 Languages
- Abstract: In this talk, Changhan will present XLS-R, a large-scale model for cross-lingual speech representation learning based on wav2vec 2.0. XLS-R has up to 2B parameters and was trained on nearly half a million hours of publicly available speech audio in 128 languages, an order of magnitude more public data than the largest known prior work. On the CoVoST-2 speech translation benchmark, XLS-R improves the previous state of the art by an average of 7.4 BLEU over 21 translation directions into English. For speech recognition, XLS-R improves over the best known prior work on BABEL, MLS, CommonVoice as well as VoxPopuli, lowering error rates by 14-34% relative on average. XLS-R also sets a new state of the art on VoxLingua107 language identification. The XLS-R team hopes to work together with the open-source community to improve speech processing tasks for many more languages of the world.

## General Tips and Tricks

- Memory-efficient training: In case you are getting out-of-memory errors on your GPU, we recommend using [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) to replace the native, memory-intensive Adam optimizer with the one from `bitsandbytes`. You can simply run the script `./run_speech_recognition_ctc_bnb.py` provided in this folder, which makes use of `bitsandbytes` instead of the official optimizer. A short sketch of what this optimizer swap looks like is given after this list.
- Dataset streaming TODO(Patrick)
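To make the memory-saving tip above more concrete, the following sketch shows roughly what `run_speech_recognition_ctc_bnb.py` does differently: the optimizer handed to the `Trainer` is the 8-bit Adam from `bitsandbytes` instead of the default one. This is a simplified illustration - the helper function name, the parameter grouping, and the exact arguments are assumptions, so refer to the actual script for the full setup.

```python
import bitsandbytes as bnb
from transformers import Trainer, TrainingArguments


def build_trainer_with_8bit_adam(model, training_args: TrainingArguments, train_dataset, eval_dataset) -> Trainer:
    """Create a Trainer whose Adam optimizer state is kept in 8-bit to save GPU memory."""
    optimizer = bnb.optim.Adam8bit(
        params=model.parameters(),
        lr=training_args.learning_rate,
        betas=(training_args.adam_beta1, training_args.adam_beta2),
        eps=training_args.adam_epsilon,
    )
    # Passing (optimizer, None) makes the Trainer use this optimizer while still
    # creating its usual learning rate scheduler.
    return Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        optimizers=(optimizer, None),
    )
```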
transformers/examples/research_projects/robust-speech-event/README.md/0
{ "file_path": "transformers/examples/research_projects/robust-speech-event/README.md", "repo_id": "transformers", "token_count": 12253 }
300
#!/usr/bin/env bash
export PYTHONPATH="../":"${PYTHONPATH}"

export WANDB_PROJECT=dmar
# export MAX_LEN=128
python distillation.py \
  --learning_rate=3e-4 \
  --do_train \
  --fp16 \
  --val_check_interval 0.25 \
  --teacher Helsinki-NLP/opus-mt-en-ro \
  --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \
  --student_decoder_layers 3 --student_encoder_layers 6 \
  --freeze_encoder --freeze_embeds \
  --model_name_or_path IGNORED \
  --alpha_hid=3. \
  --train_batch_size=$BS --eval_batch_size=$BS \
  --tokenizer_name Helsinki-NLP/opus-mt-en-ro \
  --warmup_steps 500 --logger_name wandb \
  --fp16_opt_level O1 --task translation --normalize_hidden --num_sanity_val_steps=0 \
  "$@"
transformers/examples/research_projects/seq2seq-distillation/distil_marian_enro_teacher.sh/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/distil_marian_enro_teacher.sh", "repo_id": "transformers", "token_count": 310 }
301
#!/usr/bin/env bash
export PYTHONPATH="../":"${PYTHONPATH}"

python distillation.py \
  --teacher facebook/bart-large-xsum --data_dir xsum \
  --tokenizer_name facebook/bart-large-xsum \
  --student_decoder_layers 6 --student_encoder_layers 12 \
  --freeze_encoder --freeze_embeds \
  --learning_rate=3e-4 \
  --do_train \
  --do_predict \
  --fp16 --fp16_opt_level=O1 \
  --val_check_interval 0.1 --n_val 1000 --eval_beams 2 --length_penalty=0.5 \
  --max_target_length=60 --val_max_target_length=60 --test_max_target_length=100 \
  --model_name_or_path IGNORED \
  --alpha_hid=3. \
  --train_batch_size=16 --eval_batch_size=16 --gradient_accumulation_steps=2 \
  --sortish_sampler \
  --num_train_epochs=6 \
  --warmup_steps 500 \
  --output_dir distilbart_xsum_12_6 \
  "$@"
transformers/examples/research_projects/seq2seq-distillation/train_distilbart_xsum.sh/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/train_distilbart_xsum.sh", "repo_id": "transformers", "token_count": 317 }
302
#!/usr/bin/env bash
python run_asr.py \
  --output_dir="./wav2vec2-large-lv60-100h" \
  --num_train_epochs="30" \
  --per_device_train_batch_size="16" \
  --per_device_eval_batch_size="16" \
  --eval_strategy="steps" \
  --save_total_limit="3" \
  --save_steps="500" \
  --eval_steps="100" \
  --logging_steps="50" \
  --learning_rate="5e-4" \
  --warmup_steps="3000" \
  --model_name_or_path="facebook/wav2vec2-large-lv60" \
  --fp16 \
  --dataset_name="librispeech_asr" \
  --dataset_config_name="clean" \
  --train_split_name="train.100" \
  --preprocessing_num_workers="32" \
  --group_by_length \
  --freeze_feature_extractor
transformers/examples/research_projects/wav2vec2/finetune_large_lv60_100.sh/0
{ "file_path": "transformers/examples/research_projects/wav2vec2/finetune_large_lv60_100.sh", "repo_id": "transformers", "token_count": 254 }
303
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values parser = argparse.ArgumentParser() parser.add_argument("--user", type=str, default="ubuntu") parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--key_path", type=str, default=None) parser.add_argument("--instance", type=str, default="V100:1") parser.add_argument("--provider", type=str, default="cheapest") parser.add_argument("--use_spot", type=bool, default=False) parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py") args, unknown = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("Cannot specify both BYO and on-demand cluster args") cluster = rh.cluster( name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path} ) else: cluster = rh.cluster( name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) example_dir = args.example.rsplit("/", 1)[0] # Set up remote environment cluster.install_packages(["pip:./"]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"]) cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
transformers/examples/run_on_remote.py/0
{ "file_path": "transformers/examples/run_on_remote.py", "repo_id": "transformers", "token_count": 1321 }
304
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script for training a masked language model on TPU.""" import argparse import logging import os import re import tensorflow as tf from packaging.version import parse from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) try: import tf_keras as keras except (ModuleNotFoundError, ImportError): import keras if parse(keras.__version__).major > 2: raise ValueError( "Your currently installed version of Keras is Keras 3, but this is not yet supported in " "Transformers. Please install the backwards-compatible tf-keras package with " "`pip install tf-keras`." ) logger = logging.getLogger(__name__) AUTO = tf.data.AUTOTUNE def parse_args(): parser = argparse.ArgumentParser(description="Train a masked language model on TPU.") parser.add_argument( "--pretrained_model_config", type=str, default="FacebookAI/roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", ) parser.add_argument( "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", ) parser.add_argument( "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", ) parser.add_argument( "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", ) parser.add_argument( "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", ) parser.add_argument( "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", ) parser.add_argument( "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", ) parser.add_argument( "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket.", ) parser.add_argument( "--shuffle_buffer_size", type=int, default=2**18, # Default corresponds to a 1GB buffer for seq_len 512 help="Size of the shuffle buffer (in samples)", ) parser.add_argument( "--eval_dataset", type=str, help="Path to evaluation dataset to load. 
If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket.", ) parser.add_argument( "--num_epochs", type=int, default=1, help="Number of epochs to train for.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", ) parser.add_argument( "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", ) parser.add_argument( "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", ) parser.add_argument( "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", ) parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.") parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.") args = parser.parse_args() return args def initialize_tpu(args): try: if args.tpu_name: tpu = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name, zone=args.tpu_zone, project=args.gcp_project ) else: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." ) tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) return tpu def count_samples(file_list): num_samples = 0 for file in file_list: filename = file.split("/")[-1] sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1) sample_count = int(sample_count) num_samples += sample_count return num_samples def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None): num_samples = count_samples(records) dataset = tf.data.Dataset.from_tensor_slices(records) if shuffle: dataset = dataset.shuffle(len(dataset)) dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples)) dataset = dataset.map(decode_fn, num_parallel_calls=AUTO) if shuffle: assert shuffle_buffer_size is not None dataset = dataset.shuffle(args.shuffle_buffer_size) dataset = dataset.batch(batch_size, drop_remainder=True) dataset = dataset.map(mask_fn, num_parallel_calls=AUTO) dataset = dataset.prefetch(AUTO) return dataset def main(args): if not args.no_tpu: tpu = initialize_tpu(args) strategy = tf.distribute.TPUStrategy(tpu) else: strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") if args.bfloat16: keras.mixed_precision.set_global_policy("mixed_bfloat16") tokenizer = AutoTokenizer.from_pretrained(args.tokenizer) config = AutoConfig.from_pretrained(args.pretrained_model_config) config.vocab_size = tokenizer.vocab_size training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord")) if not training_records: raise ValueError(f"No .tfrecord files found in {args.train_dataset}.") eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord")) if not eval_records: raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.") num_train_samples = count_samples(training_records) steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) total_train_steps = steps_per_epoch * args.num_epochs with 
strategy.scope(): model = TFAutoModelForMaskedLM.from_config(config) model(model.dummy_inputs) # Pass some dummy inputs through the model to ensure all the weights are built optimizer, schedule = create_optimizer( num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=optimizer, metrics=["accuracy"]) def decode_fn(example): features = { "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)), "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)), } return tf.io.parse_single_example(example, features) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. data_collator = DataCollatorForLanguageModeling( tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf" ) def mask_with_collator(batch): # TF really needs an isin() function special_tokens_mask = ( ~tf.cast(batch["attention_mask"], tf.bool) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens( batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, ) return batch batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync train_dataset = prepare_dataset( training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, ) eval_dataset = prepare_dataset( eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, ) callbacks = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer) ) model.fit( train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, ) model.save_pretrained(args.output_dir) if __name__ == "__main__": args = parse_args() main(args)
transformers/examples/tensorflow/language-modeling-tpu/run_mlm.py/0
{ "file_path": "transformers/examples/tensorflow/language-modeling-tpu/run_mlm.py", "repo_id": "transformers", "token_count": 4387 }
305
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import logging import os import sys from unittest import skip from unittest.mock import patch import tensorflow as tf from packaging.version import parse try: import tf_keras as keras except (ModuleNotFoundError, ImportError): import keras if parse(keras.__version__).major > 2: raise ValueError( "Your currently installed version of Keras is Keras 3, but this is not yet supported in " "Transformers. Please install the backwards-compatible tf-keras package with " "`pip install tf-keras`." ) from transformers.testing_utils import TestCasePlus, get_gpu_count, slow SRC_DIRS = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-generation", "text-classification", "token-classification", "language-modeling", "multiple-choice", "question-answering", "summarization", "translation", "image-classification", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm import run_image_classification import run_mlm import run_ner import run_qa as run_squad import run_summarization import run_swag import run_text_classification import run_translation logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def get_setup_file(): parser = argparse.ArgumentParser() parser.add_argument("-f") args = parser.parse_args() return args.f def get_results(output_dir): results = {} path = os.path.join(output_dir, "all_results.json") if os.path.exists(path): with open(path, "r") as f: results = json.load(f) else: raise ValueError(f"can't find {path}") return results def is_cuda_available(): return bool(tf.config.list_physical_devices("GPU")) stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class ExamplesTests(TestCasePlus): @skip("Skipping until shape inference for to_tf_dataset PR is merged.") def test_run_text_classification(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_text_classification.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() if is_cuda_available(): testargs.append("--fp16") with patch.object(sys, "argv", testargs): run_text_classification.main() # Reset the mixed precision policy so we don't break other tests keras.mixed_precision.set_global_policy("float32") result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) def test_run_clm(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_clm.py --model_name_or_path distilbert/distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 2 
--per_device_eval_batch_size 1 --num_train_epochs 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() if len(tf.config.list_physical_devices("GPU")) > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return with patch.object(sys, "argv", testargs): run_clm.main() result = get_results(tmp_dir) self.assertLess(result["eval_perplexity"], 100) def test_run_mlm(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_mlm.py --model_name_or_path distilbert/distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --max_seq_length 64 --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --prediction_loss_only --num_train_epochs=1 --learning_rate=1e-4 """.split() with patch.object(sys, "argv", testargs): run_mlm.main() result = get_results(tmp_dir) self.assertLess(result["eval_perplexity"], 42) def test_run_ner(self): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu epochs = 7 if get_gpu_count() > 1 else 2 tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_ner.py --model_name_or_path google-bert/bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(sys, "argv", testargs): run_ner.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["accuracy"], 0.75) def test_run_squad(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_qa.py --model_name_or_path google-bert/bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --max_steps=10 --warmup_steps=2 --do_train --do_eval --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(sys, "argv", testargs): run_squad.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["f1"], 30) self.assertGreaterEqual(result["exact"], 30) def test_run_swag(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_swag.py --model_name_or_path google-bert/bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --overwrite_output_dir --max_steps=20 --warmup_steps=2 --do_train --do_eval --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(sys, "argv", testargs): run_swag.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["val_accuracy"], 0.8) @slow def test_run_summarization(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_summarization.py --model_name_or_path google-t5/t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --max_steps=50 --warmup_steps=8 --do_train --do_eval --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(sys, "argv", testargs): run_summarization.main() result = get_results(tmp_dir) 
self.assertGreaterEqual(result["rouge1"], 10) self.assertGreaterEqual(result["rouge2"], 2) self.assertGreaterEqual(result["rougeL"], 7) self.assertGreaterEqual(result["rougeLsum"], 7) @slow def test_run_translation(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_translation.py --model_name_or_path Rocketknight1/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --overwrite_output_dir --warmup_steps=8 --do_train --do_eval --learning_rate=3e-3 --num_train_epochs 12 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO """.split() with patch.object(sys, "argv", testargs): run_translation.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["bleu"], 30) def test_run_image_classification(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_image_classification.py --dataset_name hf-internal-testing/cats_vs_dogs_sample --trust_remote_code --model_name_or_path microsoft/resnet-18 --do_train --do_eval --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --output_dir {tmp_dir} --overwrite_output_dir --dataloader_num_workers 16 --num_train_epochs 2 --train_val_split 0.1 --seed 42 --ignore_mismatched_sizes True """.split() with patch.object(sys, "argv", testargs): run_image_classification.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["accuracy"], 0.7)
transformers/examples/tensorflow/test_tensorflow_examples.py/0
{ "file_path": "transformers/examples/tensorflow/test_tensorflow_examples.py", "repo_id": "transformers", "token_count": 5486 }
306