Column schema (dtype and observed minimum/maximum; for string columns the range refers to string length, and ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length), ⌀ | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
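
Each row pairs a fully implemented class (`human_written_code`) with a docstring-only stub of the same class (`class_skeleton`), plus per-class size and complexity metrics. As a minimal sketch of how the rows might be consumed, assuming the data is published as a Hugging Face `datasets` dataset (the repository id below is a placeholder, not the actual one):

```python
from datasets import load_dataset

# Placeholder repository id; substitute the dataset's actual name on the Hub.
ds = load_dataset("your-org/your-class-level-dataset", split="train")

row = ds[0]
print(row["repository_name"], row["file_path"], row["class_name"])

# Both code columns are plain strings: the full implementation and its skeleton.
print(row["human_written_code"][:300])
print(row["class_skeleton"])

# Example filter: keep documented classes of moderate size.
subset = ds.filter(lambda r: r["total_doc_str"] > 0 and r["CountLineCode"] < 500)
print(len(subset))
```

The metric columns appear to follow the SciTools Understand naming scheme (CountLineCode, AvgCyclomatic, MaxInheritanceTree, and so on), so they can be filtered or binned directly as numeric features.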

Example rows:

**id 200** · repository_name: `huggingface/pytorch-pretrained-BERT` · file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/commands/chat.py` · class_name: `transformers.commands.chat.ChatArguments`

human_written_code:

```python
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ChatArguments:
"""
Arguments for the chat CLI.
    See the metadata arg for each argument's description -- the metadata will be printed with
`transformers chat --help`
"""
model_name_or_path: Optional[str] = field(default=None, metadata={'help': 'Name of the pre-trained model. The positional argument will take precedence if both are passed.'})
user: Optional[str] = field(default=None, metadata={'help': "Username to display in chat interface. Defaults to the current user's name."})
system_prompt: Optional[str] = field(default=None, metadata={'help': 'System prompt.'})
save_folder: str = field(default='./chat_history/', metadata={'help': 'Folder to save chat history.'})
examples_path: Optional[str] = field(default=None, metadata={'help': 'Path to a yaml file with examples.'})
verbose: bool = field(default=False, metadata={'help': 'Whether to show runtime warnings in the chat interface.'})
generation_config: Optional[str] = field(default=None, metadata={'help': 'Path to a local generation config file or to a HuggingFace repo containing a `generation_config.json` file. Other generation settings passed as CLI arguments will be applied on top of this generation config.'})
model_revision: str = field(default='main', metadata={'help': 'Specific model version to use (can be a branch name, tag name or commit id).'})
device: str = field(default='auto', metadata={'help': 'Device to use for inference.'})
torch_dtype: Optional[str] = field(default=None, metadata={'help': '`torch_dtype` is deprecated! Please use `dtype` argument instead.', 'choices': ['auto', 'bfloat16', 'float16', 'float32']})
dtype: Optional[str] = field(default='auto', metadata={'help': "Override the default `torch.dtype` and load the model under this dtype. If `'auto'` is passed, the dtype will be automatically derived from the model's weights.", 'choices': ['auto', 'bfloat16', 'float16', 'float32']})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust remote code when loading a model.'})
attn_implementation: Optional[str] = field(default=None, metadata={'help': 'Which attention implementation to use; you can run --attn_implementation=flash_attention_2, in which case you must install this manually by running `pip install flash-attn --no-build-isolation`.'})
load_in_8bit: bool = field(default=False, metadata={'help': 'Whether to use 8 bit precision for the base model - works only with LoRA.'})
load_in_4bit: bool = field(default=False, metadata={'help': 'Whether to use 4 bit precision for the base model - works only with LoRA.'})
bnb_4bit_quant_type: str = field(default='nf4', metadata={'help': 'Quantization type.', 'choices': ['fp4', 'nf4']})
use_bnb_nested_quant: bool = field(default=False, metadata={'help': 'Whether to use nested quantization.'})
    host: str = field(default='localhost', metadata={'help': 'Interface the server will listen to.'})
port: int = field(default=8000, metadata={'help': 'Port the server will listen to.'})
def __post_init__(self):
"""Only used for BC `torch_dtype` argument."""
if self.torch_dtype is not None:
if self.dtype is None:
self.dtype = self.torch_dtype
elif self.torch_dtype != self.dtype:
raise ValueError(f'`torch_dtype` {self.torch_dtype} and `dtype` {self.dtype} have different values. `torch_dtype` is deprecated and will be removed in 4.59.0, please set `dtype` instead.')
```

class_skeleton:

```python
@dataclass
class ChatArguments:
'''
Arguments for the chat CLI.
    See the metadata arg for each argument's description -- the metadata will be printed with
`transformers chat --help`
'''
def __post_init__(self):
'''Only used for BC `torch_dtype` argument.'''
pass
```

Class-level metrics:

| metric | value |
|---|---|
| total_program_units | 3 |
| total_doc_str | 2 |
| AvgCountLine | 0 |
| AvgCountLineBlank | 0 |
| AvgCountLineCode | 0 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 0 |
| CommentToCodeRatio | 1.02 |
| CountClassBase | 0 |
| CountClassCoupled | 0 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 0 |
| CountDeclInstanceVariable | 0 |
| CountDeclMethod | 0 |
| CountDeclMethodAll | 0 |
| CountLine | 113 |
| CountLineBlank | 4 |
| CountLineCode | 54 |
| CountLineCodeDecl | 24 |
| CountLineCodeExe | 53 |
| CountLineComment | 55 |
| CountStmt | 24 |
| CountStmtDecl | 24 |
| CountStmtExe | 23 |
| MaxCyclomatic | 0 |
| MaxInheritanceTree | 0 |
| MaxNesting | 0 |
| SumCyclomatic | 0 |

**id 201** · repository_name: `huggingface/pytorch-pretrained-BERT` · file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/commands/chat.py` · class_name: `transformers.commands.chat.ChatCommand`

human_written_code:

```python
import asyncio
import copy
from transformers import AutoTokenizer, GenerationConfig, PreTrainedTokenizer
import platform
import os
import yaml
from argparse import ArgumentParser, Namespace
from transformers.commands import BaseTransformersCLICommand
from transformers.utils import is_rich_available, is_torch_available
import json
from transformers.commands.serving import ServeArguments, ServeCommand
from typing import Optional
from huggingface_hub import AsyncInferenceClient, ChatCompletionStreamOutput
import time
from threading import Thread
class ChatCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the transformer-cli
Args:
parser: Root parser to register command-specific arguments
"""
dataclass_types = (ChatArguments,)
chat_parser = parser.add_parser('chat', dataclass_types=dataclass_types)
group = chat_parser.add_argument_group('Positional arguments')
group.add_argument('model_name_or_path_or_address', type=str, default=None, help='Name of the pre-trained model or address to connect to.')
group.add_argument('generate_flags', type=str, default=None, help="Flags to pass to `generate`, using a space as a separator between flags. Accepts booleans, numbers, and lists of integers, more advanced parameterization should be set through --generation-config. Example: `transformers chat <model_repo> max_new_tokens=100 do_sample=False eos_token_id=[1,2]`. If you're a new user, check this basic flag guide: https://huggingface.co/docs/transformers/llm_tutorial#common-options", nargs='*')
chat_parser.set_defaults(func=chat_command_factory)
def __init__(self, args):
if args.model_name_or_path_or_address is not None:
name = args.model_name_or_path_or_address
if name.startswith('http') or name.startswith('https') or name.startswith('localhost'):
self.spawn_backend = False
if args.host != 'localhost' or args.port != 8000:
raise ValueError('Looks like you’ve set both a server address and a custom host/port. Please pick just one way to specify the server.')
args.host, args.port = args.model_name_or_path_or_address.rsplit(':', 1)
if args.model_name_or_path is None:
raise ValueError('When connecting to a server, please specify a model name with the --model_name_or_path flag.')
else:
self.spawn_backend = True
args.model_name_or_path = args.model_name_or_path_or_address
if not is_rich_available() and (not is_torch_available() and self.spawn_backend):
raise ImportError('You need to install rich to use the chat interface. Additionally, you have not specified a remote endpoint and are therefore spawning a backend. Torch is required for this: (`pip install rich torch`)')
elif not is_rich_available():
raise ImportError('You need to install rich to use the chat interface. (`pip install rich`)')
elif not is_torch_available() and self.spawn_backend:
raise ImportError('You have not specified a remote endpoint and are therefore spawning a backend. Torch is required for this: (`pip install rich torch`)')
self.args = args
@staticmethod
def get_username() -> str:
"""Returns the username of the current user."""
if platform.system() == 'Windows':
return os.getlogin()
else:
return pwd.getpwuid(os.getuid()).pw_name
@staticmethod
def save_chat(chat, args: ChatArguments, filename: Optional[str]=None) -> str:
"""Saves the chat history to a file."""
output_dict = {}
output_dict['settings'] = vars(args)
output_dict['chat_history'] = chat
folder = args.save_folder
if filename is None:
time_str = time.strftime('%Y-%m-%d_%H-%M-%S')
filename = f'{args.model_name_or_path_or_address}/chat_{time_str}.json'
filename = os.path.join(folder, filename)
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
json.dump(output_dict, f, indent=4)
return os.path.abspath(filename)
@staticmethod
def clear_chat_history(system_prompt: Optional[str]=None) -> list[dict]:
"""Clears the chat history."""
if system_prompt is None:
chat = []
else:
chat = [{'role': 'system', 'content': system_prompt}]
return chat
def parse_generate_flags(self, generate_flags: list[str]) -> dict:
"""Parses the generate flags from the user input into a dictionary of `generate` kwargs."""
if len(generate_flags) == 0:
return {}
generate_flags_as_dict = {'"' + flag.split('=')[0] + '"': flag.split('=')[1] for flag in generate_flags}
generate_flags_as_dict = {k: v.lower() if v.lower() in ['true', 'false'] else v for k, v in generate_flags_as_dict.items()}
generate_flags_as_dict = {k: 'null' if v == 'None' else v for k, v in generate_flags_as_dict.items()}
def is_number(s: str) -> bool:
if s.startswith('-'):
s = s[1:]
return s.replace('.', '', 1).isdigit()
generate_flags_as_dict = {k: f'"{v}"' if not is_number(v) else v for k, v in generate_flags_as_dict.items()}
generate_flags_string = ', '.join([f'{k}: {v}' for k, v in generate_flags_as_dict.items()])
generate_flags_string = '{' + generate_flags_string + '}'
generate_flags_string = generate_flags_string.replace('"null"', 'null')
generate_flags_string = generate_flags_string.replace('"true"', 'true')
generate_flags_string = generate_flags_string.replace('"false"', 'false')
generate_flags_string = generate_flags_string.replace('"[', '[')
generate_flags_string = generate_flags_string.replace(']"', ']')
generate_flags_string = generate_flags_string.replace('=', ':')
try:
processed_generate_flags = json.loads(generate_flags_string)
except json.JSONDecodeError:
            raise ValueError(f'Failed to convert `generate_flags` into a valid JSON object.\n`generate_flags` = {generate_flags}\nConverted JSON string = {generate_flags_string}')
return processed_generate_flags
def get_generation_parameterization(self, args: ChatArguments, model_generation_config: GenerationConfig) -> tuple[GenerationConfig, dict]:
"""
Returns a GenerationConfig object holding the generation parameters for the CLI command.
"""
if args.generation_config is not None:
if '.json' in args.generation_config:
dirname = os.path.dirname(args.generation_config)
filename = os.path.basename(args.generation_config)
generation_config = GenerationConfig.from_pretrained(dirname, filename)
else:
generation_config = GenerationConfig.from_pretrained(args.generation_config)
else:
generation_config = copy.deepcopy(model_generation_config)
generation_config.update(**{'do_sample': True, 'max_new_tokens': 256})
parsed_generate_flags = self.parse_generate_flags(args.generate_flags)
model_kwargs = generation_config.update(**parsed_generate_flags)
return (generation_config, model_kwargs)
@staticmethod
def parse_eos_tokens(tokenizer: PreTrainedTokenizer, generation_config: GenerationConfig, eos_tokens: Optional[str], eos_token_ids: Optional[str]) -> tuple[int, list[int]]:
"""Retrieves the pad token ID and all possible EOS token IDs."""
if generation_config.pad_token_id is None:
pad_token_id = generation_config.eos_token_id
else:
pad_token_id = generation_config.pad_token_id
all_eos_token_ids = []
if eos_tokens is not None:
all_eos_token_ids.extend(tokenizer.convert_tokens_to_ids(eos_tokens.split(',')))
if eos_token_ids is not None:
all_eos_token_ids.extend([int(token_id) for token_id in eos_token_ids.split(',')])
if len(all_eos_token_ids) == 0:
all_eos_token_ids.append(generation_config.eos_token_id)
return (pad_token_id, all_eos_token_ids)
@staticmethod
def get_quantization_config(model_args: ChatArguments) -> Optional['BitsAndBytesConfig']:
if model_args.load_in_4bit:
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=model_args.dtype, bnb_4bit_quant_type=model_args.bnb_4bit_quant_type, bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant, bnb_4bit_quant_storage=model_args.dtype)
elif model_args.load_in_8bit:
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
else:
quantization_config = None
return quantization_config
def load_model_and_tokenizer(self, args: ChatArguments) -> tuple['AutoModelForCausalLM', AutoTokenizer]:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path_positional, revision=args.model_revision, trust_remote_code=args.trust_remote_code)
dtype = args.dtype if args.dtype in ['auto', None] else getattr(torch, args.dtype)
quantization_config = self.get_quantization_config(args)
model_kwargs = {'revision': args.model_revision, 'attn_implementation': args.attn_implementation, 'dtype': dtype, 'device_map': 'auto', 'quantization_config': quantization_config}
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path_positional, trust_remote_code=args.trust_remote_code, **model_kwargs)
if getattr(model, 'hf_device_map', None) is None:
model = model.to(args.device)
return (model, tokenizer)
def handle_non_exit_user_commands(self, user_input: str, args: ChatArguments, interface: RichInterface, examples: dict[str, dict[str, str]], generation_config: GenerationConfig, model_kwargs: dict, chat: list[dict]) -> tuple[list[dict], GenerationConfig, dict]:
"""
Handles all user commands except for `!exit`. May update the chat history (e.g. reset it) or the
generation config (e.g. set a new flag).
"""
valid_command = True
if user_input == '!clear':
chat = self.clear_chat_history(args.system_prompt)
interface.clear()
elif user_input == '!help':
interface.print_help()
elif user_input.startswith('!save') and len(user_input.split()) < 2:
split_input = user_input.split()
if len(split_input) == 2:
filename = split_input[1]
else:
filename = None
filename = self.save_chat(chat, args, filename)
interface.print_color(text=f'Chat saved in {filename}!', color='green')
elif user_input.startswith('!set'):
new_generate_flags = user_input[4:].strip()
new_generate_flags = new_generate_flags.split()
for flag in new_generate_flags:
if '=' not in flag:
interface.print_color(text=f'Invalid flag format, missing `=` after `{flag}`. Please use the format `arg_1=value_1 arg_2=value_2 ...`.', color='red')
break
else:
parsed_new_generate_flags = self.parse_generate_flags(new_generate_flags)
new_model_kwargs = generation_config.update(**parsed_new_generate_flags)
model_kwargs.update(**new_model_kwargs)
elif user_input.startswith('!example') and len(user_input.split()) == 2:
example_name = user_input.split()[1]
if example_name in examples:
interface.clear()
chat = []
interface.print_user_message(examples[example_name]['text'])
chat.append({'role': 'user', 'content': examples[example_name]['text']})
else:
example_error = f'Example {example_name} not found in list of available examples: {list(examples.keys())}.'
interface.print_color(text=example_error, color='red')
elif user_input == '!status':
interface.print_status(model_name=args.model_name_or_path, generation_config=generation_config, model_kwargs=model_kwargs)
else:
valid_command = False
interface.print_color(text=f"'{user_input}' is not a valid command. Showing help message.", color='red')
interface.print_help()
return (chat, valid_command, generation_config, model_kwargs)
def run(self):
asyncio.run(self._inner_run())
async def _inner_run(self):
if self.spawn_backend:
serve_args = ServeArguments(device=self.args.device, dtype=self.args.dtype, trust_remote_code=self.args.trust_remote_code, attn_implementation=self.args.attn_implementation, load_in_8bit=self.args.load_in_8bit, load_in_4bit=self.args.load_in_4bit, bnb_4bit_quant_type=self.args.bnb_4bit_quant_type, use_bnb_nested_quant=self.args.use_bnb_nested_quant, host=self.args.host, port=self.args.port, log_level='error')
serve_command = ServeCommand(serve_args)
thread = Thread(target=serve_command.run)
thread.daemon = True
thread.start()
model = self.args.model_name_or_path + '@' + self.args.model_revision
host = 'http://localhost' if self.args.host == 'localhost' else self.args.host
client = AsyncInferenceClient(f'{host}:{self.args.port}')
args = self.args
if args.examples_path is None:
examples = DEFAULT_EXAMPLES
else:
with open(args.examples_path) as f:
examples = yaml.safe_load(f)
if args.user is None:
user = self.get_username()
else:
user = args.user
model_generation_config = GenerationConfig.from_pretrained(args.model_name_or_path)
generation_config, model_kwargs = self.get_generation_parameterization(args, model_generation_config)
interface = RichInterface(model_name=args.model_name_or_path, user_name=user)
interface.clear()
chat = self.clear_chat_history(args.system_prompt)
interface.print_help(minimal=True)
while True:
try:
user_input = interface.input()
if user_input.startswith('!'):
if user_input == '!exit':
break
else:
chat, valid_command, generation_config, model_kwargs = self.handle_non_exit_user_commands(user_input=user_input, args=args, interface=interface, examples=examples, generation_config=generation_config, model_kwargs=model_kwargs, chat=chat)
if not valid_command or not user_input.startswith('!example'):
continue
else:
chat.append({'role': 'user', 'content': user_input})
stream = client.chat_completion(chat, stream=True, extra_body={'generation_config': generation_config.to_json_string(), 'model': model})
model_output = await interface.stream_output(stream)
chat.append({'role': 'assistant', 'content': model_output})
except KeyboardInterrupt:
break
finally:
await client.close()
```

class_skeleton:

```python
class ChatCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
'''
Register this command to argparse so it's available for the transformer-cli
Args:
parser: Root parser to register command-specific arguments
'''
pass
def __init__(self, args):
pass
@staticmethod
def get_username() -> str:
'''Returns the username of the current user.'''
pass
@staticmethod
def save_chat(chat, args: ChatArguments, filename: Optional[str]=None) -> str:
'''Saves the chat history to a file.'''
pass
@staticmethod
def clear_chat_history(system_prompt: Optional[str]=None) -> list[dict]:
'''Clears the chat history.'''
pass
def parse_generate_flags(self, generate_flags: list[str]) -> dict:
'''Parses the generate flags from the user input into a dictionary of `generate` kwargs.'''
pass
def is_number(s: str) -> bool:
pass
def get_generation_parameterization(self, args: ChatArguments, model_generation_config: GenerationConfig) -> tuple[GenerationConfig, dict]:
'''
Returns a GenerationConfig object holding the generation parameters for the CLI command.
'''
pass
@staticmethod
def parse_eos_tokens(tokenizer: PreTrainedTokenizer, generation_config: GenerationConfig, eos_tokens: Optional[str], eos_token_ids: Optional[str]) -> tuple[int, list[int]]:
'''Retrieves the pad token ID and all possible EOS token IDs.'''
pass
@staticmethod
def get_quantization_config(model_args: ChatArguments) -> Optional['BitsAndBytesConfig']:
pass
def load_model_and_tokenizer(self, args: ChatArguments) -> tuple['AutoModelForCausalLM', AutoTokenizer]:
pass
def handle_non_exit_user_commands(self, user_input: str, args: ChatArguments, interface: RichInterface, examples: dict[str, dict[str, str]], generation_config: GenerationConfig, model_kwargs: dict, chat: list[dict]) -> tuple[list[dict], GenerationConfig, dict]:
'''
Handles all user commands except for `!exit`. May update the chat history (e.g. reset it) or the
generation config (e.g. set a new flag).
'''
pass
def run(self):
pass
async def _inner_run(self):
pass
```

Class-level metrics:

| metric | value |
|---|---|
| total_program_units | 21 |
| total_doc_str | 8 |
| AvgCountLine | 39 |
| AvgCountLineBlank | 6 |
| AvgCountLineCode | 31 |
| AvgCountLineComment | 2 |
| AvgCyclomatic | 6 |
| CommentToCodeRatio | 0.05 |
| CountClassBase | 1 |
| CountClassCoupled | 7 |
| CountClassCoupledModified | 2 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 2 |
| CountDeclInstanceVariable | 1 |
| CountDeclMethod | 3 |
| CountDeclMethodAll | 25 |
| CountLine | 121 |
| CountLineBlank | 20 |
| CountLineCode | 96 |
| CountLineCodeDecl | 28 |
| CountLineCodeExe | 91 |
| CountLineComment | 5 |
| CountStmt | 74 |
| CountStmtDecl | 26 |
| CountStmtExe | 70 |
| MaxCyclomatic | 15 |
| MaxInheritanceTree | 5 |
| MaxNesting | 4 |
| SumCyclomatic | 17 |

**id 202** · repository_name: `huggingface/pytorch-pretrained-BERT` · file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/commands/chat.py` · class_name: `transformers.commands.chat.RichInterface`

human_written_code:

```python
import re
from transformers import AutoTokenizer, GenerationConfig, PreTrainedTokenizer
from typing import Optional
from huggingface_hub import AsyncInferenceClient, ChatCompletionStreamOutput
from collections.abc import AsyncIterator
class RichInterface:
def __init__(self, model_name: Optional[str]=None, user_name: Optional[str]=None):
self._console = Console()
if model_name is None:
self.model_name = 'assistant'
else:
self.model_name = model_name
if user_name is None:
self.user_name = 'user'
else:
self.user_name = user_name
async def stream_output(self, stream: AsyncIterator[ChatCompletionStreamOutput]) -> tuple[str, int]:
self._console.print(f'[bold blue]<{self.model_name}>:')
with Live(console=self._console, refresh_per_second=4) as live:
text = ''
async for token in await stream:
outputs = token.choices[0].delta.content
if not outputs:
continue
outputs = re.sub('<(/*)(\\w*)>', '\\<\\1\\2\\>', outputs)
text += outputs
lines = []
for line in text.splitlines():
lines.append(line)
if line.startswith('```'):
lines.append('\n')
else:
lines.append(' \n')
markdown = Markdown(''.join(lines).strip(), code_theme='github-dark')
live.update(markdown, refresh=True)
self._console.print()
return text
def input(self) -> str:
"""Gets user input from the console."""
input = self._console.input(f'[bold red]<{self.user_name}>:\n')
self._console.print()
return input
def clear(self):
"""Clears the console."""
self._console.clear()
def print_user_message(self, text: str):
"""Prints a user message to the console."""
self._console.print(f'[bold red]<{self.user_name}>:[/ bold red]\n{text}')
self._console.print()
def print_color(self, text: str, color: str):
"""Prints text in a given color to the console."""
self._console.print(f'[bold {color}]{text}')
self._console.print()
def print_help(self, minimal: bool=False):
"""Prints the help message to the console."""
self._console.print(Markdown(HELP_STRING_MINIMAL if minimal else HELP_STRING))
self._console.print()
def print_status(self, model_name: str, generation_config: GenerationConfig, model_kwargs: dict):
"""Prints the status of the model and generation settings to the console."""
self._console.print(f'[bold blue]Model: {model_name}\n')
if model_kwargs:
self._console.print(f'[bold blue]Model kwargs: {model_kwargs}')
self._console.print(f'[bold blue]{generation_config}')
self._console.print()
```

class_skeleton:

```python
class RichInterface:
def __init__(self, model_name: Optional[str]=None, user_name: Optional[str]=None):
pass
async def stream_output(self, stream: AsyncIterator[ChatCompletionStreamOutput]) -> tuple[str, int]:
pass
def input(self) -> str:
'''Gets user input from the console.'''
pass
def clear(self):
'''Clears the console.'''
pass
def print_user_message(self, text: str):
'''Prints a user message to the console.'''
pass
def print_color(self, text: str, color: str):
'''Prints text in a given color to the console.'''
pass
def print_help(self, minimal: bool=False):
'''Prints the help message to the console.'''
pass
def print_status(self, model_name: str, generation_config: GenerationConfig, model_kwargs: dict):
'''Prints the status of the model and generation settings to the console.'''
pass
```

Class-level metrics:

| metric | value |
|---|---|
| total_program_units | 9 |
| total_doc_str | 6 |
| AvgCountLine | 8 |
| AvgCountLineBlank | 0 |
| AvgCountLineCode | 6 |
| AvgCountLineComment | 2 |
| AvgCyclomatic | 2 |
| CommentToCodeRatio | 0.38 |
| CountClassBase | 0 |
| CountClassCoupled | 1 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 8 |
| CountDeclInstanceVariable | 3 |
| CountDeclMethod | 8 |
| CountDeclMethodAll | 8 |
| CountLine | 73 |
| CountLineBlank | 7 |
| CountLineCode | 48 |
| CountLineCodeDecl | 18 |
| CountLineCodeExe | 39 |
| CountLineComment | 18 |
| CountStmt | 45 |
| CountStmtDecl | 17 |
| CountStmtExe | 36 |
| MaxCyclomatic | 5 |
| MaxInheritanceTree | 0 |
| MaxNesting | 4 |
| SumCyclomatic | 14 |

**id 203** · repository_name: `huggingface/pytorch-pretrained-BERT` · file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/commands/download.py` · class_name: `transformers.commands.download.DownloadCommand`

human_written_code:

```python
from . import BaseTransformersCLICommand
from argparse import ArgumentParser
class DownloadCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser('download')
download_parser.add_argument('--cache-dir', type=str, default=None, help='Path to location to store the models')
download_parser.add_argument('--force', action='store_true', help='Force the model to be download even if already in cache-dir')
download_parser.add_argument('--trust-remote-code', action='store_true', help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine")
download_parser.add_argument('model', type=str, help='Name of the model to download')
download_parser.set_defaults(func=download_command_factory)
def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
self._model = model
self._cache = cache
self._force = force
self._trust_remote_code = trust_remote_code
def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
AutoTokenizer.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
```

class_skeleton:

```python
class DownloadCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
pass
def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
pass
def run(self):
pass
```

Class-level metrics:

| metric | value |
|---|---|
| total_program_units | 5 |
| total_doc_str | 0 |
| AvgCountLine | 10 |
| AvgCountLineBlank | 0 |
| AvgCountLineCode | 9 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 1 |
| CommentToCodeRatio | 0 |
| CountClassBase | 1 |
| CountClassCoupled | 5 |
| CountClassCoupledModified | 2 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 2 |
| CountDeclInstanceVariable | 4 |
| CountDeclMethod | 3 |
| CountDeclMethodAll | 25 |
| CountLine | 33 |
| CountLineBlank | 3 |
| CountLineCode | 30 |
| CountLineCodeDecl | 11 |
| CountLineCodeExe | 24 |
| CountLineComment | 0 |
| CountStmt | 17 |
| CountStmtDecl | 10 |
| CountStmtExe | 12 |
| MaxCyclomatic | 1 |
| MaxInheritanceTree | 5 |
| MaxNesting | 0 |
| SumCyclomatic | 3 |

**id 204** · repository_name: `huggingface/pytorch-pretrained-BERT` · file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/commands/env.py` · class_name: `transformers.commands.env.EnvironmentCommand`

human_written_code:

```python
from .. import __version__ as version
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
import platform
import io
import importlib.util
import os
from ..integrations.deepspeed import is_deepspeed_available
import huggingface_hub
from ..utils import is_accelerate_available, is_safetensors_available, is_torch_available, is_torch_hpu_available, is_torch_npu_available, is_torch_xpu_available
import contextlib
class EnvironmentCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser('env')
download_parser.set_defaults(func=info_command_factory)
download_parser.add_argument('--accelerate-config_file', default=None, help='The accelerate config file to use for the default values in the launching script.')
download_parser.set_defaults(func=download_command_factory)
def __init__(self, accelerate_config_file, *args) -> None:
self._accelerate_config_file = accelerate_config_file
def run(self):
safetensors_version = 'not installed'
if is_safetensors_available():
import safetensors
safetensors_version = safetensors.__version__
elif importlib.util.find_spec('safetensors') is not None:
import safetensors
safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'
accelerate_version = 'not installed'
accelerate_config = accelerate_config_str = 'not found'
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
accelerate_version = accelerate.__version__
if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
accelerate_config_str = '\n'.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()]) if isinstance(accelerate_config, dict) else f'\t{accelerate_config}'
pt_version = 'not installed'
pt_cuda_available = 'NA'
pt_accelerator = 'NA'
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
pt_xpu_available = is_torch_xpu_available()
pt_npu_available = is_torch_npu_available()
pt_hpu_available = is_torch_hpu_available()
if pt_cuda_available:
pt_accelerator = 'CUDA'
elif pt_xpu_available:
pt_accelerator = 'XPU'
elif pt_npu_available:
pt_accelerator = 'NPU'
elif pt_hpu_available:
pt_accelerator = 'HPU'
deepspeed_version = 'not installed'
if is_deepspeed_available():
with contextlib.redirect_stdout(io.StringIO()):
import deepspeed
deepspeed_version = deepspeed.__version__
info = {'`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'Safetensors version': f'{safetensors_version}', 'Accelerate version': f'{accelerate_version}', 'Accelerate config': f'{accelerate_config_str}', 'DeepSpeed version': f'{deepspeed_version}', 'PyTorch version (accelerator?)': f'{pt_version} ({pt_accelerator})', 'Using distributed or parallel set-up in script?': '<fill in>'}
if is_torch_available():
if pt_cuda_available:
info['Using GPU in script?'] = '<fill in>'
info['GPU type'] = torch.cuda.get_device_name()
elif pt_xpu_available:
info['Using XPU in script?'] = '<fill in>'
info['XPU type'] = torch.xpu.get_device_name()
elif pt_hpu_available:
info['Using HPU in script?'] = '<fill in>'
info['HPU type'] = torch.hpu.get_device_name()
elif pt_npu_available:
info['Using NPU in script?'] = '<fill in>'
info['NPU type'] = torch.npu.get_device_name()
info['CANN version'] = torch.version.cann
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d):
return '\n'.join([f'- {prop}: {val}' for prop, val in d.items()]) + '\n'
```

class_skeleton:

```python
class EnvironmentCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
pass
def __init__(self, accelerate_config_file, *args) -> None:
pass
def run(self):
pass
@staticmethod
def format_dict(d):
pass
```

Class-level metrics:

| metric | value |
|---|---|
| total_program_units | 7 |
| total_doc_str | 0 |
| AvgCountLine | 26 |
| AvgCountLineBlank | 4 |
| AvgCountLineCode | 22 |
| AvgCountLineComment | 1 |
| AvgCyclomatic | 4 |
| CommentToCodeRatio | 0.03 |
| CountClassBase | 1 |
| CountClassCoupled | 4 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 2 |
| CountDeclInstanceVariable | 1 |
| CountDeclMethod | 4 |
| CountDeclMethodAll | 26 |
| CountLine | 111 |
| CountLineBlank | 17 |
| CountLineCode | 91 |
| CountLineCodeDecl | 31 |
| CountLineCodeExe | 75 |
| CountLineComment | 3 |
| CountStmt | 65 |
| CountStmtDecl | 29 |
| CountStmtExe | 51 |
| MaxCyclomatic | 13 |
| MaxInheritanceTree | 5 |
| MaxNesting | 2 |
| SumCyclomatic | 16 |

**id 205** · repository_name: `huggingface/pytorch-pretrained-BERT` · file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/commands/run.py` · class_name: `transformers.commands.run.RunCommand`

human_written_code:

```python
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
class RunCommand(BaseTransformersCLICommand):
def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
self._nlp = nlp
self._reader = reader
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_parser = parser.add_parser('run', help='Run a pipeline through the CLI')
run_parser.add_argument('--task', choices=get_supported_tasks(), help='Task to run')
run_parser.add_argument('--input', type=str, help='Path to the file to use for inference')
run_parser.add_argument('--output', type=str, help='Path to the file that will be used post to write results.')
run_parser.add_argument('--model', type=str, help='Name or path to the model to instantiate.')
run_parser.add_argument('--config', type=str, help="Name or path to the model's config to instantiate.")
run_parser.add_argument('--tokenizer', type=str, help='Name of the tokenizer to use. (default: same as the model name)')
run_parser.add_argument('--column', type=str, help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)')
run_parser.add_argument('--format', type=str, default='infer', choices=PipelineDataFormat.SUPPORTED_FORMATS, help='Input format to read from')
run_parser.add_argument('--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')
run_parser.add_argument('--overwrite', action='store_true', help='Allow overwriting the output file.')
run_parser.set_defaults(func=run_command_factory)
def run(self):
nlp, outputs = (self._nlp, [])
for entry in self._reader:
output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
if isinstance(output, dict):
outputs.append(output)
else:
outputs += output
if self._nlp.binary_output:
binary_path = self._reader.save_binary(outputs)
logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}')
else:
self._reader.save(outputs)
```

class_skeleton:

```python
class RunCommand(BaseTransformersCLICommand):
def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
pass
@staticmethod
def register_subcommand(parser: ArgumentParser):
pass
def run(self):
pass
```

Class-level metrics:

| metric | value |
|---|---|
| total_program_units | 5 |
| total_doc_str | 0 |
| AvgCountLine | 16 |
| AvgCountLineBlank | 1 |
| AvgCountLineCode | 15 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 2 |
| CommentToCodeRatio | 0.02 |
| CountClassBase | 1 |
| CountClassCoupled | 6 |
| CountClassCoupledModified | 2 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 2 |
| CountDeclInstanceVariable | 2 |
| CountDeclMethod | 3 |
| CountDeclMethodAll | 25 |
| CountLine | 53 |
| CountLineBlank | 4 |
| CountLineCode | 48 |
| CountLineCodeDecl | 12 |
| CountLineCodeExe | 43 |
| CountLineComment | 1 |
| CountStmt | 28 |
| CountStmtDecl | 11 |
| CountStmtExe | 24 |
| MaxCyclomatic | 5 |
| MaxInheritanceTree | 5 |
| MaxNesting | 2 |
| SumCyclomatic | 7 |

**id 206** · repository_name: `huggingface/pytorch-pretrained-BERT` · file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/commands/serving.py` · class_name: `transformers.commands.serving.ServeCommand`

human_written_code:

```python
import functools
import uuid
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES
import base64
from threading import Thread
from io import BytesIO
import asyncio
from tokenizers.decoders import DecodeStream
from typing import Optional, Union
import datetime
from .. import AutoConfig, LogitsProcessorList, PreTrainedTokenizerFast, ProcessorMixin, TextIteratorStreamer
from . import BaseTransformersCLICommand
import tempfile
import io
from collections.abc import AsyncGenerator, Generator, Iterable
from contextlib import asynccontextmanager
from transformers.utils.import_utils import is_fastapi_available, is_librosa_available, is_openai_available, is_pydantic_available, is_uvicorn_available, is_vision_available
from ..utils import is_torch_available, logging
import time
import transformers
from huggingface_hub.constants import HF_HUB_OFFLINE
from argparse import ArgumentParser, Namespace
import re
from huggingface_hub import model_info
class ServeCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the transformer-cli
Args:
parser: Root parser to register command-specific arguments
"""
dataclass_types = (ServeArguments,)
serve_parser = parser.add_parser('serve', dataclass_types=dataclass_types)
serve_parser.set_defaults(func=serve_command_factory)
def __init__(self, args: ServeArguments):
if not serve_dependencies_available:
raise ImportError('Missing dependencies for the serving CLI. Please install with `pip install transformers[serving]`')
self.args = args
self.use_continuous_batching = self.args.continuous_batching
if self.use_continuous_batching:
default_attn_impl = ContinuousBatchingManager.default_attention_implementation()
if self.args.attn_implementation is None:
self.args.attn_implementation = default_attn_impl
logger.info(f'No attn_implementation passed, defaulting to {default_attn_impl}')
supported_attn_impl = ContinuousBatchingManager.supported_attention_implementations()
if self.args.attn_implementation not in supported_attn_impl:
raise ValueError(f'Continuous batching only supports {supported_attn_impl} as attn_implementation, got {self.args.attn_implementation}Try setting `--attn_implementation={default_attn_impl}`')
self.enable_cors = self.args.enable_cors
if self.args.default_seed is not None:
torch.manual_seed(self.args.default_seed)
transformers_logger = logging.get_logger('transformers')
transformers_logger.setLevel(logging.log_levels[self.args.log_level.lower()])
cb_logger = logging.get_logger('transformers.generation.continuous_batching')
cb_logger.setLevel(logging.log_levels[self.args.log_level.lower()])
self.loaded_models: dict[str, TimedModel] = {}
self.running_continuous_batching_manager: Optional[ContinuousBatchingManager] = None
self.last_messages = None
self.last_kv_cache = None
self.last_model = None
def _validate_request(self, request: dict, schema: '_TypedDictMeta', validator: 'TypeAdapter', unused_fields: set):
"""
Validates the request against the schema, and checks for unexpected keys.
Args:
request (`dict`):
The request to validate.
schema (`_TypedDictMeta`):
The schema of the request to validate. It is a `TypedDict` definition.
validator (`TypeAdapter`):
The validator to use to validate the request. Built from `schema`.
unused_fields (`set`):
Fields accepted by `schema`, but not used in `transformers serve`.
Raises:
HTTPException: If the request is invalid or contains unexpected or unused fields.
"""
logger.debug(f'Validating request: {request}')
input_keys = set(request.keys())
possible_keys = schema.__mutable_keys__
unexpected_keys = input_keys - possible_keys
if unexpected_keys:
logger.error(f'Unexpected keys in the request: {unexpected_keys}')
raise HTTPException(status_code=422, detail=f'Unexpected keys in the request: {unexpected_keys}')
if self.args.input_validation:
try:
validator.validate_python(request)
except ValidationError as e:
logger.error(f'Validation error: {e.errors()}')
raise HTTPException(status_code=422, detail=e.errors())
unused_fields_in_request = input_keys & unused_fields
if unused_fields_in_request:
logger.error(f'Unused fields in the request: {unused_fields_in_request}')
raise HTTPException(status_code=422, detail=f'Unused fields in the request: {unused_fields_in_request}')
def validate_response_request(self, request: dict):
self._validate_request(request=request, schema=TransformersResponseCreateParamsStreaming, validator=response_validator, unused_fields=UNUSED_RESPONSE_FIELDS)
def validate_chat_completion_request(self, request: dict):
self._validate_request(request=request, schema=TransformersCompletionCreateParamsStreaming, validator=completion_validator, unused_fields=UNUSED_CHAT_COMPLETION_FIELDS)
def validate_transcription_request(self, request: dict):
self._validate_request(request=request, schema=TransformersTranscriptionCreateParams, validator=transcription_validator, unused_fields=UNUSED_TRANSCRIPTION_FIELDS)
def build_chat_completion_chunk(self, request_id: Optional[str]='', content: Optional[int]=None, model: Optional[str]=None, role: Optional[str]=None, finish_reason: Optional[str]=None, tool_calls: Optional[list['ChoiceDeltaToolCall']]=None, decode_stream: Optional[DecodeStream]=None, tokenizer: Optional[PreTrainedTokenizerFast]=None) -> str:
"""
Builds a chunk of a streaming OpenAI Chat Completion response.
IMPORTANT: The serialized chunk won't contain empty fields (fields with `None`). Some downstream apps,
like Cursor, assume that when the field exists, it has data.
Args:
request_id (`str`):
The request ID.
content (`str`, *optional*):
Content of the response from the model.
model (`str`, *optional*):
The model that generated the content.
role (`str`, *optional*):
The role of the next content, until a new role is defined.
finish_reason (`str`, *optional*):
The reason the generation by the model has finished.
tool_calls (`list[ChoiceDeltaToolCall]`, *optional*):
Data about the tool calls, when they are triggered.
Returns:
`str`: The built chunk, a string containing a JSON string with the payload.
"""
if decode_stream is not None and content is not None and (tokenizer is not None):
content = decode_stream.step(tokenizer._tokenizer, content)
chunk = ChatCompletionChunk(id=request_id, created=int(time.time()), model=model, choices=[Choice(delta=ChoiceDelta(content=content, role=role, tool_calls=tool_calls), index=0, finish_reason=finish_reason)], system_fingerprint='', object='chat.completion.chunk')
return f'data: {chunk.model_dump_json(exclude_none=True)}\n\n'
def build_response_event(self, response: 'BaseModel') -> str:
"""
Builds a event of a streaming OpenAI Response response.
IMPORTANT: The serialized chunk won't contain empty fields (fields with `None`). Some downstream apps,
like Cursor, assume that when the field exists, it has data.
Args:
response (`BaseModel`):
The response to build an event from. One of the multiple OpenAI Response output types
Returns:
`str`: The built chunk, a string containing a JSON string with the payload.
"""
return f'data: {response.model_dump_json(exclude_none=True)}\n\n'
def run(self):
"""
Setup and run the FastAPI server for transformers serve.
Models will be loaded and unloaded automatically based on usage and a timeout.
The server will expose the following endpoints:
- POST /v1/chat/completions: Generates chat completions.
- POST /v1/responses: Generates responses.
- POST /v1/audio/transcriptions: Generates transcriptions from audio.
- GET /v1/models: Lists available models for 3rd party tools.
Requires FastAPI and Uvicorn to be installed.
"""
@asynccontextmanager
async def lifespan(app: FastAPI):
yield
for model in self.loaded_models.values():
model.delete_model()
if self.running_continuous_batching_manager is not None:
self.running_continuous_batching_manager.stop(block=True, timeout=5)
app = FastAPI(lifespan=lifespan)
if self.enable_cors:
app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_credentials=True, allow_methods=['*'], allow_headers=['*'])
logger.warning_once('CORS allow origin is set to `*`. This is not recommended for production environments.')
from fastapi import Request
@app.post('/v1/chat/completions')
def chat_completion(request: Request, body: dict):
self.validate_chat_completion_request(request=body)
if self.use_continuous_batching:
output = self.continuous_batching_chat_completion(body, request.state.request_id)
else:
output = self.generate_chat_completion(body)
return StreamingResponse(output, media_type='text/event-stream')
@app.post('/v1/responses')
def responses(request: dict):
self.validate_response_request(request=request)
output = self.generate_response(request)
return StreamingResponse(output, media_type='text/event-stream')
@app.post('/v1/audio/transcriptions')
async def audio_transcriptions(request: Request):
async with request.form() as form:
parsed_request = TransformersTranscriptionCreateParams(file=await form['file'].read(), model=form['model'])
logger.debug(f"Received file: {form['file'].filename}; MIME type: {form['file'].content_type}; size: {form['file'].size / 1024:.2f} KiB")
self.validate_transcription_request(request=parsed_request)
output = self.generate_transcription(parsed_request)
return StreamingResponse(output, media_type='text/event-stream')
@app.options('/v1/models')
@app.get('/v1/models')
def get_all_models():
return JSONResponse({'object': 'list', 'data': self.get_gen_models()})
@app.get('/health')
def healthcheck():
return JSONResponse({'status': 'ok'})
@app.middleware('http')
async def get_or_set_request_id(request: Request, call_next):
request_id = request.headers.get(X_REQUEST_ID) or str(uuid.uuid4())
request.state.request_id = request_id
response = await call_next(request)
response.headers[X_REQUEST_ID] = request_id
return response
uvicorn.run(app, host=self.args.host, port=self.args.port, log_level=self.args.log_level)
@functools.cache
def get_gen_models(self) -> list[dict[str, any]]:
"""
This is by no means a limit to which models may be instantiated with `transformers serve`: any chat-based
model working with generate can work.
This is a limited list of models to ensure we have a discoverable /v1/models endpoint for third-party
integrations.
"""
models = ['Menlo/Jan-nano', 'Menlo/Jan-nano-128k', 'Qwen/Qwen2.5-0.5B-Instruct', 'Qwen/Qwen2.5-3B-Instruct', 'Qwen/Qwen2.5-7B-Instruct', 'Qwen/Qwen2.5-14B-Instruct', 'meta-llama/Llama-3.1-8B-Instruct', 'meta-llama/Llama-3.2-1B-Instruct', 'meta-llama/Llama-3.3-70B-Instruct', 'HuggingFaceTB/SmolVLM-Instruct', 'ibm-granite/granite-vision-3.2-2b', 'Qwen/Qwen2.5-VL-7B-Instruct']
if HF_HUB_OFFLINE:
return [{'id': model, 'object': 'model', 'created': datetime.datetime.now().timestamp(), 'owned_by': model.split('/')[0]} for model in models]
else:
model_infos = [model_info(model) for model in models]
return [{'id': model.id, 'object': 'model', 'created': model.created_at.timestamp(), 'owned_by': model.author} for model in model_infos]
def continuous_batching_chat_completion(self, req: dict, request_id: str) -> AsyncGenerator[str, None]:
"""
Generates an OpenAI Chat Completion using continuous batching.
Args:
req (`dict`): The request to generate an OpenAI Chat Completion for.
Returns:
`Generator[str, None, None]`: A generator that yields the OpenAI Chat Completion chunks.
"""
model_id_and_revision = self.process_model_name(req['model'])
must_discard_cache = model_id_and_revision != self.last_model
self.last_model = model_id_and_revision
if must_discard_cache:
if self.running_continuous_batching_manager is not None:
self.running_continuous_batching_manager.stop(block=True, timeout=2)
self.running_continuous_batching_manager = None
model, processor = self.load_model_and_processor(model_id_and_revision)
tokenizer = processor.tokenizer if hasattr(processor, 'tokenizer') else processor
generation_config = create_generation_config_from_req(req, model_generation_config=model.generation_config, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, use_cache=False, do_sample=False, scheduler='fifo')
if self.running_continuous_batching_manager is None:
self.running_continuous_batching_manager = model.init_continuous_batching(generation_config=generation_config, streaming=True)
self.running_continuous_batching_manager.logit_processor = LogitsProcessorList()
self.running_continuous_batching_manager.start()
inputs = processor.apply_chat_template(req['messages'], return_tensors='pt', add_generation_prompt=True).to(model.device)
def stream_chat_completion(request_id, decode_stream):
try:
yield self.build_chat_completion_chunk(request_id, role='assistant', model=model_id_and_revision)
for result in self.running_continuous_batching_manager.request_id_iter(request_id):
if result.status == RequestStatus.FINISHED:
yield self.build_chat_completion_chunk(request_id, finish_reason='stop', model=model_id_and_revision)
break
else:
yield self.build_chat_completion_chunk(request_id=request_id, content=result.generated_tokens[-1], model=model_id_and_revision, decode_stream=decode_stream, tokenizer=tokenizer)
except Exception as e:
logger.error(str(e))
self.running_continuous_batching_manager.cancel_request(request_id)
yield f'data: {{"error": "{str(e)}"}}'
async def cancellation_wrapper(_inputs, request_id):
try:
decode_stream = DecodeStream(_inputs.tolist(), False)
request_id = self.running_continuous_batching_manager.add_request(_inputs, request_id=request_id, max_new_tokens=generation_config.max_new_tokens)
for chunk in stream_chat_completion(request_id, decode_stream):
yield chunk
await asyncio.sleep(0)
except asyncio.CancelledError:
self.running_continuous_batching_manager.cancel_request(request_id)
logger.warning(f'Request {request_id} was cancelled.')
return cancellation_wrapper(inputs[0], request_id)
@staticmethod
def get_model_modality(model: 'PreTrainedModel') -> Modality:
model_classname = model.__class__.__name__
if model_classname in MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES.values():
modality = Modality.VLM
elif model_classname in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
modality = Modality.LLM
else:
raise ValueError(f'Unknown modality: {model_classname}')
return modality
@staticmethod
def get_processor_inputs_from_inbound_messages(messages, modality: Modality):
processor_inputs = []
for message in messages:
parsed_message = {'role': message['role'], 'content': []}
if modality == Modality.LLM:
if isinstance(message['content'], str):
parsed_content = message['content']
elif isinstance(message['content'], list):
parsed_content = []
for content in message['content']:
if content['type'] == 'text':
parsed_content.append(content['text'])
parsed_content = ' '.join(parsed_content)
parsed_message['content'] = parsed_content
elif modality == Modality.VLM:
if isinstance(message['content'], str):
parsed_message['content'].append({'type': 'text', 'text': message['content']})
else:
for content in message['content']:
if content['type'] == 'text':
parsed_message['content'].append(content)
elif content['type'] == 'image_url':
if 'base64' in content['image_url']['url']:
image_data = re.sub('^data:image/.+;base64,', '', content['image_url']['url'])
image = Image.open(BytesIO(base64.b64decode(image_data)))
file = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
url = file.name
image.save(file.name)
else:
url = content['image_url']['url']
parsed_message['content'].append({'type': 'image', 'url': url})
processor_inputs.append(parsed_message)
return processor_inputs
def generate_chat_completion(self, req: dict) -> Generator[str, None, None]:
"""
Generates an OpenAI Chat Completion using `generate`.
Args:
req (`dict`): The request to generate an OpenAI Chat Completion for.
Returns:
`Generator[str, None, None]`: A generator that yields the OpenAI Chat Completion chunks.
"""
if self.args.force_model is not None:
req['model'] = self.args.force_model
messages: Iterable[ChatCompletionMessageParam] = req['messages']
if messages[-1]['role'] == 'assistant':
return
model_id_and_revision = self.process_model_name(req['model'])
must_discard_cache = model_id_and_revision != self.last_model
self.last_model = model_id_and_revision
model, processor = self.load_model_and_processor(model_id_and_revision)
modality = self.get_model_modality(model)
processor_inputs = self.get_processor_inputs_from_inbound_messages(messages, modality)
tool_model_family = None
for supported_model_families in _MODELS_WITH_TOOL_SUPPORT:
if supported_model_families in model.config.architectures[0].lower():
tool_model_family = supported_model_families
break
inputs = processor.apply_chat_template(processor_inputs, add_generation_prompt=True, tools=req.get('tools'), return_tensors='pt', return_dict=True, tokenize=True)
inputs = inputs.to(model.device)
request_id = req.get('request_id', 'req_0')
skip_special_tokens = True
if 'gptoss' in model.config.architectures[0].lower():
skip_special_tokens = False
generation_streamer = TextIteratorStreamer(processor, skip_special_tokens=skip_special_tokens, skip_prompt=True)
generation_config = create_generation_config_from_req(req, model_generation_config=model.generation_config)
last_kv_cache = None
if self.is_continuation(req) and (not must_discard_cache):
last_kv_cache = self.last_kv_cache
generation_kwargs = {**inputs, 'streamer': generation_streamer, 'generation_config': generation_config, 'return_dict_in_generate': True, 'past_key_values': last_kv_cache}
def stream_chat_completion(streamer, _request_id):
filter_cot = False
cot_trace_end = None
if 'gptoss' in model.config.architectures[0].lower():
filter_cot = True
cot_trace_end = '<|channel|>final<|message|>'
def generate_with_cache(**kwargs):
generate_output = model.generate(**kwargs)
self.last_kv_cache = generate_output.past_key_values
thread = Thread(target=generate_with_cache, kwargs=generation_kwargs)
results = ''
try:
thread.start()
tool_state = ToolState()
yield self.build_chat_completion_chunk(request_id, role='assistant', model=model_id_and_revision)
for result in streamer:
if 'gptoss' in model.config.architectures[0].lower():
if result.endswith('<|return|>'):
result = result[:-len('<|return|>')]
results += result
if filter_cot:
if cot_trace_end in results:
filter_cot = False
continue
else:
continue
if tool_model_family is not None:
if result.strip() == _TOOL_CALL_TOKENS[tool_model_family]['start']:
tool_state.inside_tool_call = True
continue
if result.strip() == _TOOL_CALL_TOKENS[tool_model_family]['end']:
tool_state.reset()
yield self.build_chat_completion_chunk(request_id=_request_id, role=None, finish_reason='tool_calls', model=model_id_and_revision)
continue
if tool_state.inside_tool_call:
tool_state.buffer += result
if not tool_state.has_tool_name_defined:
tool_name = re.search('\\"name\\": \\"(.*?)\\"', tool_state.buffer)
if tool_name is None:
continue
else:
tool_name = tool_name.group(1)
tool_state.has_tool_name_defined = True
tool = ChoiceDeltaToolCall(function=ChoiceDeltaToolCallFunction(name=tool_name), index=0, type='function', id=_request_id + '_tool_call')
else:
if result == '':
continue
if '"arguments": {' not in tool_state.buffer:
continue
tool_state.arg_nesting_level += result.count('{')
tool_state.arg_nesting_level -= result.count('}')
if tool_state.arg_nesting_level < 0:
result = ''.join(result.split('}')[:-2]) + '}'
tool = ChoiceDeltaToolCall(function=ChoiceDeltaToolCallFunction(arguments=result), index=0, type='function')
yield self.build_chat_completion_chunk(request_id=_request_id, role=None, tool_calls=[tool], model=model_id_and_revision)
continue
if result != '':
yield self.build_chat_completion_chunk(_request_id, content=result, model=model_id_and_revision)
yield self.build_chat_completion_chunk(_request_id, finish_reason='stop', model=model_id_and_revision)
thread.join()
except Exception as e:
logger.error(str(e))
yield f'data: {{"error": "{str(e)}"}}'
finally:
thread.join()
return stream_chat_completion(generation_streamer, request_id)
def generate_response(self, req: dict) -> Generator[str, None, None]:
"""
Generates an OpenAI Response using `generate`.
Args:
req (`dict`): The request to generate an OpenAI Response for.
Returns:
`Generator[str, None, None]`: A generator that yields the OpenAI Response events.
"""
model_id_and_revision = self.process_model_name(req['model'])
must_discard_cache = model_id_and_revision != self.last_model
self.last_model = model_id_and_revision
model, processor = self.load_model_and_processor(model_id_and_revision)
if isinstance(req['input'], str):
inputs = [{'role': 'system', 'content': req['instructions']}] if 'instructions' in req else []
inputs.append({'role': 'user', 'content': req['input']})
elif isinstance(req['input'], list):
if 'instructions' in req:
if req['input'][0]['role'] != 'system':
inputs = [{'role': 'system', 'content': req['instructions']}, *req['input']]
else:
inputs = req['input']
inputs[0]['content'] = req['instructions']
else:
inputs = req['input']
elif isinstance(req['input'], dict):
inputs = [{'role': 'system', 'content': req['instructions']}] if 'instructions' in req else []
inputs.append(req['input'])
else:
raise ValueError('inputs should be a list, dict, or str')
inputs = processor.apply_chat_template(inputs, add_generation_prompt=True, return_tensors='pt')
inputs = inputs.to(model.device)
request_id = req.get('previous_response_id', 'req_0')
skip_special_tokens = True
if 'gptoss' in model.config.architectures[0].lower():
skip_special_tokens = False
generation_streamer = TextIteratorStreamer(processor, skip_special_tokens=skip_special_tokens, skip_prompt=True)
generation_config = create_generation_config_from_req(req, model_generation_config=model.generation_config)
last_kv_cache = None
if self.is_continuation(req) and (not must_discard_cache):
last_kv_cache = self.last_kv_cache
generation_kwargs = {'inputs': inputs, 'attention_mask': torch.ones_like(inputs), 'streamer': generation_streamer, 'generation_config': generation_config, 'return_dict_in_generate': True, 'past_key_values': last_kv_cache}
def stream_response(streamer, _request_id):
filter_cot = False
cot_trace_end = None
if 'gptoss' in model.config.architectures[0].lower():
filter_cot = True
cot_trace_end = '<|channel|>final<|message|>'
def generate_with_cache(**kwargs):
generate_output = model.generate(**kwargs)
self.last_kv_cache = generate_output.past_key_values
thread = Thread(target=generate_with_cache, kwargs=generation_kwargs)
sequence_number = 0
output_index = 0
content_index = 0
try:
thread.start()
created_at = time.time()
response_created = ResponseCreatedEvent(type='response.created', sequence_number=sequence_number, response=Response(id=f'resp_{request_id}', created_at=created_at, status='queued', model=model_id_and_revision, instructions=req.get('instructions'), text={'format': {'type': 'text'}}, object='response', tools=[], output=[], parallel_tool_calls=req.get('parallel_tool_calls', False), tool_choice='auto', metadata=req.get('metadata')))
sequence_number += 1
yield self.build_response_event(response_created)
response_in_progress = ResponseInProgressEvent(type='response.in_progress', sequence_number=sequence_number, response=Response(id=f'resp_{request_id}', created_at=created_at, status='in_progress', model=model_id_and_revision, instructions=req.get('instructions'), text={'format': {'type': 'text'}}, object='response', tools=[], output=[], parallel_tool_calls=req.get('parallel_tool_calls', False), tool_choice='auto', metadata=req.get('metadata')))
sequence_number += 1
yield self.build_response_event(response_in_progress)
response_output_item_added = ResponseOutputItemAddedEvent(type='response.output_item.added', sequence_number=sequence_number, output_index=output_index, item=ResponseOutputMessage(id=f'msg_{request_id}', type='message', status='in_progress', role='assistant', content=[]))
sequence_number += 1
yield self.build_response_event(response_output_item_added)
response_content_part_added = ResponseContentPartAddedEvent(type='response.content_part.added', item_id=f'msg_{request_id}', sequence_number=sequence_number, output_index=output_index, content_index=content_index, part=ResponseOutputText(type='output_text', text='', annotations=[]))
sequence_number += 1
yield self.build_response_event(response_content_part_added)
results = ''
for result in streamer:
if 'gptoss' in model.config.architectures[0].lower():
if result.endswith('<|return|>'):
result = result[:-len('<|return|>')]
results += result
if filter_cot:
if cot_trace_end in results:
filter_cot = False
results = ''
continue
else:
continue
response_output_text_delta = ResponseTextDeltaEvent(type='response.output_text.delta', item_id=f'msg_{request_id}', sequence_number=sequence_number, output_index=output_index, content_index=content_index, delta=result, logprobs=[{'token': '', 'logprob': 99.9}])
sequence_number += 1
yield self.build_response_event(response_output_text_delta)
response_output_text_done = ResponseTextDoneEvent(type='response.output_text.done', item_id=f'msg_{request_id}', sequence_number=sequence_number, output_index=output_index, content_index=0, text=results, logprobs=[{'token': '', 'logprob': 99.9}])
sequence_number += 1
yield self.build_response_event(response_output_text_done)
response_content_part_done = ResponseContentPartDoneEvent(type='response.content_part.done', item_id=f'msg_{request_id}', sequence_number=sequence_number, output_index=output_index, content_index=content_index, part=ResponseOutputText(type='output_text', text=response_output_text_done.text, annotations=[]))
sequence_number += 1
content_index += 1
yield self.build_response_event(response_content_part_done)
response_output_item_done = ResponseOutputItemDoneEvent(type='response.output_item.done', sequence_number=sequence_number, output_index=output_index, item=ResponseOutputMessage(id=f'msg_{request_id}', type='message', status='completed', role='assistant', content=[response_content_part_done.part], annotations=[]))
sequence_number += 1
output_index += 1
yield self.build_response_event(response_output_item_done)
response_completed = ResponseCompletedEvent(type='response.completed', sequence_number=sequence_number, response=Response(id=f'resp_{request_id}', created_at=created_at, status='completed', model=model_id_and_revision, instructions=req.get('instructions'), text={'format': {'type': 'text'}}, output=[response_output_item_done.item], object='response', tools=[], parallel_tool_calls=req.get('parallel_tool_calls', False), tool_choice='auto', metadata=req.get('metadata')))
sequence_number += 1
yield self.build_response_event(response_completed)
thread.join()
except Exception as e:
logger.error(f'Exception in response generation: {str(e)}')
error_event = ResponseErrorEvent(type='error', sequence_number=sequence_number, message=str(e))
sequence_number += 1
yield self.build_response_event(error_event)
response_failed = ResponseFailedEvent(type='response.failed', sequence_number=sequence_number, response=Response(id=f'resp_{request_id}', created_at=created_at, status='failed', model=model_id_and_revision, instructions=req.get('instructions'), text={'format': {'type': 'text'}}, output=[], object='response', tools=[], parallel_tool_calls=False, tool_choice='auto', metadata=req.get('metadata'), error=ResponseError(code='server_error', message=str(e))))
sequence_number += 1
yield self.build_response_event(response_failed)
finally:
thread.join()
return stream_response(generation_streamer, request_id)
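# --- Editor's note (illustrative; not part of the original method) ---
# `generate_response` first normalizes `instructions` + `input` into a chat-template-ready
# message list. For a plain string input with instructions, the normalized form is:
#
#     [
#         {"role": "system", "content": req["instructions"]},
#         {"role": "user", "content": req["input"]},
#     ]
#
# which is then tokenized via `processor.apply_chat_template(..., add_generation_prompt=True)`.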
def generate_transcription(self, req: dict) -> Generator[str, None, None]:
"""
Generates an OpenAI Transcription using the audio file.
Args:
req (`dict`): The request containing the audio file and model information.
Returns:
`Generator[str, None, None]`: A generator that yields the transcription result.
"""
if not is_librosa_available():
raise ImportError('Missing librosa dependency for audio transcription. Please install with `pip install librosa`')
model_id_and_revision = self.process_model_name(req['model'])
audio_model, audio_processor = self.load_audio_model_and_processor(model_id_and_revision)
generation_streamer = TextIteratorStreamer(audio_processor.tokenizer, skip_special_tokens=True, skip_prompt=True)
generation_config = create_generation_config_from_req(req, model_generation_config=audio_model.generation_config)
model_sampling_rate = audio_processor.feature_extractor.sampling_rate
audio_bytes = io.BytesIO(req['file'])
audio_array, _ = librosa.load(audio_bytes, sr=model_sampling_rate, mono=True)
audio_inputs = audio_processor(audio_array, sampling_rate=model_sampling_rate, return_tensors='pt').to(audio_model.device)
audio_inputs['input_features'] = audio_inputs['input_features'].to(audio_model.dtype)
generation_kwargs = {'streamer': generation_streamer, 'generation_config': generation_config, 'return_dict_in_generate': True}
def _generate_transcription():
generated_ids = audio_model.generate(**audio_inputs, **generation_kwargs)
transcription_text = audio_processor.batch_decode(generated_ids.sequences, skip_special_tokens=True)[0]
transcription = Transcription(text=transcription_text)
yield f'{transcription.model_dump_json(exclude_none=True)}'
return _generate_transcription()
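# --- Editor's note (illustrative; not part of the original method) ---
# The transcription path resamples arbitrary audio to the processor's expected sampling rate
# before feature extraction. A minimal standalone version of that step (assumes a local
# "sample.wav" file and an installed `librosa`):
#
#     import io
#     import librosa
#
#     target_sr = 16000  # e.g. processor.feature_extractor.sampling_rate for Whisper-style models
#     with open("sample.wav", "rb") as f:
#         audio_array, _ = librosa.load(io.BytesIO(f.read()), sr=target_sr, mono=True)
#     # `audio_array` is a mono waveform at `target_sr`, ready for the audio processor.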
def is_continuation(self, req: dict) -> bool:
"""
Determines whether the current request is a continuation of the last request. In other words, if it is the
same chat session.
Args:
req (`dict`): The request to check.
Returns:
`True` if the request is a continuation of the last request, `False` otherwise.
"""
messages = req.get('messages') or req.get('input')
req_continues_last_messages = True
if self.last_messages is None:
req_continues_last_messages = False
elif len(self.last_messages) >= len(messages):
req_continues_last_messages = False
else:
for i in range(len(self.last_messages)):
if self.last_messages[i] != messages[i]:
req_continues_last_messages = False
break
self.last_messages = messages
return req_continues_last_messages
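# --- Editor's note (illustrative; not part of the original method) ---
# A request counts as a continuation only when the previously seen messages are a strict
# prefix of the new ones. A standalone sketch of that check:
#
#     last = [{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}]
#     new = last + [{"role": "user", "content": "how are you?"}]
#     same_session = len(last) < len(new) and all(last[i] == new[i] for i in range(len(last)))
#     # -> True; appending messages continues the session, while editing the history starts a new one.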
@staticmethod
def get_quantization_config(args: ServeArguments) -> Optional['BitsAndBytesConfig']:
"""
Returns the quantization config for the given CLI arguments.
Args:
args (`ServeArguments`): The serve arguments. May contain quantization settings, device, etc.
Returns:
`Optional[BitsAndBytesConfig]`: The quantization config.
"""
if args.load_in_4bit:
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=args.dtype, bnb_4bit_quant_type=args.bnb_4bit_quant_type, bnb_4bit_use_double_quant=args.use_bnb_nested_quant, bnb_4bit_quant_storage=args.dtype)
elif args.load_in_8bit:
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
else:
quantization_config = None
return quantization_config
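# --- Editor's note (illustrative; not part of the original method) ---
# With `--load_in_4bit`, the method above builds a bitsandbytes config from the CLI arguments.
# A hedged, standalone equivalent (the concrete values below are example assumptions):
#
#     from transformers import BitsAndBytesConfig
#
#     quantization_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_compute_dtype="bfloat16",   # mirrors `args.dtype`
#         bnb_4bit_quant_type="nf4",           # mirrors `args.bnb_4bit_quant_type`
#         bnb_4bit_use_double_quant=True,      # mirrors `args.use_bnb_nested_quant`
#     )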
def process_model_name(self, model_id: str) -> str:
"""
Applies the `force_model` CLI argument and canonicalizes the model name to the format "model_id@revision".
If the model_id DOESN'T contain an @, it defaults to "model_id@main".
Args:
model_id (`str`): The model ID.
Returns:
`str`: The canonicalized model name to be used
"""
if self.args.force_model is not None:
model_id = self.args.force_model
if '@' in model_id:
return model_id
return f'{model_id}@main'
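# --- Editor's note (illustrative; not part of the original method) ---
# Canonicalization examples for `process_model_name` (assuming `--force_model` is unset):
#
#     "meta-llama/Llama-3.2-1B-Instruct"            -> "meta-llama/Llama-3.2-1B-Instruct@main"
#     "meta-llama/Llama-3.2-1B-Instruct@refs/pr/7"  -> returned unchanged (already "model_id@revision")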
def _load_model_and_data_processor(self, model_id_and_revision: str):
"""
Generic method to load a model and a data processor from a model ID and revision, making use of the serve CLI
arguments.
Args:
model_id_and_revision (`str`):
The model ID and revision to load.
Returns:
`tuple[PreTrainedModel, Union[ProcessorMixin, PreTrainedTokenizerFast]]`: The loaded model and
data processor (tokenizer, audio processor, etc.).
"""
args = self.args
logger.info(f'Loading {model_id_and_revision}')
if '@' in model_id_and_revision:
model_id, revision = model_id_and_revision.split('@', 1)
else:
model_id, revision = (model_id_and_revision, 'main')
data_processor = AutoProcessor.from_pretrained(model_id, revision=revision, trust_remote_code=args.trust_remote_code)
dtype = args.dtype if args.dtype in ['auto', None] else getattr(torch, args.dtype)
quantization_config = self.get_quantization_config(args)
model_kwargs = {'revision': revision, 'attn_implementation': args.attn_implementation, 'dtype': dtype, 'device_map': 'auto', 'trust_remote_code': args.trust_remote_code}
if quantization_config is not None:
model_kwargs['quantization_config'] = quantization_config
config = AutoConfig.from_pretrained(model_id, **model_kwargs)
architecture = getattr(transformers, config.architectures[0])
model = architecture.from_pretrained(model_id, **model_kwargs)
if getattr(model, 'hf_device_map', None) is None:
model = model.to(args.device)
has_default_max_length = model.generation_config.max_new_tokens is None and model.generation_config.max_length == 20
has_short_max_new_tokens = model.generation_config.max_new_tokens is not None and model.generation_config.max_new_tokens < 1024
if has_default_max_length or has_short_max_new_tokens:
model.generation_config.max_new_tokens = 1024
logger.info(f'Loaded model {model_id_and_revision}')
return (model, data_processor)
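# --- Editor's note (illustrative; not part of the original method) ---
# `args.dtype` is resolved to a real torch dtype unless it is "auto" or None, mirroring the
# dtype line above:
#
#     import torch
#     dtype_arg = "bfloat16"
#     resolved = dtype_arg if dtype_arg in ["auto", None] else getattr(torch, dtype_arg)
#     # resolved -> torch.bfloat16; "auto" is forwarded as-is for from_pretrained to decide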
def load_model_and_processor(self, model_id_and_revision: str) -> tuple['PreTrainedModel', PreTrainedTokenizerFast]:
"""
Loads the text model and processor from the given model ID and revision into the ServeCommand instance.
Args:
model_id_and_revision (`str`):
The model ID and revision to load.
Returns:
`tuple[PreTrainedModel, PreTrainedTokenizerFast]`: The loaded text model and processor.
"""
if model_id_and_revision not in self.loaded_models or self.loaded_models[model_id_and_revision].is_deleted():
model, processor = self._load_model_and_data_processor(model_id_and_revision)
self.loaded_models[model_id_and_revision] = TimedModel(model, timeout_seconds=self.args.model_timeout, processor=processor)
else:
self.loaded_models[model_id_and_revision].reset_timer()
model = self.loaded_models[model_id_and_revision].model
processor = self.loaded_models[model_id_and_revision].processor
return (model, processor)
def load_audio_model_and_processor(self, model_id_and_revision: str) -> tuple['PreTrainedModel', ProcessorMixin]:
"""
Loads the audio model and processor from the given model ID and revision into the ServeCommand instance.
Args:
model_id_and_revision (`str`):
The model ID and revision to load.
Returns:
`tuple[PreTrainedModel, ProcessorMixin]`: The loaded audio model and processor.
"""
if model_id_and_revision not in self.loaded_models or self.loaded_models[model_id_and_revision].is_deleted():
audio_model, audio_processor = self._load_model_and_data_processor(model_id_and_revision)
self.loaded_models[model_id_and_revision] = TimedModel(audio_model, timeout_seconds=self.args.model_timeout, processor=audio_processor)
else:
self.loaded_models[model_id_and_revision].reset_timer()
audio_model = self.loaded_models[model_id_and_revision].model
audio_processor = self.loaded_models[model_id_and_revision].processor
return (audio_model, audio_processor)
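# --- Editor's illustrative sketch (hypothetical helper names; not part of the original file) ---
# The three loaders above share one pattern: canonicalize the requested model to
# "model_id@revision", reuse a cached instance while its idle timeout has not expired,
# and reload it otherwise. A minimal, framework-free version of that cache:
import time
from typing import Any, Callable

_example_cache: dict[str, tuple[Any, float]] = {}
_EXAMPLE_TIMEOUT_SECONDS = 300.0  # assumed default, in the spirit of the `model_timeout` CLI argument

def _example_canonicalize(model_id: str) -> str:
    # "org/model" -> "org/model@main"; "org/model@rev" is returned unchanged
    return model_id if "@" in model_id else f"{model_id}@main"

def _example_get_or_load(model_id: str, load_fn: Callable[[str], Any]) -> Any:
    key = _example_canonicalize(model_id)
    entry = _example_cache.get(key)
    if entry is not None and time.monotonic() - entry[1] < _EXAMPLE_TIMEOUT_SECONDS:
        model = entry[0]  # still warm: reuse it
    else:
        model = load_fn(key)  # expired or never loaded: load again
    _example_cache[key] = (model, time.monotonic())  # refresh the idle timer on every access
    return model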
|
class ServeCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
'''
Register this command to argparse so it's available for the transformer-cli
Args:
parser: Root parser to register command-specific arguments
'''
pass
def __init__(self, args: ServeArguments):
pass
def _validate_request(self, request: dict, schema: '_TypedDictMeta', validator: 'TypeAdapter', unused_fields: set):
'''
Validates the request against the schema, and checks for unexpected keys.
Args:
request (`dict`):
The request to validate.
schema (`_TypedDictMeta`):
The schema of the request to validate. It is a `TypedDict` definition.
validator (`TypeAdapter`):
The validator to use to validate the request. Built from `schema`.
unused_fields (`set`):
Fields accepted by `schema`, but not used in `transformers serve`.
Raises:
HTTPException: If the request is invalid or contains unexpected or unused fields.
'''
pass
def validate_response_request(self, request: dict):
pass
def validate_chat_completion_request(self, request: dict):
pass
def validate_transcription_request(self, request: dict):
pass
def build_chat_completion_chunk(self, request_id: Optional[str]='', content: Optional[int]=None, model: Optional[str]=None, role: Optional[str]=None, finish_reason: Optional[str]=None, tool_calls: Optional[list['ChoiceDeltaToolCall']]=None, decode_stream: Optional[DecodeStream]=None, tokenizer: Optional[PreTrainedTokenizerFast]=None) -> str:
'''
Builds a chunk of a streaming OpenAI Chat Completion response.
IMPORTANT: The serialized chunk won't contain empty fields (fields with `None`). Some downstream apps,
like Cursor, assume that when the field exists, it has data.
Args:
request_id (`str`):
The request ID.
content (`str`, *optional*):
Content of the response from the model.
model (`str`, *optional*):
The model that generated the content.
role (`str`, *optional*):
The role of the next content, until a new role is defined.
finish_reason (`str`, *optional*):
The reason the generation by the model has finished.
tool_calls (`list[ChoiceDeltaToolCall]`, *optional*):
Data about the tool calls, when they are triggered.
Returns:
`str`: The built chunk, a string containing a JSON string with the payload.
'''
pass
def build_response_event(self, response: 'BaseModel') -> str:
'''
        Builds an event of a streaming OpenAI Response.
IMPORTANT: The serialized chunk won't contain empty fields (fields with `None`). Some downstream apps,
like Cursor, assume that when the field exists, it has data.
Args:
response (`BaseModel`):
The response to build an event from. One of the multiple OpenAI Response output types
Returns:
`str`: The built chunk, a string containing a JSON string with the payload.
'''
pass
def run(self):
'''
        Set up and run the FastAPI server for `transformers serve`.
Models will be loaded and unloaded automatically based on usage and a timeout.
The server will expose the following endpoints:
- POST /v1/chat/completions: Generates chat completions.
- POST /v1/responses: Generates responses.
- POST /v1/audio/transcriptions: Generates transcriptions from audio.
- GET /v1/models: Lists available models for 3rd party tools.
Requires FastAPI and Uvicorn to be installed.
'''
pass
@asynccontextmanager
async def lifespan(app: FastAPI):
pass
@app.post('/v1/chat/completions')
def chat_completion(request: Request, body: dict):
pass
@app.post('/v1/responses')
def responses(request: dict):
pass
@app.post('/v1/audio/transcriptions')
async def audio_transcriptions(request: Request):
pass
@app.options('/v1/models')
@app.get('/v1/models')
def get_all_models():
pass
@app.get('/health')
def healthcheck():
pass
@app.middleware('http')
async def get_or_set_request_id(request: Request, call_next):
pass
@functools.cache
def get_gen_models(self) -> list[dict[str, any]]:
'''
This is by no means a limit to which models may be instantiated with `transformers serve`: any chat-based
model working with generate can work.
This is a limited list of models to ensure we have a discoverable /v1/models endpoint for third-party
integrations.
'''
pass
def continuous_batching_chat_completion(self, req: dict, request_id: str) -> AsyncGenerator[str, None]:
'''
Generates an OpenAI Chat Completion using continuous batching.
Args:
req (`dict`): The request to generate an OpenAI Chat Completion for.
Returns:
`Generator[str, None, None]`: A generator that yields the OpenAI Chat Completion chunks.
'''
pass
def stream_chat_completion(request_id, decode_stream):
pass
async def cancellation_wrapper(_inputs, request_id):
pass
@staticmethod
def get_model_modality(model: 'PreTrainedModel') -> Modality:
pass
@staticmethod
def get_processor_inputs_from_inbound_messages(messages, modality: Modality):
pass
def generate_chat_completion(self, req: dict) -> Generator[str, None, None]:
'''
Generates an OpenAI Chat Completion using `generate`.
Args:
req (`dict`): The request to generate an OpenAI Chat Completion for.
Returns:
`Generator[str, None, None]`: A generator that yields the OpenAI Chat Completion chunks.
'''
pass
def stream_chat_completion(request_id, decode_stream):
pass
def generate_with_cache(**kwargs):
pass
def generate_response(self, req: dict) -> Generator[str, None, None]:
'''
Generates an OpenAI Response using `generate`.
Args:
req (`dict`): The request to generate an OpenAI Response for.
Returns:
`Generator[str, None, None]`: A generator that yields the OpenAI Response events.
'''
pass
def stream_response(streamer, _request_id):
pass
def generate_with_cache(**kwargs):
pass
def generate_transcription(self, req: dict) -> Generator[str, None, None]:
'''
Generates an OpenAI Transcription using the audio file.
Args:
req (`dict`): The request containing the audio file and model information.
Returns:
`Generator[str, None, None]`: A generator that yields the transcription result.
'''
pass
def _generate_transcription():
pass
def is_continuation(self, req: dict) -> bool:
'''
Determines whether the current request is a continuation of the last request. In other words, if it is the
same chat session.
Args:
req (`dict`): The request to check.
Returns:
`True` if the request is a continuation of the last request, `False` otherwise.
'''
pass
@staticmethod
def get_quantization_config(args: ServeArguments) -> Optional['BitsAndBytesConfig']:
'''
Returns the quantization config for the given CLI arguments.
Args:
args (`ServeArguments`): The serve arguments. May contain quantization settings, device, etc.
Returns:
`Optional[BitsAndBytesConfig]`: The quantization config.
'''
pass
def process_model_name(self, model_id: str) -> str:
'''
Applies the `force_model` CLI argument and canonicalizes the model name to the format "model_id@revision".
If the model_id DOESN'T contain an @, it defaults to "model_id@main".
Args:
model_id (`str`): The model ID.
Returns:
`str`: The canonicalized model name to be used
'''
pass
def _load_model_and_data_processor(self, model_id_and_revision: str):
'''
Generic method to load a model and a data processor from a model ID and revision, making use of the serve CLI
arguments.
Args:
model_id_and_revision (`str`):
The model ID and revision to load.
Returns:
`tuple[PreTrainedModel, Union[ProcessorMixin, PreTrainedTokenizerFast]]`: The loaded model and
data processor (tokenizer, audio processor, etc.).
'''
pass
def load_model_and_processor(self, model_id_and_revision: str) -> tuple['PreTrainedModel', PreTrainedTokenizerFast]:
'''
Loads the text model and processor from the given model ID and revision into the ServeCommand instance.
Args:
model_id_and_revision (`str`):
The model ID and revision to load.
Returns:
`tuple[PreTrainedModel, PreTrainedTokenizerFast]`: The loaded text model and processor.
'''
pass
def load_audio_model_and_processor(self, model_id_and_revision: str) -> tuple['PreTrainedModel', ProcessorMixin]:
'''
Loads the audio model and processor from the given model ID and revision into the ServeCommand instance.
Args:
model_id_and_revision (`str`):
The model ID and revision to load.
Returns:
`tuple[PreTrainedModel, ProcessorMixin]`: The loaded audio model and processor.
'''
pass
| total_program_units: 50
| total_doc_str: 16
| AvgCountLine: 18
| AvgCountLineBlank: 1
| AvgCountLineCode: 15
| AvgCountLineComment: 3
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.19
| CountClassBase: 1
| CountClassCoupled: 11
| CountClassCoupledModified: 5
| CountClassDerived: 0
| CountDeclInstanceMethod: 6
| CountDeclInstanceVariable: 5
| CountDeclMethod: 7
| CountDeclMethodAll: 29
| CountLine: 137
| CountLineBlank: 13
| CountLineCode: 104
| CountLineCodeDecl: 27
| CountLineCodeExe: 90
| CountLineComment: 20
| CountStmt: 48
| CountStmtDecl: 18
| CountStmtExe: 40
| MaxCyclomatic: 3
| MaxInheritanceTree: 5
| MaxNesting: 2
| SumCyclomatic: 13
| id: 207
| repository_name: huggingface/pytorch-pretrained-BERT
| file_path: huggingface_pytorch-pretrained-BERT/src/transformers/configuration_utils.py
| class_name: transformers.configuration_utils.PretrainedConfig
|
import os
from .utils.generic import is_timm_config_dict
from .dynamic_module_utils import custom_object_save
import copy
import warnings
from .modeling_gguf_pytorch_utils import load_gguf_checkpoint
import json
from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union
from . import __version__
from .utils import CONFIG_NAME, PushToHubMixin, cached_file, copy_func, download_url, extract_commit_hash, is_remote_url, is_torch_available, logging
class PretrainedConfig(PushToHubMixin):
"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
<Tip>
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
</Tip>
Class attributes (overridden by derived classes):
- **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate
the correct object in [`~transformers.AutoConfig`].
- **has_no_defaults_at_init** (`bool`) -- Whether the config class can be initialized without providing input arguments.
      Some configurations require inputs to be defined at init and have no default values; these are usually
      (but not necessarily) composite configs, such as [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`],
      which have to be initialized from two or more configs of type [`~transformers.PretrainedConfig`].
- **keys_to_ignore_at_inference** (`list[str]`) -- A list of keys to ignore by default when looking at dictionary
outputs of the model during inference.
- **attribute_map** (`dict[str, str]`) -- A dict that maps model specific attribute names to the standardized
naming of attributes.
- **base_model_tp_plan** (`dict[str, Any]`) -- A dict that maps sub-modules FQNs of a base model to a tensor
parallel plan applied to the sub-module when `model.tensor_parallel` is called.
- **base_model_pp_plan** (`dict[str, tuple[list[str]]]`) -- A dict that maps child-modules of a base model to a
pipeline parallel plan that enables users to place the child-module on the appropriate device.
Common attributes (present in all subclasses):
- **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the
embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
- **hidden_size** (`int`) -- The hidden size of the model.
- **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the
model.
- **num_hidden_layers** (`int`) -- The number of blocks in the model.
<Tip warning={true}>
Setting parameters for sequence generation in the model config is deprecated. For backward compatibility, loading
some of them will still be possible, but attempting to overwrite them will throw an exception -- you should set
    them in a [`~transformers.GenerationConfig`]. Check the documentation of [`~transformers.GenerationConfig`] for more
information about the individual parameters.
</Tip>
    Args:
name_or_path (`str`, *optional*, defaults to `""`):
Store the string that was passed to [`PreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path`
if the configuration was created with such a method.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not the model should return all hidden-states.
output_attentions (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return all attentions.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.
is_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether to only use the decoder in an encoder-decoder architecture, otherwise it has no effect on
decoder-only or encoder-only architectures.
cross_attention_hidden_size (`bool`, *optional*):
The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
add_cross_attention (`bool`, *optional*, defaults to `False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
in `AUTO_MODELS_FOR_CAUSAL_LM`.
tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
prune_heads (`dict[int, list[int]]`, *optional*, defaults to `{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <
sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed
Forward Chunking work?](../glossary.html#feed-forward-chunking).
> Parameters for fine-tuning tasks
architectures (`list[str]`, *optional*):
Model architectures that can be used with the model pretrained weights.
finetuning_task (`str`, *optional*):
Name of the task used to fine-tune the model.
id2label (`dict[int, str]`, *optional*):
A map from index (for instance prediction index, or target index) to label.
label2id (`dict[str, int]`, *optional*):
A map from label to index for the model.
num_labels (`int`, *optional*):
Number of labels to use in the last layer added to the model, typically for a classification task.
task_specific_params (`dict[str, Any]`, *optional*):
Additional keyword arguments to store for the current task.
problem_type (`str`, *optional*):
Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
`"single_label_classification"` or `"multi_label_classification"`.
> Parameters linked to the tokenizer
tokenizer_class (`str`, *optional*):
The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the
model by default).
prefix (`str`, *optional*):
A specific prompt that should be added at the beginning of each text before calling the model.
bos_token_id (`int`, *optional*):
The id of the _beginning-of-stream_ token.
pad_token_id (`int`, *optional*):
The id of the _padding_ token.
eos_token_id (`int`, *optional*):
The id of the _end-of-stream_ token.
decoder_start_token_id (`int`, *optional*):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
sep_token_id (`int`, *optional*):
The id of the _separation_ token.
> PyTorch specific parameters
torchscript (`bool`, *optional*, defaults to `False`):
Whether or not the model should be used with Torchscript.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
            model has an output word embedding layer.
dtype (`str`, *optional*):
The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
(which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
`float16` weights.
"""
model_type: str = ''
base_config_key: str = ''
sub_configs: dict[str, type['PretrainedConfig']] = {}
has_no_defaults_at_init: bool = False
attribute_map: dict[str, str] = {}
base_model_tp_plan: Optional[dict[str, Any]] = None
base_model_pp_plan: Optional[dict[str, tuple[list[str]]]] = None
base_model_ep_plan: Optional[dict[str, tuple[list[str]]]] = None
_auto_class: Optional[str] = None
def __setattr__(self, key, value):
if key in super().__getattribute__('attribute_map'):
key = super().__getattribute__('attribute_map')[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != 'attribute_map' and key in super().__getattribute__('attribute_map'):
key = super().__getattribute__('attribute_map')[key]
return super().__getattribute__(key)
def __init__(self, *, output_hidden_states: bool=False, output_attentions: bool=False, return_dict: bool=True, torchscript: bool=False, dtype: Optional[Union[str, 'torch.dtype']]=None, pruned_heads: Optional[dict[int, list[int]]]=None, tie_word_embeddings: bool=True, chunk_size_feed_forward: int=0, is_encoder_decoder: bool=False, is_decoder: bool=False, cross_attention_hidden_size: Optional[int]=None, add_cross_attention: bool=False, tie_encoder_decoder: bool=False, architectures: Optional[list[str]]=None, finetuning_task: Optional[str]=None, id2label: Optional[dict[int, str]]=None, label2id: Optional[dict[str, int]]=None, num_labels: Optional[int]=None, task_specific_params: Optional[dict[str, Any]]=None, problem_type: Optional[str]=None, tokenizer_class: Optional[str]=None, prefix: Optional[str]=None, bos_token_id: Optional[int]=None, pad_token_id: Optional[int]=None, eos_token_id: Optional[int]=None, sep_token_id: Optional[int]=None, decoder_start_token_id: Optional[int]=None, **kwargs):
if label2id is not None and (not isinstance(label2id, dict)):
raise ValueError('Argument label2id should be a dictionary.')
if id2label is not None and (not isinstance(id2label, dict)):
raise ValueError('Argument id2label should be a dictionary.')
if num_labels is not None and id2label is not None and (len(id2label) != num_labels):
logger.warning(f'You passed `num_labels={num_labels}` which is incompatible to the `id2label` map of length `{len(id2label)}`.')
if problem_type is not None and problem_type not in ('regression', 'single_label_classification', 'multi_label_classification'):
raise ValueError(f"The config parameter `problem_type` was not understood: received {problem_type} but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid.")
if (torch_dtype := kwargs.pop('torch_dtype', None)) is not None:
dtype = dtype if dtype is not None else torch_dtype
if dtype is not None and isinstance(dtype, str) and is_torch_available():
import torch
dtype = getattr(torch, dtype)
self.return_dict = return_dict
self.output_hidden_states = output_hidden_states
self.torchscript = torchscript
self.dtype = dtype
self._output_attentions = output_attentions
self.pruned_heads = pruned_heads if pruned_heads is not None else {}
self.tie_word_embeddings = tie_word_embeddings
self.chunk_size_feed_forward = chunk_size_feed_forward
self.is_encoder_decoder = is_encoder_decoder
self.is_decoder = is_decoder
self.cross_attention_hidden_size = cross_attention_hidden_size
self.add_cross_attention = add_cross_attention
self.tie_encoder_decoder = tie_encoder_decoder
self.architectures = architectures
self.finetuning_task = finetuning_task
self.id2label = id2label
self.label2id = label2id
self.task_specific_params = task_specific_params
self.problem_type = problem_type
if self.id2label is None:
self._create_id_label_maps(num_labels if num_labels is not None else 2)
else:
self.id2label = {int(key): value for key, value in self.id2label.items()}
self.tokenizer_class = tokenizer_class
self.prefix = prefix
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.eos_token_id = eos_token_id
self.sep_token_id = sep_token_id
self.decoder_start_token_id = decoder_start_token_id
for parameter_name, default_value in self._get_global_generation_defaults().items():
setattr(self, parameter_name, kwargs.pop(parameter_name, default_value))
self._name_or_path = str(kwargs.pop('name_or_path', ''))
self._commit_hash = kwargs.pop('_commit_hash', None)
self._attn_implementation = kwargs.pop('attn_implementation', None)
self.transformers_version = kwargs.pop('transformers_version', None)
if kwargs.get('gradient_checkpointing', False):
warnings.warn('Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the `Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.')
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
def _create_id_label_maps(self, num_labels: int):
self.id2label = {i: f'LABEL_{i}' for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
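    # --- Editor's note (illustrative; not part of the original class) ---
    # For `num_labels=3`, the helper above produces:
    #
    #     id2label = {0: "LABEL_0", 1: "LABEL_1", 2: "LABEL_2"}
    #     label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}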
@property
def name_or_path(self) -> Optional[str]:
return getattr(self, '_name_or_path', None)
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value)
@property
def output_attentions(self):
"""
        `bool`: Whether or not the model should return all attentions.
"""
return self._output_attentions
@output_attentions.setter
def output_attentions(self, value: bool):
if value and self._attn_implementation is None:
self._attn_implementation = 'eager'
if value and self._attn_implementation != 'eager':
raise ValueError(f"The `output_attentions` attribute is not supported when using the `attn_implementation` set to {self._attn_implementation}. Please set it to 'eager' instead.")
self._output_attentions = value
@property
def use_return_dict(self) -> bool:
"""
`bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples.
"""
return self.return_dict and (not self.torchscript)
@property
def num_labels(self) -> int:
"""
`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if self.id2label is None or self.num_labels != num_labels:
self._create_id_label_maps(num_labels)
@property
def _attn_implementation(self):
return self._attn_implementation_internal
@_attn_implementation.setter
def _attn_implementation(self, value: Optional[Union[str, dict]]):
"""We set it recursively on the sub-configs as well"""
current_attn = getattr(self, '_attn_implementation', None)
attn_implementation = value if not isinstance(value, dict) else value.get('', current_attn)
self._attn_implementation_internal = attn_implementation
for subconfig_key in self.sub_configs:
subconfig = getattr(self, subconfig_key, None)
if subconfig is not None:
current_subconfig_attn = getattr(subconfig, '_attn_implementation', None)
sub_implementation = value if not isinstance(value, dict) else value.get(subconfig_key, current_subconfig_attn)
subconfig._attn_implementation = sub_implementation
@property
def torch_dtype(self):
logger.warning_once('`torch_dtype` is deprecated! Use `dtype` instead!')
return self.dtype
@torch_dtype.setter
def torch_dtype(self, value):
logger.warning_once('`torch_dtype` is deprecated! Use `dtype` instead!')
self.dtype = value
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):
"""
Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
[`~PretrainedConfig.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
self._set_token_in_kwargs(kwargs)
if os.path.isfile(save_directory):
raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')
non_default_generation_parameters = self._get_non_default_generation_parameters()
if len(non_default_generation_parameters) > 0:
warnings.warn(f'Some non-default generation parameters are set in the model config. These should go into either a) `model.generation_config` (as opposed to `model.config`); OR b) a GenerationConfig file (https://huggingface.co/docs/transformers/generation_strategies#save-a-custom-decoding-strategy-with-your-model).This warning will become an exception in the future.\nNon-default generation parameters: {str(non_default_generation_parameters)}', UserWarning)
os.makedirs(save_directory, exist_ok=True)
if push_to_hub:
commit_message = kwargs.pop('commit_message', None)
repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])
repo_id = self._create_repo(repo_id, **kwargs)
files_timestamps = self._get_files_timestamps(save_directory)
if 'transformers_weights' in self:
delattr(self, 'transformers_weights')
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f'Configuration saved in {output_config_file}')
if push_to_hub:
self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))
@staticmethod
def _set_token_in_kwargs(kwargs, token=None):
"""Temporary method to deal with `token` and `use_auth_token`.
        This method avoids having to apply the same changes in all model config classes that overwrite `from_pretrained`.
        `use_auth_token` needs to be cleaned up in a follow-up PR.
"""
if token is None:
token = kwargs.pop('token', None)
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if token is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
token = use_auth_token
if token is not None:
kwargs['token'] = token
@classmethod
def from_pretrained(cls: type[SpecificPretrainedConfigType], pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]]=None, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', **kwargs) -> SpecificPretrainedConfigType:
"""
Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a configuration file saved using the
[`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
- a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if
they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
the token generated when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
</Tip>
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
                If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
subfolder (`str`, *optional*, defaults to `""`):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
specify the folder name here.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
Examples:
```python
# We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained(
"google-bert/bert-base-uncased"
) # Download configuration from huggingface.co and cache.
config = BertConfig.from_pretrained(
"./test/saved_model/"
) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
config = BertConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained(
"google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
)
assert config.output_attentions == True
assert unused_kwargs == {"foo": False}
```"""
kwargs['cache_dir'] = cache_dir
kwargs['force_download'] = force_download
kwargs['local_files_only'] = local_files_only
kwargs['revision'] = revision
cls._set_token_in_kwargs(kwargs, token)
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if cls.base_config_key and cls.base_config_key in config_dict:
config_dict = config_dict[cls.base_config_key]
if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type):
for v in config_dict.values():
if isinstance(v, dict) and v.get('model_type') == cls.model_type:
config_dict = v
if config_dict['model_type'] != cls.model_type:
logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
[`PretrainedConfig`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
"""
cls._set_token_in_kwargs(kwargs)
original_kwargs = copy.deepcopy(kwargs)
config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
if config_dict is None:
return ({}, kwargs)
if '_commit_hash' in config_dict:
original_kwargs['_commit_hash'] = config_dict['_commit_hash']
if 'configuration_files' in config_dict:
configuration_file = get_configuration_file(config_dict['configuration_files'])
config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs)
return (config_dict, kwargs)
@classmethod
def _get_config_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:
cache_dir = kwargs.pop('cache_dir', None)
force_download = kwargs.pop('force_download', False)
resume_download = kwargs.pop('resume_download', None)
proxies = kwargs.pop('proxies', None)
token = kwargs.pop('token', None)
local_files_only = kwargs.pop('local_files_only', False)
revision = kwargs.pop('revision', None)
trust_remote_code = kwargs.pop('trust_remote_code', None)
subfolder = kwargs.pop('subfolder', '')
from_pipeline = kwargs.pop('_from_pipeline', None)
from_auto_class = kwargs.pop('_from_auto', False)
commit_hash = kwargs.pop('_commit_hash', None)
gguf_file = kwargs.get('gguf_file')
if trust_remote_code is True:
logger.warning('The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.')
user_agent = {'file_type': 'config', 'from_auto_class': from_auto_class}
if from_pipeline is not None:
user_agent['using_pipeline'] = from_pipeline
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
is_local = os.path.isdir(pretrained_model_name_or_path)
if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
resolved_config_file = pretrained_model_name_or_path
is_local = True
elif is_remote_url(pretrained_model_name_or_path):
configuration_file = pretrained_model_name_or_path if gguf_file is None else gguf_file
resolved_config_file = download_url(pretrained_model_name_or_path)
else:
configuration_file = kwargs.pop('_configuration_file', CONFIG_NAME) if gguf_file is None else gguf_file
try:
resolved_config_file = cached_file(pretrained_model_name_or_path, configuration_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash)
if resolved_config_file is None:
return (None, kwargs)
commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
except OSError:
raise
except Exception:
raise OSError(f"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a {configuration_file} file")
try:
if gguf_file:
config_dict = load_gguf_checkpoint(resolved_config_file, return_tensors=False)['config']
else:
config_dict = cls._dict_from_json_file(resolved_config_file)
config_dict['_commit_hash'] = commit_hash
except (json.JSONDecodeError, UnicodeDecodeError):
raise OSError(f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file.")
if is_local:
logger.info(f'loading configuration file {resolved_config_file}')
else:
logger.info(f'loading configuration file {configuration_file} from cache at {resolved_config_file}')
if 'model_type' not in config_dict and is_timm_config_dict(config_dict):
config_dict['model_type'] = 'timm_wrapper'
return (config_dict, kwargs)
@classmethod
def from_dict(cls: type[SpecificPretrainedConfigType], config_dict: dict[str, Any], **kwargs) -> SpecificPretrainedConfigType:
"""
Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
Args:
config_dict (`dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
kwargs (`dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
kwargs.pop('_from_auto', None)
kwargs.pop('_from_pipeline', None)
if '_commit_hash' in kwargs and '_commit_hash' in config_dict:
kwargs['_commit_hash'] = config_dict['_commit_hash']
if (torch_dtype := kwargs.pop('torch_dtype', None)) is not None:
logger.warning_once('`torch_dtype` is deprecated! Use `dtype` instead!')
kwargs['dtype'] = kwargs.get('dtype', torch_dtype)
config_dict['attn_implementation'] = kwargs.pop('attn_implementation', None)
config = cls(**config_dict)
if hasattr(config, 'pruned_heads'):
config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}
if 'num_labels' in kwargs and 'id2label' in kwargs:
num_labels = kwargs['num_labels']
id2label = kwargs['id2label'] if kwargs['id2label'] is not None else []
if len(id2label) != num_labels:
raise ValueError(f"You passed along `num_labels={num_labels}` with an incompatible id to label map: {kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove one of them.")
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
current_attr = getattr(config, key)
if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):
current_attr_updated = current_attr.to_dict()
current_attr_updated.update(value)
value = current_attr.__class__(**current_attr_updated)
setattr(config, key, value)
if key != 'dtype':
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f'Model config {config}')
if return_unused_kwargs:
return (config, kwargs)
else:
return config
@classmethod
def from_json_file(cls: type[SpecificPretrainedConfigType], json_file: Union[str, os.PathLike]) -> SpecificPretrainedConfigType:
"""
Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, encoding='utf-8') as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return isinstance(other, PretrainedConfig) and self.__dict__ == other.__dict__
def __repr__(self):
return f'{self.__class__.__name__} {self.to_json_string()}'
def __iter__(self):
yield from self.__dict__
def to_diff_dict(self) -> dict[str, Any]:
"""
Removes all attributes from the configuration that correspond to the default config attributes for
better readability, while always retaining the `config` attribute from the class. Serializes to a
Python dictionary.
Returns:
dict[str, Any]: Dictionary of all the attributes that make up this configuration instance.
"""
config_dict = self.to_dict()
default_config_dict = PretrainedConfig().to_dict()
class_config_dict = self.__class__().to_dict() if not self.has_no_defaults_at_init else {}
serializable_config_dict = {}
for key, value in config_dict.items():
if isinstance(getattr(self, key, None), PretrainedConfig) and key in class_config_dict and isinstance(class_config_dict[key], dict) or key in self.sub_configs:
diff = recursive_diff_dict(value, default_config_dict, config_obj=getattr(self, key, None))
if 'model_type' in value:
diff['model_type'] = value['model_type']
serializable_config_dict[key] = diff
elif key not in default_config_dict or key == 'transformers_version' or key == 'vocab_file' or (value != default_config_dict[key]) or (key in default_config_dict and value != class_config_dict.get(key, value)):
serializable_config_dict[key] = value
self._remove_keys_not_serialized(serializable_config_dict)
if '_name_or_path' in serializable_config_dict:
del serializable_config_dict['_name_or_path']
if hasattr(self, 'quantization_config'):
serializable_config_dict['quantization_config'] = self.quantization_config.to_dict() if not isinstance(self.quantization_config, dict) else self.quantization_config
self.dict_dtype_to_str(serializable_config_dict)
return serializable_config_dict
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, 'model_type'):
output['model_type'] = self.__class__.model_type
output['transformers_version'] = __version__
for key, value in output.items():
if isinstance(value, PretrainedConfig):
value = value.to_dict()
del value['transformers_version']
output[key] = value
self._remove_keys_not_serialized(output)
if hasattr(self, 'quantization_config'):
output['quantization_config'] = self.quantization_config.to_dict() if not isinstance(self.quantization_config, dict) else self.quantization_config
self.dict_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool=True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool=True):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON file.
"""
with open(json_file_path, 'w', encoding='utf-8') as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: dict[str, Any]):
"""
Updates attributes of this class with attributes from `config_dict`.
Args:
config_dict (`dict[str, Any]`): Dictionary of attributes that should be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
"""
Updates attributes of this class with attributes from `update_str`.
The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (`str`): String with attributes that should be updated for this class.
"""
d = dict((x.split('=') for x in update_str.split(',')))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ['true', '1', 'y', 'yes']:
v = True
elif v.lower() in ['false', '0', 'n', 'no']:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise TypeError(f'You can only update int, float, bool or string values in the config, got {v} for key {k}')
setattr(self, k, v)
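    # --- Editor's note (illustrative; not part of the original class) ---
    # `update_from_string` coerces each value to the type of the existing attribute, e.g.:
    #
    #     config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")
    #     # n_embd -> 10 (int), resid_pdrop -> 0.2 (float), scale_attn_weights -> False (bool)
    #     # a key that does not already exist on the config raises a ValueError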
def dict_dtype_to_str(self, d: dict[str, Any]) -> None:
"""
Checks whether the passed dictionary and its nested dicts have a *dtype* key and if it's not None,
        converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the *"float32"*
string, which can then be stored in the json format.
"""
if d.get('dtype') is not None:
if isinstance(d['dtype'], dict):
d['dtype'] = {k: str(v).split('.')[-1] for k, v in d['dtype'].items()}
elif not isinstance(d['dtype'], (str, int)):
d['dtype'] = str(d['dtype']).split('.')[1]
for value in d.values():
if isinstance(value, dict):
self.dict_dtype_to_str(value)
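    # --- Editor's note (illustrative; not part of the original class) ---
    # `dict_dtype_to_str` makes config dicts JSON-serializable by stringifying torch dtypes,
    # including inside nested sub-config dicts:
    #
    #     {"dtype": torch.float32, "text_config": {"dtype": torch.bfloat16}}
    #     # becomes
    #     {"dtype": "float32", "text_config": {"dtype": "bfloat16"}}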
def _remove_keys_not_serialized(self, d: dict[str, Any]) -> None:
"""
        Checks whether the dict contains any keys that should not be serialized when saving the config, and removes
        them. Runs the check recursively on the dict so that such keys are also removed from all sub-configs.
"""
if hasattr(self, 'quantization_config'):
_ = d.pop('_pre_quantization_dtype', None)
if '_auto_class' in d:
del d['_auto_class']
if '_output_attentions' in d:
d['output_attentions'] = d.pop('_output_attentions')
if '_commit_hash' in d:
del d['_commit_hash']
if '_attn_implementation_internal' in d:
del d['_attn_implementation_internal']
if 'base_model_tp_plan' in d:
del d['base_model_tp_plan']
if 'base_model_pp_plan' in d:
del d['base_model_pp_plan']
for value in d.values():
if isinstance(value, dict):
self._remove_keys_not_serialized(value)
@classmethod
def register_for_auto_class(cls, auto_class='AutoConfig'):
"""
Register this class with a given auto class. This should only be used for custom configurations as the ones in
the library are already mapped with `AutoConfig`.
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
The auto class to register this new configuration with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f'{auto_class} is not a valid auto class.')
cls._auto_class = auto_class
@staticmethod
def _get_global_generation_defaults() -> dict[str, Any]:
return {'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'num_beam_groups': 1, 'diversity_penalty': 0.0}
def _get_non_default_generation_parameters(self) -> dict[str, Any]:
"""
Gets the non-default generation parameters on the PretrainedConfig instance
"""
non_default_generation_parameters = {}
decoder_attribute_name = None
try:
default_config = self.__class__()
except ValueError:
decoder_config = self.get_text_config(decoder=True)
if decoder_config is not self:
default_config = decoder_config.__class__()
else:
default_config = None
self_decoder_config = self if decoder_attribute_name is None else getattr(self, decoder_attribute_name)
for parameter_name, default_global_value in self._get_global_generation_defaults().items():
if hasattr(self_decoder_config, parameter_name):
is_default_in_config = is_default_generation_value = None
parameter_value = getattr(self_decoder_config, parameter_name)
if parameter_value is None:
continue
if default_config is not None:
is_default_in_config = parameter_value == getattr(default_config, parameter_name)
else:
is_default_generation_value = parameter_value == default_global_value
is_non_default = is_default_in_config is False or (is_default_in_config is None and is_default_generation_value is False)
if is_non_default:
non_default_generation_parameters[parameter_name] = getattr(self_decoder_config, parameter_name)
return non_default_generation_parameters
def get_text_config(self, decoder=None, encoder=None) -> 'PretrainedConfig':
"""
Returns the text config related to the text input (encoder) or text output (decoder) of the model. The
`decoder` and `encoder` input arguments can be used to specify which end of the model we are interested in,
which is useful on models that have both text input and output modalities.
There are three possible outcomes of using this method:
1. On most models, it returns the original config instance itself.
2. On newer (2024+) composite models, it returns the text section of the config, which is nested under a set
of valid names.
3. On older (2023-) composite models, it discards decoder-only parameters when `encoder=True` and vice-versa.
Args:
decoder (`Optional[bool]`, *optional*):
If set to `True`, then only search for decoder config names.
encoder (`Optional[bool]`, *optional*):
If set to `True`, then only search for encoder config names.
"""
return_both = decoder == encoder
decoder_possible_text_config_names = ('decoder', 'generator', 'text_config')
encoder_possible_text_config_names = ('text_encoder',)
if return_both:
possible_text_config_names = encoder_possible_text_config_names + decoder_possible_text_config_names
elif decoder:
possible_text_config_names = decoder_possible_text_config_names
else:
possible_text_config_names = encoder_possible_text_config_names
valid_text_config_names = []
for text_config_name in possible_text_config_names:
if hasattr(self, text_config_name):
text_config = getattr(self, text_config_name, None)
if text_config is not None:
valid_text_config_names += [text_config_name]
if len(valid_text_config_names) > 1:
raise ValueError(f'Multiple valid text configs were found in the model config: {valid_text_config_names}. In this case, using `get_text_config()` would be ambiguous. Please specify the desired text config directly, e.g. `text_config = config.sub_config_name`')
elif len(valid_text_config_names) == 1:
config_to_return = getattr(self, valid_text_config_names[0])
else:
config_to_return = self
if not return_both and len(valid_text_config_names) == 0 and config_to_return.is_encoder_decoder:
config_to_return = copy.deepcopy(config_to_return)
prefix_to_discard = 'encoder' if decoder else 'decoder'
prefix_to_keep = 'decoder' if decoder else 'encoder'
for key in config_to_return.to_dict():
if key.startswith(prefix_to_discard) and key not in config_to_return.attribute_map.values():
delattr(config_to_return, key)
if key.startswith(prefix_to_keep):
if key == prefix_to_keep + '_layers':
new_key = 'num_hidden_layers'
elif key == prefix_to_keep + '_attention_heads':
new_key = 'num_attention_heads'
else:
new_key = key[len(prefix_to_keep) + 1:]
if new_key in config_to_return.attribute_map:
new_key = config_to_return.attribute_map[new_key]
value = getattr(config_to_return, key)
delattr(config_to_return, key)
setattr(config_to_return, new_key, value)
return config_to_return
@classmethod
def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
"""
Instantiate a model config (or a derived class) from text model configuration and vision model
configuration.
Returns:
            [`PretrainedConfig`]: An instance of a configuration object
"""
warnings.warn('The `from_text_vision_configs` method is deprecated and will be removed in v4.60 of Transformers. Please instantiate the config class directly with `MyConfig(text_config=text_config, vision_config=vision_config, **kwargs)` instead.', FutureWarning)
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
@classmethod
def from_text_audio_configs(cls, text_config, audio_config, **kwargs):
"""
Instantiate a model config (or a derived class) from text model configuration and audio model
configuration.
Returns:
            [`PretrainedConfig`]: An instance of a configuration object
"""
warnings.warn('The `from_text_audio_configs` method is deprecated and will be removed in v4.60 of Transformers. Please instantiate the config class directly with `MyConfig(text_config=text_config, audio_config=audio_config, **kwargs)` instead.', FutureWarning)
return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs)
|
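The `update_from_string` helper above parses a comma-separated list of `key=value` pairs and coerces each value to the type of the attribute it replaces. A minimal, hedged sketch of what that looks like in practice (`GPT2Config` is used only because it defines the attributes from the docstring example; nothing here downloads a checkpoint):

```python
from transformers import GPT2Config

config = GPT2Config()
config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")

# Each value was cast to the type of the existing attribute.
assert config.n_embd == 10                 # int
assert config.resid_pdrop == 0.2           # float
assert config.scale_attn_weights is False  # bool, parsed from "false"
assert config.summary_type == "cls_index"  # str
```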
class PretrainedConfig(PushToHubMixin):
'''
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
<Tip>
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
</Tip>
Class attributes (overridden by derived classes):
- **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate
the correct object in [`~transformers.AutoConfig`].
- **has_no_defaults_at_init** (`bool`) -- Whether the config class can be initialized without providing input arguments.
      Some configurations require inputs to be defined at init and have no default values; these are usually composite configs
      (but not necessarily), such as [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`]. They have to be initialized from
two or more configs of type [`~transformers.PretrainedConfig`].
- **keys_to_ignore_at_inference** (`list[str]`) -- A list of keys to ignore by default when looking at dictionary
outputs of the model during inference.
- **attribute_map** (`dict[str, str]`) -- A dict that maps model specific attribute names to the standardized
naming of attributes.
- **base_model_tp_plan** (`dict[str, Any]`) -- A dict that maps sub-modules FQNs of a base model to a tensor
parallel plan applied to the sub-module when `model.tensor_parallel` is called.
- **base_model_pp_plan** (`dict[str, tuple[list[str]]]`) -- A dict that maps child-modules of a base model to a
pipeline parallel plan that enables users to place the child-module on the appropriate device.
Common attributes (present in all subclasses):
- **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the
embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
- **hidden_size** (`int`) -- The hidden size of the model.
- **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the
model.
- **num_hidden_layers** (`int`) -- The number of blocks in the model.
<Tip warning={true}>
Setting parameters for sequence generation in the model config is deprecated. For backward compatibility, loading
some of them will still be possible, but attempting to overwrite them will throw an exception -- you should set
    them in a [`~transformers.GenerationConfig`]. Check the documentation of [`~transformers.GenerationConfig`] for more
information about the individual parameters.
</Tip>
    Args:
name_or_path (`str`, *optional*, defaults to `""`):
Store the string that was passed to [`PreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path`
if the configuration was created with such a method.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not the model should return all hidden-states.
output_attentions (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return all attentions.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.
is_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether to only use the decoder in an encoder-decoder architecture, otherwise it has no effect on
decoder-only or encoder-only architectures.
cross_attention_hidden_size (`bool`, *optional*):
The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
add_cross_attention (`bool`, *optional*, defaults to `False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
in `AUTO_MODELS_FOR_CAUSAL_LM`.
tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
        pruned_heads (`dict[int, list[int]]`, *optional*, defaults to `{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <
sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed
Forward Chunking work?](../glossary.html#feed-forward-chunking).
> Parameters for fine-tuning tasks
architectures (`list[str]`, *optional*):
Model architectures that can be used with the model pretrained weights.
finetuning_task (`str`, *optional*):
Name of the task used to fine-tune the model.
id2label (`dict[int, str]`, *optional*):
A map from index (for instance prediction index, or target index) to label.
label2id (`dict[str, int]`, *optional*):
A map from label to index for the model.
num_labels (`int`, *optional*):
Number of labels to use in the last layer added to the model, typically for a classification task.
task_specific_params (`dict[str, Any]`, *optional*):
Additional keyword arguments to store for the current task.
problem_type (`str`, *optional*):
Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
`"single_label_classification"` or `"multi_label_classification"`.
> Parameters linked to the tokenizer
tokenizer_class (`str`, *optional*):
The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the
model by default).
prefix (`str`, *optional*):
A specific prompt that should be added at the beginning of each text before calling the model.
bos_token_id (`int`, *optional*):
The id of the _beginning-of-stream_ token.
pad_token_id (`int`, *optional*):
The id of the _padding_ token.
eos_token_id (`int`, *optional*):
The id of the _end-of-stream_ token.
decoder_start_token_id (`int`, *optional*):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
sep_token_id (`int`, *optional*):
The id of the _separation_ token.
> PyTorch specific parameters
torchscript (`bool`, *optional*, defaults to `False`):
Whether or not the model should be used with Torchscript.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
            model has an output word embedding layer.
dtype (`str`, *optional*):
The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
(which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
`float16` weights.
'''
def __setattr__(self, key, value):
pass
def __getattribute__(self, key):
pass
def __init__(self, *, output_hidden_states: bool=False, output_attentions: bool=False, return_dict: bool=True, torchscript: bool=False, dtype: Optional[Union[str, 'torch.dtype']]=None, pruned_heads: Optional[dict[int, list[int]]]=None, tie_word_embeddings: bool=True, chunk_size_feed_forward: int=0, is_encoder_decoder: bool=False, is_decoder: bool=False, cross_attention_hidden_size: Optional[int]=None, add_cross_attention: bool=False, tie_encoder_decoder: bool=False, architectures: Optional[list[str]]=None, finetuning_task: Optional[str]=None, id2label: Optional[dict[int, str]]=None, label2id: Optional[dict[str, int]]=None, num_labels: Optional[int]=None, task_specific_params: Optional[dict[str, Any]]=None, problem_type: Optional[str]=None, tokenizer_class: Optional[str]=None, prefix: Optional[str]=None, bos_token_id: Optional[int]=None, pad_token_id: Optional[int]=None, eos_token_id: Optional[int]=None, sep_token_id: Optional[int]=None, decoder_start_token_id: Optional[int]=None, **kwargs):
pass
def _create_id_label_maps(self, num_labels: int):
pass
@property
def name_or_path(self) -> Optional[str]:
pass
@name_or_path.setter
    def name_or_path(self, value: Optional[str]):
pass
@property
def output_attentions(self):
'''
        `bool`: Whether or not the model should return all attentions.
'''
pass
@output_attentions.setter
    def output_attentions(self, value):
pass
@property
def use_return_dict(self) -> bool:
'''
`bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples.
'''
pass
@property
def num_labels(self) -> int:
'''
`int`: The number of labels for classification models.
'''
pass
@num_labels.setter
    def num_labels(self, num_labels: int):
pass
@property
def _attn_implementation(self):
pass
@_attn_implementation.setter
    def _attn_implementation(self, value):
'''We set it recursively on the sub-configs as well'''
pass
@property
def torch_dtype(self):
pass
@torch_dtype.setter
    def torch_dtype(self, value):
pass
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):
'''
Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
[`~PretrainedConfig.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
'''
pass
@staticmethod
def _set_token_in_kwargs(kwargs, token=None):
'''Temporary method to deal with `token` and `use_auth_token`.
        This method is to avoid applying the same changes in all model config classes that override `from_pretrained`.
        Need to clean up `use_auth_token` in a follow-up PR.
'''
pass
@classmethod
def from_pretrained(cls: type[SpecificPretrainedConfigType], pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]]=None, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', **kwargs) -> SpecificPretrainedConfigType:
'''
Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a configuration file saved using the
[`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
- a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force (re-)downloading the configuration files, overriding the cached versions if
they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
the token generated when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
</Tip>
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
                If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
subfolder (`str`, *optional*, defaults to `""`):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
specify the folder name here.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
Examples:
```python
        # We can't directly instantiate the base class *PretrainedConfig*, so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained(
"google-bert/bert-base-uncased"
) # Download configuration from huggingface.co and cache.
config = BertConfig.from_pretrained(
"./test/saved_model/"
) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
config = BertConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained(
"google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
)
assert config.output_attentions == True
assert unused_kwargs == {"foo": False}
```'''
pass
@classmethod
def get_config_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:
'''
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
[`PretrainedConfig`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
'''
pass
@classmethod
def _get_config_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:
pass
@classmethod
def from_dict(cls: type[SpecificPretrainedConfigType], config_dict: dict[str, Any], **kwargs) -> SpecificPretrainedConfigType:
'''
Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
Args:
config_dict (`dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
kwargs (`dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from those parameters.
'''
pass
@classmethod
def from_json_file(cls: type[SpecificPretrainedConfigType], json_file: Union[str, os.PathLike]) -> SpecificPretrainedConfigType:
'''
Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from that JSON file.
'''
pass
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
pass
def __eq__(self, other):
pass
def __repr__(self):
pass
def __iter__(self):
pass
def to_diff_dict(self) -> dict[str, Any]:
'''
Removes all attributes from the configuration that correspond to the default config attributes for
better readability, while always retaining the `config` attribute from the class. Serializes to a
Python dictionary.
Returns:
dict[str, Any]: Dictionary of all the attributes that make up this configuration instance.
'''
pass
def to_dict(self) -> dict[str, Any]:
'''
Serializes this instance to a Python dictionary.
Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
'''
pass
def to_json_string(self, use_diff: bool=True) -> str:
'''
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
'''
pass
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool=True):
'''
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON file.
'''
pass
def update(self, config_dict: dict[str, Any]):
'''
Updates attributes of this class with attributes from `config_dict`.
Args:
config_dict (`dict[str, Any]`): Dictionary of attributes that should be updated for this class.
'''
pass
def update_from_string(self, update_str: str):
'''
Updates attributes of this class with attributes from `update_str`.
The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (`str`): String with attributes that should be updated for this class.
'''
pass
def dict_dtype_to_str(self, d: dict[str, Any]) -> None:
'''
Checks whether the passed dictionary and its nested dicts have a *dtype* key and if it's not None,
        converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the *"float32"*
        string, which can then be stored in the JSON format.
'''
pass
def _remove_keys_not_serialized(self, d: dict[str, Any]) -> None:
'''
        Checks whether there are any keys in the dict that should not be serialized when saving the config and removes them.
Runs recursive check on the dict, to remove from all sub configs.
'''
pass
@classmethod
def register_for_auto_class(cls, auto_class='AutoConfig'):
'''
Register this class with a given auto class. This should only be used for custom configurations as the ones in
the library are already mapped with `AutoConfig`.
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
The auto class to register this new configuration with.
'''
pass
@staticmethod
def _get_global_generation_defaults() -> dict[str, Any]:
pass
def _get_non_default_generation_parameters(self) -> dict[str, Any]:
'''
Gets the non-default generation parameters on the PretrainedConfig instance
'''
pass
def get_text_config(self, decoder=None, encoder=None) -> 'PretrainedConfig':
'''
Returns the text config related to the text input (encoder) or text output (decoder) of the model. The
`decoder` and `encoder` input arguments can be used to specify which end of the model we are interested in,
which is useful on models that have both text input and output modalities.
There are three possible outcomes of using this method:
1. On most models, it returns the original config instance itself.
2. On newer (2024+) composite models, it returns the text section of the config, which is nested under a set
of valid names.
3. On older (2023-) composite models, it discards decoder-only parameters when `encoder=True` and vice-versa.
Args:
decoder (`Optional[bool]`, *optional*):
If set to `True`, then only search for decoder config names.
encoder (`Optional[bool]`, *optional*):
If set to `True`, then only search for encoder config names.
'''
pass
@classmethod
def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
'''
Instantiate a model config (or a derived class) from text model configuration and vision model
configuration.
Returns:
            [`PretrainedConfig`]: An instance of a configuration object
'''
pass
@classmethod
def from_text_audio_configs(cls, text_config, audio_config, **kwargs):
'''
Instantiate a model config (or a derived class) from text model configuration and audio model
configuration.
Returns:
[`PreTrainedConfig`]: An instance of a configuration object
'''
pass
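A short, hedged illustration of the second outcome described in the `get_text_config` docstring above (a composite config that nests its text settings under a valid name). `LlavaConfig` is used purely as one example of such a config; the exact class of the returned sub-config is not asserted here:

```python
from transformers import LlavaConfig

config = LlavaConfig()                       # composite config with both vision_config and text_config
text_config = config.get_text_config(decoder=True)

print(type(text_config).__name__)            # the nested text/decoder config class
print(text_config is config)                 # False: the nested section is returned, not the wrapper
```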
| 63
| 24
| 28
| 4
| 16
| 8
| 4
| 0.67
| 1
| 19
| 0
| 38
| 23
| 33
| 32
| 32
| 1,084
| 169
| 551
| 179
| 487
| 370
| 383
| 147
| 348
| 16
| 1
| 3
| 143
|
208
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.AlbertConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class AlbertConverter(SpmConverter):
def vocab(self, proto):
return [(piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces]
def normalizer(self, proto):
list_normalizers = [normalizers.Replace('``', '"'), normalizers.Replace("''", '"')]
if not self.original_tokenizer.keep_accents:
list_normalizers.append(normalizers.NFKD())
list_normalizers.append(normalizers.StripAccents())
if self.original_tokenizer.do_lower_case:
list_normalizers.append(normalizers.Lowercase())
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
if precompiled_charsmap:
list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
list_normalizers.append(normalizers.Replace(Regex(' {2,}'), ' '))
return normalizers.Sequence(list_normalizers)
def post_processor(self):
return processors.TemplateProcessing(single='[CLS]:0 $A:0 [SEP]:0', pair='[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1', special_tokens=[('[CLS]', self.original_tokenizer.convert_tokens_to_ids('[CLS]')), ('[SEP]', self.original_tokenizer.convert_tokens_to_ids('[SEP]'))])
|
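To make the normalizer chain above more concrete, here is a hedged sketch of roughly what `AlbertConverter.normalizer` builds when `do_lower_case=True` and `keep_accents=False`; the `Precompiled` step is left out because it needs the charsmap from the SentencePiece model:

```python
from tokenizers import Regex, normalizers

norm = normalizers.Sequence(
    [
        normalizers.Replace("``", '"'),
        normalizers.Replace("''", '"'),
        normalizers.NFKD(),            # decompose accented characters...
        normalizers.StripAccents(),    # ...then drop the combining marks
        normalizers.Lowercase(),
        normalizers.Replace(Regex(" {2,}"), " "),  # collapse runs of spaces
    ]
)
print(norm.normalize_str("``Crème  Brûlée''"))  # roughly: "creme brulee"
```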
class AlbertConverter(SpmConverter):
def vocab(self, proto):
pass
def normalizer(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 11
| 1
| 10
| 0
| 2
| 0
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 35
| 5
| 30
| 6
| 26
| 0
| 17
| 6
| 13
| 4
| 2
| 1
| 7
|
209
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.BarthezConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class BarthezConverter(SpmConverter):
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self):
return processors.TemplateProcessing(single='<s> $A </s>', pair='<s> $A </s> </s> $B </s>', special_tokens=[('<s>', self.original_tokenizer.convert_tokens_to_ids('<s>')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class BarthezConverter(SpmConverter):
def unk_id(self, proto):
pass
def post_processor(self):
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 2
| 0
| 2
| 13
| 14
| 1
| 13
| 4
| 10
| 0
| 6
| 4
| 3
| 1
| 2
| 0
| 2
|
210
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.BertConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class BertConverter(Converter):
def converted(self) -> Tokenizer:
vocab = self.original_tokenizer.vocab
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
tokenize_chinese_chars = False
strip_accents = False
do_lower_case = False
if hasattr(self.original_tokenizer, 'basic_tokenizer'):
tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
tokenizer.normalizer = normalizers.BertNormalizer(clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case)
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
cls = str(self.original_tokenizer.cls_token)
sep = str(self.original_tokenizer.sep_token)
cls_token_id = self.original_tokenizer.cls_token_id
sep_token_id = self.original_tokenizer.sep_token_id
tokenizer.post_processor = processors.TemplateProcessing(single=f'{cls}:0 $A:0 {sep}:0', pair=f'{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1', special_tokens=[(cls, cls_token_id), (sep, sep_token_id)])
tokenizer.decoder = decoders.WordPiece(prefix='##')
return tokenizer
|
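A hedged end-to-end sketch of how a converter like this is typically driven: wrap an existing slow tokenizer, call `converted()`, and use the resulting `tokenizers.Tokenizer` directly (the checkpoint name is only an example):

```python
from transformers import BertTokenizer  # the slow (Python) tokenizer

slow = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
fast = BertConverter(slow).converted()

enc = fast.encode("Hello world!")
print(enc.tokens)  # roughly: ['[CLS]', 'hello', 'world', '!', '[SEP]']
```

In the library itself this conversion is normally triggered through `convert_slow_tokenizer` rather than by instantiating a converter by hand.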
class BertConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 36
| 5
| 31
| 1
| 2
| 0.03
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 37
| 5
| 32
| 11
| 30
| 1
| 20
| 11
| 18
| 2
| 1
| 1
| 2
|
211
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.BertGenerationConverter
|
class BertGenerationConverter(SpmConverter):
pass
|
class BertGenerationConverter(SpmConverter):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
212
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.BigBirdConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class BigBirdConverter(SpmConverter):
def post_processor(self):
return processors.TemplateProcessing(single='[CLS]:0 $A:0 [SEP]:0', pair='[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1', special_tokens=[('[CLS]', self.original_tokenizer.convert_tokens_to_ids('[CLS]')), ('[SEP]', self.original_tokenizer.convert_tokens_to_ids('[SEP]'))])
|
class BigBirdConverter(SpmConverter):
def post_processor(self):
pass
| 2
| 0
| 9
| 0
| 9
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 12
| 10
| 0
| 10
| 2
| 8
| 0
| 3
| 2
| 1
| 1
| 2
| 0
| 1
|
213
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.BlenderbotConverter
|
from tokenizers.models import BPE, Unigram, WordPiece
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class BlenderbotConverter(Converter):
def converted(self) -> Tokenizer:
ot = self.original_tokenizer
vocab = ot.encoder
merges = list(ot.bpe_ranks.keys())
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix='', end_of_word_suffix='', fuse_unk=False))
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.TemplateProcessing(single=f'$A:0 {ot.eos_token}:0', special_tokens=[(ot.eos_token, ot.eos_token_id)])
return tokenizer
|
class BlenderbotConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 26
| 3
| 23
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 27
| 3
| 24
| 6
| 22
| 0
| 10
| 6
| 8
| 1
| 1
| 0
| 1
|
214
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.CLIPConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class CLIPConverter(Converter):
def converted(self) -> Tokenizer:
vocab = self.original_tokenizer.encoder
merges = list(self.original_tokenizer.bpe_ranks.keys())
unk_token = self.original_tokenizer.unk_token
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix='', end_of_word_suffix='</w>', fuse_unk=False, unk_token=str(unk_token)))
tokenizer.normalizer = normalizers.Sequence([normalizers.NFC(), normalizers.Replace(Regex('\\s+'), ' '), normalizers.Lowercase()])
tokenizer.pre_tokenizer = pre_tokenizers.Sequence([pre_tokenizers.Split(Regex("'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+"), behavior='removed', invert=True), pre_tokenizers.ByteLevel(add_prefix_space=False)])
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.RobertaProcessing(sep=(self.original_tokenizer.eos_token, self.original_tokenizer.eos_token_id), cls=(self.original_tokenizer.bos_token, self.original_tokenizer.bos_token_id), add_prefix_space=False, trim_offsets=False)
return tokenizer
|
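The pre-tokenizer built above is the distinctive part of this converter: `Split` with `invert=True` keeps only the spans matching the GPT-2-style pattern (dropping whitespace), and `ByteLevel` then maps those spans to byte-level symbols. A small sketch; the printed pieces are indicative only:

```python
from tokenizers import Regex, pre_tokenizers

pattern = Regex("'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+")
pre_tok = pre_tokenizers.Sequence(
    [
        pre_tokenizers.Split(pattern, behavior="removed", invert=True),
        pre_tokenizers.ByteLevel(add_prefix_space=False),
    ]
)
print(pre_tok.pre_tokenize_str("let's go!"))
# pieces along the lines of: let / 's / go / !  (whitespace is removed by the Split step)
```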
class CLIPConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 40
| 3
| 36
| 1
| 1
| 0.03
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 3
| 41
| 3
| 37
| 6
| 35
| 1
| 11
| 6
| 9
| 1
| 1
| 0
| 1
|
215
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.CamembertConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class CamembertConverter(SpmConverter):
def vocab(self, proto):
vocab = [('<s>NOTUSED', 0.0), ('<pad>', 0.0), ('</s>NOTUSED', 0.0), ('<unk>', 0.0), ('<unk>NOTUSED', -100)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[1:]]
vocab += [('<mask>', 0.0)]
return vocab
def unk_id(self, proto):
return 3
def post_processor(self):
return processors.TemplateProcessing(single='<s> $A </s>', pair='<s> $A </s> </s> $B </s>', special_tokens=[('<s>', self.original_tokenizer.convert_tokens_to_ids('<s>')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class CamembertConverter(SpmConverter):
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 8
| 0
| 7
| 1
| 1
| 0.09
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 27
| 2
| 23
| 5
| 19
| 2
| 10
| 5
| 6
| 1
| 2
| 0
| 3
|
216
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.Converter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class Converter:
def __init__(self, original_tokenizer):
self.original_tokenizer = original_tokenizer
def converted(self) -> Tokenizer:
raise NotImplementedError()
|
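Every concrete converter below follows this small contract: store the slow tokenizer, implement `converted()`, and return a `tokenizers.Tokenizer`. A hedged, minimal example of a custom subclass (the class name and the word-level model are illustrative only, not part of the library):

```python
from tokenizers import Tokenizer, pre_tokenizers
from tokenizers.models import WordLevel


class WordLevelConverter(Converter):  # hypothetical converter
    def converted(self) -> Tokenizer:
        # Assumes the wrapped slow tokenizer exposes `get_vocab()` and `unk_token`.
        vocab = self.original_tokenizer.get_vocab()
        tokenizer = Tokenizer(WordLevel(vocab, unk_token=str(self.original_tokenizer.unk_token)))
        tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
        return tokenizer
```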
class Converter:
def __init__(self, original_tokenizer):
pass
def converted(self) -> Tokenizer:
pass
| 3
| 0
| 2
| 0
| 2
| 0
| 1
| 0
| 0
| 1
| 0
| 17
| 2
| 1
| 2
| 2
| 6
| 1
| 5
| 4
| 2
| 0
| 5
| 4
| 2
| 1
| 0
| 0
| 2
|
217
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.DebertaConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class DebertaConverter(Converter):
def converted(self) -> Tokenizer:
ot = self.original_tokenizer
vocab = ot.encoder
merges = list(ot.bpe_ranks.keys())
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix='', end_of_word_suffix='', fuse_unk=False))
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.TemplateProcessing(single='[CLS]:0 $A:0 [SEP]:0', pair='[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1', special_tokens=[('[CLS]', self.original_tokenizer.convert_tokens_to_ids('[CLS]')), ('[SEP]', self.original_tokenizer.convert_tokens_to_ids('[SEP]'))])
return tokenizer
|
class DebertaConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 28
| 3
| 25
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 29
| 3
| 26
| 6
| 24
| 0
| 10
| 6
| 8
| 1
| 1
| 0
| 1
|
218
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.DebertaV2Converter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class DebertaV2Converter(SpmConverter):
def pre_tokenizer(self, replacement, add_prefix_space):
list_pretokenizers = []
if self.original_tokenizer.split_by_punct:
list_pretokenizers.append(pre_tokenizers.Punctuation(behavior='isolated'))
prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
list_pretokenizers.append(pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme))
return pre_tokenizers.Sequence(list_pretokenizers)
def normalizer(self, proto):
list_normalizers = []
if self.original_tokenizer.do_lower_case:
list_normalizers.append(normalizers.Lowercase())
list_normalizers.append(normalizers.Strip())
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
if precompiled_charsmap:
list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
list_normalizers.append(normalizers.Replace(Regex(' {2,}'), ' '))
return normalizers.Sequence(list_normalizers)
def post_processor(self):
return processors.TemplateProcessing(single='[CLS]:0 $A:0 [SEP]:0', pair='[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1', special_tokens=[('[CLS]', self.original_tokenizer.convert_tokens_to_ids('[CLS]')), ('[SEP]', self.original_tokenizer.convert_tokens_to_ids('[SEP]'))])
|
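A hedged sketch of the pre-tokenizer this converter builds when `split_by_punct=True`; the real prepend scheme comes from the `_get_prepend_scheme` helper, `"always"` is simply assumed here:

```python
from tokenizers import pre_tokenizers

pre_tok = pre_tokenizers.Sequence(
    [
        pre_tokenizers.Punctuation(behavior="isolated"),                       # split punctuation into its own pieces
        pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="always"),    # map spaces to the SentencePiece underline
    ]
)
print(pre_tok.pre_tokenize_str("Hello, world."))
# punctuation is isolated and whitespace is rewritten to "▁"
```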
class DebertaV2Converter(SpmConverter):
def pre_tokenizer(self, replacement, add_prefix_space):
pass
def normalizer(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 9
| 1
| 9
| 0
| 2
| 0
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 31
| 4
| 27
| 8
| 23
| 0
| 20
| 8
| 16
| 3
| 2
| 1
| 6
|
219
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.FunnelConverter
|
from tokenizers.models import BPE, Unigram, WordPiece
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class FunnelConverter(Converter):
def converted(self) -> Tokenizer:
vocab = self.original_tokenizer.vocab
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
tokenize_chinese_chars = False
strip_accents = False
do_lower_case = False
if hasattr(self.original_tokenizer, 'basic_tokenizer'):
tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
tokenizer.normalizer = normalizers.BertNormalizer(clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case)
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
cls = str(self.original_tokenizer.cls_token)
sep = str(self.original_tokenizer.sep_token)
cls_token_id = self.original_tokenizer.cls_token_id
sep_token_id = self.original_tokenizer.sep_token_id
tokenizer.post_processor = processors.TemplateProcessing(single=f'{cls}:2 $A:0 {sep}:0', pair=f'{cls}:2 $A:0 {sep}:0 $B:1 {sep}:1', special_tokens=[(cls, cls_token_id), (sep, sep_token_id)])
tokenizer.decoder = decoders.WordPiece(prefix='##')
return tokenizer
|
class FunnelConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 36
| 5
| 31
| 2
| 2
| 0.06
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 37
| 5
| 32
| 11
| 30
| 2
| 20
| 11
| 18
| 2
| 1
| 1
| 2
|
220
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.GPT2Converter
|
from tokenizers.models import BPE, Unigram, WordPiece
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from typing import Optional
class GPT2Converter(Converter):
def converted(self, vocab: Optional[dict[str, int]]=None, merges: Optional[list[tuple[str, str]]]=None) -> Tokenizer:
if not vocab:
vocab = self.original_tokenizer.encoder
if not merges:
merges = list(self.original_tokenizer.bpe_ranks)
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix='', end_of_word_suffix='', fuse_unk=False))
add_prefix_space = getattr(self.original_tokenizer, 'add_prefix_space', False)
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
if getattr(self.original_tokenizer, 'add_bos_token', False):
bos = self.original_tokenizer.bos_token
bos_token_id = self.original_tokenizer.bos_token_id
tokenizer.post_processor = processors.TemplateProcessing(single=f'{bos}:0 $A:0', pair=f'{bos}:0 $A:0 $B:1', special_tokens=[(bos, bos_token_id)])
else:
tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
return tokenizer
|
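A hedged usage sketch for this converter (the checkpoint id is only an example; GPT-2's slow tokenizer has `add_bos_token=False` by default, so the `ByteLevel` post-processor branch is the one exercised here):

```python
from transformers import GPT2Tokenizer  # slow tokenizer

slow = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
fast = GPT2Converter(slow).converted()

print(fast.encode("Hello world").tokens)  # byte-level pieces, roughly: ['Hello', 'Ġworld']
# If the slow tokenizer had add_bos_token=True, the TemplateProcessing branch above
# would prepend the BOS token to every encoding instead.
```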
class GPT2Converter(Converter):
def converted(self, vocab: Optional[dict[str, int]]=None, merges: Optional[list[tuple[str, str]]]=None) -> Tokenizer:
pass
| 2
| 0
| 35
| 2
| 31
| 2
| 4
| 0.06
| 1
| 3
| 0
| 1
| 1
| 0
| 1
| 3
| 36
| 2
| 32
| 6
| 30
| 2
| 16
| 6
| 14
| 4
| 1
| 1
| 4
|
221
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.GemmaConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class GemmaConverter(SpmConverter):
handle_byte_fallback = True
SpmExtractor = GemmaSentencePieceExtractor
special_tokens = {'<start_of_turn>', '<end_of_turn>'}
'"\n split_by_unicode_script: true\n split_by_number: true\n split_by_whitespace: true\n treat_whitespace_as_suffix: false\n allow_whitespace_only_pieces: true\n split_digits: true\n byte_fallback: true\n '
def normalizer(self, proto):
return normalizers.Replace(' ', '▁')
def vocab(self, proto):
vocab = [(self.original_tokenizer.pad_token, 0.0), (self.original_tokenizer.eos_token, 0.0), (self.original_tokenizer.bos_token, 0.0)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
if not any((x[0] == '\t' for x in vocab)):
override_index = next((i for i, x in enumerate(vocab) if x[0] == '<0x09>'), None)
if override_index is not None:
vocab[override_index] = ('\t', 0.0)
return vocab
def pre_tokenizer(self, replacement, add_prefix_space):
return pre_tokenizers.Split(' ', 'merged_with_previous')
def unk_id(self, proto):
unk_id = 3
return unk_id
def decoder(self, replacement, add_prefix_space):
return decoders.Sequence([decoders.Replace('▁', ' '), decoders.ByteFallback(), decoders.Fuse()])
|
class GemmaConverter(SpmConverter):
def normalizer(self, proto):
pass
def vocab(self, proto):
pass
def pre_tokenizer(self, replacement, add_prefix_space):
pass
def unk_id(self, proto):
pass
def decoder(self, replacement, add_prefix_space):
pass
| 6
| 0
| 6
| 0
| 5
| 0
| 1
| 0.35
| 1
| 0
| 0
| 1
| 5
| 0
| 5
| 16
| 48
| 6
| 31
| 12
| 25
| 11
| 20
| 12
| 14
| 3
| 2
| 2
| 7
|
222
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.GemmaSentencePieceExtractor
|
class GemmaSentencePieceExtractor(SentencePieceExtractor):
def extract(self, vocab_scores=None) -> tuple[dict[str, int], list[tuple]]:
"""
        By default, returns the vocab and merges in their original order; if `vocab_scores` is passed, the merges are
        ordered with respect to the piece scores instead.
"""
sp = self.sp
vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())}
if '\t' not in vocab:
vocab['\t'] = vocab.get('<0x09>')
merges = generate_merges(vocab, vocab_scores)
return (vocab, merges)
|
class GemmaSentencePieceExtractor(SentencePieceExtractor):
def extract(self, vocab_scores=None) -> tuple[dict[str, int], list[tuple]]:
'''
        By default, returns the vocab and merges in their original order; if `vocab_scores` is passed, the merges are
        ordered with respect to the piece scores instead.
'''
pass
| 2
| 1
| 14
| 2
| 6
| 6
| 1
| 0.86
| 1
| 3
| 0
| 0
| 1
| 0
| 1
| 3
| 15
| 2
| 7
| 5
| 5
| 6
| 7
| 5
| 5
| 1
| 1
| 0
| 1
|
223
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.HeliumConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
from .utils import is_protobuf_available, is_sentencepiece_available, logging, requires_backends
class HeliumConverter(SpmConverter):
handle_byte_fallback = True
def __init__(self, vocab_file=None, *args):
requires_backends(self, 'protobuf')
Converter.__init__(self, vocab_file)
model_pb2 = import_protobuf()
m = model_pb2.ModelProto()
with open(vocab_file, 'rb') as f:
m.ParseFromString(f.read())
self.proto = m
def tokenizer(self, proto):
vocab_scores = self.vocab(proto)
tokenizer = Tokenizer(Unigram(vocab_scores, unk_id=self.unk_id(proto), byte_fallback=self.handle_byte_fallback))
spm_added_tokens = [(id, p.piece, p.type == 3 or p.piece in self.special_tokens) for id, p in enumerate(proto.pieces) if p.type in [3, 4]]
tokenizer.add_tokens([AddedToken(token, normalized=False, special=special, single_word=True) for id, token, special in sorted(spm_added_tokens, key=lambda x: x[0])])
tokenizer.add_tokens([AddedToken('\n', normalized=False, special=False)])
tokenizer.enable_padding(pad_token='<pad>', pad_id=3)
return tokenizer
def vocab(self, proto):
vocab = []
for piece in proto.pieces:
if piece.piece == '<0x0A>':
vocab += [('\n', piece.score)]
else:
vocab += [(piece.piece, piece.score)]
return vocab
def unk_id(self, proto):
unk_id = 0
return unk_id
def decoder(self, replacement, add_prefix_space):
sequence = [decoders.Replace('▁', ' '), decoders.ByteFallback(), decoders.Fuse()]
sequence += [decoders.Strip(content=' ', left=1)]
return decoders.Sequence(sequence)
def normalizer(self, proto):
return normalizers.Sequence([normalizers.Prepend(' '), normalizers.Replace(' ', '▁')])
def pre_tokenizer(self, replacement, add_prefix_space):
return pre_tokenizers.Sequence([pre_tokenizers.Split('\n', 'contiguous')])
def post_processor(self):
return processors.TemplateProcessing(single=['<s>', '$A'], pair=['<s>', '$A', '<s>', '$B'], special_tokens=[('<s>', 1)])
|
class HeliumConverter(SpmConverter):
def __init__(self, vocab_file=None, *args):
pass
def tokenizer(self, proto):
pass
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def decoder(self, replacement, add_prefix_space):
pass
def normalizer(self, proto):
pass
def pre_tokenizer(self, replacement, add_prefix_space):
pass
def post_processor(self):
pass
| 9
| 0
| 10
| 0
| 9
| 1
| 1
| 0.06
| 1
| 1
| 0
| 0
| 8
| 1
| 8
| 19
| 87
| 11
| 72
| 22
| 63
| 4
| 38
| 20
| 29
| 3
| 2
| 2
| 10
|
224
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.HerbertConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class HerbertConverter(Converter):
def converted(self) -> Tokenizer:
tokenizer_info_str = '#version:'
token_suffix = '</w>'
vocab = self.original_tokenizer.encoder
merges = list(self.original_tokenizer.bpe_ranks.keys())
if tokenizer_info_str in merges[0][0]:
merges = merges[1:]
tokenizer = Tokenizer(BPE(vocab, merges, dropout=None, unk_token=self.original_tokenizer.unk_token, end_of_word_suffix=token_suffix))
tokenizer.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False)
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tokenizer.decoder = decoders.BPEDecoder(suffix=token_suffix)
tokenizer.post_processor = processors.BertProcessing(sep=(self.original_tokenizer.sep_token, self.original_tokenizer.sep_token_id), cls=(self.original_tokenizer.cls_token, self.original_tokenizer.cls_token_id))
return tokenizer
|
class HerbertConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 28
| 4
| 24
| 1
| 2
| 0.04
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 29
| 4
| 25
| 7
| 23
| 1
| 14
| 7
| 12
| 2
| 1
| 1
| 2
|
225
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.LayoutLMv2Converter
|
from tokenizers.models import BPE, Unigram, WordPiece
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class LayoutLMv2Converter(Converter):
def converted(self) -> Tokenizer:
vocab = self.original_tokenizer.vocab
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
tokenize_chinese_chars = False
strip_accents = False
do_lower_case = True
if hasattr(self.original_tokenizer, 'basic_tokenizer'):
tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
tokenizer.normalizer = normalizers.BertNormalizer(clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case)
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
cls = str(self.original_tokenizer.cls_token)
sep = str(self.original_tokenizer.sep_token)
cls_token_id = self.original_tokenizer.cls_token_id
sep_token_id = self.original_tokenizer.sep_token_id
tokenizer.post_processor = processors.TemplateProcessing(single=f'{cls}:0 $A:0 {sep}:0', pair=f'{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1', special_tokens=[(cls, cls_token_id), (sep, sep_token_id)])
tokenizer.decoder = decoders.WordPiece(prefix='##')
return tokenizer
|
class LayoutLMv2Converter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 36
| 5
| 31
| 1
| 2
| 0.03
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 37
| 5
| 32
| 11
| 30
| 1
| 20
| 11
| 18
| 2
| 1
| 1
| 2
|
226
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.LlamaConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class LlamaConverter(SpmConverter):
handle_byte_fallback = True
def vocab(self, proto):
vocab = [(self.original_tokenizer.convert_ids_to_tokens(0), 0.0), (self.original_tokenizer.convert_ids_to_tokens(1), 0.0), (self.original_tokenizer.convert_ids_to_tokens(2), 0.0)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
return vocab
def unk_id(self, proto):
unk_id = 0
return unk_id
def decoder(self, replacement, add_prefix_space):
sequence = [decoders.Replace('▁', ' '), decoders.ByteFallback(), decoders.Fuse()]
if add_prefix_space:
sequence += [decoders.Strip(content=' ', left=1)]
return decoders.Sequence(sequence)
def normalizer(self, proto):
if getattr(self.original_tokenizer, 'legacy', True):
sequence = []
if getattr(self.original_tokenizer, 'add_prefix_space', True):
sequence += [normalizers.Prepend(prepend='▁')]
sequence += [normalizers.Replace(pattern=' ', content='▁')]
return normalizers.Sequence(sequence)
return None
def pre_tokenizer(self, replacement, add_prefix_space):
if not getattr(self.original_tokenizer, 'legacy', True):
prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme, split=False)
return None
def post_processor(self):
return None
|
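To illustrate the legacy path above (`legacy=True`, `add_prefix_space=True`): the normalizer prepends the SentencePiece underline and rewrites spaces, whereas the non-legacy path delegates the same job to a `Metaspace` pre-tokenizer. A minimal sketch:

```python
from tokenizers import normalizers

legacy_norm = normalizers.Sequence(
    [
        normalizers.Prepend(prepend="▁"),
        normalizers.Replace(pattern=" ", content="▁"),
    ]
)
print(legacy_norm.normalize_str("Hello world"))  # "▁Hello▁world"
```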
class LlamaConverter(SpmConverter):
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def decoder(self, replacement, add_prefix_space):
pass
def normalizer(self, proto):
pass
def pre_tokenizer(self, replacement, add_prefix_space):
pass
def post_processor(self):
pass
| 7
| 0
| 6
| 0
| 6
| 1
| 2
| 0.08
| 1
| 0
| 0
| 2
| 6
| 0
| 6
| 17
| 44
| 6
| 37
| 13
| 30
| 3
| 29
| 13
| 22
| 3
| 2
| 2
| 10
|
227
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.MBart50Converter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class MBart50Converter(SpmConverter):
def vocab(self, proto):
vocab = [('<s>', 0.0), ('<pad>', 0.0), ('</s>', 0.0), ('<unk>', 0.0)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [('ar_AR', 0.0), ('cs_CZ', 0.0), ('de_DE', 0.0), ('en_XX', 0.0), ('es_XX', 0.0), ('et_EE', 0.0), ('fi_FI', 0.0), ('fr_XX', 0.0), ('gu_IN', 0.0), ('hi_IN', 0.0), ('it_IT', 0.0), ('ja_XX', 0.0), ('kk_KZ', 0.0), ('ko_KR', 0.0), ('lt_LT', 0.0), ('lv_LV', 0.0), ('my_MM', 0.0), ('ne_NP', 0.0), ('nl_XX', 0.0), ('ro_RO', 0.0), ('ru_RU', 0.0), ('si_LK', 0.0), ('tr_TR', 0.0), ('vi_VN', 0.0), ('zh_CN', 0.0), ('af_ZA', 0.0), ('az_AZ', 0.0), ('bn_IN', 0.0), ('fa_IR', 0.0), ('he_IL', 0.0), ('hr_HR', 0.0), ('id_ID', 0.0), ('ka_GE', 0.0), ('km_KH', 0.0), ('mk_MK', 0.0), ('ml_IN', 0.0), ('mn_MN', 0.0), ('mr_IN', 0.0), ('pl_PL', 0.0), ('ps_AF', 0.0), ('pt_XX', 0.0), ('sv_SE', 0.0), ('sw_KE', 0.0), ('ta_IN', 0.0), ('te_IN', 0.0), ('th_TH', 0.0), ('tl_XX', 0.0), ('uk_UA', 0.0), ('ur_PK', 0.0), ('xh_ZA', 0.0), ('gl_ES', 0.0), ('sl_SI', 0.0)]
vocab += [('<mask>', 0.0)]
return vocab
def unk_id(self, proto):
return 3
def post_processor(self):
return processors.TemplateProcessing(single='en_XX $A </s>', pair='en_XX $A $B </s>', special_tokens=[('en_XX', self.original_tokenizer.convert_tokens_to_ids('en_XX')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class MBart50Converter(SpmConverter):
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 7
| 0
| 7
| 0
| 1
| 0.04
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 25
| 2
| 23
| 5
| 19
| 1
| 11
| 5
| 7
| 1
| 2
| 0
| 3
|
228
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.MBartConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class MBartConverter(SpmConverter):
def vocab(self, proto):
vocab = [('<s>', 0.0), ('<pad>', 0.0), ('</s>', 0.0), ('<unk>', 0.0)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [('ar_AR', 0.0), ('cs_CZ', 0.0), ('de_DE', 0.0), ('en_XX', 0.0), ('es_XX', 0.0), ('et_EE', 0.0), ('fi_FI', 0.0), ('fr_XX', 0.0), ('gu_IN', 0.0), ('hi_IN', 0.0), ('it_IT', 0.0), ('ja_XX', 0.0), ('kk_KZ', 0.0), ('ko_KR', 0.0), ('lt_LT', 0.0), ('lv_LV', 0.0), ('my_MM', 0.0), ('ne_NP', 0.0), ('nl_XX', 0.0), ('ro_RO', 0.0), ('ru_RU', 0.0), ('si_LK', 0.0), ('tr_TR', 0.0), ('vi_VN', 0.0), ('zh_CN', 0.0)]
vocab += [('<mask>', 0.0)]
return vocab
def unk_id(self, proto):
return 3
def post_processor(self):
return processors.TemplateProcessing(single='$A </s> en_XX', pair='$A $B </s> en_XX', special_tokens=[('en_XX', self.original_tokenizer.convert_tokens_to_ids('en_XX')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class MBartConverter(SpmConverter):
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 16
| 0
| 16
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 51
| 2
| 49
| 5
| 45
| 0
| 11
| 5
| 7
| 1
| 2
| 0
| 3
|
229
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.MPNetConverter
|
from tokenizers.models import BPE, Unigram, WordPiece
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class MPNetConverter(Converter):
def converted(self) -> Tokenizer:
vocab = self.original_tokenizer.vocab
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
tokenize_chinese_chars = False
strip_accents = False
do_lower_case = False
if hasattr(self.original_tokenizer, 'basic_tokenizer'):
tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
tokenizer.normalizer = normalizers.BertNormalizer(clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case)
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
cls = str(self.original_tokenizer.cls_token)
sep = str(self.original_tokenizer.sep_token)
cls_token_id = self.original_tokenizer.cls_token_id
sep_token_id = self.original_tokenizer.sep_token_id
tokenizer.post_processor = processors.TemplateProcessing(single=f'{cls}:0 $A:0 {sep}:0', pair=f'{cls}:0 $A:0 {sep}:0 {sep}:0 $B:1 {sep}:1', special_tokens=[(cls, cls_token_id), (sep, sep_token_id)])
tokenizer.decoder = decoders.WordPiece(prefix='##')
return tokenizer
|
class MPNetConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 36
| 5
| 31
| 2
| 2
| 0.06
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 37
| 5
| 32
| 11
| 30
| 2
| 20
| 11
| 18
| 2
| 1
| 1
| 2
|
230
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.MarkupLMConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class MarkupLMConverter(Converter):
def converted(self) -> Tokenizer:
ot = self.original_tokenizer
vocab = ot.encoder
merges = list(ot.bpe_ranks.keys())
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix='', end_of_word_suffix='', fuse_unk=False, unk_token=self.original_tokenizer.unk_token))
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
cls = str(self.original_tokenizer.cls_token)
sep = str(self.original_tokenizer.sep_token)
cls_token_id = self.original_tokenizer.cls_token_id
sep_token_id = self.original_tokenizer.sep_token_id
tokenizer.post_processor = processors.TemplateProcessing(single=f'{cls} $A {sep}', pair=f'{cls} $A {sep} $B {sep}', special_tokens=[(cls, cls_token_id), (sep, sep_token_id)])
return tokenizer
|
class MarkupLMConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 35
| 5
| 30
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 3
| 36
| 5
| 31
| 10
| 29
| 0
| 14
| 10
| 12
| 1
| 1
| 0
| 1
|
231
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.MoshiConverter
|
from .utils import is_protobuf_available, is_sentencepiece_available, logging, requires_backends
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class MoshiConverter(SpmConverter):
handle_byte_fallback = True
def __init__(self, vocab_file, model_max_length=None, **kwargs):
requires_backends(self, 'protobuf')
Converter.__init__(self, vocab_file)
model_pb2 = import_protobuf()
m = model_pb2.ModelProto()
with open(vocab_file, 'rb') as f:
m.ParseFromString(f.read())
self.proto = m
def normalizer(self, proto):
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
_normalizers = [normalizers.Replace(' ', '▁')]
if not precompiled_charsmap:
return normalizers.Sequence(_normalizers)
else:
return normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap)] + _normalizers)
def decoder(self, replacement, add_prefix_space):
sequence = [decoders.Replace('▁', ' '), decoders.ByteFallback(), decoders.Fuse()]
if add_prefix_space:
sequence += [decoders.Strip(content=' ', left=1)]
return decoders.Sequence(sequence)
def pre_tokenizer(self, replacement, add_prefix_space):
prepend_scheme = 'first'
return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme, split=False)
|
class MoshiConverter(SpmConverter):
def __init__(self, vocab_file, model_max_length=None, **kwargs):
pass
def normalizer(self, proto):
pass
def decoder(self, replacement, add_prefix_space):
pass
def pre_tokenizer(self, replacement, add_prefix_space):
pass
| 5
| 0
| 8
| 1
| 7
| 0
| 2
| 0.03
| 1
| 0
| 0
| 0
| 4
| 1
| 4
| 15
| 39
| 7
| 31
| 14
| 26
| 1
| 24
| 13
| 19
| 2
| 2
| 1
| 6
|
232
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.NllbConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class NllbConverter(SpmConverter):
def vocab(self, proto):
vocab = [('<s>', 0.0), ('<pad>', 0.0), ('</s>', 0.0), ('<unk>', 0.0)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
return vocab
def unk_id(self, proto):
return 3
def post_processor(self):
return processors.TemplateProcessing(single='eng_Latn $A </s>', pair='eng_Latn $A $B </s>', special_tokens=[('eng_Latn', self.original_tokenizer.convert_tokens_to_ids('eng_Latn')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class NllbConverter(SpmConverter):
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 23
| 2
| 21
| 5
| 17
| 0
| 9
| 5
| 5
| 1
| 2
| 0
| 3
|
233
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.OpenAIGPTConverter
|
from tokenizers.models import BPE, Unigram, WordPiece
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class OpenAIGPTConverter(Converter):
def converted(self) -> Tokenizer:
vocab = self.original_tokenizer.encoder
merges = list(self.original_tokenizer.bpe_ranks.keys())
unk_token = self.original_tokenizer.unk_token
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, unk_token=str(unk_token), end_of_word_suffix='</w>', fuse_unk=False))
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True)
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tokenizer.decoder = decoders.BPEDecoder(suffix='</w>')
return tokenizer
|
class OpenAIGPTConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 24
| 4
| 20
| 0
| 2
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 3
| 25
| 4
| 21
| 6
| 19
| 0
| 12
| 6
| 10
| 2
| 1
| 1
| 2
|
234
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.PegasusConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class PegasusConverter(SpmConverter):
def vocab(self, proto):
vocab = [(self.original_tokenizer.pad_token, 0.0), (self.original_tokenizer.eos_token, 0.0)]
if self.original_tokenizer.mask_token_sent is not None:
vocab += [(self.original_tokenizer.mask_token_sent, 0.0)]
if self.original_tokenizer.mask_token is not None and self.original_tokenizer.mask_token_id < self.original_tokenizer.offset:
vocab += [(self.original_tokenizer.mask_token, 0.0)]
vocab += [(f'<unk_{i}>', -100.0) for i in range(2, self.original_tokenizer.offset)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]]
return vocab
def unk_id(self, proto):
return proto.trainer_spec.unk_id + self.original_tokenizer.offset
def pre_tokenizer(self, replacement, add_prefix_space):
prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
return pre_tokenizers.Sequence([pre_tokenizers.WhitespaceSplit(), pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)])
def post_processor(self):
eos = self.original_tokenizer.eos_token
special_tokens = [(eos, self.original_tokenizer.eos_token_id)]
return processors.TemplateProcessing(single=['$A', eos], pair=['$A', '$B', eos], special_tokens=special_tokens)
|
class PegasusConverter(SpmConverter):
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def pre_tokenizer(self, replacement, add_prefix_space):
pass
def post_processor(self):
pass
| 5
| 0
| 9
| 1
| 8
| 0
| 2
| 0
| 1
| 1
| 0
| 0
| 4
| 0
| 4
| 15
| 38
| 6
| 32
| 9
| 27
| 0
| 19
| 9
| 14
| 3
| 2
| 1
| 6
|
235
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.Qwen2Converter
|
from typing import Optional
from tokenizers.models import BPE, Unigram, WordPiece
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class Qwen2Converter(Converter):
def converted(self, vocab: Optional[dict[str, int]]=None, merges: Optional[list[tuple[str, str]]]=None) -> Tokenizer:
if not vocab:
vocab = self.original_tokenizer.encoder
if not merges:
merges = list(self.original_tokenizer.bpe_ranks.keys())
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, unk_token=None, continuing_subword_prefix='', end_of_word_suffix='', fuse_unk=False, byte_fallback=False))
tokenizer.normalizer = normalizers.NFC()
tokenizer.pre_tokenizer = pre_tokenizers.Sequence([pre_tokenizers.Split(Regex("(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"), behavior='isolated', invert=False), pre_tokenizers.ByteLevel(add_prefix_space=getattr(self.original_tokenizer, 'add_prefix_space', False), use_regex=False)])
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
return tokenizer
|
class Qwen2Converter(Converter):
def converted(self, vocab: Optional[dict[str, int]]=None, merges: Optional[list[tuple[str, str]]]=None) -> Tokenizer:
pass
| 2
| 0
| 41
| 5
| 36
| 0
| 3
| 0
| 1
| 3
| 0
| 1
| 1
| 1
| 1
| 3
| 42
| 5
| 37
| 4
| 35
| 0
| 12
| 3
| 10
| 3
| 1
| 1
| 3
|
236
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.ReformerConverter
|
class ReformerConverter(SpmConverter):
pass
|
class ReformerConverter(SpmConverter):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
237
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.RemBertConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class RemBertConverter(SpmConverter):
def normalizer(self, proto):
list_normalizers = [normalizers.Replace('``', '"'), normalizers.Replace("''", '"'), normalizers.Replace(Regex(' {2,}'), ' ')]
if not self.original_tokenizer.keep_accents:
list_normalizers.append(normalizers.NFKD())
list_normalizers.append(normalizers.StripAccents())
if self.original_tokenizer.do_lower_case:
list_normalizers.append(normalizers.Lowercase())
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
if precompiled_charsmap:
list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
return normalizers.Sequence(list_normalizers)
def post_processor(self):
return processors.TemplateProcessing(single='[CLS]:0 $A:0 [SEP]:0', pair='[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1', special_tokens=[('[CLS]', self.original_tokenizer.convert_tokens_to_ids('[CLS]')), ('[SEP]', self.original_tokenizer.convert_tokens_to_ids('[SEP]'))])
|
class RemBertConverter(SpmConverter):
def normalizer(self, proto):
pass
def post_processor(self):
pass
| 3
| 0
| 14
| 2
| 12
| 0
| 3
| 0.04
| 1
| 0
| 0
| 0
| 2
| 0
| 2
| 13
| 30
| 4
| 25
| 5
| 22
| 1
| 14
| 5
| 11
| 4
| 2
| 1
| 5
|
238
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.RoFormerConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class RoFormerConverter(Converter):
def converted(self) -> Tokenizer:
from .models.roformer.tokenization_utils import JiebaPreTokenizer
vocab = self.original_tokenizer.vocab
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
strip_accents = False
do_lower_case = False
if hasattr(self.original_tokenizer, 'basic_tokenizer'):
strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
tokenizer.normalizer = normalizers.BertNormalizer(clean_text=True, handle_chinese_chars=False, strip_accents=strip_accents, lowercase=do_lower_case)
tokenizer.pre_tokenizer = pre_tokenizers.PreTokenizer.custom(JiebaPreTokenizer(vocab))
cls = str(self.original_tokenizer.cls_token)
sep = str(self.original_tokenizer.sep_token)
cls_token_id = self.original_tokenizer.cls_token_id
sep_token_id = self.original_tokenizer.sep_token_id
tokenizer.post_processor = processors.TemplateProcessing(single=f'{cls}:0 $A:0 {sep}:0', pair=f'{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1', special_tokens=[(cls, cls_token_id), (sep, sep_token_id)])
tokenizer.decoder = decoders.WordPiece(prefix='##')
return tokenizer
|
class RoFormerConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 36
| 6
| 30
| 1
| 2
| 0.03
| 1
| 2
| 1
| 0
| 1
| 0
| 1
| 3
| 37
| 6
| 31
| 11
| 28
| 1
| 19
| 11
| 16
| 2
| 1
| 1
| 2
|
239
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.RobertaConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class RobertaConverter(Converter):
def converted(self) -> Tokenizer:
ot = self.original_tokenizer
vocab = ot.encoder
merges = list(ot.bpe_ranks.keys())
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix='', end_of_word_suffix='', fuse_unk=False))
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.RobertaProcessing(sep=(ot.sep_token, ot.sep_token_id), cls=(ot.cls_token, ot.cls_token_id), add_prefix_space=ot.add_prefix_space, trim_offsets=True)
return tokenizer
|
class RobertaConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 26
| 3
| 23
| 1
| 1
| 0.04
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 27
| 3
| 24
| 6
| 22
| 1
| 10
| 6
| 8
| 1
| 1
| 0
| 1
|
240
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.SeamlessM4TConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class SeamlessM4TConverter(SpmConverter):
def vocab(self, proto):
vocab = [('<pad>', 0.0), ('<unk>', 0.0), ('<s>', 0.0), ('</s>', 0.0)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
return vocab
def unk_id(self, proto):
return self.original_tokenizer.unk_token_id
def post_processor(self):
return processors.TemplateProcessing(single='__eng__ $A </s>', pair='__eng__ $A $B </s>', special_tokens=[('__eng__', self.original_tokenizer.convert_tokens_to_ids('__eng__')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class SeamlessM4TConverter(SpmConverter):
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 23
| 2
| 21
| 5
| 17
| 0
| 9
| 5
| 5
| 1
| 2
| 0
| 3
|
241
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.SentencePieceExtractor
|
from .utils import is_protobuf_available, is_sentencepiece_available, logging, requires_backends
class SentencePieceExtractor:
"""
Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece
"""
def __init__(self, model: str):
requires_backends(self, 'sentencepiece')
from sentencepiece import SentencePieceProcessor
self.sp = SentencePieceProcessor()
self.sp.Load(model)
def extract(self, vocab_scores=None) -> tuple[dict[str, int], list[tuple]]:
"""
        By default, returns the vocab and merges in their original order; if `vocab_scores` is provided, the
        merges are ordered by piece score instead.
"""
sp = self.sp
vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())}
merges = generate_merges(vocab, vocab_scores)
return (vocab, merges)
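A minimal usage sketch for the extractor above, assuming the `sentencepiece` package is installed and a trained SentencePiece model file exists on disk (the filename is illustrative only):
extractor = SentencePieceExtractor("tokenizer.model")  # hypothetical path to a SentencePiece model
vocab, merges = extractor.extract()                    # dict[str, int] and a list of merge pairs
print(len(vocab), merges[:3])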
|
class SentencePieceExtractor:
'''
Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece
'''
def __init__(self, model: str):
pass
def extract(self, vocab_scores=None) -> tuple[dict[str, int], list[tuple]]:
'''
        By default, returns the vocab and merges in their original order; if `vocab_scores` is provided, the
        merges are ordered by piece score instead.
'''
pass
| 3
| 2
| 9
| 2
| 5
| 2
| 1
| 0.64
| 0
| 3
| 0
| 1
| 2
| 1
| 2
| 2
| 23
| 5
| 11
| 8
| 7
| 7
| 11
| 8
| 7
| 1
| 0
| 0
| 2
|
242
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.SplinterConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class SplinterConverter(Converter):
def converted(self) -> Tokenizer:
vocab = self.original_tokenizer.vocab
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token)))
tokenize_chinese_chars = False
strip_accents = False
do_lower_case = False
if hasattr(self.original_tokenizer, 'basic_tokenizer'):
tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars
strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents
do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case
tokenizer.normalizer = normalizers.BertNormalizer(clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case)
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
cls = str(self.original_tokenizer.cls_token)
sep = str(self.original_tokenizer.sep_token)
question = str(self.original_tokenizer.question_token)
dot = '.'
cls_token_id = self.original_tokenizer.cls_token_id
sep_token_id = self.original_tokenizer.sep_token_id
question_token_id = self.original_tokenizer.question_token_id
dot_token_id = self.original_tokenizer.convert_tokens_to_ids('.')
if self.original_tokenizer.padding_side == 'right':
pair = f'{cls}:0 $A:0 {question} {dot} {sep}:0 $B:1 {sep}:1'
else:
pair = f'{cls}:0 $A:0 {sep}:0 $B:1 {question} {dot} {sep}:1'
tokenizer.post_processor = processors.TemplateProcessing(single=f'{cls}:0 $A:0 {sep}:0', pair=pair, special_tokens=[(cls, cls_token_id), (sep, sep_token_id), (question, question_token_id), (dot, dot_token_id)])
tokenizer.decoder = decoders.WordPiece(prefix='##')
return tokenizer
|
class SplinterConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 47
| 6
| 41
| 1
| 3
| 0.02
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 3
| 48
| 6
| 42
| 16
| 40
| 1
| 27
| 16
| 25
| 3
| 1
| 1
| 3
|
243
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.SpmConverter
|
from .utils import is_protobuf_available, is_sentencepiece_available, logging, requires_backends
import warnings
from tokenizers.models import BPE, Unigram, WordPiece
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class SpmConverter(Converter):
handle_byte_fallback = False
SpmExtractor = SentencePieceExtractor
special_tokens = {}
def __init__(self, *args):
requires_backends(self, 'protobuf')
super().__init__(*args)
model_pb2 = import_protobuf()
m = model_pb2.ModelProto()
with open(self.original_tokenizer.vocab_file, 'rb') as f:
m.ParseFromString(f.read())
self.proto = m
if self.proto.trainer_spec.byte_fallback and (not self.handle_byte_fallback):
warnings.warn('The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option which is not implemented in the fast tokenizers. In practice this means that the fast version of the tokenizer can produce unknown tokens whereas the sentencepiece version would have converted these unknown tokens into a sequence of byte tokens matching the original piece of text.')
def vocab(self, proto):
return [(piece.piece, piece.score) for piece in proto.pieces]
def unk_id(self, proto):
return proto.trainer_spec.unk_id
def tokenizer(self, proto):
model_type = proto.trainer_spec.model_type
vocab_scores = self.vocab(proto)
if model_type == 1:
tokenizer = Tokenizer(Unigram(vocab_scores, unk_id=self.unk_id(proto), byte_fallback=self.handle_byte_fallback))
elif model_type == 2:
_, merges = self.SpmExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
bpe_vocab = {word: i for i, (word, score) in enumerate(vocab_scores)}
tokenizer = Tokenizer(BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=self.handle_byte_fallback, dropout=None))
else:
            raise Exception("You're trying to run a `Unigram` model but your file was trained with a different algorithm")
spm_added_tokens = [(id, p.piece, p.type == 3 or p.piece in self.special_tokens) for id, p in enumerate(proto.pieces) if p.type in [3, 4]]
tokenizer.add_tokens([AddedToken(token, normalized=False, special=special) for id, token, special in sorted(spm_added_tokens, key=lambda x: x[0])])
return tokenizer
def normalizer(self, proto):
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
_normalizers = [normalizers.Strip(left=False, right=True), normalizers.Replace(Regex(' {2,}'), '▁')]
if not precompiled_charsmap:
return normalizers.Sequence(_normalizers)
else:
return normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap)] + _normalizers)
def pre_tokenizer(self, replacement, add_prefix_space):
prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
def post_processor(self):
return None
def decoder(self, replacement, add_prefix_space):
prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
return decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
def converted(self) -> Tokenizer:
tokenizer = self.tokenizer(self.proto)
normalizer = self.normalizer(self.proto)
if normalizer is not None:
tokenizer.normalizer = normalizer
replacement = '▁'
add_prefix_space = True
if hasattr(self.original_tokenizer, 'add_prefix_space'):
add_prefix_space = self.original_tokenizer.add_prefix_space
pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space)
if pre_tokenizer is not None:
tokenizer.pre_tokenizer = pre_tokenizer
tokenizer.decoder = self.decoder(replacement, add_prefix_space)
post_processor = self.post_processor()
if post_processor:
tokenizer.post_processor = post_processor
return tokenizer
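To make the flow of `converted()` above concrete, here is a hedged sketch of the hooks a subclass typically overrides; the class below is hypothetical and only illustrates the extension points (`vocab`, `unk_id`, `normalizer`, `pre_tokenizer`, `decoder`, `post_processor`) that the base class composes.
class MySpmConverter(SpmConverter):  # hypothetical subclass, not part of the library
    handle_byte_fallback = True

    def unk_id(self, proto):
        return 0  # assumes <unk> sits at id 0 in this particular SentencePiece model

    def post_processor(self):
        return None  # no special-token template wrapped around sequences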
|
class SpmConverter(Converter):
def __init__(self, *args):
pass
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def tokenizer(self, proto):
pass
def normalizer(self, proto):
pass
def pre_tokenizer(self, replacement, add_prefix_space):
pass
def post_processor(self):
pass
def decoder(self, replacement, add_prefix_space):
pass
def converted(self) -> Tokenizer:
pass
| 10
| 0
| 13
| 2
| 10
| 1
| 2
| 0.07
| 1
| 3
| 0
| 22
| 9
| 1
| 9
| 11
| 127
| 23
| 98
| 34
| 88
| 7
| 61
| 32
| 51
| 5
| 1
| 1
| 17
|
244
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.T5Converter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class T5Converter(SpmConverter):
def vocab(self, proto):
num_extra_ids = self.original_tokenizer._extra_ids
vocab = [(piece.piece, piece.score) for piece in proto.pieces]
vocab += [(f'<extra_id_{i}>', 0.0) for i in range(num_extra_ids - 1, -1, -1)]
return vocab
def post_processor(self):
return processors.TemplateProcessing(single=['$A', '</s>'], pair=['$A', '</s>', '$B', '</s>'], special_tokens=[('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class T5Converter(SpmConverter):
def vocab(self, proto):
pass
def post_processor(self):
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 2
| 0
| 2
| 13
| 15
| 1
| 14
| 5
| 11
| 0
| 8
| 5
| 5
| 1
| 2
| 0
| 2
|
245
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.TikTokenConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class TikTokenConverter:
"""
A general tiktoken converter.
"""
def __init__(self, vocab_file=None, pattern="(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", add_prefix_space=False, additional_special_tokens=None, *args, **kwargs):
super().__init__(*args)
self.vocab_file = vocab_file
self.pattern = pattern
self.add_prefix_space = add_prefix_space
self.additional_special_tokens = additional_special_tokens.keys() if isinstance(additional_special_tokens, dict) else additional_special_tokens
def extract_vocab_merges_from_model(self, tiktoken_url: str):
try:
from tiktoken.load import load_tiktoken_bpe
except Exception:
raise ValueError('`tiktoken` is required to read a `tiktoken` file. Install it with `pip install tiktoken`.')
bpe_ranks = load_tiktoken_bpe(tiktoken_url)
byte_encoder = bytes_to_unicode()
def token_bytes_to_string(b):
return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])
merges = []
vocab = {}
for token, rank in bpe_ranks.items():
vocab[token_bytes_to_string(token)] = rank
if len(token) == 1:
continue
local = []
for index in range(1, len(token)):
piece_l, piece_r = (token[:index], token[index:])
if piece_l in bpe_ranks and piece_r in bpe_ranks and (piece_l + piece_r in bpe_ranks):
local.append((piece_l, piece_r, rank))
local = sorted(local, key=lambda x: (bpe_ranks[x[0]], bpe_ranks[x[1]]), reverse=False)
merges.extend(local)
merges = sorted(merges, key=lambda val: val[2], reverse=False)
merges = [(token_bytes_to_string(val[0]), token_bytes_to_string(val[1])) for val in merges]
return (vocab, merges)
def tokenizer(self):
vocab_scores, merges = self.extract_vocab_merges_from_model(self.vocab_file)
tokenizer = Tokenizer(BPE(vocab_scores, merges, fuse_unk=False))
if hasattr(tokenizer.model, 'ignore_merges'):
tokenizer.model.ignore_merges = True
return tokenizer
def converted(self) -> Tokenizer:
tokenizer = self.tokenizer()
tokenizer.pre_tokenizer = pre_tokenizers.Sequence([pre_tokenizers.Split(Regex(self.pattern), behavior='isolated', invert=False), pre_tokenizers.ByteLevel(add_prefix_space=self.add_prefix_space, use_regex=False)])
tokenizer.decoder = decoders.ByteLevel()
tokenizer.add_special_tokens([AddedToken(token, normalized=False, special=True) for token in self.additional_special_tokens])
tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
return tokenizer
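A short, hedged usage sketch for the tiktoken converter above; it assumes the `tiktoken` package is installed and a tiktoken-format BPE file is on disk, and both the path and the special token are illustrative.
converter = TikTokenConverter(
    vocab_file="model.tiktoken",                  # hypothetical local tiktoken BPE file
    additional_special_tokens=["<|endoftext|>"],  # illustrative special token
)
fast_backend = converter.converted()              # tokenizers.Tokenizer with ByteLevel pre-tokenization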
|
class TikTokenConverter:
'''
A general tiktoken converter.
'''
def __init__(self, vocab_file=None, pattern="(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", add_prefix_space=False, additional_special_tokens=None, *args, **kwargs):
pass
def extract_vocab_merges_from_model(self, tiktoken_url: str):
pass
def token_bytes_to_string(b):
pass
def tokenizer(self):
pass
def converted(self) -> Tokenizer:
pass
| 6
| 1
| 13
| 1
| 12
| 0
| 2
| 0.05
| 0
| 5
| 0
| 0
| 4
| 4
| 4
| 4
| 72
| 9
| 60
| 30
| 45
| 3
| 45
| 22
| 38
| 6
| 0
| 3
| 11
|
246
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.UdopConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class UdopConverter(SpmConverter):
def post_processor(self):
return processors.TemplateProcessing(single=['$A', '</s>'], pair=['$A', '</s>', '$B', '</s>'], special_tokens=[('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class UdopConverter(SpmConverter):
def post_processor(self):
pass
| 2
| 0
| 8
| 0
| 8
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 12
| 9
| 0
| 9
| 2
| 7
| 0
| 3
| 2
| 1
| 1
| 2
| 0
| 1
|
247
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.WhisperConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram, WordPiece
class WhisperConverter(Converter):
def converted(self) -> Tokenizer:
vocab = self.original_tokenizer.encoder
merges = list(self.original_tokenizer.bpe_ranks.keys())
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix='', end_of_word_suffix='', fuse_unk=False))
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
prefix_token_ids = self.original_tokenizer.prefix_tokens
prefixes = self.original_tokenizer.convert_ids_to_tokens(prefix_token_ids)
eos = self.original_tokenizer.eos_token
eos_token_id = self.original_tokenizer.eos_token_id
prefix_template = ' '.join([f'{token}:0' for token in prefixes])
tokenizer.post_processor = processors.TemplateProcessing(single=f'{prefix_template} $A:0 {eos}:0', pair=f'{prefix_template} $A:0 $B:1 {eos}:1', special_tokens=[(eos, eos_token_id), *zip(prefixes, prefix_token_ids)])
return tokenizer
|
class WhisperConverter(Converter):
def converted(self) -> Tokenizer:
pass
| 2
| 0
| 33
| 4
| 29
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 3
| 34
| 4
| 30
| 10
| 28
| 0
| 14
| 10
| 12
| 1
| 1
| 0
| 1
|
248
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.XGLMConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class XGLMConverter(SpmConverter):
def vocab(self, proto):
vocab = [('<s>', 0.0), ('<pad>', 0.0), ('</s>', 0.0), ('<unk>', 0.0)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [('<madeupword0>', 0.0), ('<madeupword1>', 0.0), ('<madeupword2>', 0.0), ('<madeupword3>', 0.0), ('<madeupword4>', 0.0), ('<madeupword5>', 0.0), ('<madeupword6>', 0.0)]
return vocab
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self):
return processors.TemplateProcessing(single='</s> $A', pair='</s> $A </s> </s> $B', special_tokens=[('<s>', self.original_tokenizer.convert_tokens_to_ids('<s>')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class XGLMConverter(SpmConverter):
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 7
| 0
| 7
| 0
| 1
| 0.04
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 25
| 2
| 23
| 6
| 19
| 1
| 11
| 6
| 7
| 1
| 2
| 0
| 3
|
249
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.XLMRobertaConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class XLMRobertaConverter(SpmConverter):
def vocab(self, proto):
vocab = [('<s>', 0.0), ('<pad>', 0.0), ('</s>', 0.0), ('<unk>', 0.0)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [('<mask>', 0.0)]
return vocab
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self):
return processors.TemplateProcessing(single='<s> $A </s>', pair='<s> $A </s> </s> $B </s>', special_tokens=[('<s>', self.original_tokenizer.convert_tokens_to_ids('<s>')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
|
class XLMRobertaConverter(SpmConverter):
def vocab(self, proto):
pass
def unk_id(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 25
| 2
| 23
| 6
| 19
| 0
| 11
| 6
| 7
| 1
| 2
| 0
| 3
|
250
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/convert_slow_tokenizer.py
|
transformers.convert_slow_tokenizer.XLNetConverter
|
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
class XLNetConverter(SpmConverter):
def vocab(self, proto):
return [(piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces]
def normalizer(self, proto):
list_normalizers = [normalizers.Replace('``', '"'), normalizers.Replace("''", '"')]
if not self.original_tokenizer.keep_accents:
list_normalizers.append(normalizers.NFKD())
list_normalizers.append(normalizers.StripAccents())
if self.original_tokenizer.do_lower_case:
list_normalizers.append(normalizers.Lowercase())
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
if precompiled_charsmap:
list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
list_normalizers.append(normalizers.Replace(Regex(' {2,}'), ' '))
return normalizers.Sequence(list_normalizers)
def post_processor(self):
return processors.TemplateProcessing(single='$A:0 <sep>:0 <cls>:2', pair='$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2', special_tokens=[('<sep>', self.original_tokenizer.convert_tokens_to_ids('<sep>')), ('<cls>', self.original_tokenizer.convert_tokens_to_ids('<cls>'))])
|
class XLNetConverter(SpmConverter):
def vocab(self, proto):
pass
def normalizer(self, proto):
pass
def post_processor(self):
pass
| 4
| 0
| 11
| 1
| 10
| 0
| 2
| 0
| 1
| 0
| 0
| 0
| 3
| 0
| 3
| 14
| 35
| 5
| 30
| 6
| 26
| 0
| 17
| 6
| 13
| 4
| 2
| 1
| 7
|
251
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DataCollatorForLanguageModeling
|
from typing import Any, Callable, NewType, Optional, Union
import multiprocessing as mp
import numpy as np
from dataclasses import dataclass
from collections.abc import Mapping
from ..tokenization_utils_base import PreTrainedTokenizerBase
@dataclass
class DataCollatorForLanguageModeling(DataCollatorMixin):
"""
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
mlm (`bool`, *optional*, defaults to `True`):
Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
tokens and the value to predict for the masked token.
mlm_probability (`float`, *optional*, defaults to 0.15):
The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
mask_replace_prob (`float`, *optional*, defaults to 0.8):
The probability with which masked tokens are replaced by the tokenizer's mask token (e.g., `[MASK]`).
Defaults to 0.8, meaning 80% of the masked tokens will be replaced with `[MASK]`.
Only works when `mlm` is set to `True`.
random_replace_prob (`float`, *optional*, defaults to 0.1):
The probability with which masked tokens are replaced by random tokens from the tokenizer's vocabulary.
Defaults to 0.1, meaning 10% of the masked tokens will be replaced with random tokens. The remaining
masked tokens (1 - mask_replace_prob - random_replace_prob) are left unchanged.
Only works when `mlm` is set to `True`.
pad_to_multiple_of (`int`, *optional*):
If set, will pad the sequence to a multiple of the provided value.
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", or "pt".
seed (`int`, *optional*):
The seed to use for the random number generator for masking. If not provided, the global RNG will be used.
<Tip>
For best performance, this data collator should be used with a dataset having items that are dictionaries or
BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
[`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
<Example Options and Expectations>
1. Default Behavior:
- `mask_replace_prob=0.8`, `random_replace_prob=0.1`.
- Expect 80% of masked tokens replaced with `[MASK]`, 10% replaced with random tokens, and 10% left unchanged.
2. All masked tokens replaced by `[MASK]`:
- `mask_replace_prob=1.0`, `random_replace_prob=0.0`.
- Expect all masked tokens to be replaced with `[MASK]`. No tokens are left unchanged or replaced with random tokens.
3. No `[MASK]` replacement, only random tokens:
- `mask_replace_prob=0.0`, `random_replace_prob=1.0`.
- Expect all masked tokens to be replaced with random tokens. No `[MASK]` replacements or unchanged tokens.
4. Balanced replacement:
- `mask_replace_prob=0.5`, `random_replace_prob=0.4`.
- Expect 50% of masked tokens replaced with `[MASK]`, 40% replaced with random tokens, and 10% left unchanged.
Note:
The sum of `mask_replace_prob` and `random_replace_prob` must not exceed 1. If their sum is less than 1, the
remaining proportion will consist of masked tokens left unchanged.
</Tip>
"""
tokenizer: PreTrainedTokenizerBase
mlm: bool = True
mlm_probability: Optional[float] = 0.15
mask_replace_prob: float = 0.8
random_replace_prob: float = 0.1
pad_to_multiple_of: Optional[int] = None
return_tensors: str = 'pt'
seed: Optional[int] = None
def __post_init__(self):
if self.mlm:
if self.tokenizer.mask_token is None:
raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. You should pass `mlm=False` to train on causal language modeling instead.')
if self.mlm_probability is None or self.mlm_probability < 0 or self.mlm_probability > 1:
raise ValueError('mlm_probability should be between 0 and 1.')
self.mlm_probability = float(self.mlm_probability)
if self.mask_replace_prob + self.random_replace_prob > 1:
raise ValueError('The sum of mask_replace_prob and random_replace_prob should not exceed 1')
if self.mask_replace_prob < 0 or self.mask_replace_prob > 1:
raise ValueError('mask_replace_prob should be between 0 and 1.')
if self.random_replace_prob < 0 or self.random_replace_prob > 1:
raise ValueError('random_replace_prob should be between 0 and 1.')
self.mask_replace_prob = float(self.mask_replace_prob)
self.random_replace_prob = float(self.random_replace_prob)
self.generator = None
def get_generator(self, seed):
if self.return_tensors == 'pt':
import torch
return torch.Generator().manual_seed(seed)
else:
import numpy as np
return np.random.default_rng(seed)
def create_rng(self):
if mp.current_process().name == 'MainProcess':
self.generator = self.get_generator(self.seed)
else:
import torch
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
                error_string = 'Worker process information is not available for seeding the generator. This may be because you are using multiprocessing without using a PyTorch DataLoader. The `seed` parameter can only be used when using multiprocessing with a PyTorch DataLoader. Please either use a single process or use a PyTorch DataLoader with multiple workers.'
                raise ValueError(error_string)
self.generator = self.get_generator(self.seed + worker_info.id)
def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
if self.seed and self.generator is None:
self.create_rng()
if isinstance(examples[0], Mapping):
batch = pad_without_fast_tokenizer_warning(self.tokenizer, examples, return_tensors='pt', pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {'input_ids': _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)}
special_tokens_mask = batch.pop('special_tokens_mask', None)
if self.mlm:
batch['input_ids'], batch['labels'] = self.torch_mask_tokens(batch['input_ids'], special_tokens_mask=special_tokens_mask)
else:
labels = batch['input_ids'].clone()
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch['labels'] = labels
return batch
def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any]=None) -> tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
import torch
labels = inputs.clone()
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix, generator=self.generator).bool()
labels[~masked_indices] = -100
indices_replaced = torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
return (inputs, labels)
remaining_prob = 1 - self.mask_replace_prob
random_replace_prob_scaled = self.random_replace_prob / remaining_prob
indices_random = torch.bernoulli(torch.full(labels.shape, random_replace_prob_scaled), generator=self.generator).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long, generator=self.generator)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels)
def numpy_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
if self.seed and self.generator is None:
self.create_rng()
if isinstance(examples[0], Mapping):
batch = pad_without_fast_tokenizer_warning(self.tokenizer, examples, return_tensors='np', pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {'input_ids': _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)}
special_tokens_mask = batch.pop('special_tokens_mask', None)
if self.mlm:
batch['input_ids'], batch['labels'] = self.numpy_mask_tokens(batch['input_ids'], special_tokens_mask=special_tokens_mask)
else:
labels = np.copy(batch['input_ids'])
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch['labels'] = labels
return batch
def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any]=None) -> tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
labels = np.copy(inputs)
probability_matrix = np.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
else:
special_tokens_mask = special_tokens_mask.astype(bool)
probability_matrix[special_tokens_mask] = 0
if self.generator:
masked_indices = self.generator.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
else:
masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
labels[~masked_indices] = -100
if self.generator:
indices_replaced = self.generator.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
else:
indices_replaced = np.random.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
inputs[indices_replaced] = self.tokenizer.mask_token_id
if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
return (inputs, labels)
remaining_prob = 1 - self.mask_replace_prob
random_replace_prob_scaled = self.random_replace_prob / remaining_prob
if self.generator:
indices_random = self.generator.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
random_words = self.generator.integers(low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64)
else:
indices_random = np.random.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
random_words = np.random.randint(low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64)
inputs[indices_random] = random_words
return (inputs, labels)
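A hedged usage sketch for the collator above (the checkpoint name and sentences are illustrative), followed by a worked example of the probability rescaling done in `torch_mask_tokens`:
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tok = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=True, mlm_probability=0.15)
examples = [
    tok("The quick brown fox jumps over the lazy dog", return_special_tokens_mask=True),
    tok("Masked language modeling example", return_special_tokens_mask=True),
]
batch = collator(examples)  # pads the batch, masks input_ids, and builds -100-filled labels
print(batch["input_ids"].shape, batch["labels"].shape)
# Worked numbers for the rescaling: with mask_replace_prob=0.8 and random_replace_prob=0.1,
# remaining_prob = 0.2 and random_replace_prob_scaled = 0.5, so 0.15 * 0.2 * 0.5 = 0.015 of all
# tokens (10% of the masked positions) get random tokens and another 10% stay unchanged.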
|
@dataclass
class DataCollatorForLanguageModeling(DataCollatorMixin):
'''
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
mlm (`bool`, *optional*, defaults to `True`):
Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
tokens and the value to predict for the masked token.
mlm_probability (`float`, *optional*, defaults to 0.15):
The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
mask_replace_prob (`float`, *optional*, defaults to 0.8):
The probability with which masked tokens are replaced by the tokenizer's mask token (e.g., `[MASK]`).
Defaults to 0.8, meaning 80% of the masked tokens will be replaced with `[MASK]`.
Only works when `mlm` is set to `True`.
random_replace_prob (`float`, *optional*, defaults to 0.1):
The probability with which masked tokens are replaced by random tokens from the tokenizer's vocabulary.
Defaults to 0.1, meaning 10% of the masked tokens will be replaced with random tokens. The remaining
masked tokens (1 - mask_replace_prob - random_replace_prob) are left unchanged.
Only works when `mlm` is set to `True`.
pad_to_multiple_of (`int`, *optional*):
If set, will pad the sequence to a multiple of the provided value.
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", or "pt".
seed (`int`, *optional*):
The seed to use for the random number generator for masking. If not provided, the global RNG will be used.
<Tip>
For best performance, this data collator should be used with a dataset having items that are dictionaries or
BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
[`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
<Example Options and Expectations>
1. Default Behavior:
- `mask_replace_prob=0.8`, `random_replace_prob=0.1`.
- Expect 80% of masked tokens replaced with `[MASK]`, 10% replaced with random tokens, and 10% left unchanged.
2. All masked tokens replaced by `[MASK]`:
- `mask_replace_prob=1.0`, `random_replace_prob=0.0`.
- Expect all masked tokens to be replaced with `[MASK]`. No tokens are left unchanged or replaced with random tokens.
3. No `[MASK]` replacement, only random tokens:
- `mask_replace_prob=0.0`, `random_replace_prob=1.0`.
- Expect all masked tokens to be replaced with random tokens. No `[MASK]` replacements or unchanged tokens.
4. Balanced replacement:
- `mask_replace_prob=0.5`, `random_replace_prob=0.4`.
- Expect 50% of masked tokens replaced with `[MASK]`, 40% replaced with random tokens, and 10% left unchanged.
Note:
The sum of `mask_replace_prob` and `random_replace_prob` must not exceed 1. If their sum is less than 1, the
remaining proportion will consist of masked tokens left unchanged.
</Tip>
'''
def __post_init__(self):
pass
def get_generator(self, seed):
pass
def create_rng(self):
pass
def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
pass
def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any]=None) -> tuple[Any, Any]:
'''
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
'''
pass
def numpy_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
pass
def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any]=None) -> tuple[Any, Any]:
'''
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
'''
pass
| 9
| 3
| 30
| 3
| 22
| 5
| 4
| 0.49
| 1
| 6
| 0
| 2
| 7
| 0
| 8
| 9
| 317
| 46
| 184
| 58
| 167
| 90
| 126
| 55
| 112
| 7
| 1
| 2
| 29
|
252
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DataCollatorForPermutationLanguageModeling
|
from collections.abc import Mapping
from ..tokenization_utils_base import PreTrainedTokenizerBase
from typing import Any, Callable, NewType, Optional, Union
from random import randint
import numpy as np
from dataclasses import dataclass
@dataclass
class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
"""
Data collator used for permutation language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for permutation language modeling with procedures specific to XLNet
"""
tokenizer: PreTrainedTokenizerBase
plm_probability: float = 1 / 6
max_span_length: int = 5
return_tensors: str = 'pt'
def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e['input_ids'] for e in examples]
batch = _torch_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
return {'input_ids': inputs, 'perm_mask': perm_mask, 'target_mapping': target_mapping, 'labels': labels}
def numpy_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e['input_ids'] for e in examples]
batch = _numpy_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
return {'input_ids': inputs, 'perm_mask': perm_mask, 'target_mapping': target_mapping, 'labels': labels}
def torch_mask_tokens(self, inputs: Any) -> tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError('This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer.')
if inputs.size(1) % 2 != 0:
raise ValueError('This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details.')
labels = inputs.clone()
masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
cur_len = 0
max_len = labels.size(1)
while cur_len < max_len:
span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
context_length = int(span_length / self.plm_probability)
start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
masked_indices[i, start_index:start_index + span_length] = 1
cur_len += context_length
target_mapping[i] = torch.eye(labels.size(1))
special_tokens_mask = torch.tensor([self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()], dtype=torch.bool)
masked_indices.masked_fill_(special_tokens_mask, value=0.0)
if self.tokenizer.pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
masked_indices.masked_fill_(padding_mask, value=0.0)
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100
perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
perm_index = torch.arange(labels.size(1))
perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
perm_index = torch.flatten(perm_index.transpose(0, 1))
perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
perm_mask[i] = (perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))) & masked_indices[i]
return (inputs.long(), perm_mask, target_mapping, labels.long())
def numpy_mask_tokens(self, inputs: Any) -> tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
if self.tokenizer.mask_token is None:
raise ValueError('This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer.')
if inputs.shape[1] % 2 != 0:
raise ValueError('This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details.')
labels = np.copy(inputs)
masked_indices = np.full(labels.shape, 0, dtype=bool)
target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
for i in range(labels.shape[0]):
cur_len = 0
max_len = labels.shape[1]
while cur_len < max_len:
span_length = randint(1, self.max_span_length + 1)
context_length = int(span_length / self.plm_probability)
start_index = cur_len + randint(0, context_length - span_length + 1)
masked_indices[i, start_index:start_index + span_length] = 1
cur_len += context_length
target_mapping[i] = np.eye(labels.shape[1])
special_tokens_mask = np.array([self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()], dtype=bool)
masked_indices[special_tokens_mask] = 0
if self.tokenizer.pad_token is not None:
padding_mask = labels == self.tokenizer.pad_token_id
masked_indices[padding_mask] = 0.0
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100
perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
for i in range(labels.shape[0]):
perm_index = np.arange(labels.shape[1])
perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
np.random.shuffle(perm_index)
perm_index = perm_index.T.flatten()
perm_index[~masked_indices[i] & non_func_mask[i]] = -1
perm_mask[i] = (perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))) & masked_indices[i]
return (inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64))
|
@dataclass
class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
'''
Data collator used for permutation language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for permutation language modeling with procedures specific to XLNet
'''
def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
pass
def numpy_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
pass
def torch_mask_tokens(self, inputs: Any) -> tuple[Any, Any, Any, Any]:
'''
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
'''
pass
def numpy_mask_tokens(self, inputs: Any) -> tuple[Any, Any, Any, Any]:
'''
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
'''
pass
| 6
| 3
| 53
| 7
| 28
| 19
| 5
| 0.71
| 1
| 7
| 0
| 0
| 6
| 0
| 6
| 7
| 336
| 48
| 171
| 61
| 162
| 122
| 135
| 61
| 126
| 7
| 1
| 2
| 27
|
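A minimal usage sketch for the permutation collator above, assuming the `xlnet-base-cased` checkpoint (and sentencepiece) can be loaded; the inputs are padded to a fixed even length because the collator rejects odd sequence lengths:

from transformers import AutoTokenizer, DataCollatorForPermutationLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
collator = DataCollatorForPermutationLanguageModeling(
    tokenizer=tokenizer,
    plm_probability=1 / 6,  # span_length / context_length ratio used in step 2 of the algorithm
    max_span_length=5,      # upper bound for the span sampled in step 1
)

# Pad to an even length: odd lengths raise the ValueError shown above.
examples = [
    {"input_ids": tokenizer(text, padding="max_length", max_length=16, truncation=True)["input_ids"]}
    for text in ("Paris is the capital of France.", "The cat sat on the mat.")
]
batch = collator(examples)
print(batch["input_ids"].shape, batch["perm_mask"].shape, batch["target_mapping"].shape)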
253
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DataCollatorForSOP
|
import warnings
from dataclasses import dataclass
from typing import Any, Callable, NewType, Optional, Union
@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
"""
Data collator used for sentence order prediction task.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for both masked language modeling and sentence order prediction
"""
def __init__(self, *args, **kwargs):
warnings.warn('DataCollatorForSOP is deprecated and will be removed in a future version, you can now use DataCollatorForLanguageModeling instead.', FutureWarning)
def __call__(self, examples: list[dict[str, Any]]) -> dict[str, Any]:
import torch
from torch.nn.utils.rnn import pad_sequence
input_ids = [example['input_ids'] for example in examples]
input_ids = _torch_collate_batch(input_ids, self.tokenizer)
input_ids, labels, attention_mask = self.mask_tokens(input_ids)
token_type_ids = [example['token_type_ids'] for example in examples]
token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
sop_label_list = [example['sentence_order_label'] for example in examples]
sentence_order_label = torch.stack(sop_label_list)
return {'input_ids': input_ids, 'labels': labels, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'sentence_order_label': sentence_order_label}
def mask_tokens(self, inputs: Any) -> tuple[Any, Any, Any]:
"""
Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
original. N-gram not applied yet.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
labels = inputs.clone()
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer.pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
attention_mask = (~masked_indices).float()
if self.tokenizer.pad_token is not None:
attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
attention_mask.masked_fill_(attention_padding_mask, value=1.0)
labels[~masked_indices] = -100
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels, attention_mask)
|
@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
'''
Data collator used for sentence order prediction task.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for both masked language modeling and sentence order prediction
'''
def __init__(self, *args, **kwargs):
pass
def __call__(self, examples: list[dict[str, Any]]) -> dict[str, Any]:
pass
def mask_tokens(self, inputs: Any) -> tuple[Any, Any, Any]:
'''
Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
original. N-gram not applied yet.
'''
pass
| 5
| 2
| 23
| 3
| 17
| 4
| 2
| 0.31
| 1
| 5
| 0
| 0
| 3
| 1
| 3
| 12
| 80
| 13
| 52
| 22
| 45
| 16
| 37
| 22
| 30
| 4
| 2
| 1
| 6
|
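DataCollatorForSOP only emits the deprecation warning in its `__init__` and never initializes its fields, so it is effectively unusable as-is; the warning points to DataCollatorForLanguageModeling as the replacement. A minimal sketch of that replacement, assuming `bert-base-uncased` is available:

from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# Same 80% [MASK] / 10% random / 10% original scheme as mask_tokens above.
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)

examples = [
    tokenizer("sentence order prediction pairs two sentences")["input_ids"],
    tokenizer("masked language modeling replaces tokens")["input_ids"],
]
batch = collator(examples)
print(batch["input_ids"].shape, batch["labels"].shape)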
254
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DataCollatorForSeq2Seq
|
from typing import Any, Callable, NewType, Optional, Union
from ..tokenization_utils_base import PreTrainedTokenizerBase
from dataclasses import dataclass
import numpy as np
from ..utils import PaddingStrategy
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`], *optional*):
            The model that is being trained. If set and the model has a *prepare_decoder_input_ids_from_labels*
            method, it is used to prepare the *decoder_input_ids*
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.0 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = 'pt'
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
label_name = 'label' if 'label' in features[0] else 'labels'
labels = [feature[label_name] for feature in features] if label_name in features[0] else None
if labels is not None and all((label is None for label in labels)):
labels = None
non_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
batch = pad_without_fast_tokenizer_warning(self.tokenizer, non_labels_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=return_tensors)
no_padding = self.padding is False or self.padding == PaddingStrategy.DO_NOT_PAD
if labels is not None:
if no_padding:
if isinstance(features[0][label_name], list):
batch['labels'] = list(labels)
else:
batch['labels'] = [np.concatenate([label, []]) for label in labels]
else:
max_padding = self.padding == PaddingStrategy.MAX_LENGTH and self.max_length is not None
max_label_length = max((len(l) for l in labels)) if not max_padding else self.max_length
if self.pad_to_multiple_of is not None:
max_label_length = (max_label_length + self.pad_to_multiple_of - 1) // self.pad_to_multiple_of * self.pad_to_multiple_of
padding_side = self.tokenizer.padding_side
if isinstance(features[0][label_name], list):
batch['labels'] = [label + [self.label_pad_token_id] * (max_label_length - len(label)) if padding_side == 'right' else [self.label_pad_token_id] * (max_label_length - len(label)) + label for label in labels]
else:
batch['labels'] = [np.concatenate([label, np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64)]) if padding_side == 'right' else np.concatenate([np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64), label]) for label in labels]
if batch.get('labels', None) is not None:
if return_tensors == 'pt':
import torch
batch['labels'] = torch.tensor(batch['labels'], dtype=torch.int64)
else:
batch['labels'] = np.array(batch['labels'], dtype=np.int64)
else:
batch['labels'] = None
if labels is not None and self.model is not None and hasattr(self.model, 'prepare_decoder_input_ids_from_labels'):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=batch['labels'])
batch['decoder_input_ids'] = decoder_input_ids
return batch
|
@dataclass
class DataCollatorForSeq2Seq:
'''
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`], *optional*):
            The model that is being trained. If set and the model has a *prepare_decoder_input_ids_from_labels*
            method, it is used to prepare the *decoder_input_ids*
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.0 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
'''
def __call__(self, features, return_tensors=None):
pass
| 3
| 1
| 91
| 9
| 76
| 6
| 17
| 0.4
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 133
| 15
| 84
| 19
| 80
| 34
| 43
| 19
| 39
| 17
| 0
| 3
| 17
|
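A minimal sketch of the label-padding behaviour above, assuming the `t5-small` checkpoint is available (any encoder-decoder model exposing `prepare_decoder_input_ids_from_labels` would do):

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=-100)

features = [
    {"input_ids": tokenizer("translate English to German: hello")["input_ids"],
     "labels": tokenizer("hallo")["input_ids"]},
    {"input_ids": tokenizer("translate English to German: thank you very much")["input_ids"],
     "labels": tokenizer("vielen dank")["input_ids"]},
]
batch = collator(features)
# Labels are padded with -100 so the loss ignores padding, and decoder_input_ids
# are derived through model.prepare_decoder_input_ids_from_labels.
print(batch["labels"].shape, batch["decoder_input_ids"].shape)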
255
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DataCollatorForTokenClassification
|
from dataclasses import dataclass
import numpy as np
from typing import Any, Callable, NewType, Optional, Union
from ..tokenization_utils_base import PreTrainedTokenizerBase
from ..utils import PaddingStrategy
@dataclass
class DataCollatorForTokenClassification(DataCollatorMixin):
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.0 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = 'pt'
def torch_call(self, features):
import torch
label_name = 'label' if 'label' in features[0] else 'labels'
labels = [feature[label_name] for feature in features] if label_name in features[0] else None
no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
batch = pad_without_fast_tokenizer_warning(self.tokenizer, no_labels_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt')
if labels is None:
return batch
sequence_length = batch['input_ids'].shape[1]
padding_side = self.tokenizer.padding_side
def to_list(tensor_or_iterable):
if isinstance(tensor_or_iterable, torch.Tensor):
return tensor_or_iterable.tolist()
return list(tensor_or_iterable)
if padding_side == 'right':
batch[label_name] = [to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]
else:
batch[label_name] = [[self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels]
batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
return batch
def numpy_call(self, features):
label_name = 'label' if 'label' in features[0] else 'labels'
labels = [feature[label_name] for feature in features] if label_name in features[0] else None
batch = pad_without_fast_tokenizer_warning(self.tokenizer, features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='np' if labels is None else None)
if labels is None:
return batch
sequence_length = np.array(batch['input_ids']).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == 'right':
batch['labels'] = [list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]
else:
batch['labels'] = [[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels]
batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
return batch
|
@dataclass
class DataCollatorForTokenClassification(DataCollatorMixin):
'''
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.0 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
'''
def torch_call(self, features):
pass
def to_list(tensor_or_iterable):
pass
def numpy_call(self, features):
pass
| 5
| 1
| 26
| 4
| 22
| 1
| 5
| 0.29
| 1
| 2
| 0
| 0
| 3
| 0
| 3
| 4
| 137
| 22
| 89
| 28
| 82
| 26
| 53
| 28
| 46
| 6
| 1
| 1
| 19
|
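A minimal sketch of the token-classification collator, assuming `bert-base-cased` is available; labels shorter than the padded sequence length are filled with `label_pad_token_id`:

from transformers import AutoTokenizer, DataCollatorForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-100)

features = [
    {"input_ids": tokenizer("EU rejects German call")["input_ids"], "labels": [0, 3, 0, 7, 0, 0]},
    {"input_ids": tokenizer("Peter Blackburn")["input_ids"], "labels": [0, 1, 2, 0]},
]
batch = collator(features)
print(batch["input_ids"].shape, batch["labels"].shape)  # both share the padded length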
256
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DataCollatorForWholeWordMask
|
from collections.abc import Mapping
import warnings
import random
from typing import Any, Callable, NewType, Optional, Union
from ..models.bert import BertTokenizer, BertTokenizerFast
import numpy as np
from dataclasses import dataclass
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
"""
Data collator used for language modeling that masks entire words.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
<Tip>
This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].
</Tip>"""
def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
if self.seed and self.generator is None:
self.create_rng()
if isinstance(examples[0], Mapping):
input_ids = [e['input_ids'] for e in examples]
else:
input_ids = examples
examples = [{'input_ids': e} for e in examples]
batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e['input_ids']):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
if 'chinese_ref' in e:
ref_pos = tolist(e['chinese_ref'])
len_seq = len(e['input_ids'])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = '##' + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
return {'input_ids': inputs, 'labels': labels}
def numpy_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
if self.seed and self.generator is None:
self.create_rng()
if isinstance(examples[0], Mapping):
input_ids = [e['input_ids'] for e in examples]
else:
input_ids = examples
examples = [{'input_ids': e} for e in examples]
batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e['input_ids']):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
if 'chinese_ref' in e:
ref_pos = tolist(e['chinese_ref'])
len_seq = len(e['input_ids'])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = '##' + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
return {'input_ids': inputs, 'labels': labels}
def _shuffle(self, cand_indexes):
if self.seed is None:
random.shuffle(cand_indexes)
return cand_indexes
if self.return_tensors == 'pt':
import torch
indices = torch.randperm(len(cand_indexes), generator=self.generator)
return [cand_indexes[i] for i in indices]
elif self.return_tensors == 'np':
self.generator.shuffle(cand_indexes)
return cand_indexes
def _whole_word_mask(self, input_tokens: list[str], max_predictions=512):
"""
Get 0/1 labels for masked tokens with whole word mask proxy
"""
if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
warnings.warn('DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. Please refer to the documentation for more information.')
cand_indexes = []
for i, token in enumerate(input_tokens):
if token == '[CLS]' or token == '[SEP]':
continue
if len(cand_indexes) >= 1 and token.startswith('##'):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
cand_indexes = self._shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if len(masked_lms) + len(index_set) > num_to_predict:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
if len(covered_indexes) != len(masked_lms):
raise ValueError('Length of covered_indexes is not equal to length of masked_lms.')
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        Setting 'mask_labels' means whole word masking (wwm) is used: indices are masked directly according to the provided reference.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
labels = inputs.clone()
probability_matrix = mask_labels
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer.pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[~masked_indices] = -100
indices_replaced = torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
return (inputs, labels)
remaining_prob = 1 - self.mask_replace_prob
random_replace_prob_scaled = self.random_replace_prob / remaining_prob
indices_random = torch.bernoulli(torch.full(labels.shape, random_replace_prob_scaled), generator=self.generator).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long, generator=self.generator)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels)
def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        Setting 'mask_labels' means whole word masking (wwm) is used: indices are masked directly according to the provided reference.
"""
if self.tokenizer.mask_token is None:
raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
labels = np.copy(inputs)
masked_indices = mask_labels.astype(bool)
special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
if self.tokenizer.pad_token is not None:
padding_mask = labels == self.tokenizer.pad_token_id
masked_indices[padding_mask] = 0
labels[~masked_indices] = -100
if self.generator:
indices_replaced = self.generator.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
else:
indices_replaced = np.random.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
return (inputs, labels)
remaining_prob = 1 - self.mask_replace_prob
random_replace_prob_scaled = self.random_replace_prob / remaining_prob
if self.generator:
indices_random = self.generator.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
random_words = self.generator.integers(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
else:
indices_random = np.random.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
inputs[indices_random] = random_words[indices_random]
return (inputs, labels)
|
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
'''
Data collator used for language modeling that masks entire words.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
<Tip>
This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].
</Tip>'''
def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
pass
def numpy_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
pass
def _shuffle(self, cand_indexes):
pass
def _whole_word_mask(self, input_tokens: list[str], max_predictions=512):
'''
Get 0/1 labels for masked tokens with whole word mask proxy
'''
pass
def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> tuple[Any, Any]:
'''
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        Setting 'mask_labels' means whole word masking (wwm) is used: indices are masked directly according to the provided reference.
'''
pass
def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> tuple[Any, Any]:
'''
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        Setting 'mask_labels' means whole word masking (wwm) is used: indices are masked directly according to the provided reference.
'''
pass
| 8
| 4
| 35
| 5
| 26
| 6
| 6
| 0.27
| 1
| 11
| 2
| 0
| 7
| 1
| 7
| 16
| 269
| 46
| 180
| 76
| 169
| 49
| 156
| 76
| 145
| 14
| 2
| 4
| 44
|
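A minimal whole-word-masking sketch, assuming `bert-base-uncased` is available (as the docstring notes, the collator relies on the WordPiece `##` prefix convention):

from transformers import AutoTokenizer, DataCollatorForWholeWordMask

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)

examples = [
    {"input_ids": tokenizer("tokenizers split uncommon words into subwords")["input_ids"]},
    {"input_ids": tokenizer("whole word masking masks every piece of a chosen word")["input_ids"]},
]
batch = collator(examples)
# All subwords of a selected word are masked together; labels stay -100 elsewhere.
print(batch["input_ids"].shape, batch["labels"].shape)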
257
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DataCollatorMixin
|
class DataCollatorMixin:
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
if return_tensors == 'pt':
return self.torch_call(features)
elif return_tensors == 'np':
return self.numpy_call(features)
else:
raise ValueError(f"Framework '{return_tensors}' not recognized!")
|
class DataCollatorMixin:
def __call__(self, features, return_tensors=None):
pass
| 2
| 0
| 11
| 0
| 11
| 0
| 5
| 0
| 0
| 1
| 0
| 4
| 1
| 0
| 1
| 1
| 12
| 0
| 12
| 2
| 10
| 0
| 9
| 2
| 7
| 5
| 0
| 1
| 5
|
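The mixin only dispatches on `return_tensors`, so a custom collator just implements `torch_call` / `numpy_call`. A toy sketch (the `ToyCollator` class is illustrative, not part of the library):

import numpy as np
import torch
from transformers.data.data_collator import DataCollatorMixin

class ToyCollator(DataCollatorMixin):
    def __init__(self, return_tensors="pt"):
        self.return_tensors = return_tensors

    def torch_call(self, features):
        return {"input_ids": torch.tensor([f["input_ids"] for f in features])}

    def numpy_call(self, features):
        return {"input_ids": np.array([f["input_ids"] for f in features])}

features = [{"input_ids": [1, 2, 3]}, {"input_ids": [4, 5, 6]}]
print(type(ToyCollator("pt")(features)["input_ids"]))  # torch.Tensor
print(type(ToyCollator("np")(features)["input_ids"]))  # numpy.ndarray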
258
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DataCollatorWithFlattening
|
import numpy as np
from dataclasses import dataclass
@dataclass
class DataCollatorWithFlattening(DefaultDataCollator):
"""
Data collator used for padding free approach. Does the following:
- concatenates the entire mini batch into single long sequence of shape [1, total_tokens]
- uses `separator_id` to separate sequences within the concatenated `labels`, default value is -100
- no padding will be added, returns `input_ids`, `labels` and `position_ids` by default
- optionally returns the kwargs contained in FlashAttentionKwargs
- optionally returns seq_idx indicating which sequence each token belongs to
<Tip warning={true}>
Using `DataCollatorWithFlattening` will flatten the entire mini batch into single long sequence.
Make sure your attention computation is able to handle it!
</Tip>
"""
def __init__(self, *args, return_position_ids=True, separator_id=-100, return_flash_attn_kwargs=False, return_seq_idx=False, **kwargs):
super().__init__(*args, **kwargs)
self.return_position_ids = return_position_ids
self.separator_id = separator_id
self.return_flash_attn_kwargs = return_flash_attn_kwargs
self.return_seq_idx = return_seq_idx
self._int_64_keys = {'labels', 'position_ids', 'input_ids'}
self._batch_dim_keys = {'labels', 'position_ids', 'input_ids', 'seq_idx'}
self._py_int_keys = {'max_length_q', 'max_length_k'}
def __call__(self, features, return_tensors=None, separator_id=None):
if return_tensors is None:
return_tensors = self.return_tensors
if separator_id is None:
separator_id = self.separator_id
is_labels_provided = 'labels' in features[0]
batch = {'input_ids': [], 'labels': []}
if self.return_position_ids:
batch.update({'position_ids': []})
if self.return_seq_idx:
batch.update({'seq_idx': []})
if self.return_flash_attn_kwargs:
cu_seq_lens = [0]
max_length = 0
for seq_idx, sample in enumerate(features):
input_ids = sample['input_ids']
batch['input_ids'] += input_ids
if is_labels_provided:
batch['labels'] += [separator_id] + sample['labels'][1:]
else:
batch['labels'] += [separator_id] + input_ids[1:]
if self.return_position_ids:
batch['position_ids'] += list(range(len(input_ids)))
if self.return_seq_idx:
batch['seq_idx'] += [seq_idx for _ in range(len(input_ids))]
if self.return_flash_attn_kwargs:
cu_seq_lens.append(cu_seq_lens[-1] + len(input_ids))
max_length = max(max_length, len(input_ids))
if self.return_flash_attn_kwargs:
batch['cu_seq_lens_q'] = batch['cu_seq_lens_k'] = cu_seq_lens
batch['max_length_q'] = batch['max_length_k'] = max_length
if return_tensors == 'pt':
import torch
data_cls = torch.tensor
dtype_64 = torch.int64
dtype_32 = torch.int32
elif return_tensors == 'np':
data_cls = np.array
dtype_64 = np.int64
dtype_32 = np.int32
else:
raise ValueError(f'return_tensors must be one of ("pt", "np"), return_tensors={return_tensors!r} not supported')
for k, v in batch.items():
if k in self._batch_dim_keys:
v = [v]
if k not in self._py_int_keys:
batch[k] = data_cls(v, dtype=dtype_64 if k in self._int_64_keys else dtype_32)
return batch
|
@dataclass
class DataCollatorWithFlattening(DefaultDataCollator):
'''
Data collator used for padding free approach. Does the following:
- concatenates the entire mini batch into single long sequence of shape [1, total_tokens]
- uses `separator_id` to separate sequences within the concatenated `labels`, default value is -100
- no padding will be added, returns `input_ids`, `labels` and `position_ids` by default
- optionally returns the kwargs contained in FlashAttentionKwargs
- optionally returns seq_idx indicating which sequence each token belongs to
<Tip warning={true}>
Using `DataCollatorWithFlattening` will flatten the entire mini batch into single long sequence.
Make sure your attention computation is able to handle it!
</Tip>
'''
def __init__(self, *args, return_position_ids=True, separator_id=-100, return_flash_attn_kwargs=False, return_seq_idx=False, **kwargs):
pass
def __call__(self, features, return_tensors=None, separator_id=None):
pass
| 4
| 1
| 13
| 0
| 13
| 0
| 4
| 0.22
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 4
| 36
| 3
| 27
| 8
| 24
| 6
| 23
| 8
| 20
| 7
| 2
| 2
| 8
|
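A minimal sketch of the padding-free behaviour, assuming a transformers version recent enough to export `DataCollatorWithFlattening`; the token ids below are arbitrary placeholders:

from transformers import DataCollatorWithFlattening

collator = DataCollatorWithFlattening(return_flash_attn_kwargs=True, return_seq_idx=True)
features = [
    {"input_ids": [101, 7592, 102]},
    {"input_ids": [101, 2088, 2003, 2307, 102]},
]
batch = collator(features, return_tensors="np")
print(batch["input_ids"].shape)  # (1, 8): the mini batch is one concatenated sequence
print(batch["position_ids"])     # restart at 0 for every original sequence
print(batch["cu_seq_lens_q"])    # [0, 3, 8]: cumulative sequence boundaries
print(batch["seq_idx"])          # which original sequence each token came from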
259
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DataCollatorWithPadding
|
from ..tokenization_utils_base import PreTrainedTokenizerBase
from ..utils import PaddingStrategy
from dataclasses import dataclass
from typing import Any, Callable, NewType, Optional, Union
@dataclass
class DataCollatorWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.0 (Volta).
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
return_tensors: str = 'pt'
def __call__(self, features: list[dict[str, Any]]) -> dict[str, Any]:
batch = pad_without_fast_tokenizer_warning(self.tokenizer, features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors)
if 'label' in batch:
batch['labels'] = batch['label']
del batch['label']
if 'label_ids' in batch:
batch['labels'] = batch['label_ids']
del batch['label_ids']
return batch
|
@dataclass
class DataCollatorWithPadding:
'''
Data collator that will dynamically pad the inputs received.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.0 (Volta).
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
'''
def __call__(self, features: list[dict[str, Any]]) -> dict[str, Any]:
pass
| 3
| 1
| 16
| 0
| 16
| 0
| 3
| 1
| 0
| 2
| 0
| 0
| 1
| 0
| 1
| 1
| 49
| 5
| 22
| 7
| 20
| 22
| 15
| 7
| 13
| 3
| 0
| 1
| 3
|
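A dynamic-padding sketch, assuming `distilbert-base-uncased` is available; `pad_to_multiple_of=8` is only included to show the Tensor Core-friendly option:

from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
collator = DataCollatorWithPadding(tokenizer, padding="longest", pad_to_multiple_of=8)

features = [
    {**tokenizer("a short sentence"), "label": 0},
    {**tokenizer("a noticeably longer sentence that forces some padding"), "label": 1},
]
batch = collator(features)
# 'label' is renamed to 'labels' and every tensor shares the padded length.
print(batch["input_ids"].shape, batch["labels"])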
260
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/data_collator.py
|
transformers.data.data_collator.DefaultDataCollator
|
from typing import Any, Callable, NewType, Optional, Union
from dataclasses import dataclass
@dataclass
class DefaultDataCollator(DataCollatorMixin):
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- `label`: handles a single value (int or float) per object
- `label_ids`: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for an example of how it's useful.
This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
helpful if you need to set a return_tensors value at initialization.
Args:
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
"""
return_tensors: str = 'pt'
def __call__(self, features: list[dict[str, Any]], return_tensors=None) -> dict[str, Any]:
if return_tensors is None:
return_tensors = self.return_tensors
return default_data_collator(features, return_tensors)
|
@dataclass
class DefaultDataCollator(DataCollatorMixin):
'''
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- `label`: handles a single value (int or float) per object
- `label_ids`: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for an example of how it's useful.
This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
helpful if you need to set a return_tensors value at initialization.
Args:
return_tensors (`str`, *optional*, defaults to `"pt"`):
The type of Tensor to return. Allowable values are "np", or "pt".
'''
def __call__(self, features: list[dict[str, Any]], return_tensors=None) -> dict[str, Any]:
pass
| 3
| 1
| 4
| 0
| 4
| 0
| 2
| 2.17
| 1
| 2
| 0
| 1
| 1
| 0
| 1
| 2
| 25
| 6
| 6
| 3
| 4
| 13
| 6
| 3
| 4
| 2
| 1
| 1
| 2
|
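A minimal sketch: the default collator simply stacks equal-length fields and renames `label` to `labels`, with no tokenizer involved:

from transformers import DefaultDataCollator

collator = DefaultDataCollator(return_tensors="np")
features = [
    {"input_ids": [1, 2, 3], "label": 0},
    {"input_ids": [4, 5, 6], "label": 1},
]
batch = collator(features)
print(batch["input_ids"].shape, batch["labels"])  # (2, 3) [0 1]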
261
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/glue.py
|
transformers.data.datasets.glue.GlueDataTrainingArguments
|
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from dataclasses import dataclass, field
@dataclass
class GlueDataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
line.
"""
task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
data_dir: str = field(metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def __post_init__(self):
self.task_name = self.task_name.lower()
|
@dataclass
class GlueDataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
line.
'''
def __post_init__(self):
pass
| 3
| 1
| 2
| 0
| 2
| 0
| 1
| 0.26
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 27
| 3
| 19
| 6
| 17
| 5
| 7
| 6
| 5
| 1
| 0
| 0
| 1
|
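A sketch of how the dataclass is usually consumed through `HfArgumentParser`; the data directory below is a placeholder:

from transformers import GlueDataTrainingArguments, HfArgumentParser

parser = HfArgumentParser(GlueDataTrainingArguments)
(data_args,) = parser.parse_args_into_dataclasses(
    args=["--task_name", "MRPC", "--data_dir", "./glue_data/MRPC", "--max_seq_length", "128"]
)
print(data_args.task_name)  # lower-cased to 'mrpc' by __post_init__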
262
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/glue.py
|
transformers.data.datasets.glue.GlueDataset
|
from filelock import FileLock
from torch.utils.data import Dataset
import warnings
import torch
from ...utils import check_torch_load_is_safe, logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from typing import Optional, Union
from ...tokenization_utils_base import PreTrainedTokenizerBase
import time
import os
from ..processors.utils import InputFeatures
class GlueDataset(Dataset):
args: GlueDataTrainingArguments
output_mode: str
features: list[InputFeatures]
def __init__(self, args: GlueDataTrainingArguments, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int]=None, mode: Union[str, Split]=Split.train, cache_dir: Optional[str]=None):
warnings.warn('This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets library. You can have a look at this example script for pointers: https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py', FutureWarning)
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode, str):
try:
mode = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
cached_features_file = os.path.join(cache_dir if cache_dir is not None else args.data_dir, f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}')
label_list = self.processor.get_labels()
if args.task_name in ['mnli', 'mnli-mm'] and tokenizer.__class__.__name__ in ('RobertaTokenizer', 'RobertaTokenizerFast', 'XLMRobertaTokenizer', 'BartTokenizer', 'BartTokenizerFast'):
label_list[1], label_list[2] = (label_list[2], label_list[1])
self.label_list = label_list
lock_path = cached_features_file + '.lock'
with FileLock(lock_path):
if os.path.exists(cached_features_file) and (not args.overwrite_cache):
start = time.time()
check_torch_load_is_safe()
self.features = torch.load(cached_features_file, weights_only=True)
logger.info(f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start)
else:
logger.info(f'Creating features from dataset file at {args.data_dir}')
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
examples = self.processor.get_test_examples(args.data_dir)
else:
examples = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode)
start = time.time()
torch.save(self.features, cached_features_file)
logger.info(f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
|
class GlueDataset(Dataset):
def __init__(self, args: GlueDataTrainingArguments, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int]=None, mode: Union[str, Split]=Split.train, cache_dir: Optional[str]=None):
pass
def __len__(self):
pass
def __getitem__(self, i) -> InputFeatures:
pass
def get_labels(self):
pass
| 5
| 0
| 20
| 1
| 18
| 1
| 3
| 0.11
| 1
| 8
| 4
| 0
| 4
| 2
| 4
| 4
| 91
| 7
| 76
| 19
| 64
| 8
| 42
| 12
| 37
| 9
| 1
| 3
| 12
|
263
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/language_modeling.py
|
transformers.data.datasets.language_modeling.LineByLineTextDataset
|
import os
import warnings
import torch
from ...tokenization_utils import PreTrainedTokenizer
from torch.utils.data import Dataset
class LineByLineTextDataset(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
warnings.warn(DEPRECATION_WARNING.format('https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py'), FutureWarning)
if os.path.isfile(file_path) is False:
raise ValueError(f'Input file path {file_path} not found')
logger.info(f'Creating features from dataset file at {file_path}')
with open(file_path, encoding='utf-8') as f:
lines = [line for line in f.read().splitlines() if len(line) > 0 and (not line.isspace())]
batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
self.examples = batch_encoding['input_ids']
self.examples = [{'input_ids': torch.tensor(e, dtype=torch.long)} for e in self.examples]
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> dict[str, torch.tensor]:
return self.examples[i]
|
class LineByLineTextDataset(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
pass
def __len__(self):
pass
def __getitem__(self, i) -> dict[str, torch.tensor]:
pass
| 4
| 0
| 8
| 1
| 6
| 1
| 1
| 0.3
| 1
| 5
| 1
| 0
| 3
| 1
| 3
| 3
| 31
| 5
| 20
| 8
| 16
| 6
| 15
| 7
| 11
| 2
| 1
| 1
| 4
|
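A sketch pairing the (deprecated) line-by-line dataset with an MLM collator and a DataLoader; `corpus.txt` and the checkpoint are placeholders:

from torch.utils.data import DataLoader
from transformers import AutoTokenizer, DataCollatorForLanguageModeling, LineByLineTextDataset

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
dataset = LineByLineTextDataset(tokenizer=tokenizer, file_path="corpus.txt", block_size=128)
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
loader = DataLoader(dataset, batch_size=8, collate_fn=collator)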
264
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/language_modeling.py
|
transformers.data.datasets.language_modeling.LineByLineWithRefDataset
|
from torch.utils.data import Dataset
import os
from ...tokenization_utils import PreTrainedTokenizer
import json
import torch
import warnings
class LineByLineWithRefDataset(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
warnings.warn(DEPRECATION_WARNING.format('https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py'), FutureWarning)
if os.path.isfile(file_path) is False:
raise ValueError(f'Input file path {file_path} not found')
if os.path.isfile(ref_path) is False:
            raise ValueError(f'Ref file path {ref_path} not found')
logger.info(f'Creating features from dataset file at {file_path}')
logger.info(f'Use ref segment results at {ref_path}')
with open(file_path, encoding='utf-8') as f:
data = f.readlines()
data = [line.strip() for line in data if len(line) > 0 and (not line.isspace())]
with open(ref_path, encoding='utf-8') as f:
ref = [json.loads(line) for line in f.read().splitlines() if len(line) > 0 and (not line.isspace())]
if len(data) != len(ref):
raise ValueError(f'Length of Input file should be equal to Ref file. But the length of {file_path} is {len(data)} while length of {ref_path} is {len(ref)}')
batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size)
self.examples = batch_encoding['input_ids']
self.examples = [{'input_ids': torch.tensor(e, dtype=torch.long)} for e in self.examples]
n = len(self.examples)
for i in range(n):
self.examples[i]['chinese_ref'] = torch.tensor(ref[i], dtype=torch.long)
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> dict[str, torch.tensor]:
return self.examples[i]
|
class LineByLineWithRefDataset(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
pass
def __len__(self):
pass
def __getitem__(self, i) -> dict[str, torch.tensor]:
pass
| 4
| 0
| 13
| 1
| 11
| 2
| 2
| 0.24
| 1
| 6
| 1
| 0
| 3
| 1
| 3
| 3
| 46
| 5
| 34
| 11
| 30
| 8
| 26
| 10
| 22
| 5
| 1
| 1
| 7
|
265
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/language_modeling.py
|
transformers.data.datasets.language_modeling.LineByLineWithSOPTextDataset
|
import random
from ...tokenization_utils import PreTrainedTokenizer
from torch.utils.data import Dataset
import os
import torch
import warnings
class LineByLineWithSOPTextDataset(Dataset):
"""
    Dataset for the sentence order prediction task; prepares sentence pairs for the SOP task.
"""
def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
warnings.warn(DEPRECATION_WARNING.format('https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py'), FutureWarning)
if os.path.isdir(file_dir) is False:
raise ValueError(f'{file_dir} is not a directory')
logger.info(f'Creating features from dataset file folder at {file_dir}')
self.examples = []
for file_name in os.listdir(file_dir):
file_path = os.path.join(file_dir, file_name)
if os.path.isfile(file_path) is False:
raise ValueError(f'{file_path} is not a file')
article_open = False
with open(file_path, encoding='utf-8') as f:
original_lines = f.readlines()
article_lines = []
for line in original_lines:
if '<doc id=' in line:
article_open = True
elif '</doc>' in line:
article_open = False
document = [tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line)) for line in article_lines[1:] if len(line) > 0 and (not line.isspace())]
examples = self.create_examples_from_document(document, block_size, tokenizer)
self.examples.extend(examples)
article_lines = []
elif article_open:
article_lines.append(line)
logger.info('Dataset parse finished.')
def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1):
"""Creates examples for a single document."""
max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True)
target_seq_length = max_num_tokens
if random.random() < short_seq_prob:
target_seq_length = random.randint(2, max_num_tokens)
examples = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
if not segment:
i += 1
continue
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
a_end = 1
if len(current_chunk) >= 2:
a_end = random.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
if len(tokens_a) == 0 or len(tokens_b) == 0:
continue
if random.random() < 0.5:
is_next = False
tokens_a, tokens_b = (tokens_b, tokens_a)
else:
is_next = True
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
if not len(trunc_tokens) >= 1:
raise ValueError('Sequence length to be truncated must be no less than one')
if random.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
if not len(tokens_a) >= 1:
raise ValueError(f'Length of sequence a is {len(tokens_a)} which must be no less than 1')
if not len(tokens_b) >= 1:
raise ValueError(f'Length of sequence b is {len(tokens_b)} which must be no less than 1')
input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)
example = {'input_ids': torch.tensor(input_ids, dtype=torch.long), 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long), 'sentence_order_label': torch.tensor(0 if is_next else 1, dtype=torch.long)}
examples.append(example)
current_chunk = []
current_length = 0
i += 1
return examples
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> dict[str, torch.tensor]:
return self.examples[i]
|
class LineByLineWithSOPTextDataset(Dataset):
'''
    Dataset for the sentence order prediction task; prepares sentence pairs for the SOP task.
'''
def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
pass
def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1):
'''Creates examples for a single document.'''
pass
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
'''Truncates a pair of sequences to a maximum sequence length.'''
pass
def __len__(self):
pass
def __getitem__(self, i) -> dict[str, torch.tensor]:
pass
| 6
| 3
| 31
| 2
| 23
| 7
| 6
| 0.36
| 1
| 6
| 1
| 0
| 4
| 1
| 4
| 4
| 150
| 16
| 104
| 33
| 98
| 37
| 87
| 32
| 81
| 14
| 1
| 5
| 30
|
266
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/language_modeling.py
|
transformers.data.datasets.language_modeling.TextDataset
|
import torch
import warnings
from filelock import FileLock
import os
from torch.utils.data import Dataset
import time
import pickle
from typing import Optional
from ...tokenization_utils import PreTrainedTokenizer
class TextDataset(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, cache_dir: Optional[str]=None):
warnings.warn(DEPRECATION_WARNING.format('https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py'), FutureWarning)
if os.path.isfile(file_path) is False:
raise ValueError(f'Input file path {file_path} not found')
block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(cache_dir if cache_dir is not None else directory, f'cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}')
lock_path = cached_features_file + '.lock'
with FileLock(lock_path):
if os.path.exists(cached_features_file) and (not overwrite_cache):
start = time.time()
with open(cached_features_file, 'rb') as handle:
self.examples = pickle.load(handle)
logger.info(f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start)
else:
logger.info(f'Creating features from dataset file at {directory}')
self.examples = []
with open(file_path, encoding='utf-8') as f:
text = f.read()
tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
for i in range(0, len(tokenized_text) - block_size + 1, block_size):
self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:i + block_size]))
start = time.time()
with open(cached_features_file, 'wb') as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
logger.info(f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> torch.Tensor:
return torch.tensor(self.examples[i], dtype=torch.long)
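
A hedged usage sketch: `corpus.txt` and the checkpoint are placeholders. The fixed-size blocks produced above pair naturally with `DataCollatorForLanguageModeling` for masked-language-model pretraining.

```python
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, DataCollatorForLanguageModeling
from transformers.data.datasets.language_modeling import TextDataset

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# Every item is a LongTensor of exactly block_size token ids, special tokens included.
dataset = TextDataset(tokenizer=tokenizer, file_path="corpus.txt", block_size=128)

collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
loader = DataLoader(dataset, batch_size=8, collate_fn=collator)
batch = next(iter(loader))  # dict with "input_ids" and "labels", each (8, 128), assuming the file yields >= 8 blocks
```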
|
class TextDataset(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, cache_dir: Optional[str]=None):
pass
def __len__(self):
pass
def __getitem__(self, i) -> torch.Tensor:
pass
| 4
| 0
| 21
| 3
| 17
| 2
| 2
| 0.17
| 1
| 7
| 1
| 0
| 3
| 1
| 3
| 3
| 71
| 11
| 52
| 21
| 41
| 9
| 30
| 12
| 26
| 5
| 1
| 3
| 7
|
267
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/language_modeling.py
|
transformers.data.datasets.language_modeling.TextDatasetForNextSentencePrediction
|
from ...tokenization_utils import PreTrainedTokenizer
import pickle
import os
from filelock import FileLock
import warnings
import torch
import time
from torch.utils.data import Dataset
import random
class TextDatasetForNextSentencePrediction(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, short_seq_probability=0.1, nsp_probability=0.5):
warnings.warn(DEPRECATION_WARNING.format('https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py'), FutureWarning)
if not os.path.isfile(file_path):
raise ValueError(f'Input file path {file_path} not found')
self.short_seq_probability = short_seq_probability
self.nsp_probability = nsp_probability
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(directory, f'cached_nsp_{tokenizer.__class__.__name__}_{block_size}_{filename}')
self.tokenizer = tokenizer
lock_path = cached_features_file + '.lock'
with FileLock(lock_path):
if os.path.exists(cached_features_file) and (not overwrite_cache):
start = time.time()
with open(cached_features_file, 'rb') as handle:
self.examples = pickle.load(handle)
logger.info(f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start)
else:
logger.info(f'Creating features from dataset file at {directory}')
self.documents = [[]]
with open(file_path, encoding='utf-8') as f:
while True:
line = f.readline()
if not line:
break
line = line.strip()
if not line and len(self.documents[-1]) != 0:
self.documents.append([])
tokens = tokenizer.tokenize(line)
tokens = tokenizer.convert_tokens_to_ids(tokens)
if tokens:
self.documents[-1].append(tokens)
logger.info(f'Creating examples from {len(self.documents)} documents.')
self.examples = []
for doc_index, document in enumerate(self.documents):
self.create_examples_from_document(document, doc_index, block_size)
start = time.time()
with open(cached_features_file, 'wb') as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
logger.info(f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
def create_examples_from_document(self, document: list[list[int]], doc_index: int, block_size: int):
"""Creates examples for a single document."""
max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True)
target_seq_length = max_num_tokens
if random.random() < self.short_seq_probability:
target_seq_length = random.randint(2, max_num_tokens)
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
a_end = 1
if len(current_chunk) >= 2:
a_end = random.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
if len(current_chunk) == 1 or random.random() < self.nsp_probability:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
for _ in range(10):
random_document_index = random.randint(0, len(self.documents) - 1)
if random_document_index != doc_index:
break
random_document = self.documents[random_document_index]
random_start = random.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
if not len(tokens_a) >= 1:
raise ValueError(f'Length of sequence a is {len(tokens_a)} which must be no less than 1')
if not len(tokens_b) >= 1:
raise ValueError(f'Length of sequence b is {len(tokens_b)} which must be no less than 1')
input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)
example = {'input_ids': torch.tensor(input_ids, dtype=torch.long), 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long), 'next_sentence_label': torch.tensor(1 if is_random_next else 0, dtype=torch.long)}
self.examples.append(example)
current_chunk = []
current_length = 0
i += 1
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return self.examples[i]
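
A small sketch (path and checkpoint assumed): the input file should have one sentence per line with blank lines separating documents, which is what the parsing loop above builds `self.documents` from.

```python
from transformers import BertTokenizer
from transformers.data.datasets.language_modeling import TextDatasetForNextSentencePrediction

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
dataset = TextDatasetForNextSentencePrediction(tokenizer=tokenizer, file_path="corpus.txt", block_size=128)

example = dataset[0]
# next_sentence_label is 1 when the second segment was sampled from a random document.
print(example["input_ids"][:10], example["next_sentence_label"])
```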
|
class TextDatasetForNextSentencePrediction(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, short_seq_probability=0.1, nsp_probability=0.5):
pass
def create_examples_from_document(self, document: list[list[int]], doc_index: int, block_size: int):
'''Creates examples for a single document.'''
pass
def __len__(self):
pass
def __getitem__(self, i):
pass
| 5
| 1
| 44
| 6
| 30
| 9
| 7
| 0.32
| 1
| 7
| 1
| 0
| 4
| 5
| 4
| 4
| 185
| 29
| 119
| 47
| 106
| 38
| 93
| 37
| 88
| 16
| 1
| 6
| 26
|
268
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/squad.py
|
transformers.data.datasets.squad.Split
|
from enum import Enum
class Split(Enum):
train = 'train'
dev = 'dev'
|
class Split(Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
269
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/squad.py
|
transformers.data.datasets.squad.SquadDataTrainingArguments
|
from dataclasses import dataclass, field
@dataclass
class SquadDataTrainingArguments:
"""
    Arguments pertaining to what data we are going to input to our model for training and eval.
"""
model_type: str = field(default=None, metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES)})
data_dir: str = field(default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'})
max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
doc_stride: int = field(default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'})
max_query_length: int = field(default=64, metadata={'help': 'The maximum number of tokens for the question. Questions longer than this will be truncated to this length.'})
max_answer_length: int = field(default=30, metadata={'help': 'The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
version_2_with_negative: bool = field(default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'})
null_score_diff_threshold: float = field(default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'})
    n_best_size: int = field(default=20, metadata={'help': 'The total number of n-best predictions to generate.'})
lang_id: int = field(default=0, metadata={'help': 'language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'})
threads: int = field(default=1, metadata={'help': 'multiple threads for converting example to features'})
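
A hedged sketch of how these fields are typically populated from the command line: `HfArgumentParser` reads the `field` metadata above to build the CLI, and the argument values here are only examples.

```python
from transformers import HfArgumentParser
from transformers.data.datasets.squad import SquadDataTrainingArguments

parser = HfArgumentParser(SquadDataTrainingArguments)
(data_args,) = parser.parse_args_into_dataclasses(
    args=["--model_type", "bert", "--data_dir", "./squad", "--version_2_with_negative"]
)
print(data_args.max_seq_length, data_args.version_2_with_negative)  # 128 True
```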
|
@dataclass
class SquadDataTrainingArguments:
'''
    Arguments pertaining to what data we are going to input to our model for training and eval.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 1
| 60
| 13
| 59
| 3
| 13
| 13
| 12
| 0
| 0
| 0
| 0
|
270
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/datasets/squad.py
|
transformers.data.datasets.squad.SquadDataset
|
from ...utils import check_torch_load_is_safe, logging
from filelock import FileLock
import time
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
import os
import torch
from ...tokenization_utils import PreTrainedTokenizer
from typing import Optional, Union
from torch.utils.data import Dataset
class SquadDataset(Dataset):
args: SquadDataTrainingArguments
features: list[SquadFeatures]
mode: Split
is_language_sensitive: bool
def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int]=None, mode: Union[str, Split]=Split.train, is_language_sensitive: Optional[bool]=False, cache_dir: Optional[str]=None, dataset_format: Optional[str]='pt'):
self.args = args
self.is_language_sensitive = is_language_sensitive
self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(mode, str):
try:
mode = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
self.mode = mode
version_tag = 'v2' if args.version_2_with_negative else 'v1'
cached_features_file = os.path.join(cache_dir if cache_dir is not None else args.data_dir, f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}')
lock_path = cached_features_file + '.lock'
with FileLock(lock_path):
if os.path.exists(cached_features_file) and (not args.overwrite_cache):
start = time.time()
check_torch_load_is_safe()
self.old_features = torch.load(cached_features_file, weights_only=True)
self.features = self.old_features['features']
self.dataset = self.old_features.get('dataset', None)
self.examples = self.old_features.get('examples', None)
logger.info(f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start)
if self.dataset is None or self.examples is None:
logger.warning(f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in future run')
else:
if mode == Split.dev:
self.examples = self.processor.get_dev_examples(args.data_dir)
else:
self.examples = self.processor.get_train_examples(args.data_dir)
self.features, self.dataset = squad_convert_examples_to_features(examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format)
start = time.time()
torch.save({'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, cached_features_file)
logger.info(f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> dict[str, torch.Tensor]:
feature = self.features[i]
input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}
if self.args.model_type in ['xlm', 'roberta', 'distilbert', 'camembert']:
del inputs['token_type_ids']
if self.args.model_type in ['xlnet', 'xlm']:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask})
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible})
if self.is_language_sensitive:
inputs.update({'langs': torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id})
if self.mode == Split.train:
start_positions = torch.tensor(feature.start_position, dtype=torch.long)
end_positions = torch.tensor(feature.end_position, dtype=torch.long)
inputs.update({'start_positions': start_positions, 'end_positions': end_positions})
return inputs
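
A usage sketch (the data directory and checkpoint are placeholders; the directory must contain the SQuAD JSON files): it wires the arguments, tokenizer and dataset together and reads back one feature dict with the keys `__getitem__` produces above.

```python
from transformers import AutoTokenizer
from transformers.data.datasets.squad import SquadDataset, SquadDataTrainingArguments

args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad", max_seq_length=384, doc_stride=128)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# mode="train" is resolved through Split[mode]; converted features are cached next to the data files.
train_dataset = SquadDataset(args, tokenizer, mode="train")
inputs = train_dataset[0]
print(sorted(inputs))  # attention_mask, end_positions, input_ids, start_positions, token_type_ids
```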
|
class SquadDataset(Dataset):
def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int]=None, mode: Union[str, Split]=Split.train, is_language_sensitive: Optional[bool]=False, cache_dir: Optional[str]=None, dataset_format: Optional[str]='pt'):
pass
def __len__(self):
pass
def __getitem__(self, i) -> dict[str, torch.Tensor]:
pass
| 4
| 0
| 36
| 4
| 30
| 2
| 5
| 0.1
| 1
| 11
| 5
| 0
| 3
| 4
| 3
| 3
| 121
| 15
| 96
| 31
| 83
| 10
| 59
| 22
| 55
| 9
| 1
| 3
| 16
|
271
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.ColaProcessor
|
import warnings
from .utils import DataProcessor, InputExample, InputFeatures
import os
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence'].numpy().decode('utf-8'), None, str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
"""See base class."""
return ['0', '1']
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
test_mode = set_type == 'test'
if test_mode:
lines = lines[1:]
text_index = 1 if test_mode else 3
examples = []
for i, line in enumerate(lines):
guid = f'{set_type}-{i}'
text_a = line[text_index]
label = None if test_mode else line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
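
A short sketch (the data directory is a placeholder for a GLUE CoLA download): the processor reads `train.tsv` and yields `InputExample` objects with the fields `_create_examples` fills in above.

```python
from transformers.data.processors.glue import ColaProcessor

processor = ColaProcessor()  # emits the deprecation FutureWarning noted above
train_examples = processor.get_train_examples("./glue_data/CoLA")
first = train_examples[0]
print(processor.get_labels(), first.guid, first.text_a, first.label)
```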
|
class ColaProcessor(DataProcessor):
'''Processor for the CoLA data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_example_from_tensor_dict(self, tensor_dict):
'''See base class.'''
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
def _create_examples(self, lines, set_type):
'''Creates examples for the training, dev and test sets.'''
pass
| 8
| 7
| 5
| 0
| 4
| 1
| 2
| 0.23
| 1
| 5
| 1
| 0
| 7
| 0
| 7
| 14
| 45
| 7
| 31
| 15
| 23
| 7
| 26
| 15
| 18
| 5
| 1
| 1
| 11
|
272
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.MnliMismatchedProcessor
|
import os
import warnings
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev_mismatched.tsv')), 'dev_mismatched')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test_mismatched.tsv')), 'test_mismatched')
|
class MnliMismatchedProcessor(MnliProcessor):
'''Processor for the MultiNLI Mismatched data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
| 4
| 3
| 3
| 0
| 2
| 1
| 1
| 0.38
| 1
| 2
| 0
| 0
| 3
| 0
| 3
| 17
| 14
| 3
| 8
| 4
| 4
| 3
| 8
| 4
| 4
| 1
| 2
| 0
| 3
|
273
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.MnliProcessor
|
import warnings
from .utils import DataProcessor, InputExample, InputFeatures
import os
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['premise'].numpy().decode('utf-8'), tensor_dict['hypothesis'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev_matched.tsv')), 'dev_matched')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test_matched.tsv')), 'test_matched')
def get_labels(self):
"""See base class."""
return ['contradiction', 'entailment', 'neutral']
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f'{set_type}-{line[0]}'
text_a = line[8]
text_b = line[9]
label = None if set_type.startswith('test') else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
class MnliProcessor(DataProcessor):
'''Processor for the MultiNLI data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_example_from_tensor_dict(self, tensor_dict):
'''See base class.'''
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
def _create_examples(self, lines, set_type):
'''Creates examples for the training, dev and test sets.'''
pass
| 8
| 7
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 5
| 1
| 1
| 7
| 0
| 7
| 14
| 44
| 7
| 30
| 14
| 22
| 7
| 25
| 14
| 17
| 4
| 1
| 2
| 10
|
274
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.MrpcProcessor
|
import os
import warnings
from .utils import DataProcessor, InputExample, InputFeatures
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence1'].numpy().decode('utf-8'), tensor_dict['sentence2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}")
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
"""See base class."""
return ['0', '1']
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f'{set_type}-{i}'
text_a = line[3]
text_b = line[4]
label = None if set_type == 'test' else line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
class MrpcProcessor(DataProcessor):
'''Processor for the MRPC data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_example_from_tensor_dict(self, tensor_dict):
'''See base class.'''
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
def _create_examples(self, lines, set_type):
'''Creates examples for the training, dev and test sets.'''
pass
| 8
| 7
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 5
| 1
| 0
| 7
| 0
| 7
| 14
| 45
| 7
| 31
| 14
| 23
| 7
| 26
| 14
| 18
| 4
| 1
| 2
| 10
|
275
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.OutputMode
|
from enum import Enum
class OutputMode(Enum):
classification = 'classification'
regression = 'regression'
|
class OutputMode(Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
276
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.QnliProcessor
|
import warnings
from .utils import DataProcessor, InputExample, InputFeatures
import os
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['question'].numpy().decode('utf-8'), tensor_dict['sentence'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
"""See base class."""
return ['entailment', 'not_entailment']
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f'{set_type}-{line[0]}'
text_a = line[1]
text_b = line[2]
label = None if set_type == 'test' else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
class QnliProcessor(DataProcessor):
'''Processor for the QNLI data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_example_from_tensor_dict(self, tensor_dict):
'''See base class.'''
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
def _create_examples(self, lines, set_type):
'''Creates examples for the training, dev and test sets.'''
pass
| 8
| 7
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 5
| 1
| 0
| 7
| 0
| 7
| 14
| 44
| 7
| 30
| 14
| 22
| 7
| 25
| 14
| 17
| 4
| 1
| 2
| 10
|
277
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.QqpProcessor
|
from .utils import DataProcessor, InputExample, InputFeatures
import os
import warnings
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['question1'].numpy().decode('utf-8'), tensor_dict['question2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
"""See base class."""
return ['0', '1']
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
test_mode = set_type == 'test'
q1_index = 1 if test_mode else 3
q2_index = 2 if test_mode else 4
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f'{set_type}-{line[0]}'
try:
text_a = line[q1_index]
text_b = line[q2_index]
label = None if test_mode else line[5]
except IndexError:
continue
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
class QqpProcessor(DataProcessor):
'''Processor for the QQP data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_example_from_tensor_dict(self, tensor_dict):
'''See base class.'''
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
def _create_examples(self, lines, set_type):
'''Creates examples for the training, dev and test sets.'''
pass
| 8
| 7
| 6
| 0
| 5
| 1
| 2
| 0.19
| 1
| 6
| 1
| 0
| 7
| 0
| 7
| 14
| 50
| 7
| 36
| 17
| 28
| 7
| 31
| 17
| 23
| 7
| 1
| 2
| 13
|
278
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.RteProcessor
|
from .utils import DataProcessor, InputExample, InputFeatures
import warnings
import os
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence1'].numpy().decode('utf-8'), tensor_dict['sentence2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
"""See base class."""
return ['entailment', 'not_entailment']
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f'{set_type}-{line[0]}'
text_a = line[1]
text_b = line[2]
label = None if set_type == 'test' else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
class RteProcessor(DataProcessor):
'''Processor for the RTE data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_example_from_tensor_dict(self, tensor_dict):
'''See base class.'''
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
def _create_examples(self, lines, set_type):
'''Creates examples for the training, dev and test sets.'''
pass
| 8
| 7
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 5
| 1
| 0
| 7
| 0
| 7
| 14
| 44
| 7
| 30
| 14
| 22
| 7
| 25
| 14
| 17
| 4
| 1
| 2
| 10
|
279
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.Sst2Processor
|
import warnings
import os
from .utils import DataProcessor, InputExample, InputFeatures
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence'].numpy().decode('utf-8'), None, str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
"""See base class."""
return ['0', '1']
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
text_index = 1 if set_type == 'test' else 0
for i, line in enumerate(lines):
if i == 0:
continue
guid = f'{set_type}-{i}'
text_a = line[text_index]
label = None if set_type == 'test' else line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
|
class Sst2Processor(DataProcessor):
'''Processor for the SST-2 data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_example_from_tensor_dict(self, tensor_dict):
'''See base class.'''
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
def _create_examples(self, lines, set_type):
'''Creates examples for the training, dev and test sets.'''
pass
| 8
| 7
| 5
| 0
| 4
| 1
| 2
| 0.23
| 1
| 5
| 1
| 0
| 7
| 0
| 7
| 14
| 44
| 7
| 30
| 14
| 22
| 7
| 25
| 14
| 17
| 5
| 1
| 2
| 11
|
280
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.StsbProcessor
|
from .utils import DataProcessor, InputExample, InputFeatures
import os
import warnings
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence1'].numpy().decode('utf-8'), tensor_dict['sentence2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f'{set_type}-{line[0]}'
text_a = line[7]
text_b = line[8]
label = None if set_type == 'test' else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
class StsbProcessor(DataProcessor):
'''Processor for the STS-B data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_example_from_tensor_dict(self, tensor_dict):
'''See base class.'''
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
def _create_examples(self, lines, set_type):
'''Creates examples for the training, dev and test sets.'''
pass
| 8
| 7
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 5
| 1
| 0
| 7
| 0
| 7
| 14
| 44
| 7
| 30
| 14
| 22
| 7
| 25
| 14
| 17
| 4
| 1
| 2
| 10
|
281
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/glue.py
|
transformers.data.processors.glue.WnliProcessor
|
import os
from .utils import DataProcessor, InputExample, InputFeatures
import warnings
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence1'].numpy().decode('utf-8'), tensor_dict['sentence2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
"""See base class."""
return ['0', '1']
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f'{set_type}-{line[0]}'
text_a = line[1]
text_b = line[2]
label = None if set_type == 'test' else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
class WnliProcessor(DataProcessor):
'''Processor for the WNLI data set (GLUE version).'''
def __init__(self, *args, **kwargs):
pass
def get_example_from_tensor_dict(self, tensor_dict):
'''See base class.'''
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_dev_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
def _create_examples(self, lines, set_type):
'''Creates examples for the training, dev and test sets.'''
pass
| 8
| 7
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 5
| 1
| 0
| 7
| 0
| 7
| 14
| 44
| 7
| 30
| 14
| 22
| 7
| 25
| 14
| 17
| 4
| 1
| 2
| 10
|
282
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/squad.py
|
transformers.data.processors.squad.SquadExample
|
class SquadExample:
"""
A single training/test example for the Squad dataset, as loaded from disk.
Args:
qas_id: The example's unique identifier
question_text: The question string
context_text: The context string
answer_text: The answer string
start_position_character: The character position of the start of the answer
title: The title of the example
answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
is_impossible: False by default, set to True if the example has no possible answer.
"""
def __init__(self, qas_id, question_text, context_text, answer_text, start_position_character, title, answers=[], is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.context_text = context_text
self.answer_text = answer_text
self.title = title
self.is_impossible = is_impossible
self.answers = answers
self.start_position, self.end_position = (0, 0)
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in self.context_text:
if _is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
self.doc_tokens = doc_tokens
self.char_to_word_offset = char_to_word_offset
if start_position_character is not None and (not is_impossible):
self.start_position = char_to_word_offset[start_position_character]
            self.end_position = char_to_word_offset[
                min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
            ]
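
A small constructed example (all strings are made up) showing how the character-level answer span above is mapped onto whitespace-split `doc_tokens`.

```python
from transformers.data.processors.squad import SquadExample

example = SquadExample(
    qas_id="dev-0001",
    question_text="Where is the Eiffel Tower?",
    context_text="The Eiffel Tower is in Paris.",
    answer_text="Paris",
    start_position_character=23,  # character offset of "Paris" in the context
    title="Eiffel Tower",
)
print(example.doc_tokens)                            # ['The', 'Eiffel', 'Tower', 'is', 'in', 'Paris.']
print(example.start_position, example.end_position)  # 5 5  (both point at the last whitespace token)
```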
|
class SquadExample:
'''
A single training/test example for the Squad dataset, as loaded from disk.
Args:
qas_id: The example's unique identifier
question_text: The question string
context_text: The context string
answer_text: The answer string
start_position_character: The character position of the start of the answer
title: The title of the example
answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
is_impossible: False by default, set to True if the example has no possible answer.
'''
def __init__(self, qas_id, question_text, context_text, answer_text, start_position_character, title, answers=[], is_impossible=False):
pass
| 2
| 1
| 46
| 5
| 39
| 2
| 5
| 0.35
| 0
| 0
| 0
| 0
| 1
| 11
| 1
| 1
| 61
| 7
| 40
| 26
| 28
| 14
| 26
| 16
| 24
| 5
| 0
| 3
| 5
|
283
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/squad.py
|
transformers.data.processors.squad.SquadFeatures
|
from typing import Optional
from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy
class SquadFeatures:
"""
Single squad example features to be fed to a model. Those features are model-specific and can be crafted from
[`~data.processors.squad.SquadExample`] using the
    [`~transformers.data.processors.squad.squad_convert_examples_to_features`] method.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
cls_index: the index of the CLS token.
p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
            Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer
example_index: the index of the example
unique_id: The unique Feature identifier
paragraph_len: The length of the context
token_is_max_context:
List of booleans identifying which tokens have their maximum context in this feature object. If a token
does not have their maximum context in this feature object, it means that another feature object has more
information related to that token and should be prioritized over this feature for that token.
tokens: list of tokens corresponding to the input ids
token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
start_position: start of the answer token index
end_position: end of the answer token index
encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods.
"""
def __init__(self, input_ids, attention_mask, token_type_ids, cls_index, p_mask, example_index, unique_id, paragraph_len, token_is_max_context, tokens, token_to_orig_map, start_position, end_position, is_impossible, qas_id: Optional[str]=None, encoding: Optional[BatchEncoding]=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.cls_index = cls_index
self.p_mask = p_mask
self.example_index = example_index
self.unique_id = unique_id
self.paragraph_len = paragraph_len
self.token_is_max_context = token_is_max_context
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
self.qas_id = qas_id
self.encoding = encoding
|
class SquadFeatures:
'''
Single squad example features to be fed to a model. Those features are model-specific and can be crafted from
[`~data.processors.squad.SquadExample`] using the
    [`~transformers.data.processors.squad.squad_convert_examples_to_features`] method.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
cls_index: the index of the CLS token.
p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
            Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer
example_index: the index of the example
unique_id: The unique Feature identifier
paragraph_len: The length of the context
token_is_max_context:
List of booleans identifying which tokens have their maximum context in this feature object. If a token
does not have their maximum context in this feature object, it means that another feature object has more
information related to that token and should be prioritized over this feature for that token.
tokens: list of tokens corresponding to the input ids
token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
start_position: start of the answer token index
end_position: end of the answer token index
encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods.
'''
def __init__(self, input_ids, attention_mask, token_type_ids, cls_index, p_mask, example_index, unique_id, paragraph_len, token_is_max_context, tokens, token_to_orig_map, start_position, end_position, is_impossible, qas_id: Optional[str]=None, encoding: Optional[BatchEncoding]=None):
pass
| 2
| 1
| 38
| 3
| 35
| 0
| 1
| 0.67
| 0
| 2
| 1
| 0
| 1
| 16
| 1
| 1
| 65
| 5
| 36
| 36
| 16
| 24
| 18
| 18
| 16
| 1
| 0
| 0
| 1
|
284
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/squad.py
|
transformers.data.processors.squad.SquadProcessor
|
from tqdm import tqdm
import json
import os
from .utils import DataProcessor
class SquadProcessor(DataProcessor):
"""
    Processor for the SQuAD data set. Overridden by SquadV1Processor and SquadV2Processor, used for version 1.1 and
    version 2.0 of SQuAD, respectively.
"""
train_file = None
dev_file = None
def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
if not evaluate:
answer = tensor_dict['answers']['text'][0].numpy().decode('utf-8')
answer_start = tensor_dict['answers']['answer_start'][0].numpy()
answers = []
else:
answers = [{'answer_start': start.numpy(), 'text': text.numpy().decode('utf-8')} for start, text in zip(tensor_dict['answers']['answer_start'], tensor_dict['answers']['text'])]
answer = None
answer_start = None
return SquadExample(qas_id=tensor_dict['id'].numpy().decode('utf-8'), question_text=tensor_dict['question'].numpy().decode('utf-8'), context_text=tensor_dict['context'].numpy().decode('utf-8'), answer_text=answer, start_position_character=answer_start, title=tensor_dict['title'].numpy().decode('utf-8'), answers=answers)
def get_examples_from_dataset(self, dataset, evaluate=False):
"""
Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.
Args:
dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")*
evaluate: Boolean specifying if in evaluation mode or in training mode
Returns:
List of SquadExample
Examples:
```python
>>> import tensorflow_datasets as tfds
>>> dataset = tfds.load("squad")
>>> training_examples = get_examples_from_dataset(dataset, evaluate=False)
>>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
```"""
if evaluate:
dataset = dataset['validation']
else:
dataset = dataset['train']
examples = []
for tensor_dict in tqdm(dataset):
examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
return examples
def get_train_examples(self, data_dir, filename=None):
"""
Returns the training examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the training file has a different name than the original one
which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
"""
if data_dir is None:
data_dir = ''
if self.train_file is None:
raise ValueError('SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor')
with open(os.path.join(data_dir, self.train_file if filename is None else filename), 'r', encoding='utf-8') as reader:
input_data = json.load(reader)['data']
return self._create_examples(input_data, 'train')
def get_dev_examples(self, data_dir, filename=None):
"""
        Returns the evaluation examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the evaluation file has a different name than the original one
which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
"""
if data_dir is None:
data_dir = ''
if self.dev_file is None:
raise ValueError('SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor')
with open(os.path.join(data_dir, self.dev_file if filename is None else filename), 'r', encoding='utf-8') as reader:
input_data = json.load(reader)['data']
return self._create_examples(input_data, 'dev')
def _create_examples(self, input_data, set_type):
is_training = set_type == 'train'
examples = []
for entry in tqdm(input_data):
title = entry['title']
for paragraph in entry['paragraphs']:
context_text = paragraph['context']
for qa in paragraph['qas']:
qas_id = qa['id']
question_text = qa['question']
start_position_character = None
answer_text = None
answers = []
is_impossible = qa.get('is_impossible', False)
if not is_impossible:
if is_training:
answer = qa['answers'][0]
answer_text = answer['text']
start_position_character = answer['answer_start']
else:
answers = qa['answers']
example = SquadExample(qas_id=qas_id, question_text=question_text, context_text=context_text, answer_text=answer_text, start_position_character=start_position_character, title=title, is_impossible=is_impossible, answers=answers)
examples.append(example)
return examples
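
A hedged end-to-end sketch: `./squad` is assumed to contain `dev-v1.1.json`, and the feature-conversion call mirrors the one `SquadDataset` makes above.

```python
from transformers import AutoTokenizer
from transformers.data.processors.squad import SquadV1Processor, squad_convert_examples_to_features

processor = SquadV1Processor()
examples = processor.get_dev_examples("./squad")  # expects dev-v1.1.json in that directory

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
features, dataset = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    is_training=False,
    return_dataset="pt",
)
print(len(examples), len(features))  # features can outnumber examples because long contexts are split into strides
```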
|
class SquadProcessor(DataProcessor):
'''
    Processor for the SQuAD data set. Overridden by SquadV1Processor and SquadV2Processor, used for version 1.1 and
    version 2.0 of SQuAD, respectively.
'''
def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
pass
def get_examples_from_dataset(self, dataset, evaluate=False):
'''
Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.
Args:
dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")*
evaluate: Boolean specifying if in evaluation mode or in training mode
Returns:
List of SquadExample
Examples:
```python
>>> import tensorflow_datasets as tfds
>>> dataset = tfds.load("squad")
>>> training_examples = get_examples_from_dataset(dataset, evaluate=False)
>>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
```'''
pass
def get_train_examples(self, data_dir, filename=None):
'''
Returns the training examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the training file has a different name than the original one
which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
'''
pass
def get_dev_examples(self, data_dir, filename=None):
'''
        Returns the evaluation examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the evaluation file has a different name than the original one
which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
'''
pass
def _create_examples(self, input_data, set_type):
pass
| 6
| 4
| 26
| 4
| 17
| 6
| 4
| 0.37
| 1
| 4
| 1
| 2
| 5
| 0
| 5
| 12
| 144
| 26
| 86
| 32
| 80
| 32
| 59
| 30
| 53
| 6
| 1
| 5
| 19
|
285
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/squad.py
|
transformers.data.processors.squad.SquadResult
|
class SquadResult:
"""
Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
Args:
unique_id: The unique identifier corresponding to that example.
start_logits: The logits corresponding to the start of the answer
end_logits: The logits corresponding to the end of the answer
"""
def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
self.start_logits = start_logits
self.end_logits = end_logits
self.unique_id = unique_id
if start_top_index:
self.start_top_index = start_top_index
self.end_top_index = end_top_index
self.cls_logits = cls_logits
|
class SquadResult:
'''
Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
Args:
unique_id: The unique identifier corresponding to that example.
start_logits: The logits corresponding to the start of the answer
end_logits: The logits corresponding to the end of the answer
'''
def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
pass
| 2
| 1
| 9
| 1
| 8
| 0
| 2
| 0.78
| 0
| 0
| 0
| 0
| 1
| 6
| 1
| 1
| 19
| 3
| 9
| 8
| 7
| 7
| 9
| 8
| 7
| 2
| 0
| 1
| 2
|
286
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/squad.py
|
transformers.data.processors.squad.SquadV1Processor
|
class SquadV1Processor(SquadProcessor):
train_file = 'train-v1.1.json'
dev_file = 'dev-v1.1.json'
|
class SquadV1Processor(SquadProcessor):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 2
| 0
| 0
|
287
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/squad.py
|
transformers.data.processors.squad.SquadV2Processor
|
class SquadV2Processor(SquadProcessor):
train_file = 'train-v2.0.json'
dev_file = 'dev-v2.0.json'
|
class SquadV2Processor(SquadProcessor):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 2
| 0
| 0
|
288
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/utils.py
|
transformers.data.processors.utils.DataProcessor
|
import csv
class DataProcessor:
"""Base class for data converters for sequence classification data sets."""
def get_example_from_tensor_dict(self, tensor_dict):
"""
Gets an example from a dict.
Args:
tensor_dict: Keys and values should match the corresponding Glue
tensorflow_dataset examples.
"""
raise NotImplementedError()
def get_train_examples(self, data_dir):
"""Gets a collection of [`InputExample`] for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of [`InputExample`] for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of [`InputExample`] for the test set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def tfds_map(self, example):
"""
Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
examples to the correct format.
"""
if len(self.get_labels()) > 1:
example.label = self.get_labels()[int(example.label)]
return example
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, 'r', encoding='utf-8-sig') as f:
return list(csv.reader(f, delimiter='\t', quotechar=quotechar))
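
A hypothetical subclass sketch: the two-column text<TAB>label file layout is invented for illustration, but the overridden methods follow the contract the base class documents above.

```python
import os

from transformers.data.processors.utils import DataProcessor, InputExample


class MyBinaryProcessor(DataProcessor):
    """Toy processor for a TSV whose rows are (text, label)."""

    def get_labels(self):
        return ["0", "1"]

    def get_train_examples(self, data_dir):
        lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return [
            InputExample(guid=f"train-{i}", text_a=line[0], label=line[1])
            for i, line in enumerate(lines)
        ]
```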
|
class DataProcessor:
'''Base class for data converters for sequence classification data sets.'''
def get_example_from_tensor_dict(self, tensor_dict):
'''
Gets an example from a dict.
Args:
tensor_dict: Keys and values should match the corresponding Glue
tensorflow_dataset examples.
'''
pass
def get_train_examples(self, data_dir):
'''Gets a collection of [`InputExample`] for the train set.'''
pass
def get_dev_examples(self, data_dir):
'''Gets a collection of [`InputExample`] for the dev set.'''
pass
def get_test_examples(self, data_dir):
'''Gets a collection of [`InputExample`] for the test set.'''
pass
def get_labels(self):
'''Gets the list of labels for this data set.'''
pass
def tfds_map(self, example):
'''
Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
examples to the correct format.
'''
pass
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
'''Reads a tab separated value file.'''
pass
| 9
| 8
| 5
| 0
| 2
| 2
| 1
| 0.84
| 0
| 3
| 0
| 12
| 6
| 0
| 7
| 7
| 43
| 8
| 19
| 10
| 10
| 16
| 18
| 8
| 10
| 2
| 0
| 1
| 8
|
289
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/utils.py
|
transformers.data.processors.utils.InputExample
|
from dataclasses import dataclass
import json
from typing import Optional, Union
import dataclasses
@dataclass
class InputExample:
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
guid: str
text_a: str
text_b: Optional[str] = None
label: Optional[str] = None
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(dataclasses.asdict(self), indent=2) + '\n'
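A brief usage sketch; the guid, text, and label values are invented.
from transformers.data.processors.utils import InputExample

example = InputExample(guid='train-0', text_a='The cat sat on the mat.', label='positive')
print(example.to_json_string())  # pretty-printed JSON of the dataclass fields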
|
@dataclass
class InputExample:
'''
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
'''
def to_json_string(self):
'''Serializes this instance to a JSON string.'''
pass
| 3
| 2
| 3
| 0
| 2
| 1
| 1
| 1.71
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 22
| 3
| 7
| 4
| 5
| 12
| 7
| 4
| 5
| 1
| 0
| 0
| 1
|
290
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/utils.py
|
transformers.data.processors.utils.InputFeatures
|
import json
import dataclasses
from typing import Optional, Union
from dataclasses import dataclass
@dataclass(frozen=True)
class InputFeatures:
"""
A single set of features of data. Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
"""
input_ids: list[int]
attention_mask: Optional[list[int]] = None
token_type_ids: Optional[list[int]] = None
label: Optional[Union[int, float]] = None
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(dataclasses.asdict(self)) + '\n'
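A brief sketch with invented token ids; because the dataclass is frozen, its fields are fixed at construction.
from transformers.data.processors.utils import InputFeatures

features = InputFeatures(input_ids=[101, 2023, 102, 0], attention_mask=[1, 1, 1, 0], label=1)
print(features.to_json_string())  # single-line JSON
# features.label = 0  # would raise dataclasses.FrozenInstanceError since the dataclass is frozen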
|
@dataclass(frozen=True)
class InputFeatures:
'''
A single set of features of data. Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
'''
def to_json_string(self):
'''Serializes this instance to a JSON string.'''
pass
| 3
| 2
| 3
| 0
| 2
| 1
| 1
| 1.86
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 23
| 3
| 7
| 5
| 5
| 13
| 7
| 5
| 5
| 1
| 0
| 0
| 1
|
291
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/utils.py
|
transformers.data.processors.utils.SingleSentenceClassificationProcessor
|
from ...utils import is_torch_available, logging
class SingleSentenceClassificationProcessor(DataProcessor):
"""Generic processor for a single sentence classification data set."""
def __init__(self, labels=None, examples=None, mode='classification', verbose=False):
self.labels = [] if labels is None else labels
self.examples = [] if examples is None else examples
self.mode = mode
self.verbose = verbose
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
if isinstance(idx, slice):
return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
return self.examples[idx]
@classmethod
def create_from_csv(cls, file_name, split_name='', column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs):
processor = cls(**kwargs)
processor.add_examples_from_csv(file_name, split_name=split_name, column_label=column_label, column_text=column_text, column_id=column_id, skip_first_row=skip_first_row, overwrite_labels=True, overwrite_examples=True)
return processor
@classmethod
def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
processor = cls(**kwargs)
processor.add_examples(texts_or_text_and_labels, labels=labels)
return processor
def add_examples_from_csv(self, file_name, split_name='', column_label=0, column_text=1, column_id=None, skip_first_row=False, overwrite_labels=False, overwrite_examples=False):
lines = self._read_tsv(file_name)
if skip_first_row:
lines = lines[1:]
texts = []
labels = []
ids = []
for i, line in enumerate(lines):
texts.append(line[column_text])
labels.append(line[column_label])
if column_id is not None:
ids.append(line[column_id])
else:
guid = f'{split_name}-{i}' if split_name else str(i)
ids.append(guid)
return self.add_examples(texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples)
def add_examples(self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False):
if labels is not None and len(texts_or_text_and_labels) != len(labels):
raise ValueError(f'Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}')
if ids is not None and len(texts_or_text_and_labels) != len(ids):
raise ValueError(f'Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}')
if ids is None:
ids = [None] * len(texts_or_text_and_labels)
if labels is None:
labels = [None] * len(texts_or_text_and_labels)
examples = []
added_labels = set()
for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
text, label = text_or_text_and_label
else:
text = text_or_text_and_label
added_labels.add(label)
examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
if overwrite_examples:
self.examples = examples
else:
self.examples.extend(examples)
if overwrite_labels:
self.labels = list(added_labels)
else:
self.labels = list(set(self.labels).union(added_labels))
return self.examples
def get_features(self, tokenizer, max_length=None, pad_on_left=False, pad_token=0, mask_padding_with_zero=True, return_tensors=None):
"""
Convert examples in a list of `InputFeatures`
Args:
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
values)
Returns:
Will return a list of task-specific `InputFeatures` which can be fed to the model.
"""
if max_length is None:
max_length = tokenizer.max_len
label_map = {label: i for i, label in enumerate(self.labels)}
all_input_ids = []
for ex_index, example in enumerate(self.examples):
if ex_index % 10000 == 0:
logger.info(f'Tokenizing example {ex_index}')
input_ids = tokenizer.encode(example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len))
all_input_ids.append(input_ids)
batch_length = max((len(input_ids) for input_ids in all_input_ids))
features = []
for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
if ex_index % 10000 == 0:
logger.info(f'Writing example {ex_index}/{len(self.examples)}')
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
padding_length = batch_length - len(input_ids)
if pad_on_left:
input_ids = [pad_token] * padding_length + input_ids
attention_mask = [0 if mask_padding_with_zero else 1] * padding_length + attention_mask
else:
input_ids = input_ids + [pad_token] * padding_length
attention_mask = attention_mask + [0 if mask_padding_with_zero else 1] * padding_length
if len(input_ids) != batch_length:
raise ValueError(f'Error with input length {len(input_ids)} vs {batch_length}')
if len(attention_mask) != batch_length:
raise ValueError(f'Error with input length {len(attention_mask)} vs {batch_length}')
if self.mode == 'classification':
label = label_map[example.label]
elif self.mode == 'regression':
label = float(example.label)
else:
raise ValueError(self.mode)
if ex_index < 5 and self.verbose:
logger.info('*** Example ***')
logger.info(f'guid: {example.guid}')
logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
logger.info(f'label: {example.label} (id = {label})')
features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
if return_tensors is None:
return features
elif return_tensors == 'pt':
if not is_torch_available():
raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
import torch
from torch.utils.data import TensorDataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
if self.mode == 'classification':
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif self.mode == 'regression':
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
return dataset
else:
raise ValueError("return_tensors should be `'pt'` or `None`")
|
class SingleSentenceClassificationProcessor(DataProcessor):
'''Generic processor for a single sentence classification data set.'''
def __init__(self, labels=None, examples=None, mode='classification', verbose=False):
pass
def __len__(self):
pass
def __getitem__(self, idx):
pass
@classmethod
def create_from_csv(cls, file_name, split_name='', column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs):
pass
@classmethod
def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
pass
def add_examples_from_csv(self, file_name, split_name='', column_label=0, column_text=1, column_id=None, skip_first_row=False, overwrite_labels=False, overwrite_examples=False):
pass
    def add_examples(self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False):
        pass
def get_features(self, tokenizer, max_length=None, pad_on_left=False, pad_token=0, mask_padding_with_zero=True, return_tensors=None):
'''
Convert examples in a list of `InputFeatures`
Args:
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
values)
Returns:
Will return a list of task-specific `InputFeatures` which can be fed to the model.
'''
pass
| 11
| 2
| 24
| 2
| 19
| 2
| 5
| 0.12
| 1
| 13
| 2
| 0
| 6
| 4
| 8
| 15
| 225
| 30
| 174
| 65
| 137
| 21
| 118
| 41
| 105
| 22
| 1
| 2
| 46
|
292
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/data/processors/xnli.py
|
transformers.data.processors.xnli.XnliProcessor
|
from .utils import DataProcessor, InputExample
import os
class XnliProcessor(DataProcessor):
"""
Processor for the XNLI dataset. Adapted from
https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
"""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_train_examples(self, data_dir):
"""See base class."""
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, f'XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv'))
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = f'train-{i}'
text_a = line[0]
text_b = line[1]
label = 'contradiction' if line[2] == 'contradictory' else line[2]
if not isinstance(text_a, str):
raise TypeError(f'Training input {text_a} is not a string')
if not isinstance(text_b, str):
raise TypeError(f'Training input {text_b} is not a string')
if not isinstance(label, str):
raise TypeError(f'Training label {label} is not a string')
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, 'XNLI-1.0/xnli.test.tsv'))
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
language = line[0]
if language != self.language:
continue
guid = f'test-{i}'
text_a = line[6]
text_b = line[7]
label = line[1]
            if not isinstance(text_a, str):
                raise TypeError(f'Test input {text_a} is not a string')
            if not isinstance(text_b, str):
                raise TypeError(f'Test input {text_b} is not a string')
            if not isinstance(label, str):
                raise TypeError(f'Test label {label} is not a string')
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ['contradiction', 'entailment', 'neutral']
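A hedged usage sketch; the data directory is a placeholder that must contain the XNLI-MT-1.0 and XNLI-1.0 files referenced above.
from transformers.data.processors.xnli import XnliProcessor

processor = XnliProcessor(language='de', train_language='en')
print(processor.get_labels())  # ['contradiction', 'entailment', 'neutral']
# train_examples = processor.get_train_examples('/path/to/xnli')  # reads XNLI-MT-1.0/multinli/multinli.train.en.tsv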
|
class XnliProcessor(DataProcessor):
'''
Processor for the XNLI dataset. Adapted from
https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
'''
def __init__(self, language, train_language=None):
pass
def get_train_examples(self, data_dir):
'''See base class.'''
pass
def get_test_examples(self, data_dir):
'''See base class.'''
pass
def get_labels(self):
'''See base class.'''
pass
| 5
| 4
| 12
| 0
| 11
| 1
| 4
| 0.15
| 1
| 4
| 1
| 0
| 4
| 2
| 4
| 11
| 57
| 4
| 46
| 23
| 41
| 7
| 46
| 23
| 41
| 8
| 1
| 2
| 17
|
293
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/debug_utils.py
|
transformers.debug_utils.DebugOption
|
from .utils import ExplicitEnum, is_torch_available, logging
class DebugOption(ExplicitEnum):
UNDERFLOW_OVERFLOW = 'underflow_overflow'
TPU_METRICS_DEBUG = 'tpu_metrics_debug'
|
class DebugOption(ExplicitEnum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 1
| 0
| 0
|
294
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/debug_utils.py
|
transformers.debug_utils.DebugUnderflowOverflow
|
import collections
class DebugUnderflowOverflow:
"""
This debug class helps detect and understand where the model starts getting very large or very small, and more
importantly `nan` or `inf` weight and activation elements.
There are 2 working modes:
1. Underflow/overflow detection (default)
2. Specific batch absolute min/max tracing without detection
Mode 1: Underflow/overflow detection
To activate the underflow/overflow detection, initialize the object with the model :
```python
debug_overflow = DebugUnderflowOverflow(model)
```
then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or output
elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this event,
each frame reporting
1. the fully qualified module name plus the class name whose `forward` was run
2. the absolute min and max value of all elements for each module weights, and the inputs and output
For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16
mixed precision :
```
Detected inf/nan during batch_number=0
Last 21 forward frames:
abs min abs max metadata
[...]
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
2.17e-07 4.50e+00 weight
1.79e-06 4.65e+00 input[0]
2.68e-06 3.70e+01 output
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
8.08e-07 2.66e+01 weight
1.79e-06 4.65e+00 input[0]
1.27e-04 2.37e+02 output
encoder.block.2.layer.1.DenseReluDense.wo Linear
1.01e-06 6.44e+00 weight
0.00e+00 9.74e+03 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
1.79e-06 4.65e+00 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.dropout Dropout
3.18e-04 6.27e+04 input[0]
0.00e+00 inf output
```
You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value was
around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which
renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than
64K, and we get an overflow.
As you can see it's the previous frames that we need to look into when the numbers start going into very large for
fp16 numbers.
The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.
By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :
```python
debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
```
To validate that you have set up this debugging feature correctly, and you intend to use it in a training that
may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in
the next section.
Mode 2. Specific batch absolute min/max tracing without detection
The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.
Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a
given batch, and only do that for batches 1 and 3. Then you instantiate this class as :
```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])
```
And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.
This is helpful if you know that the program starts misbehaving after a certain batch number, so you can
fast-forward right to that area.
Early stopping:
You can also specify the batch number after which to stop the training, with :
```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
```
This feature is mainly useful in the tracing mode, but you can use it for any mode.
**Performance**:
    As this module measures absolute `min`/`max` of each weight of the model on every forward it'll slow the training
down. Therefore remember to turn it off once the debugging needs have been met.
Args:
model (`nn.Module`):
The model to debug.
max_frames_to_save (`int`, *optional*, defaults to 21):
How many frames back to record
trace_batch_nums(`list[int]`, *optional*, defaults to `[]`):
Which batch numbers to trace (turns detection off)
        abort_after_batch_num (`int`, *optional*):
Whether to abort after a certain batch number has finished
"""
def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):
self.model = model
self.trace_batch_nums = trace_batch_nums
self.abort_after_batch_num = abort_after_batch_num
self.frames = collections.deque([], max_frames_to_save)
self.frame = []
self.batch_number = 0
self.total_calls = 0
self.detected_overflow = False
self.prefix = ' '
self.analyse_model()
self.register_forward_hook()
def save_frame(self, frame=None):
if frame is not None:
self.expand_frame(frame)
self.frames.append('\n'.join(self.frame))
self.frame = []
def expand_frame(self, line):
self.frame.append(line)
def trace_frames(self):
print('\n'.join(self.frames))
self.frames = []
def reset_saved_frames(self):
self.frames = []
def dump_saved_frames(self):
print(f'\nDetected inf/nan during batch_number={self.batch_number}')
print(f'Last {len(self.frames)} forward frames:')
print(f"{'abs min':8} {'abs max':8} metadata")
print('\n'.join(self.frames))
print('\n\n')
self.frames = []
def analyse_model(self):
self.module_names = {m: name for name, m in self.model.named_modules()}
def analyse_variable(self, var, ctx):
if torch.is_tensor(var):
self.expand_frame(get_abs_min_max(var, ctx))
if detect_overflow(var, ctx):
self.detected_overflow = True
elif var is None:
self.expand_frame(f"{'None':>17} {ctx}")
else:
self.expand_frame(f"{'not a tensor':>17} {ctx}")
def batch_start_frame(self):
self.expand_frame(f'\n\n{self.prefix} *** Starting batch number={self.batch_number} ***')
self.expand_frame(f"{'abs min':8} {'abs max':8} metadata")
def batch_end_frame(self):
self.expand_frame(f'{self.prefix} *** Finished batch number={self.batch_number - 1} ***\n\n')
def create_frame(self, module, input, output):
self.expand_frame(f'{self.prefix} {self.module_names[module]} {module.__class__.__name__}')
for name, p in module.named_parameters(recurse=False):
self.analyse_variable(p, name)
if isinstance(input, tuple):
for i, x in enumerate(input):
self.analyse_variable(x, f'input[{i}]')
else:
self.analyse_variable(input, 'input')
if isinstance(output, tuple):
for i, x in enumerate(output):
if isinstance(x, tuple):
for j, y in enumerate(x):
self.analyse_variable(y, f'output[{i}][{j}]')
else:
self.analyse_variable(x, f'output[{i}]')
else:
self.analyse_variable(output, 'output')
self.save_frame()
def register_forward_hook(self):
self.model.apply(self._register_forward_hook)
def _register_forward_hook(self, module):
module.register_forward_hook(self.forward_hook)
def forward_hook(self, module, input, output):
last_frame_of_batch = False
trace_mode = self.batch_number in self.trace_batch_nums
if trace_mode:
self.reset_saved_frames()
if self.total_calls == 0:
self.batch_start_frame()
self.total_calls += 1
if module == self.model:
self.batch_number += 1
last_frame_of_batch = True
self.create_frame(module, input, output)
if trace_mode:
self.trace_frames()
if last_frame_of_batch:
self.batch_start_frame()
if self.detected_overflow and (not trace_mode):
self.dump_saved_frames()
raise ValueError('DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. Please scroll up above this traceback to see the activation values prior to this event.')
if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:
raise ValueError(f'DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg')
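A minimal self-contained sketch of the trace mode, assuming PyTorch is installed; the tiny model and random batch are invented, not from the record.
import torch

from transformers.debug_utils import DebugUnderflowOverflow

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[0])  # forward hooks are registered in __init__
out = model(torch.randn(4, 8))  # forward_hook records abs min/max per module and prints the batch-0 trace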
|
class DebugUnderflowOverflow:
'''
This debug class helps detect and understand where the model starts getting very large or very small, and more
importantly `nan` or `inf` weight and activation elements.
There are 2 working modes:
1. Underflow/overflow detection (default)
2. Specific batch absolute min/max tracing without detection
Mode 1: Underflow/overflow detection
To activate the underflow/overflow detection, initialize the object with the model :
```python
debug_overflow = DebugUnderflowOverflow(model)
```
then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or output
elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this event,
each frame reporting
1. the fully qualified module name plus the class name whose `forward` was run
2. the absolute min and max value of all elements for each module weights, and the inputs and output
For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16
mixed precision :
```
Detected inf/nan during batch_number=0
Last 21 forward frames:
abs min abs max metadata
[...]
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
2.17e-07 4.50e+00 weight
1.79e-06 4.65e+00 input[0]
2.68e-06 3.70e+01 output
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
8.08e-07 2.66e+01 weight
1.79e-06 4.65e+00 input[0]
1.27e-04 2.37e+02 output
encoder.block.2.layer.1.DenseReluDense.wo Linear
1.01e-06 6.44e+00 weight
0.00e+00 9.74e+03 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
1.79e-06 4.65e+00 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.dropout Dropout
3.18e-04 6.27e+04 input[0]
0.00e+00 inf output
```
You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value was
around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which
renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than
64K, and we get an overflow.
As you can see it's the previous frames that we need to look into when the numbers start going into very large for
fp16 numbers.
The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.
By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :
```python
debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
```
To validate that you have set up this debugging feature correctly, and you intend to use it in a training that
may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in
the next section.
Mode 2. Specific batch absolute min/max tracing without detection
The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.
Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a
given batch, and only do that for batches 1 and 3. Then you instantiate this class as :
```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])
```
And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.
This is helpful if you know that the program starts misbehaving after a certain batch number, so you can
fast-forward right to that area.
Early stopping:
You can also specify the batch number after which to stop the training, with :
```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
```
This feature is mainly useful in the tracing mode, but you can use it for any mode.
**Performance**:
    As this module measures absolute `min`/`max` of each weight of the model on every forward it'll slow the training
down. Therefore remember to turn it off once the debugging needs have been met.
Args:
model (`nn.Module`):
The model to debug.
max_frames_to_save (`int`, *optional*, defaults to 21):
How many frames back to record
trace_batch_nums(`list[int]`, *optional*, defaults to `[]`):
Which batch numbers to trace (turns detection off)
        abort_after_batch_num (`int`, *optional*):
Whether to abort after a certain batch number has finished
'''
def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):
pass
def save_frame(self, frame=None):
pass
def expand_frame(self, line):
pass
def trace_frames(self):
pass
def reset_saved_frames(self):
pass
def dump_saved_frames(self):
pass
def analyse_model(self):
pass
def analyse_variable(self, var, ctx):
pass
def batch_start_frame(self):
pass
def batch_end_frame(self):
pass
def create_frame(self, module, input, output):
pass
def register_forward_hook(self):
pass
def _register_forward_hook(self, module):
pass
def forward_hook(self, module, input, output):
pass
| 15
| 1
| 9
| 1
| 7
| 1
| 2
| 1.06
| 0
| 3
| 0
| 0
| 14
| 10
| 14
| 14
| 264
| 63
| 98
| 30
| 83
| 104
| 87
| 30
| 72
| 9
| 0
| 4
| 33
|
295
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/feature_extraction_sequence_utils.py
|
transformers.feature_extraction_sequence_utils.SequenceFeatureExtractor
|
from .utils import PaddingStrategy, TensorType, is_torch_tensor, logging, to_numpy
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from typing import Optional, Union
class SequenceFeatureExtractor(FeatureExtractionMixin):
"""
This is a general feature extraction class for speech recognition.
Args:
feature_size (`int`):
The feature dimension of the extracted features.
sampling_rate (`int`):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
padding_value (`float`):
The value that is used to fill the padding values / vectors.
"""
def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
self.feature_size = feature_size
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.padding_side = kwargs.pop('padding_side', 'right')
self.return_attention_mask = kwargs.pop('return_attention_mask', True)
super().__init__(**kwargs)
def pad(self, processed_features: Union[BatchFeature, list[BatchFeature], dict[str, BatchFeature], dict[str, list[BatchFeature]], list[dict[str, BatchFeature]]], padding: Union[bool, str, PaddingStrategy]=True, max_length: Optional[int]=None, truncation: bool=False, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None) -> BatchFeature:
"""
Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the
max sequence length in the batch.
Padding side (left/right) padding values are defined at the feature extractor level (with `self.padding_side`,
`self.padding_value`)
<Tip>
        If the `processed_features` passed are a dictionary of numpy arrays or PyTorch tensors, the
result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
PyTorch tensors, you will lose the specific device of your tensors however.
</Tip>
Args:
            processed_features ([`BatchFeature`], list of [`BatchFeature`], `dict[str, list[float]]`, `dict[str, list[list[float]]]` or `list[dict[str, list[float]]]`):
Processed inputs. Can represent one input ([`BatchFeature`] or `dict[str, list[float]]`) or a batch of
input values / vectors (list of [`BatchFeature`], *dict[str, list[list[float]]]* or *list[dict[str,
list[float]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function.
Instead of `list[float]` you can have tensors (numpy arrays or PyTorch tensors),
see the note above for the return type.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
"""
if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
processed_features = {key: [example[key] for example in processed_features] for key in processed_features[0]}
if self.model_input_names[0] not in processed_features:
raise ValueError(f'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature` to this method that includes {self.model_input_names[0]}, but you provided {list(processed_features.keys())}')
required_input = processed_features[self.model_input_names[0]]
return_attention_mask = return_attention_mask if return_attention_mask is not None else self.return_attention_mask
if len(required_input) == 0:
if return_attention_mask:
processed_features['attention_mask'] = []
return processed_features
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
index = 0
while len(required_input[index]) == 0:
index += 1
if index < len(required_input):
first_element = required_input[index][0]
if return_tensors is None:
if is_torch_tensor(first_element):
return_tensors = 'pt'
elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
return_tensors = 'np'
else:
raise ValueError(f'type of {first_element} unknown: {type(first_element)}. Should be one of a python, numpy, or pytorch object.')
for key, value in processed_features.items():
if isinstance(value[0], (int, float)):
processed_features[key] = to_numpy(value)
else:
processed_features[key] = [to_numpy(v) for v in value]
padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
required_input = processed_features[self.model_input_names[0]]
batch_size = len(required_input)
if not all((len(v) == batch_size for v in processed_features.values())):
raise ValueError('Some items in the output dictionary have a different batch size than others.')
truncated_inputs = []
for i in range(batch_size):
inputs = {k: v[i] for k, v in processed_features.items()}
inputs_slice = self._truncate(inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation)
truncated_inputs.append(inputs_slice)
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max((len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs))
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
outputs = self._pad(truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
if value.dtype is np.dtype(np.float64):
value = value.astype(np.float32)
batch_outputs[key].append(value)
return BatchFeature(batch_outputs, tensor_type=return_tensors)
def _pad(self, processed_features: Union[dict[str, np.ndarray], BatchFeature], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict:
"""
Pad inputs (on left/right and up to predefined length or max length in the batch)
Args:
processed_features (`Union[dict[str, np.ndarray], BatchFeature]`):
Dictionary of input values (`np.ndarray[float]`) / input vectors (`list[np.ndarray[float]]`) or batch
of inputs values (`list[np.ndarray[int]]`) / input vectors (`list[np.ndarray[int]]`)
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see below)
padding_strategy (`PaddingStrategy`, *optional*, default to `PaddingStrategy.DO_NOT_PAD`):
PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The feature_extractor padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of (`int`, *optional*):
Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Set to False to avoid returning attention mask (default: set to model specifics)
"""
required_input = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
if return_attention_mask and 'attention_mask' not in processed_features:
processed_features['attention_mask'] = np.ones(len(required_input), dtype=np.int32)
if needs_to_be_padded:
difference = max_length - len(required_input)
if self.padding_side == 'right':
if return_attention_mask:
processed_features['attention_mask'] = np.pad(processed_features['attention_mask'], (0, difference))
padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
processed_features[self.model_input_names[0]] = np.pad(required_input, padding_shape, 'constant', constant_values=self.padding_value)
elif self.padding_side == 'left':
if return_attention_mask:
processed_features['attention_mask'] = np.pad(processed_features['attention_mask'], (difference, 0))
padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
processed_features[self.model_input_names[0]] = np.pad(required_input, padding_shape, 'constant', constant_values=self.padding_value)
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return processed_features
def _truncate(self, processed_features: Union[dict[str, np.ndarray], BatchFeature], max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, truncation: Optional[bool]=None):
"""
Truncate inputs to predefined length or max length in the batch
Args:
processed_features(`Union[dict[str, np.ndarray], BatchFeature]`):
Dictionary of input values (`np.ndarray[float]`) / input vectors (`list[np.ndarray[float]]`) or batch
of inputs values (`list[np.ndarray[int]]`) / input vectors (`list[np.ndarray[int]]`)
max_length (`int`, *optional*):
maximum length of the returned list and optionally padding length (see below)
pad_to_multiple_of (`int`, *optional*) :
Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
which benefit from having sequence lengths be a multiple of 128.
truncation (`bool`, *optional*):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')
required_input = processed_features[self.model_input_names[0]]
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_truncated = len(required_input) > max_length
if needs_to_be_truncated:
processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
if 'attention_mask' in processed_features:
processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]
return processed_features
def _get_padding_strategies(self, padding=False, max_length=None):
"""
Find the correct padding strategy
"""
if padding is not False:
if padding is True:
padding_strategy = PaddingStrategy.LONGEST
elif not isinstance(padding, PaddingStrategy):
padding_strategy = PaddingStrategy(padding)
elif isinstance(padding, PaddingStrategy):
padding_strategy = padding
else:
padding_strategy = PaddingStrategy.DO_NOT_PAD
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined')
if padding_strategy != PaddingStrategy.DO_NOT_PAD and self.padding_value is None:
raise ValueError('Asking to pad but the feature_extractor does not have a padding value. Please select a value to use as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.')
return padding_strategy
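A padding sketch through a concrete subclass; Wav2Vec2FeatureExtractor and the raw values are assumptions about a typical speech setup, not taken from this record.
import numpy as np

from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(
    feature_size=1, sampling_rate=16000, padding_value=0.0, return_attention_mask=True
)
batch = {'input_values': [np.ones(120, dtype=np.float32), np.ones(80, dtype=np.float32)]}
padded = extractor.pad(batch, padding='longest', return_tensors='np')
print(padded['input_values'].shape, padded['attention_mask'].shape)  # both padded to length 120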
|
class SequenceFeatureExtractor(FeatureExtractionMixin):
'''
This is a general feature extraction class for speech recognition.
Args:
feature_size (`int`):
The feature dimension of the extracted features.
sampling_rate (`int`):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
padding_value (`float`):
The value that is used to fill the padding values / vectors.
'''
def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
pass
def pad(self, processed_features: Union[BatchFeature, list[BatchFeature], dict[str, BatchFeature], dict[str, list[BatchFeature]], list[dict[str, BatchFeature]]], padding: Union[bool, str, PaddingStrategy]=True, max_length: Optional[int]=None, truncation: bool=False, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None) -> BatchFeature:
'''
Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the
max sequence length in the batch.
Padding side (left/right) padding values are defined at the feature extractor level (with `self.padding_side`,
`self.padding_value`)
<Tip>
        If the `processed_features` passed are a dictionary of numpy arrays or PyTorch tensors, the
result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
PyTorch tensors, you will lose the specific device of your tensors however.
</Tip>
Args:
            processed_features ([`BatchFeature`], list of [`BatchFeature`], `dict[str, list[float]]`, `dict[str, list[list[float]]]` or `list[dict[str, list[float]]]`):
Processed inputs. Can represent one input ([`BatchFeature`] or `dict[str, list[float]]`) or a batch of
input values / vectors (list of [`BatchFeature`], *dict[str, list[list[float]]]* or *list[dict[str,
list[float]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function.
Instead of `list[float]` you can have tensors (numpy arrays or PyTorch tensors),
see the note above for the return type.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
'''
pass
def _pad(self, processed_features: Union[dict[str, np.ndarray], BatchFeature], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict:
'''
Pad inputs (on left/right and up to predefined length or max length in the batch)
Args:
processed_features (`Union[dict[str, np.ndarray], BatchFeature]`):
Dictionary of input values (`np.ndarray[float]`) / input vectors (`list[np.ndarray[float]]`) or batch
of inputs values (`list[np.ndarray[int]]`) / input vectors (`list[np.ndarray[int]]`)
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see below)
padding_strategy (`PaddingStrategy`, *optional*, default to `PaddingStrategy.DO_NOT_PAD`):
PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The feature_extractor padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of (`int`, *optional*):
Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Set to False to avoid returning attention mask (default: set to model specifics)
'''
pass
def _truncate(self, processed_features: Union[dict[str, np.ndarray], BatchFeature], max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, truncation: Optional[bool]=None):
'''
Truncate inputs to predefined length or max length in the batch
Args:
processed_features(`Union[dict[str, np.ndarray], BatchFeature]`):
Dictionary of input values (`np.ndarray[float]`) / input vectors (`list[np.ndarray[float]]`) or batch
of inputs values (`list[np.ndarray[int]]`) / input vectors (`list[np.ndarray[int]]`)
max_length (`int`, *optional*):
maximum length of the returned list and optionally padding length (see below)
pad_to_multiple_of (`int`, *optional*) :
Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
which benefit from having sequence lengths be a multiple of 128.
truncation (`bool`, *optional*):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
'''
pass
def _get_padding_strategies(self, padding=False, max_length=None):
'''
Find the correct padding strategy
'''
pass
| 6
| 5
| 65
| 9
| 36
| 20
| 10
| 0.62
| 1
| 12
| 1
| 15
| 5
| 5
| 5
| 17
| 343
| 52
| 180
| 58
| 146
| 112
| 107
| 30
| 101
| 22
| 2
| 3
| 48
|
296
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/feature_extraction_utils.py
|
transformers.feature_extraction_utils.BatchFeature
|
import numpy as np
from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union
from collections import UserDict
from .utils import FEATURE_EXTRACTOR_NAME, PROCESSOR_NAME, PushToHubMixin, TensorType, copy_func, download_url, is_numpy_array, is_offline_mode, is_remote_url, is_torch_available, is_torch_device, is_torch_dtype, logging, requires_backends
class BatchFeature(UserDict):
"""
Holds the output of the [`~SequenceFeatureExtractor.pad`] and feature extractor specific `__call__` methods.
This class is derived from a python dictionary and can be used as a dictionary.
Args:
data (`dict`, *optional*):
Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask',
etc.).
tensor_type (`Union[None, str, TensorType]`, *optional*):
You can give a tensor_type here to convert the lists of integers in PyTorch/Numpy Tensors at
initialization.
"""
def __init__(self, data: Optional[dict[str, Any]]=None, tensor_type: Union[None, str, TensorType]=None):
super().__init__(data)
self.convert_to_tensors(tensor_type=tensor_type)
def __getitem__(self, item: str) -> Any:
"""
If the key is a string, returns the value of the dict associated to `key` ('input_values', 'attention_mask',
etc.).
"""
if isinstance(item, str):
return self.data[item]
else:
raise KeyError('Indexing with integers is not available when using Python based feature extractors')
def __getattr__(self, item: str):
try:
return self.data[item]
except KeyError:
raise AttributeError
def __getstate__(self):
return {'data': self.data}
def __setstate__(self, state):
if 'data' in state:
self.data = state['data']
def _get_is_as_tensor_fns(self, tensor_type: Optional[Union[str, TensorType]]=None):
if tensor_type is None:
return (None, None)
if not isinstance(tensor_type, TensorType):
tensor_type = TensorType(tensor_type)
if tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
import torch
def as_tensor(value):
if isinstance(value, (list, tuple)) and len(value) > 0:
if isinstance(value[0], np.ndarray):
value = np.array(value)
elif isinstance(value[0], (list, tuple)) and len(value[0]) > 0 and isinstance(value[0][0], np.ndarray):
value = np.array(value)
if isinstance(value, np.ndarray):
return torch.from_numpy(value)
else:
return torch.tensor(value)
is_tensor = torch.is_tensor
else:
def as_tensor(value, dtype=None):
if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)):
value_lens = [len(val) for val in value]
if len(set(value_lens)) > 1 and dtype is None:
value = as_tensor([np.asarray(val) for val in value], dtype=object)
return np.asarray(value, dtype=dtype)
is_tensor = is_numpy_array
return (is_tensor, as_tensor)
def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]]=None):
"""
Convert the inner content to tensors.
Args:
tensor_type (`str` or [`~utils.TensorType`], *optional*):
The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
`None`, no modification is done.
"""
if tensor_type is None:
return self
is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type)
for key, value in self.items():
try:
if not is_tensor(value):
tensor = as_tensor(value)
self[key] = tensor
except:
if key == 'overflowing_values':
raise ValueError('Unable to create tensor returning overflowing values of different lengths. ')
raise ValueError("Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.")
return self
def to(self, *args, **kwargs) -> 'BatchFeature':
"""
Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
different `dtypes` and sending the `BatchFeature` to a different `device`.
Args:
args (`Tuple`):
Will be passed to the `to(...)` function of the tensors.
kwargs (`Dict`, *optional*):
Will be passed to the `to(...)` function of the tensors.
To enable asynchronous data transfer, set the `non_blocking` flag in `kwargs` (defaults to `False`).
Returns:
[`BatchFeature`]: The same instance after modification.
"""
requires_backends(self, ['torch'])
import torch
device = kwargs.get('device')
non_blocking = kwargs.get('non_blocking', False)
if device is None and len(args) > 0:
arg = args[0]
if is_torch_dtype(arg):
pass
elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
device = arg
else:
raise ValueError(f'Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.')
def maybe_to(v):
if isinstance(v, torch.Tensor) and torch.is_floating_point(v):
return v.to(*args, **kwargs)
elif isinstance(v, torch.Tensor) and device is not None:
return v.to(device=device, non_blocking=non_blocking)
else:
return v
self.data = {k: maybe_to(v) for k, v in self.items()}
return self
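A brief sketch of wrapping numpy features and converting them to PyTorch tensors; it assumes PyTorch is installed, the array values are invented, and the commented `.to()` call additionally assumes a CUDA device is available.
import numpy as np

from transformers.feature_extraction_utils import BatchFeature

feats = BatchFeature(
    {'input_values': [np.zeros(4, dtype=np.float32), np.ones(4, dtype=np.float32)]},
    tensor_type='pt',                 # lists of arrays are converted to torch tensors at construction
)
print(feats['input_values'].shape)    # torch.Size([2, 4])
# feats = feats.to('cuda:0')          # moves floating-point tensors to the device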
|
class BatchFeature(UserDict):
'''
Holds the output of the [`~SequenceFeatureExtractor.pad`] and feature extractor specific `__call__` methods.
This class is derived from a python dictionary and can be used as a dictionary.
Args:
data (`dict`, *optional*):
Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask',
etc.).
tensor_type (`Union[None, str, TensorType]`, *optional*):
You can give a tensor_type here to convert the lists of integers in PyTorch/Numpy Tensors at
initialization.
'''
def __init__(self, data: Optional[dict[str, Any]]=None, tensor_type: Union[None, str, TensorType]=None):
pass
def __getitem__(self, item: str) -> Any:
'''
If the key is a string, returns the value of the dict associated to `key` ('input_values', 'attention_mask',
etc.).
'''
pass
def __getattr__(self, item: str):
pass
def __getstate__(self):
pass
def __setstate__(self, state):
pass
def _get_is_as_tensor_fns(self, tensor_type: Optional[Union[str, TensorType]]=None):
pass
def as_tensor(value):
pass
    def as_tensor(value, dtype=None):
pass
def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]]=None):
'''
Convert the inner content to tensors.
Args:
tensor_type (`str` or [`~utils.TensorType`], *optional*):
The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
`None`, no modification is done.
'''
pass
def to(self, *args, **kwargs) -> 'BatchFeature':
'''
Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
different `dtypes` and sending the `BatchFeature` to a different `device`.
Args:
args (`Tuple`):
Will be passed to the `to(...)` function of the tensors.
kwargs (`Dict`, *optional*):
Will be passed to the `to(...)` function of the tensors.
To enable asynchronous data transfer, set the `non_blocking` flag in `kwargs` (defaults to `False`).
Returns:
[`BatchFeature`]: The same instance after modification.
'''
pass
def maybe_to(v):
pass
| 12
| 4
| 14
| 1
| 10
| 3
| 3
| 0.46
| 1
| 12
| 0
| 1
| 11
| 1
| 11
| 66
| 189
| 29
| 112
| 29
| 94
| 52
| 93
| 29
| 75
| 9
| 8
| 3
| 41
|
297
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/feature_extraction_utils.py
|
transformers.feature_extraction_utils.FeatureExtractionMixin
|
from .dynamic_module_utils import custom_object_save
from .utils.hub import cached_file
from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union
import os
import copy
import json
import warnings
from .utils import FEATURE_EXTRACTOR_NAME, PROCESSOR_NAME, PushToHubMixin, TensorType, copy_func, download_url, is_numpy_array, is_offline_mode, is_remote_url, is_torch_available, is_torch_device, is_torch_dtype, logging, requires_backends
import numpy as np
class FeatureExtractionMixin(PushToHubMixin):
"""
This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature
extractors.
"""
_auto_class = None
def __init__(self, **kwargs):
"""Set elements of `kwargs` as attributes."""
self._processor_class = kwargs.pop('processor_class', None)
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
def _set_processor_class(self, processor_class: str):
"""Sets processor class as an attribute."""
self._processor_class = processor_class
@classmethod
def from_pretrained(cls: type[SpecificFeatureExtractorType], pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]]=None, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', **kwargs) -> SpecificFeatureExtractorType:
"""
Instantiate a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a feature extractor, *e.g.* a
derived class of [`SequenceFeatureExtractor`].
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a feature extractor file saved using the
[`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- a path or url to a saved feature extractor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the feature extractor files and override the cached versions
if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
the token generated when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
</Tip>
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final feature extractor object. If `True`, then this
function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
`kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are feature extractor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
Returns:
A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`].
Examples:
```python
# We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let's show the examples on a
# derived class: *Wav2Vec2FeatureExtractor*
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base-960h"
) # Download feature_extraction_config from huggingface.co and cache.
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"./test/saved_model/"
) # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')*
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False
)
assert feature_extractor.return_attention_mask is False
feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True
)
assert feature_extractor.return_attention_mask is False
assert unused_kwargs == {"foo": False}
```"""
kwargs['cache_dir'] = cache_dir
kwargs['force_download'] = force_download
kwargs['local_files_only'] = local_files_only
kwargs['revision'] = revision
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if token is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
token = use_auth_token
if token is not None:
kwargs['token'] = token
feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
return cls.from_dict(feature_extractor_dict, **kwargs)
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):
"""
Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the
[`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the feature extractor JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if kwargs.get('token') is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
kwargs['token'] = use_auth_token
if os.path.isfile(save_directory):
raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(save_directory, exist_ok=True)
if push_to_hub:
commit_message = kwargs.pop('commit_message', None)
repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])
repo_id = self._create_repo(repo_id, **kwargs)
files_timestamps = self._get_files_timestamps(save_directory)
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME)
self.to_json_file(output_feature_extractor_file)
logger.info(f'Feature extractor saved in {output_feature_extractor_file}')
if push_to_hub:
self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))
return [output_feature_extractor_file]
@classmethod
def get_feature_extractor_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor object.
"""
cache_dir = kwargs.pop('cache_dir', None)
force_download = kwargs.pop('force_download', False)
resume_download = kwargs.pop('resume_download', None)
proxies = kwargs.pop('proxies', None)
subfolder = kwargs.pop('subfolder', None)
token = kwargs.pop('token', None)
use_auth_token = kwargs.pop('use_auth_token', None)
local_files_only = kwargs.pop('local_files_only', False)
revision = kwargs.pop('revision', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if token is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
token = use_auth_token
from_pipeline = kwargs.pop('_from_pipeline', None)
from_auto_class = kwargs.pop('_from_auto', False)
user_agent = {'file_type': 'feature extractor', 'from_auto_class': from_auto_class}
if from_pipeline is not None:
user_agent['using_pipeline'] = from_pipeline
if is_offline_mode() and (not local_files_only):
logger.info('Offline mode: forcing local_files_only=True')
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
is_local = os.path.isdir(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME)
if os.path.isfile(pretrained_model_name_or_path):
resolved_feature_extractor_file = pretrained_model_name_or_path
is_local = True
elif is_remote_url(pretrained_model_name_or_path):
feature_extractor_file = pretrained_model_name_or_path
resolved_feature_extractor_file = download_url(pretrained_model_name_or_path)
else:
feature_extractor_file = FEATURE_EXTRACTOR_NAME
try:
resolved_feature_extractor_files = [resolved_file for filename in [feature_extractor_file, PROCESSOR_NAME] if (resolved_file := cached_file(pretrained_model_name_or_path, filename=filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, subfolder=subfolder, token=token, user_agent=user_agent, revision=revision, _raise_exceptions_for_missing_entries=False)) is not None]
resolved_feature_extractor_file = resolved_feature_extractor_files[0]
except OSError:
raise
except Exception:
raise OSError(f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a {FEATURE_EXTRACTOR_NAME} file")
try:
with open(resolved_feature_extractor_file, encoding='utf-8') as reader:
text = reader.read()
feature_extractor_dict = json.loads(text)
feature_extractor_dict = feature_extractor_dict.get('feature_extractor', feature_extractor_dict)
except json.JSONDecodeError:
raise OSError(f"It looks like the config file at '{resolved_feature_extractor_file}' is not a valid JSON file.")
if is_local:
logger.info(f'loading configuration file {resolved_feature_extractor_file}')
else:
logger.info(f'loading configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}')
return (feature_extractor_dict, kwargs)
@classmethod
def from_dict(cls, feature_extractor_dict: dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor:
"""
Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of
parameters.
Args:
feature_extractor_dict (`dict[str, Any]`):
Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
[`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method.
kwargs (`dict[str, Any]`):
Additional parameters from which to initialize the feature extractor object.
Returns:
[`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those
parameters.
"""
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
to_remove = []
for key, value in kwargs.items():
if key in feature_extractor_dict:
feature_extractor_dict[key] = value
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
feature_extractor = cls(**feature_extractor_dict)
logger.info(f'Feature extractor {feature_extractor}')
if return_unused_kwargs:
return (feature_extractor, kwargs)
else:
return feature_extractor
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
output['feature_extractor_type'] = self.__class__.__name__
if 'mel_filters' in output:
del output['mel_filters']
if 'window' in output:
del output['window']
return output
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor:
"""
Instantiates a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] from the path to
a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature_extractor
object instantiated from that JSON file.
"""
with open(json_file, encoding='utf-8') as reader:
text = reader.read()
feature_extractor_dict = json.loads(text)
return cls(**feature_extractor_dict)
def to_json_string(self) -> str:
"""
Serializes this instance to a JSON string.
Returns:
`str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
"""
dictionary = self.to_dict()
for key, value in dictionary.items():
if isinstance(value, np.ndarray):
dictionary[key] = value.tolist()
_processor_class = dictionary.pop('_processor_class', None)
if _processor_class is not None:
dictionary['processor_class'] = _processor_class
return json.dumps(dictionary, indent=2, sort_keys=True) + '\n'
def to_json_file(self, json_file_path: Union[str, os.PathLike]):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this feature_extractor instance's parameters will be saved.
"""
with open(json_file_path, 'w', encoding='utf-8') as writer:
writer.write(self.to_json_string())
def __repr__(self):
return f'{self.__class__.__name__} {self.to_json_string()}'
@classmethod
def register_for_auto_class(cls, auto_class='AutoFeatureExtractor'):
"""
Register this class with a given auto class. This should only be used for custom feature extractors as the ones
in the library are already mapped with `AutoFeatureExtractor`.
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoFeatureExtractor"`):
The auto class to register this new feature extractor with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f'{auto_class} is not a valid auto class.')
cls._auto_class = auto_class
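A hedged round-trip sketch of the mixin's save/load API (not part of the record): it assumes `transformers` is installed and uses `Wav2Vec2FeatureExtractor`, a concrete subclass of the mixin above, so nothing is fetched from the Hub.
```python
import tempfile
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor()  # default configuration, built locally

with tempfile.TemporaryDirectory() as tmp_dir:
    # save_pretrained() writes preprocessor_config.json via to_json_file() ...
    extractor.save_pretrained(tmp_dir)
    # ... and from_pretrained() resolves and parses it via get_feature_extractor_dict()/from_dict().
    reloaded = Wav2Vec2FeatureExtractor.from_pretrained(tmp_dir)

assert reloaded.sampling_rate == extractor.sampling_rate
print(reloaded.to_json_string())
```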
|
class FeatureExtractionMixin(PushToHubMixin):
'''
This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature
extractors.
'''
def __init__(self, **kwargs):
'''Set elements of `kwargs` as attributes.'''
pass
def _set_processor_class(self, processor_class: str):
'''Sets processor class as an attribute.'''
pass
@classmethod
def from_pretrained(cls: type[SpecificFeatureExtractorType], pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]]=None, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', **kwargs) -> SpecificFeatureExtractorType:
'''
Instantiate a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a feature extractor, *e.g.* a
derived class of [`SequenceFeatureExtractor`].
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a feature extractor file saved using the
[`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- a path or url to a saved feature extractor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the feature extractor files and override the cached versions
if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
the token generated when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
</Tip>
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final feature extractor object. If `True`, then this
function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
`kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are feature extractor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
Returns:
A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`].
Examples:
```python
# We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let's show the examples on a
# derived class: *Wav2Vec2FeatureExtractor*
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base-960h"
) # Download feature_extraction_config from huggingface.co and cache.
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"./test/saved_model/"
) # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')*
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False
)
assert feature_extractor.return_attention_mask is False
feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True
)
assert feature_extractor.return_attention_mask is False
assert unused_kwargs == {"foo": False}
```'''
pass
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):
'''
Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the
[`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the feature extractor JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
'''
pass
@classmethod
def get_feature_extractor_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:
'''
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor object.
'''
pass
@classmethod
def from_dict(cls, feature_extractor_dict: dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor:
'''
Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of
parameters.
Args:
feature_extractor_dict (`dict[str, Any]`):
Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
[`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method.
kwargs (`dict[str, Any]`):
Additional parameters from which to initialize the feature extractor object.
Returns:
[`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those
parameters.
'''
pass
def to_dict(self) -> dict[str, Any]:
'''
Serializes this instance to a Python dictionary.
Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
'''
pass
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor:
'''
Instantiates a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] from the path to
a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature_extractor
object instantiated from that JSON file.
'''
pass
def to_json_string(self) -> str:
'''
Serializes this instance to a JSON string.
Returns:
`str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
'''
pass
def to_json_file(self, json_file_path: Union[str, os.PathLike]):
'''
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this feature_extractor instance's parameters will be saved.
'''
pass
def __repr__(self):
pass
@classmethod
def register_for_auto_class(cls, auto_class='AutoFeatureExtractor'):
'''
Register this class with a given auto class. This should only be used for custom feature extractors as the ones
in the library are already mapped with `AutoFeatureExtractor`.
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoFeatureExtractor"`):
The auto class to register this new feature extractor with.
'''
pass
| total_program_units: 18 | total_doc_str: 12 | AvgCountLine: 35 | AvgCountLineBlank: 5 | AvgCountLineCode: 18 | AvgCountLineComment: 13 | AvgCyclomatic: 4 | CommentToCodeRatio: 0.72 |
| CountClassBase: 1 | CountClassCoupled: 10 | CountClassCoupledModified: 0 | CountClassDerived: 2 | CountDeclInstanceMethod: 7 | CountDeclInstanceVariable: 1 | CountDeclMethod: 12 | CountDeclMethodAll: 12 |
| CountLine: 443 | CountLineBlank: 68 | CountLineCode: 218 | CountLineCodeDecl: 71 | CountLineCodeExe: 188 | CountLineComment: 157 | CountStmt: 152 | CountStmtDecl: 51 | CountStmtExe: 138 |
| MaxCyclomatic: 15 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 48 |
| id: 298 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/generation/beam_constraints.py | class_name: transformers.generation.beam_constraints.Constraint |
from abc import ABC, abstractmethod
class Constraint(ABC):
"""Abstract base class for all constraints that can be applied during generation.
It must define how the constraint can be satisfied.
All classes that inherit Constraint must follow the requirement that
```py
completed = False
while not completed:
_, completed = constraint.update(constraint.advance())
```
will always terminate (halt).
"""
def __init__(self):
logger.warning_once('Importing `Constraint` classes is deprecated and will be removed in v4.58.0. Constrained beam search has been moved to the Hub: https://hf.co/transformers-community/constrained-beam-search. Please import using `from transformers.generation import Constraint` instead.')
self.test()
def test(self):
"""
Tests whether this constraint has been properly defined.
"""
counter = 0
completed = False
while not completed:
if counter == 1:
self.reset()
advance = self.advance()
if not self.does_advance(advance):
raise Exception('Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')
stepped, completed, reset = self.update(advance)
counter += 1
if counter > 10000:
raise Exception('update() does not fulfill the constraint.')
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.')
@abstractmethod
def advance(self):
"""
When called, returns the token(s) that would take this constraint one step closer to being fulfilled.
Return:
token_ids (Union[int, list[int], None]):
- A single token ID (int) that advances the constraint, or
- A list of token IDs that could advance the constraint
- None if the constraint is completed or cannot be advanced
"""
raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def does_advance(self, token_id: int):
"""
Reads in a token and returns whether it creates progress.
"""
raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def update(self, token_id: int):
"""
Reads in a token and returns booleans that indicate the progress made by it. This function will update the
state of this object, unlike `does_advance(self, token_id: int)`.
This isn't to test whether a certain token will advance the progress; it's to update its state as if it has
been generated. This becomes important if token_id != desired token (refer to else statement in
PhrasalConstraint)
Args:
token_id(`int`):
The id of a newly generated token in the beam search.
Return:
stepped(`bool`):
Whether this constraint has become one step closer to being fulfilled.
completed(`bool`):
Whether this constraint has been completely fulfilled by this token being generated.
reset (`bool`):
Whether this constraint has reset its progress by this token being generated.
"""
raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def reset(self):
"""
Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of
a constraint is interrupted by an unwanted token.
"""
raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def remaining(self):
"""
Returns the number of remaining steps of `advance()` in order to complete this constraint.
"""
raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def copy(self, stateful=False):
"""
Creates a new instance of this constraint.
Args:
stateful(`bool`): Whether to copy not only the constraint itself but also its current state to the new instance.
Return:
constraint(`Constraint`): The same constraint as the one being called from.
"""
raise NotImplementedError(f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
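A hedged sketch of the contract this ABC defines (not part of the record): `FixedSequenceConstraint` is a toy class of my own naming that forces a fixed token sequence, roughly what the library's `PhrasalConstraint` does. It assumes a `transformers` version that still ships the `Constraint` base class (the class is deprecated and moving to the Hub).
```python
from transformers.generation.beam_constraints import Constraint


class FixedSequenceConstraint(Constraint):
    def __init__(self, token_ids: list[int]):
        self.token_ids = token_ids
        self.seqlen = len(token_ids)
        self.fulfilled_idx = -1      # index of the last matched token
        self.completed = False
        super().__init__()           # runs self.test(), which drives the constraint to completion...
        self.reset()                 # ...so restore a fresh state before real use

    def advance(self):
        return None if self.completed else self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        return not self.completed and token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            self.completed = self.fulfilled_idx == self.seqlen - 1
            return True, self.completed, False   # stepped, completed, reset
        self.reset()                             # wrong token: drop all progress
        return False, False, True

    def reset(self):
        self.fulfilled_idx = -1
        self.completed = False

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new = FixedSequenceConstraint(self.token_ids)
        if stateful:
            new.fulfilled_idx = self.fulfilled_idx
            new.completed = self.completed
        return new


constraint = FixedSequenceConstraint([5, 9, 2])
print(constraint.advance())    # 5: the next token that makes progress
print(constraint.update(5))    # (True, False, False): stepped, not yet completed
print(constraint.remaining())  # 2 tokens still needed
```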
|
class Constraint(ABC):
'''Abstract base class for all constraints that can be applied during generation.
It must define how the constraint can be satisfied.
All classes that inherit Constraint must follow the requirement that
```py
completed = False
while not completed:
_, completed = constraint.update(constraint.advance())
```
will always terminate (halt).
'''
def __init__(self):
pass
def test(self):
'''
Tests whether this constraint has been properly defined.
'''
pass
@abstractmethod
def advance(self):
'''
When called, returns the token(s) that would take this constraint one step closer to being fulfilled.
Return:
token_ids (Union[int, list[int], None]):
- A single token ID (int) that advances the constraint, or
- A list of token IDs that could advance the constraint
- None if the constraint is completed or cannot be advanced
'''
pass
@abstractmethod
def does_advance(self, token_id: int):
'''
Reads in a token and returns whether it creates progress.
'''
pass
@abstractmethod
def update(self, token_id: int):
'''
Reads in a token and returns booleans that indicate the progress made by it. This function will update the
state of this object, unlike `does_advance(self, token_id: int)`.
This isn't to test whether a certain token will advance the progress; it's to update its state as if it has
been generated. This becomes important if token_id != desired token (refer to else statement in
PhrasalConstraint)
Args:
token_id(`int`):
The id of a newly generated token in the beam search.
Return:
stepped(`bool`):
Whether this constraint has become one step closer to being fulfilled.
completed(`bool`):
Whether this constraint has been completely fulfilled by this token being generated.
reset (`bool`):
Whether this constraint has reset its progress by this token being generated.
'''
pass
@abstractmethod
def reset(self):
'''
Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of
a constraint is interrupted by an unwanted token.
'''
pass
@abstractmethod
def remaining(self):
'''
Returns the number of remaining steps of `advance()` in order to complete this constraint.
'''
pass
@abstractmethod
def copy(self, stateful=False):
'''
Creates a new instance of this constraint.
Args:
stateful(`bool`): Whether to copy not only the constraint itself but also its current state to the new instance.
Return:
constraint(`Constraint`): The same constraint as the one being called from.
'''
pass
| total_program_units: 15 | total_doc_str: 8 | AvgCountLine: 12 | AvgCountLineBlank: 1 | AvgCountLineCode: 5 | AvgCountLineComment: 6 | AvgCyclomatic: 2 | CommentToCodeRatio: 1.12 |
| CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 0 | CountClassDerived: 2 | CountDeclInstanceMethod: 8 | CountDeclInstanceVariable: 0 | CountDeclMethod: 8 | CountDeclMethodAll: 28 |
| CountLine: 125 | CountLineBlank: 19 | CountLineCode: 50 | CountLineCodeDecl: 19 | CountLineCodeExe: 35 | CountLineComment: 56 | CountStmt: 30 | CountStmtDecl: 13 | CountStmtExe: 21 |
| MaxCyclomatic: 6 | MaxInheritanceTree: 4 | MaxNesting: 2 | SumCyclomatic: 13 |
| id: 299 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/generation/beam_constraints.py | class_name: transformers.generation.beam_constraints.ConstraintListState |
from typing import Optional
class ConstraintListState:
"""
A class for beam scorers to track their progress through a list of constraints.
Args:
constraints (`list[Constraint]`):
A list of [`Constraint`] objects that must be fulfilled by the beam scorer.
"""
def __init__(self, constraints: list[Constraint]):
self.constraints = constraints
self.max_seqlen = max([c.seqlen for c in constraints])
self.n_constraints = len(constraints)
self.completed = False
self.init_state()
def init_state(self):
self.complete_constraints = []
self.inprogress_constraint = None
self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]
def get_bank(self):
add = 0
if self.inprogress_constraint:
add += self.max_seqlen - self.inprogress_constraint.remaining()
return len(self.complete_constraints) * self.max_seqlen + add
def advance(self):
"""The list of tokens to generate such that we can make progress.
By "list" we don't mean the list of tokens that will fully fulfill a constraint.
Given constraints `c_i = {t_ij | j == # of tokens}`, if we're not in the middle of progressing through a
specific constraint `c_i`, we return:
`[t_k1 for k in indices of unfulfilled constraints]`
If we are in the middle of a constraint, then we return:
`[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint.
Though we don't care which constraint is fulfilled first, if we are in the process of fulfilling a constraint,
that's the only one we'll return.
"""
token_list = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints:
advance = constraint.advance()
if isinstance(advance, int):
token_list.append(advance)
elif isinstance(advance, list):
token_list.extend(advance)
else:
advance = self.inprogress_constraint.advance()
if isinstance(advance, int):
token_list.append(advance)
elif isinstance(advance, list):
token_list.extend(advance)
if len(token_list) == 0:
return None
else:
return token_list
def reset(self, token_ids: Optional[list[int]]):
"""
token_ids: the tokens generated thus far to reset the state of the progress through constraints.
"""
self.init_state()
if token_ids is not None:
for token in token_ids:
complete, stepped = self.add(token)
if self.completed:
break
def add(self, token_id: int):
if not isinstance(token_id, int):
raise TypeError(f'`token_id` should be an `int`, but is `{token_id}`.')
complete, stepped = (False, False)
if self.completed:
complete = True
stepped = False
return (complete, stepped)
if self.inprogress_constraint is not None:
stepped, complete, reset = self.inprogress_constraint.update(token_id)
if reset:
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
self.inprogress_constraint = None
if complete:
self.complete_constraints.append(self.inprogress_constraint)
self.inprogress_constraint = None
if len(self.pending_constraints) == 0:
self.completed = True
else:
for cidx, pending_constraint in enumerate(self.pending_constraints):
if pending_constraint.does_advance(token_id):
stepped, complete, reset = pending_constraint.update(token_id)
if not stepped:
raise Exception('`constraint.update(token_id)` is not yielding incremental progress, even though `constraint.does_advance(token_id)` is true.')
if complete:
self.complete_constraints.append(pending_constraint)
self.inprogress_constraint = None
if not complete and stepped:
self.inprogress_constraint = pending_constraint
if complete or stepped:
self.pending_constraints = self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1:]
if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
self.completed = True
break
return (complete, stepped)
def copy(self, stateful=True):
new_state = ConstraintListState(self.constraints)
if stateful:
new_state.complete_constraints = [constraint.copy(stateful=True) for constraint in self.complete_constraints]
if self.inprogress_constraint is not None:
new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
return new_state
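A hedged usage sketch of the tracker above (not part of the record): it assumes a `transformers` version that still ships `PhrasalConstraint` alongside `ConstraintListState` (constrained beam search is deprecated in newer releases and has moved to the Hub).
```python
from transformers.generation.beam_constraints import ConstraintListState, PhrasalConstraint

# Two phrases a (hypothetical) beam is required to generate eventually.
constraints = [PhrasalConstraint([11, 12]), PhrasalConstraint([7])]
state = ConstraintListState(constraints)

print(state.advance())   # tokens that would make progress, e.g. [11, 7]
state.add(11)            # starts fulfilling the first phrase
print(state.get_bank())  # partial credit for the in-progress constraint
state.add(12)            # first phrase complete
state.add(7)             # second phrase complete
print(state.completed)   # True once every constraint is fulfilled
```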
|
class ConstraintListState:
'''
A class for beam scorers to track their progress through a list of constraints.
Args:
constraints (`list[Constraint]`):
A list of [`Constraint`] objects that must be fulfilled by the beam scorer.
'''
def __init__(self, constraints: list[Constraint]):
pass
def init_state(self):
pass
def get_bank(self):
pass
def advance(self):
'''The list of tokens to generate such that we can make progress.
By "list" we don't mean the list of tokens that will fully fulfill a constraint.
Given constraints `c_i = {t_ij | j == # of tokens}`, if we're not in the middle of progressing through a
specific constraint `c_i`, we return:
`[t_k1 for k in indices of unfulfilled constraints]`
If we are in the middle of a constraint, then we return:
`[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint.
Though we don't care which constraint is fulfilled first, if we are in the process of fulfilling a constraint,
that's the only one we'll return.
'''
pass
def reset(self, token_ids: Optional[list[int]]):
'''
token_ids: the tokens generated thus far to reset the state of the progress through constraints.
'''
pass
def add(self, token_id: int):
pass
def copy(self, stateful=True):
pass
| total_program_units: 8 | total_doc_str: 3 | AvgCountLine: 22 | AvgCountLineBlank: 5 | AvgCountLineCode: 13 | AvgCountLineComment: 5 | AvgCyclomatic: 5 | CommentToCodeRatio: 0.46 |
| CountClassBase: 0 | CountClassCoupled: 6 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 7 | CountDeclInstanceVariable: 7 | CountDeclMethod: 7 | CountDeclMethodAll: 7 |
| CountLine: 171 | CountLineBlank: 40 | CountLineCode: 92 | CountLineCodeDecl: 25 | CountLineCodeExe: 84 | CountLineComment: 42 | CountStmt: 80 | CountStmtDecl: 25 | CountStmtExe: 72 |
| MaxCyclomatic: 14 | MaxInheritanceTree: 0 | MaxNesting: 5 | SumCyclomatic: 33 |
|