repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_beam_search.py | # coding=utf-8
# Copyright (c) 2019 Yang Liu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A general wrapper around models with LM heads to generate sequences
using beam search.
"""
import torch
from torch import nn
class TransformerBeamSearch(nn.Module):
    """Beam-search decoding wrapper around an encoder/decoder model with an LM head.

    Keeps ``batch_size * beam_size`` growing hypotheses. At every step the
    ``beam_size`` best (previous beam, next token) continuations per batch
    element survive; batch elements whose top beam emits EOS are pruned, so
    the working batch shrinks as the search progresses.
    """

    def __init__(
        self,
        model,
        tokenizer,
        batch_size,
        beam_size,
        min_length,
        max_length,
        alpha=0,
        block_repeating_trigram=True,
    ):
        """
        Args:
            model: encoder/decoder model exposing ``.encoder`` and ``.decoder``.
            tokenizer: must expose ``start_token_id``, ``end_token_id`` and
                ``pad_token_id`` attributes.
            batch_size: number of source sequences decoded together.
            beam_size: number of hypotheses kept per batch element.
            min_length: EOS is forbidden before this many generated tokens.
            max_length: decoding stops after this many steps.
            alpha: length-penalty exponent (0 disables the penalty).
            block_repeating_trigram: if True, zero out continuations that
                would repeat an already-generated trigram.
        """
        super(TransformerBeamSearch, self).__init__()
        self.model = model
        self.tokenizer = tokenizer
        self.start_token_id = tokenizer.start_token_id
        self.end_token_id = tokenizer.end_token_id
        self.pad_token_id = tokenizer.pad_token_id
        # FIX: batch_size was never stored, but forward() reads self.batch_size.
        self.batch_size = batch_size
        self.beam_size = beam_size
        self.min_length = min_length
        self.max_length = max_length
        self.block_repeating_trigram = block_repeating_trigram
        self.apply_length_penalty = False if alpha == 0 else True
        self.alpha = alpha

        # State of the beam
        self.hypotheses = [[] for _ in range(batch_size)]
        # Maps the (shrinking) working batch rows back to original batch rows.
        self.batch_offset = torch.arange(batch_size, dtype=torch.long)
        # Row offset of the first beam of each batch element in the flat view.
        self.beam_offset = torch.arange(
            0, batch_size * self.beam_size, step=self.beam_size, dtype=torch.long
        )
        self.growing_beam = torch.full(
            (batch_size * self.beam_size, 1), self.start_token_id, dtype=torch.long
        )
        # Only the first beam of each element starts "alive" (score 0); the
        # others are -inf so the first step expands a single hypothesis.
        self.topk_log_probabilities = torch.tensor(
            [0.0] + [float("-inf")] * (self.beam_size - 1), dtype=torch.float
        ).repeat(batch_size)
        # FIX: `for _ in batch_size` iterated an int (TypeError) and the key
        # "prediction" did not match the "predictions" key used in step().
        self.results = {
            "predictions": [[] for _ in range(batch_size)],
            "scores": [[] for _ in range(batch_size)],
        }
        self._step = 0
        self.is_done = False

    def step(self, log_probabilities):
        """Grows the beam by one step.

        Args:
            log_probabilities: float tensor of shape
                ``(current_batch * beam_size, vocab_size)`` holding next-token
                log-probabilities for every live beam. Modified in place.

        Returns:
            LongTensor with the row indices (in the pre-pruning flat layout)
            of the surviving beams; callers use it to reorder per-beam state
            such as the tiled encoder hidden states.
        """
        self._step += 1

        # The batch size changes as some beams finish so we define _B
        vocab_size = log_probabilities.size(-1)
        _B = log_probabilities.size(0) // self.beam_size

        # Multiply each beam probability with the probability of the
        # next token (conditioned on the words in the beam).
        log_probabilities += self.topk_log_probabilities.view(-1, 1)

        self.enforce_min_length(log_probabilities)
        if self.block_repeating_trigram:
            self.remove_repeating_trigrams(log_probabilities, _B)

        # Find the `beam_size` (previous_beam + token) combinations with the
        # highest score.
        # FIX: topk() was called with the reshaped tensor as the `k` argument;
        # the flattening must happen first and `beam_size` is `k`.
        topk_log_probabilities, topk_ids = log_probabilities.view(
            _B, self.beam_size * vocab_size
        ).topk(self.beam_size, dim=1)

        # Apply the length penalty (identity when alpha == 0).
        topk_scores = topk_log_probabilities / self.length_penalty()

        # Retrieve the corresponding respective beam and token id
        # topk_token_ids[i] will be added to topk_beam_ids[i]
        # FIX: use floor division; true division yields float indices.
        topk_beam_ids = topk_ids // vocab_size
        topk_token_ids = topk_ids.fmod(vocab_size)

        # Retrieve the row index of the surviving beams in the original
        # view of the log_probabilities tensor
        surviving_beams_rows = (topk_beam_ids + self.beam_offset[:_B].view(-1, 1)).view(
            -1
        )

        # Append the last predictions
        self.growing_beam = torch.cat(
            [
                self.growing_beam.index_select(0, surviving_beams_rows),
                topk_token_ids.view(-1, 1),
            ],
            1,
        )

        # Check if any of the beam searches has ended during this
        # growth step. Also if top beam (most probable) has ended
        # for one element of the batch.
        is_finished = topk_token_ids.eq(self.end_token_id)
        self.enforce_max_length(is_finished)
        is_top_beam_finished = is_finished[:, 0].eq(1)

        # Save the finished searches
        if is_finished.any():
            predictions = self.growing_beam.view(
                -1, self.beam_size, self.growing_beam.size(1)
            )
            for i in range(is_finished.size(0)):
                if is_top_beam_finished[i]:
                    is_finished[i].fill_(1)
                finished_hyp = is_finished[i].nonzero().view(-1)

                # Store finished hypotheses for this batch.
                b = self.batch_offset[i]
                for j in finished_hyp:
                    self.hypotheses[b].append((topk_scores[i, j], predictions[i, j, :]))

                # If the batch reached the end, save the best hypotheses
                # in terms of length-penalized score.
                if is_top_beam_finished[i]:
                    best_hyp = sorted(
                        self.hypotheses[b], key=lambda x: x[0], reverse=True
                    )
                    best_score, best_prediction = best_hyp[0]
                    self.results["scores"][b].append(best_score)
                    self.results["predictions"][b].append(best_prediction)

            non_finished = is_top_beam_finished.eq(0).nonzero().view(-1)
            if len(non_finished) == 0:
                self.is_done = True

            # Remove finished batches for the next step.
            topk_log_probabilities = topk_log_probabilities.index_select(
                0, non_finished
            )
            self.batch_offset = self.batch_offset.index_select(0, non_finished)
            self.growing_beam = predictions.index_select(0, non_finished).view(
                -1, self.growing_beam.size(-1)
            )
            # FIX: surviving_beams_rows has one entry per *beam*; select the
            # beam rows of the surviving batch elements instead of indexing
            # with raw batch indices.
            surviving_beams_rows = (
                surviving_beams_rows.view(_B, self.beam_size)
                .index_select(0, non_finished)
                .view(-1)
            )

        # FIX: persist the accumulated beam scores for the next step; the
        # original never wrote them back, so scores never accumulated.
        self.topk_log_probabilities = topk_log_probabilities.view(-1)
        return surviving_beams_rows

    def forward(self, encoder_input_ids, **kwargs):
        """Encode once, then decode autoregressively with beam search.

        Keyword arguments come in 3 flavors: encoder-specific (prefixed by
        `encoder_`), decoder-specific (prefixed by `decoder_`) and those
        that apply to the model as whole. The specific kwargs override the
        common ones in case of conflict.
        """
        kwargs_encoder = {
            argument[len("encoder_"):]: value
            for argument, value in kwargs.items()
            if argument.startswith("encoder_")
        }
        kwargs_decoder = {
            argument[len("decoder_"):]: value
            for argument, value in kwargs.items()
            if argument.startswith("decoder_")
        }
        kwargs_common = {
            argument: value
            for argument, value in kwargs.items()
            if not (argument.startswith("encoder_") or argument.startswith("decoder_"))
        }
        kwargs_decoder = dict(kwargs_common, **kwargs_decoder)
        kwargs_encoder = dict(kwargs_common, **kwargs_encoder)

        # Forward pass on the encoder. Hidden states are duplicated
        # `beam_size` times so each beam attends to its own copy.
        # FIX: kwargs were passed as one positional dict; unpack them, and
        # call the module (not .forward) so hooks run.
        encoder_outputs = self.model.encoder(encoder_input_ids, **kwargs_encoder)
        kwargs_decoder["encoder_hidden_states"] = tile(
            encoder_outputs, self.beam_size, dim=0
        )

        # grow the beam by generating sequences in an autoregressive way
        self.growing_beam = torch.full(
            (self.batch_size * self.beam_size, 1), self.start_token_id, dtype=torch.long
        )
        for _ in range(self.max_length):
            decoder_input = self.growing_beam[:, -1]
            outputs = self.model.decoder(decoder_input, **kwargs_decoder)
            # FIX: log_softmax requires an explicit dim.
            log_probabilities = torch.nn.functional.log_softmax(outputs[1], dim=-1)
            surviving_beams_rows = self.step(log_probabilities)
            if self.is_done:
                break
            kwargs_decoder["encoder_hidden_states"] = kwargs_decoder[
                "encoder_hidden_states"
            ].index_select(0, surviving_beams_rows)
        return self.results

    def remove_repeating_trigrams(self, log_probabilities, _B):
        """Zero out beams whose latest trigram already occurred in them."""
        if self._step + 1 > 3:
            for i in range(_B * self.beam_size):
                # FIX: `words` was an undefined name, and the comprehension
                # index shadowed the outer beam index `i`.
                tokens = [t.item() for t in self.growing_beam[i]]
                trigrams = [
                    (tokens[j - 1], tokens[j], tokens[j + 1])
                    for j in range(1, len(tokens) - 1)
                ]
                last_trigram = trigrams[-1]
                if last_trigram in trigrams[:-1]:
                    log_probabilities[i] = -1e20

    def enforce_min_length(self, log_probabilities):
        """Forbid EOS for every beam until `min_length` tokens were generated."""
        # FIX: the call site passes log_probabilities but the method took no
        # argument and read a nonexistent self.log_probabilities; it also
        # must mask the EOS *column*, not a row.
        if self._step < self.min_length:
            log_probabilities[:, self.end_token_id] = -1e20

    def enforce_max_length(self, is_finished):
        """Mark every beam finished once the length budget is exhausted."""
        # FIX: the original read a nonexistent self.is_finished; the mask is
        # local to step(), so it is passed in explicitly.
        if self._step + 1 == self.max_length:
            is_finished.fill_(1)

    def length_penalty(self):
        # Google NMT-style penalty: ((5 + len) / 6) ** alpha; equals 1 when
        # alpha == 0, so applying it unconditionally is safe.
        return ((5.0 + (self._step + 1)) / 6.0) ** self.alpha
def tile(x, count, dim=0):
    """
    Tiles `x` along dimension `dim` `count` times.

    Each slice along `dim` is repeated `count` times *consecutively*
    (equivalent to ``torch.repeat_interleave(x, count, dim=dim)``).

    Example::

        >>> ex = torch.tensor([[1, 2], [3, 4]])
        >>> tile(ex, 2, 0)
        tensor([[1, 2],
                [1, 2],
                [3, 4],
                [3, 4]])
    """
    # Move `dim` to the front so the repetition logic only deals with dim 0.
    perm = list(range(len(x.size())))
    if dim != 0:
        perm[0], perm[dim] = perm[dim], perm[0]
        x = x.permute(perm).contiguous()
    out_size = list(x.size())
    out_size[0] *= count
    batch = x.size(0)
    # Flatten, transpose so rows interleave, repeat, and restore the shape:
    # this repeats each leading slice `count` times back-to-back.
    x = (
        x.view(batch, -1)
        .transpose(0, 1)
        .repeat(count, 1)
        .transpose(0, 1)
        .contiguous()
        .view(*out_size)
    )
    # Swapping the same two axes again is its own inverse.
    if dim != 0:
        x = x.permute(perm).contiguous()
    return x
| 10,385 | 37.183824 | 100 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import fnmatch
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from dataclasses import fields
from functools import partial, wraps
from hashlib import sha256
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import numpy as np
import requests
from filelock import FileLock
from tqdm.auto import tqdm
from . import __version__
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
# --- Backend availability probes -------------------------------------------
# USE_TF / USE_TORCH environment variables select which framework(s) this
# library exposes. With the default "AUTO", a framework is enabled when it is
# importable unless the *other* framework was explicitly requested.
try:
    USE_TF = os.environ.get("USE_TF", "AUTO").upper()
    USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
    if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"):
        import torch
        _torch_available = True  # pylint: disable=invalid-name
        logger.info("PyTorch version {} available.".format(torch.__version__))
    else:
        logger.info("Disabling PyTorch because USE_TF is set")
        _torch_available = False
except ImportError:
    _torch_available = False  # pylint: disable=invalid-name
try:
    USE_TF = os.environ.get("USE_TF", "AUTO").upper()
    USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
    if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"):
        import tensorflow as tf
        # Only TensorFlow 2.x is supported; the AssertionError is caught below.
        assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2
        _tf_available = True  # pylint: disable=invalid-name
        logger.info("TensorFlow version {} available.".format(tf.__version__))
    else:
        logger.info("Disabling Tensorflow because USE_TORCH is set")
        _tf_available = False
except (ImportError, AssertionError):
    _tf_available = False  # pylint: disable=invalid-name
# Cache root: reuse torch's cache directory when torch is installed, else
# fall back to $TORCH_HOME or $XDG_CACHE_HOME/torch (default ~/.cache/torch).
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
# Optional extras, each degrading gracefully when missing:
# torch_xla -> TPU support (only meaningful when torch itself is available).
try:
    import torch_xla.core.xla_model as xm  # noqa: F401
    if _torch_available:
        _torch_tpu_available = True  # pylint: disable=invalid-name
    else:
        _torch_tpu_available = False
except ImportError:
    _torch_tpu_available = False
# psutil -> process/CPU memory metrics.
try:
    import psutil  # noqa: F401
    _psutil_available = True
except ImportError:
    _psutil_available = False
# py3nvml -> NVIDIA GPU metrics.
try:
    import py3nvml  # noqa: F401
    _py3nvml_available = True
except ImportError:
    _py3nvml_available = False
# apex -> NVIDIA mixed-precision (AMP) training.
try:
    from apex import amp  # noqa: F401
    _has_apex = True
except ImportError:
    _has_apex = False
default_cache_path = os.path.join(torch_cache_home, "transformers")
# Cache location, overridable via environment variables; the three names are
# kept for backward compatibility (oldest first), each defaulting to the next.
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
# Canonical file names for serialized weights, configuration and model cards.
WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF_WEIGHTS_NAME = "model.ckpt"
CONFIG_NAME = "config.json"
MODEL_CARD_NAME = "modelcard.json"
# Dummy inputs used to build/trace models without real data.
MULTIPLE_CHOICE_DUMMY_INPUTS = [[[0], [1]], [[0], [1]]]
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
# Remote storage endpoints: the S3 origin and its CloudFront CDN mirror.
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
def is_torch_available():
    """Return True if PyTorch was importable (and not disabled) at load time."""
    return _torch_available
def is_tf_available():
    """Return True if TensorFlow 2.x was importable (and not disabled) at load time."""
    return _tf_available
def is_torch_tpu_available():
    """Return True if both PyTorch and torch_xla (TPU support) are available."""
    return _torch_tpu_available
def is_psutil_available():
    """Return True if the optional psutil package is installed."""
    return _psutil_available
def is_py3nvml_available():
    """Return True if the optional py3nvml package is installed."""
    return _py3nvml_available
def is_apex_available():
    """Return True if NVIDIA apex (AMP) is installed."""
    return _has_apex
def add_start_docstrings(*docstr):
    """Decorator factory that prepends the given strings to a function's docstring.

    Functions without a docstring are treated as having an empty one.
    """
    def docstring_decorator(fn):
        existing = fn.__doc__ if fn.__doc__ is not None else ""
        fn.__doc__ = "".join(docstr) + existing
        return fn
    return docstring_decorator
def add_start_docstrings_to_callable(*docstr):
    """Decorator factory for model ``forward`` methods: prepends an intro
    naming the enclosing class plus a usage note to the wrapped docstring."""
    def docstring_decorator(fn):
        # fn.__qualname__ is e.g. "BertModel.forward"; keep the class part.
        class_name = ":class:`~transformers.{}`".format(fn.__qualname__.split(".")[0])
        intro = " The {} forward method, overrides the :func:`__call__` special method.".format(class_name)
        note = r"""
    .. note::
        Although the recipe for forward pass needs to be defined within
        this function, one should call the :class:`Module` instance afterwards
        instead of this since the former takes care of running the
        pre and post processing steps while the latter silently ignores them.
    """
        # Treat a missing docstring as empty rather than concatenating None.
        fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
        return fn
    return docstring_decorator
def add_end_docstrings(*docstr):
    """Decorator factory that appends the given strings to a function's docstring.

    FIX: like its sibling ``add_start_docstrings``, tolerates functions whose
    ``__doc__`` is None instead of raising ``TypeError`` on concatenation.
    """
    def docstring_decorator(fn):
        fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr)
        return fn
    return docstring_decorator
PT_RETURN_INTRODUCTION = r"""
Returns:
:class:`~{full_output_type}` or :obj:`tuple(torch.FloatTensor)`:
A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a
tuple of :obj:`torch.FloatTensor` comprising various elements depending on the configuration
(:class:`~transformers.{config_class}`) and inputs.
"""
TF_RETURN_INTRODUCTION = r"""
Returns:
:class:`~{full_output_type}` or :obj:`tuple(tf.Tensor)`:
A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a
tuple of :obj:`tf.Tensor` comprising various elements depending on the configuration
(:class:`~transformers.{config_class}`) and inputs.
"""
def _get_indent(t):
"""Returns the indentation in the first line of t"""
search = re.search(r"^(\s*)\S", t)
return "" if search is None else search.groups()[0]
def _convert_output_args_doc(output_args_doc):
    """Convert output_args_doc to display properly."""
    # Split output_arg_doc in blocks argument/description: a line indented
    # exactly like the first line starts a new argument; deeper-indented
    # lines continue the current argument's description.
    indent = _get_indent(output_args_doc)
    blocks = []
    current_block = ""
    for line in output_args_doc.split("\n"):
        # If the indent is the same as the beginning, the line is the name of new arg.
        if _get_indent(line) == indent:
            if len(current_block) > 0:
                # Drop the trailing newline accumulated on the block.
                blocks.append(current_block[:-1])
            current_block = f"{line}\n"
        else:
            # Otherwise it's part of the description of the current arg.
            # We need to remove 2 spaces to the indentation.
            current_block += f"{line[2:]}\n"
    blocks.append(current_block[:-1])
    # Format each block for proper rendering: bold the argument name and turn
    # the "name:\n description" layout into "name -- description".
    for i in range(len(blocks)):
        blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
        blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])
    return "\n".join(blocks)
def _prepare_output_docstrings(output_type, config_class):
    """
    Prepares the return part of the docstring using `output_type`.
    """
    docstrings = output_type.__doc__
    # Remove the head of the docstring to keep the list of args only:
    # skip everything up to (and including) the "Args:"/"Parameters:" line.
    lines = docstrings.split("\n")
    i = 0
    while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
        i += 1
    if i < len(lines):
        docstrings = "\n".join(lines[(i + 1) :])
        docstrings = _convert_output_args_doc(docstrings)
    # Add the return introduction; TF output classes are named "TF...".
    full_output_type = f"{output_type.__module__}.{output_type.__name__}"
    intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
    intro = intro.format(full_output_type=full_output_type, config_class=config_class)
    return intro + docstrings
PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
PT_QUESTION_ANSWERING_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
>>> start_scores = outputs.start_scores
>>> end_scores = outputs.end_scores
"""
PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
PT_MASKED_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> input_ids = tokenizer("Hello, my dog is cute", return_tensors="pt")["input_ids"]
>>> outputs = model(input_ids, labels=input_ids)
>>> loss = outputs.loss
>>> prediction_logits = outputs.logits
"""
PT_BASE_MODEL_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
PT_MULTIPLE_CHOICE_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True)
>>> outputs = model(**{{k: v.unsqueeze(0) for k,v in encoding.items()}}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
PT_CAUSAL_LM_SAMPLE = r"""
Example::
>>> import torch
>>> from transformers import {tokenizer_class}, {model_class}
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> input_ids = inputs["input_ids"]
>>> inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1
>>> outputs = model(inputs)
>>> loss, scores = outputs[:2]
"""
TF_QUESTION_ANSWERING_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> input_dict = tokenizer(question, text, return_tensors='tf')
>>> start_scores, end_scores = model(input_dict)
>>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0])
>>> answer = ' '.join(all_tokens[tf.math.argmax(start_scores, 1)[0] : tf.math.argmax(end_scores, 1)[0]+1])
"""
TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1
>>> outputs = model(inputs)
>>> loss, logits = outputs[:2]
"""
TF_MASKED_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores = outputs[0]
"""
TF_BASE_MODEL_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
TF_MULTIPLE_CHOICE_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='tf', padding=True)
>>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}
>>> outputs = model(inputs) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> logits = outputs[0]
"""
TF_CAUSAL_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> logits = outputs[0]
"""
def add_code_sample_docstrings(*docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None):
    """Decorator factory: append a framework- and task-appropriate usage
    example (plus an optional return-value section) to a model docstring."""
    def docstring_decorator(fn):
        # fn.__qualname__ is e.g. "TFBertForMaskedLM.call"; the class name
        # encodes both the framework ("TF" prefix) and the task suffix.
        model_class = fn.__qualname__.split(".")[0]
        is_tf_class = model_class[:2] == "TF"
        # Most specific task suffixes are checked first; "Model" is the
        # generic fallback.
        if "SequenceClassification" in model_class:
            code_sample = TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE
        elif "QuestionAnswering" in model_class:
            code_sample = TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE
        elif "TokenClassification" in model_class:
            code_sample = TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE
        elif "MultipleChoice" in model_class:
            code_sample = TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE
        elif "MaskedLM" in model_class:
            code_sample = TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE
        elif "LMHead" in model_class:
            code_sample = TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE
        elif "Model" in model_class:
            code_sample = TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE
        else:
            raise ValueError(f"Docstring can't be built for model {model_class}")
        output_doc = _prepare_output_docstrings(output_type, config_class) if output_type is not None else ""
        built_doc = code_sample.format(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)
        fn.__doc__ = (fn.__doc__ or "") + "".join(docstr) + output_doc + built_doc
        return fn
    return docstring_decorator
def replace_return_docstrings(output_type=None, config_class=None):
    """Decorator factory: replace the empty ``Return:``/``Returns:`` placeholder
    line in a docstring with the generated return-value documentation for
    `output_type`; raises ValueError when no placeholder is present."""
    def docstring_decorator(fn):
        docstrings = fn.__doc__
        lines = docstrings.split("\n")
        # Locate the placeholder line.
        i = 0
        while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
            i += 1
        if i < len(lines):
            lines[i] = _prepare_output_docstrings(output_type, config_class)
            docstrings = "\n".join(lines)
        else:
            raise ValueError(
                f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, current docstring is:\n{docstrings}"
            )
        fn.__doc__ = docstrings
        return fn
    return docstring_decorator
def is_remote_url(url_or_filename):
    """Return True when the argument parses as an http(s) URL."""
    scheme = urlparse(url_or_filename).scheme
    return scheme == "http" or scheme == "https"
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    """
    Resolve a model identifier, and a file name, to a HF-hosted url
    on either S3 or Cloudfront (a Content Delivery Network, or CDN).

    Cloudfront is replicated over the globe so downloads are way faster
    for the end user (and it also lowers our bandwidth costs). However, it
    is more aggressively cached by default, so may not always reflect the
    latest changes to the underlying file (default TTL is 24 hours).

    In terms of client-side caching from this library, even though
    Cloudfront relays the ETags from S3, using one or the other
    (or switching from one to the other) will affect caching: cached files
    are not shared between the two because the cached file's name contains
    a hash of the url.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    # Legacy (un-namespaced) model ids use a dash separator; namespaced
    # "org/model" ids get a path component.
    legacy_format = "/" not in model_id
    if legacy_format:
        # FIX: `filename` was never interpolated — both branches emitted the
        # literal string "(unknown)" instead of the requested file name.
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name
    so that TF 2.0 can identify it as a HDF5 file
    (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
    """
    parts = [sha256(url.encode("utf-8")).hexdigest()]
    if etag:
        parts.append(sha256(etag.encode("utf-8")).hexdigest())
    filename = ".".join(parts)
    # Keep the .h5 suffix visible so Keras recognizes the cached file.
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    cache_dir = TRANSFORMERS_CACHE if cache_dir is None else cache_dir
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise EnvironmentError("file {} not found".format(cache_path))
    # Sidecar JSON written at download time holds the origin url and etag.
    meta_path = cache_path + ".json"
    if not os.path.exists(meta_path):
        raise EnvironmentError("file {} not found".format(meta_path))
    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata["url"], metadata["etag"]
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent: Union[Dict, str, None] = None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
) -> Optional[str]:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    Args:
        cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
        force_download: if True, re-download the file even if it's already cached in the cache dir.
        resume_download: if True, resume the download if an incompletely received file is found.
        user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
        extract_compressed_file: if True and the path points to a zip or tar file, extract the compressed
            file in a folder along the archive.
        force_extract: if True when extract_compressed_file is True and the archive was already extracted,
            re-extract the archive and override the folder where it was extracted.
    Return:
        None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
        Local path (string) otherwise
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        # Non-archive files are returned unchanged.
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        # Reuse a previous non-empty extraction unless forced.
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            # Start from a clean directory so stale files don't linger.
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent: Union[Dict, str, None] = None):
    """Stream the content at ``url`` into the open binary file object ``temp_file``.

    When ``resume_size`` is non-zero a ``Range`` header is sent so an interrupted
    download can be resumed; ``user_agent`` (dict or string) is appended to the
    library's default user-agent header. Progress is reported with tqdm.
    """
    # Build the user-agent string: library/python versions plus optional framework tags.
    agent_parts = ["transformers/{}; python/{}".format(__version__, sys.version.split()[0])]
    if is_torch_available():
        agent_parts.append("torch/{}".format(torch.__version__))
    if is_tf_available():
        agent_parts.append("tensorflow/{}".format(tf.__version__))
    if isinstance(user_agent, dict):
        agent_parts.extend("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        agent_parts.append(user_agent)
    headers = {"user-agent": "; ".join(agent_parts)}
    if resume_size > 0:
        # Ask the server for the remaining bytes only.
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable: nothing left to fetch
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
        disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent: Union[Dict, str, None] = None,
    local_files_only=False,
) -> Optional[str]:
    """
    Given a URL, look for the corresponding file in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Args:
        url: location of the file to fetch.
        cache_dir: cache directory; defaults to ``TRANSFORMERS_CACHE``.
        force_download: re-download even when a cached copy exists.
        proxies: forwarded to ``requests``.
        etag_timeout: timeout in seconds for the HEAD request fetching the ETag.
        resume_download: resume a previous partial download from ``*.incomplete``.
        user_agent: extra user-agent information forwarded to ``http_get``.
        local_files_only: never hit the network; only look in the local cache.

    Return:
        None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
        Local path (string) otherwise
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    # Probe the remote ETag so cache entries are keyed per file version.
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            # Fall back to any cached variant of this file (other ETag suffixes),
            # excluding the metadata and lock companions.
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"
            @contextmanager
            def _resumable_file_manager():
                # Append mode preserves previously downloaded bytes for resuming.
                with open(incomplete_path, "a+b") as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
        logger.info("storing %s in cache at %s", url, cache_path)
        os.replace(temp_file.name, cache_path)
        logger.info("creating metadata file for %s", cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
class cached_property(property):
    """Descriptor behaving like ``@property`` but computing the value only once.

    The result of the first access is stored on the instance under a private
    attribute and returned directly on later accesses.
    Copied from tensorflow_datasets; available as ``functools.cached_property``
    from Python 3.8 onwards.
    """
    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        # Class-level access returns the descriptor itself, mirroring ``property``.
        if obj is None:
            return self
        getter = self.fget
        if getter is None:
            raise AttributeError("unreadable attribute")
        cache_attr = "__cached_" + getter.__name__
        value = getattr(obj, cache_attr, None)
        if value is None:
            value = getter(obj)
            setattr(obj, cache_attr, value)
        return value
def torch_required(func):
    """Decorator making ``func`` raise ImportError at call time when PyTorch is absent."""
    # Chose a different decorator name than in tests so it's clear they are not the same.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not is_torch_available():
            raise ImportError(f"Method `{func.__name__}` requires PyTorch.")
        return func(*args, **kwargs)
    return wrapper
def tf_required(func):
    """Decorator making ``func`` raise ImportError at call time when TensorFlow is absent."""
    # Chose a different decorator name than in tests so it's clear they are not the same.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not is_tf_available():
            raise ImportError(f"Method `{func.__name__}` requires TF.")
        return func(*args, **kwargs)
    return wrapper
def is_tensor(x):
    """ Tests if ``x`` is a :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`. """
    # Frameworks are imported lazily and only probed when installed, so this
    # helper also works in torch-only or tf-only environments.
    if is_torch_available():
        import torch
        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x, tf.Tensor):
            return True
    # numpy is an unconditional dependency, so no availability check is needed.
    return isinstance(x, np.ndarray)
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a ``__getitem__`` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionnary) that will ignore the ``None`` attributes. Otherwise behaves like a
    regular python dictionary.
    .. warning::
        You can't unpack a :obj:`ModelOutput` directly. Use the :meth:`~transformers.file_utils.ModelOutput.to_tuple`
        method to convert it to a tuple before.
    """
    def __post_init__(self):
        # Populate the underlying OrderedDict from the dataclass fields so that
        # attribute access and key access expose the same (non-None) values.
        class_fields = fields(self)
        # Safety and consistency checks
        assert len(class_fields), f"{self.__class__.__name__} has no fields."
        assert all(
            field.default is None for field in class_fields[1:]
        ), f"{self.__class__.__name__} should not have more than one required field."
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            # Only the first field is set and it is not a tensor: it may be a
            # (key, value) iterable meant to initialise the whole mapping.
            try:
                iterator = iter(first_field)
                first_field_iterator = True
            except TypeError:
                first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for element in iterator:
                    # Stop on the first element that is not a (str, value) pair.
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
        else:
            # Regular case: copy every non-None dataclass field into the dict.
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        # Mutation methods are disabled so the dict view cannot drift out of
        # sync with the dataclass attributes.
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
    def __getitem__(self, k):
        # String keys behave like a dict lookup; integers/slices like a tuple.
        if isinstance(k, str):
            inner_dict = {k: v for (k, v) in self.items()}
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def to_tuple(self) -> Tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not ``None``.
        """
        return tuple(self[k] for k in self.keys())
| 36,425 | 35.244776 | 150 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_ctrl.py | # coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 CTRL model."""
import logging
import numpy as np
import tensorflow as tf
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, keras_serializable, shape_list
logger = logging.getLogger(__name__)
# Map from pretrained shortcut name to the hosted TF checkpoint URL.
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP = {"ctrl": "https://s3.amazonaws.com/models.huggingface.co/bert/ctrl-tf_model.h5"}
def angle_defn(pos, i, d_model_size):
    """Return the sinusoid angle arguments ``pos / 10000**(2*(i//2)/d_model_size)``.

    ``pos`` and ``i`` broadcast against each other (typically a column of
    positions against a row of dimension indices).
    """
    inv_freq = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model_size))
    return pos * inv_freq
def positional_encoding(position, d_model_size):
    """Build the ``(position, d_model_size)`` sinusoidal positional-encoding table."""
    positions = np.arange(position)[:, np.newaxis]
    dims = np.arange(d_model_size)[np.newaxis, :]
    angle_rads = angle_defn(positions, dims, d_model_size)
    # First half of the feature axis carries sines of the even-indexed angles,
    # second half carries cosines of the odd-indexed angles.
    table = np.concatenate([np.sin(angle_rads[:, 0::2]), np.cos(angle_rads[:, 1::2])], axis=-1)
    return tf.cast(table, dtype=tf.float32)
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
    """Scaled dot-product attention with optional causal, padding and head masks.

    Returns a tuple ``(context, attention_weights)``.
    """
    # Raw similarity scores, scaled by sqrt(key depth) for numerical stability.
    key_depth = tf.cast(shape_list(k)[-1], tf.float32)
    logits = tf.matmul(q, k, transpose_b=True) / tf.math.sqrt(key_depth)
    if mask is not None:
        # Causal mask: push future positions towards -inf before the softmax.
        logits = logits + mask * -1e4
    if attention_mask is not None:
        # Additive padding mask prepared by the caller.
        logits = logits + attention_mask
    weights = tf.nn.softmax(logits, axis=-1)
    if head_mask is not None:
        # Zero out attention for pruned/masked heads.
        weights = weights * head_mask
    context = tf.matmul(weights, v)
    return context, weights
class TFMultiHeadAttention(tf.keras.layers.Layer):
    # Multi-head attention used by CTRL: projects q/k/v, splits into heads,
    # applies scaled dot-product attention with key/value caching, re-projects.
    # NOTE: sub-layer names ("Wq", "Wk", "Wv", "dense") map to checkpoint weights.
    def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):
        super().__init__(**kwargs)
        self.output_attentions = output_attentions
        self.num_heads = num_heads
        self.d_model_size = d_model_size
        # Per-head feature depth; assumes d_model_size divides evenly by num_heads.
        self.depth = int(d_model_size / self.num_heads)
        self.Wq = tf.keras.layers.Dense(d_model_size, name="Wq")
        self.Wk = tf.keras.layers.Dense(d_model_size, name="Wk")
        self.Wv = tf.keras.layers.Dense(d_model_size, name="Wv")
        self.dense = tf.keras.layers.Dense(d_model_size, name="dense")
    def split_into_heads(self, x, batch_size):
        # (batch, seq, d_model) -> (batch, num_heads, seq, depth)
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])
    def call(self, inputs, training=False):
        v, k, q, mask, layer_past, attention_mask, head_mask = inputs
        batch_size = shape_list(q)[0]
        q = self.Wq(q)
        k = self.Wk(k)
        v = self.Wv(v)
        q = self.split_into_heads(q, batch_size)
        k = self.split_into_heads(k, batch_size)
        v = self.split_into_heads(v, batch_size)
        if layer_past is not None:
            # Prepend cached keys/values from previous decoding steps.
            past_key, past_value = tf.unstack(layer_past, axis=0)
            k = tf.concat((past_key, k), axis=-2)
            v = tf.concat((past_value, v), axis=-2)
        # ``present`` lets callers cache k/v for the next generation step.
        present = tf.stack((k, v), axis=0)
        output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
        # Merge heads back: (batch, num_heads, seq, depth) -> (batch, seq, d_model).
        scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])
        attn = output[1]
        original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))
        output = self.dense(original_size_attention)
        outputs = (output, present)
        if self.output_attentions:
            outputs = outputs + (attn,)
        return outputs
def point_wise_feed_forward_network(d_model_size, dff, name=""):
    """Two-layer position-wise MLP: Dense(dff, relu) followed by Dense(d_model_size).

    NOTE: ``name`` is accepted for call-site symmetry but unused here; the
    Sequential is always named "ffn" (the name checkpoint weights expect).
    """
    hidden = tf.keras.layers.Dense(dff, activation="relu", name="0")
    projection = tf.keras.layers.Dense(d_model_size, name="2")
    return tf.keras.Sequential([hidden, projection], name="ffn")
class TFEncoderLayer(tf.keras.layers.Layer):
    # One CTRL transformer block: pre-norm self-attention then a pre-norm
    # position-wise feed-forward network, each wrapped in a residual add.
    def __init__(
        self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs
    ):
        super().__init__(**kwargs)
        self.multi_head_attention = TFMultiHeadAttention(
            d_model_size, num_heads, output_attentions, name="multi_head_attention"
        )
        self.ffn = point_wise_feed_forward_network(d_model_size, dff, name="ffn")
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm1")
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm2")
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
    def call(self, inputs, training=False):
        x, mask, layer_past, attention_mask, head_mask = inputs
        # Pre-norm attention sub-block with residual connection.
        normed = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(
            [normed, normed, normed, mask, layer_past, attention_mask, head_mask], training=training
        )
        attn_output = attn_outputs[0]
        attn_output = self.dropout1(attn_output, training=training)
        out1 = x + attn_output
        # Pre-norm feed-forward sub-block with residual connection.
        out2 = self.layernorm2(out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = out1 + ffn_output
        # Forward extra outputs (present k/v cache and, optionally, attentions).
        outputs = (out2,) + attn_outputs[1:]
        return outputs
@keras_serializable
class TFCTRLMainLayer(tf.keras.layers.Layer):
    # Core CTRL transformer stack: shared token embeddings plus sinusoidal
    # position encodings, ``n_layer`` pre-norm encoder blocks with causal
    # masking, and a final LayerNorm.
    config_class = CTRLConfig
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.output_past = config.output_past
        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer
        # Precomputed sinusoid table of shape (n_positions, n_embd); rows are
        # gathered by position id in ``call``.
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)
        # Shared embedding doubles as the LM-head projection (weight tying).
        self.w = TFSharedEmbeddings(
            config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="w"
        )
        self.dropout = tf.keras.layers.Dropout(config.embd_pdrop)
        self.h = [
            TFEncoderLayer(
                config.n_embd,
                config.n_head,
                config.dff,
                config.resid_pdrop,
                config.layer_norm_epsilon,
                config.output_attentions,
                name="h_._{}".format(i),
            )
            for i in range(config.n_layer)
        ]
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm")
    def get_input_embeddings(self):
        return self.w
    def _resize_token_embeddings(self, new_num_tokens):
        # Not supported for this model.
        raise NotImplementedError
    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        raise NotImplementedError
    def call(
        self,
        inputs,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        training=False,
    ):
        # Accept positional list/tuple or dict inputs in addition to keyword
        # arguments (needed for Keras functional-API compatibility).
        if isinstance(inputs, (tuple, list)):
            input_ids = inputs[0]
            past = inputs[1] if len(inputs) > 1 else past
            attention_mask = inputs[2] if len(inputs) > 2 else attention_mask
            token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
            position_ids = inputs[4] if len(inputs) > 4 else position_ids
            head_mask = inputs[5] if len(inputs) > 5 else head_mask
            inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds
            assert len(inputs) <= 7, "Too many inputs."
        elif isinstance(inputs, dict):
            input_ids = inputs.get("input_ids")
            past = inputs.get("past", past)
            attention_mask = inputs.get("attention_mask", attention_mask)
            token_type_ids = inputs.get("token_type_ids", token_type_ids)
            position_ids = inputs.get("position_ids", position_ids)
            head_mask = inputs.get("head_mask", head_mask)
            inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
            assert len(inputs) <= 7, "Too many inputs."
        else:
            input_ids = inputs
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
            input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            # Cached entries have shape (2, batch, heads, past_seq, depth).
            past_length = shape_list(past[0][0])[-2]
        if position_ids is None:
            # New positions continue right after the cached prefix.
            position_ids = tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
            position_ids = tf.tile(position_ids, [input_shape[0], 1])
        # Attention mask.
        if attention_mask is not None:
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = tf.cast(attention_mask, tf.float32)
            attention_mask = (1.0 - attention_mask) * -10000.0
        else:
            attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.num_layers
        if token_type_ids is not None:
            token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
            token_type_embeds = self.w(token_type_ids, mode="embedding")
            # Token-type embeddings are scaled like the input embeddings below.
            token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32))
        else:
            token_type_embeds = 0
        position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
        if inputs_embeds is None:
            inputs_embeds = self.w(input_ids, mode="embedding")
        seq_len = input_shape[-1]
        # Causal mask: 1.0 above the diagonal marks future positions to block.
        mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
        # Scale embeddings by sqrt(d_model) as in the original Transformer.
        inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32))
        pos_embeds = tf.gather(self.pos_encoding, position_ids)
        hidden_states = inputs_embeds + pos_embeds + token_type_embeds
        hidden_states = self.dropout(hidden_states, training=training)
        output_shape = input_shape + [shape_list(hidden_states)[-1]]
        presents = ()
        all_hidden_states = ()
        all_attentions = []
        for i, (h, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
            outputs = h([hidden_states, mask, layer_past, attention_mask, head_mask[i]], training=training)
            hidden_states, present = outputs[:2]
            if self.output_past:
                presents = presents + (present,)
            if self.output_attentions:
                all_attentions.append(outputs[2])
        hidden_states = self.layernorm(hidden_states)
        hidden_states = tf.reshape(hidden_states, output_shape)
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        # Assemble outputs: (last_hidden_state, presents?, all_hidden_states?, attentions?)
        outputs = (hidden_states,)
        if self.output_past:
            outputs = outputs + (presents,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
            all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs
class TFCTRLPreTrainedModel(TFPreTrainedModel):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    # Used by ``from_pretrained`` to resolve configs/checkpoints, and to locate
    # the main layer under the ``transformer`` attribute when loading weights.
    config_class = CTRLConfig
    pretrained_model_archive_map = TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "transformer"
CTRL_START_DOCSTRING = r"""
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
CTRL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.CTRLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
    "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
    CTRL_START_DOCSTRING,
)
class TFCTRLModel(TFCTRLPreTrainedModel):
    # Thin wrapper exposing TFCTRLMainLayer under the standard ``transformer``
    # attribute (the ``base_model_prefix`` used for weight loading).
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFCTRLMainLayer(config, name="transformer")
    @add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:
        last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
        past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(tf.Tensor)` `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`tf.Tensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        import tensorflow as tf
        from transformers import CTRLTokenizer, TFCTRLModel
        tokenizer = CTRLTokenizer.from_pretrained('ctrl')
        model = TFCTRLModel.from_pretrained('ctrl')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
        """
        # Delegate entirely to the shared main layer.
        outputs = self.transformer(inputs, **kwargs)
        return outputs
class TFCTRLLMHead(tf.keras.layers.Layer):
    # Language-modeling head: projects hidden states back to vocabulary logits
    # by reusing the input embedding matrix (weight tying) plus an output bias.
    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings
    def build(self, input_shape):
        # Created lazily in build() so the bias is tracked as a layer weight.
        self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
        super().build(input_shape)
    def call(self, hidden_states):
        # "linear" mode multiplies by the transposed embedding matrix.
        hidden_states = self.input_embeddings(hidden_states, mode="linear")
        hidden_states = hidden_states + self.bias
        return hidden_states
@add_start_docstrings(
    """The CTRL Model transformer with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    CTRL_START_DOCSTRING,
)
class TFCTRLLMHeadModel(TFCTRLPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFCTRLMainLayer(config, name="transformer")
        # The head shares its projection weights with ``self.transformer.w``.
        self.lm_head = TFCTRLLMHead(config, self.transformer.w, name="lm_head")
    def get_output_embeddings(self):
        return self.lm_head.input_embeddings
    def prepare_inputs_for_generation(self, inputs, past, **kwargs):
        # only last token for inputs_ids if past is defined in kwargs
        # (earlier tokens are already encoded in the cached key/values).
        if past:
            inputs = tf.expand_dims(inputs[:, -1], -1)
        return {"inputs": inputs, "past": past}
    @add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:
        prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`tf.Tensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        import tensorflow as tf
        from transformers import CTRLTokenizer, TFCTRLLMHeadModel
        tokenizer = CTRLTokenizer.from_pretrained('ctrl')
        model = TFCTRLLMHeadModel.from_pretrained('ctrl')
        input_ids = tf.constant([tokenizer.encode("Links Hello, my dog is cute", add_special_tokens=True)])
        outputs = model(input_ids)
        logits = outputs[0]  # this model returns no loss; logits come first
        """
        transformer_outputs = self.transformer(inputs, **kwargs)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        outputs = (lm_logits,) + transformer_outputs[1:]
        return outputs  # lm_logits, presents, (all hidden_states), (attentions)
| 25,751 | 44.822064 | 169 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/activations.py | import math
import torch
import torch.nn.functional as F
def swish(x):
    """Swish activation ``x * sigmoid(x)`` (Ramachandran et al., 2017)."""
    return torch.sigmoid(x) * x
def _gelu_python(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
This is now written in C in torch.nn.functional
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
    """Tanh approximation of gelu currently in the Google BERT repo (identical to OpenAI GPT).

    Also see https://arxiv.org/abs/1606.08415
    """
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
def _torch_version_tuple(version):
    """Parse the leading numeric components of a version string into an int tuple.

    Tolerates local/dev suffixes such as "1.4.0+cpu" or "1.8.0a0" by keeping
    only the leading digits of each dotted component.
    """
    numbers = []
    for part in version.split("+")[0].split(".")[:3]:
        digits = ""
        for char in part:
            if not char.isdigit():
                break
            digits += char
        if not digits:
            break
        numbers.append(int(digits))
    return tuple(numbers)


# BUG FIX: ``torch.__version__ < "1.4.0"`` compared version *strings*, which
# orders lexicographically and wrongly classifies e.g. "1.10.0" as older than
# "1.4.0" (silently falling back to the slow python gelu). Compare parsed
# numeric tuples instead.
if _torch_version_tuple(torch.__version__) < (1, 4):
    gelu = _gelu_python
else:
    # torch >= 1.4 ships gelu implemented in C via torch.nn.functional.
    gelu = F.gelu

# Script the tanh-approximation gelu for speed.
gelu_new = torch.jit.script(gelu_new)

# Mapping from configuration activation names to the actual callables.
ACT2FN = {
    "relu": F.relu,
    "swish": swish,
    "gelu": gelu,
    "tanh": torch.tanh,  # F.tanh is a deprecated alias of torch.tanh
    "gelu_new": gelu_new,
}
def get_activation(activation_string):
    """Look up an activation function by name in the ``ACT2FN`` mapping.

    Raises:
        KeyError: if *activation_string* is not a known activation name.
    """
    if activation_string not in ACT2FN:
        raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
    return ACT2FN[activation_string]
| 1,381 | 27.791667 | 115 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/__init__.py | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
__version__ = "2.8.0"
# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
import absl.logging
except ImportError:
pass
else:
absl.logging.set_verbosity("info")
absl.logging.set_stderrthreshold("info")
absl.logging._warn_preinit_stderr = False
import logging
# Benchmarking
from .benchmark_utils import (
Frame,
Memory,
MemoryState,
MemorySummary,
MemoryTrace,
UsedMemoryState,
bytes_to_human_readable,
start_memory_tracing,
stop_memory_tracing,
)
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, AutoConfig
from .configuration_bart import BartConfig
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig
from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig
from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig
from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config
from .configuration_mmbt import MMBTConfig
from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig
from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig
from .configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
from .data import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
is_sklearn_available,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
# Files and general utilities
from .file_utils import (
CONFIG_NAME,
MODEL_CARD_NAME,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
add_end_docstrings,
add_start_docstrings,
cached_path,
is_tf_available,
is_torch_available,
)
# Model Cards
from .modelcard import ModelCard
# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (
convert_tf_weight_name_to_pt_weight_name,
load_pytorch_checkpoint_in_tf2_model,
load_pytorch_model_in_tf2_model,
load_pytorch_weights_in_tf2_model,
load_tf2_checkpoint_in_pytorch_model,
load_tf2_model_in_pytorch_model,
load_tf2_weights_in_pytorch_model,
)
# Pipelines
from .pipelines import (
CsvPipelineDataFormat,
FeatureExtractionPipeline,
FillMaskPipeline,
JsonPipelineDataFormat,
NerPipeline,
PipedPipelineDataFormat,
Pipeline,
PipelineDataFormat,
QuestionAnsweringPipeline,
SummarizationPipeline,
TextClassificationPipeline,
TokenClassificationPipeline,
TranslationPipeline,
pipeline,
)
from .tokenization_albert import AlbertTokenizer
from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
from .tokenization_bart import BartTokenizer
from .tokenization_bert import BasicTokenizer, BertTokenizer, BertTokenizerFast, WordpieceTokenizer
from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_distilbert import DistilBertTokenizer, DistilBertTokenizerFast
from .tokenization_electra import ElectraTokenizer, ElectraTokenizerFast
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from .tokenization_openai import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer, TransfoXLTokenizerFast
# Tokenizers
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import SPIECE_UNDERLINE, XLNetTokenizer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
if is_sklearn_available():
from .data import glue_compute_metrics, xnli_compute_metrics
# Modeling
if is_torch_available():
from .modeling_utils import PreTrainedModel, prune_layer, Conv1D, top_k_top_p_filtering
from .modeling_auto import (
AutoModel,
AutoModelForPreTraining,
AutoModelForSequenceClassification,
AutoModelForQuestionAnswering,
AutoModelWithLMHead,
AutoModelForTokenClassification,
ALL_PRETRAINED_MODEL_ARCHIVE_MAP,
MODEL_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
)
from .modeling_bert import (
BertPreTrainedModel,
BertModel,
BertForPreTraining,
BertForMaskedLM,
BertForNextSentencePrediction,
BertForSequenceClassification,
BertForMultipleChoice,
BertForTokenClassification,
BertForQuestionAnswering,
load_tf_weights_in_bert,
BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_openai import (
OpenAIGPTPreTrainedModel,
OpenAIGPTModel,
OpenAIGPTLMHeadModel,
OpenAIGPTDoubleHeadsModel,
load_tf_weights_in_openai_gpt,
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_transfo_xl import (
TransfoXLPreTrainedModel,
TransfoXLModel,
TransfoXLLMHeadModel,
AdaptiveEmbedding,
load_tf_weights_in_transfo_xl,
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_gpt2 import (
GPT2PreTrainedModel,
GPT2Model,
GPT2LMHeadModel,
GPT2DoubleHeadsModel,
load_tf_weights_in_gpt2,
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_ctrl import CTRLPreTrainedModel, CTRLModel, CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_xlnet import (
XLNetPreTrainedModel,
XLNetModel,
XLNetLMHeadModel,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple,
XLNetForQuestionAnswering,
load_tf_weights_in_xlnet,
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_xlm import (
XLMPreTrainedModel,
XLMModel,
XLMWithLMHeadModel,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_bart import (
BartForSequenceClassification,
BartModel,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_roberta import (
RobertaForMaskedLM,
RobertaModel,
RobertaForSequenceClassification,
RobertaForMultipleChoice,
RobertaForTokenClassification,
RobertaForQuestionAnswering,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_distilbert import (
DistilBertPreTrainedModel,
DistilBertForMaskedLM,
DistilBertModel,
DistilBertForSequenceClassification,
DistilBertForQuestionAnswering,
DistilBertForTokenClassification,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_camembert import (
CamembertForMaskedLM,
CamembertModel,
CamembertForSequenceClassification,
CamembertForMultipleChoice,
CamembertForTokenClassification,
CamembertForQuestionAnswering,
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_encoder_decoder import PreTrainedEncoderDecoder
from .modeling_t5 import (
T5PreTrainedModel,
T5Model,
T5ForConditionalGeneration,
load_tf_weights_in_t5,
T5_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_albert import (
AlbertPreTrainedModel,
AlbertModel,
AlbertForMaskedLM,
AlbertForSequenceClassification,
AlbertForQuestionAnswering,
AlbertForTokenClassification,
load_tf_weights_in_albert,
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_xlm_roberta import (
XLMRobertaForMaskedLM,
XLMRobertaModel,
XLMRobertaForMultipleChoice,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_mmbt import ModalEmbeddings, MMBTModel, MMBTForClassification
from .modeling_flaubert import (
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForSequenceClassification,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_electra import (
ElectraForPreTraining,
ElectraForMaskedLM,
ElectraForTokenClassification,
ElectraModel,
load_tf_weights_in_electra,
ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,
)
# Optimization
from .optimization import (
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
# TensorFlow
if is_tf_available():
from .modeling_tf_utils import (
TFPreTrainedModel,
TFSharedEmbeddings,
TFSequenceSummary,
shape_list,
tf_top_k_top_p_filtering,
)
from .modeling_tf_auto import (
TFAutoModel,
TFAutoModelForPreTraining,
TFAutoModelForSequenceClassification,
TFAutoModelForQuestionAnswering,
TFAutoModelWithLMHead,
TFAutoModelForTokenClassification,
TF_ALL_PRETRAINED_MODEL_ARCHIVE_MAP,
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
)
from .modeling_tf_bert import (
TFBertPreTrainedModel,
TFBertMainLayer,
TFBertEmbeddings,
TFBertModel,
TFBertForPreTraining,
TFBertForMaskedLM,
TFBertForNextSentencePrediction,
TFBertForSequenceClassification,
TFBertForMultipleChoice,
TFBertForTokenClassification,
TFBertForQuestionAnswering,
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_gpt2 import (
TFGPT2PreTrainedModel,
TFGPT2MainLayer,
TFGPT2Model,
TFGPT2LMHeadModel,
TFGPT2DoubleHeadsModel,
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_openai import (
TFOpenAIGPTPreTrainedModel,
TFOpenAIGPTMainLayer,
TFOpenAIGPTModel,
TFOpenAIGPTLMHeadModel,
TFOpenAIGPTDoubleHeadsModel,
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_transfo_xl import (
TFTransfoXLPreTrainedModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLLMHeadModel,
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
TFAdaptiveEmbedding,
)
from .modeling_tf_xlnet import (
TFXLNetPreTrainedModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetLMHeadModel,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetForQuestionAnsweringSimple,
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_xlm import (
TFXLMPreTrainedModel,
TFXLMMainLayer,
TFXLMModel,
TFXLMWithLMHeadModel,
TFXLMForSequenceClassification,
TFXLMForQuestionAnsweringSimple,
TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_xlm_roberta import (
TFXLMRobertaForMaskedLM,
TFXLMRobertaModel,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_roberta import (
TFRobertaPreTrainedModel,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_camembert import (
TFCamembertModel,
TFCamembertForMaskedLM,
TFCamembertForSequenceClassification,
TFCamembertForTokenClassification,
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_flaubert import (
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_distilbert import (
TFDistilBertPreTrainedModel,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForQuestionAnswering,
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_ctrl import (
TFCTRLPreTrainedModel,
TFCTRLModel,
TFCTRLLMHeadModel,
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_albert import (
TFAlbertPreTrainedModel,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertForMaskedLM,
TFAlbertForSequenceClassification,
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_t5 import (
TFT5PreTrainedModel,
TFT5Model,
TFT5ForConditionalGeneration,
TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from .modeling_tf_electra import (
TFElectraPreTrainedModel,
TFElectraModel,
TFElectraForPreTraining,
TFElectraForMaskedLM,
TFElectraForTokenClassification,
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,
)
# Optimization
from .optimization_tf import WarmUp, create_optimizer, AdamWeightDecay, GradientAccumulator
if not is_tf_available() and not is_torch_available():
logger.warning(
"Neither PyTorch nor TensorFlow >= 2.0 have been found."
"Models won't be available and only tokenizers, configuration"
"and file/data utilities can be used."
)
| 15,999 | 31.258065 | 113 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 BERT model. """
import logging
import numpy as np
import tensorflow as tf
from .configuration_bert import BertConfig
from .file_utils import MULTIPLE_CHOICE_DUMMY_INPUTS, add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list
logger = logging.getLogger(__name__)
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tf_model.h5",
"bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-tf_model.h5",
"bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-tf_model.h5",
"bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-tf_model.h5",
"bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-tf_model.h5",
"bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-tf_model.h5",
"bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-tf_model.h5",
"bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-tf_model.h5",
"bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-tf_model.h5",
"bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-tf_model.h5",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-tf_model.h5",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-tf_model.h5",
"bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-tf_model.h5",
"bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-tf_model.h5",
"bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-tf_model.h5",
"bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-tf_model.h5",
"bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-tf_model.h5",
"bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/tf_model.h5",
"bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/tf_model.h5",
"bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/tf_model.h5",
}
def gelu(x):
    """Gaussian Error Linear Unit (erf formulation): x * Phi(x).
    Original Implementation of the gelu activation function in Google Bert repo when initially created.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    return x * (0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0))))
def gelu_new(x):
    """Gaussian Error Linear Unit, tanh approximation.
    This is a smoother version of the RELU.
    Original paper: https://arxiv.org/abs/1606.08415
    Args:
        x: float Tensor to perform activation.
    Returns:
        `x` with the GELU activation applied.
    """
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    return x * (0.5 * (1.0 + tf.tanh(inner)))
def swish(x):
    """Swish activation: the input scaled by its logistic sigmoid, x * sigmoid(x)."""
    return tf.sigmoid(x) * x
# Mapping from activation name (as given in model configs) to a Keras-compatible callable.
ACT2FN = {
    "gelu": tf.keras.layers.Activation(gelu),
    "relu": tf.keras.activations.relu,
    "swish": tf.keras.layers.Activation(swish),
    "gelu_new": tf.keras.layers.Activation(gelu_new),
}
class TFBertEmbeddings(tf.keras.layers.Layer):
    """Construct the embeddings from word, position and token_type embeddings.

    The word embedding matrix is created in ``build`` (not ``__init__``) so it
    can also be reused in "linear" mode as the tied output projection.
    """
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        self.hidden_size = config.hidden_size
        self.initializer_range = config.initializer_range
        # Learned absolute position embeddings (one row per position index).
        self.position_embeddings = tf.keras.layers.Embedding(
            config.max_position_embeddings,
            config.hidden_size,
            embeddings_initializer=get_initializer(self.initializer_range),
            name="position_embeddings",
        )
        # Segment (sentence A/B) embeddings.
        self.token_type_embeddings = tf.keras.layers.Embedding(
            config.type_vocab_size,
            config.hidden_size,
            embeddings_initializer=get_initializer(self.initializer_range),
            name="token_type_embeddings",
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
    def build(self, input_shape):
        """Build shared word embedding layer """
        with tf.name_scope("word_embeddings"):
            # Create and initialize weights. The random normal initializer was chosen
            # arbitrarily, and works well.
            self.word_embeddings = self.add_weight(
                "weight",
                shape=[self.vocab_size, self.hidden_size],
                initializer=get_initializer(self.initializer_range),
            )
        super().build(input_shape)
    def call(self, inputs, mode="embedding", training=False):
        """Get token embeddings of inputs.
        Args:
            inputs: in "embedding" mode, a list of four tensors
                (input_ids, position_ids, token_type_ids, inputs_embeds), any of
                which may be None (see ``_embedding``); in "linear" mode, a single
                float32 tensor of shape [batch_size, length, hidden_size].
            mode: string, a valid value is one of "embedding" and "linear".
        Returns:
            outputs: (1) If mode == "embedding", output embedding tensor, float32 with
            shape [batch_size, length, embedding_size]; (2) mode == "linear", output
            linear tensor, float32 with shape [batch_size, length, vocab_size].
        Raises:
            ValueError: if mode is not valid.
        Shared weights logic adapted from
            https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        if mode == "embedding":
            return self._embedding(inputs, training=training)
        elif mode == "linear":
            return self._linear(inputs)
        else:
            raise ValueError("mode {} is not valid.".format(mode))
    def _embedding(self, inputs, training=False):
        """Applies embedding based on inputs tensor."""
        input_ids, position_ids, token_type_ids, inputs_embeds = inputs
        if input_ids is not None:
            input_shape = shape_list(input_ids)
        else:
            input_shape = shape_list(inputs_embeds)[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            # Default to positions 0..seq_length-1, broadcast over the batch.
            position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :]
        if token_type_ids is None:
            # Default: every token belongs to segment 0.
            token_type_ids = tf.fill(input_shape, 0)
        if inputs_embeds is None:
            inputs_embeds = tf.gather(self.word_embeddings, input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        # Sum the three embedding types, then normalize and apply dropout.
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings, training=training)
        return embeddings
    def _linear(self, inputs):
        """Computes logits by running inputs through a linear layer.
        Args:
            inputs: A float32 tensor with shape [batch_size, length, hidden_size]
        Returns:
            float32 tensor with shape [batch_size, length, vocab_size].
        """
        batch_size = shape_list(inputs)[0]
        length = shape_list(inputs)[1]
        # Weight tying: reuse the word embedding matrix (transposed) as decoder weights.
        x = tf.reshape(inputs, [-1, self.hidden_size])
        logits = tf.matmul(x, self.word_embeddings, transpose_b=True)
        return tf.reshape(logits, [batch_size, length, self.vocab_size])
class TFBertSelfAttention(tf.keras.layers.Layer):
    """Multi-head scaled dot-product self-attention (query/key/value projections,
    softmax over keys, optional attention-probability output)."""
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        assert config.hidden_size % config.num_attention_heads == 0
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = tf.keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
        )
        self.key = tf.keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
        )
        self.value = tf.keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
        )
        self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x, batch_size):
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
        return tf.transpose(x, perm=[0, 2, 1, 3])
    def call(self, inputs, training=False):
        # inputs: (hidden_states, attention_mask, head_mask).  The mask is ADDED to
        # the raw scores before the softmax; presumably it holds large negative
        # values at masked positions (built in TFBertModel.call) -- see below.
        hidden_states, attention_mask, head_mask = inputs
        batch_size = shape_list(hidden_states)[0]
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = tf.matmul(
            query_layer, key_layer, transpose_b=True
        )  # (batch size, num_heads, seq_len_q, seq_len_k)
        dk = tf.cast(shape_list(key_layer)[-1], tf.float32)  # scale attention_scores
        attention_scores = attention_scores / tf.math.sqrt(dk)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in TFBertModel call() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = tf.nn.softmax(attention_scores, axis=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs, training=training)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = tf.matmul(attention_probs, value_layer)
        # Merge heads back: (batch, num_heads, seq, head_size) -> (batch, seq, all_head_size).
        context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
        context_layer = tf.reshape(
            context_layer, (batch_size, -1, self.all_head_size)
        )  # (batch_size, seq_len_q, all_head_size)
        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
        return outputs
class TFBertSelfOutput(tf.keras.layers.Layer):
    """Projects the attention output back to hidden size, then applies dropout
    and a residual LayerNorm with the attention input."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)

    def call(self, inputs, training=False):
        attn_output, residual = inputs
        projected = self.dropout(self.dense(attn_output), training=training)
        return self.LayerNorm(projected + residual)
class TFBertAttention(tf.keras.layers.Layer):
    """Full attention sub-block: multi-head self-attention followed by its
    output projection (dense + dropout + residual LayerNorm)."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.self_attention = TFBertSelfAttention(config, name="self")
        self.dense_output = TFBertSelfOutput(config, name="output")

    def prune_heads(self, heads):
        # Head pruning is not implemented for the TF version of this layer.
        raise NotImplementedError

    def call(self, inputs, training=False):
        input_tensor, attention_mask, head_mask = inputs
        attn_results = self.self_attention([input_tensor, attention_mask, head_mask], training=training)
        projected = self.dense_output([attn_results[0], input_tensor], training=training)
        # Re-attach attention probabilities when the inner layer returned them.
        return (projected,) + attn_results[1:]
class TFBertIntermediate(tf.keras.layers.Layer):
    """Position-wise feed-forward expansion: dense projection to
    ``intermediate_size`` followed by the configured activation."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        # config.hidden_act is either an activation name or a callable.
        self.intermediate_act_fn = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act

    def call(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class TFBertOutput(tf.keras.layers.Layer):
    """Feed-forward output projection: dense back to hidden size, dropout,
    and a residual LayerNorm with the feed-forward input."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)

    def call(self, inputs, training=False):
        ffn_states, residual = inputs
        projected = self.dropout(self.dense(ffn_states), training=training)
        return self.LayerNorm(projected + residual)
class TFBertLayer(tf.keras.layers.Layer):
    """One full transformer encoder layer: attention block followed by the
    feed-forward (intermediate + output) block."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFBertAttention(config, name="attention")
        self.intermediate = TFBertIntermediate(config, name="intermediate")
        self.bert_output = TFBertOutput(config, name="output")

    def call(self, inputs, training=False):
        hidden_states, attention_mask, head_mask = inputs
        attn_out = self.attention([hidden_states, attention_mask, head_mask], training=training)
        ffn_hidden = self.intermediate(attn_out[0])
        layer_out = self.bert_output([ffn_hidden, attn_out[0]], training=training)
        # Forward any attention probabilities alongside the layer output.
        return (layer_out,) + attn_out[1:]
class TFBertEncoder(tf.keras.layers.Layer):
    """Stack of ``num_hidden_layers`` TFBertLayer blocks applied sequentially,
    optionally collecting every hidden state and every attention map."""
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = [TFBertLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)]
    def call(self, inputs, training=False):
        # head_mask is indexed per layer below, so it must have one entry per layer.
        hidden_states, attention_mask, head_mask = inputs
        all_hidden_states = ()
        all_attentions = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                # Record each layer's INPUT, so the embedding output is included.
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module([hidden_states, attention_mask, head_mask[i]], training=training)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # outputs, (hidden states), (attentions)
class TFBertPooler(tf.keras.layers.Layer):
    """Pools the sequence by passing the first token's hidden state through a
    tanh-activated dense layer."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="tanh",
            name="dense",
        )

    def call(self, hidden_states):
        # "Pooling" here is simply projecting the hidden state of the first token.
        first_token_state = hidden_states[:, 0]
        return self.dense(first_token_state)
class TFBertPredictionHeadTransform(tf.keras.layers.Layer):
    """Dense + activation + LayerNorm transform applied before the tied LM decoder."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        # config.hidden_act is either an activation name or a callable.
        self.transform_act_fn = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")

    def call(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class TFBertLMPredictionHead(tf.keras.layers.Layer):
    """Scores each position against the vocabulary; decoder weights are tied to
    the input embedding matrix, with a separate per-token output bias."""
    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        self.transform = TFBertPredictionHeadTransform(config, name="transform")
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings
    def build(self, input_shape):
        # Bias is created at build time so Keras tracks it as a trainable weight.
        self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
        super().build(input_shape)
    def call(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        # "linear" mode multiplies by the transposed embedding matrix (weight tying).
        hidden_states = self.input_embeddings(hidden_states, mode="linear")
        hidden_states = hidden_states + self.bias
        return hidden_states
class TFBertMLMHead(tf.keras.layers.Layer):
    """Masked-LM head: thin wrapper over the tied-weight prediction head."""

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.predictions = TFBertLMPredictionHead(config, input_embeddings, name="predictions")

    def call(self, sequence_output):
        return self.predictions(sequence_output)
class TFBertNSPHead(tf.keras.layers.Layer):
    """Next-sentence-prediction head: a 2-way classifier over the pooled output."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.seq_relationship = tf.keras.layers.Dense(
            2, kernel_initializer=get_initializer(config.initializer_range), name="seq_relationship"
        )

    def call(self, pooled_output):
        """Return the two next-sentence logits for each example."""
        return self.seq_relationship(pooled_output)
@keras_serializable
class TFBertMainLayer(tf.keras.layers.Layer):
    """Core BERT stack shared by every task-specific TFBert* model:
    embeddings -> transformer encoder -> pooler, with no task head of its own.
    """

    config_class = BertConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_hidden_layers = config.num_hidden_layers
        self.embeddings = TFBertEmbeddings(config, name="embeddings")
        self.encoder = TFBertEncoder(config, name="encoder")
        self.pooler = TFBertPooler(config, name="pooler")

    def get_input_embeddings(self):
        # Exposed so the base class can access (and tie) the embedding weights.
        return self.embeddings

    def _resize_token_embeddings(self, new_num_tokens):
        # Vocabulary resizing is not implemented for the TF 2.0 BERT layer.
        raise NotImplementedError

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        raise NotImplementedError

    def call(
        self,
        inputs,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        training=False,
    ):
        """Run the full BERT stack.

        ``inputs`` may be a single tensor of input ids, a list/tuple of up to
        six tensors in docstring order, or a dict keyed by argument name; the
        explicit keyword arguments act as fall-backs for entries missing from
        ``inputs``. Returns ``(sequence_output, pooled_output)`` plus any
        optional hidden states / attentions appended by the encoder.
        """
        if isinstance(inputs, (tuple, list)):
            input_ids = inputs[0]
            attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
            token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
            position_ids = inputs[3] if len(inputs) > 3 else position_ids
            head_mask = inputs[4] if len(inputs) > 4 else head_mask
            inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
            assert len(inputs) <= 6, "Too many inputs."
        elif isinstance(inputs, dict):
            input_ids = inputs.get("input_ids")
            attention_mask = inputs.get("attention_mask", attention_mask)
            token_type_ids = inputs.get("token_type_ids", token_type_ids)
            position_ids = inputs.get("position_ids", position_ids)
            head_mask = inputs.get("head_mask", head_mask)
            inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
            assert len(inputs) <= 6, "Too many inputs."
        else:
            input_ids = inputs

        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # Defaults: attend to every position; treat everything as segment 0.
        if attention_mask is None:
            attention_mask = tf.fill(input_shape, 1)
        if token_type_ids is None:
            token_type_ids = tf.fill(input_shape, 0)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            # Head masking is not yet supported in the TF 2.0 implementation.
            raise NotImplementedError
        else:
            head_mask = [None] * self.num_hidden_layers
            # head_mask = tf.constant([0] * self.num_hidden_layers)

        embedding_output = self.embeddings([input_ids, position_ids, token_type_ids, inputs_embeds], training=training)
        encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training)

        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
class TFBertPreTrainedModel(TFPreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    # Configuration class instantiated when loading from a config/json file.
    config_class = BertConfig
    # Map of shortcut names to pretrained TF checkpoint URLs.
    pretrained_model_archive_map = TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    # Attribute name under which subclasses store the core TFBertMainLayer.
    base_model_prefix = "bert"
BERT_START_DOCSTRING = r"""
This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.
Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`__
position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, embedding_dim)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class TFBertModel(TFBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.bert = TFBertMainLayer(config, name="bert")

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Returns:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            tuple of :obj:`tf.Tensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import BertTokenizer, TFBertModel

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = TFBertModel.from_pretrained('bert-base-uncased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
        """
        # The bare model simply delegates to the shared main layer.
        return self.bert(inputs, **kwargs)
@add_start_docstrings(
    """Bert Model with two heads on top as done during the pre-training:
    a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
    BERT_START_DOCSTRING,
)
class TFBertForPreTraining(TFBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.bert = TFBertMainLayer(config, name="bert")
        self.nsp = TFBertNSPHead(config, name="nsp___cls")
        self.mlm = TFBertMLMHead(config, self.bert.embeddings, name="mlm___cls")

    def get_output_embeddings(self):
        # The MLM decoder shares its weights with the input embeddings.
        return self.bert.embeddings

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import BertTokenizer, TFBertForPreTraining

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = TFBertForPreTraining.from_pretrained('bert-base-uncased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]  # Batch size 1
        prediction_scores, seq_relationship_scores = model(input_ids)[:2]
        """
        bert_outputs = self.bert(inputs, **kwargs)
        sequence_output, pooled_output = bert_outputs[:2]
        prediction_scores = self.mlm(sequence_output, training=kwargs.get("training", False))
        seq_relationship_score = self.nsp(pooled_output)
        # Prepend the two head outputs; keep optional hidden states / attentions.
        return (prediction_scores, seq_relationship_score) + bert_outputs[2:]
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class TFBertForMaskedLM(TFBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.bert = TFBertMainLayer(config, name="bert")
        self.mlm = TFBertMLMHead(config, self.bert.embeddings, name="mlm___cls")

    def get_output_embeddings(self):
        # The MLM decoder shares its weights with the input embeddings.
        return self.bert.embeddings

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import BertTokenizer, TFBertForMaskedLM

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = TFBertForMaskedLM.from_pretrained('bert-base-uncased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]  # Batch size 1
        prediction_scores = model(input_ids)[0]
        """
        bert_outputs = self.bert(inputs, **kwargs)
        token_states = bert_outputs[0]
        prediction_scores = self.mlm(token_states, training=kwargs.get("training", False))
        # Keep optional hidden states / attentions that follow the pooled output.
        return (prediction_scores,) + bert_outputs[2:]
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """, BERT_START_DOCSTRING,
)
class TFBertForNextSentencePrediction(TFBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.bert = TFBertMainLayer(config, name="bert")
        self.nsp = TFBertNSPHead(config, name="nsp___cls")

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        seq_relationship_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import BertTokenizer, TFBertForNextSentencePrediction

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = TFBertForNextSentencePrediction.from_pretrained('bert-base-uncased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]  # Batch size 1
        seq_relationship_scores = model(input_ids)[0]
        """
        bert_outputs = self.bert(inputs, **kwargs)
        # The NSP head classifies the pooled ([CLS]) representation.
        seq_relationship_score = self.nsp(bert_outputs[1])
        return (seq_relationship_score,) + bert_outputs[2:]
@add_start_docstrings(
    """Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class TFBertForSequenceClassification(TFBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.bert = TFBertMainLayer(config, name="bert")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
        self.classifier = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import BertTokenizer, TFBertForSequenceClassification

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]  # Batch size 1
        logits = model(input_ids)[0]
        """
        bert_outputs = self.bert(inputs, **kwargs)
        # Classify the pooled ([CLS]) representation; dropout only when training.
        pooled = self.dropout(bert_outputs[1], training=kwargs.get("training", False))
        logits = self.classifier(pooled)
        return (logits,) + bert_outputs[2:]
@add_start_docstrings(
    """Bert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    BERT_START_DOCSTRING,
)
class TFBertForMultipleChoice(TFBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.bert = TFBertMainLayer(config, name="bert")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
        # One score per choice; scores are reshaped to (batch, num_choices) in call().
        self.classifier = tf.keras.layers.Dense(
            1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )

    @property
    def dummy_inputs(self):
        """ Dummy inputs to build the network.
        Returns:
            tf.Tensor with dummy inputs
        """
        return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def call(
        self,
        inputs,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        training=False,
    ):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        classification_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`:
            `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            tuple of :obj:`tf.Tensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import BertTokenizer, TFBertForMultipleChoice

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = TFBertForMultipleChoice.from_pretrained('bert-base-uncased')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = tf.constant([tokenizer.encode(s) for s in choices])[None, :]  # Batch size 1, 2 choices
        outputs = model(input_ids)
        classification_scores = outputs[0]
        """
        # Accept a tensor, an ordered list/tuple, or a dict of named inputs;
        # the explicit keyword arguments serve as defaults for missing entries.
        if isinstance(inputs, (tuple, list)):
            input_ids = inputs[0]
            attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
            token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
            position_ids = inputs[3] if len(inputs) > 3 else position_ids
            head_mask = inputs[4] if len(inputs) > 4 else head_mask
            inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
            assert len(inputs) <= 6, "Too many inputs."
        elif isinstance(inputs, dict):
            input_ids = inputs.get("input_ids")
            attention_mask = inputs.get("attention_mask", attention_mask)
            token_type_ids = inputs.get("token_type_ids", token_type_ids)
            position_ids = inputs.get("position_ids", position_ids)
            head_mask = inputs.get("head_mask", head_mask)
            inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
            assert len(inputs) <= 6, "Too many inputs."
        else:
            input_ids = inputs

        # Inputs are (batch_size, num_choices, seq_length); flatten the choice
        # dimension so each choice runs through BERT as an independent sequence.
        if input_ids is not None:
            num_choices = shape_list(input_ids)[1]
            seq_length = shape_list(input_ids)[2]
        else:
            num_choices = shape_list(inputs_embeds)[1]
            seq_length = shape_list(inputs_embeds)[2]
        flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
        flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
        flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
        flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
        # NOTE(review): head_mask and inputs_embeds are forwarded without
        # flattening the choice dimension — confirm callers pass them in the
        # already-flattened shape.
        flat_inputs = [
            flat_input_ids,
            flat_attention_mask,
            flat_token_type_ids,
            flat_position_ids,
            head_mask,
            inputs_embeds,
        ]

        outputs = self.bert(flat_inputs, training=training)

        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output, training=training)
        logits = self.classifier(pooled_output)
        # Fold the flattened batch back into (batch_size, num_choices) scores.
        reshaped_logits = tf.reshape(logits, (-1, num_choices))

        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attention if they are here
        return outputs  # reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    BERT_START_DOCSTRING,
)
class TFBertForTokenClassification(TFBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.bert = TFBertMainLayer(config, name="bert")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
        self.classifier = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import BertTokenizer, TFBertForTokenClassification

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = TFBertForTokenClassification.from_pretrained('bert-base-uncased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]  # Batch size 1
        scores = model(input_ids)[0]
        """
        bert_outputs = self.bert(inputs, **kwargs)
        # Classify every token's hidden state; dropout only when training.
        token_states = self.dropout(bert_outputs[0], training=kwargs.get("training", False))
        logits = self.classifier(token_states)
        return (logits,) + bert_outputs[2:]
@add_start_docstrings(
    """Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    BERT_START_DOCSTRING,
)
class TFBertForQuestionAnswering(TFBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.bert = TFBertMainLayer(config, name="bert")
        self.qa_outputs = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
        )

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        start_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-start scores (before SoftMax).
        end_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-end scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from transformers import BertTokenizer, TFBertForQuestionAnswering

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = TFBertForQuestionAnswering.from_pretrained('bert-base-uncased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]  # Batch size 1
        start_scores, end_scores = model(input_ids)[:2]
        """
        bert_outputs = self.bert(inputs, **kwargs)
        # Project each token's hidden state to two scores, then split them into
        # the span-start and span-end logit tensors.
        logits = self.qa_outputs(bert_outputs[0])
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)
        return (start_logits, end_logits) + bert_outputs[2:]
| 56,324 | 47.223459 | 181 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
import argparse
import logging
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
logging.basicConfig(level=logging.INFO)
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``state_dict`` file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint to read.
        bert_config_file: JSON config file describing the model architecture.
        pytorch_dump_path: destination path for the serialized PyTorch weights.
    """
    # Build an uninitialized PyTorch model matching the architecture in the config.
    bert_config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(bert_config)))
    pytorch_model = BertForPreTraining(bert_config)

    # Copy every TF variable into the corresponding PyTorch parameter in place.
    load_tf_weights_in_bert(pytorch_model, bert_config, tf_checkpoint_path)

    # Persist only the weights (state dict), not the module object itself.
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(pytorch_model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: three required path arguments, no optional flags.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path",
        type=str,
        default=None,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        default=None,
        required=True,
        help="The config json file corresponding to the pre-trained BERT model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        default=None,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 2,139 | 33.516129 | 117 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_distilbert.py | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 DistilBERT model
"""
import logging
import math
import numpy as np
import tensorflow as tf
from .configuration_distilbert import DistilBertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, get_initializer, shape_list
logger = logging.getLogger(__name__)
# Shortcut-name -> pretrained TF 2.0 weight archive (.h5) URL, consumed by
# TFDistilBertPreTrainedModel.from_pretrained via `pretrained_model_archive_map`.
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-tf_model.h5",
    "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-tf_model.h5",
    "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-tf_model.h5",
    "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-distilled-squad-tf_model.h5",
    "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-tf_model.h5",
    "distilbert-base-uncased-finetuned-sst-2-english": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-finetuned-sst-2-english-tf_model.h5",
}
# UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
def gelu(x):
    """Exact Gaussian Error Linear Unit activation.

    Computes ``x * Phi(x)`` where ``Phi`` is the standard normal CDF,
    evaluated with the error function. This is the original formulation from
    the Google BERT repo; OpenAI GPT instead uses the tanh approximation
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    Reference: https://arxiv.org/abs/1606.08415
    """
    normal_cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
    return x * normal_cdf
def gelu_new(x):
    """Tanh-based approximation of the GELU activation.

    Smoother alternative to ReLU; see https://arxiv.org/abs/1606.08415.

    Args:
        x: float Tensor to activate.
    Returns:
        Tensor of the same shape with the approximate GELU applied.
    """
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    cdf = 0.5 * (1.0 + tf.tanh(inner))
    return x * cdf
class TFEmbeddings(tf.keras.layers.Layer):
    """Word + absolute-position embeddings (DistilBERT uses no token-type embeddings).

    Doubles as the output projection: call with mode="linear" to map hidden
    states back to vocabulary logits with the shared word-embedding matrix.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        self.dim = config.dim
        self.initializer_range = config.initializer_range
        # NOTE: this attribute is overwritten in build() with a raw weight
        # variable; the TFSharedEmbeddings instance only fixes the name scope.
        self.word_embeddings = TFSharedEmbeddings(
            config.vocab_size, config.dim, initializer_range=config.initializer_range, name="word_embeddings"
        ) # padding_idx=0)
        self.position_embeddings = tf.keras.layers.Embedding(
            config.max_position_embeddings,
            config.dim,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="position_embeddings",
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
    def build(self, input_shape):
        """Build shared word embedding layer """
        with tf.name_scope("word_embeddings"):
            # Create and initialize weights. The random normal initializer was chosen
            # arbitrarily, and works well.
            # Replaces self.word_embeddings with a (vocab_size, dim) variable.
            self.word_embeddings = self.add_weight(
                "weight", shape=[self.vocab_size, self.dim], initializer=get_initializer(self.initializer_range)
            )
        super().build(input_shape)
    def call(self, inputs, inputs_embeds=None, mode="embedding", training=False):
        """Get token embeddings of inputs.
        Args:
            inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids)
            mode: string, a valid value is one of "embedding" and "linear".
        Returns:
            outputs: (1) If mode == "embedding", output embedding tensor, float32 with
                shape [batch_size, length, embedding_size]; (2) mode == "linear", output
                linear tensor, float32 with shape [batch_size, length, vocab_size].
        Raises:
            ValueError: if mode is not valid.
        Shared weights logic adapted from
            https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        if mode == "embedding":
            return self._embedding(inputs, inputs_embeds=inputs_embeds, training=training)
        elif mode == "linear":
            return self._linear(inputs)
        else:
            raise ValueError("mode {} is not valid.".format(mode))
    def _embedding(self, inputs, inputs_embeds=None, training=False):
        """
        Parameters
        ----------
        input_ids: tf.Tensor(bs, max_seq_length)
            The token ids to embed.
        Outputs
        -------
        embeddings: tf.Tensor(bs, max_seq_length, dim)
            The embedded tokens (plus position embeddings, no token_type embeddings)
        """
        # `inputs` may be a bare id tensor or an (input_ids, position_ids) pair.
        if not isinstance(inputs, (tuple, list)):
            input_ids = inputs
            position_ids = None
        else:
            input_ids, position_ids = inputs
        if input_ids is not None:
            seq_length = shape_list(input_ids)[1]
        else:
            seq_length = shape_list(inputs_embeds)[1]
        if position_ids is None:
            # Default to absolute positions 0..seq_length-1 broadcast over the batch.
            position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :]
        if inputs_embeds is None:
            inputs_embeds = tf.gather(self.word_embeddings, input_ids)
        position_embeddings = self.position_embeddings(position_ids)  # (bs, max_seq_length, dim)
        embeddings = inputs_embeds + position_embeddings  # (bs, max_seq_length, dim)
        embeddings = self.LayerNorm(embeddings)  # (bs, max_seq_length, dim)
        embeddings = self.dropout(embeddings, training=training)  # (bs, max_seq_length, dim)
        return embeddings
    def _linear(self, inputs):
        """Computes logits by running inputs through a linear layer.
            Args:
                inputs: A float32 tensor with shape [batch_size, length, hidden_size]
            Returns:
                float32 tensor with shape [batch_size, length, vocab_size].
        """
        batch_size = shape_list(inputs)[0]
        length = shape_list(inputs)[1]
        # Matmul against the transposed embedding matrix ties input/output weights.
        x = tf.reshape(inputs, [-1, self.dim])
        logits = tf.matmul(x, self.word_embeddings, transpose_b=True)
        return tf.reshape(logits, [batch_size, length, self.vocab_size])
class TFMultiHeadSelfAttention(tf.keras.layers.Layer):
    """Multi-head (self-)attention with separate q/k/v/output projections."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.n_heads = config.n_heads
        self.dim = config.dim
        self.dropout = tf.keras.layers.Dropout(config.attention_dropout)
        self.output_attentions = config.output_attentions
        # Hidden size must split evenly across heads.
        assert self.dim % self.n_heads == 0
        self.q_lin = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin"
        )
        self.k_lin = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin"
        )
        self.v_lin = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin"
        )
        self.out_lin = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin"
        )
        self.pruned_heads = set()
    def prune_heads(self, heads):
        # Head pruning is not supported in the TF implementation.
        raise NotImplementedError
    def call(self, inputs, training=False):
        """
        Parameters
        ----------
        query: tf.Tensor(bs, seq_length, dim)
        key: tf.Tensor(bs, seq_length, dim)
        value: tf.Tensor(bs, seq_length, dim)
        mask: tf.Tensor(bs, seq_length)
        Outputs
        -------
        weights: tf.Tensor(bs, n_heads, seq_length, seq_length)
            Attention weights
        context: tf.Tensor(bs, seq_length, dim)
            Contextualized layer. Optional: only if `output_attentions=True`
        """
        query, key, value, mask, head_mask = inputs
        bs, q_length, dim = shape_list(query)
        k_length = shape_list(key)[1]
        # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
        # assert key.size() == value.size()
        dim_per_head = self.dim // self.n_heads
        mask_reshape = [bs, 1, 1, k_length]
        def shape(x):
            """ separate heads """
            return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
        def unshape(x):
            """ group heads """
            return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)
        # Scale queries (rather than scores) before the dot product.
        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, q_length, dim_per_head)
        scores = tf.matmul(q, k, transpose_b=True)  # (bs, n_heads, q_length, k_length)
        mask = tf.reshape(mask, mask_reshape)  # (bs, n_heads, qlen, klen)
        # scores.masked_fill_(mask, -float('inf'))   # (bs, n_heads, q_length, k_length)
        # Additive masking: masked positions (mask == 0) get a large negative
        # score so they vanish after the softmax.
        scores = scores - 1e30 * (1.0 - mask)
        weights = tf.nn.softmax(scores, axis=-1)  # (bs, n_heads, qlen, klen)
        weights = self.dropout(weights, training=training)  # (bs, n_heads, qlen, klen)
        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask
        context = tf.matmul(weights, v)  # (bs, n_heads, qlen, dim_per_head)
        context = unshape(context)  # (bs, q_length, dim)
        context = self.out_lin(context)  # (bs, q_length, dim)
        if self.output_attentions:
            return (context, weights)
        else:
            return (context,)
class TFFFN(tf.keras.layers.Layer):
    """Position-wise feed-forward block: Dense -> activation -> Dense -> dropout."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        # Expand to the intermediate size, then project back to the model size.
        self.lin1 = tf.keras.layers.Dense(
            config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1"
        )
        self.lin2 = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2"
        )
        assert config.activation in ["relu", "gelu"], "activation ({}) must be in ['relu', 'gelu']".format(
            config.activation
        )
        if config.activation == "gelu":
            self.activation = tf.keras.layers.Activation(gelu)
        else:
            self.activation = tf.keras.activations.relu

    def call(self, input, training=False):
        """Apply the two-layer MLP; dropout is active only when training."""
        hidden = self.activation(self.lin1(input))
        projected = self.lin2(hidden)
        return self.dropout(projected, training=training)
class TFTransformerBlock(tf.keras.layers.Layer):
    """One encoder layer: self-attention + FFN, each with a post-LayerNorm residual."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.n_heads = config.n_heads
        self.dim = config.dim
        self.hidden_dim = config.hidden_dim
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation = config.activation
        self.output_attentions = config.output_attentions
        assert config.dim % config.n_heads == 0
        self.attention = TFMultiHeadSelfAttention(config, name="attention")
        self.sa_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")
        self.ffn = TFFFN(config, name="ffn")
        self.output_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
    def call(self, inputs, training=False):  # removed: src_enc=None, src_len=None
        """
        Parameters
        ----------
        x: tf.Tensor(bs, seq_length, dim)
        attn_mask: tf.Tensor(bs, seq_length)
        Outputs
        -------
        sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length)
            The attention weights
        ffn_output: tf.Tensor(bs, seq_length, dim)
            The output of the transformer block contextualization.
        """
        x, attn_mask, head_mask = inputs
        # Self-Attention
        # Query, key and value are all the layer input (self-attention).
        sa_output = self.attention([x, x, x, attn_mask, head_mask], training=training)
        if self.output_attentions:
            sa_output, sa_weights = sa_output  # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else: # To handle these `output_attention` or `output_hidden_states` cases returning tuples
            # assert type(sa_output) == tuple
            sa_output = sa_output[0]
        sa_output = self.sa_layer_norm(sa_output + x)  # (bs, seq_length, dim)
        # Feed Forward Network
        ffn_output = self.ffn(sa_output, training=training)  # (bs, seq_length, dim)
        ffn_output = self.output_layer_norm(ffn_output + sa_output)  # (bs, seq_length, dim)
        # Output is (hidden,) or (attention_weights, hidden) when attentions are requested.
        output = (ffn_output,)
        if self.output_attentions:
            output = (sa_weights,) + output
        return output
class TFTransformer(tf.keras.layers.Layer):
    """Stack of n_layers TFTransformerBlock encoder layers."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.n_layers = config.n_layers
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = [TFTransformerBlock(config, name="layer_._{}".format(i)) for i in range(config.n_layers)]
    def call(self, inputs, training=False):
        """
        Parameters
        ----------
        x: tf.Tensor(bs, seq_length, dim)
            Input sequence embedded.
        attn_mask: tf.Tensor(bs, seq_length)
            Attention mask on the sequence.
        Outputs
        -------
        hidden_state: tf.Tensor(bs, seq_length, dim)
            Sequence of hiddens states in the last (top) layer
        all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
            Tuple of length n_layers with the hidden states from each layer.
            Optional: only if output_hidden_states=True
        all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
            Tuple of length n_layers with the attention weights from each layer
            Optional: only if output_attentions=True
        """
        x, attn_mask, head_mask = inputs
        all_hidden_states = ()
        all_attentions = ()
        hidden_state = x
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)
            layer_outputs = layer_module([hidden_state, attn_mask, head_mask[i]], training=training)
            # Hidden state is always last in the block's output tuple.
            hidden_state = layer_outputs[-1]
            if self.output_attentions:
                # Block returns (attention_weights, hidden) in this mode.
                assert len(layer_outputs) == 2
                attentions = layer_outputs[0]
                all_attentions = all_attentions + (attentions,)
            else:
                assert len(layer_outputs) == 1
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_state,)
        # Tuple layout: (last_hidden,) [+ all_hidden_states] [+ all_attentions]
        outputs = (hidden_state,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions)
class TFDistilBertMainLayer(tf.keras.layers.Layer):
    """Bare DistilBERT encoder: embeddings + transformer stack, no task head."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_hidden_layers = config.num_hidden_layers
        self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings
        self.transformer = TFTransformer(config, name="transformer") # Encoder
    def get_input_embeddings(self):
        return self.embeddings
    def _resize_token_embeddings(self, new_num_tokens):
        # Not supported in the TF implementation.
        raise NotImplementedError
    def _prune_heads(self, heads_to_prune):
        # Not supported in the TF implementation.
        raise NotImplementedError
    def call(self, inputs, attention_mask=None, head_mask=None, inputs_embeds=None, training=False):
        # `inputs` may be a tensor (input_ids only), a positional list/tuple,
        # or a dict keyed by argument name; explicit kwargs act as fallbacks.
        if isinstance(inputs, (tuple, list)):
            input_ids = inputs[0]
            attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
            head_mask = inputs[2] if len(inputs) > 2 else head_mask
            inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds
            assert len(inputs) <= 4, "Too many inputs."
        elif isinstance(inputs, dict):
            input_ids = inputs.get("input_ids")
            attention_mask = inputs.get("attention_mask", attention_mask)
            head_mask = inputs.get("head_mask", head_mask)
            inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
            assert len(inputs) <= 4, "Too many inputs."
        else:
            input_ids = inputs
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if attention_mask is None:
            # Default: attend to every position.
            attention_mask = tf.ones(input_shape)  # (bs, seq_length)
        attention_mask = tf.cast(attention_mask, dtype=tf.float32)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.num_hidden_layers
        embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds)  # (bs, seq_length, dim)
        tfmr_output = self.transformer([embedding_output, attention_mask, head_mask], training=training)
        return tfmr_output  # last-layer hidden-state, (all hidden_states), (all attentions)
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class TFDistilBertPreTrainedModel(TFPreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    # Config class used to instantiate models from shortcut names / files.
    config_class = DistilBertConfig
    # Shortcut-name -> pretrained weight URL mapping (defined above).
    pretrained_model_archive_map = TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP
    # Attribute name under which task models hold the base encoder.
    base_model_prefix = "distilbert"
DISTILBERT_START_DOCSTRING = r"""
This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.
Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
DISTILBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, embedding_dim)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
    "The bare DistilBERT encoder/transformer outputing raw hidden-states without any specific head on top.",
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertModel(TFDistilBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # Bare encoder; this class adds no task-specific head.
        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")  # Embeddings
    @add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers,DistilBertConfig`) and inputs:
        last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            tuple of :obj:`tf.Tensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        import tensorflow as tf
        from transformers import DistilBertTokenizer, TFDistilBertModel
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
        model = TFDistilBertModel.from_pretrained('distilbert-base-cased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
        """
        # Pure delegation: the main layer already produces the documented tuple.
        outputs = self.distilbert(inputs, **kwargs)
        return outputs
class TFDistilBertLMHead(tf.keras.layers.Layer):
    """Language-modeling head that reuses the input embedding matrix.

    The output weights are tied to the provided input embeddings; only a
    per-token output bias is trained here.
    """

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        # Tied weights: projection is done by the embedding layer in "linear" mode.
        self.input_embeddings = input_embeddings

    def build(self, input_shape):
        # Only trainable variable owned by this layer: the output bias.
        self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
        super().build(input_shape)

    def call(self, hidden_states):
        """Project hidden states to vocabulary logits and add the output bias."""
        logits = self.input_embeddings(hidden_states, mode="linear")
        return logits + self.bias
@add_start_docstrings(
    """DistilBert Model with a `masked language modeling` head on top. """, DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.vocab_size = config.vocab_size
        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        # MLM head: Dense -> GELU -> LayerNorm -> tied-embedding projection.
        self.vocab_transform = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform"
        )
        self.act = tf.keras.layers.Activation(gelu)
        self.vocab_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
        # Output projection shares weights with the input embeddings.
        self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")
    def get_output_embeddings(self):
        # The tied embedding layer doubles as the output embedding matrix.
        return self.vocab_projector.input_embeddings
    @add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers,DistilBertConfig`) and inputs:
        prediction_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            tuple of :obj:`tf.Tensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        import tensorflow as tf
        from transformers import DistilBertTokenizer, TFDistilBertForMaskedLM
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
        model = TFDistilBertForMaskedLM.from_pretrained('distilbert-base-cased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        prediction_scores = outputs[0]
        """
        distilbert_output = self.distilbert(inputs, **kwargs)
        hidden_states = distilbert_output[0]  # (bs, seq_length, dim)
        prediction_logits = self.vocab_transform(hidden_states)  # (bs, seq_length, dim)
        prediction_logits = self.act(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_layer_norm(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_projector(prediction_logits)
        # Index 1 onward carries the optional (hidden_states, attentions) extras.
        outputs = (prediction_logits,) + distilbert_output[1:]
        return outputs  # logits, (hidden_states), (attentions)
@add_start_docstrings(
    """DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        # Encoder + a two-layer classification head over the first token's state.
        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        self.pre_classifier = tf.keras.layers.Dense(
            config.dim,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="relu",
            name="pre_classifier",
        )
        self.classifier = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout)

    @add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Returns:
        :obj:`tuple` comprising various elements depending on the configuration (:class:`~transformers,DistilBertConfig`) and inputs:
        logits (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            Hidden-states of the model at the output of each layer plus the initial embedding outputs,
            each of shape :obj:`(batch_size, sequence_length, hidden_size)`.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Per-layer attention weights after the softmax, each of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
    Examples::
        import tensorflow as tf
        from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
        model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-cased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        logits = outputs[0]
        """
        distilbert_output = self.distilbert(inputs, **kwargs)
        # DistilBERT has no pooler: use the first token's hidden state as summary.
        cls_state = distilbert_output[0][:, 0]  # (bs, dim)
        cls_state = self.pre_classifier(cls_state)  # (bs, dim), ReLU inside the Dense
        cls_state = self.dropout(cls_state, training=kwargs.get("training", False))  # (bs, dim)
        logits = self.classifier(cls_state)  # (bs, num_labels)
        # Keep any optional (hidden_states, attentions) extras from the encoder.
        return (logits,) + distilbert_output[1:]  # logits, (hidden_states), (attentions)
@add_start_docstrings(
    """DistilBert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        # Encoder + dropout + a per-token linear classifier.
        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.classifier = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
    @add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers,DistilBertConfig`) and inputs:
        scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            tuple of :obj:`tf.Tensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        import tensorflow as tf
        from transformers import DistilBertTokenizer, TFDistilBertForTokenClassification
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
        model = TFDistilBertForTokenClassification.from_pretrained('distilbert-base-cased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        scores = outputs[0]
        """
        outputs = self.distilbert(inputs, **kwargs)
        sequence_output = outputs[0]  # (bs, seq_length, dim)
        sequence_output = self.dropout(sequence_output, training=kwargs.get("training", False))
        logits = self.classifier(sequence_output)  # (bs, seq_length, num_labels)
        # BUGFIX: DistilBERT's encoder output has no pooled output at index 1
        # (its tuple is (hidden_state, [all_hidden_states], [all_attentions])),
        # so the BERT-style `outputs[2:]` dropped the optional hidden_states.
        # Use `[1:]` like the sibling TFDistilBertForSequenceClassification and
        # TFDistilBertForMaskedLM heads.
        outputs = (logits,) + outputs[1:]  # add hidden states and attention if they are here
        return outputs  # scores, (hidden_states), (attentions)
@add_start_docstrings(
    """DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # Shared DistilBERT encoder backbone.
        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        # Single dense layer producing two logits per token: span start / span end.
        self.qa_outputs = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
        )
        assert config.num_labels == 2
        self.dropout = tf.keras.layers.Dropout(config.qa_dropout)
    @add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.DistilBertConfig`) and inputs:
        start_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-start scores (before SoftMax).
        end_scores (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-end scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
            tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            tuple of :obj:`tf.Tensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        import tensorflow as tf
        from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
        model = TFDistilBertForQuestionAnswering.from_pretrained('distilbert-base-cased')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        start_scores, end_scores = outputs[:2]
        """
        model_output = self.distilbert(inputs, **kwargs)
        sequence_output = self.dropout(model_output[0], training=kwargs.get("training", False))  # (bs, max_query_len, dim)
        span_logits = self.qa_outputs(sequence_output)  # (bs, max_query_len, 2)
        # Separate the two per-token logits into start / end score tensors.
        start_logits, end_logits = (tf.squeeze(t, axis=-1) for t in tf.split(span_logits, 2, axis=-1))
        return (start_logits, end_logits) + model_output[1:]  # + (hidden_states), (attentions)
| 39,379 | 45.93683 | 169 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_bert_pytorch_checkpoint_to_original_tf.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint."""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a pytorch BertModel's weights as a TensorFlow checkpoint.

    :param model: BertModel Pytorch model instance to be converted
    :param ckpt_dir: Tensorflow model directory (created if it does not exist)
    :param model_name: model name; dashes become underscores in the .ckpt filename
    :return: None (writes the checkpoint to disk as a side effect)
    Currently supported HF models:
    Y BertModel
    N BertForMaskedLM
    N BertForPreTraining
    N BertForMultipleChoice
    N BertForNextSentencePrediction
    N BertForSequenceClassification
    N BertForQuestionAnswering
    """
    # Weights whose TF layout is the transpose of the PyTorch layout
    # (torch nn.Linear stores (out, in); TF Dense kernels are (in, out)).
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # Ordered name rewrites from PyTorch state-dict keys to TF variable scopes.
    # Order matters: "." -> "/" must run before the "LayerNorm/..." patterns can match.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        # Apply the rewrites in declaration order, then prefix with the "bert/" scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return "bert/{}".format(name)
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        # NOTE(review): TF1-style graph/session API (tf.Session, tf.get_variable);
        # under TF2 this would need tf.compat.v1 — presumably the script targets TF 1.x.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any([x in var_name for x in tensors_to_transpose]):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            # Sanity check: read the value back and confirm it matches the source tensor.
            tf_weight = session.run(tf_var)
            print("Successfully created {}: {}".format(tf_name, np.allclose(tf_weight, torch_tensor)))
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """CLI entry point: load a pytorch BertModel and export it as a TF checkpoint.

    Args:
        raw_args: optional list of argument tokens (defaults to ``sys.argv[1:]``).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    # Load the weights from disk first; from_pretrained still supplies the
    # architecture/config looked up from `model_name`.
    state_dict = torch.load(args.pytorch_model_path)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=state_dict,
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
    main()
| 4,115 | 35.424779 | 118 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from .configuration_transfo_xl import TransfoXLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax
from .modeling_utils import PreTrainedModel
logger = logging.getLogger(__name__)
# Download locations for the pretrained checkpoints, keyed by shortcut name.
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
def build_tf_to_pytorch_map(model, config):
    """ A map of modules from TF to PyTorch.
        This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.

        Returns a dict mapping TF checkpoint variable names to the PyTorch
        parameter (or, for the shared r_r/r_w biases, a list of parameters)
        that should receive each weight.
    """
    tf_to_pt_map = {}
    if hasattr(model, "transformer"):
        # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
        tf_to_pt_map.update(
            {
                "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
                "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
            }
        )
        # One output layer / projection per adaptive-softmax cutoff cluster.
        for i, (out_l, proj_l, tie_proj) in enumerate(
            zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
        ):
            layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
            if config.tie_weight:
                # Weights are tied to the input embeddings; only the bias is loaded here.
                tf_to_pt_map.update({layer_str + "b": out_l.bias})
            else:
                raise NotImplementedError
                # I don't think this is implemented in the TF code
                # NOTE(review): the update below is unreachable (it follows the
                # raise); kept byte-identical to the original upstream code.
                tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
            if not tie_proj:
                tf_to_pt_map.update({layer_str + "proj": proj_l})
        # Now load the rest of the transformer
        model = model.transformer
    # Embeddings
    for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
        layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
        tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})
    # Transformer blocks
    for i, b in enumerate(model.layers):
        layer_str = "transformer/layer_%d/" % i
        tf_to_pt_map.update(
            {
                layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
                layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
                layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
                layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
                layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
                layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
                layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
                layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
                layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
                layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
                layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
            }
        )
    # Relative positioning biases
    if config.untie_r:
        # Per-layer biases: the single TF variable is split across layers at load time.
        r_r_list = []
        r_w_list = []
        for b in model.layers:
            r_r_list.append(b.dec_attn.r_r_bias)
            r_w_list.append(b.dec_attn.r_w_bias)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
    tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
    return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
    """ Load tf checkpoints in a pytorch model.

        Copies every variable found in the TF checkpoint at `tf_path` into the
        matching parameter of `model` (mapping via build_tf_to_pytorch_map) and
        returns the model.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_to_pytorch_map(model, config)
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array
    for name, pointer in tf_to_pt_map.items():
        assert name in tf_weights
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        # Kernels and projections are stored transposed in the TF checkpoint.
        if "kernel" in name or "proj" in name:
            array = np.transpose(array)
        if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
            # Here we will split the TF weights: one stacked TF variable maps
            # onto a list of per-layer PyTorch parameters (untied biases).
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        # Remove the consumed weight and its optimizer slot variables so the
        # final log lists only what was never copied.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/Adam", None)
        tf_weights.pop(name + "/Adam_1", None)
    logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
    return model
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding (Transformer-XL style).

    Frequencies follow the 1/10000^(2i/d) schedule; each position's embedding
    is the concatenation of the sines and cosines of pos * inv_freq.
    """

    def __init__(self, demb):
        super().__init__()
        self.demb = demb
        # Buffer (not a parameter): moves with .to()/.cuda() but is never trained.
        self.register_buffer("inv_freq", 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)))

    def forward(self, pos_seq, bsz=None):
        """Return embeddings of shape (len(pos_seq), 1, demb), expanded to
        (len(pos_seq), bsz, demb) when `bsz` is given."""
        angles = torch.ger(pos_seq, self.inv_freq)
        emb = torch.cat((angles.sin(), angles.cos()), dim=-1)[:, None, :]
        if bsz is None:
            return emb
        return emb.expand(-1, bsz, -1)
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
super().__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = self.CoreNet(inp)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class RelPartialLearnableMultiHeadAttn(nn.Module):
    """Multi-head attention with Transformer-XL relative positional encoding.

    Inputs use the time-first layout (qlen, bsz, d_model); `r` carries the
    positional embeddings and `mems` an optional cache of previous hidden
    states that extends the key/value context.
    """
    def __init__(
        self,
        n_head,
        d_model,
        d_head,
        dropout,
        dropatt=0,
        tgt_len=None,
        ext_len=None,
        mem_len=None,
        pre_lnorm=False,
        r_r_bias=None,
        r_w_bias=None,
        output_attentions=False,
        layer_norm_epsilon=1e-5,
    ):
        super().__init__()
        self.output_attentions = output_attentions
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Fused projection producing queries, keys and values in one matmul.
        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
        self.scale = 1 / (d_head ** 0.5)
        self.pre_lnorm = pre_lnorm
        if r_r_bias is None or r_w_bias is None:  # Biases are not shared
            # NOTE: created uninitialized (torch.FloatTensor); the owning model's
            # _init_weights is expected to initialize them.
            self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
            self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        else:
            self.r_r_bias = r_r_bias
            self.r_w_bias = r_w_bias
        # Separate projection for the relative positional embeddings.
        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
    def _rel_shift(self, x):
        # Standard Transformer-XL pad-and-reshape trick: realigns the second
        # axis so entry (i, j) refers to relative distance between i and j.
        zero_pad_shape = (x.size(0), 1) + x.size()[2:]
        zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1)
        x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
        x_padded = x_padded.view(*x_padded_shape)
        x = x_padded[1:].view_as(x)
        return x
    def forward(self, w, r, attn_mask=None, mems=None, head_mask=None):
        """w: (qlen, bsz, d_model) input; r: (rlen, 1, d_model) positional embs.

        Returns [output] or [output, attn_prob] when output_attentions is set.
        """
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
        if mems is not None:
            # Prepend cached states so keys/values cover the extended context;
            # queries are only computed for the current qlen tokens.
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # qlen x n_head x d_head
        # compute attention score
        # "AC"/"BD" follow the term names of the Transformer-XL paper's
        # attention decomposition: content-based vs position-based scores.
        rw_head_q = w_head_q + self.r_w_bias  # qlen x bsz x n_head x d_head
        AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k))  # qlen x klen x bsz x n_head
        rr_head_q = w_head_q + self.r_r_bias
        BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k))  # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        # compute attention probability
        if attn_mask is not None and torch.sum(attn_mask).item():
            attn_mask = attn_mask == 1  # Switch to bool
            # Masked positions get a large negative score before softmax;
            # -65000 keeps fp16 finite, -1e30 is used for fp32.
            if attn_mask.dim() == 2:
                if next(self.parameters()).dtype == torch.float16:
                    attn_score = (
                        attn_score.float().masked_fill(attn_mask[None, :, :, None], -65000).type_as(attn_score)
                    )
                else:
                    attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], -1e30).type_as(attn_score)
            elif attn_mask.dim() == 3:
                if next(self.parameters()).dtype == torch.float16:
                    attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -65000).type_as(attn_score)
                else:
                    attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -1e30).type_as(attn_score)
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask
        # compute attention vector
        attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        # linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            # residual connection
            outputs = [w + attn_out]
        else:
            # residual connection + layer normalization
            outputs = [self.layer_norm(w + attn_out)]
        if self.output_attentions:
            outputs.append(attn_prob)
        return outputs
class RelPartialLearnableDecoderLayer(nn.Module):
    """One Transformer-XL decoder block: relative multi-head attention
    followed by a position-wise feed-forward sub-layer."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):
        super().__init__()
        self.dec_attn = RelPartialLearnableMultiHeadAttn(
            n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs
        )
        self.pos_ff = PositionwiseFF(
            d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon
        )

    def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None):
        """Run attention (optionally over cached `mems`), then the FF block.

        Returns [hidden_states] plus the attention probabilities when the
        attention module was built with output_attentions=True.
        """
        attn_out = self.dec_attn(dec_inp, r, attn_mask=dec_attn_mask, mems=mems, head_mask=head_mask)
        return [self.pos_ff(attn_out[0])] + attn_out[1:]
class AdaptiveEmbedding(nn.Module):
    """Adaptive input embeddings: frequent tokens (low ids, before the first
    cutoff) get full-width embeddings; rarer clusters get progressively
    narrower ones (d_embed // div_val**i), each projected up to d_proj.
    """
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        # Append n_token so cutoff_ends brackets every cluster: [0, c0, ..., n_token].
        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj
        self.emb_scale = d_proj ** 0.5
        self.cutoff_ends = [0] + self.cutoffs
        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            # Single full-vocabulary embedding; project only if widths differ.
            self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
            if d_proj != d_embed:
                # NOTE: torch.FloatTensor is uninitialized here; the owning
                # model's _init_weights is expected to initialize emb_projs.
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
        else:
            # One (embedding, projection) pair per cluster, widths shrinking by div_val.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val ** i)
                self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
    def forward(self, inp):
        """Look up embeddings for token ids `inp` (any shape); returns a tensor
        of shape inp.shape + (d_proj,), scaled by sqrt(d_proj)."""
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            # Flatten, embed each cluster's tokens separately, then scatter the
            # projected vectors back into one (num_tokens, d_proj) buffer.
            param = next(self.parameters())
            inp_flat = inp.view(-1)
            emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
                # NOTE(review): squeeze() yields a 0-dim tensor when exactly one
                # token falls in this cluster — confirm index_select/index_copy_
                # accept that in the supported torch versions.
                indices_i = mask_i.nonzero().squeeze()
                if indices_i.numel() == 0:
                    continue
                inp_i = inp_flat.index_select(0, indices_i) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = F.linear(emb_i, self.emb_projs[i])
                emb_flat.index_copy_(0, indices_i, emb_i)
            embed_shape = inp.size() + (self.d_proj,)
            embed = emb_flat.view(embed_shape)
        embed.mul_(self.emb_scale)
        return embed
class TransfoXLPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = TransfoXLConfig
    pretrained_model_archive_map = TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_transfo_xl
    base_model_prefix = "transformer"
    def _init_weight(self, weight):
        # Uniform or normal init, selected by config.init.
        if self.config.init == "uniform":
            nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
        elif self.config.init == "normal":
            nn.init.normal_(weight, 0.0, self.config.init_std)
    def _init_bias(self, bias):
        nn.init.constant_(bias, 0.0)
    def _init_weights(self, m):
        """ Initialize the weights.

            Dispatches on the module's class name. The elif order matters:
            "AdaptiveEmbedding" must be tested before "Embedding" because the
            latter substring also matches it.
        """
        classname = m.__class__.__name__
        if classname.find("Linear") != -1:
            if hasattr(m, "weight") and m.weight is not None:
                self._init_weight(m.weight)
            if hasattr(m, "bias") and m.bias is not None:
                self._init_bias(m.bias)
        elif classname.find("AdaptiveEmbedding") != -1:
            if hasattr(m, "emb_projs"):
                for i in range(len(m.emb_projs)):
                    if m.emb_projs[i] is not None:
                        nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
        elif classname.find("Embedding") != -1:
            if hasattr(m, "weight"):
                self._init_weight(m.weight)
        elif classname.find("ProjectedAdaptiveLogSoftmax") != -1:
            if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
                self._init_weight(m.cluster_weight)
            if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
                self._init_bias(m.cluster_bias)
            if hasattr(m, "out_projs"):
                for i in range(len(m.out_projs)):
                    if m.out_projs[i] is not None:
                        nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
        elif classname.find("LayerNorm") != -1:
            if hasattr(m, "weight"):
                nn.init.normal_(m.weight, 1.0, self.config.init_std)
            if hasattr(m, "bias") and m.bias is not None:
                self._init_bias(m.bias)
        else:
            # Fallback for the attention/positional bias parameters, which live
            # on modules whose class names match none of the above.
            if hasattr(m, "r_emb"):
                self._init_weight(m.r_emb)
            if hasattr(m, "r_w_bias"):
                self._init_weight(m.r_w_bias)
            if hasattr(m, "r_r_bias"):
                self._init_weight(m.r_r_bias)
            if hasattr(m, "r_bias"):
                self._init_bias(m.r_bias)
TRANSFO_XL_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.TransfoXLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
TRANSFO_XL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.TransfoXLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLModel(TransfoXLPreTrainedModel):
    """Core Transformer-XL encoder: adaptive input embeddings, a stack of
    relative-attention decoder layers, and a rolling memory (`mems`) of past
    hidden states used to extend the attention context across segments.
    """
    def __init__(self, config):
        super().__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.n_token = config.vocab_size
        self.d_embed = config.d_embed
        self.d_model = config.d_model
        self.n_head = config.n_head
        self.d_head = config.d_head
        self.word_emb = AdaptiveEmbedding(
            config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
        )
        self.drop = nn.Dropout(config.dropout)
        self.n_layer = config.n_layer
        self.tgt_len = config.tgt_len
        self.mem_len = config.mem_len
        self.ext_len = config.ext_len
        self.max_klen = config.tgt_len + config.ext_len + config.mem_len
        self.attn_type = config.attn_type
        if not config.untie_r:
            # Shared relative-position biases, passed into every layer below.
            self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
            self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.layers = nn.ModuleList()
        if config.attn_type == 0:  # the default attention
            for i in range(config.n_layer):
                self.layers.append(
                    RelPartialLearnableDecoderLayer(
                        config.n_head,
                        config.d_model,
                        config.d_head,
                        config.d_inner,
                        config.dropout,
                        tgt_len=config.tgt_len,
                        ext_len=config.ext_len,
                        mem_len=config.mem_len,
                        dropatt=config.dropatt,
                        pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if config.untie_r else self.r_w_bias,
                        r_r_bias=None if config.untie_r else self.r_r_bias,
                        output_attentions=self.output_attentions,
                        layer_norm_epsilon=config.layer_norm_epsilon,
                    )
                )
        else:  # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
            raise NotImplementedError  # Removed them to avoid maintaining dead code
        self.same_length = config.same_length
        self.clamp_len = config.clamp_len
        if self.attn_type == 0:  # default attention
            self.pos_emb = PositionalEmbedding(self.d_model)
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
        self.init_weights()
    def get_input_embeddings(self):
        return self.word_emb
    def set_input_embeddings(self, new_embeddings):
        self.word_emb = new_embeddings
    def backward_compatible(self):
        self.sample_softmax = -1
    def reset_length(self, tgt_len, ext_len, mem_len):
        # Adjust target/extended/memory lengths, e.g. between training and eval.
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len
    def _prune_heads(self, heads):
        logger.info("Head pruning is not implemented for Transformer-XL model")
        pass
    def init_mems(self, bsz):
        """Return a fresh list of n_layer zero tensors of shape
        (mem_len, bsz, d_model), or None when memory is disabled."""
        if self.mem_len > 0:
            mems = []
            param = next(self.parameters())
            for i in range(self.n_layer):
                empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)
                mems.append(empty)
            return mems
        else:
            return None
    def _update_mems(self, hids, mems, mlen, qlen):
        # does not deal with None
        if mems is None:
            return None
        # mems is not None
        assert len(hids) == len(mems), "len(hids) != len(mems)"
        # There are `mlen + qlen` steps that can be cached into mems
        # For the next step, the last `ext_len` of the `qlen` tokens
        # will be used as the extended context. Hence, we only cache
        # the tokens from `mlen + qlen - self.ext_len - self.mem_len`
        # to `mlen + qlen - self.ext_len`.
        with torch.no_grad():
            new_mems = []
            # NOTE: the `- 0` is kept from the original upstream code.
            end_idx = mlen + max(0, qlen - 0 - self.ext_len)
            beg_idx = max(0, end_idx - self.mem_len)
            for i in range(len(hids)):
                # Detach so cached states never carry gradients across segments.
                cat = torch.cat([mems[i], hids[i]], dim=0)
                new_mems.append(cat[beg_idx:end_idx].detach())
        return new_mems
    @add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, mems=None, head_mask=None, inputs_embeds=None):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import TransfoXLTokenizer, TransfoXLModel
        import torch
        tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
        model = TransfoXLModel.from_pretrained('transfo-xl-wt103')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states, mems = outputs[:2]
        """
        # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
        # so we transpose here from shape [bsz, len] to shape [len, bsz]
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_ids = input_ids.transpose(0, 1).contiguous()
            qlen, bsz = input_ids.size()
        elif inputs_embeds is not None:
            inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
            qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if mems is None:
            mems = self.init_mems(bsz)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer
        if inputs_embeds is not None:
            word_emb = inputs_embeds
        else:
            word_emb = self.word_emb(input_ids)
        mlen = mems[0].size(0) if mems is not None else 0
        klen = mlen + qlen
        # Build the causal attention mask over the extended (mems + input) context.
        if self.same_length:
            # Every query attends to the same number of keys (sliding window).
            all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8)
            mask_len = klen - self.mem_len
            if mask_len > 0:
                mask_shift_len = qlen - mask_len
            else:
                mask_shift_len = qlen
            dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None]  # -1
        else:
            dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1 + mlen)[
                :, :, None
            ]
        hids = []
        attentions = []
        if self.attn_type == 0:  # default
            # Positions run from klen-1 down to 0 (relative distances).
            pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)
            core_out = self.drop(word_emb)
            pos_emb = self.drop(pos_emb)
            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                layer_outputs = layer(
                    core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i, head_mask=head_mask[i]
                )
                core_out = layer_outputs[0]
                if self.output_attentions:
                    attentions.append(layer_outputs[1])
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
        core_out = self.drop(core_out)
        new_mems = self._update_mems(hids, mems, mlen, qlen)
        # We transpose back here to shape [bsz, len, hidden_dim]
        outputs = [core_out.transpose(0, 1).contiguous(), new_mems]
        if self.output_hidden_states:
            # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
            hids.append(core_out)
            hids = list(t.transpose(0, 1).contiguous() for t in hids)
            outputs.append(hids)
        if self.output_attentions:
            # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
            attentions = list(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs.append(attentions)
        return outputs  # last hidden state, new_mems, (all hidden states), (all attentions)
@add_start_docstrings(
    """The Transformer-XL Model with a language modeling head on top
    (adaptive softmax with weights tied to the adaptive input embeddings)""",
    TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.transformer = TransfoXLModel(config)
        self.sample_softmax = config.sample_softmax
        # Only the adaptive-softmax path is implemented; sampled softmax is explicitly rejected here.
        assert (
            self.sample_softmax <= 0
        ), "Sampling from the softmax is not implemented yet. Please look at issue: #3310: https://github.com/huggingface/transformers/issues/3310"
        self.crit = ProjectedAdaptiveLogSoftmax(
            config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
        )
        self.init_weights()
    def tie_weights(self):
        """
        Run this to be sure output and input (adaptive) softmax weights are tied.

        Ties each adaptive-softmax output cluster to the matching adaptive
        input-embedding cluster, and (optionally) the projection matrices.
        """
        if self.config.tie_weight:
            for i in range(len(self.crit.out_layers)):
                self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])
        if self.config.tie_projs:
            for i, tie_proj in enumerate(self.config.tie_projs):
                if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
                    # div_val == 1: a single shared input projection (index 0).
                    # TorchScript cannot share parameters across modules, so clone in that case.
                    if self.config.torchscript:
                        self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
                    else:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
                elif tie_proj and self.config.div_val != 1:
                    # div_val != 1: one projection per embedding cluster.
                    if self.config.torchscript:
                        self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
                    else:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
    def reset_length(self, tgt_len, ext_len, mem_len):
        # Delegate to the base transformer (adjusts target / extended / memory lengths).
        self.transformer.reset_length(tgt_len, ext_len, mem_len)
    def init_mems(self, bsz):
        # Delegate memory initialization to the base transformer.
        return self.transformer.init_mems(bsz)
    @add_start_docstrings_to_callable(TRANSFO_XL_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, mems=None, head_mask=None, inputs_embeds=None, labels=None):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
            All labels set to ``-100`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.TransfoXLConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
            Language modeling loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel
        import torch
        tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
        model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores, mems = outputs[:2]
        """
        if input_ids is not None:
            bsz, tgt_len = input_ids.size(0), input_ids.size(1)
        elif inputs_embeds is not None:
            bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        transformer_outputs = self.transformer(input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds)
        last_hidden = transformer_outputs[0]
        # Keep only the last tgt_len positions of the hidden states for scoring.
        pred_hid = last_hidden[:, -tgt_len:]
        outputs = transformer_outputs[1:]
        # Adaptive softmax: the reshapes below show it returns one value per
        # token — per-token log-probs when labels is None, loss terms otherwise.
        softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), labels)
        if labels is None:
            softmax_output = softmax_output.view(bsz, tgt_len, -1)
            outputs = [softmax_output] + outputs
        else:
            softmax_output = softmax_output.view(bsz, tgt_len)
            # Logits slot is None when labels are given (avoids materializing the full softmax).
            outputs = [softmax_output, None] + outputs
        return outputs  # (loss), logits or None if labels is not None (speed up adaptive softmax), new_mems, (all hidden states), (all attentions)
    def get_output_embeddings(self):
        """ Double-check if you are using adaptive softmax.
        """
        # NOTE: sample_softmax > 0 is ruled out by the assert in __init__, so
        # the self.out_layer branch is unreachable in practice.
        if self.sample_softmax > 0:
            return self.out_layer
        else:
            return self.crit.out_layers[-1]
    def prepare_inputs_for_generation(self, input_ids, past, **model_kwargs):
        inputs = {"input_ids": input_ids}
        # if past is defined in model kwargs then use it for faster decoding
        if past:
            inputs["mems"] = past
        return inputs
| 39,986 | 41.858521 | 151 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
import argparse
import logging
import os
import torch
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
# Number of output labels for each GLUE task; used below to size the
# sequence-classification head when converting a fine-tuned checkpoint.
# ("sts-b" has a single output — presumably a regression target.)
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
# Show progress of the conversion on stdout.
logging.basicConfig(level=logging.INFO)
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch checkpoint.

    The model class is chosen from ``finetuning_task``: a GLUE task name
    selects a sequence-classification head, a task containing "squad"
    selects a question-answering head, anything else uses the plain LM head.
    Weights and the config JSON are written into ``pytorch_dump_folder_path``.
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    task = "" if finetuning_task is None else finetuning_task.lower()
    if task in GLUE_TASKS_NUM_LABELS:
        print("Building PyTorch XLNetForSequenceClassification model from configuration: {}".format(str(config)))
        config.finetuning_task = task
        config.num_labels = GLUE_TASKS_NUM_LABELS[task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in task:
        config.finetuning_task = task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Copy every TF variable into the freshly built PyTorch model.
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Persist the weights and the configuration side by side.
    weights_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    config_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print("Save PyTorch model to {}".format(os.path.abspath(weights_path)))
    torch.save(model.state_dict(), weights_path)
    print("Save configuration file to {}".format(os.path.abspath(config_path)))
    with open(config_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained XLNet model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    # Optional: controls which model head the conversion builds.
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",  # fixed "TensorFloaw" typo
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 3,685 | 31.052174 | 117 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_albert.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALBERT model. """
import logging
import math
import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.configuration_albert import AlbertConfig
from transformers.modeling_bert import ACT2FN, BertEmbeddings, BertSelfAttention, prune_linear_layer
from transformers.modeling_utils import PreTrainedModel
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
logger = logging.getLogger(__name__)
# Shortcut model name -> URL of the pretrained PyTorch weight file for that ALBERT variant.
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-pytorch_model.bin",
    "albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-pytorch_model.bin",
    "albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-pytorch_model.bin",
    "albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-pytorch_model.bin",
    "albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-pytorch_model.bin",
    "albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-pytorch_model.bin",
    "albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-pytorch_model.bin",
    "albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-pytorch_model.bin",
}
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model.

    Every variable in the TensorFlow checkpoint at ``tf_checkpoint_path`` is
    renamed via the string substitutions below to the matching PyTorch module
    path inside ``model`` and copied in place. Optimizer slot variables and
    the next-sentence-prediction head are skipped. Returns the mutated
    ``model``.

    Raises:
        ImportError: if TensorFlow is not installed.
        AssertionError: if a checkpoint array's shape does not match the
            target PyTorch parameter's shape.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        print(name)
    for name, array in zip(names, arrays):
        original_name = name
        # If saved from the TF HUB module
        name = name.replace("module/", "")
        # Renaming and simplifying
        name = name.replace("ffn_1", "ffn")
        name = name.replace("bert/", "albert/")
        name = name.replace("attention_1", "attention")
        name = name.replace("transform/", "")
        name = name.replace("LayerNorm_1", "full_layer_layer_norm")
        name = name.replace("LayerNorm", "attention/LayerNorm")
        name = name.replace("transformer/", "")
        # The feed forward layer had an 'intermediate' step which has been abstracted away
        name = name.replace("intermediate/dense/", "")
        name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
        # ALBERT attention was split between self and output which have been abstracted away
        name = name.replace("/output/", "/")
        name = name.replace("/self/", "/")
        # The pooler is a linear layer
        name = name.replace("pooler/dense", "pooler")
        # The classifier was simplified to predictions from cls/predictions
        name = name.replace("cls/predictions", "predictions")
        name = name.replace("predictions/attention", "predictions")
        # Naming was changed to be more explicit
        name = name.replace("embeddings/attention", "embeddings")
        name = name.replace("inner_group_", "albert_layers/")
        name = name.replace("group_", "albert_layer_groups/")
        # Classifier
        if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
            name = "classifier/" + name
        # No ALBERT model currently handles the next sentence prediction task
        if "seq_relationship" in name:
            continue
        name = name.split("/")
        # Ignore the gradients applied by the LAMB/ADAM optimizers.
        if (
            "adam_m" in name
            or "adam_v" in name
            or "AdamWeightDecayOptimizer" in name
            or "AdamWeightDecayOptimizer_1" in name
            or "global_step" in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        # Walk down the PyTorch module tree one path component at a time.
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. "layer_3" -> attribute "layer", index 3
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info("Skipping {}".format("/".join(name)))
                    # NOTE(review): this `continue` only skips the current path
                    # component, not the whole variable — confirm that is intended.
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch Linear weights.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes to the error to make mismatches easy to debug.
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {} from {}".format(name, original_name))
        pointer.data = torch.from_numpy(array)
    return model
class AlbertEmbeddings(BertEmbeddings):
    """Word / position / token-type embeddings for ALBERT.

    Overrides the parent's lookup tables so that every one of them projects
    into ``config.embedding_size`` (ALBERT's factorized embedding dimension).
    """

    def __init__(self, config):
        super().__init__(config)
        emb_dim = config.embedding_size
        # Re-create the three tables and the layer norm at the embedding dimension.
        self.word_embeddings = nn.Embedding(config.vocab_size, emb_dim, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, emb_dim)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, emb_dim)
        self.LayerNorm = torch.nn.LayerNorm(emb_dim, eps=config.layer_norm_eps)
class AlbertAttention(BertSelfAttention):
    """Multi-head self-attention that also applies the output projection,
    dropout, and the residual LayerNorm in a single module."""

    def __init__(self, config):
        super().__init__(config)
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Indices of heads that have already been pruned (see prune_heads).
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given heads and shrink q/k/v and the output projection."""
        if len(heads) == 0:
            return
        mask = torch.ones(self.num_attention_heads, self.attention_head_size)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
        for head in heads:
            # Compute how many pruned heads are before the head and move the index accordingly
            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.query = prune_linear_layer(self.query, index)
        self.key = prune_linear_layer(self.key, index)
        self.value = prune_linear_layer(self.value, index)
        self.dense = prune_linear_layer(self.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.num_attention_heads = self.num_attention_heads - len(heads)
        self.all_head_size = self.attention_head_size * self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(self, input_ids, attention_mask=None, head_mask=None):
        # NOTE: despite the name, `input_ids` is the hidden-states tensor — it
        # feeds the q/k/v projections and the residual connection below.
        mixed_query_layer = self.query(input_ids)
        mixed_key_layer = self.key(input_ids)
        mixed_value_layer = self.value(input_ids)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        # Should find a better way to do this
        # Apply the output projection head-wise: w is [heads, head_size, hidden].
        w = (
            self.dense.weight.t()
            .view(self.num_attention_heads, self.attention_head_size, self.hidden_size)
            .to(context_layer.dtype)
        )
        b = self.dense.bias.to(context_layer.dtype)
        projected_context_layer = torch.einsum("bfnd,ndh->bfh", context_layer, w) + b
        projected_context_layer_dropout = self.dropout(projected_context_layer)
        # Residual connection plus post-attention layer norm.
        layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout)
        return (layernormed_context_layer, attention_probs) if self.output_attentions else (layernormed_context_layer,)
class AlbertLayer(nn.Module):
    """One ALBERT transformer layer: self-attention followed by a feed-forward block."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attention = AlbertAttention(config)
        self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
        self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        """Apply self-attention, then the position-wise feed-forward network."""
        attn_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attn_hidden = attn_outputs[0]
        # Feed-forward: expand to the intermediate size, activate, project back.
        expanded = self.activation(self.ffn(attn_hidden))
        projected = self.ffn_output(expanded)
        # Residual around the FFN followed by the post-block layer norm.
        layer_output = self.full_layer_layer_norm(projected + attn_hidden)
        return (layer_output,) + attn_outputs[1:]  # keep attention probs if present
class AlbertLayerGroup(nn.Module):
    """A group of inner ALBERT layers applied sequentially to the same states."""

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])

    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        """Run every inner layer, optionally collecting per-layer states/attentions."""
        collected_states = []
        collected_attentions = []
        for idx, inner_layer in enumerate(self.albert_layers):
            layer_out = inner_layer(hidden_states, attention_mask, head_mask[idx])
            hidden_states = layer_out[0]
            if self.output_attentions:
                collected_attentions.append(layer_out[1])
            if self.output_hidden_states:
                collected_states.append(hidden_states)
        result = (hidden_states,)
        if self.output_hidden_states:
            result += (tuple(collected_states),)
        if self.output_attentions:
            result += (tuple(collected_attentions),)
        return result  # last-layer hidden state, (layer hidden states), (layer attentions)
class AlbertTransformer(nn.Module):
    """Stack of weight-shared ALBERT layer groups.

    Runs ``num_hidden_layers`` forward passes while reusing only
    ``num_hidden_groups`` parameter groups, so consecutive layers share weights.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
        self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])

    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        """Project embeddings to the hidden size and run all (shared) layers.

        Returns a tuple: last hidden state, optionally all hidden states,
        optionally all attentions (depending on the config output flags).
        """
        # Project from the factorized embedding size up to the hidden size.
        hidden_states = self.embedding_hidden_mapping_in(hidden_states)
        all_attentions = ()
        if self.output_hidden_states:
            all_hidden_states = (hidden_states,)
        # Loop invariants, hoisted out of the layer loop.
        # NOTE: group_idx deliberately keeps the historical float-division
        # rounding for the case num_hidden_layers % num_hidden_groups != 0.
        group_ratio = self.config.num_hidden_layers / self.config.num_hidden_groups
        # Number of layers in a hidden group
        layers_per_group = int(group_ratio)
        for i in range(self.config.num_hidden_layers):
            # Index of the hidden group for layer i
            group_idx = int(i / group_ratio)
            layer_group_output = self.albert_layer_groups[group_idx](
                hidden_states,
                attention_mask,
                head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
            )
            hidden_states = layer_group_output[0]
            if self.output_attentions:
                all_attentions = all_attentions + layer_group_output[-1]
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions)
class AlbertPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """

    config_class = AlbertConfig
    pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "albert"

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        if isinstance(module, nn.LayerNorm):
            # Layer norm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
            return
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
ALBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Args:
config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.AlbertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
    ALBERT_START_DOCSTRING,
)
class AlbertModel(AlbertPreTrainedModel):
    config_class = AlbertConfig
    pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_albert
    base_model_prefix = "albert"
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = AlbertEmbeddings(config)
        self.encoder = AlbertTransformer(config)
        self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
        self.pooler_activation = nn.Tanh()
        self.init_weights()
    def get_input_embeddings(self):
        """Return the word-embedding table."""
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        """Replace the word-embedding table."""
        self.embeddings.word_embeddings = value
    def _resize_token_embeddings(self, new_num_tokens):
        # Swap in a resized copy of the word-embedding table and return it.
        old_embeddings = self.embeddings.word_embeddings
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.embeddings.word_embeddings = new_embeddings
        return self.embeddings.word_embeddings
    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups.
        If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there
        is a total of 4 different layers.
        These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,
        while [2,3] correspond to the two inner groups of the second hidden layer.
        Any layer with in index other than [0,1,2,3] will result in an error.
        See base class PreTrainedModel for more information about head pruning
        """
        for layer, heads in heads_to_prune.items():
            group_idx = int(layer / self.config.inner_group_num)
            inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
            self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
    @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        r"""
        Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during pre-training.
            This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        Example::
            from transformers import AlbertModel, AlbertTokenizer
            import torch
            tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
            model = AlbertModel.from_pretrained('albert-base-v2')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids)
            last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # Broadcast the padding mask to [batch, 1, 1, seq] and turn 0/1 into a
        # -10000/0 additive mask applied to the attention scores.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        if head_mask is not None:
            # Expand head_mask so there is one entry per hidden layer.
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = (
                    head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
                )  # We can specify head_mask for each layer
                head_mask = head_mask.to(
                    dtype=next(self.parameters()).dtype
                )  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers
        embedding_output = self.embeddings(
            input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        # Pool: tanh(Linear(hidden state of the first token)).
        pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0]))
        outputs = (sequence_output, pooled_output) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs
class AlbertMLMHead(nn.Module):
    """Masked-LM prediction head: maps hidden states to vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.LayerNorm = nn.LayerNorm(config.embedding_size)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)
        self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
        self.activation = ACT2FN[config.hidden_act]
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        """Return prediction scores (logits over the vocabulary)."""
        # dense -> activation -> layer norm, then decode to vocabulary logits.
        transformed = self.LayerNorm(self.activation(self.dense(hidden_states)))
        return self.decoder(transformed)
@add_start_docstrings(
    "Albert Model with a `language modeling` head on top.", ALBERT_START_DOCSTRING,
)
class AlbertForMaskedLM(AlbertPreTrainedModel):
    """ALBERT encoder topped with a masked language modeling head.

    The MLM decoder projection is weight-tied to the input word embeddings.
    """

    def __init__(self, config):
        super().__init__(config)
        self.albert = AlbertModel(config)
        self.predictions = AlbertMLMHead(config)
        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        # Share (or clone, for TF-style tying) the decoder weight matrix with
        # the input embedding matrix.
        self._tie_or_clone_weights(self.predictions.decoder, self.albert.embeddings.word_embeddings)

    def get_output_embeddings(self):
        return self.predictions.decoder

    @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        masked_lm_labels=None,
    ):
        r"""
        masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring);
            tokens with index ``-100`` are ignored (masked), the loss is only computed for
            labels in ``[0, ..., config.vocab_size]``.

        Returns:
            :obj:`tuple(torch.FloatTensor)`:
            ``(masked_lm_loss,)`` when ``masked_lm_labels`` is provided, followed by
            ``prediction_scores`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            and, depending on the configuration, hidden states and attentions.

        Example::

            from transformers import AlbertTokenizer, AlbertForMaskedLM
            import torch

            tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
            model = AlbertForMaskedLM.from_pretrained('albert-base-v2')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            outputs = model(input_ids, masked_lm_labels=input_ids)
            loss, prediction_scores = outputs[:2]
        """
        encoder_outputs = self.albert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        prediction_scores = self.predictions(encoder_outputs[0])
        # Keep optional hidden states / attentions after the prediction scores.
        outputs = (prediction_scores,) + encoder_outputs[2:]
        if masked_lm_labels is None:
            return outputs
        loss_fct = CrossEntropyLoss()
        masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
        return (masked_lm_loss,) + outputs
@add_start_docstrings(
    """Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    ALBERT_START_DOCSTRING,
)
class AlbertForSequenceClassification(AlbertPreTrainedModel):
    """ALBERT with dropout + a linear classification/regression head on the pooled output."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.albert = AlbertModel(config)
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
            if ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

        Returns:
            :obj:`tuple(torch.FloatTensor)`:
            ``(loss,)`` when ``labels`` is provided, followed by ``logits`` of shape
            ``(batch_size, config.num_labels)`` and, depending on the configuration,
            hidden states and attentions.

        Examples::

            from transformers import AlbertTokenizer, AlbertForSequenceClassification
            import torch

            tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
            model = AlbertForSequenceClassification.from_pretrained('albert-base-v2')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)
            labels = torch.tensor([1]).unsqueeze(0)
            outputs = model(input_ids, labels=labels)
            loss, logits = outputs[:2]
        """
        model_outputs = self.albert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Classify from the pooled ([CLS]-derived) representation.
        logits = self.classifier(self.dropout(model_outputs[1]))
        outputs = (logits,) + model_outputs[2:]  # add hidden states and attention if they are here
        if labels is None:
            return outputs
        if self.num_labels == 1:
            # Single output unit -> regression with mean-squared error.
            loss = MSELoss()(logits.view(-1), labels.view(-1))
        else:
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
        return (loss,) + outputs  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Albert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    ALBERT_START_DOCSTRING,
)
class AlbertForTokenClassification(AlbertPreTrainedModel):
    """ALBERT with dropout + a per-token linear classification head (e.g. for NER)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.albert = AlbertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.

        Returns:
            :obj:`tuple(torch.FloatTensor)`:
            ``(loss,)`` when ``labels`` is provided, followed by per-token ``logits`` of shape
            ``(batch_size, sequence_length, config.num_labels)`` and, depending on the
            configuration, hidden states and attentions.

        Examples::

            from transformers import AlbertTokenizer, AlbertForTokenClassification
            import torch

            tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
            model = AlbertForTokenClassification.from_pretrained('albert-base-v2')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)
            outputs = model(input_ids, labels=labels)
            loss, scores = outputs[:2]
        """
        model_outputs = self.albert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Token-level logits from the full sequence output.
        logits = self.classifier(self.dropout(model_outputs[0]))
        outputs = (logits,) + model_outputs[2:]  # add hidden states and attention if they are here
        if labels is None:
            return outputs
        loss_fct = CrossEntropyLoss()
        flat_logits = logits.view(-1, self.num_labels)
        flat_labels = labels.view(-1)
        if attention_mask is not None:
            # Restrict the loss to positions where the attention mask is 1.
            keep = attention_mask.view(-1) == 1
            loss = loss_fct(flat_logits[keep], flat_labels[keep])
        else:
            loss = loss_fct(flat_logits, flat_labels)
        return (loss,) + outputs  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    ALBERT_START_DOCSTRING,
)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
    """ALBERT with a linear span-extraction head producing start/end logits."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.albert = AlbertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`);
            positions outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`);
            positions outside of the sequence are not taken into account for computing the loss.

        Returns:
            :obj:`tuple(torch.FloatTensor)`:
            ``(total_loss,)`` when both ``start_positions`` and ``end_positions`` are
            provided, followed by ``start_logits`` and ``end_logits`` of shape
            ``(batch_size, sequence_length)`` and, depending on the configuration,
            hidden states and attentions.

        Examples::

            # The checkpoint albert-base-v2 is not fine-tuned for question answering. Please see the
            # examples/run_squad.py example to see how to fine-tune a model to a question answering task.
            from transformers import AlbertTokenizer, AlbertForQuestionAnswering
            import torch

            tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
            model = AlbertForQuestionAnswering.from_pretrained('albert-base-v2')
            question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
            input_dict = tokenizer.encode_plus(question, text, return_tensors='pt')
            start_scores, end_scores = model(**input_dict)
        """
        model_outputs = self.albert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Split the 2-unit projection into start and end logit channels.
        logits = self.qa_outputs(model_outputs[0])
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits,) + model_outputs[2:]
        if start_positions is not None and end_positions is not None:
            # On multi-GPU gathers the position tensors can gain a trailing dim; drop it.
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the model inputs are clamped to `ignored_index`
            # and excluded from the loss via `ignore_index`.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
            outputs = (total_loss,) + outputs
        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
# ---- begin transformers/modeling_xlnet.py ----
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from .activations import gelu_new, swish
from .configuration_xlnet import XLNetConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
# Module-level logger, namespaced by this module's import path.
logger = logging.getLogger(__name__)

# Map of shortcut model names to download URLs for the official pretrained
# XLNet PyTorch checkpoints.
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-pytorch_model.bin",
    "xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-pytorch_model.bin",
}
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """Build a dict mapping TF checkpoint variable names to PyTorch parameters.

    Keys are TF variable names (e.g. ``model/transformer/layer_0/rel_attn/q/kernel``)
    and values are PyTorch tensors — or *lists* of per-layer tensors for the
    relative-position biases when ``config.untie_r`` is set.

    Args:
        model: the PyTorch XLNet model (with or without a task head).
        config: the model's :class:`XLNetConfig`.
        tf_weights: optional dict of TF variable name -> array, used only to
            check which optional head variables exist in the checkpoint.
    """
    tf_to_pt_map = {}

    if hasattr(model, "transformer"):
        # `model` is a task model wrapping the base transformer: map the head
        # variables first, then descend into the transformer.
        if hasattr(model, "lm_loss"):
            # We will load also the output bias
            tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
        # NOTE: "sequnece_summary" (sic) — presumably mirrors the spelling of the
        # key in the released TF checkpoints; do not "fix" it here.
        if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
            # We will load also the sequence summary
            tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
            tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
        if (
            hasattr(model, "logits_proj")
            and config.finetuning_task is not None
            and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights
        ):
            tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight
            tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias

        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings and output
    tf_to_pt_map.update(
        {
            "model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
            "model/transformer/mask_emb/mask_emb": model.mask_emb,
        }
    )

    # Transformer blocks: one attention + feed-forward sub-map per layer.
    for i, b in enumerate(model.layer):
        layer_str = "model/transformer/layer_%d/" % i
        tf_to_pt_map.update(
            {
                layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
                layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
                layer_str + "rel_attn/o/kernel": b.rel_attn.o,
                layer_str + "rel_attn/q/kernel": b.rel_attn.q,
                layer_str + "rel_attn/k/kernel": b.rel_attn.k,
                layer_str + "rel_attn/r/kernel": b.rel_attn.r,
                layer_str + "rel_attn/v/kernel": b.rel_attn.v,
                layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
                layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
                layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
                layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
                layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
                layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
            }
        )

    # Relative positioning biases: per-layer when untied, a single shared
    # tensor (wrapped in a one-element list) otherwise.
    if config.untie_r:
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update(
        {
            "model/transformer/r_r_bias": r_r_list,
            "model/transformer/r_w_bias": r_w_list,
            "model/transformer/r_s_bias": r_s_list,
            "model/transformer/seg_embed": seg_embed_list,
        }
    )
    return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
    """Load a TF checkpoint at ``tf_path`` into the PyTorch ``model`` in place.

    Requires TensorFlow and NumPy to be installed. Returns the mutated model.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info("Importing {}".format(name))
        if name not in tf_weights:
            # Optional variables (task heads) may be absent from the checkpoint.
            logger.info("{} not in tf pre-trained weights, skipping".format(name))
            continue
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
            # TF stores dense kernels transposed relative to nn.Linear.weight.
            logger.info("Transposing")
            array = np.transpose(array)
        if isinstance(pointer, list):
            # Here we will split the TF weights: one slice along axis 0 per layer.
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info("Initialize PyTorch weight {}".format(name))
            pointer.data = torch.from_numpy(array)
        # Consume the variable (and its optimizer slots) so that anything left
        # in `tf_weights` afterwards is reported as not copied.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/Adam", None)
        tf_weights.pop(name + "/Adam_1", None)
    logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
    return model
# Maps activation-name strings (from the config) to their callables.
ACT2FN = {"gelu": gelu_new, "relu": torch.nn.functional.relu, "swish": swish}

# Alias: XLNet uses the standard PyTorch LayerNorm.
XLNetLayerNorm = nn.LayerNorm
class XLNetRelativeAttention(nn.Module):
    """Two-stream multi-head attention with relative positional encoding.

    Einsum index convention used throughout (taken from the equations below):
    ``i``/``j`` = query/key sequence positions, ``b`` = batch, ``n`` = head,
    ``d`` = per-head dimension, ``h`` = model (hidden) dimension,
    ``s`` = segment index, ``m``/``l`` = target-mapping positions.
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head)
            )
        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        # 1/sqrt(d_head) scaling applied to the merged attention scores.
        self.scale = 1 / (config.d_head ** 0.5)
        # Projection weights, each of shape (d_model, n_head, d_head):
        # q/k/v = content projections, o = output projection, r = position projection.
        self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        # Relative-attention biases (content, segment, position) and the
        # two-way segment embedding, shapes (n_head, d_head) / (2, n_head, d_head).
        self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        # Head pruning is not supported for XLNet's relative attention.
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape
        # Swap the first two dims via reshape, drop the first row, reshape back,
        # then truncate to the key length.
        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
        return x

    @staticmethod
    def rel_shift_bnij(x, klen=-1):
        """Relative shift for scores laid out as (batch, head, i, j)."""
        x_size = x.shape
        x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
        x = x[:, :, 1:, :]
        x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
        # Note: the tensor-slice form was faster in my testing than torch.index_select
        # However, tracing doesn't like the nature of the slice, and if klen changes
        # during the run then it'll fail, whereas index_select will be fine.
        x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
        # x = x[:, :, :, :klen]
        return x

    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None):
        """Core relative positional attention operations."""
        # content based attention score
        ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
        # position based attention score
        bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            # -65500 is used instead of -1e30 so the additive mask stays
            # representable in fp16 (whose max finite value is ~65504).
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
            else:
                attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
        # attention probability
        attn_prob = F.softmax(attn_score, dim=3)
        attn_prob = self.dropout(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
        # attention output
        attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
        if self.output_attentions:
            # Also return the probabilities, permuted back to (i, j, b, n) layout.
            return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)
        return output

    def forward(self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None):
        # h = content stream, g = query stream (None outside two-stream mode).
        if g is not None:
            # Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                # Prepend cached memory states along the sequence dimension.
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content-based key head
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            # content-based value head
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # position-based key head
            k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
            # h-stream
            # content-stream query head
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
            )
            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h
            # post processing
            output_h = self.post_attention(h, attn_vec_h)
            # g-stream
            # query-stream query head
            q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
            # core attention ops
            if target_mapping is not None:
                # Project query positions through the target mapping before and
                # after attention so g attends at the mapped target positions.
                q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
                )
                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
                attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask
                )
                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
            # post processing
            output_g = self.post_attention(g, attn_vec_g)
            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g
        else:
            # Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content heads
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # positional heads
            k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask
            )
            if self.output_attentions:
                attn_vec, attn_prob = attn_vec
            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None
        outputs = (output_h, output_g)
        if self.output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs
class XLNetFeedForward(nn.Module):
    """Position-wise two-layer feed-forward block with residual + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        # The activation may be configured either as a name or as a callable.
        if isinstance(config.ff_activation, str):
            self.activation_function = ACT2FN[config.ff_activation]
        else:
            self.activation_function = config.ff_activation

    def forward(self, inp):
        """Apply linear -> activation -> dropout -> linear -> dropout, then add & norm."""
        hidden = self.dropout(self.activation_function(self.layer_1(inp)))
        hidden = self.dropout(self.layer_2(hidden))
        # Residual connection followed by layer normalization.
        return self.layer_norm(hidden + inp)
class XLNetLayer(nn.Module):
    """One XLNet block: relative attention followed by a feed-forward sub-layer."""

    def __init__(self, config):
        super().__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)

    def forward(
        self, output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None
    ):
        attn_outputs = self.rel_attn(
            output_h,
            output_g,
            attn_mask_h,
            attn_mask_g,
            r,
            seg_mat,
            mems=mems,
            target_mapping=target_mapping,
            head_mask=head_mask,
        )
        output_h, output_g = attn_outputs[:2]
        # The g (query) stream may be None; apply the feed-forward only when present.
        if output_g is not None:
            output_g = self.ff(output_g)
        output_h = self.ff(output_h)
        # Propagate attention probabilities when the attention module returned them.
        return (output_h, output_g) + attn_outputs[2:]
class XLNetPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    # Class attributes consumed by PreTrainedModel's loading machinery.
    config_class = XLNetConfig
    pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, XLNetLayerNorm):
            # LayerNorm starts as identity: zero bias, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            # Raw nn.Parameter projections/biases are not covered by the
            # Linear/Embedding branch, so initialize them explicitly.
            for param in [
                module.q,
                module.k,
                module.v,
                module.o,
                module.r,
                module.r_r_bias,
                module.r_s_bias,
                module.r_w_bias,
                module.seg_embed,
            ]:
                param.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, XLNetModel):
            module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
XLNET_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
# Shared ``forward`` docstring fragment; injected into each model's forward via
# ``add_start_docstrings_to_callable``.
XLNET_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using :class:`transformers.BertTokenizer`.
            See :func:`transformers.PreTrainedTokenizer.encode` and
            :func:`transformers.PreTrainedTokenizer.encode_plus` for details.
            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
            `What are attention masks? <../glossary.html#attention-mask>`__
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
            given to this model should not be passed as input ids as they have already been computed.
        perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
            If ``perm_mask[k, i, j] = 0``, i attend to j in batch k;
            if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
            If None, each token attends to all the others (full bidirectional attention).
            Only used during pretraining (to define factorization order) or for sequential decoding (generation).
        target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to indicate the output tokens to use.
            If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token.
            Only used during pretraining for partial prediction or for sequential decoding (generation).
        token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Segment token indices to indicate first and second portions of the inputs.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token. The classifier token should be represented by a ``2``.
            `What are token type IDs? <../glossary.html#token-type-ids>`_
        input_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to avoid performing attention on padding token indices.
            Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
            Kept for compatibility with the original code base.
            You can only use one of `input_mask` and `attention_mask`
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
    XLNET_START_DOCSTRING,
)
class XLNetModel(XLNetPreTrainedModel):
    """Bare XLNet transformer with two-stream relative attention and optional
    Transformer-XL style memory caching."""

    def __init__(self, config):
        super().__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.output_past = config.output_past
        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer

        self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
        # Learned embedding used in place of the word embedding for tokens to be
        # predicted (the query / g stream input).
        self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
        self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
        self.dropout = nn.Dropout(config.dropout)

        self.init_weights()

    def get_input_embeddings(self):
        """Return the input word-embedding module."""
        return self.word_embedding

    def set_input_embeddings(self, new_embeddings):
        """Replace the input word-embedding module."""
        self.word_embedding = new_embeddings

    def _prune_heads(self, heads_to_prune):
        # Head pruning is not implemented for XLNet.
        raise NotImplementedError

    def create_mask(self, qlen, mlen):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
        Args:
            qlen: Sequence length
            mlen: Mask length
        ::
                  same_length=False:      same_length=True:
                  <mlen > <  qlen >       <mlen > <  qlen >
               ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
                 [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
            qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
                 [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
               v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]
        """
        attn_mask = torch.ones([qlen, qlen])
        mask_up = torch.triu(attn_mask, diagonal=1)
        attn_mask_pad = torch.zeros([qlen, mlen])
        ret = torch.cat([attn_mask_pad, mask_up], dim=1)
        if self.same_length:
            # Additionally mask tokens further back than qlen, so every position
            # attends to the same number of tokens.
            mask_lo = torch.tril(attn_mask, diagonal=-1)
            ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)

        ret = ret.to(next(self.parameters()))  # move to the model's device/dtype
        return ret

    def cache_mem(self, curr_out, prev_mem):
        """Cache hidden states of the current segment into memory (detached),
        keeping at most the last ``self.mem_len`` positions."""
        if self.reuse_len is not None and self.reuse_len > 0:
            curr_out = curr_out[: self.reuse_len]

        if prev_mem is None:
            new_mem = curr_out[-self.mem_len :]
        else:
            new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len :]

        return new_mem.detach()

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
        """Build sinusoidal positional embeddings of shape [len, bsz or 1, d_model]."""
        sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
        pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
        pos_emb = pos_emb[:, None, :]

        if bsz is not None:
            pos_emb = pos_emb.expand(-1, bsz, -1)

        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None):
        """Create the relative positional encoding used by relative attention."""
        freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
        inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))

        if self.attn_type == "bi":
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == "uni":
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError("Unknown `attn_type` {}.".format(self.attn_type))

        if self.bi_data:
            # Forward and backward position sequences (used with bidirectional data).
            fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)

            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)

            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)

            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)

        pos_emb = pos_emb.to(next(self.parameters()))  # move to the model's device/dtype
        return pos_emb

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
            `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import XLNetTokenizer, XLNetModel
        import torch
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetModel.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=False)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
        """
        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so we move here the first dimension (batch) to the end
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_ids = input_ids.transpose(0, 1).contiguous()
            qlen, bsz = input_ids.shape[0], input_ids.shape[1]
        elif inputs_embeds is not None:
            inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
            qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
        input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
        attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
        perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
        target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None

        mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
        klen = mlen + qlen

        dtype_float = next(self.parameters()).dtype
        device = next(self.parameters()).device

        # Attention mask
        # causal attention mask
        if self.attn_type == "uni":
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == "bi":
            attn_mask = None
        else:
            raise ValueError("Unsupported attention type: {}".format(self.attn_type))

        # data mask: input mask & perm mask
        # FIX: the message used to be split across two statements, leaving the second
        # half as a dead expression and truncating the assert's error message.
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
        )
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            if mlen > 0:
                mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
                data_mask = torch.cat([mems_mask, data_mask], dim=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)

        if attn_mask is not None:
            # Content stream mask: like attn_mask but each token may also attend to itself.
            non_tgt_mask = -torch.eye(qlen).to(attn_mask)
            if mlen > 0:
                non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
        else:
            non_tgt_mask = None

        # Word embeddings and prepare h & g hidden states
        if inputs_embeds is not None:
            word_emb_k = inputs_embeds
        else:
            word_emb_k = self.word_embedding(input_ids)
        output_h = self.dropout(word_emb_k)
        if target_mapping is not None:
            word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
            # else:  # We removed the inp_q input which was same as target mapping
            #     inp_q_ext = inp_q[:, :, None]
            #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None

        # Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            if mlen > 0:
                mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
                cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
            else:
                cat_ids = token_type_ids

            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
        else:
            seg_mat = None

        # Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
        pos_emb = self.dropout(pos_emb)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to float if need + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer

        new_mems = ()
        if mems is None:
            mems = [None] * len(self.layer)

        attentions = []
        hidden_states = []
        for i, layer_module in enumerate(self.layer):
            if self.mem_len is not None and self.mem_len > 0 and self.output_past:
                # cache new mems
                new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
            if self.output_hidden_states:
                hidden_states.append((output_h, output_g) if output_g is not None else output_h)

            outputs = layer_module(
                output_h,
                output_g,
                attn_mask_h=non_tgt_mask,
                attn_mask_g=attn_mask,
                r=pos_emb,
                seg_mat=seg_mat,
                mems=mems[i],
                target_mapping=target_mapping,
                head_mask=head_mask[i],
            )
            output_h, output_g = outputs[:2]
            if self.output_attentions:
                attentions.append(outputs[2])

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        output = self.dropout(output_g if output_g is not None else output_h)

        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        outputs = (output.permute(1, 0, 2).contiguous(),)

        if self.mem_len is not None and self.mem_len > 0 and self.output_past:
            outputs = outputs + (new_mems,)

        if self.output_hidden_states:
            if output_g is not None:
                hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
            else:
                hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            if target_mapping is not None:
                # when target_mapping is provided, there are 2-tuple of attentions
                attentions = tuple(
                    tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
                )
            else:
                attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs = outputs + (attentions,)

        return outputs  # outputs, (new_mems), (hidden_states), (attentions)
@add_start_docstrings(
    """XLNet Model with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    XLNET_START_DOCSTRING,
)
class XLNetLMHeadModel(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length

        self.transformer = XLNetModel(config)
        self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)

        self.init_weights()

    def get_output_embeddings(self):
        """Return the LM head, whose weights are tied to the input embeddings."""
        return self.lm_loss

    def prepare_inputs_for_generation(self, input_ids, past, **model_kwargs):
        """Build the inputs for one decoding step: append a dummy `<mask>` slot,
        mask it out for all other tokens, and target only that last position."""
        # Add dummy token at the end (no attention on this one)
        effective_batch_size = input_ids.shape[0]
        dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        # Build permutation mask so that previous tokens don't see last token
        sequence_length = input_ids.shape[1]
        perm_mask = torch.zeros(
            (effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
        )
        perm_mask[:, :, -1] = 1.0

        # We'll only predict the last token.
        # FIX: target the last position for EVERY batch element (was `[0, 0, -1]`,
        # which left all rows but the first with an all-zero target mapping during
        # batched / beam-search generation).
        target_mapping = torch.zeros(
            (effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
        )
        target_mapping[:, 0, -1] = 1.0

        inputs = {"input_ids": input_ids, "perm_mask": perm_mask, "target_mapping": target_mapping}

        # if past is defined in model kwargs then use it for faster decoding
        if past:
            inputs["mems"] = past

        return inputs

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
    labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_predict)`, `optional`, defaults to :obj:`None`):
        Labels for masked language modeling.
        `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
        The labels should correspond to the masked input words that should be predicted and depends on `target_mapping`. Note in order to perform standard auto-regressive language modeling a `<mask>` token has to be added to the `input_ids` (see `prepare_inputs_for_generation` fn and examples below)
        Indices are selected in ``[-100, 0, ..., config.vocab_size]``
        All labels set to ``-100`` are ignored, the loss is only
        computed for labels in ``[0, ..., config.vocab_size]``
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
            Language modeling loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
            `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import XLNetTokenizer, XLNetLMHeadModel
        import torch
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')
        # We show how to setup inputs to predict a next token using a bi-directional context.
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0)  # We will predict the masked token
        perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
        target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)  # Shape [1, 1, seq_length] => let's predict one token
        target_mapping[0, 0, -1] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)
        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
        next_token_logits = outputs[0]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
        # The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling.
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0)  # We will predict the masked token
        labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
        assert labels.shape[0] == 1, 'only one word will be predicted'
        perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token as is done in standard auto-regressive lm training
        target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)  # Shape [1, 1, seq_length] => let's predict one token
        target_mapping[0, 0, -1] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)
        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
        loss, next_token_logits = outputs[:2]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
        """
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )

        logits = self.lm_loss(transformer_outputs[0])

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

        if labels is not None:
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XLNET_START_DOCSTRING,
)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
    labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
        Labels for computing the sequence classification/regression loss.
        Indices should be in ``[0, ..., config.num_labels - 1]``.
        A mean-squared-error regression loss is computed when ``config.num_labels == 1``,
        otherwise a cross-entropy classification loss.
    Return:
        :obj:`tuple(torch.FloatTensor)`:
        ``(loss?, logits, mems?, hidden_states?, attentions?)`` — ``loss`` (scalar) is present only
        when ``labels`` is given; ``logits`` has shape :obj:`(batch_size, config.num_labels)` and holds
        the classification (or regression) scores before SoftMax; the optional trailing elements
        (``mems``, ``hidden_states``, ``attentions``) are forwarded unchanged from the underlying
        :class:`XLNetModel` and depend on the configuration flags.
    Examples::
        from transformers import XLNetTokenizer, XLNetForSequenceClassification
        import torch
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
        """
        base_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )

        # Pool the sequence representation and project it onto the label space.
        pooled = self.sequence_summary(base_outputs[0])
        logits = self.logits_proj(pooled)

        # Forward mems / hidden states / attentions untouched.
        outputs = (logits,) + base_outputs[1:]

        if labels is not None:
            if self.num_labels == 1:
                # Single output unit -> regression with mean-squared error.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        # NOTE(review): relies on `config.hidden_size` aliasing `config.d_model`
        # on XLNetConfig — verify against the config class.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
    labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
        Labels for computing the token classification loss.
        Indices should be in ``[0, ..., config.num_labels - 1]``.
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Classification loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import XLNetTokenizer, XLNetForTokenClassification
        import torch
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForTokenClassification.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        scores = outputs[0]
        """
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )

        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        outputs = (logits,) + outputs[1:]  # Keep mems, hidden states, attentions if there are in it
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss: padding positions get the
            # loss function's ignore_index so they contribute nothing.
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
    XLNET_START_DOCSTRING,
)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.transformer = XLNetModel(config)
        # Pools the per-token hidden states into one summary vector per sequence.
        self.sequence_summary = SequenceSummary(config)
        # One scalar score per (example, choice); the per-example scores are
        # reshaped to (batch_size, num_choices) in forward() and compared via
        # softmax/cross-entropy.
        self.logits_proj = nn.Linear(config.d_model, 1)
        self.init_weights()
    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        input_mask=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        labels=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Classification loss.
        classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
            `num_choices` is the second dimension of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import XLNetTokenizer, XLNetForMultipleChoice
        import torch
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        model = XLNetForMultipleChoice.from_pretrained('xlnet-base-cased')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]
        """
        # NOTE(review): `input_ids` is required here even though the signature
        # defaults it to None — an inputs_embeds-only call would fail on the
        # next line. Confirm against upstream before relying on embeds input.
        num_choices = input_ids.shape[1]
        # Fold the choice dimension into the batch so the transformer sees
        # (batch_size * num_choices, seq_len) inputs.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
        transformer_outputs = self.transformer(
            flat_input_ids,
            token_type_ids=flat_token_type_ids,
            input_mask=flat_input_mask,
            attention_mask=flat_attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        output = transformer_outputs[0]
        output = self.sequence_summary(output)
        logits = self.logits_proj(output)
        # Unfold the scores back to one row of `num_choices` logits per example.
        reshaped_logits = logits.view(-1, num_choices)
        outputs = (reshaped_logits,) + transformer_outputs[
            1:
        ]  # Keep mems, hidden states, attentions if there are in it
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
    """XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLNetModel(config)
        # Projects each token's hidden state to a pair of (start, end) scores.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Index of the first token of the labelled answer span. Positions are
            clamped to the sequence length; out-of-sequence positions are
            ignored when computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Index of the last token of the labelled answer span. Positions are
            clamped to the sequence length; out-of-sequence positions are
            ignored when computing the loss.
    Returns:
        :obj:`tuple(torch.FloatTensor)`:
        ``(loss,) start_logits, end_logits, (mems), (hidden_states), (attentions)``
        where ``loss`` (shape :obj:`(1,)`) is only present when both
        ``start_positions`` and ``end_positions`` are provided, and
        ``start_logits`` / ``end_logits`` have shape
        :obj:`(batch_size, sequence_length)` (scores before SoftMax).
    Examples::
        from transformers import XLNetTokenizer, XLNetForQuestionAnsweringSimple
        import torch
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        model = XLNetForQuestionAnsweringSimple.from_pretrained('xlnet-base-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
        outputs = model(input_ids, start_positions=torch.tensor([1]), end_positions=torch.tensor([3]))
        loss = outputs[0]
        """
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        span_logits = self.qa_outputs(transformer_outputs[0])
        start_logits, end_logits = (t.squeeze(-1) for t in span_logits.split(1, dim=-1))
        outputs = (start_logits, end_logits) + transformer_outputs[2:]
        if start_positions is not None and end_positions is not None:
            # Multi-GPU gather may add a trailing dimension — drop it.
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # Targets outside the sequence are clamped (in place) to an index
            # the loss is told to ignore.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
            outputs = (total_loss,) + outputs
        return outputs  # (loss), start_logits, end_logits, (mems), (hidden_states), (attentions)
@add_start_docstrings(
    """XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # Beam widths for the two-stage start/end span search used at inference.
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top
        self.transformer = XLNetModel(config)
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        # Predicts answerability ("is_impossible", SQuAD 2.0) from CLS + start.
        self.answer_class = PoolerAnswerClass(config)
        self.init_weights()
    @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        mems=None,
        perm_mask=None,
        target_mapping=None,
        token_type_ids=None,
        input_mask=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        is_impossible=None,
        cls_index=None,
        p_mask=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Labels whether a question has an answer or no answer (SQuAD 2.0)
        cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
        p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
            1.0 means token should be masked. 0.0 mean token is not masked.
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top config.start_n_top start token possibilities (beam-search).
        end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the ``is_impossible`` label of the answers.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import XLNetTokenizer, XLNetForQuestionAnswering
        import torch
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss = outputs[0]
        """
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        hidden_states = transformer_outputs[0]
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)
            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)
                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5
            outputs = (total_loss,) + outputs
        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            # NOTE(review): despite the "_log_probs" names below, F.softmax
            # yields plain probabilities, not log-probabilities.
            start_log_probs = F.softmax(start_logits, dim=-1)  # shape (bsz, slen)
            # Stage 1: keep the top-k start positions.
            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)
            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            # Stage 2: score every end position conditioned on each candidate start.
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)
            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
            start_states = torch.einsum(
                "blh,bl->bh", hidden_states, start_log_probs
            )  # get the representation of START as weighted sum of hidden states
            cls_logits = self.answer_class(
                hidden_states, start_states=start_states, cls_index=cls_index
            )  # Shape (batch size,): one single `cls_logits` for each sample
            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if labels are provided) (total_loss,)
        return outputs
| 79,269 | 45.988737 | 304 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_camembert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 CamemBERT model. """
import logging
from .configuration_camembert import CamembertConfig
from .file_utils import add_start_docstrings
from .modeling_tf_roberta import (
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaModel,
)
logger = logging.getLogger(__name__)
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {}
CAMEMBERT_START_DOCSTRING = r"""
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.CamembertConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@add_start_docstrings(
    "The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.",
    CAMEMBERT_START_DOCSTRING,
)
class TFCamembertModel(TFRobertaModel):
    """
    This class overrides :class:`~transformers.TFRobertaModel`. Please check the
    superclass for the appropriate documentation alongside usage examples.
    """
    # CamemBERT reuses the RoBERTa architecture unchanged; only the config
    # class and the pretrained checkpoint map differ.
    config_class = CamembertConfig
    pretrained_model_archive_map = TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
    """CamemBERT Model with a `language modeling` head on top. """, CAMEMBERT_START_DOCSTRING,
)
class TFCamembertForMaskedLM(TFRobertaForMaskedLM):
    """
    This class overrides :class:`~transformers.TFRobertaForMaskedLM`. Please check the
    superclass for the appropriate documentation alongside usage examples.
    """
    # Behaviour is inherited wholesale from the RoBERTa MLM head.
    config_class = CamembertConfig
    pretrained_model_archive_map = TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
    """CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer
    on top of the pooled output) e.g. for GLUE tasks. """,
    CAMEMBERT_START_DOCSTRING,
)
class TFCamembertForSequenceClassification(TFRobertaForSequenceClassification):
    """
    This class overrides :class:`~transformers.TFRobertaForSequenceClassification`. Please check the
    superclass for the appropriate documentation alongside usage examples.
    """
    # Behaviour is inherited wholesale from the RoBERTa classification head.
    config_class = CamembertConfig
    pretrained_model_archive_map = TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
@add_start_docstrings(
    """CamemBERT Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    CAMEMBERT_START_DOCSTRING,
)
class TFCamembertForTokenClassification(TFRobertaForTokenClassification):
    """
    This class overrides :class:`~transformers.TFRobertaForTokenClassification`. Please check the
    superclass for the appropriate documentation alongside usage examples.
    """
    # Behaviour is inherited wholesale from the RoBERTa token-classification head.
    config_class = CamembertConfig
    pretrained_model_archive_map = TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
| 4,799 | 39.336134 | 127 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_xlm.py | # coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLM model.
"""
import itertools
import logging
import math
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from .activations import gelu
from .configuration_xlm import XLMConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead, prune_linear_layer
logger = logging.getLogger(__name__)
# Maps pretrained shortcut names to the download URLs of their PyTorch weights.
XLM_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-pytorch_model.bin",
    "xlm-mlm-ende-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-pytorch_model.bin",
    "xlm-mlm-enfr-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-pytorch_model.bin",
    "xlm-mlm-enro-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-pytorch_model.bin",
    "xlm-mlm-tlm-xnli15-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-pytorch_model.bin",
    "xlm-mlm-xnli15-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-pytorch_model.bin",
    "xlm-clm-enfr-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-pytorch_model.bin",
    "xlm-clm-ende-1024": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-pytorch_model.bin",
    "xlm-mlm-17-1280": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-pytorch_model.bin",
    "xlm-mlm-100-1280": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-pytorch_model.bin",
}
def create_sinusoidal_embeddings(n_pos, dim, out):
    """Fill ``out`` (shape ``(n_pos, dim)``) in place with fixed sinusoidal
    position encodings, then detach it from autograd so it is never trained.

    Even columns hold sines and odd columns cosines; column pair ``j`` uses
    the angle ``pos / 10000 ** (2 * (j // 2) / dim)``.
    """
    positions = np.arange(n_pos)[:, None]
    dims = np.arange(dim)[None, :]
    # angle[pos, j] = pos / 10000 ** (2 * (j // 2) / dim), via broadcasting.
    angles = positions / np.power(10000, 2 * (dims // 2) / dim)
    out[:, 0::2] = torch.FloatTensor(np.sin(angles[:, 0::2]))
    out[:, 1::2] = torch.FloatTensor(np.cos(angles[:, 1::2]))
    out.detach_()
    out.requires_grad = False
def get_masks(slen, lengths, causal, padding_mask=None):
    """Build the hidden-states padding mask and the attention mask.

    Returns ``(mask, attn_mask)`` where ``mask`` is ``(bs, slen)`` marking real
    (non-padding) tokens, and ``attn_mask`` is either the same padding mask or,
    when ``causal``, a ``(bs, slen, slen)`` lower-triangular mask where
    position ``i`` may attend to positions ``j <= i``.
    """
    positions = torch.arange(slen, dtype=torch.long, device=lengths.device)
    if padding_mask is None:
        assert lengths.max().item() <= slen
        mask = positions < lengths[:, None]
    else:
        mask = padding_mask
    bs = lengths.size(0)
    if causal:
        attn_mask = positions[None, None, :].repeat(bs, slen, 1) <= positions[None, :, None]
    else:
        attn_mask = mask
    # sanity check
    assert mask.size() == (bs, slen)
    assert causal is False or attn_mask.size() == (bs, slen, slen)
    return mask, attn_mask
class MultiHeadAttention(nn.Module):
    """Multi-head attention (self- or cross-attention) with head pruning and an
    optional incremental-decoding key/value cache."""
    # Shared counter giving each layer a unique id, used as the key for this
    # layer's (k, v) entry in the decoding `cache` dict.
    NEW_ID = itertools.count()
    def __init__(self, n_heads, dim, config):
        super().__init__()
        self.layer_id = next(MultiHeadAttention.NEW_ID)
        self.output_attentions = config.output_attentions
        self.dim = dim
        self.n_heads = n_heads
        self.dropout = config.attention_dropout
        # `dim` must split evenly across heads.
        assert self.dim % self.n_heads == 0
        self.q_lin = nn.Linear(dim, dim)
        self.k_lin = nn.Linear(dim, dim)
        self.v_lin = nn.Linear(dim, dim)
        self.out_lin = nn.Linear(dim, dim)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the attention heads listed in ``heads`` from this layer in place."""
        attention_head_size = self.dim // self.n_heads
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_heads, attention_head_size)
        heads = set(heads) - self.pruned_heads  # skip heads pruned on an earlier call
        for head in heads:
            # Shift the head index down by the number of already-pruned heads before it.
            head -= sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # Prune linear layers
        self.q_lin = prune_linear_layer(self.q_lin, index)
        self.k_lin = prune_linear_layer(self.k_lin, index)
        self.v_lin = prune_linear_layer(self.v_lin, index)
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.dim = attention_head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(self, input, mask, kv=None, cache=None, head_mask=None):
        """
        Self-attention (if kv is None) or attention over source sentence (provided by kv).
        """
        # Input is (bs, qlen, dim)
        # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
        bs, qlen, dim = input.size()
        if kv is None:
            # Self-attention: keys span the cached prefix plus the new tokens.
            klen = qlen if cache is None else cache["slen"] + qlen
        else:
            klen = kv.size(1)
        # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
        n_heads = self.n_heads
        dim_per_head = self.dim // n_heads
        mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
        def shape(x):
            """ projection: (bs, len, dim) -> (bs, n_heads, len, dim_per_head) """
            return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
        def unshape(x):
            """ compute context: inverse of shape() """
            return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
        q = shape(self.q_lin(input))  # (bs, n_heads, qlen, dim_per_head)
        if kv is None:
            k = shape(self.k_lin(input))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v_lin(input))  # (bs, n_heads, qlen, dim_per_head)
        elif cache is None or self.layer_id not in cache:
            k = v = kv
            k = shape(self.k_lin(k))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v_lin(v))  # (bs, n_heads, qlen, dim_per_head)
        if cache is not None:
            if self.layer_id in cache:
                if kv is None:
                    # Self-attention: append the new keys/values to the cached prefix.
                    k_, v_ = cache[self.layer_id]
                    k = torch.cat([k_, k], dim=2)  # (bs, n_heads, klen, dim_per_head)
                    v = torch.cat([v_, v], dim=2)  # (bs, n_heads, klen, dim_per_head)
                else:
                    # Cross-attention: the source keys/values never change, reuse them.
                    k, v = cache[self.layer_id]
            cache[self.layer_id] = (k, v)
        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, qlen, dim_per_head)
        scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, qlen, klen)
        # Broadcast the 0/1 mask over heads and set masked positions to -inf.
        mask = (mask == 0).view(mask_reshape).expand_as(scores)  # (bs, n_heads, qlen, klen)
        scores.masked_fill_(mask, -float("inf"))  # (bs, n_heads, qlen, klen)
        # Softmax is computed in float32 for numerical stability, then cast back.
        weights = F.softmax(scores.float(), dim=-1).type_as(scores)  # (bs, n_heads, qlen, klen)
        weights = F.dropout(weights, p=self.dropout, training=self.training)  # (bs, n_heads, qlen, klen)
        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask
        context = torch.matmul(weights, v)  # (bs, n_heads, qlen, dim_per_head)
        context = unshape(context)  # (bs, qlen, dim)
        outputs = (self.out_lin(context),)
        if self.output_attentions:
            outputs = outputs + (weights,)
        return outputs
class TransformerFFN(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> Linear -> dropout."""
    def __init__(self, in_dim, dim_hidden, out_dim, config):
        super().__init__()
        self.lin1 = nn.Linear(in_dim, dim_hidden)
        self.lin2 = nn.Linear(dim_hidden, out_dim)
        # GELU or ReLU, chosen once at construction time from the config.
        self.act = gelu if config.gelu_activation else F.relu
        self.dropout = config.dropout
    def forward(self, input):
        hidden = self.act(self.lin1(input))
        projected = self.lin2(hidden)
        # F.dropout is a no-op when self.training is False.
        return F.dropout(projected, p=self.dropout, training=self.training)
class XLMPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    config_class = XLMConfig
    pretrained_model_archive_map = XLM_PRETRAINED_MODEL_ARCHIVE_MAP
    # XLM checkpoints are published as PyTorch weights only — no TF conversion hook.
    load_tf_weights = None
    base_model_prefix = "transformer"
    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)
    @property
    def dummy_inputs(self):
        """Small fixed input tensors; ``langs`` is populated only when the
        config uses language embeddings for more than one language."""
        inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
        attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
        if self.config.use_lang_emb and self.config.n_langs > 1:
            langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
        else:
            langs_list = None
        return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list}
    def _init_weights(self, module):
        """ Initialize the weights. """
        if isinstance(module, nn.Embedding):
            # Embeddings use their own init std, distinct from linear layers.
            if self.config is not None and self.config.embed_init_std is not None:
                nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)
        if isinstance(module, nn.Linear):
            if self.config is not None and self.config.init_std is not None:
                nn.init.normal_(module.weight, mean=0, std=self.config.init_std)
            if hasattr(module, "bias") and module.bias is not None:
                nn.init.constant_(module.bias, 0.0)
        if isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Docstring fragment prepended to every XLM model class via `add_start_docstrings`.
XLM_START_DOCSTRING = r"""
    This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
    usage and behavior.
    Parameters:
        config (:class:`~transformers.XLMConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
# Shared description of the `forward` arguments, injected into each model's
# forward docstring via `add_start_docstrings_to_callable`.
XLM_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using :class:`transformers.BertTokenizer`.
            See :func:`transformers.PreTrainedTokenizer.encode` and
            :func:`transformers.PreTrainedTokenizer.encode_plus` for details.
            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
            `What are attention masks? <../glossary.html#attention-mask>`__
        langs (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            A parallel sequence of tokens to be used to indicate the language of each token in the input.
            Indices are languages ids which can be obtained from the language names by using two conversion mappings
            provided in the configuration of the model (only provided for multilingual models).
            More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and
            the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str).
            See usage examples detailed in the `multilingual documentation <https://huggingface.co/transformers/multilingual.html>`__.
        token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Segment token indices to indicate first and second portions of the inputs.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
            `What are token type IDs? <../glossary.html#token-type-ids>`_
        position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
            `What are position IDs? <../glossary.html#position-ids>`_
        lengths (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Length of each sentence that can be used to avoid performing attention on padding token indices.
            You can also use `attention_mask` for the same result (see above), kept here for compatbility.
            Indices selected in ``[0, ..., input_ids.size(-1)]``:
        cache (:obj:`Dict[str, torch.FloatTensor]`, `optional`, defaults to :obj:`None`):
            dictionary with ``torch.FloatTensor`` that contains pre-computed
            hidden-states (key and values in the attention blocks) as computed by the model
            (see `cache` output below). Can be used to speed up sequential decoding.
            The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare XLM Model transformer outputting raw hidden-states without any specific head on top.",
    XLM_START_DOCSTRING,
)
class XLMModel(XLMPreTrainedModel):
    """Bare XLM transformer encoder producing raw hidden-states (no task-specific head)."""

    def __init__(self, config):  # , dico, is_encoder, with_output):
        super().__init__(config)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        # encoder / decoder, output layer — only the encoder side is implemented here
        self.is_encoder = config.is_encoder
        self.is_decoder = not config.is_encoder
        if self.is_decoder:
            raise NotImplementedError("Currently XLM can only be used as an encoder")
        # self.with_output = with_output
        self.causal = config.causal
        # dictionary / languages
        self.n_langs = config.n_langs
        self.use_lang_emb = config.use_lang_emb
        self.n_words = config.n_words
        self.eos_index = config.eos_index
        self.pad_index = config.pad_index
        # self.dico = dico
        # self.id2lang = config.id2lang
        # self.lang2id = config.lang2id
        # assert len(self.dico) == self.n_words
        # assert len(self.id2lang) == len(self.lang2id) == self.n_langs
        # model parameters
        self.dim = config.emb_dim  # 512 by default
        self.hidden_dim = self.dim * 4  # 2048 by default
        self.n_heads = config.n_heads  # 8 by default
        self.n_layers = config.n_layers
        self.dropout = config.dropout
        self.attention_dropout = config.attention_dropout
        assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads"
        # embeddings: absolute positions (optionally sinusoidal), language, and token embeddings
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
        if config.sinusoidal_embeddings:
            create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)
        if config.n_langs > 1 and config.use_lang_emb:
            self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
        self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
        self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
        # transformer layers: per-layer self-attention + FFN, each followed by a LayerNorm
        self.attentions = nn.ModuleList()
        self.layer_norm1 = nn.ModuleList()
        self.ffns = nn.ModuleList()
        self.layer_norm2 = nn.ModuleList()
        # if self.is_decoder:
        #     self.layer_norm15 = nn.ModuleList()
        #     self.encoder_attn = nn.ModuleList()
        for _ in range(self.n_layers):
            self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))
            self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
            # if self.is_decoder:
            #     self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
            #     self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
            self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
            self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
        # Re-apply any head pruning recorded in the config (e.g. when reloading a pruned checkpoint);
        # the recorded mapping is cleared first because prune_heads() will re-populate it.
        if hasattr(config, "pruned_heads"):
            pruned_heads = config.pruned_heads.copy().items()
            config.pruned_heads = {}
            for layer, heads in pruned_heads:
                if self.attentions[int(layer)].n_heads == config.n_heads:
                    self.prune_heads({int(layer): list(map(int, heads))})
        self.init_weights()

    def get_input_embeddings(self):
        # Token embedding table (used for weight tying and embedding resizing).
        return self.embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.attentions[layer].prune_heads(heads)

    @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        langs=None,
        token_type_ids=None,
        position_ids=None,
        lengths=None,
        cache=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        from transformers import XLMTokenizer, XLMModel
        import torch

        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        model = XLMModel.from_pretrained('xlm-mlm-en-2048')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

        """
        # Resolve batch size / sequence length from whichever input form was given.
        if input_ids is not None:
            bs, slen = input_ids.size()
        else:
            bs, slen = inputs_embeds.size()[:-1]

        # Derive per-sentence lengths from padding when not given explicitly.
        if lengths is None:
            if input_ids is not None:
                lengths = (input_ids != self.pad_index).sum(dim=1).long()
            else:
                lengths = torch.LongTensor([slen] * bs)
        # mask = input_ids != self.pad_index

        # check inputs
        assert lengths.size(0) == bs
        assert lengths.max().item() <= slen
        # input_ids = input_ids.transpose(0, 1)  # batch size as dimension 0
        # assert (src_enc is None) == (src_len is None)
        # if src_enc is not None:
        #     assert self.is_decoder
        #     assert src_enc.size(0) == bs

        # generate masks: `mask` zeros out padding positions, `attn_mask` additionally
        # applies the causal constraint when self.causal is set
        mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
        # if self.is_decoder and src_enc is not None:
        #     src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # position_ids
        if position_ids is None:
            position_ids = torch.arange(slen, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand((bs, slen))
        else:
            assert position_ids.size() == (bs, slen)  # (slen, bs)
            # position_ids = position_ids.transpose(0, 1)

        # langs
        if langs is not None:
            assert langs.size() == (bs, slen)  # (slen, bs)
            # langs = langs.transpose(0, 1)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.n_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = (
                    head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
                )  # We can specify head_mask for each layer
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.n_layers

        # do not recompute cached elements: keep only the suffix of the inputs that
        # was not already processed in a previous decoding step
        if cache is not None and input_ids is not None:
            _slen = slen - cache["slen"]
            input_ids = input_ids[:, -_slen:]
            position_ids = position_ids[:, -_slen:]
            if langs is not None:
                langs = langs[:, -_slen:]
            mask = mask[:, -_slen:]
            attn_mask = attn_mask[:, -_slen:]

        # embeddings: token + position (+ language, + segment), then LayerNorm and dropout
        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)

        tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
        if langs is not None and self.use_lang_emb and self.n_langs > 1:
            tensor = tensor + self.lang_embeddings(langs)
        if token_type_ids is not None:
            tensor = tensor + self.embeddings(token_type_ids)
        tensor = self.layer_norm_emb(tensor)
        tensor = F.dropout(tensor, p=self.dropout, training=self.training)
        # zero out padding positions
        tensor *= mask.unsqueeze(-1).to(tensor.dtype)

        # transformer layers: post-LN residual blocks (self-attention then FFN)
        hidden_states = ()
        attentions = ()
        for i in range(self.n_layers):
            if self.output_hidden_states:
                hidden_states = hidden_states + (tensor,)

            # self attention
            attn_outputs = self.attentions[i](tensor, attn_mask, cache=cache, head_mask=head_mask[i])
            attn = attn_outputs[0]
            if self.output_attentions:
                attentions = attentions + (attn_outputs[1],)
            attn = F.dropout(attn, p=self.dropout, training=self.training)
            tensor = tensor + attn
            tensor = self.layer_norm1[i](tensor)

            # encoder attention (for decoder only)
            # if self.is_decoder and src_enc is not None:
            #     attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
            #     attn = F.dropout(attn, p=self.dropout, training=self.training)
            #     tensor = tensor + attn
            #     tensor = self.layer_norm15[i](tensor)

            # FFN
            tensor = tensor + self.ffns[i](tensor)
            tensor = self.layer_norm2[i](tensor)
            tensor *= mask.unsqueeze(-1).to(tensor.dtype)

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states = hidden_states + (tensor,)

        # update cache length (cache dict is mutated in place by the attention layers)
        if cache is not None:
            cache["slen"] += tensor.size(1)

        # move back sequence length to dimension 0
        # tensor = tensor.transpose(0, 1)

        outputs = (tensor,)
        if self.output_hidden_states:
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            outputs = outputs + (attentions,)
        return outputs  # outputs, (hidden_states), (attentions)
class XLMPredLayer(nn.Module):
    """
    Prediction layer (cross_entropy or adaptive_softmax).

    When ``config.asm`` is False the layer is a plain linear projection to the
    vocabulary trained with cross-entropy; otherwise it is an adaptive softmax
    (:class:`torch.nn.AdaptiveLogSoftmaxWithLoss`) for large vocabularies.
    """

    def __init__(self, config):
        super().__init__()
        self.asm = config.asm
        self.n_words = config.n_words
        self.pad_index = config.pad_index
        dim = config.emb_dim
        if config.asm is False:
            self.proj = nn.Linear(dim, config.n_words, bias=True)
        else:
            self.proj = nn.AdaptiveLogSoftmaxWithLoss(
                in_features=dim,
                n_classes=config.n_words,
                cutoffs=config.asm_cutoffs,
                div_value=config.asm_div_value,
                head_bias=True,  # default is False
            )

    def forward(self, x, y=None):
        """ Compute the scores, and optionally the loss.

            Args:
                x: hidden states of shape ``(..., emb_dim)``.
                y: optional target token ids; when given, the mean
                   cross-entropy loss is prepended to the outputs.

            Returns:
                ``(scores,)`` or ``(loss, scores)``.
        """
        outputs = ()
        if self.asm is False:
            scores = self.proj(x)
            outputs = (scores,) + outputs
            if y is not None:
                # BUGFIX: reduction="elementwise_mean" was removed from PyTorch
                # (deprecated in 0.4.1); "mean" is the equivalent spelling.
                loss = F.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction="mean")
                outputs = (loss,) + outputs
        else:
            scores = self.proj.log_prob(x)
            outputs = (scores,) + outputs
            if y is not None:
                _, loss = self.proj(x, y)
                outputs = (loss,) + outputs

        return outputs
@add_start_docstrings(
    """The XLM Model transformer with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    XLM_START_DOCSTRING,
)
class XLMWithLMHeadModel(XLMPreTrainedModel):
    """XLM encoder followed by an ``XLMPredLayer`` language-modeling head."""

    def __init__(self, config):
        super().__init__(config)
        self.transformer = XLMModel(config)
        self.pred_layer = XLMPredLayer(config)
        self.init_weights()

    def get_output_embeddings(self):
        # The output projection is tied to the input embedding matrix.
        return self.pred_layer.proj

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        # XLM MLM checkpoints generate by predicting the token at an appended <mask> slot.
        num_sequences = input_ids.shape[0]
        mask_column = torch.full(
            (num_sequences, 1), self.config.mask_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, mask_column], dim=1)
        lang_id = self.config.lang_id
        langs = torch.full_like(input_ids, lang_id) if lang_id is not None else None
        return {"input_ids": input_ids, "langs": langs}

    @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        langs=None,
        token_type_ids=None,
        position_ids=None,
        lengths=None,
        cache=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Targets for language modeling. The shift is performed inside the model, so
            ``labels`` may simply equal ``input_ids``. Positions set to ``-100`` are
            ignored; the loss is only computed on labels in ``[0, ..., config.vocab_size]``.

        Return:
            Tuple ``(loss?, prediction_scores, hidden_states?, attentions?)``:
            ``loss`` only when ``labels`` is provided; ``prediction_scores`` of shape
            :obj:`(batch_size, sequence_length, config.vocab_size)`; trailing entries
            follow the ``output_hidden_states`` / ``output_attentions`` config flags.

        Examples::

            from transformers import XLMTokenizer, XLMWithLMHeadModel
            import torch

            tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
            model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            outputs = model(input_ids)
            last_hidden_states = outputs[0]
        """
        base_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Prediction layer returns (scores,) or (loss, scores); keep any extra
        # hidden-state / attention outputs from the encoder behind them.
        return self.pred_layer(base_outputs[0], labels) + base_outputs[1:]
@add_start_docstrings(
    """XLM Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XLM_START_DOCSTRING,
)
class XLMForSequenceClassification(XLMPreTrainedModel):
    """XLM encoder pooled via ``SequenceSummary`` for classification or regression."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = XLMModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.init_weights()

    @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        langs=None,
        token_type_ids=None,
        position_ids=None,
        lengths=None,
        cache=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Targets in :obj:`[0, ..., config.num_labels - 1]`. With
            ``config.num_labels == 1`` a mean-squared-error regression loss is used,
            otherwise cross-entropy classification.

        Returns:
            Tuple ``(loss?, logits, hidden_states?, attentions?)``: ``loss`` only when
            ``labels`` is provided; ``logits`` of shape :obj:`(batch_size, config.num_labels)`;
            trailing entries follow the ``output_hidden_states`` / ``output_attentions`` flags.

        Examples::

            from transformers import XLMTokenizer, XLMForSequenceClassification
            import torch

            tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
            model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            labels = torch.tensor([1]).unsqueeze(0)
            outputs = model(input_ids, labels=labels)
            loss, logits = outputs[:2]
        """
        base_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Pool the token-level hidden states into a single vector per sequence.
        logits = self.sequence_summary(base_outputs[0])
        outputs = (logits,) + base_outputs[1:]

        if labels is None:
            return outputs
        if self.num_labels == 1:
            # Single output dimension -> treat the task as regression.
            loss = MSELoss()(logits.view(-1), labels.view(-1))
        else:
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
        return (loss,) + outputs
@add_start_docstrings(
    """XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLM_START_DOCSTRING,
)
class XLMForQuestionAnsweringSimple(XLMPreTrainedModel):
    """XLM encoder with a single linear layer predicting answer-span start/end logits."""

    def __init__(self, config):
        super().__init__(config)
        self.transformer = XLMModel(config)
        # One output channel for start logits, one for end logits.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        langs=None,
        token_type_ids=None,
        position_ids=None,
        lengths=None,
        cache=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Index of the first token of the labelled span. Values are clamped to the
            sequence length; out-of-sequence positions do not contribute to the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Index of the last token of the labelled span, handled like ``start_positions``.

        Returns:
            Tuple ``(loss?, start_logits, end_logits, hidden_states?, attentions?)``:
            ``loss`` (mean of start and end cross-entropies) only when both position
            arguments are given; logits have shape :obj:`(batch_size, sequence_length)`;
            trailing entries follow the ``output_hidden_states`` / ``output_attentions`` flags.

        Examples::

            from transformers import XLMTokenizer, XLMForQuestionAnsweringSimple
            import torch

            tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
            model = XLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            start_positions = torch.tensor([1])
            end_positions = torch.tensor([3])
            outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
            loss = outputs[0]
        """
        base_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        span_logits = self.qa_outputs(base_outputs[0])
        start_logits, end_logits = (t.squeeze(-1) for t in span_logits.split(1, dim=-1))

        outputs = (start_logits, end_logits)
        if start_positions is not None and end_positions is not None:
            # Collapse the extra dimension multi-GPU gathering may add.
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the model inputs are clamped onto a dedicated
            # ignored index so they do not contribute to the loss.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            criterion = CrossEntropyLoss(ignore_index=ignored_index)
            total_loss = (criterion(start_logits, start_positions) + criterion(end_logits, end_positions)) / 2
            outputs = (total_loss,) + outputs

        # Append hidden states / attentions from the encoder if they were requested.
        return outputs + base_outputs[1:]
@add_start_docstrings(
    """XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLM_START_DOCSTRING,
)
class XLMForQuestionAnswering(XLMPreTrainedModel):
    """XLM encoder with the XLNet-style beam-search ``SQuADHead`` on top."""

    def __init__(self, config):
        super().__init__(config)
        self.transformer = XLMModel(config)
        self.qa_outputs = SQuADHead(config)
        self.init_weights()

    @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        langs=None,
        token_type_ids=None,
        position_ids=None,
        lengths=None,
        cache=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        is_impossible=None,
        cls_index=None,
        p_mask=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Index of the first token of the labelled span (clamped to the sequence length).
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Index of the last token of the labelled span (clamped to the sequence length).
        is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Whether the question has no answer (SQuAD 2.0).
        cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Position of the classification token used for the answerability prediction.
        p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
            Mask of tokens that cannot be part of an answer (1.0 = masked, 0.0 = usable).

        Returns:
            When both ``start_positions`` and ``end_positions`` are given, a tuple starting
            with the total span loss; otherwise a tuple of beam-search outputs
            ``(start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)``
            as produced by :class:`~transformers.modeling_utils.SQuADHead`. In both cases
            encoder hidden states / attentions follow, per the config flags.

        Examples::

            from transformers import XLMTokenizer, XLMForQuestionAnswering
            import torch

            tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
            model = XLMForQuestionAnswering.from_pretrained('xlm-mlm-en-2048')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            start_positions = torch.tensor([1])
            end_positions = torch.tensor([3])
            outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
            loss = outputs[0]
        """
        encoder_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Delegate span scoring / loss computation to the shared SQuAD head.
        head_outputs = self.qa_outputs(
            encoder_outputs[0],
            start_positions=start_positions,
            end_positions=end_positions,
            cls_index=cls_index,
            is_impossible=is_impossible,
            p_mask=p_mask,
        )
        # Keep encoder hidden states / attentions behind the head outputs.
        return head_outputs + encoder_outputs[1:]
@add_start_docstrings(
    """XLM Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    XLM_START_DOCSTRING,
)
class XLMForTokenClassification(XLMPreTrainedModel):
    """XLM encoder with a per-token linear classification head (e.g. for NER)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Shared XLM encoder followed by dropout and a token-level classifier.
        self.transformer = XLMModel(config)
        self.dropout = nn.Dropout(config.dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        langs=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.

    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Classification loss.
        scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Hidden-states of the model at the output of each layer plus the initial embedding outputs,
            each of shape :obj:`(batch_size, sequence_length, hidden_size)`.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Attention weights after the softmax, one tensor per layer of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.

    Examples::

        from transformers import XLMTokenizer, XLMForTokenClassification
        import torch

        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-100-1280')
        model = XLMForTokenClassification.from_pretrained('xlm-mlm-100-1280')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, scores = outputs[:2]
        """
        encoder_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
        )

        token_states = self.dropout(encoder_outputs[0])
        logits = self.classifier(token_states)

        # Keep hidden states / attentions from the encoder, if present.
        outputs = (logits,) + encoder_outputs[2:]

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is None:
                loss = loss_fct(flat_logits, flat_labels)
            else:
                # Ignore padded positions by redirecting their labels to ignore_index.
                active = attention_mask.view(-1) == 1
                masked_labels = torch.where(
                    active, flat_labels, torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(flat_logits, masked_labels)
            outputs = (loss,) + outputs

        return outputs  # (loss), scores, (hidden_states), (attentions)
| 52,669 | 45.282953 | 197 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_tf_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""
import functools
import logging
import os
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, cached_path, hf_bucket_url, is_remote_url
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
logger = logging.getLogger(__name__)
class TFModelUtilsMixin:
    """
    A few utilities for `tf.keras.Model`s, to be used as a mixin.
    """

    def num_parameters(self, only_trainable: bool = False) -> int:
        """
        Get number of (optionally, trainable) parameters in the model.
        """
        if not only_trainable:
            # Full parameter count is already provided by keras.
            return self.count_params()
        # Count trainable parameters by hand: product of each weight's dims.
        total = 0
        for weight in self.trainable_variables:
            total += int(np.prod(weight.shape.as_list()))
        return total
def keras_serializable(cls):
    """
    Decorate a Keras Layer class to support Keras serialization.

    This is done by:
    1. adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at
       serialization time
    2. wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and
       convert it to a config object for the actual layer initializer
    3. registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does
       not need to be supplied in `custom_objects` in the call to `tf.keras.models.load_model`

    :param cls: a tf.keras.layers.Layers subclass that accepts a `config` argument to its initializer (typically a
        `TF*MainLayer` class in this project)
    :return: the same class object, with modifications for Keras deserialization.
    """
    initializer = cls.__init__

    # The decorated class must declare which PretrainedConfig subclass it uses,
    # so the wrapper can rebuild a config object from the serialized dict.
    config_class = getattr(cls, "config_class", None)
    if config_class is None:
        raise AttributeError("Must set `config_class` to use @keras_serializable")

    @functools.wraps(initializer)
    def wrapped_init(self, *args, **kwargs):
        # Accept either a ready-made config object (normal construction) or a
        # `transformers_config` dict (Keras deserialization) -- never both.
        transformers_config = kwargs.pop("transformers_config", None)
        config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.get("config", None)
        if config is not None and transformers_config is not None:
            raise ValueError("Must pass either `config` or `transformers_config`, not both")
        elif config is not None:
            # normal layer construction, call with unchanged args (config is already in there)
            initializer(self, *args, **kwargs)
        elif transformers_config is not None:
            # Keras deserialization, convert dict to config
            config = config_class.from_dict(transformers_config)
            initializer(self, config, *args, **kwargs)
        else:
            raise ValueError("Must pass either `config` (PretrainedConfig) or `transformers_config` (dict)")
        # Remember the config object so get_config can serialize it back to a dict.
        self._transformers_config = config

    cls.__init__ = wrapped_init

    if not hasattr(cls, "get_config"):
        raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
    if hasattr(cls.get_config, "_is_default"):
        # Only override get_config while the class still uses Keras' default
        # implementation; a custom get_config is assumed to handle the config itself.

        def get_config(self):
            cfg = super(cls, self).get_config()
            cfg["transformers_config"] = self._transformers_config.to_dict()
            return cfg

        cls.get_config = get_config

    cls._keras_serializable = True
    if hasattr(tf.keras.utils, "register_keras_serializable"):
        # Newer TF versions can register the class globally, so load_model
        # works without passing `custom_objects`.
        cls = tf.keras.utils.register_keras_serializable()(cls)
    return cls
class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin):
r""" Base class for all TF models.
:class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
base_model_prefix = ""
@property
def dummy_inputs(self):
    """ Dummy inputs to build the network.

    Returns:
        dict mapping "input_ids" to a tf.Tensor of dummy token ids
    """
    dummy = tf.constant(DUMMY_INPUTS)
    return {"input_ids": dummy}
def __init__(self, config, *inputs, **kwargs):
    """Validate and store the model configuration; remaining arguments are
    forwarded unchanged to ``tf.keras.Model``."""
    super().__init__(*inputs, **kwargs)
    if not isinstance(config, PretrainedConfig):
        name = self.__class__.__name__
        raise ValueError(
            "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
            "To create a model from a pretrained model use "
            "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(name, name)
        )
    # Save config in model
    self.config = config
def get_input_embeddings(self):
    """
    Returns the model's input embeddings.

    Returns:
        :obj:`tf.keras.layers.Layer`:
            A keras layer mapping vocabulary to hidden states.
    """
    base_model = getattr(self, self.base_model_prefix, self)
    if base_model is self:
        # No nested base model to delegate to: subclasses must implement this.
        raise NotImplementedError
    return base_model.get_input_embeddings()
def get_output_embeddings(self):
    """
    Returns the model's output embeddings.

    Returns:
        :obj:`tf.keras.layers.Layer`:
            A keras layer mapping hidden states to vocabulary, or :obj:`None`
            when the model has no output embeddings.
    """
    return None  # Overwrite for models with output embeddings
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
    """ Build a resized Embedding Variable from a provided token Embedding Module.
        Increasing the size will add newly initialized vectors at the end
        Reducing the size will remove vectors from the end

    Args:
        old_embeddings: the current token embedding variable.
        new_num_tokens: (`optional`) int
            New number of tokens in the embedding matrix.
            Increasing the size will add newly initialized vectors at the end
            Reducing the size will remove vectors from the end
            If not provided or None: return the provided token Embedding Module.

    Return: ``tf.Variable``
        Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
    """
    # NOTE(review): embedding resizing is not implemented for TF 2.0 yet. The
    # previous revision kept a commented-out PyTorch draft here; that dead code
    # has been removed. This stub currently returns None regardless of input --
    # TF subclasses that need resizing must override it.
    return None
def resize_token_embeddings(self, new_num_tokens=None):
    """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
        Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.

    Arguments:
        new_num_tokens: (`optional`) int:
            New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
            If not provided or None: does nothing and just returns a pointer to the input tokens ``tf.Variable`` Module of the model.

    Return: ``tf.Variable``
        Pointer to the input tokens Embeddings Module of the model
    """
    # Not implemented for TF 2.0 models yet; subclasses must override.
    raise NotImplementedError
def prune_heads(self, heads_to_prune):
    """ Prunes heads of the base model.

    Arguments:
        heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
    """
    # Head pruning is not supported for TF 2.0 models; subclasses must override.
    raise NotImplementedError
def save_pretrained(self, save_directory):
    """ Save a model and its configuration file to a directory, so that it
        can be re-loaded using the :func:`~transformers.PreTrainedModel.from_pretrained` class method.

    Arguments:
        save_directory: path to an existing directory in which the config
            (``config.json``) and the weights (``TF2_WEIGHTS_NAME``) are written.

    Raises:
        ValueError: if ``save_directory`` is not an existing directory.
    """
    # Validate with an explicit exception instead of `assert`: asserts are
    # silently stripped when Python runs with -O, so the check would vanish.
    if not os.path.isdir(save_directory):
        raise ValueError("Saving path should be a directory where the model and configuration can be saved")

    # Save configuration file
    self.config.save_pretrained(save_directory)

    # If we save using the predefined names, we can load using `from_pretrained`
    output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
    self.save_weights(output_model_file)
    logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
    r"""Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.

    The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
    It is up to you to train those weights with a downstream fine-tuning task.

    The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.

    Parameters:
        pretrained_model_name_or_path: either:
            - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
            - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
            - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
            - a path or url to a `PyTorch state_dict save file` (e.g. `./pt_model/pytorch_model.bin`). In this case, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.

        model_args: (`optional`) Sequence of positional arguments:
            All remaning positional arguments will be passed to the underlying model's ``__init__`` method

        config: (`optional`) one of:
            - an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
            - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
            Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
            - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
            - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
            - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.

        from_pt: (`optional`) boolean, default False:
            Load the model weights from a PyTorch state_dict save file (see docstring of pretrained_model_name_or_path argument).

        cache_dir: (`optional`) string:
            Path to a directory in which a downloaded pre-trained model
            configuration should be cached if the standard cache should not be used.

        force_download: (`optional`) boolean, default False:
            Force to (re-)download the model weights and configuration files and override the cached versions if they exists.

        resume_download: (`optional`) boolean, default False:
            Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.

        proxies: (`optional`) dict, default None:
            A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
            The proxies are used on each request.

        output_loading_info: (`optional`) boolean:
            Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.

        kwargs: (`optional`) Remaining dictionary of keyword arguments:
            Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
            - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
            - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.

    Examples::

        # For example purposes. Not runnable.
        model = BertModel.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
        model = BertModel.from_pretrained('./test/saved_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
        model = BertModel.from_pretrained('bert-base-uncased', output_attention=True)  # Update configuration during loading
        assert model.config.output_attention == True
        # Loading from a TF checkpoint file instead of a PyTorch model (slower)
        config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
        model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_pt=True, config=config)

    """
    # Pop our own keyword arguments so the remainder can flow into the config
    # loader and/or the model constructor untouched.
    config = kwargs.pop("config", None)
    cache_dir = kwargs.pop("cache_dir", None)
    from_pt = kwargs.pop("from_pt", False)
    force_download = kwargs.pop("force_download", False)
    resume_download = kwargs.pop("resume_download", False)
    proxies = kwargs.pop("proxies", None)
    output_loading_info = kwargs.pop("output_loading_info", False)

    # Load config if we don't provide a configuration
    if not isinstance(config, PretrainedConfig):
        config_path = config if config is not None else pretrained_model_name_or_path
        # return_unused_kwargs=True splits kwargs into config overrides
        # (consumed here) and model kwargs (returned for the constructor).
        config, model_kwargs = cls.config_class.from_pretrained(
            config_path,
            *model_args,
            cache_dir=cache_dir,
            return_unused_kwargs=True,
            force_download=force_download,
            resume_download=resume_download,
            **kwargs,
        )
    else:
        model_kwargs = kwargs

    # Load model: resolve the weights location (shortcut name, local directory,
    # local file, remote url, or S3 bucket path) into `archive_file`.
    if pretrained_model_name_or_path is not None:
        if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
            archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
        elif os.path.isdir(pretrained_model_name_or_path):
            if os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                # Load from a TF 2.0 checkpoint
                archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
            elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                # Load from a PyTorch checkpoint
                archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            else:
                raise EnvironmentError(
                    "Error no file named {} found in directory {} or `from_pt` set to False".format(
                        [WEIGHTS_NAME, TF2_WEIGHTS_NAME], pretrained_model_name_or_path
                    )
                )
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            archive_file = pretrained_model_name_or_path
        elif os.path.isfile(pretrained_model_name_or_path + ".index"):
            # TF 1.x style checkpoint given without its ".index" suffix.
            archive_file = pretrained_model_name_or_path + ".index"
        else:
            archive_file = hf_bucket_url(
                pretrained_model_name_or_path, postfix=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME)
            )

        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(
                archive_file,
                cache_dir=cache_dir,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
            )
        except EnvironmentError as e:
            # Distinguish "known model, server unreachable" from "unknown
            # name/path" in the log, then re-raise for the caller.
            if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
                logger.error("Couldn't reach server at '{}' to download pretrained weights.".format(archive_file))
            else:
                logger.error(
                    "Model name '{}' was not found in model name list ({}). "
                    "We assumed '{}' was a path or url but couldn't find any file "
                    "associated to this path or url.".format(
                        pretrained_model_name_or_path,
                        ", ".join(cls.pretrained_model_archive_map.keys()),
                        archive_file,
                    )
                )
            raise e
        if resolved_archive_file == archive_file:
            logger.info("loading weights file {}".format(archive_file))
        else:
            logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
    else:
        resolved_archive_file = None

    # Instantiate model.
    model = cls(config, *model_args, **model_kwargs)

    if from_pt:
        # Load from a PyTorch checkpoint
        return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)

    model(model.dummy_inputs, training=False)  # build the network with dummy inputs

    assert os.path.isfile(resolved_archive_file), "Error retrieving file {}".format(resolved_archive_file)
    # 'by_name' allow us to do transfer learning by skipping/adding layers
    # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
    try:
        model.load_weights(resolved_archive_file, by_name=True)
    except OSError:
        raise OSError(
            "Unable to load weights from h5 file. "
            "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
        )

    model(model.dummy_inputs, training=False)  # Make sure restore ops are run

    # Check if the models are the same to output loading informations
    with h5py.File(resolved_archive_file, "r") as f:
        # Some h5 files nest the weights under a "model_weights" group.
        if "layer_names" not in f.attrs and "model_weights" in f:
            f = f["model_weights"]
        hdf5_layer_names = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))
    model_layer_names = set(layer.name for layer in model.layers)
    # Compare checkpoint layer names with the instantiated model's layers to
    # report what was skipped or newly initialized.
    missing_keys = list(model_layer_names - hdf5_layer_names)
    unexpected_keys = list(hdf5_layer_names - model_layer_names)
    error_msgs = []

    if len(missing_keys) > 0:
        logger.info(
            "Layers of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
        )
    if len(unexpected_keys) > 0:
        logger.info(
            "Layers from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
        )
    if len(error_msgs) > 0:
        raise RuntimeError(
            "Error(s) in loading weights for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
        )
    if output_loading_info:
        loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs}
        return model, loading_info
    return model
def prepare_inputs_for_generation(self, inputs, **kwargs):
    """Build the keyword arguments passed to the model at each generation step.

    The base implementation simply forwards the token ids; subclasses override
    this to add model-specific inputs (e.g. cached past states).
    """
    return {"inputs": inputs}
def _do_output_past(self, outputs):
    """Return True when `outputs` carries a cached past/memory slot that can
    speed up incremental decoding (i.e. the model returned more than one
    element and the config enables `output_past` or a positive `mem_len`)."""
    if len(outputs) <= 1:
        return False
    cfg = self.config
    mem_len = getattr(cfg, "mem_len", None)
    if mem_len:
        # Transformer-XL style memories: usable only when mem_len > 0.
        return mem_len > 0
    # Otherwise rely on the generic `output_past` flag, when present.
    return bool(getattr(cfg, "output_past", False))
    def generate(
        self,
        input_ids=None,
        max_length=None,
        min_length=None,
        do_sample=None,
        early_stopping=None,
        num_beams=None,
        temperature=None,
        top_k=None,
        top_p=None,
        repetition_penalty=None,
        bad_words_ids=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        length_penalty=None,
        no_repeat_ngram_size=None,
        num_return_sequences=None,
        attention_mask=None,
        decoder_start_token_id=None,
    ):
        r""" Generates sequences for models with a LM head. The method currently supports greedy or penalized greedy decoding, sampling with top-k or nucleus sampling
        and beam-search.
        Adapted in part from `Facebook's XLM beam search code`_.
        .. _`Facebook's XLM beam search code`:
            https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
        Parameters:
            input_ids: (`optional`) `tf.Tensor` of `dtype=tf.int32` of shape `(batch_size, sequence_length)`
                The sequence used as a prompt for the generation. If `None` the method initializes
                it as an empty `torch.LongTensor` of shape `(1,)`.
            max_length: (`optional`) int
                The max length of the sequence to be generated. Between 1 and infinity. Default to 20.
            min_length: (`optional`) int
                The min length of the sequence to be generated. Between 0 and infinity. Default to 0.
            do_sample: (`optional`) bool
                If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
            early_stopping: (`optional`) bool
                if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
            num_beams: (`optional`) int
                Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
            temperature: (`optional`) float
                The value used to module the next token probabilities. Must be strictely positive. Default to 1.0.
            top_k: (`optional`) int
                The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
            top_p: (`optional`) float
                The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
            repetition_penalty: (`optional`) float
                The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
            bos_token_id: (`optional`) int
                Beginning of sentence token if no prompt is provided. Default to specicic model bos_token_id or None if it does not exist.
            pad_token_id: (`optional`) int
                Pad token. Defaults to pad_token_id as defined in the models config.
            eos_token_id: (`optional`) int
                EOS token. Defaults to eos_token_id as defined in the models config.
            length_penalty: (`optional`) float
                Exponential penalty to the length. Default to 1.
            no_repeat_ngram_size: (`optional`) int
                If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.
            bad_words_ids: (`optional`) list of lists of int
                `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
            num_return_sequences: (`optional`) int
                The number of independently computed returned sequences for each element in the batch. Default to 1.
            attention_mask (`optional`) obj: `tf.Tensor` with `dtype=tf.int32` of same shape as `input_ids`
                Mask to avoid performing attention on padding token indices.
                Mask values selected in ``[0, 1]``:
                ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
                Defaults to `None`.
                `What are attention masks? <../glossary.html#attention-mask>`__
            decoder_start_token_id=None: (`optional`) int
                If an encoder-decoder model starts decoding with a different token than BOS.
                Defaults to `None` and is changed to `BOS` later.
        Return:
            output: `tf.Tensor` of `dtype=tf.int32` shape `(batch_size * num_return_sequences, sequence_length)`
                sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`
        Examples::
            tokenizer = AutoTokenizer.from_pretrained('distilgpt2')   # Initialize tokenizer
            model = TFAutoModelWithLMHead.from_pretrained('distilgpt2')    # Download model and configuration from S3 and cache.
            outputs = model.generate(max_length=40)  # do greedy decoding
            print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
            tokenizer = AutoTokenizer.from_pretrained('openai-gpt')   # Initialize tokenizer
            model = TFAutoModelWithLMHead.from_pretrained('openai-gpt')    # Download model and configuration from S3 and cache.
            input_context = 'The dog'
            input_ids = tokenizer.encode(input_context, return_tensors='tf')  # encode input context
            outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5)  # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
            for i in range(3): #  3 output sequences were generated
                print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
            tokenizer = AutoTokenizer.from_pretrained('distilgpt2')   # Initialize tokenizer
            model = TFAutoModelWithLMHead.from_pretrained('distilgpt2')    # Download model and configuration from S3 and cache.
            input_context = 'The dog'
            input_ids = tokenizer.encode(input_context, return_tensors='tf')  # encode input context
            outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3)  # 3 generate sequences using by sampling
            for i in range(3): #  3 output sequences were generated
                print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
            tokenizer = AutoTokenizer.from_pretrained('ctrl')   # Initialize tokenizer
            model = TFAutoModelWithLMHead.from_pretrained('ctrl')    # Download model and configuration from S3 and cache.
            input_context = 'Legal My neighbor is'  # "Legal" is one of the control codes for ctrl
            input_ids = tokenizer.encode(input_context, return_tensors='tf')  # encode input context
            outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2)  # generate sequences
            print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
            tokenizer = AutoTokenizer.from_pretrained('gpt2')   # Initialize tokenizer
            model = TFAutoModelWithLMHead.from_pretrained('gpt2')    # Download model and configuration from S3 and cache.
            input_context = 'My cute dog'  # "Legal" is one of the control codes for ctrl
            bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
            input_ids = tokenizer.encode(input_context, return_tensors='tf')  # encode input context
            outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids)  # generate sequences without allowing bad_words to be generated
        """
        # We cannot generate if the model does not have a LM head
        if self.get_output_embeddings() is None:
            raise AttributeError(
                "You tried to generate sequences with a model that does not have a LM Head."
                "Please use another model class (e.g. `TFOpenAIGPTLMHeadModel`, `TFXLNetLMHeadModel`, `TFGPT2LMHeadModel`, `TFCTRLLMHeadModel`, `TFT5ForConditionalGeneration`, `TFTransfoXLLMHeadModel`)"
            )
        # Every argument left as None falls back to the value stored in the model config.
        max_length = max_length if max_length is not None else self.config.max_length
        min_length = min_length if min_length is not None else self.config.min_length
        do_sample = do_sample if do_sample is not None else self.config.do_sample
        early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
        num_beams = num_beams if num_beams is not None else self.config.num_beams
        temperature = temperature if temperature is not None else self.config.temperature
        top_k = top_k if top_k is not None else self.config.top_k
        top_p = top_p if top_p is not None else self.config.top_p
        repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
        bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
        pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
        length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
        no_repeat_ngram_size = (
            no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
        )
        bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
        num_return_sequences = (
            num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
        )
        decoder_start_token_id = (
            decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
        )
        if input_ids is not None:
            batch_size = shape_list(input_ids)[0]  # overriden by the input batch_size
        else:
            batch_size = 1
        # Validate every (possibly config-derived) argument before any tensor work.
        assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictely positive integer."
        assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
        assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
        assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
        assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictely positive integer."
        assert temperature > 0, "`temperature` should be strictely positive."
        assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
        assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
        assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
        assert input_ids is not None or (
            isinstance(bos_token_id, int) and bos_token_id >= 0
        ), "If input_ids is not defined, `bos_token_id` should be a positive integer."
        assert pad_token_id is None or (
            isinstance(pad_token_id, int) and (pad_token_id >= 0)
        ), "`pad_token_id` should be a positive integer."
        assert (eos_token_id is None) or (
            isinstance(eos_token_id, int) and (eos_token_id >= 0)
        ), "`eos_token_id` should be a positive integer."
        assert length_penalty > 0, "`length_penalty` should be strictely positive."
        assert (
            isinstance(num_return_sequences, int) and num_return_sequences > 0
        ), "`num_return_sequences` should be a strictely positive integer."
        assert (
            bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
        ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
        if input_ids is None:
            assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
                "you should either supply a context to complete as `input_ids` input "
                "or a `bos_token_id` (integer >= 0) as a first token to start the generation."
            )
            # no prompt given: start every sequence from the BOS token
            input_ids = tf.fill((batch_size, 1), bos_token_id)
        else:
            assert len(shape_list(input_ids)) == 2, "Input prompt should be of shape (batch_size, sequence length)."
        # not allow to duplicate outputs when greedy decoding
        if do_sample is False:
            if num_beams == 1:
                # no_beam_search greedy generation conditions
                assert (
                    num_return_sequences == 1
                ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
            else:
                # beam_search greedy generation conditions
                assert (
                    num_beams >= num_return_sequences
                ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
        # create attention mask if necessary
        # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
        if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids.numpy()):
            attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), dtype=tf.int32)
        elif attention_mask is None:
            attention_mask = tf.ones_like(input_ids)
        if pad_token_id is None and eos_token_id is not None:
            logger.warning(
                "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
            )
            pad_token_id = eos_token_id
        # current position and vocab size
        cur_len = shape_list(input_ids)[1]
        vocab_size = self.config.vocab_size
        # set effective batch size and effective batch multiplier according to do_sample
        if do_sample:
            effective_batch_size = batch_size * num_return_sequences
            effective_batch_mult = num_return_sequences
        else:
            effective_batch_size = batch_size
            effective_batch_mult = 1
        # Expand input ids if num_beams > 1 or num_return_sequences > 1
        if num_return_sequences > 1 or num_beams > 1:
            input_ids_len = shape_list(input_ids)[-1]
            # duplicate each prompt `effective_batch_mult * num_beams` times along a new axis ...
            input_ids = tf.broadcast_to(
                tf.expand_dims(input_ids, 1), (batch_size, effective_batch_mult * num_beams, input_ids_len)
            )
            attention_mask = tf.broadcast_to(
                tf.expand_dims(attention_mask, 1), (batch_size, effective_batch_mult * num_beams, input_ids_len)
            )
            # ... then flatten back to 2D so downstream code treats each copy as its own row
            input_ids = tf.reshape(
                input_ids, (effective_batch_size * num_beams, input_ids_len)
            )  # shape: (batch_size * num_return_sequences * num_beams, cur_len)
            attention_mask = tf.reshape(
                attention_mask, (effective_batch_size * num_beams, input_ids_len)
            )  # shape: (batch_size * num_return_sequences * num_beams, cur_len)
        if self.config.is_encoder_decoder:
            if decoder_start_token_id is None:
                decoder_start_token_id = bos_token_id
            assert (
                decoder_start_token_id is not None
            ), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
            assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
            assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
            # get encoder and store encoder outputs
            encoder = self.get_encoder()
            encoder_outputs = encoder(input_ids, attention_mask=attention_mask)
            # create empty decoder_input_ids
            input_ids = tf.ones((effective_batch_size * num_beams, 1), dtype=tf.int32,) * decoder_start_token_id
            cur_len = 1
        else:
            encoder_outputs = None
            cur_len = shape_list(input_ids)[-1]
        # dispatch to beam search or to single-beam (greedy / sampling) generation
        if num_beams > 1:
            output = self._generate_beam_search(
                input_ids,
                cur_len=cur_len,
                max_length=max_length,
                min_length=min_length,
                do_sample=do_sample,
                early_stopping=early_stopping,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                no_repeat_ngram_size=no_repeat_ngram_size,
                bad_words_ids=bad_words_ids,
                bos_token_id=bos_token_id,
                pad_token_id=pad_token_id,
                eos_token_id=eos_token_id,
                decoder_start_token_id=decoder_start_token_id,
                batch_size=effective_batch_size,
                num_return_sequences=num_return_sequences,
                length_penalty=length_penalty,
                num_beams=num_beams,
                vocab_size=vocab_size,
                encoder_outputs=encoder_outputs,
                attention_mask=attention_mask,
            )
        else:
            output = self._generate_no_beam_search(
                input_ids,
                cur_len=cur_len,
                max_length=max_length,
                min_length=min_length,
                do_sample=do_sample,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                no_repeat_ngram_size=no_repeat_ngram_size,
                bad_words_ids=bad_words_ids,
                bos_token_id=bos_token_id,
                pad_token_id=pad_token_id,
                eos_token_id=eos_token_id,
                decoder_start_token_id=decoder_start_token_id,
                batch_size=effective_batch_size,
                vocab_size=vocab_size,
                encoder_outputs=encoder_outputs,
                attention_mask=attention_mask,
            )
        return output
def _generate_no_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
vocab_size,
encoder_outputs,
attention_mask,
):
""" Generate sequences for each example without beam search (num_beams == 1).
All returned sequence are generated independantly.
"""
# length of generated sentences / unfinished sentences
unfinished_sents = tf.ones_like(input_ids[:, 0])
sent_lengths = tf.ones_like(input_ids[:, 0]) * max_length
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(input_ids, past=past, attention_mask=attention_mask)
outputs = self(**model_inputs)
next_token_logits = outputs[0][:, -1, :]
# if model has past, then set the past variable to speed up decoding
if self._do_output_past(outputs):
past = outputs[1]
# repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
next_token_logits_penalties = _create_next_token_logits_penalties(
input_ids, next_token_logits, repetition_penalty
)
next_token_logits = tf.math.multiply(next_token_logits, next_token_logits_penalties)
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_tokens = calc_banned_ngram_tokens(input_ids, batch_size, no_repeat_ngram_size, cur_len)
# create banned_tokens boolean mask
banned_tokens_indices_mask = []
for banned_tokens_slice in banned_tokens:
banned_tokens_indices_mask.append(
[True if token in banned_tokens_slice else False for token in range(vocab_size)]
)
next_token_logits = set_tensor_by_indices_to_value(
next_token_logits, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf")
)
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
banned_tokens_indices_mask = []
for banned_tokens_slice in banned_tokens:
banned_tokens_indices_mask.append(
[True if token in banned_tokens_slice else False for token in range(vocab_size)]
)
next_token_logits = set_tensor_by_indices_to_value(
next_token_logits, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf")
)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
# create eos_token_id boolean mask
is_token_logit_eos_token = tf.convert_to_tensor(
[True if token is eos_token_id else False for token in range(vocab_size)], dtype=tf.bool
)
eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [batch_size, vocab_size])
next_token_logits = set_tensor_by_indices_to_value(
next_token_logits, eos_token_indices_mask, -float("inf")
)
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
# Top-p/top-k filtering
next_token_logits = tf_top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
# Sample
next_token = tf.squeeze(
tf.random.categorical(next_token_logits, dtype=tf.int32, num_samples=1), axis=1
)
else:
# Greedy decoding
next_token = tf.math.argmax(next_token_logits, axis=-1, output_type=tf.int32)
# update generations and finished sentences
if eos_token_id is not None:
# pad finished sentences if eos_token_id exist
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
else:
tokens_to_add = next_token
input_ids = tf.concat([input_ids, tf.expand_dims(tokens_to_add, -1)], 1)
if eos_token_id is not None:
eos_in_sents = tokens_to_add == eos_token_id
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
is_sents_unfinished_and_token_to_add_is_eos = tf.math.multiply(
unfinished_sents, tf.cast(eos_in_sents, tf.int32)
)
sent_lengths = (
sent_lengths * (1 - is_sents_unfinished_and_token_to_add_is_eos)
+ cur_len * is_sents_unfinished_and_token_to_add_is_eos
)
# unfinished_sents is set to zero if eos in sentence
unfinished_sents -= is_sents_unfinished_and_token_to_add_is_eos
# stop when there is a </s> in each sentence, or if we exceed the maximul length
if tf.math.reduce_max(unfinished_sents) == 0:
break
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = tf.concat(
[attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1
)
cur_len = cur_len + 1
# if there are different sentences lengths in the batch, some batches have to be padded
min_sent_length = tf.math.reduce_min(sent_lengths)
max_sent_length = tf.math.reduce_max(sent_lengths)
if min_sent_length != max_sent_length:
assert pad_token_id is not None, "`Pad_token_id` has to be defined if batches have different lengths"
# finished sents are filled with pad_token
padding = tf.ones([batch_size, max_sent_length.numpy()], dtype=tf.int32) * pad_token_id
# create length masks for tf.where operation
broad_casted_sent_lengths = tf.broadcast_to(
tf.expand_dims(sent_lengths, -1), [batch_size, max_sent_length]
)
broad_casted_range = tf.transpose(
tf.broadcast_to(tf.expand_dims(tf.range(max_length), -1), [max_length, batch_size])
)
decoded = tf.where(broad_casted_range < broad_casted_sent_lengths, input_ids, padding)
else:
decoded = input_ids
return decoded
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
decoder_start_token_id,
eos_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times
if do_sample is False:
beam_scores_begin = tf.zeros((batch_size, 1), dtype=tf.float32)
beam_scores_end = tf.ones((batch_size, num_beams - 1), dtype=tf.float32) * (-1e9)
beam_scores = tf.concat([beam_scores_begin, beam_scores_end], -1)
else:
beam_scores = tf.zeros((batch_size, num_beams), dtype=tf.float32)
beam_scores = tf.reshape(beam_scores, (batch_size * num_beams,))
# cache compute states
past = encoder_outputs
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(input_ids, past=past, attention_mask=attention_mask)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._do_output_past(outputs):
past = outputs[1]
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
next_token_logits_penalties = _create_next_token_logits_penalties(
input_ids, next_token_logits, repetition_penalty
)
next_token_logits = tf.math.multiply(next_token_logits, next_token_logits_penalties)
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
# calculate log softmax score
scores = tf.nn.log_softmax(next_token_logits, axis=-1) # (batch_size * num_beams, vocab_size)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
# create eos_token_id boolean mask
num_batch_hypotheses = batch_size * num_beams
is_token_logit_eos_token = tf.convert_to_tensor(
[True if token is eos_token_id else False for token in range(vocab_size)], dtype=tf.bool
)
eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [num_batch_hypotheses, vocab_size])
scores = set_tensor_by_indices_to_value(scores, eos_token_indices_mask, -float("inf"))
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
num_batch_hypotheses = batch_size * num_beams
banned_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
# create banned_tokens boolean mask
banned_tokens_indices_mask = []
for banned_tokens_slice in banned_tokens:
banned_tokens_indices_mask.append(
[True if token in banned_tokens_slice else False for token in range(vocab_size)]
)
scores = set_tensor_by_indices_to_value(
scores, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf")
)
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
banned_tokens_indices_mask = []
for banned_tokens_slice in banned_tokens:
banned_tokens_indices_mask.append(
[True if token in banned_tokens_slice else False for token in range(vocab_size)]
)
scores = set_tensor_by_indices_to_value(
scores, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf")
)
assert shape_list(scores) == [batch_size * num_beams, vocab_size]
if do_sample:
_scores = scores + tf.broadcast_to(
beam_scores[:, None], (batch_size * num_beams, vocab_size)
) # (batch_size * num_beams, vocab_size)
# Top-p/top-k filtering
_scores = tf_top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
_scores = tf.reshape(_scores, (batch_size, num_beams * vocab_size))
next_tokens = tf.random.categorical(
_scores, dtype=tf.int32, num_samples=2 * num_beams
) # (batch_size, 2 * num_beams)
# Compute next scores
next_scores = tf.gather(_scores, next_tokens, batch_dims=1) # (batch_size, 2 * num_beams)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores_indices = tf.argsort(next_scores, direction="DESCENDING", axis=1)
next_scores = tf.gather(next_scores, next_scores_indices, batch_dims=1) # (batch_size, num_beams * 2)
next_tokens = tf.gather(next_tokens, next_scores_indices, batch_dims=1) # (batch_size, num_beams * 2)
else:
# Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)
next_scores = scores + tf.broadcast_to(
beam_scores[:, None], (batch_size * num_beams, vocab_size)
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together (we are keeping top hypothesis accross beams)
next_scores = tf.reshape(
next_scores, (batch_size, num_beams * vocab_size)
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = tf.math.top_k(next_scores, k=2 * num_beams, sorted=True)
assert shape_list(next_scores) == shape_list(next_tokens) == [batch_size, 2 * num_beams]
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence or last iteration
if (eos_token_id is not None) and (token_id.numpy() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
tf.identity(input_ids[effective_beam_id]), beam_token_score.numpy()
)
else:
# add next predicted token if it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# the beam for next step is full
if len(next_sent_beam) == num_beams:
break
# Check if were done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
tf.reduce_max(next_scores[batch_idx]).numpy(), cur_len=cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1)
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = tf.convert_to_tensor([x[0] for x in next_batch_beam], dtype=tf.float32)
beam_tokens = tf.convert_to_tensor([x[1] for x in next_batch_beam], dtype=tf.int32)
beam_idx = tf.convert_to_tensor([x[2] for x in next_batch_beam], dtype=tf.int32)
# re-order batch
input_ids = tf.stack([tf.identity(input_ids[x, :]) for x in beam_idx])
input_ids = tf.concat([input_ids, tf.expand_dims(beam_tokens, 1)], axis=-1)
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = tf.concat(
[attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1
)
# update current length
cur_len = cur_len + 1
# finalize all open beam hypotheses and end to generated hypotheses
for batch_idx in range(batch_size):
# Add all open beam hypothesis to generated_hyps
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).numpy().item() is not eos_token_id for token_id in next_tokens[batch_idx]
):
assert tf.reduce_all(
next_scores[batch_idx, :num_beams] == tf.reshape(beam_scores, (batch_size, num_beams))[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx], tf.reshape(beam_scores, (batch_size, num_beams))[batch_idx]
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].numpy().item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths_list = []
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
best_hyp = sorted_hyps.pop()[1]
sent_lengths_list.append(len(best_hyp))
best.append(best_hyp)
assert output_batch_size == len(best), "Output batch size {} must match output beam hypotheses {}".format(
output_batch_size, len(best)
)
sent_lengths = tf.convert_to_tensor(sent_lengths_list, dtype=tf.int32)
# shorter batches are filled with pad_token
if tf.reduce_min(sent_lengths).numpy() != tf.reduce_max(sent_lengths).numpy():
assert pad_token_id is not None, "`Pad_token_id` has to be defined"
sent_max_len = min(tf.reduce_max(sent_lengths).numpy() + 1, max_length)
decoded_list = []
# fill with hypothesis and eos_token_id if necessary
for i, hypo in enumerate(best):
assert sent_lengths[i] == shape_list(hypo)[0]
# if sent_length is max_len do not pad
if sent_lengths[i] == sent_max_len:
decoded_slice = hypo
else:
# else pad to sent_max_len
num_pad_tokens = sent_max_len - sent_lengths[i]
padding = pad_token_id * tf.ones((num_pad_tokens,), dtype=tf.int32)
decoded_slice = tf.concat([hypo, padding], axis=-1)
# finish sentence with EOS token
if sent_lengths[i] < max_length:
decoded_slice = tf.where(
tf.range(sent_max_len, dtype=tf.int32) == sent_lengths[i],
eos_token_id * tf.ones((sent_max_len,), dtype=tf.int32),
decoded_slice,
)
# add to list
decoded_list.append(decoded_slice)
decoded = tf.stack(decoded_list)
else:
# none of the hypotheses have an eos_token
assert (len(hypo) == max_length for hypo in best)
decoded = tf.stack(best)
return decoded
@staticmethod
def _reorder_cache(past, beam_idx):
    """Permute the cached key/value states (`past`) to follow `beam_idx`.

    Beam search reorders the live beams at every step; the per-layer caches
    must be permuted the same way. Batch dim of `past` and `mems` is at the
    2nd position here, hence the gather/concat along axis 1.
    """
    reordered_past = []
    for layer_past in past:
        # get the correct batch idx from layer past batch dim
        # batch dim of `past` and `mems` is at 2nd position
        reordered_layer_past = [tf.identity(tf.expand_dims(layer_past[:, i], 1)) for i in beam_idx]
        reordered_layer_past = tf.concat(reordered_layer_past, axis=1)
        # check that shape matches: reordering must not change the tensor shape
        assert shape_list(reordered_layer_past) == shape_list(layer_past)
        reordered_past.append(reordered_layer_past)
    past = tuple(reordered_past)
    return past
def _create_next_token_logits_penalties(input_ids, logits, repetition_penalty):
    """Build a multiplicative penalty tensor for tokens already present in `input_ids`.

    For each batch row, every previously generated token gets a factor:
    `repetition_penalty` if its current logit is negative, `1/repetition_penalty`
    if positive — in both cases the token becomes less likely (for penalty > 1).
    Requires eager tensors (uses `.numpy()`).

    NOTE(review): a previously seen token whose logit is exactly 0 gets factor 0
    (the `logit_penalties` array stays at its zero init) — presumably harmless
    since 0 * 0 == 0, but confirm this is intended.
    """
    # create logit penalties for already seen input_ids; start from all-ones (no-op)
    token_penalties = np.ones(shape_list(logits))
    prev_input_ids = [np.unique(input_id) for input_id in input_ids.numpy()]
    for i, prev_input_id in enumerate(prev_input_ids):
        logit_penalized = logits[i].numpy()[prev_input_id]
        logit_penalties = np.zeros(logit_penalized.shape)
        # if previous logit score is < 0 then multiply repetition penalty else divide
        logit_penalties[logit_penalized < 0] = repetition_penalty
        logit_penalties[logit_penalized > 0] = 1 / repetition_penalty
        np.put(token_penalties[i], prev_input_id, logit_penalties)
    return tf.convert_to_tensor(token_penalties, dtype=tf.float32)
def calc_banned_ngram_tokens(prev_input_ids, num_hypos, no_repeat_ngram_size, cur_len):
    """Return, per hypothesis, the tokens that would complete an already-seen n-gram.

    Ported from fairseq's no-repeat-ngram logic for beam search: for each
    hypothesis, record every generated n-gram as prefix -> continuations, then
    ban the continuations of the (n-1)-token suffix currently at the end.
    """
    # not enough generated tokens yet to repeat any n-gram
    if cur_len + 1 < no_repeat_ngram_size:
        return [[] for _ in range(num_hypos)]

    ngram_maps = []
    for hypo_idx in range(num_hypos):
        tokens = prev_input_ids[hypo_idx].numpy().tolist()
        prefix_to_next = {}
        # slide an n-token window over the sequence, keyed by its (n-1)-token prefix
        for window in zip(*(tokens[offset:] for offset in range(no_repeat_ngram_size))):
            prefix = tuple(window[:-1])
            prefix_to_next.setdefault(prefix, []).append(window[-1])
        ngram_maps.append(prefix_to_next)

    banned_tokens = []
    for hypo_idx in range(num_hypos):
        # the suffix that the next token would extend into a full n-gram
        start = cur_len + 1 - no_repeat_ngram_size
        suffix = tuple(prev_input_ids[hypo_idx, start:cur_len].numpy().tolist())
        banned_tokens.append(ngram_maps[hypo_idx].get(suffix, []))
    return banned_tokens
def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids):
    """Return, per batch entry, the bad-word completions that must be banned next.

    A bad-word sequence ``[t0, ..., tk]`` bans token ``tk`` whenever the
    hypothesis currently ends with ``[t0, ..., t(k-1)]``; a single-token
    bad word is always banned.

    Args:
        prev_input_ids: iterable of 1D tensors (one per hypothesis) of generated ids.
        bad_words_ids: list of non-empty token-id sequences to forbid.

    Returns:
        List of lists of banned token ids, aligned with ``prev_input_ids``.
    """

    def _tokens_match(prev_tokens, tokens):
        if len(tokens) == 0:
            # empty prefix always matches (single-token bad word: always ban it)
            return True
        # BUGFIX: compare the prefix length against the hypothesis length.
        # The original compared against len(prev_input_ids) — the *batch size* —
        # which wrongly rejected matches whenever the prefix was longer than
        # the number of hypotheses.
        if len(tokens) > len(prev_tokens):
            # prefix longer than what has been generated so far: cannot match
            return False
        if prev_tokens[-len(tokens):] == tokens:
            # hypothesis ends exactly with the bad-word prefix
            return True
        else:
            return False

    banned_tokens = []
    for prev_input_ids_slice in prev_input_ids:
        banned_tokens_slice = []
        for banned_token_seq in bad_words_ids:
            assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format(
                bad_words_ids
            )
            if _tokens_match(prev_input_ids_slice.numpy().tolist(), banned_token_seq[:-1]) is False:
                # prefix does not match the current hypothesis: nothing to ban
                continue
            banned_tokens_slice.append(banned_token_seq[-1])
        banned_tokens.append(banned_tokens_slice)
    return banned_tokens
def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (batch size, vocabulary size)
            if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
            Make sure we keep at least min_tokens_to_keep per batch example in the output
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317

        Filtered-out positions are set to `filter_value` (default -inf) so a
        subsequent softmax assigns them zero probability.
    """
    logits_shape = shape_list(logits)

    if top_k > 0:
        top_k = min(max(top_k, min_tokens_to_keep), logits_shape[-1])  # Safety check
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = logits < tf.math.top_k(logits, k=top_k)[0][..., -1, None]
        logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value)

    if top_p < 1.0:
        sorted_indices = tf.argsort(logits, direction="DESCENDING")
        sorted_logits = tf.gather(
            logits, sorted_indices, axis=-1, batch_dims=1
        )  # expects logits to be of dim (batch_size, vocab_size)

        cumulative_probs = tf.math.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)

        # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
        sorted_indices_to_remove = cumulative_probs > top_p

        if min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
            sorted_indices_to_remove = tf.concat(
                [
                    tf.zeros_like(sorted_indices_to_remove[:, :min_tokens_to_keep]),
                    sorted_indices_to_remove[:, min_tokens_to_keep:],
                ],
                -1,
            )

        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove = tf.roll(sorted_indices_to_remove, 1, axis=-1)
        # after the roll, force the top-1 token to be kept in every row
        sorted_indices_to_remove = tf.concat(
            [tf.zeros_like(sorted_indices_to_remove[:, :1]), sorted_indices_to_remove[:, 1:]], -1,
        )
        # scatter sorted tensors to original indexing
        indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove, sorted_indices)
        logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value)
    return logits
def scatter_values_on_batch_indices(values, batch_indices):
    """Scatter `values` back to the positions given per-row by `batch_indices`.

    Used to undo a per-row argsort: output[row, batch_indices[row, j]] receives
    values[row, j]. Both arguments have the same 2D shape.
    """
    shape = shape_list(batch_indices)
    # broadcast batch dim to shape: row index of each element, flattened to one row
    broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1])
    # transform batch_indices to pair_indices: (row, column) pairs for scatter_nd
    pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
    # scatter values to pair indices
    return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape)
def set_tensor_by_indices_to_value(tensor, indices, value):
    """Return a copy of `tensor` where positions flagged in boolean `indices` hold `value`."""
    # TF tensors are immutable, so build a same-shape tensor filled with `value`
    # and select between it and the input element-wise.
    replacement = tf.zeros_like(tensor) + value
    return tf.where(indices, replacement, tensor)
class BeamHypotheses(object):
    """Container tracking the `num_beams` best finished hypotheses for one sentence."""

    def __init__(self, num_beams, max_length, length_penalty, early_stopping):
        """Initialize an empty n-best list."""
        self.max_length = max_length - 1  # ignoring bos_token
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.num_beams = num_beams
        self.beams = []
        self.worst_score = 1e9

    def __len__(self):
        """Number of hypotheses currently stored."""
        return len(self.beams)

    def add(self, hyp, sum_logprobs):
        """Insert `hyp`, scored by length-penalized log-prob, evicting the worst if full."""
        score = sum_logprobs / len(hyp) ** self.length_penalty
        if len(self) >= self.num_beams and score <= self.worst_score:
            # not good enough to enter the n-best list
            return
        self.beams.append((score, hyp))
        if len(self) > self.num_beams:
            # over capacity: drop the single worst entry and track the new worst score
            ranked = sorted((s, idx) for idx, (s, _) in enumerate(self.beams))
            del self.beams[ranked[0][1]]
            self.worst_score = ranked[1][0]
        else:
            self.worst_score = min(score, self.worst_score)

    def is_done(self, best_sum_logprobs, cur_len=None):
        """True when the list is full and no in-flight hypothesis can beat its worst entry."""
        if len(self) < self.num_beams:
            return False
        if self.early_stopping:
            return True
        # best achievable score for a hypothesis still being generated
        effective_len = self.max_length if cur_len is None else cur_len
        best_possible = best_sum_logprobs / effective_len ** self.length_penalty
        return self.worst_score >= best_possible
class TFConv1D(tf.keras.layers.Layer):
    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
        """ TFConv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
            Basically works like a Linear layer but the weights are transposed
        """
        super().__init__(**kwargs)
        self.nf = nf  # number of output features
        self.nx = nx  # number of input features
        self.initializer_range = initializer_range

    def build(self, input_shape):
        # weight stored as (nx, nf), i.e. transposed w.r.t. a Dense kernel
        self.weight = self.add_weight(
            "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
        )
        self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())

    def call(self, x):
        # flatten leading (batch, seq) dims, apply the affine map, then restore them
        bz, sl = shape_list(x)[:2]

        x = tf.reshape(x, [-1, self.nx])
        x = tf.matmul(x, self.weight) + self.bias

        x = tf.reshape(x, [bz, sl, self.nf])
        return x
class TFSharedEmbeddings(tf.keras.layers.Layer):
    """Construct shared token embeddings.

    The same weight matrix serves both as the input embedding lookup
    (``mode="embedding"``) and as the output projection to vocabulary logits
    (``mode="linear"``) — i.e. weight tying.
    """

    def __init__(self, vocab_size, hidden_size, initializer_range=None, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # default initializer stddev follows the 1/sqrt(hidden_size) convention
        self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range

    def build(self, input_shape):
        """Build shared token embedding layer
        Shared weights logic adapted from
            https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        self.weight = self.add_weight(
            "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
        )
        super().build(input_shape)

    def call(self, inputs, mode="embedding"):
        """Get token embeddings of inputs.
            Args:
                inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids)
                mode: string, a valid value is one of "embedding" and "linear".
            Returns:
                outputs: (1) If mode == "embedding", output embedding tensor, float32 with
                    shape [batch_size, length, embedding_size]; (2) mode == "linear", output
                    linear tensor, float32 with shape [batch_size, length, vocab_size].
            Raises:
                ValueError: if mode is not valid.

            Shared weights logic adapted from
                https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        if mode == "embedding":
            return self._embedding(inputs)
        elif mode == "linear":
            return self._linear(inputs)
        else:
            raise ValueError("mode {} is not valid.".format(mode))

    def _embedding(self, input_ids):
        """Applies embedding based on inputs tensor."""
        return tf.gather(self.weight, input_ids)

    def _linear(self, inputs):
        """Computes logits by running inputs through a linear layer.
            Args:
                inputs: A float32 tensor with shape [..., hidden_size]
            Returns:
                float32 tensor with shape [..., vocab_size].
        """
        first_dims = shape_list(inputs)[:-1]

        x = tf.reshape(inputs, [-1, self.hidden_size])
        # tied output projection: same matrix as the embedding lookup, transposed
        logits = tf.matmul(x, self.weight, transpose_b=True)

        return tf.reshape(logits, first_dims + [self.vocab_size])
class TFSequenceSummary(tf.keras.layers.Layer):
    r""" Compute a single vector summary of a sequence hidden states according to various possibilities:
        Args of the config class:
            summary_type:
                - 'last' => [default] take the last token hidden state (like XLNet)
                - 'first' => take the first token hidden state (like Bert)
                - 'mean' => take the mean of all tokens hidden states
                - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
                - 'attn' => Not implemented now, use multi-head attention
            summary_use_proj: Add a projection after the vector extraction
            summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
            summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default
            summary_first_dropout: Add a dropout before the projection and activation
            summary_last_dropout: Add a dropout after the projection and activation
    """

    def __init__(self, config, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        # NOTE(review): this guards on `summary_use_proj` to decide whether to
        # read `summary_type` -- presumably it should be
        # hasattr(config, "summary_type"); kept as-is, confirm against upstream.
        self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last"
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
        if self.has_summary:
            # project either to num_labels or back to hidden_size
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = tf.keras.layers.Dense(
                num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
            )

        self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh"
        if self.has_activation:
            self.activation = tf.keras.activations.tanh

        self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
        if self.has_first_dropout:
            self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)

        self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
        if self.has_last_dropout:
            self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)

    def call(self, inputs, training=False):
        """ hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer.
            cls_index: [optional] position of the classification token if summary_type == 'cls_index',
                shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
                if summary_type == 'cls_index' and cls_index is None:
                    we take the last token of the sequence as classification token
        """
        # accept a bare tensor, a tuple/list (hidden_states[, cls_index]), or a dict
        if not isinstance(inputs, (dict, tuple, list)):
            hidden_states = inputs
            cls_index = None
        elif isinstance(inputs, (tuple, list)):
            hidden_states = inputs[0]
            cls_index = inputs[1] if len(inputs) > 1 else None
            assert len(inputs) <= 2, "Too many inputs."
        else:
            hidden_states = inputs.get("hidden_states")
            cls_index = inputs.get("cls_index", None)

        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = tf.reduce_mean(hidden_states, axis=1)
        elif self.summary_type == "cls_index":
            hidden_shape = shape_list(hidden_states)  # e.g. [batch, num choices, seq length, hidden dims]
            if cls_index is None:
                cls_index = tf.fill(
                    hidden_shape[:-2], hidden_shape[-2] - 1
                )  # A tensor full of shape [batch] or [batch, num choices] full of sequence length
            cls_shape = shape_list(cls_index)
            if len(cls_shape) <= len(hidden_shape) - 2:
                cls_index = cls_index[..., tf.newaxis]
            # else:
            #     cls_index = cls_index[..., tf.newaxis]
            #     cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
            output = tf.squeeze(
                output, axis=len(hidden_shape) - 2
            )  # shape of output: (batch, num choices, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        if self.has_first_dropout:
            output = self.first_dropout(output, training=training)

        if self.has_summary:
            output = self.summary(output)

        if self.has_activation:
            output = self.activation(output)

        if self.has_last_dropout:
            output = self.last_dropout(output, training=training)

        return output
def shape_list(x):
    """Return the shape of `x` as a Python list, using dynamic dims where static ones are unknown."""
    static_shape = x.shape.as_list()
    dynamic_shape = tf.shape(x)
    # prefer the static value; fall back to the dynamic tensor entry for None dims
    return [static_dim if static_dim is not None else dynamic_shape[axis] for axis, static_dim in enumerate(static_shape)]
def get_initializer(initializer_range=0.02):
    """Creates a `tf.initializers.truncated_normal` with the given range.
        Args:
            initializer_range: float, initializer range for stddev.
        Returns:
            TruncatedNormal initializer with stddev = `initializer_range`.
    """
    # shared by TFConv1D / TFSharedEmbeddings / TFSequenceSummary above
    return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
| 84,031 | 48.752516 | 472 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_ctrl.py | # coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch CTRL model."""
import logging
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import Conv1D, PreTrainedModel
logger = logging.getLogger(__name__)
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP = {"ctrl": "https://storage.googleapis.com/sf-ctrl/pytorch/seqlen256_v1.bin"}
def angle_defn(pos, i, d_model_size):
    """Return `pos` scaled by the inverse-frequency term 1 / 10000^(2*(i//2)/d_model_size)."""
    exponent = (2 * (i // 2)) / d_model_size
    inv_freq = 1 / torch.pow(10000, exponent)
    return pos * inv_freq
def positional_encoding(position, d_model_size, dtype):
    """Build the sinusoidal positional-encoding table of shape (position, d_model_size).

    Even input dimensions feed the sine half, odd ones the cosine half; the two
    halves are concatenated along the last axis (CTRL's convention — not interleaved).
    """
    pos = torch.arange(position, dtype=dtype).unsqueeze(1)
    dim = torch.arange(d_model_size, dtype=dtype).unsqueeze(0)
    # inverse-frequency term, identical to angle_defn(pos, dim, d_model_size)
    angle_rates = 1 / torch.pow(10000, (2 * (dim // 2)) / d_model_size)
    angle_rads = pos * angle_rates

    sines = torch.sin(angle_rads[:, 0::2])
    cosines = torch.cos(angle_rads[:, 1::2])

    return torch.cat([sines, cosines], dim=-1)
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
    """Scaled dot-product attention with optional causal mask, additive mask and head mask.

    Shapes: q/k/v are (batch, heads, seq, depth). Returns a tuple
    (attention output, attention weights).
    """
    # (..., q_len, depth) x (..., depth, k_len) -> (..., q_len, k_len)
    attn_scores = torch.matmul(q, k.permute(0, 1, 3, 2))
    attn_scores = attn_scores / np.sqrt(k.shape[-1])

    if mask is not None:
        # causal mask applied on the trailing (q_len, k_len) window
        nd, ns = attn_scores.size(-2), attn_scores.size(-1)
        attn_scores += mask[ns - nd : ns, :ns] * -1e4

    if attention_mask is not None:
        # additive padding mask (already scaled to large negatives where masked)
        attn_scores = attn_scores + attention_mask

    attn_weights = torch.softmax(attn_scores, dim=-1)

    if head_mask is not None:
        attn_weights = attn_weights * head_mask

    return torch.matmul(attn_weights, v), attn_weights
class MultiHeadAttention(torch.nn.Module):
    """Multi-head attention block used by CTRL's encoder layers."""

    def __init__(self, d_model_size, num_heads, output_attentions=False):
        super().__init__()
        self.output_attentions = output_attentions
        self.num_heads = num_heads
        self.d_model_size = d_model_size

        # per-head dimensionality; assumes d_model_size is divisible by num_heads
        self.depth = int(d_model_size / self.num_heads)

        self.Wq = torch.nn.Linear(d_model_size, d_model_size)
        self.Wk = torch.nn.Linear(d_model_size, d_model_size)
        self.Wv = torch.nn.Linear(d_model_size, d_model_size)

        self.dense = torch.nn.Linear(d_model_size, d_model_size)

    def split_into_heads(self, x, batch_size):
        # (batch, seq, d_model) -> (batch, num_heads, seq, depth)
        x = x.reshape(batch_size, -1, self.num_heads, self.depth)
        return x.permute([0, 2, 1, 3])

    def forward(self, v, k, q, mask, layer_past=None, attention_mask=None, head_mask=None):
        """Returns (output, present[, attentions]); `present` caches stacked (k, v) for fast decoding."""
        batch_size = q.shape[0]

        q = self.Wq(q)
        k = self.Wk(k)
        v = self.Wv(v)

        q = self.split_into_heads(q, batch_size)
        k = self.split_into_heads(k, batch_size)
        v = self.split_into_heads(v, batch_size)

        if layer_past is not None:
            # prepend the cached keys/values along the sequence axis
            past_key, past_value = layer_past[0], layer_past[1]
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)
        present = torch.stack((k, v))

        output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
        scaled_attention = output[0].permute([0, 2, 1, 3])
        attn = output[1]
        # merge the heads back to (batch, seq, d_model)
        original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size)
        output = self.dense(original_size_attention)

        outputs = (output, present)
        if self.output_attentions:
            outputs = outputs + (attn,)
        return outputs
def point_wise_feed_forward_network(d_model_size, dff):
    """Two-layer position-wise MLP: d_model_size -> dff -> ReLU -> d_model_size."""
    layers = [
        torch.nn.Linear(d_model_size, dff),
        torch.nn.ReLU(),
        torch.nn.Linear(dff, d_model_size),
    ]
    return torch.nn.Sequential(*layers)
class EncoderLayer(torch.nn.Module):
    """One CTRL transformer block: pre-LN self-attention followed by a pre-LN feed-forward MLP."""

    def __init__(self, d_model_size, num_heads, dff, rate=0.1, output_attentions=False):
        super().__init__()

        self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads, output_attentions)
        self.ffn = point_wise_feed_forward_network(d_model_size, dff)

        self.layernorm1 = torch.nn.LayerNorm(d_model_size, eps=1e-6)
        self.layernorm2 = torch.nn.LayerNorm(d_model_size, eps=1e-6)

        self.dropout1 = torch.nn.Dropout(rate)
        self.dropout2 = torch.nn.Dropout(rate)

    def forward(self, x, mask, layer_past=None, attention_mask=None, head_mask=None):
        # pre-norm self-attention with residual connection
        normed = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(
            normed, normed, normed, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask
        )
        attn_output = attn_outputs[0]
        attn_output = self.dropout1(attn_output)
        out1 = x + attn_output

        # pre-norm feed-forward with residual connection
        out2 = self.layernorm2(out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout2(ffn_output)
        out2 = out1 + ffn_output

        # forward any extra attention outputs (present, attentions) unchanged
        outputs = (out2,) + attn_outputs[1:]
        return outputs
class CTRLPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    config_class = CTRLConfig
    pretrained_model_archive_map = CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm: zero bias, unit gain
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
CTRL_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
CTRL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.CTRLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
    CTRL_START_DOCSTRING,
)
class CTRLModel(CTRLPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.output_past = config.output_past

        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer

        # fixed (non-learned) sinusoidal position table, computed once up front
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float)

        self.w = nn.Embedding(config.vocab_size, config.n_embd)

        self.dropout = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList(
            [
                EncoderLayer(config.n_embd, config.n_head, config.dff, config.resid_pdrop, config.output_attentions)
                for _ in range(config.n_layer)
            ]
        )
        self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.init_weights()

    def get_input_embeddings(self):
        return self.w

    def set_input_embeddings(self, new_embeddings):
        self.w = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        # NOTE(review): EncoderLayer exposes `multi_head_attention`, not `attn`,
        # and MultiHeadAttention defines no `prune_heads` -- this looks like it
        # would raise AttributeError if ever called; confirm against upstream.
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    @add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
        past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::
        from transformers import CTRLTokenizer, CTRLModel
        import torch
        tokenizer = CTRLTokenizer.from_pretrained('ctrl')
        model = CTRLModel.from_pretrained('ctrl')
        input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            # length of the cached prefix, so new positions continue after it
            past_length = past[0][0].size(-2)
        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Attention mask.
        if attention_mask is not None:
            assert batch_size > 0, "batch_size has to be defined and > 0"
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = (
                    head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
                )  # We can specify head_mask for each layer
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
            token_type_embeds = self.w(token_type_ids)
            # CTRL scales all embeddings by sqrt(d_model)
            token_type_embeds *= np.sqrt(self.d_model_size)
        else:
            token_type_embeds = 0
        position_ids = position_ids.view(-1, input_shape[-1])

        if inputs_embeds is None:
            inputs_embeds = self.w(input_ids)
        # inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded
        seq_len = input_shape[-1]
        # causal mask over the full (past + current) length; 1 above the diagonal
        mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(inputs_embeds.device)

        inputs_embeds *= np.sqrt(self.d_model_size)

        pos_embeds = self.pos_encoding[position_ids, :].to(inputs_embeds.device)

        hidden_states = inputs_embeds + pos_embeds + token_type_embeds

        hidden_states = self.dropout(hidden_states)

        output_shape = input_shape + (inputs_embeds.size(-1),)
        presents = ()
        all_hidden_states = ()
        all_attentions = []
        for i, (h, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
            outputs = h(
                hidden_states, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i]
            )
            hidden_states, present = outputs[:2]
            if self.output_past:
                presents = presents + (present,)

            if self.output_attentions:
                all_attentions.append(outputs[2])

        # final layer norm, then restore the original (batch, seq, hidden) shape
        hidden_states = self.layernorm(hidden_states)
        hidden_states = hidden_states.view(*output_shape)
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_past:
            outputs = outputs + (presents,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs
@add_start_docstrings(
    """The CTRL Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
    CTRL_START_DOCSTRING,
)
class CTRLLMHeadModel(CTRLPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.transformer = CTRLModel(config)
        # Projection from hidden states to vocabulary logits. Weight tying with
        # the input embeddings is handled by the base class machinery via
        # get_output_embeddings(); CTRL additionally learns an output bias.
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)

        self.init_weights()

    def get_output_embeddings(self):
        # Exposes the LM head so the base class can tie its weight to the
        # input embedding matrix.
        return self.lm_head

    def prepare_inputs_for_generation(self, input_ids, past, **kwargs):
        # only last token for inputs_ids if past is defined in kwargs
        # (earlier positions are already cached in `past`, so only the newest
        # token needs to be fed through the model during generation).
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)

        return {"input_ids": input_ids, "past": past}

    @add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``
            All labels set to ``-100`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``

    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
            Language modeling loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        import torch
        from transformers import CTRLTokenizer, CTRLLMHeadModel

        tokenizer = CTRLTokenizer.from_pretrained('ctrl')
        model = CTRLLMHeadModel.from_pretrained('ctrl')

        input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]
        """
        # Run the base CTRL transformer; `labels` are *not* forwarded — the LM
        # loss is computed here from the logits.
        transformer_outputs = self.transformer(
            input_ids,
            past=past,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )

        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        outputs = (lm_logits,) + transformer_outputs[1:]

        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens.
            # CrossEntropyLoss ignores -100 labels by default, matching the docstring.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)
| 24,739 | 44.145985 | 177 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/tokenization_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
"""
import glob
import logging
import os
import pickle
import re
from collections import Counter, OrderedDict
from typing import List, Optional, Tuple, Union
import numpy as np
from tokenizers import Encoding, Tokenizer
from tokenizers.implementations import BaseTokenizer
from tokenizers.models import WordLevel
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
from tokenizers.pre_tokenizers import CharDelimiterSplit, WhitespaceSplit
from tokenizers.processors import BertProcessing
from .file_utils import cached_path, is_torch_available
from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast
# PyTorch is an optional dependency here; the slow tokenizer only needs it to
# (de)serialize its pickled state via torch.load/torch.save.
if is_torch_available():
    import torch

logger = logging.getLogger(__name__)

# File names used when saving/loading a vocabulary to/from a local directory.
# The slow (Python) tokenizer pickles its entire state to ``vocab.bin``, while
# the fast (Rust-backed) tokenizer serializes to ``vocab.json``.
VOCAB_FILES_NAMES = {"pretrained_vocab_file": "vocab.bin", "vocab_file": "vocab.txt"}
VOCAB_FILES_NAMES_FAST = {"pretrained_vocab_file": "vocab.json", "vocab_file": "vocab.json"}

# Remote locations of the pretrained vocabularies, keyed by model shortcut name.
PRETRAINED_VOCAB_FILES_MAP = {
    "pretrained_vocab_file": {
        "transfo-xl-wt103": "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin",
    }
}

PRETRAINED_VOCAB_FILES_MAP_FAST = {
    "pretrained_vocab_file": {
        "transfo-xl-wt103": "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.json",
    }
}

# ``None`` = no fixed maximum input length for this checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "transfo-xl-wt103": None,
}

# Pre-processed corpus archive used by TransfoXLCorpus.from_pretrained.
PRETRAINED_CORPUS_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin",
}
CORPUS_NAME = "corpus.bin"
class TransfoXLTokenizer(PreTrainedTokenizer):
    """
    Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl

    This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users
    should refer to the superclass for more information regarding methods.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        special=None,
        min_freq=0,
        max_size=None,
        lower_case=False,
        delimiter=None,
        vocab_file=None,
        pretrained_vocab_file=None,
        never_split=None,
        unk_token="<unk>",
        eos_token="<eos>",
        additional_special_tokens=["<formula>"],
        **kwargs
    ):
        """
        Args:
            special: extra special symbols placed at the front of the vocabulary.
            min_freq: minimum corpus frequency for a symbol to enter the vocabulary.
            max_size: maximum vocabulary size (``None`` for unbounded).
            lower_case: lowercase the input before tokenizing.
            delimiter: token delimiter (``""`` tokenizes into characters, ``None``
                splits on whitespace).
            vocab_file: plain-text vocabulary, one symbol per line (first field).
            pretrained_vocab_file: ``torch.save``'d state of a previously saved
                TransfoXLTokenizer (see ``save_vocabulary``).
        """
        super().__init__(
            unk_token=unk_token, eos_token=eos_token, additional_special_tokens=additional_special_tokens, **kwargs
        )

        # no default special tokens - you can update these values if you add special tokens
        self.max_len_single_sentence = self.max_len
        self.max_len_sentences_pair = self.max_len

        if never_split is None:
            never_split = self.all_special_tokens
        if special is None:
            special = []
        self.counter = Counter()
        self.special = special
        self.min_freq = min_freq
        self.max_size = max_size
        self.lower_case = lower_case
        self.delimiter = delimiter
        self.vocab_file = vocab_file
        self.never_split = never_split
        self.punctuation_symbols = '!"#$%&()*+,-./\:;<=>?@[\\]^_`{|}~'  # noqa: W605
        self.punction_without_space_before_pattern = re.compile(r"[^\s][{}]".format(self.punctuation_symbols))
        self.punctuation_with_space_around_pattern = self._compile_space_around_punctuation_pattern()

        try:
            if pretrained_vocab_file is not None:
                # Hack because, honestly this tokenizer was not made to be used
                # in a library like ours, at all.
                vocab_dict = torch.load(pretrained_vocab_file)
                for key, value in vocab_dict.items():
                    if key not in self.__dict__:
                        self.__dict__[key] = value
        except Exception:
            raise ValueError(
                "Unable to parse file {}. Unknown format. "
                "If you tried to load a model saved through TransfoXLTokenizerFast,"
                "please note they are not compatible.".format(pretrained_vocab_file)
            )

        # FIX: ``build_vocab`` used to be called both inside the ``try`` above and
        # here, rebuilding the vocabulary twice and mislabeling any build failure
        # as a ``pretrained_vocab_file`` parse error. One call is sufficient.
        if vocab_file is not None:
            self.build_vocab()

    def _compile_space_around_punctuation_pattern(self):
        # Zero-width pattern matching positions immediately before a punctuation
        # symbol that is directly followed by a non-space character.
        look_ahead_for_special_token = "(?=[{}])".format(self.punctuation_symbols)
        look_ahead_to_match_all_except_space = "(?=[^\s])"  # noqa: W605
        return re.compile(r"" + look_ahead_for_special_token + look_ahead_to_match_all_except_space)

    def count_file(self, path, verbose=False, add_eos=False):
        """Tokenize a text file line by line, update the symbol counter, and
        return the list of tokenized sentences."""
        if verbose:
            logger.info("counting file {} ...".format(path))
        assert os.path.exists(path)

        sents = []
        with open(path, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    logger.info(" line {}".format(idx))
                symbols = self.tokenize(line, add_eos=add_eos)
                self.counter.update(symbols)
                sents.append(symbols)

        return sents

    def count_sents(self, sents, verbose=False):
        """
        sents : a list of sentences, each a list of tokenized symbols
        """
        if verbose:
            logger.info("counting {} sents ...".format(len(sents)))
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                logger.info(" line {}".format(idx))
            self.counter.update(symbols)

    def _build_from_file(self, vocab_file):
        """Load ``idx2sym``/``sym2idx`` from a plain-text vocabulary file (the
        first whitespace-separated field of each line is the symbol)."""
        self.idx2sym = []
        self.sym2idx = OrderedDict()

        with open(vocab_file, "r", encoding="utf-8") as f:
            for line in f:
                symb = line.strip().split()[0]
                self.add_symbol(symb)
        if "<UNK>" in self.sym2idx:
            self.unk_idx = self.sym2idx["<UNK>"]
        elif "<unk>" in self.sym2idx:
            self.unk_idx = self.sym2idx["<unk>"]
        else:
            # FIX: corrected the typo "<unkown>" in the original error message.
            raise ValueError("No <unknown> token in vocabulary")

    def save_vocabulary(self, vocab_path):
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            vocab_path (:obj:`str`):
                The directory in which to save the vocabulary.

        Returns:
            :obj:`Tuple(str)`: Paths to the files saved.
        """
        logger.warning(
            "Please note you will not be able to load the save vocabulary in"
            " Rust-based TransfoXLTokenizerFast as they don't share the same structure."
        )
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES["pretrained_vocab_file"])
        else:
            vocab_file = vocab_path
        # The whole tokenizer state is pickled; __init__ performs the matching load.
        torch.save(self.__dict__, vocab_file)
        return (vocab_file,)

    def build_vocab(self):
        """Build the vocabulary, either from ``self.vocab_file`` or from the
        frequency counter filled by ``count_file``/``count_sents``."""
        if self.vocab_file:
            logger.info("building vocab from {}".format(self.vocab_file))
            self._build_from_file(self.vocab_file)
            logger.info("final vocab size {}".format(len(self)))
        else:
            logger.info("building vocab with min_freq={}, max_size={}".format(self.min_freq, self.max_size))
            self.idx2sym = []
            self.sym2idx = OrderedDict()

            for sym in self.special:
                self.add_special(sym)

            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq:
                    break
                self.add_symbol(sym)

            logger.info("final vocab size {} from {} unique tokens".format(len(self), len(self.counter)))

    def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False):
        """Tokenize and numericalize a file. Returns one LongTensor per line, or
        a single concatenated LongTensor when ``ordered`` is True."""
        if verbose:
            logger.info("encoding file {} ...".format(path))
        assert os.path.exists(path)
        encoded = []
        with open(path, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    logger.info(" line {}".format(idx))
                symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos)
                encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    def encode_sents(self, sents, ordered=False, verbose=False):
        """Numericalize already-tokenized sentences (see ``encode_file``)."""
        if verbose:
            logger.info("encoding {} sents ...".format(len(sents)))
        encoded = []
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                logger.info(" line {}".format(idx))
            encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    def add_special(self, sym):
        """Add a special symbol and expose its index as ``self.<name>_idx``
        (angle brackets stripped, e.g. ``<eos>`` -> ``eos_idx``)."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
            setattr(self, "{}_idx".format(sym.strip("<>")), self.sym2idx[sym])

    def add_symbol(self, sym):
        """Append ``sym`` to the vocabulary if it is not already present."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1

    def _convert_id_to_token(self, idx):
        """Converts an id in a token (BPE) using the vocab."""
        assert 0 <= idx < len(self), "Index {} out of vocabulary range".format(idx)
        return self.idx2sym[idx]

    def _convert_token_to_id(self, sym):
        """ Converts a token (str) in an id using the vocab. """
        if sym in self.sym2idx:
            return self.sym2idx[sym]
        else:
            # logger.info('encounter unk {}'.format(sym))
            # assert '<eos>' not in sym
            if hasattr(self, "unk_idx"):
                return self.sym2idx.get(sym, self.unk_idx)
            # Backward compatibility with pre-trained models
            elif "<unk>" in self.sym2idx:
                return self.sym2idx["<unk>"]
            elif "<UNK>" in self.sym2idx:
                return self.sym2idx["<UNK>"]
            else:
                raise ValueError("Token not in vocabulary and no <unk> token in vocabulary for replacement")

    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string. """
        out_string = " ".join(tokens).strip()
        return out_string

    def convert_to_tensor(self, symbols):
        """Map a list of symbols to a LongTensor of vocabulary ids."""
        return torch.LongTensor(self.convert_tokens_to_ids(symbols))

    @property
    def vocab_size(self):
        return len(self.idx2sym)

    def get_vocab(self):
        return dict(self.sym2idx, **self.added_tokens_encoder)

    def _tokenize(self, line, add_eos=False, add_double_eos=False):
        line = line.strip()
        # convert to lower case
        if self.lower_case:
            line = line.lower()

        # empty delimiter '' will evaluate False
        if self.delimiter == "":
            symbols = line
        else:
            symbols = line.split(self.delimiter)

        if add_double_eos:  # lm1b
            return ["<S>"] + symbols + ["<S>"]
        elif add_eos:
            return symbols + ["<eos>"]
        else:
            return symbols

    def prepare_for_tokenization(self, text, **kwargs):
        # add spaces before punctuation symbols as should be done in transfo-xl
        if "add_space_before_punct_symbol" in kwargs and kwargs["add_space_before_punct_symbol"]:
            text = self.punctuation_with_space_around_pattern.sub(r" ", text)
        elif self.punction_without_space_before_pattern.search(text):
            # searches until the first occurrence of a punctuation symbol without surrounding spaces
            logger.warning(
                "You might want to consider setting `add_space_before_punct_symbol=True` as an argument to the `tokenizer.encode()` to avoid tokenizing words with punctuation symbols to the `<unk>` token"
            )

        return text
class _TransfoXLDelimiterLookupTokenizer(BaseTokenizer):
    # Rust-backed word-level tokenizer mirroring TransfoXLTokenizer's behaviour:
    # optional unicode/lowercase normalization, splitting on a delimiter (or any
    # whitespace), and optional <eos> wrapping of every sequence.
    def __init__(
        self,
        vocab_file,
        delimiter,
        lowercase,
        unk_token,
        eos_token,
        add_eos=False,
        add_double_eos=False,
        normalization: Optional[str] = None,
    ):
        try:
            tokenizer = WordLevel.from_files(vocab_file, unk_token=unk_token)
            tokenizer = Tokenizer(tokenizer)
        except Exception:
            raise ValueError(
                "Unable to parse file {}. Unknown format. "
                "If you tried to load a model saved through TransfoXLTokenizer,"
                "please note they are not compatible.".format(vocab_file)
            )

        # Create the correct normalization path
        normalizer = []

        # Include unicode normalization
        if normalization:
            normalizer += [unicode_normalizer_from_str(normalization)]

        # Include case normalization
        if lowercase:
            normalizer += [Lowercase()]

        # Chain the normalizers only when there is more than one of them.
        if len(normalizer) > 0:
            tokenizer.normalizer = Sequence(normalizer) if len(normalizer) > 1 else normalizer[0]

        # Setup the splitter
        tokenizer.pre_tokenizer = CharDelimiterSplit(delimiter) if delimiter else WhitespaceSplit()

        if add_double_eos:
            # Wrap sequences as "<eos> ... <eos>": BertProcessing is reused with
            # the eos token playing both the [CLS] and [SEP] roles.
            tokenizer.post_processor = BertProcessing(
                (eos_token, tokenizer.token_to_id(eos_token)), (eos_token, tokenizer.token_to_id(eos_token))
            )

        # Configuration recorded alongside the serialized tokenizer.
        parameters = {
            "model": "TransfoXLModel",
            "add_eos": add_eos,
            "add_double_eos": add_double_eos,
            "unk_token": unk_token,
            "eos_token": eos_token,
            "delimiter": delimiter,
            "lowercase": lowercase,
        }

        super().__init__(tokenizer, parameters)

    def encode_batch(self, sequences: List[Union[str, Tuple[str, str]]]) -> List[Encoding]:
        # Strip surrounding whitespace from every sequence (and from both
        # members of a pair) before delegating to the Rust tokenizer.
        return super().encode_batch(
            [seq.strip() if isinstance(seq, str) else (seq[0].strip(), seq[1].strip()) for seq in sequences]
        )

    def encode(self, sequence: str, pair: Optional[str] = None) -> Encoding:
        # Whitespace-stripped variant of BaseTokenizer.encode.
        return super().encode(sequence.strip(), pair.strip() if pair else pair)
class TransfoXLTokenizerFast(PreTrainedTokenizerFast):
    # "Fast" (Rust-backed) counterpart of TransfoXLTokenizer. The arguments
    # special, min_freq, max_size and never_split are accepted for signature
    # compatibility with the slow tokenizer but are not forwarded to the
    # word-level backend.

    vocab_files_names = VOCAB_FILES_NAMES_FAST
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP_FAST
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        special=None,
        min_freq=0,
        max_size=None,
        lower_case=False,
        delimiter=None,
        vocab_file=None,
        pretrained_vocab_file=None,
        never_split=None,
        unk_token="<unk>",
        eos_token="<eos>",
        additional_special_tokens=["<formula>"],
        add_eos=False,
        add_double_eos=False,
        normalization=None,
        **kwargs
    ):
        super().__init__(
            _TransfoXLDelimiterLookupTokenizer(
                # Either argument may carry the JSON vocabulary file.
                vocab_file=vocab_file or pretrained_vocab_file,
                delimiter=delimiter,
                lowercase=lower_case,
                unk_token=unk_token,
                eos_token=eos_token,
                add_eos=add_eos,
                add_double_eos=add_double_eos,
                normalization=normalization,
            ),
            unk_token=unk_token,
            eos_token=eos_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

    def save_pretrained(self, save_directory):
        # The fast tokenizer serializes to JSON, which the pickle-based slow
        # TransfoXLTokenizer cannot read back.
        logger.warning(
            "Please note you will not be able to load the vocabulary in"
            " Python-based TransfoXLTokenizer as they don't share the same structure."
        )
        return super().save_pretrained(save_directory)
class LMOrderedIterator(object):
    """Batch iterator over one strictly ordered token stream.

    The stream is cut into ``bsz`` equally long parallel columns; batches of up
    to ``bptt`` time steps are then sliced along the time axis, with targets
    shifted by one position.
    """

    def __init__(self, data, bsz, bptt, device="cpu", ext_len=None):
        """
        data -- LongTensor -- the LongTensor is strictly ordered
        """
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = 0 if ext_len is None else ext_len
        self.device = device

        # How many whole columns of height `bsz` fit into the stream.
        self.n_step = data.size(0) // bsz
        # Drop the trailing remainder that would not fill a full column.
        usable = data.narrow(0, 0, self.n_step * bsz)
        # Shape (n_step, bsz): each column is one contiguous slice of the stream.
        self.data = usable.view(bsz, -1).t().contiguous().to(device)

        # Number of mini-batches (ceiling division).
        self.n_batch = (self.n_step + self.bptt - 1) // self.bptt

    def get_batch(self, i, bptt=None):
        """Return ``(input, target, seq_len)`` for the window starting at step ``i``.

        Inputs may reach back ``ext_len`` extra steps for extended context; both
        outputs are returned batch-first, i.e. shaped ``(bsz, seq_len)``.
        """
        bptt = self.bptt if bptt is None else bptt
        seq_len = min(bptt, self.data.size(0) - 1 - i)

        stop = i + seq_len
        start = max(0, i - self.ext_len)

        inputs = self.data[start:stop]
        labels = self.data[i + 1 : stop + 1]

        data_out = inputs.transpose(0, 1).contiguous().to(self.device)
        target_out = labels.transpose(0, 1).contiguous().to(self.device)

        return data_out, target_out, seq_len

    def get_fixlen_iter(self, start=0):
        """Yield fixed-length batches covering the whole stream in order."""
        for pos in range(start, self.data.size(0) - 1, self.bptt):
            yield self.get_batch(pos)

    def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
        """Yield batches whose length is sampled around ``bptt``.

        95% of the time the mean is ``bptt``, otherwise ``bptt / 2``; lengths
        are clamped to ``[min_len, bptt + max_deviation * std]``.
        """
        longest = self.bptt + max_deviation * std
        pos = start
        while True:
            mean = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
            bptt = min(longest, max(min_len, int(np.random.normal(mean, std))))
            data, target, seq_len = self.get_batch(pos, bptt)
            pos += seq_len
            yield data, target, seq_len
            if pos >= self.data.size(0) - 2:
                break

    def __iter__(self):
        return self.get_fixlen_iter()
class LMShuffledIterator(object):
    # Packs a collection of independent, variable-length sentences into dense
    # [bptt x bsz] blocks: each of the bsz streams greedily consumes sentences
    # from a shared iterator. Positions that cannot be filled stay at -1.
    def __init__(self, data, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
        """
        data -- list[LongTensor] -- there is no order among the LongTensors
        """
        self.data = data

        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = ext_len if ext_len is not None else 0

        self.device = device
        self.shuffle = shuffle

    def get_sent_stream(self):
        # index iterator: a random permutation when shuffling, otherwise 0..n-1
        epoch_indices = np.random.permutation(len(self.data)) if self.shuffle else np.array(range(len(self.data)))

        # sentence iterator
        for idx in epoch_indices:
            yield self.data[idx]

    def stream_iterator(self, sent_stream):
        # streams for each data in the batch
        streams = [None] * self.bsz

        # Reused buffers; note torch.LongTensor(...) here allocates
        # *uninitialized* storage, which is why fill_(-1) runs each round.
        data = torch.LongTensor(self.bptt, self.bsz)
        target = torch.LongTensor(self.bptt, self.bsz)

        n_retain = 0

        while True:
            # data : [n_retain+bptt x bsz]
            # target : [bptt x bsz]
            data[n_retain:].fill_(-1)
            target.fill_(-1)

            valid_batch = True

            for i in range(self.bsz):
                n_filled = 0
                try:
                    while n_filled < self.bptt:
                        # Advance to the next sentence when the current one is
                        # exhausted (a single remaining token cannot form an
                        # (input, target) pair, hence <= 1).
                        if streams[i] is None or len(streams[i]) <= 1:
                            streams[i] = next(sent_stream)
                        # number of new tokens to fill in
                        n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
                        # first n_retain tokens are retained from last batch
                        data[n_retain + n_filled : n_retain + n_filled + n_new, i] = streams[i][:n_new]
                        # Targets are the inputs shifted by one (teacher forcing).
                        target[n_filled : n_filled + n_new, i] = streams[i][1 : n_new + 1]
                        streams[i] = streams[i][n_new:]
                        n_filled += n_new
                except StopIteration:
                    # Sentence supply ran dry: the partially filled batch is
                    # discarded and iteration ends.
                    valid_batch = False
                    break

            if not valid_batch:
                return

            data_out = data.transpose(0, 1).contiguous().to(self.device)
            target_out = target.transpose(0, 1).contiguous().to(self.device)

            yield data_out, target_out, self.bptt

            # Carry the last ext_len input steps over as extended context for
            # the next batch, growing the data buffer accordingly.
            n_retain = min(data.size(0), self.ext_len)
            if n_retain > 0:
                data[:n_retain] = data[-n_retain:]
            data.resize_(n_retain + self.bptt, data.size(1))

    def __iter__(self):
        # sent_stream is an iterator
        sent_stream = self.get_sent_stream()

        for batch in self.stream_iterator(sent_stream):
            yield batch
class LMMultiFileIterator(LMShuffledIterator):
    """Shuffled LM iterator that reads and encodes sentences lazily, one file
    at a time, instead of holding the whole dataset in memory."""

    def __init__(self, paths, vocab, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
        self.paths = paths
        self.vocab = vocab

        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = 0 if ext_len is None else ext_len

        self.device = device
        self.shuffle = shuffle

    def get_sent_stream(self, path):
        """Encode one file with the vocabulary and return a sentence iterator,
        optionally shuffling the sentences first."""
        sents = self.vocab.encode_file(path, add_double_eos=True)
        if self.shuffle:
            np.random.shuffle(sents)
        return iter(sents)

    def __iter__(self):
        if self.shuffle:
            np.random.shuffle(self.paths)

        for path in self.paths:
            # Delegate the [bptt x bsz] packing to the parent class.
            yield from self.stream_iterator(self.get_sent_stream(path))
class TransfoXLCorpus(object):
    # Container bundling a TransfoXLTokenizer with the encoded train/valid/test
    # splits of one of the supported LM benchmarks.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a pre-processed corpus.
        """
        vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP:
            corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir)
        except EnvironmentError:
            # NOTE: failure to resolve the corpus returns None rather than raising.
            logger.error(
                "Corpus '{}' was not found in corpus list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path,
                    ", ".join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys()),
                    pretrained_model_name_or_path,
                    corpus_file,
                )
            )
            return None
        if resolved_corpus_file == corpus_file:
            logger.info("loading corpus file {}".format(corpus_file))
        else:
            logger.info("loading corpus file {} from cache at {}".format(corpus_file, resolved_corpus_file))

        # Instantiate the corpus and overwrite its attributes with the cached,
        # pre-processed state; the tokenizer loaded above takes precedence.
        corpus = cls(*inputs, **kwargs)
        corpus_dict = torch.load(resolved_corpus_file)
        for key, value in corpus_dict.items():
            corpus.__dict__[key] = value
        corpus.vocab = vocab
        # Normalize whatever was cached for each split to a LongTensor.
        if corpus.train is not None:
            corpus.train = torch.tensor(corpus.train, dtype=torch.long)
        if corpus.valid is not None:
            corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
        if corpus.test is not None:
            corpus.test = torch.tensor(corpus.test, dtype=torch.long)
        return corpus

    def __init__(self, *args, **kwargs):
        # All arguments are forwarded verbatim to TransfoXLTokenizer.
        self.vocab = TransfoXLTokenizer(*args, **kwargs)
        self.dataset = None
        self.train = None
        self.valid = None
        self.test = None

    def build_corpus(self, path, dataset):
        """Count symbols, build the vocabulary, and encode the splits of one of
        the supported datasets rooted at ``path``."""
        self.dataset = dataset

        # Phase 1: fill the tokenizer's frequency counter (or locate the
        # training files for lm1b, whose vocabulary comes from a file instead).
        if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
            self.vocab.count_file(os.path.join(path, "train.txt"))
            self.vocab.count_file(os.path.join(path, "valid.txt"))
            self.vocab.count_file(os.path.join(path, "test.txt"))
        elif self.dataset == "wt103":
            self.vocab.count_file(os.path.join(path, "train.txt"))
        elif self.dataset == "lm1b":
            train_path_pattern = os.path.join(
                path,
                "1-billion-word-language-modeling-benchmark-r13output",
                "training-monolingual.tokenized.shuffled",
                "news.en-*",
            )
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called

        self.vocab.build_vocab()

        # Phase 2: encode the splits. For lm1b the training split stays as a
        # list of file paths (train_paths, bound in the branch above) and is
        # consumed lazily by LMMultiFileIterator.
        if self.dataset in ["ptb", "wt2", "wt103"]:
            self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True)
            self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True)
            self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True)
        elif self.dataset in ["enwik8", "text8"]:
            self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True, add_eos=False)
            self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True, add_eos=False)
            self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True, add_eos=False)
        elif self.dataset == "lm1b":
            self.train = train_paths
            self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=False, add_double_eos=True)
            self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=False, add_double_eos=True)

    def get_iterator(self, split, *args, **kwargs):
        """Return the batch iterator matching ``split`` and ``self.dataset``.

        NOTE(review): an unrecognized split/dataset combination leaves
        ``data_iter`` unbound and raises at the return — callers are expected
        to pass valid values.
        """
        if split == "train":
            if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset == "lm1b":
                kwargs["shuffle"] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
        elif split in ["valid", "test"]:
            data = self.valid if split == "valid" else self.test
            if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == "lm1b":
                data_iter = LMShuffledIterator(data, *args, **kwargs)

        return data_iter
def get_lm_corpus(datadir, dataset):
    """Return a TransfoXLCorpus for ``dataset``, using the cache in ``datadir``.

    On first use the corpus is built from the raw files and saved with
    ``torch.save`` to ``cache.pt``; a legacy plain-pickle cache (``cache.pkl``)
    is also honored for backward compatibility.
    """
    fn = os.path.join(datadir, "cache.pt")
    fn_pickle = os.path.join(datadir, "cache.pkl")
    if os.path.exists(fn):
        logger.info("Loading cached dataset...")
        # FIX: load the file whose existence was just checked. The old code
        # checked ``fn`` but loaded ``fn_pickle``, and its elif re-tested the
        # same ``fn``, making the pickle branch unreachable.
        corpus = torch.load(fn)
    elif os.path.exists(fn_pickle):
        logger.info("Loading cached dataset from pickle...")
        with open(fn_pickle, "rb") as fp:
            corpus = pickle.load(fp)
    else:
        logger.info("Producing dataset {}...".format(dataset))
        # Dataset-specific tokenizer configuration (see TransfoXLTokenizer).
        kwargs = {}
        if dataset in ["wt103", "wt2"]:
            kwargs["special"] = ["<eos>"]
            kwargs["lower_case"] = False
        elif dataset == "ptb":
            kwargs["special"] = ["<eos>"]
            kwargs["lower_case"] = True
        elif dataset == "lm1b":
            kwargs["special"] = []
            kwargs["lower_case"] = False
            kwargs["vocab_file"] = os.path.join(datadir, "1b_word_vocab.txt")
        elif dataset in ["enwik8", "text8"]:
            pass

        corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
        torch.save(corpus, fn)

    return corpus
| 28,550 | 36.175781 | 204 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_transfo_xl_utilities.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utilities for PyTorch Transformer XL model.
Directly adapted from https://github.com/kimiyoung/transformer-xl.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional input projections (Transformer-XL).

    The vocabulary is split by ``cutoffs`` into a frequent "head" shortlist plus
    tail clusters. The head softmax scores the shortlist tokens plus one extra
    entry per tail cluster; a token in tail cluster ``i`` then has
    ``log p = head_logprob[cluster_i] + tail_logprob_i[token]``.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        """
        Args:
            n_token: vocabulary size.
            d_embed: embedding size used by the head (first) cluster.
            d_proj: dimensionality of incoming hidden states.
            cutoffs: increasing cluster boundaries, exclusive of ``n_token``.
            div_val: divisor applied to the embedding size of each successive
                cluster (1 = shared output matrix, projections optional).
            keep_order: if True, ``forward`` returns NLL in input order.
        """
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # Head softmax scores the shortlist plus one slot per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    # No projection needed when dimensions already match.
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val ** i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))

                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        # Optionally project hidden states down to the cluster's embedding size
        # before applying the output layer.
        if proj is None:
            logit = F.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = F.linear(hidden, proj.t().contiguous())
            logit = F.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """
        Params:
            hidden :: [len*bsz x d_proj]
            labels :: [len*bsz]
        Return:
            if labels is None:
                out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
            else:
                out :: [len*bsz] Negative log likelihood
        We could replace this implementation by the native PyTorch one
        if their's had an option to set bias on all clusters in the native one.
        here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
        """
        if labels is not None:
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size " "in the batch dimension.")

        if self.n_clusters == 0:
            # Single-cluster degenerate case: a plain (projected) softmax.
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                out = -F.log_softmax(logit, dim=-1).gather(1, labels.unsqueeze(1)).squeeze(1)
            else:
                out = F.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases for each cluster; the head (i == 0)
            # additionally gets the per-cluster "token" rows appended.
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = F.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    # Restrict to the examples whose label lives in this cluster.
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        # Fill results contiguously in cluster-processing order.
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        r""" Computes log probabilities for all :math:`n\_classes`
        From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
        Args:
            hidden (Tensor): a minibatch of examples
        Returns:
            log-probabilities of for each class :math:`c`
            in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
            parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
        Shape:
            - Input: :math:`(N, in\_features)`
            - Output: :math:`(N, n\_classes)`
        """
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return F.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases (same scheme as in ``forward``)
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = F.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                    # FIX: use the same head-slot convention as ``forward``.
                    # The previous code used ``head_logprob[:, -i]`` (reversed
                    # cluster index, and a shape-incompatible broadcast) and
                    # ``out[:, start_idx, stop_idx]`` (three indices into a 2-D
                    # tensor); the head log-prob must be broadcast over the tail
                    # vocabulary and written into the *slice* start_idx:stop_idx.
                    cluster_prob_idx = self.cutoffs[0] + i - 1
                    logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 10,418 | 41.70082 | 132 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/convert_pytorch_checkpoint_to_tf2.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Convert pytorch checkpoints to TensorFlow """
import argparse
import logging
import os
from transformers import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
ElectraConfig,
FlaubertConfig,
GPT2Config,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TFAlbertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPT2LMHeadModel,
TFOpenAIGPTLMHeadModel,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
cached_path,
is_torch_available,
load_pytorch_checkpoint_in_tf2_model,
)
if is_torch_available():
import torch
import numpy as np
from transformers import (
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
GPT2LMHeadModel,
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
XLNetLMHeadModel,
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
XLMWithLMHeadModel,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
XLMRobertaForMaskedLM,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
RobertaForMaskedLM,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
CamembertForMaskedLM,
CamembertForSequenceClassification,
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
FlaubertWithLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
CTRLLMHeadModel,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,
AlbertForMaskedLM,
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
T5ForConditionalGeneration,
T5_PRETRAINED_MODEL_ARCHIVE_MAP,
ElectraForPreTraining,
ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,
)
else:
(
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
GPT2LMHeadModel,
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
XLNetLMHeadModel,
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
XLMWithLMHeadModel,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
XLMRobertaForMaskedLM,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
RobertaForMaskedLM,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
CamembertForMaskedLM,
CamembertForSequenceClassification,
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
FlaubertWithLMHeadModel,
DistilBertForMaskedLM,
DistilBertForSequenceClassification,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
CTRLLMHeadModel,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,
AlbertForMaskedLM,
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
T5ForConditionalGeneration,
T5_PRETRAINED_MODEL_ARCHIVE_MAP,
ElectraForPreTraining,
ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,
) = (
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
logging.basicConfig(level=logging.INFO)

# Mapping: model-type key -> (config class, TF 2.0 model class, PyTorch model class,
# pretrained PyTorch weight URL map, pretrained config URL map).
# Keys that name a specific finetuned checkpoint (e.g. "...-finetuned-squad",
# "...-mrpc", "roberta-large-mnli") select the matching task-specific head class.
MODEL_CLASSES = {
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "gpt2": (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
        GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForMaskedLM,
        AlbertForMaskedLM,
        ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
        T5_PRETRAINED_MODEL_ARCHIVE_MAP,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert one PyTorch checkpoint into a TensorFlow 2.0 ``.h5`` weight file.

    Args:
        model_type: key into ``MODEL_CLASSES`` selecting the config/model classes.
        pytorch_checkpoint_path: local path or AWS shortcut name of the PyTorch weights.
        config_file: local path or AWS shortcut name of the model config JSON.
        tf_dump_path: destination path for the TensorFlow ``.h5`` dump.
        compare_with_pt_model: when True, run both models on dummy inputs and
            assert their first outputs differ by at most 2e-2.
        use_cached_models: reuse previously downloaded files instead of forcing a re-download.

    Raises:
        ValueError: if ``model_type`` is not a known key of ``MODEL_CLASSES``.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError("Unrecognized model type, should be one of {}.".format(list(MODEL_CLASSES.keys())))

    config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

    # Resolve a shortcut config name to a cached local file, then build the TF model.
    if config_file in aws_config_map:
        config_file = cached_path(aws_config_map[config_file], force_download=not use_cached_models)
    cfg = config_class.from_json_file(config_file)
    cfg.output_hidden_states = True
    cfg.output_attentions = True
    print("Building TensorFlow model from configuration: {}".format(str(cfg)))
    model_tf = model_class(cfg)

    # Resolve a shortcut checkpoint name to a cached local file.
    if pytorch_checkpoint_path in aws_model_maps:
        pytorch_checkpoint_path = cached_path(
            aws_model_maps[pytorch_checkpoint_path], force_download=not use_cached_models
        )

    # Load the PyTorch weights into the TF 2.0 model.
    model_tf = load_pytorch_checkpoint_in_tf2_model(model_tf, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tf_outputs = model_tf(model_tf.dummy_inputs, training=False)  # build the network

        pt_state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        model_pt = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=cfg, state_dict=pt_state_dict
        )
        with torch.no_grad():
            pt_outputs = model_pt(**model_pt.dummy_inputs)

        pt_array = pt_outputs[0].numpy()
        tf_array = tf_outputs[0].numpy()
        max_abs_diff = np.amax(np.abs(pt_array - tf_array))
        print("Max absolute difference between models outputs {}".format(max_abs_diff))
        assert max_abs_diff <= 2e-2, "Error, model absolute difference is >2e-2: {}".format(max_abs_diff)

    # Save pytorch-model
    print("Save TensorFlow model to {}".format(tf_dump_path))
    model_tf.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert one or all known PyTorch checkpoints to TensorFlow 2.0 ``.h5`` dumps.

    Args:
        args_model_type: a ``MODEL_CLASSES`` key, or None to iterate over all model types.
        tf_dump_path: existing directory into which the ``*-tf_model.h5`` files are written.
        model_shortcut_names_or_path: checkpoint shortcut names/paths to convert;
            None means every pretrained checkpoint known for the model type.
        config_shortcut_names_or_path: matching config shortcut names/paths;
            None means reuse the model shortcut names.
        compare_with_pt_model: forward dummy inputs through both frameworks and compare.
        use_cached_models: reuse already-downloaded files instead of re-downloading.
        remove_cached_files: delete the downloaded config/checkpoint after conversion.
        only_convert_finetuned_models: convert only task-finetuned checkpoints
            (squad/mrpc/mnli); otherwise those are skipped.

    Raises:
        AssertionError: if ``tf_dump_path`` is not a directory.
        ValueError: if a model type is not a known key of ``MODEL_CLASSES``.
    """
    # Bug fix: validate the *parameter*, not the module-level ``args`` object,
    # which only exists when this file is executed as a script.
    assert os.path.isdir(tf_dump_path), "--tf_dump_path should be a directory"

    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(" Converting model type {}/{}: {}".format(j, len(model_types), model_type))
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(
                "Unrecognized model type {}, should be one of {}.".format(model_type, list(MODEL_CLASSES.keys()))
            )

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        # Bug fix: keep the per-type defaults in local variables so that a ``None``
        # argument is re-expanded for every model type instead of reusing (and
        # permanently overwriting the parameter with) the first type's list.
        model_names = model_shortcut_names_or_path
        if model_names is None:
            model_names = list(aws_model_maps.keys())
        config_names = config_shortcut_names_or_path
        if config_names is None:
            config_names = model_names

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_names, config_names), start=1
        ):
            print("-" * 100)
            # Bug fix: use a dedicated variable for finetuned checkpoints so the
            # outer ``model_type`` is not clobbered for subsequent iterations.
            checkpoint_model_type = model_type
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(" Skipping finetuned checkpoint {}".format(model_shortcut_name))
                    continue
                # Finetuned shortcuts have their own dedicated MODEL_CLASSES entry.
                checkpoint_model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(" Skipping not finetuned checkpoint {}".format(model_shortcut_name))
                continue
            print(
                " Converting checkpoint {}/{}: {} - model_type {}".format(
                    i, len(aws_config_map), model_shortcut_name, checkpoint_model_type
                )
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_path(aws_config_map[config_shortcut_name], force_download=not use_cached_models)
            else:
                config_file = cached_path(config_shortcut_name, force_download=not use_cached_models)

            if model_shortcut_name in aws_model_maps:
                model_file = cached_path(aws_model_maps[model_shortcut_name], force_download=not use_cached_models)
            else:
                model_file = cached_path(model_shortcut_name, force_download=not use_cached_models)

            # A raw file path would make an ugly dump name; use a generic one instead.
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=checkpoint_model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help="Model type selected in the list of {}. If not given, will download and convert all the models from AWS.".format(
list(MODEL_CLASSES.keys())
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help="Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help="The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name"
"use the configuration associated to the shortcut name on the AWS",
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 18,333 | 33.397749 | 126 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/modeling_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
import logging
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from .configuration_roberta import RobertaConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu
from .modeling_utils import create_position_ids_from_input_ids
logger = logging.getLogger(__name__)
# Shortcut name -> URL of the pretrained PyTorch weights on the HuggingFace S3 bucket.
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin",
    "roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin",
    "roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin",
    "distilroberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-pytorch_model.bin",
    "roberta-base-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-openai-detector-pytorch_model.bin",
    "roberta-large-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-openai-detector-pytorch_model.bin",
}
class RobertaEmbeddings(BertEmbeddings):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing:
    RoBERTa reserves index 1 for padding, and position ids start at padding_idx + 1.
    """

    def __init__(self, config):
        super().__init__(config)
        self.padding_idx = 1
        # Rebuild both embedding tables so their padding row matches RoBERTa's convention.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        if position_ids is None:
            if input_ids is None:
                # Embeddings only: padding is unknowable, fall back to sequential ids.
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
            else:
                # Derive position ids from token ids; padded tokens keep padding_idx.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
        return super().forward(
            input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds
        )

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """ We are provided embeddings directly. We cannot infer which are padded so just generate
        sequential position ids.

        :param torch.Tensor inputs_embeds:
        :return torch.Tensor:
        """
        leading_shape = inputs_embeds.size()[:-1]
        seq_len = leading_shape[1]
        sequential_ids = torch.arange(
            self.padding_idx + 1, seq_len + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return sequential_ids.unsqueeze(0).expand(leading_shape)
ROBERTA_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.RobertaTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    ROBERTA_START_DOCSTRING,
)
class RobertaModel(BertModel):
    """
    This class overrides :class:`~transformers.BertModel`. Please check the
    superclass for the appropriate documentation alongside usage examples.
    """

    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        # Swap in RoBERTa-style embeddings (padding-aware position ids), then re-init weights.
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()

    def get_input_embeddings(self):
        """Return the token (word) embedding module."""
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        """Replace the token (word) embedding module."""
        self.embeddings.word_embeddings = value
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(BertPreTrainedModel):
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.roberta = RobertaModel(config)
        self.lm_head = RobertaLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        """Return the decoder projection of the LM head (tied output embeddings)."""
        return self.lm_head.decoder

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        masked_lm_labels=None,
    ):
        r"""
        masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring);
            tokens with index ``-100`` are ignored (masked), the loss is only computed for tokens
            with labels in ``[0, ..., config.vocab_size]``.

        Returns:
            :obj:`tuple(torch.FloatTensor)`:
            ``(masked_lm_loss,)`` when ``masked_lm_labels`` is given, followed by
            ``prediction_scores`` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`
            and, depending on the configuration, ``hidden_states`` and ``attentions``.

        Examples::

            from transformers import RobertaTokenizer, RobertaForMaskedLM
            import torch

            tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
            model = RobertaForMaskedLM.from_pretrained('roberta-base')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids, masked_lm_labels=input_ids)
            loss, prediction_scores = outputs[:2]
        """
        encoder_outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        prediction_scores = self.lm_head(encoder_outputs[0])

        # Keep hidden states / attentions (if any) behind the prediction scores.
        result = (prediction_scores,) + encoder_outputs[2:]
        if masked_lm_labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            result = (masked_lm_loss,) + result
        return result  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
class RobertaLMHead(nn.Module):
    """Roberta Head for masked language modeling: dense -> gelu -> layer norm -> vocab projection."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, features, **kwargs):
        hidden = self.layer_norm(gelu(self.dense(features)))
        # project back to size of vocabulary with bias
        return self.decoder(hidden)
@add_start_docstrings(
    """RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
    on top of the pooled output) e.g. for GLUE tasks. """,
    ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.roberta = RobertaModel(config)
        self.classifier = RobertaClassificationHead(config)
        # Bug fix (consistency with RobertaForMaskedLM / RobertaForMultipleChoice):
        # without this call the classification head keeps PyTorch's default
        # initialization instead of the model's _init_weights scheme.
        self.init_weights()

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:
            :obj:`tuple(torch.FloatTensor)`:
            ``(loss,)`` when ``labels`` is given, followed by ``logits`` of shape
            :obj:`(batch_size, config.num_labels)` and, depending on the configuration,
            ``hidden_states`` and ``attentions``.

        Examples::

            from transformers import RobertaTokenizer, RobertaForSequenceClassification
            import torch

            tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
            model = RobertaForSequenceClassification.from_pretrained('roberta-base')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids, labels=labels)
            loss, logits = outputs[:2]
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        outputs = (logits,) + outputs[2:]
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Roberta Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(BertPreTrainedModel):
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.roberta = RobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One scalar score per choice; CrossEntropyLoss over the choice dimension
        # provides the implicit softmax.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        attention_mask=None,
        labels=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
        loss (:obj:`torch.FloatTensor`` of shape ``(1,)`, `optional`, returned when :obj:`labels` is provided):
            Classification loss.
        classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
            `num_choices` is the second dimension of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        from transformers import RobertaTokenizer, RobertaForMultipleChoice
        import torch

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForMultipleChoice.from_pretrained('roberta-base')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]

        """
        # Inputs arrive as (batch_size, num_choices, seq_len); flatten the first two
        # dimensions so the encoder sees an ordinary (batch * choices, seq_len) batch.
        # Bug fix: derive num_choices from inputs_embeds when input_ids is None
        # (previously this line crashed despite input_ids defaulting to None).
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        # Bug fix: inputs_embeds was accepted but silently dropped; flatten and forward it.
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.roberta(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Back to (batch_size, num_choices) so each row ranks its candidate answers.
        reshaped_logits = logits.view(-1, num_choices)
        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            outputs = (loss,) + outputs
        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Roberta Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.roberta = RobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Per-token projection from hidden size to the label space.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.

    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
            Classification loss.
        scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
            Classification scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        from transformers import RobertaTokenizer, RobertaForTokenClassification
        import torch

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForTokenClassification.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, scores = outputs[:2]

        """
        encoder_outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        token_states = self.dropout(encoder_outputs[0])
        logits = self.classifier(token_states)
        result = (logits,) + encoder_outputs[2:]  # keep hidden states / attentions when present
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is None:
                loss = loss_fct(flat_logits, flat_labels)
            else:
                # Restrict the loss to real (non-padding) positions by mapping
                # padded targets to the loss function's ignore_index.
                is_active = attention_mask.view(-1) == 1
                target = torch.where(
                    is_active, flat_labels, torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(flat_logits, target)
            result = (loss,) + result
        return result  # (loss), scores, (hidden_states), (attentions)
class RobertaClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        # Summarize the sequence with the hidden state of its first token
        # (<s>, RoBERTa's equivalent of BERT's [CLS]), then project to scores.
        pooled = self.dropout(features[:, 0, :])
        pooled = torch.tanh(self.dense(pooled))
        return self.out_proj(self.dropout(pooled))
@add_start_docstrings(
    """Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(BertPreTrainedModel):
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.roberta = RobertaModel(config)
        # Projects each token's hidden state to (start, end) span logits.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.

    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-start scores (before SoftMax).
        end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-end scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        # The checkpoint roberta-large is not fine-tuned for question answering. Please see the
        # examples/run_squad.py example to see how to fine-tune a model to a question answering task.

        from transformers import RobertaTokenizer, RobertaForQuestionAnswering
        import torch

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForQuestionAnswering.from_pretrained('roberta-base')
        question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
        input_ids = tokenizer.encode(question, text)
        start_scores, end_scores = model(torch.tensor([input_ids]))
        all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
        answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])

        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the 2-unit projection into separate start/end logit vectors.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; we ignore
            # those terms by clamping them onto the ignore_index.
            # Bug fix: use out-of-place clamp — clamp_() silently mutated the label
            # tensors passed in by the caller (e.g. the training loop's batch).
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs
        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
| 32,164 | 45.081662 | 149 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/tokenization_utils.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
import copy
import itertools
import json
import logging
import os
import re
from collections import defaultdict
from contextlib import contextmanager
from typing import List, Optional, Tuple, Union
from tokenizers.implementations import BaseTokenizer
from .file_utils import cached_path, hf_bucket_url, is_remote_url, is_tf_available, is_torch_available
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
from .tokenization_utils_base import (
ENCODE_KWARGS_DOCSTRING,
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
INIT_TOKENIZER_DOCSTRING,
AddedToken,
BatchEncoding,
EncodedInput,
EncodedInputPair,
PaddingStrategy,
PreTokenizedInput,
PreTokenizedInputPair,
PreTrainedTokenizerBase,
TensorType,
TextInput,
TextInputPair,
TruncationStrategy,
)
@contextmanager
def truncate_and_pad(
    tokenizer: "BaseTokenizer",
    max_length: int,
    stride: int,
    strategy: str,
    pad_to_max_length: bool,
    padding_side: str,
    pad_token_id: int,
    pad_token_type_id: int,
    pad_token: str,
):
    """
    Temporarily enable truncation and padding on ``tokenizer`` for the duration of the
    managed block, then restore it to no-truncation / no-padding.

    This contextmanager assumes the provided tokenizer has no padding / truncation
    strategy before the managed section. If your tokenizer set a padding / truncation
    strategy before, it will be reset to no padding / no truncation when exiting.

    Args:
        tokenizer: a fast (Rust-backed) tokenizer exposing ``enable_truncation`` /
            ``enable_padding`` and their ``no_*`` counterparts.
        max_length: truncation length; ``None`` disables truncation entirely.
        stride: overlap between overflowing chunks, forwarded to ``enable_truncation``.
        strategy: truncation strategy name, forwarded to ``enable_truncation``.
        pad_to_max_length: whether to pad sequences up to ``max_length``.
        padding_side: "left" or "right", which side receives padding tokens.
        pad_token_id: id of the padding token (must be >= 0 for padding to be enabled).
        pad_token_type_id: token-type id used for padding positions.
        pad_token: string form of the padding token (must be non-empty for padding).
    """
    can_pad = bool(pad_token) and pad_token_id >= 0
    if max_length is not None:
        tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
    if pad_to_max_length:
        if can_pad:
            tokenizer.enable_padding(
                max_length=max_length,
                direction=padding_side,
                pad_id=pad_token_id,
                pad_type_id=pad_token_type_id,
                pad_token=pad_token,
            )
        else:
            logger.warning(
                "Disabled padding because no padding token set (pad_token: {}, pad_token_id: {}).\n"
                "To remove this error, you can add a new pad token and then resize model embedding:\n"
                "\ttokenizer.pad_token = '<PAD>'\n\tmodel.resize_token_embeddings(len(tokenizer))".format(
                    pad_token, pad_token_id
                )
            )
    try:
        yield
    finally:
        # Bug fix: restore the tokenizer even when the managed block raises —
        # previously an exception left truncation/padding permanently enabled,
        # contradicting this manager's restore guarantee.
        if max_length is not None:
            tokenizer.no_truncation()
        if pad_to_max_length and can_pad:
            tokenizer.no_padding()
class PreTrainedTokenizer(object):
    """ Base class for all tokenizers.
    Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.
    This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
    Class attributes (overridden by derived classes):
        - ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).
        - ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file.
        - ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size.
        - ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, a dictionnary of specific arguments to pass to the ``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the ``from_pretrained()`` method.
    Parameters:
        - ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token`` and ``self.bos_token_id``
        - ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token`` and ``self.eos_token_id``
        - ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token`` and ``self.unk_token_id``
        - ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token`` and ``self.sep_token_id``
        - ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token`` and ``self.pad_token_id``
        - ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token`` and ``self.cls_token_id``
        - ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
        - ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensure they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
    """

    # Default padding/truncation behaviour; concrete tokenizers toggle these at encode time.
    padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD
    truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE

    # Per-model metadata maps; empty here, overridden by each concrete tokenizer class.
    vocab_files_names = {}
    pretrained_vocab_files_map = {}
    pretrained_init_configuration = {}
    max_model_input_sizes = {}

    # Names of the auxiliary model inputs this tokenizer produces alongside input_ids.
    model_input_names = ["token_type_ids", "attention_mask"]

    # Keyword names recognized by __init__ / add_special_tokens as special tokens.
    SPECIAL_TOKENS_ATTRIBUTES = [
        "bos_token",
        "eos_token",
        "unk_token",
        "sep_token",
        "pad_token",
        "cls_token",
        "mask_token",
        "additional_special_tokens",
    ]

    # Side on which padding tokens are added; subclasses (or kwargs) may override.
    padding_side = "right"

    # Reusable error messages for batch construction failures.
    NO_PAD_TOKEN_FOR_BATCH_MSG = (
        "No padding token is set for this model, therefore no batch can be made with uneven "
        "sequences. Set a padding token or adjust the lengths of the sequences building the "
        "batch so that every sequence is of the same length."
    )
    UNEVEN_SEQUENCES_FOR_BATCH_MSG = (
        "The sequences building the batch are not of the same size, no tensor "
        "can be built. Set `pad_to_max_length=True` to pad the smaller sequences"
        "up to the larger sequence's length."
    )
    # ------------------------------------------------------------------
    # Special-token string accessors.
    # Each getter returns the stored string (possibly None) and logs an
    # error when the token was never configured.
    # ------------------------------------------------------------------
    @property
    def bos_token(self):
        """ Beginning of sentence token (string). Log an error if used while not having been set. """
        if self._bos_token is None:
            logger.error("Using bos_token, but it is not set yet.")
        return self._bos_token

    @property
    def eos_token(self):
        """ End of sentence token (string). Log an error if used while not having been set. """
        if self._eos_token is None:
            logger.error("Using eos_token, but it is not set yet.")
        return self._eos_token

    @property
    def unk_token(self):
        """ Unknown token (string). Log an error if used while not having been set. """
        if self._unk_token is None:
            logger.error("Using unk_token, but it is not set yet.")
        return self._unk_token

    @property
    def sep_token(self):
        """ Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
        if self._sep_token is None:
            logger.error("Using sep_token, but it is not set yet.")
        return self._sep_token

    @property
    def pad_token(self):
        """ Padding token (string). Log an error if used while not having been set. """
        if self._pad_token is None:
            logger.error("Using pad_token, but it is not set yet.")
        return self._pad_token

    @property
    def cls_token(self):
        """ Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
        if self._cls_token is None:
            logger.error("Using cls_token, but it is not set yet.")
        return self._cls_token

    @property
    def mask_token(self):
        """ Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
        if self._mask_token is None:
            logger.error("Using mask_token, but it is not set yet.")
        return self._mask_token

    @property
    def additional_special_tokens(self):
        """ All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
        if self._additional_special_tokens is None:
            logger.error("Using additional_special_tokens, but it is not set yet.")
        return self._additional_special_tokens

    # ------------------------------------------------------------------
    # Setters for the special-token strings (no validation is performed).
    # ------------------------------------------------------------------
    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value

    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value

    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value

    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value

    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value

    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value

    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value

    # ------------------------------------------------------------------
    # Id counterparts: convert the special-token strings to vocabulary ids
    # via convert_tokens_to_ids (implemented by concrete subclasses).
    # ------------------------------------------------------------------
    @property
    def bos_token_id(self):
        """ Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.bos_token)

    @property
    def eos_token_id(self):
        """ Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.eos_token)

    @property
    def unk_token_id(self):
        """ Id of the unknown token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.unk_token)

    @property
    def sep_token_id(self):
        """ Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.sep_token)

    @property
    def pad_token_id(self):
        """ Id of the padding token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.pad_token)

    @property
    def pad_token_type_id(self):
        """ Id of the padding token type in the vocabulary."""
        return self._pad_token_type_id

    @property
    def cls_token_id(self):
        """ Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.cls_token)

    @property
    def mask_token_id(self):
        """ Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.mask_token)

    @property
    def additional_special_tokens_ids(self):
        """ Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.additional_special_tokens)

    def get_vocab(self):
        """ Returns the vocabulary as a dict of {token: index} pairs. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. """
        raise NotImplementedError()
    def __init__(self, max_len=None, **kwargs):
        """Initialize the shared tokenizer state.

        Args:
            max_len: maximum length of tokenized sequences; ``None`` means
                effectively unbounded (``int(1e12)``).
            **kwargs: may carry values for the special-token attributes listed in
                ``SPECIAL_TOKENS_ATTRIBUTES`` (e.g. ``bos_token``), plus
                ``padding_side`` and ``model_input_names``. Keys outside those
                sets are silently ignored by this base initializer.
        """
        # Special tokens start unset; the error-logging properties above surface misuse.
        self._bos_token = None
        self._eos_token = None
        self._unk_token = None
        self._sep_token = None
        self._pad_token = None
        self._cls_token = None
        self._mask_token = None
        self._pad_token_type_id = 0
        self._additional_special_tokens = []
        self.max_len = max_len if max_len is not None else int(1e12)
        # Padding side is right by default and over-riden in subclasses. If specified in the kwargs, it is changed.
        self.padding_side = kwargs.pop("padding_side", self.padding_side)
        self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
        # Added tokens: token->id map, its id->token inverse, and a fast membership set.
        self.added_tokens_encoder = {}
        self.unique_added_tokens_encoder = set()
        self.added_tokens_decoder = {}
        # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
        self.init_inputs = ()
        self.init_kwargs = {}
        # Accept special-token values from kwargs; additional_special_tokens must be a
        # list/tuple of strings, every other special token a single string.
        for key, value in kwargs.items():
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == "additional_special_tokens":
                    assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value)
                else:
                    assert isinstance(value, str)
                setattr(self, key, value)
    @classmethod
    def from_pretrained(cls, *inputs, **kwargs):
        r"""
        Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.
        Args:
            pretrained_model_name_or_path: either:
                - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
                - (not applicable to all derived classes, deprecated) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
            force_download: (`optional`) boolean, default False:
                Force to (re-)download the vocabulary files and override the cached versions if they exists.
            resume_download: (`optional`) boolean, default False:
                Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
                The proxies are used on each request.
            inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
            kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.
        Examples::
            # We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer
            # Download vocabulary from S3 and cache.
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            # Download vocabulary from S3 (user-uploaded) and cache.
            tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
            # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
            # If the tokenizer uses a single vocabulary file, you can point directly to this file
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
            # You can link tokens to special vocabulary when instantiating
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
            # You should be sure '<unk>' is in the vocabulary when doing that.
            # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
            assert tokenizer.unk_token == '<unk>'
        """
        # Thin public wrapper: all resolution/downloading logic lives in _from_pretrained.
        return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
    """Resolve, download/cache and load all tokenizer files, then instantiate the tokenizer.

    Handles three input forms: a known shortcut name (files fetched from the
    remote bucket), a single vocabulary file path/URL (deprecated), or a
    directory / model identifier. After instantiation it restores the saved
    init inputs/kwargs, special-token map and added tokens.
    """
    # Download/cache behaviour flags are consumed here so they are not
    # forwarded to the tokenizer __init__.
    cache_dir = kwargs.pop("cache_dir", None)
    force_download = kwargs.pop("force_download", False)
    resume_download = kwargs.pop("resume_download", False)
    proxies = kwargs.pop("proxies", None)
    local_files_only = kwargs.pop("local_files_only", False)

    s3_models = list(cls.max_model_input_sizes.keys())
    vocab_files = {}
    init_configuration = {}
    if pretrained_model_name_or_path in s3_models:
        # Shortcut name: map every vocabulary file id to its remote location.
        for file_id, map_list in cls.pretrained_vocab_files_map.items():
            vocab_files[file_id] = map_list[pretrained_model_name_or_path]
        if (
            cls.pretrained_init_configuration
            and pretrained_model_name_or_path in cls.pretrained_init_configuration
        ):
            # Copy so later mutation does not alter the class-level mapping.
            init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path].copy()
    else:
        # Not a known shortcut: treat it as a path, model identifier or URL.
        logger.info(
            "Model name '{}' not found in model shortcut name list ({}). "
            "Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.".format(
                pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path
            )
        )
        if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            if len(cls.vocab_files_names) > 1:
                raise ValueError(
                    "Calling {}.from_pretrained() with the path to a single file or url is not supported."
                    "Use a model identifier or the path to a directory instead.".format(cls.__name__)
                )
            logger.warning(
                "Calling {}.from_pretrained() with the path to a single file or url is deprecated".format(
                    cls.__name__
                )
            )
            # Single-file tokenizer: the one (and only) vocab file id points at the path.
            file_id = list(cls.vocab_files_names.keys())[0]
            vocab_files[file_id] = pretrained_model_name_or_path
        else:
            # At this point pretrained_model_name_or_path is either a directory or a model identifier name
            additional_files_names = {
                "added_tokens_file": ADDED_TOKENS_FILE,
                "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
                "tokenizer_config_file": TOKENIZER_CONFIG_FILE,
            }
            # Look for the tokenizer main vocabulary files + the additional tokens files
            for file_id, file_name in {**cls.vocab_files_names, **additional_files_names}.items():
                if os.path.isdir(pretrained_model_name_or_path):
                    full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
                    if not os.path.exists(full_file_name):
                        logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                        full_file_name = None
                else:
                    # Model identifier: build the remote URL for this file.
                    full_file_name = hf_bucket_url(pretrained_model_name_or_path, postfix=file_name)

                vocab_files[file_id] = full_file_name

    # Get files from url, cache, or disk depending on the case
    try:
        resolved_vocab_files = {}
        for file_id, file_path in vocab_files.items():
            if file_path is None:
                # Optional file that was not found locally above.
                resolved_vocab_files[file_id] = None
            else:
                resolved_vocab_files[file_id] = cached_path(
                    file_path,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                )
    except EnvironmentError:
        if pretrained_model_name_or_path in s3_models:
            # NOTE(review): this message is raised without .format(), so the
            # '{}' placeholder reaches the user verbatim — confirm upstream.
            msg = "Couldn't reach server at '{}' to download vocabulary files."
        else:
            msg = (
                "Model name '{}' was not found in tokenizers model name list ({}). "
                "We assumed '{}' was a path or url to a directory containing vocabulary files "
                "named {}, but couldn't find such vocabulary files at this path or url.".format(
                    pretrained_model_name_or_path,
                    ", ".join(s3_models),
                    pretrained_model_name_or_path,
                    list(cls.vocab_files_names.values()),
                )
            )

        raise EnvironmentError(msg)

    # All required files missing (every optional lookup failed) -> hard error.
    if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
        raise EnvironmentError(
            "Model name '{}' was not found in tokenizers model name list ({}). "
            "We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files "
            "named {} but couldn't find such vocabulary files at this path or url.".format(
                pretrained_model_name_or_path,
                ", ".join(s3_models),
                pretrained_model_name_or_path,
                list(cls.vocab_files_names.values()),
            )
        )

    for file_id, file_path in vocab_files.items():
        if file_path == resolved_vocab_files[file_id]:
            logger.info("loading file {}".format(file_path))
        else:
            logger.info("loading file {} from cache at {}".format(file_path, resolved_vocab_files[file_id]))

    # Prepare tokenizer initialization kwargs.
    # Did we save some inputs and kwargs to reload?
    tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
    if tokenizer_config_file is not None:
        with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
            init_kwargs = json.load(tokenizer_config_handle)
        saved_init_inputs = init_kwargs.pop("init_inputs", ())
        # Saved positional inputs are only used when the caller passed none.
        if not init_inputs:
            init_inputs = saved_init_inputs
    else:
        init_kwargs = init_configuration

    # Update with newly provided kwargs (caller-provided values win).
    init_kwargs.update(kwargs)

    # Set max length if needed
    if pretrained_model_name_or_path in cls.max_model_input_sizes:
        # if we're using a pretrained model, ensure the tokenizer
        # wont index sequences longer than the number of positional embeddings
        max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
        if max_len is not None and isinstance(max_len, (int, float)):
            init_kwargs["max_len"] = min(init_kwargs.get("max_len", int(1e12)), max_len)

    # Merge resolved_vocab_files arguments in init_kwargs.
    added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
    special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
    for args_name, file_path in resolved_vocab_files.items():
        if args_name not in init_kwargs:
            init_kwargs[args_name] = file_path
    if special_tokens_map_file is not None:
        with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
            special_tokens_map = json.load(special_tokens_map_handle)
        for key, value in special_tokens_map.items():
            # Explicit kwargs win over the saved special-tokens map.
            if key not in init_kwargs:
                init_kwargs[key] = value

    # Instantiate tokenizer.
    try:
        tokenizer = cls(*init_inputs, **init_kwargs)
    except OSError:
        raise OSError(
            "Unable to load vocabulary from file. "
            "Please check that the provided vocabulary is accessible and not corrupted."
        )

    # Save inputs and kwargs for saving and re-loading with ``save_pretrained``
    tokenizer.init_inputs = init_inputs
    tokenizer.init_kwargs = init_kwargs

    # update unique_added_tokens_encoder with special tokens for correct tokenization
    tokenizer.unique_added_tokens_encoder.update(set(tokenizer.all_special_tokens))

    # Add supplementary tokens.
    if added_tokens_file is not None:
        with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
            added_tok_encoder = json.load(added_tokens_handle)
        added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
        tokenizer.added_tokens_encoder.update(added_tok_encoder)
        tokenizer.added_tokens_decoder.update(added_tok_decoder)
        tokenizer.unique_added_tokens_encoder.update(set(tokenizer.added_tokens_encoder.keys()))

    return tokenizer
def save_pretrained(self, save_directory):
    """Write the full tokenizer state into ``save_directory``.

    Persists the vocabulary files, the added-tokens mapping, the
    special-tokens map and the instantiation inputs/kwargs so that the
    tokenizer can be restored later with
    :func:`~transformers.PreTrainedTokenizer.from_pretrained`.
    Post-instantiation changes other than added tokens and special-token
    mappings (e.g. flipping ``do_lower_case``) are not saved.
    """
    if not os.path.isdir(save_directory):
        logger.error("Saving directory ({}) should be a directory".format(save_directory))
        return

    special_tokens_path = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
    added_tokens_path = os.path.join(save_directory, ADDED_TOKENS_FILE)
    config_path = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)

    # Rebuild the config from the init kwargs; drop per-file-id entries,
    # which are re-created at load time from the saved vocabulary files.
    config = copy.deepcopy(self.init_kwargs)
    if len(self.init_inputs) > 0:
        config["init_inputs"] = copy.deepcopy(self.init_inputs)
    for file_id in self.vocab_files_names.keys():
        config.pop(file_id, None)

    with open(config_path, "w", encoding="utf-8") as handle:
        handle.write(json.dumps(config, ensure_ascii=False))

    with open(special_tokens_path, "w", encoding="utf-8") as handle:
        handle.write(json.dumps(self.special_tokens_map, ensure_ascii=False))

    # The added-tokens file is only written when there is something to save.
    if len(self.added_tokens_encoder) > 0:
        with open(added_tokens_path, "w", encoding="utf-8") as handle:
            handle.write(json.dumps(self.added_tokens_encoder, ensure_ascii=False))

    vocab_files = self.save_vocabulary(save_directory)

    return vocab_files + (special_tokens_path, added_tokens_path)
def save_vocabulary(self, save_directory):
    """Write only the base vocabulary files to ``save_directory``.

    Added tokens and special-token mappings are *not* saved here; use
    :func:`~transformers.PreTrainedTokenizer.save_pretrained` to persist the
    complete tokenizer state for later reloading with
    :func:`~transformers.PreTrainedTokenizer.from_pretrained`.

    Must be overridden by each concrete tokenizer subclass.
    """
    raise NotImplementedError
def vocab_size(self):
    """Return the size of the base vocabulary, excluding added tokens (subclass hook)."""
    raise NotImplementedError
def __len__(self):
    """Full vocabulary size: the base vocabulary plus all added tokens."""
    return len(self.added_tokens_encoder) + self.vocab_size
def add_tokens(self, new_tokens):
    """Extend the tokenizer vocabulary with new tokens.

    A token is added only when it is currently unknown, i.e. it maps to the
    ``unk_token`` id; new ids continue from the current vocabulary length.

    Args:
        new_tokens: a single token (str) or a list of tokens to add.

    Returns:
        The number of tokens actually added to the vocabulary.

    Examples::

        # Increase the vocabulary of a Bert model and tokenizer
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
        print('We have added', num_added_toks, 'tokens')
        # resize_token_embeddings expects the full new vocabulary size,
        # i.e. the length of the tokenizer.
        model.resize_token_embeddings(len(tokenizer))
    """
    if not new_tokens:
        return 0
    if not isinstance(new_tokens, list):
        new_tokens = [new_tokens]

    lowercase = self.init_kwargs.get("do_lower_case", False)
    fresh = []
    for candidate in new_tokens:
        assert isinstance(candidate, str)
        # Special tokens keep their casing even for lowercasing tokenizers.
        if lowercase and candidate not in self.all_special_tokens:
            candidate = candidate.lower()
        unknown = (
            candidate != self.unk_token
            and self.convert_tokens_to_ids(candidate) == self.convert_tokens_to_ids(self.unk_token)
            and candidate not in fresh
        )
        if unknown:
            fresh.append(candidate)
            logger.info("Adding %s to the vocabulary", candidate)

    # New ids continue from the current full vocabulary length.
    encoder_updates = {tok: len(self) + offset for offset, tok in enumerate(fresh)}
    self.added_tokens_encoder.update(encoder_updates)
    self.unique_added_tokens_encoder = set(self.added_tokens_encoder.keys()).union(set(self.all_special_tokens))
    self.added_tokens_decoder.update({idx: tok for tok, idx in encoder_updates.items()})

    return len(fresh)
def num_added_tokens(self, pair=False):
    """Return how many special tokens are added when encoding a sequence.

    Note:
        This actually builds the special-token template for an empty input
        and counts it, so it is exact but not free — keep it out of
        training loops.

    Args:
        pair: if True, count the tokens added for a sequence pair;
            otherwise count those added for a single sequence.

    Returns:
        Number of special tokens added to sequences.
    """
    if pair:
        return len(self.build_inputs_with_special_tokens([], []))
    return len(self.build_inputs_with_special_tokens([], None))
def add_special_tokens(self, special_tokens_dict):
    """Register special tokens (eos, pad, cls, ...) on the tokenizer.

    Each entry is added to the vocabulary if it is not already present (ids
    continue from the current vocabulary end) and bound to the matching class
    attribute, so it can be accessed later e.g. as ``tokenizer.cls_token``
    and is never split during tokenization.

    Args:
        special_tokens_dict: dict of strings whose keys must belong to the
            predefined special attributes: [``bos_token``, ``eos_token``,
            ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``,
            ``mask_token``, ``additional_special_tokens``].

    Returns:
        Number of tokens newly added to the vocabulary.

    Examples::

        # Add a new classification token to GPT-2
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        num_added_toks = tokenizer.add_special_tokens({'cls_token': '<CLS>'})
        # resize_token_embeddings expects the full new vocabulary size.
        model.resize_token_embeddings(len(tokenizer))
        assert tokenizer.cls_token == '<CLS>'
    """
    if not special_tokens_dict:
        return 0

    total_added = 0
    for attr_name, attr_value in special_tokens_dict.items():
        assert attr_name in self.SPECIAL_TOKENS_ATTRIBUTES
        if attr_name == "additional_special_tokens":
            # This key carries a sequence of tokens rather than a single one.
            assert isinstance(attr_value, (list, tuple)) and all(isinstance(t, str) for t in attr_value)
            total_added += self.add_tokens(attr_value)
        else:
            assert isinstance(attr_value, str)
            total_added += self.add_tokens([attr_value])
        logger.info("Assigning %s to the %s key of the tokenizer", attr_value, attr_name)
        setattr(self, attr_name, attr_value)

    return total_added
def tokenize(self, text, **kwargs):
    """ Converts a string in a sequence of tokens (string), using the tokenizer.
        Split in words for word-based vocabulary or sub-words for sub-word-based
        vocabularies (BPE/SentencePieces/WordPieces).

        Take care of added tokens.

        text: The sequence to be encoded.
        add_prefix_space: Only applies to GPT-2 and RoBERTa tokenizers. When `True`, this ensures that the sequence
            begins with an empty space. False by default except for when using RoBERTa with `add_special_tokens=True`.
        **kwargs: passed to the `prepare_for_tokenization` preprocessing method.
    """
    all_special_tokens = self.all_special_tokens
    text = self.prepare_for_tokenization(text, **kwargs)

    def lowercase_text(t):
        # convert non-special tokens to lowercase: the alternation matches a
        # special token first (kept verbatim) or a single character (lowered).
        escaped_special_toks = [re.escape(s_tok) for s_tok in all_special_tokens]
        pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
        return re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), t)

    if self.init_kwargs.get("do_lower_case", False):
        text = lowercase_text(text)

    def split_on_token(tok, text):
        # Split `text` around every occurrence of the added token `tok`,
        # keeping the token itself as a separate list element.
        # NOTE(review): rstrip() drops whitespace before each token occurrence
        # — presumably intentional for added tokens; confirm upstream.
        result = []
        split_text = text.split(tok)
        for i, sub_text in enumerate(split_text):
            sub_text = sub_text.rstrip()
            if i == 0 and not sub_text:
                # Text starts with the token itself.
                result += [tok]
            elif i == len(split_text) - 1:
                # Last fragment: no trailing token to emit.
                if sub_text:
                    result += [sub_text]
                else:
                    pass
            else:
                if sub_text:
                    result += [sub_text]
                result += [tok]
        return result

    def split_on_tokens(tok_list, text):
        # Iteratively split on each added/special token, then run the model
        # tokenizer (_tokenize) on the remaining plain-text fragments.
        if not text.strip():
            return []
        if not tok_list:
            return self._tokenize(text)

        tokenized_text = []
        text_list = [text]
        for tok in tok_list:
            tokenized_text = []
            for sub_text in text_list:
                # Fragments that are themselves added tokens are left intact.
                if sub_text not in self.unique_added_tokens_encoder:
                    tokenized_text += split_on_token(tok, sub_text)
                else:
                    tokenized_text += [sub_text]
            text_list = tokenized_text

        return list(
            itertools.chain.from_iterable(
                (
                    self._tokenize(token) if token not in self.unique_added_tokens_encoder else [token]
                    for token in tokenized_text
                )
            )
        )

    added_tokens = self.unique_added_tokens_encoder
    tokenized_text = split_on_tokens(added_tokens, text)
    return tokenized_text
def _tokenize(self, text, **kwargs):
    """Tokenize ``text`` with the model-specific algorithm
    (BPE/SentencePieces/WordPieces), ignoring added tokens.

    Subclasses must implement this; ``tokenize`` wraps it with
    added-token handling.
    """
    raise NotImplementedError
def convert_tokens_to_ids(self, tokens):
    """Map a token or a sequence of tokens (str) to vocabulary id(s).

    A single string yields a single integer id; a sequence of strings yields
    a list of ids. ``None`` passes through unchanged.
    """
    if tokens is None:
        return None
    if isinstance(tokens, str):
        return self._convert_token_to_id_with_added_voc(tokens)
    return [self._convert_token_to_id_with_added_voc(token) for token in tokens]
def _convert_token_to_id_with_added_voc(self, token):
    """Resolve a single token to an id, preferring added tokens over the base vocabulary."""
    if token is None:
        return None
    try:
        # Added tokens take precedence over the base vocabulary.
        return self.added_tokens_encoder[token]
    except KeyError:
        return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
    # Base-vocabulary lookup for a single token (str) -> id; implemented by subclasses.
    raise NotImplementedError
def encode(
    self,
    text: str,
    text_pair: Optional[str] = None,
    add_special_tokens: bool = True,
    max_length: Optional[int] = None,
    stride: int = 0,
    padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
    truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
    pad_to_max_length: bool = False,
    return_tensors: Optional[str] = None,
    **kwargs
):
    """
    Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary.

    Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.

    Args:
        text (:obj:`str` or :obj:`List[str]`):
            The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
            the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
            method)
        text_pair (:obj:`str` or :obj:`List[str]`, `optional`, defaults to :obj:`None`):
            Optional second sequence to be encoded, in the same formats as ``text``.
        add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
            If set to ``True``, the sequences will be encoded with the special tokens relative
            to their model.
        max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
            If set to a number, will limit the total sequence returned so that it has a maximum length.
            If there are overflowing tokens, those will be added to the returned dictionary
        stride (:obj:`int`, `optional`, defaults to ``0``):
            If set to a number along with max_length, the overflowing tokens returned will contain some tokens
            from the main sequence returned. The value of this argument defines the number of additional tokens.
        padding_strategy (:obj:`PaddingStrategy`, `optional`, defaults to ``PaddingStrategy.DO_NOT_PAD``):
            Padding strategy forwarded to ``encode_plus``.
        truncation_strategy (:obj:`TruncationStrategy`, `optional`, defaults to ``TruncationStrategy.DO_NOT_TRUNCATE``):
            Truncation strategy forwarded to ``encode_plus``.
        pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If set to True, the returned sequences will be padded according to the model's padding side and
            padding index, up to their max length. If no max length is specified, the padding is done up to the
            model's max length. The tokenizer padding side is given by the class attribute `padding_side`
            ('left' or 'right'). Defaults to False: no padding.
        return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
            Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
            or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
        **kwargs: passed to the `self.tokenize()` method
    """
    # BUGFIX: forward the strategies under encode_plus's declared parameter
    # names. They were previously sent as `padding=`/`truncation=` strings,
    # which encode_plus does not declare, so they fell into its **kwargs
    # (leaking into tokenize()) while encode_plus silently used its
    # DO_NOT_PAD / DO_NOT_TRUNCATE defaults.
    encoded_inputs = self.encode_plus(
        text,
        text_pair=text_pair,
        max_length=max_length,
        add_special_tokens=add_special_tokens,
        stride=stride,
        padding_strategy=padding_strategy,
        truncation_strategy=truncation_strategy,
        pad_to_max_length=pad_to_max_length,
        return_tensors=return_tensors,
        **kwargs,
    )

    return encoded_inputs["input_ids"]
def encode_plus(
    self,
    text: str,
    text_pair: Optional[str] = None,
    add_special_tokens: bool = True,
    max_length: Optional[int] = None,
    stride: int = 0,
    padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
    truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
    pad_to_max_length: bool = False,
    return_tensors: Optional[str] = None,
    return_token_type_ids: Optional[bool] = None,
    return_attention_mask: Optional[bool] = None,
    return_overflowing_tokens: bool = False,
    return_special_tokens_mask: bool = False,
    return_offsets_mapping: bool = False,
    **kwargs
):
    """
    Returns a dictionary containing the encoded sequence or sequence pair and additional information:
    the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.

    Args:
        text (:obj:`str` or :obj:`List[str]`):
            The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
            the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
            method)
        text_pair (:obj:`str` or :obj:`List[str]`, `optional`, defaults to :obj:`None`):
            Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
            string using the `tokenize` method) or a list of integers (tokenized string ids using the
            `convert_tokens_to_ids` method)
        add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
            If set to ``True``, the sequences will be encoded with the special tokens relative
            to their model.
        max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
            If set to a number, will limit the total sequence returned so that it has a maximum length.
            If there are overflowing tokens, those will be added to the returned dictionary
        stride (:obj:`int`, `optional`, defaults to ``0``):
            If set to a number along with max_length, the overflowing tokens returned will contain some tokens
            from the main sequence returned. The value of this argument defines the number of additional tokens.
        padding_strategy (:obj:`PaddingStrategy`, `optional`, defaults to ``PaddingStrategy.DO_NOT_PAD``):
            Padding strategy; its ``.value`` string is forwarded to ``prepare_for_model``.
        truncation_strategy (:obj:`TruncationStrategy`, `optional`, defaults to ``TruncationStrategy.DO_NOT_TRUNCATE``):
            Truncation strategy; its ``.value`` string is forwarded to ``prepare_for_model``.
        pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If set to True, the returned sequences will be padded according to the model's padding side and
            padding index, up to their max length. If no max length is specified, the padding is done up to the
            model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
            which can be set to the following strings:

            - 'left': pads on the left of the sequences
            - 'right': pads on the right of the sequences

            Defaults to False: no padding.
        return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
            Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
            or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
        return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
            Whether to return token type IDs. If left to the default, will return the token type IDs according
            to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.

            `What are token type IDs? <../glossary.html#token-type-ids>`_
        return_attention_mask (:obj:`bool`, `optional`, defaults to :obj:`None`):
            Whether to return the attention mask. If left to the default, will return the attention mask according
            to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.

            `What are attention masks? <../glossary.html#attention-mask>`__
        return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Set to True to return overflowing token information (default False).
        return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Set to True to return special tokens mask information (default False).
        return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Set to True to return (char_start, char_end) for each token (default False).
            If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on
            Rust-based tokenizers inheriting from PreTrainedTokenizerFast.
        **kwargs: passed to the `self.tokenize()` method

    Return:
        A Dictionary of shape::

            {
                input_ids: list[int],
                token_type_ids: list[int] if return_token_type_ids is True (default)
                attention_mask: list[int] if return_attention_mask is True (default)
                overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
                num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
                special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
            }

        With the fields:

        - ``input_ids``: list of token ids to be fed to a model
        - ``token_type_ids``: list of token type ids to be fed to a model
        - ``attention_mask``: list of indices specifying which tokens should be attended to by the model
        - ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
        - ``num_truncated_tokens``: number of overflowing tokens a ``max_length`` is specified
        - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added
          tokens and 1 specifying sequence tokens.
    """

    def get_input_ids(text):
        # Accept raw text, pre-tokenized strings, or already-converted ids.
        if isinstance(text, str):
            tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs)
            return self.convert_tokens_to_ids(tokens)
        elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
            return self.convert_tokens_to_ids(text)
        elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
            return text
        else:
            raise ValueError(
                "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
            )

    # Offsets are only available on the fast (Rust) tokenizers.
    if return_offsets_mapping:
        raise NotImplementedError(
            "return_offset_mapping is not available when using Python tokenizers."
            "To use this feature, change your tokenizer to one deriving from "
            "transformers.PreTrainedTokenizerFast."
            "More information on available tokenizers at "
            "https://github.com/huggingface/transformers/pull/2674"
        )

    # Throw an error if we can pad because there is no padding token
    if pad_to_max_length and self.pad_token_id is None:
        raise ValueError(
            "Unable to set proper padding strategy as the tokenizer does not have a padding token. In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via the function add_special_tokens if you want to use a padding strategy"
        )

    first_ids = get_input_ids(text)
    second_ids = get_input_ids(text_pair) if text_pair is not None else None

    # NOTE(review): assumes prepare_for_model (defined elsewhere) accepts
    # `padding=` / `truncation=` string values — confirm against its signature.
    return self.prepare_for_model(
        first_ids,
        pair_ids=second_ids,
        max_length=max_length,
        pad_to_max_length=pad_to_max_length,
        add_special_tokens=add_special_tokens,
        stride=stride,
        padding=padding_strategy.value,
        truncation=truncation_strategy.value,
        return_tensors=return_tensors,
        return_attention_mask=return_attention_mask,
        return_token_type_ids=return_token_type_ids,
        return_overflowing_tokens=return_overflowing_tokens,
        return_special_tokens_mask=return_special_tokens_mask,
    )
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[str, List[str]],
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,#str = "longest_first",
pad_to_max_length: bool = False,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_masks: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_masks: bool = False,
return_offsets_mapping: bool = False,
return_input_lengths: bool = False,
**kwargs
):
"""
Returns a dictionary containing the encoded sequence or sequence pair and additional information:
the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.
Args:
batch_text_or_text_pairs (:obj:`List[str]` or :obj:`List[List[str]]`):
Batch of sequences or pair of sequences to be encoded.
This can be a list of string/string-sequences/int-sequences or a list of pair of
string/string-sequences/int-sequence (see details in encode_plus)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return token type IDs. If left to the default, will return the token type IDs according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`_
return_attention_masks (:obj:`bool`, `optional`, defaults to :obj:`none`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return overflowing token information (default False).
return_special_tokens_masks (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return special tokens mask information (default False).
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return (char_start, char_end) for each token (default False).
If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on
Rust-based tokenizers inheriting from PreTrainedTokenizerFast.
return_input_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set the resulting dictionary will include the length of each sample
**kwargs: passed to the `self.tokenize()` method
Return:
A Dictionary of shape::
{
input_ids: list[List[int]],
token_type_ids: list[List[int]] if return_token_type_ids is True (default)
attention_mask: list[List[int]] if return_attention_mask is True (default)
overflowing_tokens: list[List[int]] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: List[int] if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[List[int]] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``attention_mask``: list of indices specifying which tokens should be attended to by the model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of overflowing tokens a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added
tokens and 1 specifying sequence tokens.
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
# Throw an error if we can pad because there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError(
"Unable to set proper padding strategy as the tokenizer does not have a padding token. In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via the function add_special_tokens if you want to use a padding strategy"
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers."
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
input_ids = []
for ids_or_pair_ids in batch_text_or_text_pairs:
if isinstance(ids_or_pair_ids, (list, tuple)) and len(ids_or_pair_ids) == 2:
ids, pair_ids = ids_or_pair_ids
else:
ids, pair_ids = ids_or_pair_ids, None
first_ids = get_input_ids(ids)
second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
input_ids.append((first_ids, second_ids))
if max_length is None and pad_to_max_length:
def total_sequence_length(input_pairs):
first_ids, second_ids = input_pairs
return len(first_ids) + (
self.num_added_tokens()
if second_ids is None
else (len(second_ids) + self.num_added_tokens(pair=True))
)
max_length = max([total_sequence_length(ids) for ids in input_ids])
batch_outputs = {}
for first_ids, second_ids in input_ids:
# Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by
# the model. It adds special tokens, truncates sequences if overflowing while taking into account
# the special tokens and manages a window stride for overflowing tokens
outputs = self.prepare_for_model(
first_ids,
pair_ids=second_ids,
max_length=max_length,
pad_to_max_length=pad_to_max_length,
add_special_tokens=add_special_tokens,
stride=stride,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
#truncation_strategy=truncation_strategy,
return_attention_mask=return_attention_masks,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_masks,
)
# Append the non-padded length to the output
if return_input_lengths:
outputs["input_len"] = len(outputs["input_ids"])
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
if return_tensors is not None:
# Do the tensor conversion in batch
for key, value in batch_outputs.items():
if return_tensors == "tf" and is_tf_available():
try:
batch_outputs[key] = tf.constant(value)
except ValueError:
if None in [item for sequence in value for item in sequence]:
raise ValueError(self.NO_PAD_TOKEN_FOR_BATCH_MSG)
else:
raise ValueError(self.UNEVEN_SEQUENCES_FOR_BATCH_MSG)
elif return_tensors == "pt" and is_torch_available():
try:
batch_outputs[key] = torch.tensor(value)
except ValueError:
raise ValueError(self.UNEVEN_SEQUENCES_FOR_BATCH_MSG)
except RuntimeError:
if None in [item for sequence in value for item in sequence]:
raise ValueError(self.NO_PAD_TOKEN_FOR_BATCH_MSG)
else:
raise
elif return_tensors is not None:
logger.warning(
"Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
return_tensors
)
)
return batch_outputs
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
max_length: Optional[int] = None,
add_special_tokens: bool = True,
stride: int = 0,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
prepend_batch_axis=True,
#truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
):
"""
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model.
It adds special tokens, truncates
sequences if overflowing while taking into account the special tokens and manages a window stride for
overflowing tokens
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
max_length: maximum length of the returned list. Will truncate by taking into account the special tokens.
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
stride: window stride for overflowing tokens. Can be useful for edge effect removal when using sequential
list of inputs.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length.
The tokenizer padding sides are handled by the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
return_token_type_ids: (optional) Set to False to avoid returning token_type_ids (default True).
return_attention_mask: (optional) Set to False to avoid returning attention mask (default True)
return_overflowing_tokens: (optional) Set to True to return overflowing token information (default False).
return_special_tokens_mask: (optional) Set to True to return special tokens mask information (default False).
Return:
A Dictionary of shape::
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
}
With the fields:
``input_ids``: list of token ids to be fed to a model
``token_type_ids``: list of token type ids to be fed to a model
``overflowing_tokens``: list of overflowing tokens if a max length is specified.
``num_truncated_tokens``: number of overflowing tokens a ``max_length`` is specified
``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added
tokens and 1 specifying sequence tokens.
"""
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Handle max sequence length
total_len = len_ids + len_pair_ids + (self.num_added_tokens(pair=pair) if add_special_tokens else 0)
if max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
#truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Handle special_tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if max_length and len(encoded_inputs["input_ids"]) > max_length:
encoded_inputs["input_ids"] = encoded_inputs["input_ids"][:max_length]
if return_token_type_ids:
encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"][:max_length]
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"][:max_length]
if max_length is None and len(encoded_inputs["input_ids"]) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(ids), self.max_len)
)
needs_to_be_padded = pad_to_max_length and (
max_length
and len(encoded_inputs["input_ids"]) < max_length
or max_length is None
and len(encoded_inputs["input_ids"]) < self.max_len
and self.max_len <= 10000
)
if pad_to_max_length and max_length is None and self.max_len > 10000:
logger.warning(
"Sequence can't be padded as no maximum length is specified and the model maximum length is too high."
)
if needs_to_be_padded:
difference = (max_length if max_length is not None else self.max_len) - len(encoded_inputs["input_ids"])
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference
if return_token_type_ids:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"])
if return_token_type_ids:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
elif return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
# Prepare inputs as tensors if asked
if return_tensors == "tf" and is_tf_available():
encoded_inputs["input_ids"] = tf.constant([encoded_inputs["input_ids"]])
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = tf.constant([encoded_inputs["token_type_ids"]])
if "attention_mask" in encoded_inputs:
encoded_inputs["attention_mask"] = tf.constant([encoded_inputs["attention_mask"]])
elif return_tensors == "pt" and is_torch_available():
encoded_inputs["input_ids"] = torch.tensor([encoded_inputs["input_ids"]])
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = torch.tensor([encoded_inputs["token_type_ids"]])
if "attention_mask" in encoded_inputs:
encoded_inputs["attention_mask"] = torch.tensor([encoded_inputs["attention_mask"]])
elif return_tensors is not None:
logger.warning(
"Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
return_tensors
)
)
return encoded_inputs
    def prepare_for_tokenization(self, text, **kwargs):
        """ Performs any necessary transformations before tokenization.

        Base implementation is the identity function; subclasses may override it
        to, e.g., lower-case or normalize ``text``. Extra keyword arguments are
        accepted and ignored here so overrides can take tokenizer-specific options.
        Returns the (possibly transformed) text.
        """
        return text
def truncate_sequences(
self, ids, pair_ids=None, num_tokens_to_remove=0, truncation_strategy="longest_first", stride=0
):
"""Truncates a sequence pair in place to the maximum length.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences).
Overflowing tokens only contains overflow from the first sequence.
- 'only_first': Only truncate the first sequence. raise an error if the first sequence is shorter or equal to than num_tokens_to_remove.
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if truncation_strategy == "longest_first":
overflowing_tokens = []
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
overflowing_tokens = [ids[-1]] + overflowing_tokens
ids = ids[:-1]
else:
pair_ids = pair_ids[:-1]
window_len = min(len(ids), stride)
if window_len > 0:
overflowing_tokens = ids[-window_len:] + overflowing_tokens
elif truncation_strategy == "only_first":
assert len(ids) > num_tokens_to_remove
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
elif truncation_strategy == "only_second":
assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
elif truncation_strategy == "do_not_truncate":
raise ValueError("Input sequence are too long for max_length. Please select a truncation strategy.")
else:
raise ValueError(
"Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']"
)
return (ids, pair_ids, overflowing_tokens)
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens.
A RoBERTa sequence has the following format:
single sequence: <s> X </s>
pair of sequences: <s> A </s></s> B </s>
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0: list of ids (must not contain special tokens)
token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
for sequence pairs
already_has_special_tokens: (default False) Set to True if the token list is already formated with
special tokens for the model
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
""" Converts a single index or a sequence of indices (integers) in a token "
(resp.) a sequence of tokens (str), using the vocabulary and added tokens.
Args:
skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
    def _convert_id_to_token(self, index):
        # Vocabulary lookup hook: concrete tokenizers must map an integer id to its
        # token string. Intentionally abstract in this base class.
        raise NotImplementedError
    def convert_tokens_to_string(self, tokens):
        """ Converts a sequence of tokens (string) in a single string.
        The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
        but we often want to remove sub-word tokenization artifacts at the same time.
        """
        # NOTE(review): this passes token *strings* through convert_ids_to_tokens,
        # which calls int() on each element and would raise ValueError for
        # non-numeric tokens. Concrete tokenizers appear expected to override this
        # method; confirm whether the base implementation should be " ".join(tokens).
        return " ".join(self.convert_ids_to_tokens(tokens))
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
"""
Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids: list of tokenized input ids. Can be obtained using the `encode` or `encode_plus` methods.
skip_special_tokens: if set to True, will replace special tokens.
clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces.
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separatly for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
text = " ".join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
@property
def special_tokens_map(self):
""" A dictionary mapping special token class attribute (cls_token, unk_token...) to their
values ('<unk>', '<cls>'...)
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self):
""" List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
(cls_token, unk_token...).
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self):
""" List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
class attributes (cls_token, unk_token...).
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
@staticmethod
def clean_up_tokenization(out_string):
""" Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" do not", " don't")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
class PreTrainedTokenizerFast(PreTrainedTokenizer):
    """Tokenizer backed by a Rust (``tokenizers`` library) ``BaseTokenizer``.

    Wraps the fast tokenizer in ``self._tokenizer`` and adapts it to the
    ``PreTrainedTokenizer`` interface: encoding/decoding, special-token
    bookkeeping (kept in sync with the Rust side) and vocabulary persistence.
    """
    # Keys this tokenizer family produces for model forward passes.
    model_input_names = ["token_type_ids", "attention_mask"]
    def __init__(self, tokenizer: BaseTokenizer, **kwargs):
        # The wrapped Rust tokenizer is mandatory; most methods delegate to it.
        if tokenizer is None:
            raise ValueError("Provided tokenizer cannot be None")
        self._tokenizer = tokenizer
        super().__init__(**kwargs)
        self.max_len_single_sentence = self.max_len - self.num_added_tokens(False)  # take into account special tokens
        self.max_len_sentences_pair = self.max_len - self.num_added_tokens(True)  # take into account special tokens
    @property
    def tokenizer(self):
        # Underlying Rust tokenizer instance.
        return self._tokenizer
    @property
    def decoder(self):
        # Decoder component of the wrapped Rust tokenizer.
        return self._tokenizer._tokenizer.decoder
    @property
    def vocab_size(self):
        # Size of the base vocabulary, excluding tokens added after training.
        return self._tokenizer.get_vocab_size(with_added_tokens=False)
    def __len__(self):
        # Full vocabulary size, including added tokens.
        return self._tokenizer.get_vocab_size(with_added_tokens=True)
    # The setters below mirror PreTrainedTokenizer's special-token attributes but
    # additionally push the updated token list down into the Rust tokenizer.
    @PreTrainedTokenizer.bos_token.setter
    def bos_token(self, value):
        self._bos_token = value
        self._update_special_tokens()
    @PreTrainedTokenizer.eos_token.setter
    def eos_token(self, value):
        self._eos_token = value
        self._update_special_tokens()
    @PreTrainedTokenizer.unk_token.setter
    def unk_token(self, value):
        self._unk_token = value
        self._update_special_tokens()
    @PreTrainedTokenizer.sep_token.setter
    def sep_token(self, value):
        self._sep_token = value
        self._update_special_tokens()
    @PreTrainedTokenizer.pad_token.setter
    def pad_token(self, value):
        self._pad_token = value
        self._update_special_tokens()
    @PreTrainedTokenizer.cls_token.setter
    def cls_token(self, value):
        self._cls_token = value
        self._update_special_tokens()
    @PreTrainedTokenizer.mask_token.setter
    def mask_token(self, value):
        self._mask_token = value
        self._update_special_tokens()
    @PreTrainedTokenizer.additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value
        self._update_special_tokens()
    def _update_special_tokens(self):
        # Keep the Rust tokenizer's special-token registry in sync with this wrapper.
        if self._tokenizer is not None:
            self._tokenizer.add_special_tokens(self.all_special_tokens)
    def _convert_encoding(
        self,
        encoding,
        return_tensors=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
    ):
        """Convert one Rust ``Encoding`` (plus its overflow parts, when requested)
        into a python dict of lists, optionally converted to TF/PyTorch tensors."""
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_overflowing_tokens and encoding.overflowing is not None:
            encodings = [encoding] + encoding.overflowing
        else:
            encodings = [encoding]
        encoding_dict = defaultdict(list)
        for e in encodings:
            encoding_dict["input_ids"].append(e.ids)
            if return_token_type_ids:
                encoding_dict["token_type_ids"].append(e.type_ids)
            if return_attention_mask:
                encoding_dict["attention_mask"].append(e.attention_mask)
            if return_special_tokens_mask:
                encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
            if return_offsets_mapping:
                # Map token offsets back into the original (pre-normalization) string.
                encoding_dict["offset_mapping"].append([e.original_str.offsets(o) for o in e.offsets])
        # Prepare inputs as tensors if asked
        if return_tensors == "tf" and is_tf_available():
            encoding_dict["input_ids"] = tf.constant(encoding_dict["input_ids"])
            if "token_type_ids" in encoding_dict:
                encoding_dict["token_type_ids"] = tf.constant(encoding_dict["token_type_ids"])
            if "attention_mask" in encoding_dict:
                encoding_dict["attention_mask"] = tf.constant(encoding_dict["attention_mask"])
        elif return_tensors == "pt" and is_torch_available():
            encoding_dict["input_ids"] = torch.tensor(encoding_dict["input_ids"])
            if "token_type_ids" in encoding_dict:
                encoding_dict["token_type_ids"] = torch.tensor(encoding_dict["token_type_ids"])
            if "attention_mask" in encoding_dict:
                encoding_dict["attention_mask"] = torch.tensor(encoding_dict["attention_mask"])
        elif return_tensors is not None:
            logger.warning(
                "Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
                    return_tensors
                )
            )
        return encoding_dict
    def _convert_token_to_id_with_added_voc(self, token):
        # Unknown tokens fall back to the unk id rather than None.
        id = self._tokenizer.token_to_id(token)
        if id is None:
            return self.unk_token_id
        return id
    def _convert_id_to_token(self, index):
        return self._tokenizer.id_to_token(int(index))
    def convert_tokens_to_string(self, tokens):
        # Delegate detokenization entirely to the Rust decoder.
        return self._tokenizer.decode(tokens)
    def add_tokens(self, new_tokens):
        # Accept a single token string as a convenience.
        if isinstance(new_tokens, str):
            new_tokens = [new_tokens]
        return self._tokenizer.add_tokens(new_tokens)
    def add_special_tokens(self, special_tokens_dict):
        # Register on the python side, then mirror into the Rust tokenizer.
        added = super().add_special_tokens(special_tokens_dict)
        self._update_special_tokens()
        return added
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Plain concatenation; the Rust post-processor handles actual special tokens.
        if token_ids_1 is None:
            return token_ids_0
        else:
            return token_ids_0 + token_ids_1
    def num_added_tokens(self, pair=False):
        # Number of special tokens the Rust tokenizer adds for a single/pair input.
        return self.tokenizer.num_special_tokens_to_add(pair)
    def tokenize(self, text, **kwargs):
        return self.tokenizer.encode(text).tokens
    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Optional[Union[List[str], List[Tuple[str]]]] = None,
        add_special_tokens: bool = True,
        max_length: Optional[int] = None,
        stride: int = 0,
        truncation_strategy: str = "longest_first",
        pad_to_max_length: bool = False,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        **kwargs
    ):
        """Encode a batch of texts (or text pairs) with the Rust tokenizer and
        return a dict of lists (or stacked tensors when ``return_tensors`` is set)."""
        if not add_special_tokens:
            # NOTE(review): the concatenated literals below lack separating spaces,
            # so the emitted warning text runs words together.
            logger.warning(
                "Fast tokenizers add special tokens by default. To remove special tokens, please specify"
                "`add_special_tokens=False` during the initialisation rather than when calling `encode`,"
                "`encode_plus` or `batch_encode_plus`."
            )
        # Needed if we have to return a tensor
        pad_to_max_length = pad_to_max_length or (return_tensors is not None)
        # Throw an error if we can pad because there is no padding token
        if pad_to_max_length and self.pad_token_id is None:
            raise ValueError("Unable to set proper padding strategy as the tokenizer does not have a padding token")
        # Set the truncation and padding strategy and restore the initial configuration
        with truncate_and_pad(
            tokenizer=self._tokenizer,
            max_length=max_length,
            stride=stride,
            strategy=truncation_strategy,
            pad_to_max_length=pad_to_max_length,
            padding_side=self.padding_side,
            pad_token_id=self.pad_token_id,
            pad_token_type_id=self.pad_token_type_id,
            pad_token=self._pad_token,
        ):
            if not isinstance(batch_text_or_text_pairs, list):
                raise TypeError(
                    "batch_text_or_text_pairs has to be a list (got {})".format(type(batch_text_or_text_pairs))
                )
            # Avoid thread overhead if only one example.
            if len(batch_text_or_text_pairs) == 1:
                if isinstance(batch_text_or_text_pairs[0], (tuple, list)):
                    tokens = self._tokenizer.encode(*batch_text_or_text_pairs[0])
                else:
                    tokens = self._tokenizer.encode(batch_text_or_text_pairs[0])
                tokens = [tokens]
            else:
                tokens = self._tokenizer.encode_batch(batch_text_or_text_pairs)
        # Convert encoding to dict
        tokens = [
            self._convert_encoding(
                encoding=encoding,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
            )
            for encoding in tokens
        ]
        # Sanitize the output to have dict[list] from list[dict]
        sanitized = {}
        for key in tokens[0].keys():
            stack = [e for item in tokens for e in item[key]]
            if return_tensors == "tf":
                stack = tf.stack(stack, axis=0)
            elif return_tensors == "pt":
                stack = torch.stack(stack, dim=0)
            elif not return_tensors and len(stack) == 1:
                stack = stack[0]
            sanitized[key] = stack
        # If returning overflowing tokens, we need to return a mapping
        # from the batch idx to the original sample
        if return_overflowing_tokens:
            overflow_to_sample_mapping = [
                i if len(item["input_ids"]) == 1 else [i] * len(item["input_ids"]) for i, item in enumerate(tokens)
            ]
            sanitized["overflow_to_sample_mapping"] = overflow_to_sample_mapping
        return sanitized
    def encode_plus(
        self,
        text: str,
        text_pair: Optional[str] = None,
        add_special_tokens: bool = False,
        max_length: Optional[int] = None,
        pad_to_max_length: bool = False,
        stride: int = 0,
        truncation_strategy: str = "longest_first",
        return_tensors: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        **kwargs
    ):
        """Encode a single text (or text pair) by delegating to ``batch_encode_plus``
        with a one-element batch, then unwrapping the batch axis when no tensors
        were requested."""
        batched_input = [(text, text_pair)] if text_pair else [text]
        batched_output = self.batch_encode_plus(
            batched_input,
            add_special_tokens=add_special_tokens,
            max_length=max_length,
            stride=stride,
            truncation_strategy=truncation_strategy,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            pad_to_max_length=pad_to_max_length,
            **kwargs,
        )
        # Return tensor is None, then we can remove the leading batch axis
        if not return_tensors:
            return {key: value[0] if isinstance(value[0], list) else value for key, value in batched_output.items()}
        else:
            return batched_output
    def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
        # The Rust tokenizer handles id->string directly; optionally tidy spaces after.
        text = self.tokenizer.decode(token_ids, skip_special_tokens)
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory):
        # A directory saves with the default name; otherwise split into folder + filename.
        if os.path.isdir(save_directory):
            files = self._tokenizer.save(save_directory)
        else:
            folder, file = os.path.split(os.path.abspath(save_directory))
            files = self._tokenizer.save(folder, name=file)
        return tuple(files)
def trim_batch(
    input_ids, pad_token_id, attention_mask=None,
):
    """Drop every column of ``input_ids`` (and of ``attention_mask``, when given)
    whose entries are all equal to ``pad_token_id``.

    Returns the trimmed ``input_ids``, or a ``(input_ids, attention_mask)`` tuple
    when an attention mask is supplied.
    """
    non_pad_columns = (input_ids != pad_token_id).any(dim=0)
    if attention_mask is not None:
        return (input_ids[:, non_pad_columns], attention_mask[:, non_pad_columns])
    return input_ids[:, non_pad_columns]
| 99,451 | 49.766718 | 372 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/commands/convert.py | from argparse import ArgumentParser, Namespace
from logging import getLogger
from transformers.commands import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
    :return: ConvertCommand
    """
    # Note: the docstring previously claimed this returns a ServeCommand, but the
    # function constructs and returns a ConvertCommand from the parsed CLI args.
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
class ConvertCommand(BaseTransformersCLICommand):
    """CLI command converting an original checkpoint (TensorFlow for most model
    types, PyTorch for xlm) into a Transformers PyTorch checkpoint."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli
        :param parser: Root parser to register command-specific arguments
        :return:
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original "
            "author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        # Typo fix: "savd" -> "saved" in the user-facing help string.
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    # Single definition of the error message that was previously copy-pasted in
    # every TensorFlow-dependent branch of `run`.
    _TF_IMPORT_ERROR_MSG = (
        "transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
        "In that case, it requires TensorFlow to be installed. Please see "
        "https://www.tensorflow.org/install/ for installation instructions."
    )

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args
    ):
        """Store the conversion parameters.

        :param model_type: model family ("bert", "gpt", "gpt2", "transfo_xl", "xlnet" or "xlm").
        :param tf_checkpoint: path to the original checkpoint (or dataset file for transfo_xl).
        :param pytorch_dump_output: where to write the converted PyTorch checkpoint.
        :param config: optional configuration file path or folder.
        :param finetuning_task_name: optional fine-tuning task name (used by xlnet only).
        """
        self._logger = getLogger("transformers-cli/converting")
        self._logger.info("Loading model {}".format(model_type))
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the model-specific conversion script.

        :raises ImportError: when the required (TensorFlow-dependent) conversion
            module cannot be imported.
        :raises ValueError: when the model type is not in the supported list.
        """
        if self._model_type == "bert":
            try:
                from transformers.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._TF_IMPORT_ERROR_MSG)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from transformers.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )
            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._TF_IMPORT_ERROR_MSG)
            # transfo_xl accepts either a model checkpoint or a pre-processed
            # dataset file; they are told apart by "ckpt" appearing in the path.
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from transformers.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._TF_IMPORT_ERROR_MSG)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from transformers.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(self._TF_IMPORT_ERROR_MSG)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from transformers.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )
            # xlm checkpoints are already PyTorch; no config argument is needed.
            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        else:
            raise ValueError("--model_type should be selected in the list [bert, gpt, gpt2, transfo_xl, xlnet, xlm]")
| 6,406 | 43.186207 | 117 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/commands/train.py | import os
from argparse import ArgumentParser, Namespace
from logging import getLogger
from transformers import SingleSentenceClassificationProcessor as Processor
from transformers import TextClassificationPipeline, is_tf_available, is_torch_available
from transformers.commands import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """Instantiate a :class:`TrainCommand` from parsed command-line arguments.

    :param args: parsed ``argparse`` namespace with the training options.
    :return: the command object that runs the training.
    """
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    """CLI command that fine-tunes a text-classification pipeline on a csv dataset."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli
        :param parser: Root parser to register command-specific arguments
        :return:
        """
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with "
            "tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset " "to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        """Validate the arguments, build the pipeline and load the dataset(s).

        :param args: parsed command-line arguments (see ``register_subcommand``).
        :raises ValueError: if ``args.task`` is not a supported task name.
        """
        self.logger = getLogger("transformers-cli/training")
        # TensorFlow is preferred over PyTorch when both are installed.
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        assert os.path.isdir(args.output)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info("Loading {} pipeline for {}".format(args.task, args.model))
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        else:
            # Fail fast on an unknown task: previously `self.pipeline` was silently
            # left unset and only surfaced later as an AttributeError in run_tf().
            raise ValueError("Unsupported task: {}".format(args.task))
        self.logger.info("Loading dataset from {}".format(args.train_data))
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info("Loading validation dataset from {}".format(args.validation_data))
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training entry point."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        """PyTorch training is not implemented yet."""
        raise NotImplementedError

    def run_tf(self):
        """Train with TensorFlow, then save the resulting pipeline to ``self.output``."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 5,830 | 39.213793 | 117 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/commands/env.py | import platform
from argparse import ArgumentParser
from transformers import __version__ as version
from transformers import is_tf_available, is_torch_available
from transformers.commands import BaseTransformersCLICommand
def info_command_factory(_):
    """Instantiate the ``env`` command; the parsed arguments are ignored."""
    return EnvironmentCommand()
class EnvironmentCommand(BaseTransformersCLICommand):
    """CLI command that prints environment information useful for bug reports."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the ``env`` subcommand to the root CLI parser."""
        env_parser = parser.add_parser("env")
        env_parser.set_defaults(func=info_command_factory)

    @staticmethod
    def _torch_env():
        # Returns (version, cuda availability) for PyTorch, or placeholders
        # when it is not installed.
        if not is_torch_available():
            return "not installed", "NA"
        import torch

        return torch.__version__, torch.cuda.is_available()

    @staticmethod
    def _tf_env():
        # Returns (version, gpu availability) for TensorFlow, or placeholders
        # when it is not installed.
        if not is_tf_available():
            return "not installed", "NA"
        import tensorflow as tf

        try:
            # deprecated in v2.1
            gpu = tf.test.is_gpu_available()
        except AttributeError:
            # returns list of devices, convert to bool
            gpu = bool(tf.config.list_physical_devices("GPU"))
        return tf.__version__, gpu

    def run(self):
        """Collect environment details, print them, and return them as a dict."""
        pt_version, pt_cuda_available = self._torch_env()
        tf_version, tf_cuda_available = self._tf_env()
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d):
        """Render a dict as one ``- key: value`` line per entry, newline-terminated."""
        return "\n".join("- {}: {}".format(prop, val) for prop, val in d.items()) + "\n"
| 2,027 | 33.372881 | 105 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/data/processors/squad.py | import json
import logging
import os
from functools import partial
from multiprocessing import Pool, cpu_count
import numpy as np
from tqdm import tqdm
from ...file_utils import is_tf_available, is_torch_available
from ...tokenization_bert import whitespace_tokenize
from .utils import DataProcessor
if is_torch_available():
import torch
from torch.utils.data import TensorDataset
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _new_check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# if len(doc_spans) == 1:
# return True
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span["start"] + doc_span["length"] - 1
if position < doc_span["start"]:
continue
if position > end:
continue
num_left_context = position - doc_span["start"]
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training):
    """Convert a single ``SquadExample`` into a list of ``SquadFeatures``.

    Relies on the module-level ``tokenizer`` global installed per worker process
    by ``squad_convert_example_to_features_init`` so it can run inside a
    ``multiprocessing.Pool``. A long context is split into several overlapping
    spans of at most ``max_seq_length`` tokens, advancing ``doc_stride`` tokens
    between spans.

    Args:
        example: the ``SquadExample`` to convert.
        max_seq_length: maximum total input length (query + context + special tokens).
        doc_stride: stride between successive context spans.
        max_query_length: maximum number of tokens kept from the question.
        is_training: when True, also compute start/end answer positions.

    Returns:
        A list of ``SquadFeatures`` (one per span); an empty list when, in
        training mode, the annotated answer cannot be found in the context.
    """
    features = []
    if is_training and not example.is_impossible:
        # Get start and end position
        start_position = example.start_position
        end_position = example.end_position
        # If the answer cannot be found in the text, then skip this example.
        actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
        cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
        if actual_text.find(cleaned_answer_text) == -1:
            logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
            return []
    # Sub-word tokenize the context, keeping a bidirectional alignment between
    # word indices (example.doc_tokens) and sub-token indices (all_doc_tokens).
    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for (i, token) in enumerate(example.doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)
    if is_training and not example.is_impossible:
        # Project the word-level answer span onto sub-token indices.
        tok_start_position = orig_to_tok_index[example.start_position]
        if example.end_position < len(example.doc_tokens) - 1:
            tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
        else:
            tok_end_position = len(all_doc_tokens) - 1
        (tok_start_position, tok_end_position) = _improve_answer_span(
            all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
        )
    spans = []
    truncated_query = tokenizer.encode(example.question_text, add_special_tokens=False, max_length=max_query_length)
    # Number of special tokens added around a single sequence / a pair; RoBERTa
    # and CamemBERT use one extra separator token, hence the +1.
    sequence_added_tokens = (
        tokenizer.max_len - tokenizer.max_len_single_sentence + 1
        if "roberta" in str(type(tokenizer)) or "camembert" in str(type(tokenizer))
        else tokenizer.max_len - tokenizer.max_len_single_sentence
    )
    sequence_pair_added_tokens = tokenizer.max_len - tokenizer.max_len_sentences_pair
    span_doc_tokens = all_doc_tokens
    # Encode overlapping spans until the whole context has been covered.
    while len(spans) * doc_stride < len(all_doc_tokens):
        encoded_dict = tokenizer.encode_plus(
            truncated_query if tokenizer.padding_side == "right" else span_doc_tokens,
            span_doc_tokens if tokenizer.padding_side == "right" else truncated_query,
            max_length=max_seq_length,
            return_overflowing_tokens=True,
            pad_to_max_length=True,
            stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
            truncation_strategy="only_second" if tokenizer.padding_side == "right" else "only_first",
            return_token_type_ids=True,
        )
        paragraph_len = min(
            len(all_doc_tokens) - len(spans) * doc_stride,
            max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
        )
        # Strip padding to recover the real (non-padded) tokens of this span.
        if tokenizer.pad_token_id in encoded_dict["input_ids"]:
            if tokenizer.padding_side == "right":
                non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
            else:
                last_padding_id_position = (
                    len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id)
                )
                non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :]
        else:
            non_padded_ids = encoded_dict["input_ids"]
        tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
        # Map each context position within this span back to its original word index.
        token_to_orig_map = {}
        for i in range(paragraph_len):
            index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
            token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]
        encoded_dict["paragraph_len"] = paragraph_len
        encoded_dict["tokens"] = tokens
        encoded_dict["token_to_orig_map"] = token_to_orig_map
        encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
        encoded_dict["token_is_max_context"] = {}
        encoded_dict["start"] = len(spans) * doc_stride
        encoded_dict["length"] = paragraph_len
        spans.append(encoded_dict)
        if "overflowing_tokens" not in encoded_dict:
            break
        span_doc_tokens = encoded_dict["overflowing_tokens"]
    # Flag, for every context token, whether this span gives it maximum context.
    for doc_span_index in range(len(spans)):
        for j in range(spans[doc_span_index]["paragraph_len"]):
            is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
            index = (
                j
                if tokenizer.padding_side == "left"
                else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
            )
            spans[doc_span_index]["token_is_max_context"][index] = is_max_context
    for span in spans:
        # Identify the position of the CLS token
        cls_index = span["input_ids"].index(tokenizer.cls_token_id)
        # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
        # Original TF implem also keep the classification token (set to 0) (not sure why...)
        p_mask = np.array(span["token_type_ids"])
        p_mask = np.minimum(p_mask, 1)
        if tokenizer.padding_side == "right":
            # Limit positive values to one
            p_mask = 1 - p_mask
        p_mask[np.where(np.array(span["input_ids"]) == tokenizer.sep_token_id)[0]] = 1
        # Set the CLS index to '0'
        p_mask[cls_index] = 0
        span_is_impossible = example.is_impossible
        start_position = 0
        end_position = 0
        if is_training and not span_is_impossible:
            # For training, if our document chunk does not contain an annotation
            # we throw it out, since there is nothing to predict.
            doc_start = span["start"]
            doc_end = span["start"] + span["length"] - 1
            out_of_span = False
            if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
                out_of_span = True
            if out_of_span:
                start_position = cls_index
                end_position = cls_index
                span_is_impossible = True
            else:
                if tokenizer.padding_side == "left":
                    doc_offset = 0
                else:
                    doc_offset = len(truncated_query) + sequence_added_tokens
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset
        features.append(
            SquadFeatures(
                span["input_ids"],
                span["attention_mask"],
                span["token_type_ids"],
                cls_index,
                p_mask.tolist(),
                example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing.
                unique_id=0,
                paragraph_len=span["paragraph_len"],
                token_is_max_context=span["token_is_max_context"],
                tokens=span["tokens"],
                token_to_orig_map=span["token_to_orig_map"],
                start_position=start_position,
                end_position=end_position,
                is_impossible=span_is_impossible,
            )
        )
    return features
def squad_convert_example_to_features_init(tokenizer_for_convert):
    """Pool initializer: expose the tokenizer as a module-level global so each
    worker process of ``squad_convert_examples_to_features`` can reach it."""
    global tokenizer
    tokenizer = tokenizer_for_convert
def squad_convert_examples_to_features(
    examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, return_dataset=False, threads=1
):
    """
    Converts a list of examples into a list of features that can be directly given as input to a model.
    It is model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs.
    Args:
        examples: list of :class:`~transformers.data.processors.squad.SquadExample`
        tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`
        max_seq_length: The maximum sequence length of the inputs.
        doc_stride: The stride used when the context is too large and is split across several features.
        max_query_length: The maximum length of the query.
        is_training: whether to create features for model evaluation or model training.
        return_dataset: Default False. Either 'pt' or 'tf'.
            if 'pt': returns a torch.data.TensorDataset,
            if 'tf': returns a tf.data.Dataset
        threads: number of worker processes used for the conversion.
    Returns:
        list of :class:`~transformers.data.processors.squad.SquadFeatures`
    Example::
        processor = SquadV2Processor()
        examples = processor.get_dev_examples(data_dir)
        features = squad_convert_examples_to_features(
            examples=examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=not evaluate,
        )
    """
    # Defining helper methods
    features = []
    threads = min(threads, cpu_count())
    # Fan the per-example conversion out over a process pool; the initializer
    # installs the tokenizer as a global inside each worker.
    with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
        annotate_ = partial(
            squad_convert_example_to_features,
            max_seq_length=max_seq_length,
            doc_stride=doc_stride,
            max_query_length=max_query_length,
            is_training=is_training,
        )
        features = list(
            tqdm(
                p.imap(annotate_, examples, chunksize=32),
                total=len(examples),
                desc="convert squad examples to features",
            )
        )
    # Flatten the per-example feature lists and assign example indices and
    # unique ids (these cannot be set inside the workers).
    new_features = []
    unique_id = 1000000000
    example_index = 0
    for example_features in tqdm(features, total=len(features), desc="add example index and unique id"):
        if not example_features:
            continue
        for example_feature in example_features:
            example_feature.example_index = example_index
            example_feature.unique_id = unique_id
            new_features.append(example_feature)
            unique_id += 1
        example_index += 1
    features = new_features
    del new_features
    if return_dataset == "pt":
        if not is_torch_available():
            raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")
        # Convert to Tensors and build dataset
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
        all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
        all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
        if not is_training:
            all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
            dataset = TensorDataset(
                all_input_ids, all_attention_masks, all_token_type_ids, all_example_index, all_cls_index, all_p_mask
            )
        else:
            all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
            all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
            dataset = TensorDataset(
                all_input_ids,
                all_attention_masks,
                all_token_type_ids,
                all_start_positions,
                all_end_positions,
                all_cls_index,
                all_p_mask,
                all_is_impossible,
            )
        return features, dataset
    elif return_dataset == "tf":
        if not is_tf_available():
            raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.")
        def gen():
            # Generator feeding tf.data.Dataset.from_generator below.
            for ex in features:
                yield (
                    {
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    {
                        "start_position": ex.start_position,
                        "end_position": ex.end_position,
                        "cls_index": ex.cls_index,
                        "p_mask": ex.p_mask,
                        "is_impossible": ex.is_impossible,
                    },
                )
        return tf.data.Dataset.from_generator(
            gen,
            (
                {"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32},
                {
                    "start_position": tf.int64,
                    "end_position": tf.int64,
                    "cls_index": tf.int64,
                    "p_mask": tf.int32,
                    "is_impossible": tf.int32,
                },
            ),
            (
                {
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "token_type_ids": tf.TensorShape([None]),
                },
                {
                    "start_position": tf.TensorShape([]),
                    "end_position": tf.TensorShape([]),
                    "cls_index": tf.TensorShape([]),
                    "p_mask": tf.TensorShape([None]),
                    "is_impossible": tf.TensorShape([]),
                },
            ),
        )
    return features
class SquadProcessor(DataProcessor):
    """
    Processor for the SQuAD data set.
    Overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively.
    """
    # Subclasses set the default json file names for the train/dev splits.
    train_file = None
    dev_file = None
    def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
        # Build a SquadExample from a single tensorflow_datasets record.
        # Training keeps only the first annotated answer; evaluation keeps all
        # reference answers (with their character start positions) for scoring.
        if not evaluate:
            answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
            answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
            answers = []
        else:
            answers = [
                {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
                for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
            ]
            answer = None
            answer_start = None
        return SquadExample(
            qas_id=tensor_dict["id"].numpy().decode("utf-8"),
            question_text=tensor_dict["question"].numpy().decode("utf-8"),
            context_text=tensor_dict["context"].numpy().decode("utf-8"),
            answer_text=answer,
            start_position_character=answer_start,
            title=tensor_dict["title"].numpy().decode("utf-8"),
            answers=answers,
        )
    def get_examples_from_dataset(self, dataset, evaluate=False):
        """
        Creates a list of :class:`~transformers.data.processors.squad.SquadExample` using a TFDS dataset.
        Args:
            dataset: The tfds dataset loaded from `tensorflow_datasets.load("squad")`
            evaluate: boolean specifying if in evaluation mode or in training mode
        Returns:
            List of SquadExample
        Examples::
            import tensorflow_datasets as tfds
            dataset = tfds.load("squad")
            training_examples = get_examples_from_dataset(dataset, evaluate=False)
            evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
        """
        if evaluate:
            dataset = dataset["validation"]
        else:
            dataset = dataset["train"]
        examples = []
        for tensor_dict in tqdm(dataset):
            examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
        return examples
    def get_train_examples(self, data_dir, filename=None):
        """
        Returns the training examples from the data directory.
        Args:
            data_dir: Directory containing the data files used for training and evaluating.
            filename: None by default, specify this if the training file has a different name than the original one
                which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
        """
        if data_dir is None:
            data_dir = ""
        if self.train_file is None:
            raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
        with open(
            os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
        ) as reader:
            input_data = json.load(reader)["data"]
        return self._create_examples(input_data, "train")
    def get_dev_examples(self, data_dir, filename=None):
        """
        Returns the evaluation example from the data directory.
        Args:
            data_dir: Directory containing the data files used for training and evaluating.
            filename: None by default, specify this if the evaluation file has a different name than the original one
                which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
        """
        if data_dir is None:
            data_dir = ""
        if self.dev_file is None:
            raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
        with open(
            os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
        ) as reader:
            input_data = json.load(reader)["data"]
        return self._create_examples(input_data, "dev")
    def _create_examples(self, input_data, set_type):
        # Walk the nested SQuAD json structure: entries -> paragraphs -> qas,
        # producing one SquadExample per question.
        is_training = set_type == "train"
        examples = []
        for entry in tqdm(input_data):
            title = entry["title"]
            for paragraph in entry["paragraphs"]:
                context_text = paragraph["context"]
                for qa in paragraph["qas"]:
                    qas_id = qa["id"]
                    question_text = qa["question"]
                    start_position_character = None
                    answer_text = None
                    answers = []
                    # `is_impossible` only exists in SQuAD v2.0 files.
                    if "is_impossible" in qa:
                        is_impossible = qa["is_impossible"]
                    else:
                        is_impossible = False
                    if not is_impossible:
                        if is_training:
                            answer = qa["answers"][0]
                            answer_text = answer["text"]
                            start_position_character = answer["answer_start"]
                        else:
                            answers = qa["answers"]
                    example = SquadExample(
                        qas_id=qas_id,
                        question_text=question_text,
                        context_text=context_text,
                        answer_text=answer_text,
                        start_position_character=start_position_character,
                        title=title,
                        is_impossible=is_impossible,
                        answers=answers,
                    )
                    examples.append(example)
        return examples
class SquadV1Processor(SquadProcessor):
    # Default data files for SQuAD version 1.1.
    train_file = "train-v1.1.json"
    dev_file = "dev-v1.1.json"
class SquadV2Processor(SquadProcessor):
    # Default data files for SQuAD version 2.0.
    train_file = "train-v2.0.json"
    dev_file = "dev-v2.0.json"
class SquadExample(object):
    """
    A single training/test example for the Squad dataset, as loaded from disk.
    Args:
        qas_id: The example's unique identifier
        question_text: The question string
        context_text: The context string
        answer_text: The answer string
        start_position_character: The character position of the start of the answer
        title: The title of the example
        answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
        is_impossible: False by default, set to True if the example has no possible answer.
    """
    def __init__(
        self,
        qas_id,
        question_text,
        context_text,
        answer_text,
        start_position_character,
        title,
        answers=None,
        is_impossible=False,
    ):
        self.qas_id = qas_id
        self.question_text = question_text
        self.context_text = context_text
        self.answer_text = answer_text
        self.title = title
        self.is_impossible = is_impossible
        # `None` (not `[]`) as the default avoids the mutable-default-argument
        # pitfall where every example would share (and mutate) one list object;
        # this also matches the docstring, which already said "None by default".
        self.answers = answers if answers is not None else []
        self.start_position, self.end_position = 0, 0
        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True
        # Split on whitespace so that different tokens may be attributed to their original position.
        for c in self.context_text:
            if _is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            char_to_word_offset.append(len(doc_tokens) - 1)
        self.doc_tokens = doc_tokens
        self.char_to_word_offset = char_to_word_offset
        # The token-level answer span is only derived when an answer is provided
        # (i.e. for answerable training examples); otherwise both stay at 0.
        if start_position_character is not None and not is_impossible:
            self.start_position = char_to_word_offset[start_position_character]
            self.end_position = char_to_word_offset[
                min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
            ]
class SquadFeatures(object):
    """
    Single squad example features to be fed to a model.
    Those features are model-specific and can be crafted from :class:`~transformers.data.processors.squad.SquadExample`
    using the :method:`~transformers.data.processors.squad.squad_convert_examples_to_features` method.
    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
        token_type_ids: Segment token indices to indicate first and second portions of the inputs.
        cls_index: the index of the CLS token.
        p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
            Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer
        example_index: the index of the example
        unique_id: The unique Feature identifier
        paragraph_len: The length of the context
        token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object.
            If a token does not have their maximum context in this feature object, it means that another feature object
            has more information related to that token and should be prioritized over this feature for that token.
        tokens: list of tokens corresponding to the input ids
        token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
        start_position: start of the answer token index
        end_position: end of the answer token index
        is_impossible: True when this particular span holds no answer for the question.
    """
    def __init__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        cls_index,
        p_mask,
        example_index,
        unique_id,
        paragraph_len,
        token_is_max_context,
        tokens,
        token_to_orig_map,
        start_position,
        end_position,
        is_impossible,
    ):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.cls_index = cls_index
        self.p_mask = p_mask
        self.example_index = example_index
        self.unique_id = unique_id
        self.paragraph_len = paragraph_len
        self.token_is_max_context = token_is_max_context
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible
class SquadResult(object):
    """
    Holds one model prediction for a single SQuAD feature, keyed by its unique id.
    Args:
        unique_id: The unique identifier corresponding to that example.
        start_logits: The logits corresponding to the start of the answer
        end_logits: The logits corresponding to the end of the answer
    The three top-index/cls arguments are only stored when ``start_top_index``
    is truthy (beam-search style outputs, e.g. XLNet/XLM heads).
    """

    def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
        self.unique_id = unique_id
        self.start_logits = start_logits
        self.end_logits = end_logits
        if start_top_index:
            self.start_top_index = start_top_index
            self.end_top_index = end_top_index
            self.cls_logits = cls_logits
| 28,192 | 38.211405 | 125 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/transformers/data/processors/utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import csv
import dataclasses
import json
import logging
from dataclasses import dataclass
from typing import Optional
from ...file_utils import is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass(frozen=False)
class InputExample:
    """
    A single training/test example for simple sequence classification.

    Args:
        guid: Unique id for the example.
        text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
        text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
        label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        as_dict = dataclasses.asdict(self)
        return json.dumps(as_dict, indent=2, sort_keys=True) + "\n"
class InputFeatures(object):
    """
    A single set of features of data.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``: usually ``1`` for tokens that are
            NOT MASKED, ``0`` for MASKED (padded) tokens.
        token_type_ids: Segment token indices to indicate first and second portions of the inputs.
        label: Label corresponding to the input
    """

    def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label

    def __repr__(self):
        # The JSON serialization doubles as the debug representation.
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        # Deep copy so callers cannot mutate the feature through the returned dict.
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Gets an example from a dict with tensorflow tensors.

        Args:
            tensor_dict: Keys and values should match the corresponding Glue
                tensorflow_dataset examples.
        """
        raise NotImplementedError()

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    def tfds_map(self, example):
        """Converts a tensorflow_datasets example to the GLUE format.

        Some tensorflow_datasets datasets store labels as integer indices;
        this maps them back onto the string labels from ``get_labels()``.
        """
        label_names = self.get_labels()
        if len(label_names) > 1:
            example.label = label_names[int(example.label)]
        return example

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r", encoding="utf-8-sig") as f:
            rows = csv.reader(f, delimiter="\t", quotechar=quotechar)
            return list(rows)
class SingleSentenceClassificationProcessor(DataProcessor):
    """ Generic processor for a single sentence classification data set."""

    def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
        # Fresh lists are created per instance (avoids shared mutable defaults).
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        # mode: "classification" (labels mapped to ids) or "regression" (labels cast to float).
        self.mode = mode
        self.verbose = verbose

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        # A slice returns a new processor that shares this instance's label list;
        # an integer index returns a single InputExample.
        if isinstance(idx, slice):
            return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
        return self.examples[idx]

    @classmethod
    def create_from_csv(
        cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
    ):
        """Alternate constructor: build a processor directly from a TSV file,
        overwriting any labels/examples the fresh instance may have."""
        processor = cls(**kwargs)
        processor.add_examples_from_csv(
            file_name,
            split_name=split_name,
            column_label=column_label,
            column_text=column_text,
            column_id=column_id,
            skip_first_row=skip_first_row,
            overwrite_labels=True,
            overwrite_examples=True,
        )
        return processor

    @classmethod
    def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
        """Alternate constructor: build a processor from in-memory texts or
        (text, label) pairs."""
        processor = cls(**kwargs)
        processor.add_examples(texts_or_text_and_labels, labels=labels)
        return processor

    def add_examples_from_csv(
        self,
        file_name,
        split_name="",
        column_label=0,
        column_text=1,
        column_id=None,
        skip_first_row=False,
        overwrite_labels=False,
        overwrite_examples=False,
    ):
        """Reads a tab-separated file and appends (or replaces) its rows as examples.

        column_label / column_text / column_id select the relevant columns;
        when column_id is None a guid is synthesized from split_name and the row index.
        """
        lines = self._read_tsv(file_name)
        if skip_first_row:
            lines = lines[1:]
        texts = []
        labels = []
        ids = []
        for (i, line) in enumerate(lines):
            texts.append(line[column_text])
            labels.append(line[column_label])
            if column_id is not None:
                ids.append(line[column_id])
            else:
                # No explicit id column: synthesize "split-index" (or just "index").
                guid = "%s-%s" % (split_name, i) if split_name else "%s" % i
                ids.append(guid)

        return self.add_examples(
            texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
        )

    def add_examples(
        self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
    ):
        """Adds examples to the processor and updates its label set.

        Each element of texts_or_text_and_labels is either a text, or a
        (text, label) tuple when the parallel `labels` list is not given.
        Returns the full example list after the update.
        """
        assert labels is None or len(texts_or_text_and_labels) == len(labels)
        assert ids is None or len(texts_or_text_and_labels) == len(ids)
        if ids is None:
            ids = [None] * len(texts_or_text_and_labels)
        if labels is None:
            labels = [None] * len(texts_or_text_and_labels)
        examples = []
        added_labels = set()
        for (text_or_text_and_label, label, guid) in zip(texts_or_text_and_labels, labels, ids):
            if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
                text, label = text_or_text_and_label
            else:
                text = text_or_text_and_label
            added_labels.add(label)
            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))

        # Update examples
        if overwrite_examples:
            self.examples = examples
        else:
            self.examples.extend(examples)

        # Update labels
        if overwrite_labels:
            self.labels = list(added_labels)
        else:
            self.labels = list(set(self.labels).union(added_labels))

        return self.examples

    def get_features(
        self,
        tokenizer,
        max_length=None,
        pad_on_left=False,
        pad_token=0,
        mask_padding_with_zero=True,
        return_tensors=None,
    ):
        """
        Convert examples in a list of ``InputFeatures``

        Args:
            tokenizer: Instance of a tokenizer that will tokenize the examples
            max_length: Maximum example length
            task: GLUE task
            label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
            output_mode: String indicating the output mode. Either ``regression`` or ``classification``
            pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
            pad_token: Padding token
            mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
                and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
                actual values)

        Returns:
            If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
            containing the task-specific features. If the input is a list of ``InputExamples``, will return
            a list of task-specific ``InputFeatures`` which can be fed to the model.
        """
        if max_length is None:
            max_length = tokenizer.max_len

        # Label-string -> id mapping for classification mode.
        label_map = {label: i for i, label in enumerate(self.labels)}

        # First pass: tokenize everything so the batch can be padded to the
        # longest sequence actually present (not necessarily max_length).
        all_input_ids = []
        for (ex_index, example) in enumerate(self.examples):
            if ex_index % 10000 == 0:
                logger.info("Tokenizing example %d", ex_index)

            input_ids = tokenizer.encode(
                example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len),
            )
            all_input_ids.append(input_ids)

        # NOTE(review): max() raises ValueError when self.examples is empty — confirm callers never pass an empty set.
        batch_length = max(len(input_ids) for input_ids in all_input_ids)

        # Second pass: pad, build attention masks, and convert labels.
        features = []
        for (ex_index, (input_ids, example)) in enumerate(zip(all_input_ids, self.examples)):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len(self.examples)))
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = batch_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)

            assert len(input_ids) == batch_length, "Error with input length {} vs {}".format(
                len(input_ids), batch_length
            )
            assert len(attention_mask) == batch_length, "Error with input length {} vs {}".format(
                len(attention_mask), batch_length
            )

            if self.mode == "classification":
                label = label_map[example.label]
            elif self.mode == "regression":
                label = float(example.label)
            else:
                raise ValueError(self.mode)

            if ex_index < 5 and self.verbose:
                logger.info("*** Example ***")
                logger.info("guid: %s" % (example.guid))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
                logger.info("label: %s (id = %d)" % (example.label, label))

            features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))

        # Optionally wrap the features in a framework-specific dataset.
        if return_tensors is None:
            return features
        elif return_tensors == "tf":
            if not is_tf_available():
                raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf

            def gen():
                for ex in features:
                    yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)

            dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
            )
            return dataset
        elif return_tensors == "pt":
            if not is_torch_available():
                raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset

            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
            if self.mode == "classification":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == "regression":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
| 13,817 | 38.593123 | 119 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/SentenceTransformer.py | import json
import logging
import os
import shutil
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable
from zipfile import ZipFile
import requests
import numpy as np
import transformers
import torch
from numpy import ndarray
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from tqdm.autonotebook import tqdm, trange
import torch.multiprocessing as mp
import math
import queue
from . import __DOWNLOAD_SERVER__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, http_get
from .datasets.EncodeDataset import EncodeDataset
from .models import Transformer, Pooling
from . import __version__
class SentenceTransformer(nn.Sequential):
"""
Loads or create a SentenceTransformer model, that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
"""
def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):
    """Resolves model_name_or_path (local folder, URL, or model name to download),
    loads the module pipeline from disk, and moves the model to the target device."""
    if model_name_or_path is not None and model_name_or_path != "":
        logging.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
        model_path = model_name_or_path

        # Not a local folder and not a URL: treat it as a model name hosted on the download server.
        if not os.path.isdir(model_path) and not model_path.startswith('http://') and not model_path.startswith('https://'):
            logging.info("Did not find folder {}. Assume to download model from server.".format(model_path))
            model_path = __DOWNLOAD_SERVER__ + model_path + '.zip'

        if model_path.startswith('http://') or model_path.startswith('https://'):
            model_url = model_path
            # Derive a filesystem-safe cache folder name from the URL.
            # NOTE(review): rstrip('.zip') strips any trailing '.', 'z', 'i', 'p' characters,
            # not just the '.zip' suffix — confirm this is intended.
            folder_name = model_url.replace("https://", "").replace("http://", "").replace("/", "_")[:250].rstrip('.zip')

            try:
                from torch.hub import _get_torch_home
                torch_cache_home = _get_torch_home()
            except ImportError:
                # Older torch versions: fall back to the documented TORCH_HOME/XDG convention.
                torch_cache_home = os.path.expanduser(
                    os.getenv('TORCH_HOME', os.path.join(
                        os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
            default_cache_path = os.path.join(torch_cache_home, 'sentence_transformers')
            model_path = os.path.join(default_cache_path, folder_name)
            os.makedirs(model_path, exist_ok=True)

            # Only download when the cache folder is empty.
            if not os.listdir(model_path):
                if model_url[-1] == "/":
                    model_url = model_url[:-1]
                logging.info("Downloading sentence transformer model from {} and saving it at {}".format(model_url, model_path))
                try:
                    zip_save_path = os.path.join(model_path, 'model.zip')
                    http_get(model_url, zip_save_path)
                    with ZipFile(zip_save_path, 'r') as zip:
                        zip.extractall(model_path)
                    os.remove(zip_save_path)
                except requests.exceptions.HTTPError as e:
                    # Remove the partially-created cache folder so a retry starts clean.
                    shutil.rmtree(model_path)
                    if e.response.status_code == 404:
                        # 404: no prebuilt SentenceTransformer — fall back to building a
                        # Transformer + mean-pooling model from the Huggingface hub name.
                        logging.warning('SentenceTransformer-Model {} not found. Try to create it from scratch'.format(model_url))
                        logging.warning('Try to create Transformer Model {} with mean pooling'.format(model_name_or_path))

                        model_path = None
                        transformer_model = Transformer(model_name_or_path)
                        pooling_model = Pooling(transformer_model.get_word_embedding_dimension())
                        modules = [transformer_model, pooling_model]
                    else:
                        raise e
                except Exception as e:
                    shutil.rmtree(model_path)
                    raise e

        #### Load from disk
        if model_path is not None:
            logging.info("Load SentenceTransformer from folder: {}".format(model_path))

            if os.path.exists(os.path.join(model_path, 'config.json')):
                with open(os.path.join(model_path, 'config.json')) as fIn:
                    config = json.load(fIn)
                    # NOTE(review): string comparison of version numbers — '10.0.0' < '2.0.0' lexicographically.
                    if config['__version__'] > __version__:
                        logging.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(config['__version__'], __version__))

            # modules.json lists each pipeline stage (type + sub-folder) in order.
            with open(os.path.join(model_path, 'modules.json')) as fIn:
                contained_modules = json.load(fIn)

            modules = OrderedDict()
            for module_config in contained_modules:
                module_class = import_from_string(module_config['type'])
                module = module_class.load(os.path.join(model_path, module_config['path']))
                modules[module_config['name']] = module

    if modules is not None and not isinstance(modules, OrderedDict):
        modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])

    super().__init__(modules)
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        logging.info("Use pytorch device: {}".format(device))
    self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str], List[int]],
           batch_size: int = 32,
           show_progress_bar: bool = None,
           output_value: str = 'sentence_embedding',
           convert_to_numpy: bool = True,
           convert_to_tensor: bool = False,
           is_pretokenized: bool = False,
           device: str = None,
           num_workers: int = 0) -> Union[List[Tensor], ndarray, Tensor]:
    """
    Computes sentence embeddings

    :param sentences: the sentences to embed
    :param batch_size: the batch size used for the computation
    :param show_progress_bar: Output a progress bar when encode sentences
    :param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings.
    :param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
    :param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from conver_to_numpy
    :param is_pretokenized: If is_pretokenized=True, sentences must be a list of integers, containing the tokenized sentences with each token convert to the respective int.
    :param device: Which torch.device to use for the computation
    :param num_workers: Number of background-workers to tokenize data. Set to positive number to increase tokenization speed
    :return:
       By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
    """
    self.eval()
    if show_progress_bar is None:
        # Default: show progress only when logging is at INFO/DEBUG verbosity.
        show_progress_bar = (logging.getLogger().getEffectiveLevel()==logging.INFO or logging.getLogger().getEffectiveLevel()==logging.DEBUG)

    input_was_string = False
    if isinstance(sentences, str): #Cast an individual sentence to a list with length 1
        sentences = [sentences]
        input_was_string = True

    if device is None:
        device = self._target_device

    self.to(device)

    all_embeddings = []
    # Sort by length so each batch contains similarly-sized inputs (less padding);
    # the permutation is undone after encoding.
    length_sorted_idx = np.argsort([len(sen) for sen in sentences])
    sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
    inp_dataset = EncodeDataset(sentences_sorted, model=self, is_tokenized=is_pretokenized)
    inp_dataloader = DataLoader(inp_dataset, batch_size=batch_size, collate_fn=self.smart_batching_collate_text_only, num_workers=num_workers, shuffle=False)

    iterator = inp_dataloader
    if show_progress_bar:
        iterator = tqdm(inp_dataloader, desc="Batches")

    for features in iterator:
        for feature_name in features:
            features[feature_name] = features[feature_name].to(device)

        with torch.no_grad():
            out_features = self.forward(features)
            embeddings = out_features[output_value]

            if output_value == 'token_embeddings':
                #Set token embeddings to 0 for padding tokens
                input_mask = out_features['attention_mask']
                input_mask_expanded = input_mask.unsqueeze(-1).expand(embeddings.size()).float()
                embeddings = embeddings * input_mask_expanded

            all_embeddings.extend(embeddings)

    # argsort of the original argsort is the inverse permutation: restores input order.
    all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]

    if convert_to_tensor:
        all_embeddings = torch.stack(all_embeddings)
    elif convert_to_numpy:
        all_embeddings = np.asarray([emb.cpu().detach().numpy() for emb in all_embeddings])

    if input_was_string:
        all_embeddings = all_embeddings[0]

    return all_embeddings
def start_multi_process_pool(self, target_devices: List[str] = None, encode_batch_size: int = 32):
    """
    Starts multi process to process the encode with several, independent  process.
    This methos is recommend if you want to encode on multiple GPUs. It is advised
    to start only one process per GPU. This method works together with encode_multi_process

    :param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
    :param encode_batch_size: Batch size for each process when calling encode
    :return: Returns a dict with the target processes, an input queue and and output queue.
    """
    if target_devices is None:
        if torch.cuda.is_available():
            # One worker per visible CUDA device.
            target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
        else:
            logging.info("CUDA is not available. Start 4 CPU worker")
            target_devices = ['cpu']*4

    logging.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))

    # 'spawn' start method: required so each worker gets a clean CUDA context.
    ctx = mp.get_context('spawn')
    input_queue = ctx.Queue()
    output_queue = ctx.Queue()
    processes = []

    for cuda_id in target_devices:
        # daemon=True: workers die with the parent process.
        p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue, encode_batch_size), daemon=True)
        p.start()
        processes.append(p)

    return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
    """
    Terminates all worker processes started with start_multi_process_pool and
    closes the pool's input/output queues.
    """
    workers = pool['processes']
    # Signal every worker to stop first...
    for worker in workers:
        worker.terminate()
    # ...then wait for each one to exit and release its resources.
    for worker in workers:
        worker.join()
        worker.close()

    pool['input'].close()
    pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], is_pretokenized: bool = False):
    """
    This method allows to run encode() on multiple GPUs. The sentences are chunked into smaller packages
    and sent to individual processes, which encode these on the different GPUs. This method is only suitable
    for encoding large sets of sentences

    :param sentences: List of sentences
    :param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
    :param is_pretokenized: If true, no tokenization will be applied. It is expected that the input sentences are list of ints.
    :return: Numpy matrix with all embeddings
    """
    # Aim for ~10 chunks per worker, but never more than 5000 sentences per chunk.
    chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
    logging.info("Chunk data into packages of size {}".format(chunk_size))

    if is_pretokenized:
        sentences_tokenized = sentences
    else:
        # Lazy map: sentences are tokenized as they are consumed below.
        sentences_tokenized = map(self.tokenize, sentences)

    input_queue = pool['input']
    num_chunks = 0
    chunk = []

    for sentence in sentences_tokenized:
        chunk.append(sentence)
        if len(chunk) >= chunk_size:
            # Each queue item carries its chunk index so results can be re-ordered.
            input_queue.put([num_chunks, chunk])
            num_chunks += 1
            chunk = []

    # Flush the final partial chunk.
    if len(chunk) > 0:
        input_queue.put([num_chunks, chunk])
        num_chunks += 1

    output_queue = pool['output']
    # Collect one result per chunk and restore the original chunk order.
    results_list = sorted([output_queue.get() for _ in range(num_chunks)], key=lambda x: x[0])
    embeddings = np.concatenate([result[1] for result in results_list])
    return embeddings
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue, encode_batch_size):
    """
    Internal working process to encode sentences in multi-process setup
    """
    while True:
        try:
            id, sentences = input_queue.get()
            embeddings = model.encode(sentences, device=target_device, is_pretokenized=True, show_progress_bar=False, convert_to_numpy=True, batch_size=encode_batch_size)
            results_queue.put([id, embeddings])
        except queue.Empty:
            # NOTE(review): a blocking Queue.get() never raises queue.Empty, so this loop
            # normally only ends when the process is terminated by stop_multi_process_pool — confirm.
            break
def get_max_seq_length(self):
    """
    Returns the maximal sequence length for input the model accepts. Longer inputs will be truncated.
    Returns None when the first module does not define a ``max_seq_length``.
    """
    # The length limit lives on the first module of the pipeline.
    return getattr(self._first_module(), 'max_seq_length', None)
def tokenize(self, text: str):
    """
    Tokenizes the text
    """
    # Tokenization is delegated to the first module (the transformer wrapper).
    tokenizer_module = self._first_module()
    return tokenizer_module.tokenize(text)
def get_sentence_features(self, *features):
    """Delegates sentence-feature construction to the first module of the pipeline."""
    first = self._first_module()
    return first.get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
    """Returns the dimensionality of the produced sentence embeddings (from the last module)."""
    final_module = self._last_module()
    return final_module.get_sentence_embedding_dimension()
def _first_module(self):
    """Returns the first module of this sequential embedder"""
    first_key = next(iter(self._modules))
    return self._modules[first_key]
def _last_module(self):
    """Returns the last module of this sequential embedder"""
    last_key = next(reversed(self._modules))
    return self._modules[last_key]
def save(self, path):
    """
    Saves all elements for this seq. sentence embedder into different sub-folders:
    one numbered folder per module, plus modules.json / config.json metadata.
    """
    if path is None:
        return

    logging.info("Save model to {}".format(path))
    module_index = []
    for idx, name in enumerate(self._modules):
        module = self._modules[name]
        # Each module is stored in its own sub-folder named "<idx>_<ClassName>".
        model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
        os.makedirs(model_path, exist_ok=True)
        module.save(model_path)
        module_index.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})

    # modules.json describes how to reassemble the pipeline; config.json records the library version.
    with open(os.path.join(path, 'modules.json'), 'w') as fOut:
        json.dump(module_index, fOut, indent=2)

    with open(os.path.join(path, 'config.json'), 'w') as fOut:
        json.dump({'__version__': __version__}, fOut, indent=2)
def smart_batching_collate(self, batch):
    """
    Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model

    :param batch:
        a batch from a SmartBatchingDataset
    :return:
        a batch of tensors for the model
    """
    # Each batch item is (tokens, label) where tokens holds one entry per text column
    # (e.g. two for sentence pairs). num_texts is that column count.
    num_texts = len(batch[0][0])

    labels = []
    paired_texts = [[] for _ in range(num_texts)]
    max_seq_len = [0] * num_texts
    for tokens, label in batch:
        labels.append(label)
        for i in range(num_texts):
            paired_texts[i].append(tokens[i])
            max_seq_len[i] = max(max_seq_len[i], len(tokens[i]))

    features = []
    for idx in range(num_texts):
        max_len = max_seq_len[idx]
        feature_lists = {}
        # Collect per-feature tensors (input_ids, attention_mask, ...) for every text in this column.
        for text in paired_texts[idx]:
            sentence_features = self.get_sentence_features(text, max_len)

            for feature_name in sentence_features:
                if feature_name not in feature_lists:
                    feature_lists[feature_name] = []
                feature_lists[feature_name].append(sentence_features[feature_name])

        import copy
        # Manual right-padding of each feature tensor to the longest one seen.
        # NOTE(review): max_ is initialized once per column and keeps growing across
        # feature names, so all features of a column are padded to a common width —
        # presumably fine because input_ids/attention_mask have equal lengths; confirm.
        max_ = 0
        for feature_name in feature_lists:
            #feature_lists[feature_name] = torch.tensor(np.asarray(feature_lists[feature_name]))
            for i in range(len(feature_lists[feature_name])):
                if max_ < feature_lists[feature_name][i].size(-1):
                    max_ = feature_lists[feature_name][i].size(-1)
            # Buffer initialized with ones; every row is overwritten below.
            new_data = torch.ones(len(feature_lists[feature_name]), max_, dtype=torch.int64)
            for i in range(len(feature_lists[feature_name])):
                epo = max_ - feature_lists[feature_name][i].size(-1)
                copy_feature = copy.deepcopy(feature_lists[feature_name][i].squeeze(0))
                # Append one zero per missing position (pad token id 0).
                for j in range(epo):
                    copy_feature = torch.cat([copy_feature, torch.tensor([0])], dim=-1)
                new_data[i] = copy_feature
            feature_lists[feature_name] = new_data
            #feature_lists[feature_name] = torch.cat(feature_lists[feature_name])
        features.append(feature_lists)

    return {'features': features, 'labels': torch.stack(labels)}
def smart_batching_collate_text_only(self, batch):
    """
    Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model.
    Same as smart_batching_collate, but for label-free batches with a single text column
    (used by encode()).

    :param batch:
        a batch from a SmartBatchingDataset
    :return:
        a batch of tensors for the model
    """
    max_seq_len = max([len(text) for text in batch])
    feature_lists = {}

    # Collect per-feature tensors (input_ids, attention_mask, ...) for every text.
    for text in batch:
        sentence_features = self.get_sentence_features(text, max_seq_len)
        for feature_name in sentence_features:
            if feature_name not in feature_lists:
                feature_lists[feature_name] = []
            feature_lists[feature_name].append(sentence_features[feature_name])

    import copy
    # Manual right-padding to the longest tensor seen across all feature names.
    # NOTE(review): max_ accumulates across feature names (not reset per feature) —
    # presumably fine because all features share the same sequence length; confirm.
    max_ = 0
    for feature_name in feature_lists:
        for i in range(len(feature_lists[feature_name])):
            if max_ < feature_lists[feature_name][i].size(-1):
                max_ = feature_lists[feature_name][i].size(-1)
        # Buffer initialized with ones; every row is overwritten below.
        new_data = torch.ones(len(feature_lists[feature_name]), max_, dtype=torch.int64)
        for i in range(len(feature_lists[feature_name])):
            epo = max_ - feature_lists[feature_name][i].size(-1)
            copy_feature = copy.deepcopy(feature_lists[feature_name][i].squeeze(0))
            # Append one zero per missing position (pad token id 0).
            for j in range(epo):
                copy_feature = torch.cat([copy_feature, torch.tensor([0])], dim=-1)
            new_data[i] = copy_feature
        feature_lists[feature_name] = new_data

    return feature_lists
def fit(self,
        train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
        evaluator: SentenceEvaluator,
        epochs: int = 1,
        steps_per_epoch = None,
        scheduler: str = 'WarmupLinear',
        warmup_steps: int = 10000,
        optimizer_class: Type[Optimizer] = transformers.AdamW,
        # NOTE(review): mutable dict as a default argument — safe only as long as
        # no caller mutates it; consider None + in-function default.
        optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},
        weight_decay: float = 0.01,
        evaluation_steps: int = 0,
        output_path: str = None,
        output_path_ignore_not_empty: bool = False,
        save_best_model: bool = True,
        max_grad_norm: float = 1,
        use_amp: bool = False,
        callback: Callable[[float, int, int], None] = None,
        ):
    """
    Train the model with the given training objective
    Each training objective is sampled in turn for one batch.
    We sample only as many batches from each objective as there are in the smallest one
    to make sure of equal training with each dataset.

    :param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
    :param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
    :param epochs: Number of epochs for training
    :param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch is equal the DataLoader size from train_objectives.
    :param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
    :param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from o up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
    :param optimizer_class: Optimizer
    :param optimizer_params: Optimizer parameters
    :param weight_decay: Weight decay for model parameters
    :param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
    :param output_path: Storage path for the model and evaluation files
    :param output_path_ignore_not_empty: By default, training will stop if output_path is not empty. If set to true, this error will be ignored and training proceeds.
    :param save_best_model: If true, the best model (according to evaluator) is stored at output_path
    :param max_grad_norm: Used for gradient normalization.
    :param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
    :param callback: Callback function that is invoked after each evaluation.
            It must accept the following three parameters in this order:
            `score`, `epoch`, `steps`
    """
    if use_amp:
        from torch.cuda.amp import autocast
        scaler = torch.cuda.amp.GradScaler()

    self.to(self._target_device)

    if output_path is not None:
        os.makedirs(output_path, exist_ok=True)
        if not output_path_ignore_not_empty and len(os.listdir(output_path)) > 0:
            raise ValueError("Output directory ({}) already exists and is not empty.".format(
                output_path))

    dataloaders = [dataloader for dataloader, _ in train_objectives]

    # Use smart batching
    for dataloader in dataloaders:
        dataloader.collate_fn = self.smart_batching_collate

    loss_models = [loss for _, loss in train_objectives]
    device = self._target_device
    for loss_model in loss_models:
        loss_model.to(device)

    self.best_score = -9999999

    if steps_per_epoch is None or steps_per_epoch == 0:
        # Epoch length = size of the smallest dataloader (equal sampling per objective).
        steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])

    num_train_steps = int(steps_per_epoch * epochs)

    # Prepare optimizers: one optimizer + scheduler per training objective.
    optimizers = []
    schedulers = []
    for loss_model in loss_models:
        param_optimizer = list(loss_model.named_parameters())

        # Biases and LayerNorm parameters are excluded from weight decay.
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]

        optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
        scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)

        optimizers.append(optimizer)
        schedulers.append(scheduler_obj)

    global_step = 0
    data_iterators = [iter(dataloader) for dataloader in dataloaders]

    num_train_objectives = len(train_objectives)

    skip_scheduler = False
    for epoch in trange(epochs, desc="Epoch"):
        training_steps = 0

        for loss_model in loss_models:
            loss_model.zero_grad()
            loss_model.train()

        for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05):
            # Round-robin over the training objectives: one batch of each per step.
            for train_idx in range(num_train_objectives):
                loss_model = loss_models[train_idx]
                optimizer = optimizers[train_idx]
                scheduler = schedulers[train_idx]
                data_iterator = data_iterators[train_idx]

                try:
                    data = next(data_iterator)
                except StopIteration:
                    #logging.info("Restart data_iterator")
                    # Smaller dataloaders are restarted so every objective keeps training.
                    data_iterator = iter(dataloaders[train_idx])
                    data_iterators[train_idx] = data_iterator
                    data = next(data_iterator)

                features, labels = batch_to_device(data, self._target_device)

                if use_amp:
                    with autocast():
                        loss_value = loss_model(features, labels)

                    scale_before_step = scaler.get_scale()
                    scaler.scale(loss_value).backward()
                    scaler.unscale_(optimizer)
                    torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
                    scaler.step(optimizer)
                    scaler.update()

                    # If the loss scale changed, the optimizer step was skipped —
                    # also skip the scheduler step to keep them in sync.
                    skip_scheduler = scaler.get_scale() != scale_before_step
                else:
                    try:
                        loss_value = loss_model(features, labels)
                        loss_value.backward()
                        torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
                        optimizer.step()
                    except RuntimeError:
                        # NOTE(review): this silently drops batches on any RuntimeError
                        # (e.g. CUDA OOM) — at minimum this should be logged; confirm intent.
                        pass

                optimizer.zero_grad()

                if not skip_scheduler:
                    scheduler.step()

            training_steps += 1
            global_step += 1

            if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
                self._eval_during_training(evaluator, output_path, save_best_model, epoch,
                                           training_steps, callback)
                # Evaluation may have changed mode; restore training state.
                for loss_model in loss_models:
                    loss_model.zero_grad()
                    loss_model.train()

        # End-of-epoch evaluation (steps = -1 marks "after full epoch").
        self._eval_during_training(evaluator, output_path, save_best_model, epoch,
                                   -1, callback)
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
if evaluator is not None:
score = evaluator(self, output_path=output_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score and save_best_model:
self.save(output_path)
self.best_score = score
def _get_scheduler(self, optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
Property to set the tokenizer that is should used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
| 31,733 | 43.632911 | 280 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/util.py | import requests
from torch import Tensor, device
from typing import Tuple, List
from tqdm import tqdm
import sys
import importlib
import os
import torch
import numpy as np
import queue
import logging
def pytorch_cos_sim(a: Tensor, b: Tensor):
    """
    Computes the cosine similarity cos_sim(a[i], b[j]) for all i and j.
    This function can be used as a faster replacement for 1-scipy.spatial.distance.cdist(a,b)

    Inputs may be torch tensors, numpy arrays, or (nested) lists; a 1-dim input is
    treated as a single row.

    :return: Matrix with res[i][j] = cos_sim(a[i], b[j])
    """
    if not isinstance(a, torch.Tensor):
        a = torch.tensor(a)
    if not isinstance(b, torch.Tensor):
        b = torch.tensor(b)
    if len(a.shape) == 1:
        a = a.unsqueeze(0)
    if len(b.shape) == 1:
        b = b.unsqueeze(0)
    # F.normalize clamps the norm away from zero, so all-zero rows yield a
    # similarity of 0 instead of the NaNs produced by a plain x / x.norm() division.
    a_norm = torch.nn.functional.normalize(a, p=2, dim=1)
    b_norm = torch.nn.functional.normalize(b, p=2, dim=1)
    return torch.mm(a_norm, b_norm.transpose(0, 1))
def paraphrase_mining(model,
                      sentences: List[str],
                      show_progress_bar=False,
                      batch_size=32,
                      query_chunk_size: int = 5000,
                      corpus_chunk_size: int = 100000,
                      max_pairs: int = 500000,
                      top_k: int = 100):
    """
    Given a list of sentences / texts, this function performs paraphrase mining. It compares all sentences against all
    other sentences and returns a list with the pairs that have the highest cosine similarity score.

    :param model: SentenceTransformer model for embedding computation
    :param sentences: A list of strings (texts or sentences)
    :param show_progress_bar: Plotting of a progress bar
    :param batch_size: Number of texts that are encoded simultaneously by the model
    :param query_chunk_size: Search for most similar pairs for #query_chunk_size at the same time. Decrease, to lower memory footprint (increases run-time).
    :param corpus_chunk_size: Compare a sentence simultaneously against #corpus_chunk_size other sentences. Decrease, to lower memory footprint (increases run-time).
    :param max_pairs: Maximal number of text pairs returned.
    :param top_k: For each sentence, we retrieve up to top_k other sentences
    :return: Returns a list of triplets with the format [score, id1, id2], sorted by decreasing score
    """
    top_k += 1 #A sentence has the highest similarity to itself. Increase +1 as we are interest in distinct pairs
    # Compute embedding for the sentences
    embeddings = model.encode(sentences, show_progress_bar=show_progress_bar, batch_size=batch_size,
                              convert_to_tensor=True)
    # Mine for duplicates. The min-oriented PriorityQueue keeps the weakest candidate
    # pair at the front so it can be evicted cheaply once max_pairs is exceeded;
    # min_score then rises to prune further weak candidates early.
    pairs = queue.PriorityQueue()
    min_score = -1
    num_added = 0
    # Compare every query chunk against every corpus chunk (both drawn from the same
    # embedding matrix), bounding peak memory for the similarity matrix.
    for corpus_start_idx in range(0, len(embeddings), corpus_chunk_size):
        corpus_end_idx = min(corpus_start_idx + corpus_chunk_size, len(embeddings))
        for query_start_idx in range(0, len(embeddings), query_chunk_size):
            query_end_idx = min(query_start_idx + query_chunk_size, len(embeddings))
            #logging.info("Compute cosine similarities")
            cos_scores = pytorch_cos_sim(embeddings[query_start_idx:query_end_idx],
                                         embeddings[corpus_start_idx:corpus_end_idx]).cpu()
            # NOTE(review): the "-1" caps k at chunk_width-1, so one candidate per row
            # is never considered when the chunk is narrower than top_k — confirm intended.
            cos_scores_top_k_values, cos_scores_top_k_idx = torch.topk(cos_scores, min(top_k, len(cos_scores[0])-1), dim=1, largest=True, sorted=False)
            cos_scores_top_k_values = cos_scores_top_k_values.tolist()
            cos_scores_top_k_idx = cos_scores_top_k_idx.tolist()
            #logging.info("Find most similar pairs out of {} queries".format(len(cos_scores)))
            for query_itr in range(len(cos_scores)):
                for top_k_idx, corpus_itr in enumerate(cos_scores_top_k_idx[query_itr]):
                    # Map chunk-local indices back to global sentence indices.
                    i = query_start_idx + query_itr
                    j = corpus_start_idx + corpus_itr
                    # Skip self-pairs; only keep candidates beating the current cutoff.
                    if i != j and cos_scores_top_k_values[query_itr][top_k_idx] > min_score:
                        pairs.put((cos_scores_top_k_values[query_itr][top_k_idx], i, j))
                        num_added += 1
                        if num_added >= max_pairs:
                            # Evict the weakest pair and raise the admission cutoff.
                            entry = pairs.get()
                            min_score = entry[0]
    # Get the pairs. (i, j) and (j, i) may both be present; deduplicate on the
    # order-independent (sorted_i, sorted_j) key.
    added_pairs = set() # Used for duplicate detection
    pairs_list = []
    while not pairs.empty():
        score, i, j = pairs.get()
        sorted_i, sorted_j = sorted([i, j])
        if sorted_i != sorted_j and (sorted_i, sorted_j) not in added_pairs:
            added_pairs.add((sorted_i, sorted_j))
            pairs_list.append([score, i, j])
    # Highest scores first
    pairs_list = sorted(pairs_list, key=lambda x: x[0], reverse=True)
    return pairs_list
def information_retrieval(*args, **kwargs):
    """Deprecated alias kept for backwards compatibility. Use semantic_search instead."""
    return semantic_search(*args, **kwargs)
def semantic_search(query_embeddings: Tensor,
                    corpus_embeddings: Tensor,
                    query_chunk_size: int = 100,
                    corpus_chunk_size: int = 100000,
                    top_k: int = 10):
    """
    This function performs a cosine similarity search between a list of query embeddings  and a list of corpus embeddings.
    It can be used for Information Retrieval / Semantic Search for corpora up to about 1 Million entries.

    :param query_embeddings: A 2 dimensional tensor with the query embeddings.
    :param corpus_embeddings: A 2 dimensional tensor with the corpus embeddings.
    :param query_chunk_size: Process 100 queries simultaneously. Increasing that value increases the speed, but requires more memory.
    :param corpus_chunk_size: Scans the corpus 100k entries at a time. Increasing that value increases the speed, but requires more memory.
    :param top_k: Retrieve top k matching entries. Note, if your corpus is larger than query_chunk_size, |Chunks|*top_k are returned
    :return: Returns a sorted list with decreasing cosine similarity scores. Entries are dictionaries with the keys 'corpus_id' and 'score'
    """
    # Accept numpy arrays or lists of tensors in addition to 2-dim tensors.
    if isinstance(query_embeddings, (np.ndarray, np.generic)):
        query_embeddings = torch.from_numpy(query_embeddings)
    elif isinstance(query_embeddings, list):
        query_embeddings = torch.stack(query_embeddings)
    if len(query_embeddings.shape) == 1:
        query_embeddings = query_embeddings.unsqueeze(0)
    if isinstance(corpus_embeddings, (np.ndarray, np.generic)):
        corpus_embeddings = torch.from_numpy(corpus_embeddings)
    elif isinstance(corpus_embeddings, list):
        corpus_embeddings = torch.stack(corpus_embeddings)
    #Normalize scores, so that the dot-product is equivalent to cosine similarity
    # (an all-zero row divides by zero here and yields NaN; np.nan_to_num below maps it to 0)
    query_embeddings = query_embeddings / query_embeddings.norm(dim=1)[:, None]
    corpus_embeddings = corpus_embeddings / corpus_embeddings.norm(dim=1)[:, None]
    queries_result_list = [[] for _ in range(len(query_embeddings))]
    for query_start_idx in range(0, len(query_embeddings), query_chunk_size):
        query_end_idx = min(query_start_idx + query_chunk_size, len(query_embeddings))
        # Iterate over chunks of the corpus
        for corpus_start_idx in range(0, len(corpus_embeddings), corpus_chunk_size):
            corpus_end_idx = min(corpus_start_idx + corpus_chunk_size, len(corpus_embeddings))
            # Compute cosine similarites
            cos_scores = torch.mm(query_embeddings[query_start_idx:query_end_idx], corpus_embeddings[corpus_start_idx:corpus_end_idx].transpose(0, 1)).cpu().numpy()
            cos_scores = np.nan_to_num(cos_scores)
            # Partial sort scores: argpartition is O(n) per row; the top_k slots are
            # unordered here, the final per-query sort below orders them.
            cos_score_argpartition = np.argpartition(-cos_scores, min(top_k, len(cos_scores[0])-1))[:, 0:top_k]
            for query_itr in range(len(cos_scores)):
                for sub_corpus_id in cos_score_argpartition[query_itr]:
                    # Map chunk-local index back to the global corpus index.
                    corpus_id = corpus_start_idx + sub_corpus_id
                    query_id = query_start_idx + query_itr
                    score = cos_scores[query_itr][sub_corpus_id]
                    queries_result_list[query_id].append({'corpus_id': corpus_id, 'score': score})
    #Sort and strip to top_k results
    for idx in range(len(queries_result_list)):
        queries_result_list[idx] = sorted(queries_result_list[idx], key=lambda x: x['score'], reverse=True)
        queries_result_list[idx] = queries_result_list[idx][0:top_k]
    return queries_result_list
def http_get(url, path):
    """
    Downloads a URL to a given path on disc.

    The payload is streamed into ``path + "_part"`` and only renamed to ``path``
    once the download completed, so an interrupted download never leaves a
    truncated file at the final location.

    :param url: URL to fetch
    :param path: destination file path (parent directories are created)
    """
    if os.path.dirname(path) != '':
        os.makedirs(os.path.dirname(path), exist_ok=True)
    req = requests.get(url, stream=True)
    if req.status_code != 200:
        print("Exception when trying to download {}. Response {}".format(url, req.status_code), file=sys.stderr)
        req.raise_for_status()
        return
    download_filepath = path+"_part"
    with open(download_filepath, "wb") as file_binary:
        content_length = req.headers.get('Content-Length')
        total = int(content_length) if content_length is not None else None
        progress = tqdm(unit="B", total=total, unit_scale=True)
        try:
            for chunk in req.iter_content(chunk_size=1024):
                if chunk: # filter out keep-alive new chunks
                    progress.update(len(chunk))
                    file_binary.write(chunk)
        finally:
            # Close the bar even if the download raises (the original leaked it).
            progress.close()
    # Rename only after the file handle is closed: renaming a still-open file
    # fails on Windows (the original called os.rename inside the with-block).
    os.rename(download_filepath, path)
def batch_to_device(batch, target_device: device):
    """
    send a pytorch batch to a device (CPU/GPU)

    :param batch: dict with 'features' (list of name->tensor dicts) and 'labels'
    :param target_device: device the tensors are moved to
    :return: (features, labels) with every tensor on target_device
    """
    features = batch['features']
    for sentence_features in features:
        for feature_name in list(sentence_features.keys()):
            sentence_features[feature_name] = sentence_features[feature_name].to(target_device)
    labels = batch['labels'].to(target_device)
    return features, labels
def fullname(o):
    """
    Gives a full name (package_name.class_name) for a class / object in Python. Will
    be used to load the correct classes from JSON files
    """
    cls = o.__class__
    module = cls.__module__
    # Builtins carry no useful module prefix, so report the bare class name.
    if module is None or module == str.__class__.__module__:
        return cls.__name__
    return module + '.' + cls.__name__
def import_from_string(dotted_path):
    """
    Import a dotted module path and return the attribute/class designated by the
    last name in the path. Raise ImportError if the import failed.
    """
    try:
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError:
        # No dot at all: the path cannot name an attribute inside a module.
        raise ImportError("%s doesn't look like a module path" % dotted_path)
    module = importlib.import_module(module_path)
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError('Module "%s" does not define a "%s" attribute/class' % (module_path, class_name))
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/evaluation/BinaryClassificationEvaluator.py | from . import SentenceEvaluator, SimilarityFunction
import torch
from torch.utils.data import DataLoader
import logging
from tqdm import tqdm
from sentence_transformers.util import batch_to_device
import os
import csv
from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances
import numpy as np
from typing import List
from ..readers import InputExample
from sklearn.metrics import average_precision_score
class BinaryClassificationEvaluator(SentenceEvaluator):
    """
    Evaluate a model based on the similarity of the embeddings by calculating the accuracy of identifying similar and
    dissimilar sentences.
    The metrics are the cosine similarity as well as euclidean and Manhattan distance
    The returned score is the accuracy with a specified metric.

    The results are written in a CSV. If a CSV already exists, then values are appended.

    The labels need to be 0 for dissimilar pairs and 1 for similar pairs.

    :param sentences1: The first column of sentences
    :param sentences2: The second column of sentences
    :param labels: labels[i] is the label for the pair (sentences1[i], sentences2[i]). Must be 0 or 1
    :param name: Name for the output
    :param batch_size: Batch size used to compute embeddings
    :param show_progress_bar: If true, prints a progress bar
    """
    def __init__(self, sentences1: List[str], sentences2: List[str], labels: List[int], name: str = '', batch_size: int = 32, show_progress_bar: bool = False):
        self.sentences1 = sentences1
        self.sentences2 = sentences2
        self.labels = labels
        assert len(self.sentences1) == len(self.sentences2)
        assert len(self.sentences1) == len(self.labels)
        for label in labels:
            assert (label == 0 or label == 1)
        self.name = name
        self.batch_size = batch_size
        if show_progress_bar is None:
            show_progress_bar = (logging.getLogger().getEffectiveLevel() == logging.INFO or logging.getLogger().getEffectiveLevel() == logging.DEBUG)
        self.show_progress_bar = show_progress_bar
        self.csv_file: str = "binary_classification_evaluation" + ("_"+name if name else '') + "_results.csv"
        # NOTE: the 'manhatten'/'eucledian' spellings are kept as-is so existing
        # result files and downstream parsers keep working.
        self.csv_headers = ["epoch", "steps",
                            "cosine_acc", "cosine_acc_threshold", "cosine_f1", "cosine_precision", "cosine_recall", "cosine_f1_threshold", "cosine_average_precision",
                            "manhatten_acc", "manhatten_acc_threshold", "manhatten_f1", "manhatten_precision", "manhatten_recall", "manhatten_f1_threshold", "manhatten_average_precision",
                            "eucledian_acc", "eucledian_acc_threshold", "eucledian_f1", "eucledian_precision", "eucledian_recall", "eucledian_f1_threshold", "eucledian_average_precision"]

    @classmethod
    def from_input_examples(cls, examples: List[InputExample], **kwargs):
        """Build an evaluator from a list of InputExample pairs (texts[0], texts[1], label)."""
        sentences1 = []
        sentences2 = []
        scores = []
        for example in examples:
            sentences1.append(example.texts[0])
            sentences2.append(example.texts[1])
            scores.append(example.label)
        return cls(sentences1, sentences2, scores, **kwargs)

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """
        Encode both sentence columns, score them with three metrics, log the results
        and append them to the CSV.

        :return: the average precision of the cosine-similarity metric (main score)
        """
        if epoch != -1:
            if steps == -1:
                out_txt = f" after epoch {epoch}:"
            else:
                out_txt = f" in epoch {epoch} after {steps} steps:"
        else:
            out_txt = ":"
        logging.info("Binary Accuracy Evaluation of the model on " + self.name + " dataset" + out_txt)
        embeddings1 = model.encode(self.sentences1, batch_size=self.batch_size,
                                   show_progress_bar=self.show_progress_bar, convert_to_numpy=True)
        embeddings2 = model.encode(self.sentences2, batch_size=self.batch_size,
                                   show_progress_bar=self.show_progress_bar, convert_to_numpy=True)
        cosine_scores = 1-paired_cosine_distances(embeddings1, embeddings2)
        manhattan_distances = paired_manhattan_distances(embeddings1, embeddings2)
        euclidean_distances = paired_euclidean_distances(embeddings1, embeddings2)
        labels = np.asarray(self.labels)
        file_output_data = [epoch, steps]
        main_score = None
        # reverse=True: larger score means more similar (cosine); reverse=False:
        # smaller value means more similar (distances).
        for name, scores, reverse in [['Cosine-Similarity', cosine_scores, True], ['Manhatten-Distance', manhattan_distances, False], ['Euclidean-Distance', euclidean_distances, False]]:
            acc, acc_threshold = self.find_best_acc_and_threshold(scores, labels, reverse)
            f1, precision, recall, f1_threshold = self.find_best_f1_and_threshold(scores, labels, reverse)
            # average_precision_score expects higher == more similar, so distances are negated.
            ap = average_precision_score(labels, scores * (1 if reverse else -1))
            logging.info("Accuracy with {}: {:.2f}\t(Threshold: {:.4f})".format(name, acc * 100, acc_threshold))
            logging.info("F1 with {}: {:.2f}\t(Threshold: {:.4f})".format(name, f1 * 100, f1_threshold))
            logging.info("Precision with {}: {:.2f}".format(name, precision * 100))
            logging.info("Recall with {}: {:.2f}".format(name, recall * 100))
            logging.info("Average Precision with {}: {:.2f}\n".format(name, ap * 100))
            file_output_data.extend([acc, acc_threshold, f1, precision, recall, f1_threshold, ap])
            if main_score is None: #Use AveragePrecision with Cosine-Similarity as main score
                main_score = ap
        if output_path is not None:
            csv_path = os.path.join(output_path, self.csv_file)
            output_file_exists = os.path.isfile(csv_path)
            # newline='' per the csv docs; without it the writer emits blank rows on Windows.
            with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8", newline="") as f:
                writer = csv.writer(f)
                if not output_file_exists:
                    writer.writerow(self.csv_headers)
                writer.writerow(file_output_data)
        return main_score

    @staticmethod
    def find_best_acc_and_threshold(scores, labels, high_score_more_similar: bool):
        """
        Sweep all decision thresholds and return (best_accuracy, threshold).
        Expects labels as a numpy array of 0/1 (as passed from __call__).
        """
        assert len(scores) == len(labels)
        rows = list(zip(scores, labels))
        # Most-similar first; walking the list simulates moving the threshold.
        rows = sorted(rows, key=lambda x: x[0], reverse=high_score_more_similar)
        max_acc = 0
        best_threshold = -1
        positive_so_far = 0
        remaining_negatives = sum(labels == 0)
        for i in range(len(rows)-1):
            score, label = rows[i]
            if label == 1:
                positive_so_far += 1
            else:
                remaining_negatives -= 1
            # Everything up to i predicted positive, the rest negative.
            acc = (positive_so_far + remaining_negatives) / len(labels)
            if acc > max_acc:
                max_acc = acc
                # Threshold halfway between the two neighbouring scores.
                best_threshold = (rows[i][0] + rows[i+1][0]) / 2
        return max_acc, best_threshold

    @staticmethod
    def find_best_f1_and_threshold(scores, labels, high_score_more_similar: bool):
        """
        Sweep all decision thresholds and return (best_f1, precision, recall, threshold).
        """
        assert len(scores) == len(labels)
        scores = np.asarray(scores)
        labels = np.asarray(labels)
        rows = list(zip(scores, labels))
        rows = sorted(rows, key=lambda x: x[0], reverse=high_score_more_similar)
        best_f1 = best_precision = best_recall = 0
        threshold = 0
        nextract = 0   # pairs predicted positive so far
        ncorrect = 0   # true positives so far
        total_num_duplicates = sum(labels)
        for i in range(len(rows)-1):
            score, label = rows[i]
            nextract += 1
            if label == 1:
                ncorrect += 1
            if ncorrect > 0:
                precision = ncorrect / nextract
                recall = ncorrect / total_num_duplicates
                f1 = 2 * precision * recall / (precision + recall)
                if f1 > best_f1:
                    best_f1 = f1
                    best_precision = precision
                    best_recall = recall
                    threshold = (rows[i][0] + rows[i + 1][0]) / 2
        return best_f1, best_precision, best_recall, threshold
| 8,128 | 42.010582 | 187 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/evaluation/EmbeddingSimilarityEvaluator.py | from . import SentenceEvaluator, SimilarityFunction
import torch
import logging
import os
import csv
from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances
from scipy.stats import pearsonr, spearmanr
import numpy as np
from typing import List
from ..readers import InputExample
class EmbeddingSimilarityEvaluator(SentenceEvaluator):
    """
    Evaluate a model based on the similarity of the embeddings by calculating the Spearman and Pearson rank correlation
    in comparison to the gold standard labels.
    The metrics are the cosine similarity as well as euclidean and Manhattan distance
    The returned score is the Spearman correlation with a specified metric.

    The results are written in a CSV. If a CSV already exists, then values are appended.
    """
    def __init__(self, sentences1: List[str], sentences2: List[str], scores: List[float], batch_size: int = 16, main_similarity: SimilarityFunction = None, name: str = '', show_progress_bar: bool = False):
        """
        Constructs an evaluator based for the dataset

        The labels need to indicate the similarity between the sentences.

        :param sentences1:
            List with the first sentence in a pair
        :param sentences2:
            List with the second sentence in a pair
        :param scores:
            Similarity score between sentences1[i] and sentences2[i]
        """
        self.sentences1 = sentences1
        self.sentences2 = sentences2
        self.scores = scores
        assert len(self.sentences1) == len(self.sentences2)
        assert len(self.sentences1) == len(self.scores)
        self.main_similarity = main_similarity
        self.name = name
        self.batch_size = batch_size
        if show_progress_bar is None:
            show_progress_bar = (logging.getLogger().getEffectiveLevel() == logging.INFO or logging.getLogger().getEffectiveLevel() == logging.DEBUG)
        self.show_progress_bar = show_progress_bar
        self.csv_file = "similarity_evaluation"+("_"+name if name else '')+"_results.csv"
        self.csv_headers = ["epoch", "steps", "cosine_pearson", "cosine_spearman", "euclidean_pearson", "euclidean_spearman", "manhattan_pearson", "manhattan_spearman", "dot_pearson", "dot_spearman"]

    @classmethod
    def from_input_examples(cls, examples: List[InputExample], **kwargs):
        """Build an evaluator from a list of InputExample pairs (texts[0], texts[1], label)."""
        sentences1 = []
        sentences2 = []
        scores = []
        for example in examples:
            sentences1.append(example.texts[0])
            sentences2.append(example.texts[1])
            scores.append(example.label)
        return cls(sentences1, sentences2, scores, **kwargs)

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """
        Encode both sentence columns and report Pearson/Spearman correlations of
        cosine, Manhattan, Euclidean and dot-product similarities with the gold scores.

        :return: the Spearman correlation of main_similarity (or the best one if None)
        """
        if epoch != -1:
            if steps == -1:
                out_txt = " after epoch {}:".format(epoch)
            else:
                out_txt = " in epoch {} after {} steps:".format(epoch, steps)
        else:
            out_txt = ":"
        logging.info("Evaluation the model on " + self.name + " dataset" + out_txt)
        embeddings1 = model.encode(self.sentences1, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True)
        embeddings2 = model.encode(self.sentences2, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True)
        labels = self.scores
        # Distances are negated so that for every metric "larger == more similar",
        # which keeps the correlation signs comparable across metrics.
        cosine_scores = 1 - (paired_cosine_distances(embeddings1, embeddings2))
        manhattan_distances = -paired_manhattan_distances(embeddings1, embeddings2)
        euclidean_distances = -paired_euclidean_distances(embeddings1, embeddings2)
        dot_products = [np.dot(emb1, emb2) for emb1, emb2 in zip(embeddings1, embeddings2)]
        eval_pearson_cosine, _ = pearsonr(labels, cosine_scores)
        eval_spearman_cosine, _ = spearmanr(labels, cosine_scores)
        eval_pearson_manhattan, _ = pearsonr(labels, manhattan_distances)
        eval_spearman_manhattan, _ = spearmanr(labels, manhattan_distances)
        eval_pearson_euclidean, _ = pearsonr(labels, euclidean_distances)
        eval_spearman_euclidean, _ = spearmanr(labels, euclidean_distances)
        eval_pearson_dot, _ = pearsonr(labels, dot_products)
        eval_spearman_dot, _ = spearmanr(labels, dot_products)
        logging.info("Cosine-Similarity :\tPearson: {:.4f}\tSpearman: {:.4f}".format(
            eval_pearson_cosine, eval_spearman_cosine))
        logging.info("Manhattan-Distance:\tPearson: {:.4f}\tSpearman: {:.4f}".format(
            eval_pearson_manhattan, eval_spearman_manhattan))
        logging.info("Euclidean-Distance:\tPearson: {:.4f}\tSpearman: {:.4f}".format(
            eval_pearson_euclidean, eval_spearman_euclidean))
        logging.info("Dot-Product-Similarity:\tPearson: {:.4f}\tSpearman: {:.4f}".format(
            eval_pearson_dot, eval_spearman_dot))
        if output_path is not None:
            csv_path = os.path.join(output_path, self.csv_file)
            output_file_exists = os.path.isfile(csv_path)
            # newline='' per the csv docs; without it the writer emits blank rows on Windows.
            with open(csv_path, mode="a" if output_file_exists else 'w', encoding="utf-8", newline="") as f:
                writer = csv.writer(f)
                if not output_file_exists:
                    writer.writerow(self.csv_headers)
                writer.writerow([epoch, steps, eval_pearson_cosine, eval_spearman_cosine, eval_pearson_euclidean,
                                 eval_spearman_euclidean, eval_pearson_manhattan, eval_spearman_manhattan, eval_pearson_dot, eval_spearman_dot])
        if self.main_similarity == SimilarityFunction.COSINE:
            return eval_spearman_cosine
        elif self.main_similarity == SimilarityFunction.EUCLIDEAN:
            return eval_spearman_euclidean
        elif self.main_similarity == SimilarityFunction.MANHATTAN:
            return eval_spearman_manhattan
        elif self.main_similarity == SimilarityFunction.DOT_PRODUCT:
            return eval_spearman_dot
        elif self.main_similarity is None:
            return max(eval_spearman_cosine, eval_spearman_manhattan, eval_spearman_euclidean, eval_spearman_dot)
        else:
            raise ValueError("Unknown main_similarity value")
| 6,243 | 46.30303 | 205 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/evaluation/InformationRetrievalEvaluator.py | from . import SentenceEvaluator, SimilarityFunction
import torch
from torch.utils.data import DataLoader
import logging
from tqdm import tqdm
from sentence_transformers.util import batch_to_device, pytorch_cos_sim
import os
import csv
import numpy as np
from typing import List, Tuple, Dict, Set
from collections import defaultdict
import queue
class InformationRetrievalEvaluator(SentenceEvaluator):
    """
    This class evaluates an Information Retrieval (IR) setting.

    Given a set of queries and a large corpus set. It will retrieve for each query the top-k most similar document. It measures
    Mean Reciprocal Rank (MRR), Recall@k, and Normalized Discounted Cumulative Gain (NDCG)
    """
    # NOTE(review): the list defaults below are shared mutable default arguments;
    # they appear to be read-only in this class, but confirm no caller mutates them.
    def __init__(self,
                 queries: Dict[str, str],  #qid => query
                 corpus: Dict[str, str],  #cid => doc
                 relevant_docs: Dict[str, Set[str]],  #qid => Set[cid]
                 query_chunk_size: int = 1000,
                 corpus_chunk_size: int = 500000,
                 mrr_at_k: List[int] = [10],
                 ndcg_at_k: List[int] = [10],
                 accuracy_at_k: List[int] = [1, 3, 5, 10],
                 precision_recall_at_k: List[int] = [1, 3, 5, 10],
                 map_at_k: List[int] = [100],
                 show_progress_bar: bool = False,
                 batch_size: int = 32,
                 name: str = ''):
        # Keep only queries that have at least one relevant document.
        self.queries_ids = []
        for qid in queries:
            if qid in relevant_docs and len(relevant_docs[qid]) > 0:
                self.queries_ids.append(qid)
        self.queries = [queries[qid] for qid in self.queries_ids]
        self.corpus_ids = list(corpus.keys())
        self.corpus = [corpus[cid] for cid in self.corpus_ids]
        self.relevant_docs = relevant_docs
        self.query_chunk_size = query_chunk_size
        self.corpus_chunk_size = corpus_chunk_size
        self.mrr_at_k = mrr_at_k
        self.ndcg_at_k = ndcg_at_k
        self.accuracy_at_k = accuracy_at_k
        self.precision_recall_at_k = precision_recall_at_k
        self.map_at_k = map_at_k
        self.show_progress_bar = show_progress_bar
        self.batch_size = batch_size
        self.name = name
        if name:
            name = "_" + name
        self.csv_file: str = "Information-Retrieval_evaluation" + name + "_results.csv"
        # CSV header order must match the order output_data is filled in __call__.
        self.csv_headers = ["epoch", "steps"]
        for k in accuracy_at_k:
            self.csv_headers.append("Accuracy@{}".format(k))
        for k in precision_recall_at_k:
            self.csv_headers.append("Precision@{}".format(k))
            self.csv_headers.append("Recall@{}".format(k))
        for k in mrr_at_k:
            self.csv_headers.append("MRR@{}".format(k))
        for k in ndcg_at_k:
            self.csv_headers.append("NDCG@{}".format(k))
        for k in map_at_k:
            self.csv_headers.append("MAP@{}".format(k))

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """
        Encode queries and corpus, retrieve the top-max_k documents per query in
        chunks, accumulate Accuracy/Precision/Recall/MRR/NDCG/MAP at the configured
        cutoffs, log everything, optionally append a CSV row, and return MAP at the
        largest configured k.
        """
        if epoch != -1:
            out_txt = " after epoch {}:".format(epoch) if steps == -1 else " in epoch {} after {} steps:".format(epoch, steps)
        else:
            out_txt = ":"
        logging.info("Information Retrieval Evaluation on " + self.name + " dataset" + out_txt)
        # Largest cutoff over all metrics: one retrieval pass serves every metric.
        max_k = max(max(self.mrr_at_k), max(self.ndcg_at_k), max(self.accuracy_at_k), max(self.precision_recall_at_k), max(self.map_at_k))
        # Compute embedding for the queries
        query_embeddings = model.encode(self.queries, show_progress_bar=self.show_progress_bar, batch_size=self.batch_size, convert_to_tensor=True)
        #Init score computation values
        num_hits_at_k = {k: 0 for k in self.accuracy_at_k}
        precisions_at_k = {k: [] for k in self.precision_recall_at_k}
        recall_at_k = {k: [] for k in self.precision_recall_at_k}
        MRR = {k: 0 for k in self.mrr_at_k}
        ndcg = {k: [] for k in self.ndcg_at_k}
        AveP_at_k = {k: [] for k in self.map_at_k}
        #Compute embedding for the corpus
        corpus_embeddings = model.encode(self.corpus, show_progress_bar=self.show_progress_bar, batch_size=self.batch_size, convert_to_tensor=True)
        for query_start_idx in range(0, len(query_embeddings), self.query_chunk_size):
            query_end_idx = min(query_start_idx + self.query_chunk_size, len(query_embeddings))
            queries_result_list = [[] for _ in range(query_start_idx, query_end_idx)]
            #Iterate over chunks of the corpus
            for corpus_start_idx in range(0, len(corpus_embeddings), self.corpus_chunk_size):
                corpus_end_idx = min(corpus_start_idx + self.corpus_chunk_size, len(corpus_embeddings))
                #Compute cosine similarites
                cos_scores = pytorch_cos_sim(query_embeddings[query_start_idx:query_end_idx], corpus_embeddings[corpus_start_idx:corpus_end_idx]).cpu()
                #Get top-k values
                # NOTE(review): the "-1" caps k at chunk_size-1, so one candidate per
                # query is never retrieved when the corpus chunk holds <= max_k
                # documents — looks like an off-by-one; confirm intended.
                cos_scores_top_k_values, cos_scores_top_k_idx = torch.topk(cos_scores, min(max_k, len(cos_scores[0]) - 1), dim=1, largest=True, sorted=False)
                cos_scores_top_k_values = cos_scores_top_k_values.tolist()
                cos_scores_top_k_idx = cos_scores_top_k_idx.tolist()
                for query_itr in range(len(cos_scores)):
                    for sub_corpus_id, score in zip(cos_scores_top_k_idx[query_itr], cos_scores_top_k_values[query_itr]):
                        # Map chunk-local index back to the global corpus id.
                        corpus_id = self.corpus_ids[corpus_start_idx+sub_corpus_id]
                        queries_result_list[query_itr].append({'corpus_id': corpus_id, 'score': score})
            for query_itr in range(len(queries_result_list)):
                query_id = self.queries_ids[query_start_idx + query_itr]
                #Sort scores
                top_hits = sorted(queries_result_list[query_itr], key=lambda x: x['score'], reverse=True)
                query_relevant_docs = self.relevant_docs[query_id]
                #Accuracy@k - We count the result correct, if at least one relevant doc is accross the top-k documents
                for k_val in self.accuracy_at_k:
                    for hit in top_hits[0:k_val]:
                        if hit['corpus_id'] in query_relevant_docs:
                            num_hits_at_k[k_val] += 1
                            break
                #Precision and Recall@k
                for k_val in self.precision_recall_at_k:
                    num_correct = 0
                    for hit in top_hits[0:k_val]:
                        if hit['corpus_id'] in query_relevant_docs:
                            num_correct += 1
                    precisions_at_k[k_val].append(num_correct / k_val)
                    recall_at_k[k_val].append(num_correct / len(query_relevant_docs))
                #MRR@k - reciprocal rank of the first relevant hit
                for k_val in self.mrr_at_k:
                    for rank, hit in enumerate(top_hits[0:k_val]):
                        if hit['corpus_id'] in query_relevant_docs:
                            MRR[k_val] += 1.0 / (rank + 1)
                            break
                #NDCG@k - DCG of binary relevances, normalized by the ideal DCG
                for k_val in self.ndcg_at_k:
                    predicted_relevance = [1 if top_hit['corpus_id'] in query_relevant_docs else 0 for top_hit in top_hits[0:k_val]]
                    true_relevances = [1] * len(query_relevant_docs)
                    ndcg_value = self.compute_dcg_at_k(predicted_relevance, k_val) / self.compute_dcg_at_k(true_relevances, k_val)
                    ndcg[k_val].append(ndcg_value)
                #MAP@k - mean of precision values at each relevant hit's rank
                for k_val in self.map_at_k:
                    num_correct = 0
                    sum_precisions = 0
                    for rank, hit in enumerate(top_hits[0:k_val]):
                        if hit['corpus_id'] in query_relevant_docs:
                            num_correct += 1
                            sum_precisions += num_correct / (rank+1)
                    avg_precision = sum_precisions / min(k_val, len(query_relevant_docs))
                    AveP_at_k[k_val].append(avg_precision)
        #Compute averages over all queries
        for k in num_hits_at_k:
            num_hits_at_k[k] /= len(self.queries)
        for k in precisions_at_k:
            precisions_at_k[k] = np.mean(precisions_at_k[k])
        for k in recall_at_k:
            recall_at_k[k] = np.mean(recall_at_k[k])
        for k in ndcg:
            ndcg[k] = np.mean(ndcg[k])
        for k in MRR:
            MRR[k] /= len(self.queries)
        for k in AveP_at_k:
            AveP_at_k[k] = np.mean(AveP_at_k[k])
        #Output
        for k in num_hits_at_k:
            logging.info("Accuracy@{}: {:.2f}%".format(k, num_hits_at_k[k]*100))
        for k in precisions_at_k:
            logging.info("Precision@{}: {:.2f}%".format(k, precisions_at_k[k]*100))
        for k in recall_at_k:
            logging.info("Recall@{}: {:.2f}%".format(k, recall_at_k[k]*100))
        for k in MRR:
            logging.info("MRR@{}: {:.4f}".format(k, MRR[k]))
        for k in ndcg:
            logging.info("NDCG@{}: {:.4f}".format(k, ndcg[k]))
        for k in AveP_at_k:
            logging.info("MAP@{}: {:.4f}".format(k, AveP_at_k[k]))
        logging.info("Queries: {}".format(len(self.queries)))
        logging.info("Corpus: {}\n".format(len(self.corpus)))
        if output_path is not None:
            csv_path = os.path.join(output_path, self.csv_file)
            if not os.path.isfile(csv_path):
                fOut = open(csv_path, mode="w", encoding="utf-8")
                fOut.write(",".join(self.csv_headers))
                fOut.write("\n")
            else:
                fOut = open(csv_path, mode="a", encoding="utf-8")
            # Fill order must mirror the csv_headers construction in __init__.
            output_data = [epoch, steps]
            for k in self.accuracy_at_k:
                output_data.append(num_hits_at_k[k])
            for k in self.precision_recall_at_k:
                output_data.append(precisions_at_k[k])
                output_data.append(recall_at_k[k])
            for k in self.mrr_at_k:
                output_data.append(MRR[k])
            for k in self.ndcg_at_k:
                output_data.append(ndcg[k])
            for k in self.map_at_k:
                output_data.append(AveP_at_k[k])
            fOut.write(",".join(map(str,output_data)))
            fOut.write("\n")
            fOut.close()
        return AveP_at_k[max(self.map_at_k)]

    @staticmethod
    def compute_dcg_at_k(relevances, k):
        """Discounted cumulative gain of the first k relevance values (log2 discount)."""
        dcg = 0
        for i in range(min(len(relevances), k)):
            dcg += relevances[i] / np.log2(i + 2)  #+2 as we start our idx at 0
        return dcg
| 10,636 | 36.586572 | 157 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/evaluation/TranslationEvaluator.py | from . import SentenceEvaluator
import logging
from ..util import pytorch_cos_sim
import os
import csv
import numpy as np
import scipy.spatial
from typing import List
import torch
class TranslationEvaluator(SentenceEvaluator):
    """
    Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3...) and (fr_1, fr_2, fr_3, ...),
    and assuming that fr_i is the translation of en_i.
    Checks if vec(en_i) has the highest similarity to vec(fr_i). Computes the accuracy in both directions.
    """

    def __init__(self, source_sentences: List[str], target_sentences: List[str], show_progress_bar: bool = False, batch_size: int = 16, name: str = '', print_wrong_matches: bool = False):
        """
        Constructs an evaluator for the dataset.

        :param source_sentences: List of sentences in the source language
        :param target_sentences: List of sentences in the target language; target_sentences[i] must be the translation of source_sentences[i]
        :param show_progress_bar: Show a progress bar while encoding
        :param batch_size: Batch size used when computing sentence embeddings
        :param name: Optional name, used in log output and the results CSV filename
        :param print_wrong_matches: Prints incorrect matches
        """
        self.source_sentences = source_sentences
        self.target_sentences = target_sentences
        self.name = name
        self.batch_size = batch_size
        self.show_progress_bar = show_progress_bar
        self.print_wrong_matches = print_wrong_matches

        # Sentences must be aligned pairwise
        assert len(self.source_sentences) == len(self.target_sentences)

        if name:
            name = "_"+name

        self.csv_file = "translation_evaluation"+name+"_results.csv"
        self.csv_headers = ["epoch", "steps", "src2trg", "trg2src"]

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """Compute matching accuracy in both directions; returns the mean of src2trg and trg2src accuracy."""
        if epoch != -1:
            if steps == -1:
                out_txt = " after epoch {}:".format(epoch)
            else:
                out_txt = " in epoch {} after {} steps:".format(epoch, steps)
        else:
            out_txt = ":"

        logging.info("Evaluating translation matching Accuracy on "+self.name+" dataset"+out_txt)

        # Encode both sides and compute the full pairwise cosine-similarity matrix
        embeddings1 = torch.stack(model.encode(self.source_sentences, show_progress_bar=self.show_progress_bar, batch_size=self.batch_size, convert_to_numpy=False))
        embeddings2 = torch.stack(model.encode(self.target_sentences, show_progress_bar=self.show_progress_bar, batch_size=self.batch_size, convert_to_numpy=False))

        cos_sims = pytorch_cos_sim(embeddings1, embeddings2).detach().cpu().numpy()

        correct_src2trg = 0
        correct_trg2src = 0

        # Source -> target direction: row i should have its maximum at column i
        for i in range(len(cos_sims)):
            max_idx = np.argmax(cos_sims[i])

            if i == max_idx:
                correct_src2trg += 1
            elif self.print_wrong_matches:
                print("i:", i, "j:", max_idx, "INCORRECT" if i != max_idx else "CORRECT")
                print("Src:", self.source_sentences[i])
                print("Trg:", self.target_sentences[max_idx])
                print("Argmax score:", cos_sims[i][max_idx], "vs. correct score:", cos_sims[i][i])

                # Show the 5 highest-scoring target candidates for debugging
                results = zip(range(len(cos_sims[i])), cos_sims[i])
                results = sorted(results, key=lambda x: x[1], reverse=True)
                for idx, score in results[0:5]:
                    print("\t", idx, "(Score: %.4f)" % (score), self.target_sentences[idx])

        # Target -> source direction: transpose the matrix and repeat the argmax check
        cos_sims = cos_sims.T
        for i in range(len(cos_sims)):
            max_idx = np.argmax(cos_sims[i])
            if i == max_idx:
                correct_trg2src += 1

        acc_src2trg = correct_src2trg / len(cos_sims)
        acc_trg2src = correct_trg2src / len(cos_sims)

        logging.info("Accuracy src2trg: {:.2f}".format(acc_src2trg*100))
        logging.info("Accuracy trg2src: {:.2f}".format(acc_trg2src*100))

        if output_path is not None:
            # Append results to the CSV; write the header row only on file creation
            csv_path = os.path.join(output_path, self.csv_file)
            output_file_exists = os.path.isfile(csv_path)
            with open(csv_path, mode="a" if output_file_exists else 'w', encoding="utf-8") as f:
                writer = csv.writer(f)
                if not output_file_exists:
                    writer.writerow(self.csv_headers)
                writer.writerow([epoch, steps, acc_src2trg, acc_trg2src])

        return (acc_src2trg+acc_trg2src)/2
| 4,278 | 39.367925 | 188 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/evaluation/MSEEvaluatorFromDataFrame.py | from sentence_transformers.evaluation import SentenceEvaluator
from sentence_transformers.util import batch_to_device
from sentence_transformers import SentenceTransformer
from typing import List, Tuple, Dict
import torch
import numpy as np
import logging
import os
import csv
class MSEEvaluatorFromDataFrame(SentenceEvaluator):
    """
    Computes the mean squared error (x100) between the computed sentence embedding
    and some target sentence embedding.

    :param dataframe:
        It must have the following format. Rows contain different, parallel sentences. Columns are the respective language codes
        [{'en': 'My sentence', 'es': 'Sentence in Spanish', 'fr': 'Sentence in French'...},
         {'en': 'My second sentence', ....]
    :param combinations:
        Must be of the format [('en', 'es'), ('en', 'fr'), ...]
        First entry in a tuple is the source language. The sentence in the respective language will be fetched from the dataframe and passed to the teacher model.
        Second entry in a tuple is the target language. Sentence will be fetched from the dataframe and passed to the student model
    """

    def __init__(self, dataframe: List[Dict[str, str]], teacher_model: SentenceTransformer, combinations: List[Tuple[str, str]], batch_size: int = 8, name=''):
        self.combinations = combinations
        self.name = name
        self.batch_size = batch_size

        if name:
            name = "_"+name

        self.csv_file = "mse_evaluation" + name + "_results.csv"
        self.csv_headers = ["epoch", "steps"]

        # Maps (src_lang, trg_lang) -> (src_sentences, trg_sentences), aligned pairwise
        self.data = {}

        logging.info("Compute teacher embeddings")
        all_source_sentences = set()
        for src_lang, trg_lang in self.combinations:
            src_sentences = []
            trg_sentences = []

            # Keep only rows where both languages have non-empty text
            for row in dataframe:
                if row[src_lang].strip() != "" and row[trg_lang].strip() != "":
                    all_source_sentences.add(row[src_lang])
                    src_sentences.append(row[src_lang])
                    trg_sentences.append(row[trg_lang])

            self.data[(src_lang, trg_lang)] = (src_sentences, trg_sentences)
            self.csv_headers.append("{}-{}".format(src_lang, trg_lang))

        # Encode each distinct source sentence once with the teacher and cache by text
        all_source_sentences = list(all_source_sentences)
        all_src_embeddings = teacher_model.encode(all_source_sentences, batch_size=self.batch_size)
        self.teacher_embeddings = {sent: emb for sent, emb in zip(all_source_sentences, all_src_embeddings)}

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1):
        model.eval()

        mse_scores = []
        for src_lang, trg_lang in self.combinations:
            src_sentences, trg_sentences = self.data[(src_lang, trg_lang)]

            # Teacher embeddings come from the cache; student (model) encodes the target side
            src_embeddings = np.asarray([self.teacher_embeddings[sent] for sent in src_sentences])
            trg_embeddings = np.asarray(model.encode(trg_sentences, batch_size=self.batch_size))

            mse = ((src_embeddings - trg_embeddings) ** 2).mean()
            mse *= 100
            mse_scores.append(mse)

            logging.info("MSE evaluation on {} dataset - {}-{}:".format(self.name, src_lang, trg_lang))
            logging.info("MSE (*100):\t{:4f}".format(mse))

        if output_path is not None:
            # Append one row per call; write the header only on file creation
            csv_path = os.path.join(output_path, self.csv_file)
            output_file_exists = os.path.isfile(csv_path)
            with open(csv_path, mode="a" if output_file_exists else 'w', encoding="utf-8") as f:
                writer = csv.writer(f)
                if not output_file_exists:
                    writer.writerow(self.csv_headers)

                writer.writerow([epoch, steps]+mse_scores)

        return -np.mean(mse_scores) #Return negative score as SentenceTransformers maximizes the performance
| 3,814 | 42.850575 | 162 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/evaluation/LabelAccuracyEvaluator.py | from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from tqdm import tqdm
from ..util import batch_to_device
import os
import csv
class LabelAccuracyEvaluator(SentenceEvaluator):
    """
    Evaluate a model based on its accuracy on a labeled dataset.

    This requires a model with LossFunction.SOFTMAX.

    The results are written in a CSV. If a CSV already exists, then values are appended.
    """

    def __init__(self, dataloader: DataLoader, name: str = "", softmax_model = None):
        """
        Constructs an evaluator for the given dataset.

        :param dataloader: the data for the evaluation
        :param name: optional name, appended to the results CSV filename
        :param softmax_model: the softmax classification head applied on top of the
            sentence embeddings; moved to the evaluated model's device in __call__
        """
        self.dataloader = dataloader
        self.name = name
        self.softmax_model = softmax_model
        # Bug fix: the original called self.softmax_model.to(self.device) here, but
        # `self.device` is never defined on this evaluator, raising AttributeError
        # on construction. Device placement now happens in __call__, where the
        # evaluated model (and thus its device) is available.

        if name:
            name = "_"+name

        self.csv_file = "accuracy_evaluation"+name+"_results.csv"
        self.csv_headers = ["epoch", "steps", "accuracy"]

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """Run the evaluation, log the accuracy, optionally append it to the CSV, and return it."""
        model.eval()
        total = 0
        correct = 0

        if epoch != -1:
            if steps == -1:
                out_txt = " after epoch {}:".format(epoch)
            else:
                out_txt = " in epoch {} after {} steps:".format(epoch, steps)
        else:
            out_txt = ":"

        logging.info("Evaluation on the "+self.name+" dataset"+out_txt)

        # Place the classification head on the same device as the evaluated model
        # (model.device is also used below for the batches).
        self.softmax_model.to(model.device)

        self.dataloader.collate_fn = model.smart_batching_collate
        for step, batch in enumerate(tqdm(self.dataloader, desc="Evaluating")):
            features, label_ids = batch_to_device(batch, model.device)
            with torch.no_grad():
                _, prediction = self.softmax_model(features, labels=None)

            total += prediction.size(0)
            correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
        accuracy = correct/total

        logging.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))

        if output_path is not None:
            # Append one row per call; write the header only when creating the file
            # (same pattern as the other evaluators in this package).
            csv_path = os.path.join(output_path, self.csv_file)
            file_exists = os.path.isfile(csv_path)
            with open(csv_path, mode="a" if file_exists else "w", encoding="utf-8") as f:
                writer = csv.writer(f)
                if not file_exists:
                    writer.writerow(self.csv_headers)
                writer.writerow([epoch, steps, accuracy])

        return accuracy
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/evaluation/TripletEvaluator.py | from . import SentenceEvaluator, SimilarityFunction
import torch
from torch.utils.data import DataLoader
import logging
from tqdm import tqdm
from ..util import batch_to_device
import os
import csv
from sklearn.metrics.pairwise import paired_cosine_distances, paired_euclidean_distances, paired_manhattan_distances
from typing import List
from ..readers import InputExample
class TripletEvaluator(SentenceEvaluator):
    """
    Evaluate a model based on a triplet: (sentence, positive_example, negative_example).
    Checks if distance(sentence, positive_example) < distance(sentence, negative_example).
    """

    def __init__(self, anchors: List[str], positives: List[str], negatives: List[str], main_distance_function: SimilarityFunction = None, name: str = '', batch_size: int = 16, show_progress_bar: bool = False):
        """
        Constructs an evaluator for the dataset.

        :param anchors: anchor sentences; anchors[i], positives[i], negatives[i] form one triplet
        :param positives: sentences that should be closer to the anchors than the negatives
        :param negatives: sentences that should be farther from the anchors than the positives
        :param main_distance_function: the distance metric used for the returned score;
            if None, the best of the three accuracies (cosine/Manhattan/Euclidean) is returned
        :param name: optional name, used in log output and the results CSV filename
        :param batch_size: batch size used when computing sentence embeddings
        :param show_progress_bar: show a progress bar while encoding
        """
        self.anchors = anchors
        self.positives = positives
        self.negatives = negatives
        self.name = name

        # Triplets must be aligned index-wise
        assert len(self.anchors) == len(self.positives)
        assert len(self.anchors) == len(self.negatives)

        self.main_distance_function = main_distance_function

        self.batch_size = batch_size
        if show_progress_bar is None:
            show_progress_bar = (logging.getLogger().getEffectiveLevel() == logging.INFO or logging.getLogger().getEffectiveLevel() == logging.DEBUG)
        self.show_progress_bar = show_progress_bar

        self.csv_file: str = "triplet_evaluation"+("_"+name if name else '')+"_results.csv"
        self.csv_headers = ["epoch", "steps", "accuracy_cosinus", "accuracy_manhatten", "accuracy_euclidean"]

    @classmethod
    def from_input_examples(cls, examples: List[InputExample], **kwargs):
        """Build a TripletEvaluator from InputExamples whose texts are (anchor, positive, negative)."""
        anchors = []
        positives = []
        negatives = []

        for example in examples:
            anchors.append(example.texts[0])
            positives.append(example.texts[1])
            negatives.append(example.texts[2])
        return cls(anchors, positives, negatives, **kwargs)

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """Compute triplet accuracy under three distance metrics and return the configured one."""
        if epoch != -1:
            if steps == -1:
                out_txt = " after epoch {}:".format(epoch)
            else:
                out_txt = " in epoch {} after {} steps:".format(epoch, steps)
        else:
            out_txt = ":"

        logging.info("TripletEvaluator: Evaluating the model on "+self.name+" dataset"+out_txt)

        num_triplets = 0
        num_correct_cos_triplets, num_correct_manhatten_triplets, num_correct_euclidean_triplets = 0, 0, 0

        embeddings_anchors = model.encode(self.anchors, batch_size=self.batch_size,
                                          show_progress_bar=self.show_progress_bar, convert_to_numpy=True)
        embeddings_positives = model.encode(self.positives, batch_size=self.batch_size,
                                            show_progress_bar=self.show_progress_bar, convert_to_numpy=True)
        embeddings_negatives = model.encode(self.negatives, batch_size=self.batch_size,
                                            show_progress_bar=self.show_progress_bar, convert_to_numpy=True)

        # Pairwise distances per metric: element i compares triplet i
        #Cosine distance
        pos_cos_distance = paired_cosine_distances(embeddings_anchors, embeddings_positives)
        neg_cos_distances = paired_cosine_distances(embeddings_anchors, embeddings_negatives)

        # Manhatten
        pos_manhatten_distance = paired_manhattan_distances(embeddings_anchors, embeddings_positives)
        neg_manhatten_distances = paired_manhattan_distances(embeddings_anchors, embeddings_negatives)

        # Euclidean
        pos_euclidean_distance = paired_euclidean_distances(embeddings_anchors, embeddings_positives)
        neg_euclidean_distances = paired_euclidean_distances(embeddings_anchors, embeddings_negatives)

        # A triplet counts as correct when the positive is strictly closer than the negative
        for idx in range(len(pos_cos_distance)):
            num_triplets += 1

            if pos_cos_distance[idx] < neg_cos_distances[idx]:
                num_correct_cos_triplets += 1

            if pos_manhatten_distance[idx] < neg_manhatten_distances[idx]:
                num_correct_manhatten_triplets += 1

            if pos_euclidean_distance[idx] < neg_euclidean_distances[idx]:
                num_correct_euclidean_triplets += 1

        accuracy_cos = num_correct_cos_triplets / num_triplets
        accuracy_manhatten = num_correct_manhatten_triplets / num_triplets
        accuracy_euclidean = num_correct_euclidean_triplets / num_triplets

        logging.info("Accuracy Cosine Distance:   \t{:.2f}".format(accuracy_cos*100))
        logging.info("Accuracy Manhatten Distance:\t{:.2f}".format(accuracy_manhatten*100))
        logging.info("Accuracy Euclidean Distance:\t{:.2f}\n".format(accuracy_euclidean*100))

        if output_path is not None:
            csv_path = os.path.join(output_path, self.csv_file)
            if not os.path.isfile(csv_path):
                with open(csv_path, mode="w", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow(self.csv_headers)
                    writer.writerow([epoch, steps, accuracy_cos, accuracy_manhatten, accuracy_euclidean])
            else:
                with open(csv_path, mode="a", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow([epoch, steps, accuracy_cos, accuracy_manhatten, accuracy_euclidean])

        # Return the requested metric, or the best of the three when none is configured
        if self.main_distance_function == SimilarityFunction.COSINE:
            return accuracy_cos
        if self.main_distance_function == SimilarityFunction.MANHATTAN:
            return accuracy_manhatten
        if self.main_distance_function == SimilarityFunction.EUCLIDEAN:
            return accuracy_euclidean

        return max(accuracy_cos, accuracy_manhatten, accuracy_euclidean)
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/Transformer.py | from torch import nn
from transformers import AutoModel, AutoTokenizer, AutoConfig, BertModel, BertConfig
import json
from typing import List, Dict, Optional
import os
import gluonnlp as nlp
from kobert.utils import get_tokenizer
from kobert.pytorch_kobert import get_pytorch_kobert_model
import torch
class Transformer(nn.Module):
    """Huggingface AutoModel to generate token embeddings.
    Loads the correct class, e.g. BERT / RoBERTa etc.

    :param model_name_or_path: Huggingface models name (https://huggingface.co/models);
        on the KoBERT path (isKor=True with isLoad=True) it is instead the directory holding result.pt
    :param max_seq_length: Truncate any inputs longer than max_seq_length
    :param model_args: Arguments (key, value pairs) passed to the Huggingface Transformers model
    :param cache_dir: Cache dir for Huggingface Transformers to store/load models
    :param tokenizer_args: Arguments (key, value pairs) passed to the Huggingface Tokenizer model
    :param isKor: If True, load the SKT KoBERT model and its SentencePiece tokenizer
        (via kobert helpers) instead of a Huggingface AutoModel
    :param isLoad: KoBERT path only; if True, load weights from <model_name_or_path>/result.pt
    """
    def __init__(self, model_name_or_path: str, max_seq_length: int = 128,
                 model_args: Dict = {}, cache_dir: Optional[str] = None,
                 tokenizer_args: Dict = {}, isKor=False, isLoad=False):
        super(Transformer, self).__init__()
        self.config_keys = ['max_seq_length']
        self.max_seq_length = max_seq_length

        # for Korea BERT
        if isKor:
            bert_model, vocab = get_pytorch_kobert_model()
            tokenizer = get_tokenizer()
            bert_tokenizer = nlp.data.BERTSPTokenizer(tokenizer, vocab, lower=False)
            self.auto_model = bert_model
            self.tokenizer = bert_tokenizer
            self.vocab = vocab
            if isLoad:
                print("Load Model")
                self.auto_model.load_state_dict(torch.load(model_name_or_path+'/result.pt'))
        else:
            # Standard Huggingface path; note self.vocab is NOT set here, so the
            # KoBERT-specific methods below (tokenize, get_sentence_features)
            # only work with isKor=True.
            config = AutoConfig.from_pretrained(model_name_or_path, **model_args, cache_dir=cache_dir)
            self.auto_model = AutoModel.from_pretrained(model_name_or_path, config=config, cache_dir=cache_dir)
            self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path,
                                                           cache_dir=cache_dir,
                                                           **tokenizer_args)

    def forward(self, features):
        """Returns token_embeddings, cls_token"""
        output_states = self.auto_model(**features)
        output_tokens = output_states[0]

        cls_tokens = output_tokens[:, 0, :]  # CLS token is first token
        features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})

        if self.auto_model.config.output_hidden_states:
            all_layer_idx = 2
            if len(output_states) < 3:    #Some models only output last_hidden_states and all_hidden_states
                all_layer_idx = 1

            hidden_states = output_states[all_layer_idx]
            features.update({'all_layer_embeddings': hidden_states})

        return features

    def get_word_embedding_dimension(self) -> int:
        return self.auto_model.config.hidden_size

    def tokenize(self, text: str) -> List[int]:
        """
        Tokenizes a text and maps tokens to token-ids.

        NOTE: relies on self.vocab, which is only set on the KoBERT path (isKor=True).
        """
        tokens = self.tokenizer(text)
        tokens = [self.vocab.token_to_idx[token] for token in tokens]
        return tokens

    def get_segment_ids_vaild_len(self, inputs):
        # All-zero segment ids (single-sentence input) and a zero-filled valid-length vector.
        # NOTE(review): the valid lengths are all 0 here, so gen_attention_mask below
        # produces an all-zero attention mask — confirm this is intended.
        v_len_list = [0] * len(inputs)
        segment_ids = torch.zeros_like(inputs).long()
        valid_length = torch.tensor(v_len_list, dtype=torch.int32)
        return segment_ids, valid_length

    def gen_attention_mask(self, token_ids, valid_length):
        # Mark the first valid_length[i] positions of row i as attended (1), rest 0
        attention_mask = torch.zeros_like(token_ids)
        for i, v in enumerate(valid_length) : attention_mask[i][:v] = 1
        return attention_mask

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        """
        Convert tokenized sentence in its embedding ids, segment ids and mask

        :param tokens:
            a tokenized sentence
        :param pad_seq_length:
            the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
        :return: embedding ids, segment ids and mask for the sentence
        """
        # NOTE(review): pad_seq_length is computed below but never applied on this
        # path — no truncation or padding happens; confirm this is intended.
        pad_seq_length = min(pad_seq_length, self.max_seq_length) + 3 #Add space for special tokens

        cls_token = self.vocab.cls_token
        sep_token = self.vocab.sep_token
        sep_token_idx = self.vocab.token_to_idx[sep_token]
        cls_token_idx = self.vocab.token_to_idx[cls_token]

        # Wrap the token ids as [CLS] tokens [SEP]
        #input_sentence = [cls_token_idx] + tokens + [sep_token_idx]
        tokens = torch.cat([torch.tensor([cls_token_idx]), torch.tensor(tokens)], dim=-1)
        tokens = torch.cat([tokens, torch.tensor([sep_token_idx])], dim=-1)

        segment_ids, valid_len = self.get_segment_ids_vaild_len(tokens)
        attention_mask = self.gen_attention_mask(tokens, valid_len)

        # unsqueeze(0) adds the batch axis expected by the model
        result = {'input_ids':tokens.unsqueeze(0), 'token_type_ids':segment_ids.unsqueeze(0), 'attention_mask':attention_mask.unsqueeze(0)}
        return result
        #return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str):
        # Only the model weights are saved (as result.pt); the commented-out lines
        # are the original Huggingface save path, disabled for the KoBERT variant.
        torch.save(self.auto_model.state_dict(), os.path.join(output_path+'/result.pt'))
        #self.auto_model.save_pretrained(output_path)
        #self.tokenizer.save_pretrained(output_path)

        #with open(os.path.join(output_path, 'sentence_bert_config.json'), 'w') as fOut:
        #    json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path: str):
        # Loads the KoBERT variant with weights from <input_path>/result.pt
        #with open(os.path.join(input_path, 'sentence_bert_config.json')) as fIn:
        #    config = json.load(fIn)
        return Transformer(model_name_or_path=input_path, isKor=True, isLoad=True)
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/WeightedLayerPooling.py | import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
import numpy as np
import torch.nn.functional as F
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
class WeightedLayerPooling(nn.Module):
    """Token embeddings are a weighted mean of their different hidden-layer representations.

    :param word_embedding_dimension: dimension of the token embeddings (reported by
        get_word_embedding_dimension; not enforced)
    :param num_hidden_layers: number of transformer layers (the embedding output counts as layer 0)
    :param layer_start: first layer (inclusive) taking part in the weighted average
    :param layer_weights: optional pre-set weight tensor; defaults to uniform trainable weights
    """

    def __init__(self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights = None):
        super(WeightedLayerPooling, self).__init__()
        self.config_keys = ['word_embedding_dimension', 'layer_start', 'num_hidden_layers']
        self.word_embedding_dimension = word_embedding_dimension
        self.layer_start = layer_start
        self.num_hidden_layers = num_hidden_layers
        if layer_weights is None:
            # One uniform, trainable weight per layer from layer_start through the last layer
            layer_weights = nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
        self.layer_weights = layer_weights

    def forward(self, features: Dict[str, Tensor]):
        # Stack per-layer token embeddings: (num_layers, batch, seq_len, dim),
        # then drop everything below layer_start.
        stacked = torch.stack(features['all_layer_embeddings'])
        stacked = stacked[self.layer_start:]

        # Broadcast one scalar weight over each layer's (batch, seq_len, dim) block
        # and normalize by the total weight.
        per_layer_weight = self.layer_weights.view(-1, 1, 1, 1)
        weighted_average = (per_layer_weight * stacked).sum(dim=0) / self.layer_weights.sum()

        features.update({'token_embeddings': weighted_average})
        return features

    def get_word_embedding_dimension(self):
        return self.word_embedding_dimension

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path):
        config_path = os.path.join(output_path, 'config.json')
        with open(config_path, 'w') as config_file:
            json.dump(self.get_config_dict(), config_file, indent=2)
        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))

    @staticmethod
    def load(input_path):
        with open(os.path.join(input_path, 'config.json')) as config_file:
            config = json.load(config_file)

        model = WeightedLayerPooling(**config)
        state = torch.load(os.path.join(input_path, 'pytorch_model.bin'), map_location=torch.device('cpu'))
        model.load_state_dict(state)
        return model
| 2,396 | 40.327586 | 165 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/CNN.py | import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
import logging
import gzip
from tqdm import tqdm
import numpy as np
import os
import json
from ..util import import_from_string, fullname, http_get
from .tokenizer import WordTokenizer, WhitespaceTokenizer
class CNN(nn.Module):
    """CNN-layer with multiple kernel-sizes over the word embeddings.

    Runs one Conv1d per kernel size over the token embeddings and concatenates
    the outputs channel-wise, so the per-token output dimension is
    out_channels * len(kernel_sizes).
    """

    def __init__(self, in_word_embedding_dimension: int, out_channels: int = 256, kernel_sizes: List[int] = [1, 3, 5]):
        nn.Module.__init__(self)
        self.config_keys = ['in_word_embedding_dimension', 'out_channels', 'kernel_sizes']
        self.in_word_embedding_dimension = in_word_embedding_dimension
        self.out_channels = out_channels
        self.kernel_sizes = kernel_sizes

        self.embeddings_dimension = out_channels * len(kernel_sizes)

        # One convolution per kernel size; 'same'-style padding keeps the sequence length
        self.convs = nn.ModuleList(
            nn.Conv1d(in_channels=in_word_embedding_dimension,
                      out_channels=out_channels,
                      kernel_size=size,
                      padding=(size - 1) // 2)
            for size in kernel_sizes
        )

    def forward(self, features):
        # (batch, seq_len, dim) -> (batch, dim, seq_len), the layout Conv1d expects
        embeddings = features['token_embeddings'].transpose(1, -1)

        conv_outputs = [conv(embeddings) for conv in self.convs]

        # Concatenate along the channel axis, then restore (batch, seq_len, channels)
        features.update({'token_embeddings': torch.cat(conv_outputs, 1).transpose(1, -1)})
        return features

    def get_word_embedding_dimension(self) -> int:
        return self.embeddings_dimension

    def tokenize(self, text: str) -> List[int]:
        # Tokenization is handled by an upstream module (e.g. WordEmbeddings)
        raise NotImplementedError()

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str):
        with open(os.path.join(output_path, 'cnn_config.json'), 'w') as config_file:
            json.dump(self.get_config_dict(), config_file, indent=2)

        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'cnn_config.json'), 'r') as config_file:
            config = json.load(config_file)

        model = CNN(**config)
        model.load_state_dict(torch.load(os.path.join(input_path, 'pytorch_model.bin')))
        return model
| 2,462 | 34.695652 | 119 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/WordEmbeddings.py | import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
import logging
import gzip
from tqdm import tqdm
import numpy as np
import os
import json
from ..util import import_from_string, fullname, http_get
from .tokenizer import WordTokenizer, WhitespaceTokenizer
class WordEmbeddings(nn.Module):
    """Static word-embedding lookup layer (e.g. GloVe / word2vec style vectors).

    Maps token ids to fixed (optionally trainable) embedding vectors.
    """

    def __init__(self, tokenizer: WordTokenizer, embedding_weights, update_embeddings: bool = False, max_seq_length: int = 1000000):
        """
        :param tokenizer: WordTokenizer that maps text to token ids matching the embedding rows
        :param embedding_weights: embedding matrix as list / numpy array / torch tensor
        :param update_embeddings: if True, the embedding weights are trained further
        :param max_seq_length: token sequences are truncated to this length
        """
        nn.Module.__init__(self)
        if isinstance(embedding_weights, list):
            embedding_weights = np.asarray(embedding_weights)

        if isinstance(embedding_weights, np.ndarray):
            embedding_weights = torch.from_numpy(embedding_weights)

        num_embeddings, embeddings_dimension = embedding_weights.size()
        self.embeddings_dimension = embeddings_dimension
        self.emb_layer = nn.Embedding(num_embeddings, embeddings_dimension)
        self.emb_layer.load_state_dict({'weight': embedding_weights})
        # Freeze (or not) the embedding matrix
        self.emb_layer.weight.requires_grad = update_embeddings
        self.tokenizer = tokenizer
        self.update_embeddings = update_embeddings
        self.max_seq_length = max_seq_length

    def forward(self, features):
        token_embeddings = self.emb_layer(features['input_ids'])
        cls_tokens = None
        features.update({'token_embeddings': token_embeddings, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})
        return features

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        """Truncate and right-pad the token ids; returns input_ids, attention_mask and the true length."""
        pad_seq_length = min(pad_seq_length, self.max_seq_length)

        tokens = tokens[0:pad_seq_length] #Truncate tokens if needed
        input_ids = tokens

        sentence_length = len(input_ids)
        attention_mask = [1] * len(input_ids)
        # Right-pad with 0s up to pad_seq_length
        padding = [0] * (pad_seq_length - len(input_ids))
        input_ids += padding
        attention_mask += padding

        assert len(input_ids) == pad_seq_length
        assert len(attention_mask) == pad_seq_length

        return {'input_ids': torch.tensor([input_ids], dtype=torch.long),
                'attention_mask': torch.tensor([attention_mask], dtype=torch.long),
                'sentence_lengths': torch.tensor([sentence_length], dtype=torch.long)}

    def get_word_embedding_dimension(self) -> int:
        return self.embeddings_dimension

    def tokenize(self, text: str) -> List[int]:
        return self.tokenizer.tokenize(text)

    def save(self, output_path: str):
        with open(os.path.join(output_path, 'wordembedding_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))
        self.tokenizer.save(output_path)

    def get_config_dict(self):
        return {'tokenizer_class': fullname(self.tokenizer), 'update_embeddings': self.update_embeddings, 'max_seq_length': self.max_seq_length}

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'wordembedding_config.json'), 'r') as fIn:
            config = json.load(fIn)

        # Re-instantiate the tokenizer class recorded in the config
        tokenizer_class = import_from_string(config['tokenizer_class'])
        tokenizer = tokenizer_class.load(input_path)
        weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
        embedding_weights = weights['emb_layer.weight']
        model = WordEmbeddings(tokenizer=tokenizer, embedding_weights=embedding_weights, update_embeddings=config['update_embeddings'])
        return model

    @staticmethod
    def from_text_file(embeddings_file_path: str, update_embeddings: bool = False, item_separator: str = " ", tokenizer=WhitespaceTokenizer(), max_vocab_size: int = None):
        """Parse a (optionally gzipped) text embeddings file of 'word v1 v2 ...' lines.

        Downloads the file from the public UKP server when the path does not exist locally.
        NOTE: the default tokenizer instance is shared across calls (mutable default argument).
        """
        logging.info("Read in embeddings file {}".format(embeddings_file_path))

        if not os.path.exists(embeddings_file_path):
            logging.info("{} does not exist, try to download from server".format(embeddings_file_path))

            if '/' in embeddings_file_path or '\\' in embeddings_file_path:
                raise ValueError("Embeddings file not found: ".format(embeddings_file_path))

            url = "https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/"+embeddings_file_path
            http_get(url, embeddings_file_path)

        embeddings_dimension = None
        vocab = []
        embeddings = []

        with gzip.open(embeddings_file_path, "rt", encoding="utf8") if embeddings_file_path.endswith('.gz') else open(embeddings_file_path, encoding="utf8") as fIn:
            iterator = tqdm(fIn, desc="Load Word Embeddings", unit="Embeddings")
            for line in iterator:
                split = line.rstrip().split(item_separator)
                word = split[0]

                # First line determines the dimension; index 0 is reserved for padding
                if embeddings_dimension == None:
                    embeddings_dimension = len(split) - 1
                    vocab.append("PADDING_TOKEN")
                    embeddings.append(np.zeros(embeddings_dimension))

                if (len(split) - 1) != embeddings_dimension:  # Assure that all lines in the embeddings file are of the same length
                    logging.error("ERROR: A line in the embeddings file had more or less dimensions than expected. Skip token.")
                    continue

                vector = np.array([float(num) for num in split[1:]])
                embeddings.append(vector)
                vocab.append(word)

                if max_vocab_size is not None and max_vocab_size > 0 and len(vocab) > max_vocab_size:
                    break

        embeddings = np.asarray(embeddings)

        tokenizer.set_vocab(vocab)
        return WordEmbeddings(tokenizer=tokenizer, embedding_weights=embeddings, update_embeddings=update_embeddings)
| 5,784 | 43.844961 | 171 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/XLMRoBERTa.py | from torch import Tensor
from torch import nn
from transformers import XLMRobertaModel, XLMRobertaTokenizer
import json
from typing import Union, Tuple, List, Dict, Optional
import os
import numpy as np
import logging
class XLMRoBERTa(nn.Module):
    """DEPRECATED: Please use models.Transformer instead.

    XLM-RoBERTa model to generate token embeddings.

    Each token is mapped to an output vector from XLM-RoBERTa.
    """
    def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, model_args: Dict = {}, tokenizer_args: Dict = {}):
        """
        :param model_name_or_path: Huggingface model name or path to a saved model
        :param max_seq_length: truncate inputs longer than this (capped at the tokenizer maximum)
        :param do_lower_case: if not None, forwarded to the tokenizer as `do_lower_case`
        :param model_args: extra kwargs for XLMRobertaModel.from_pretrained
        :param tokenizer_args: extra kwargs for XLMRobertaTokenizer.from_pretrained
        """
        super(XLMRoBERTa, self).__init__()
        self.config_keys = ['max_seq_length', 'do_lower_case']
        self.do_lower_case = do_lower_case

        if self.do_lower_case is not None:
            # Bug fix: copy before modifying. Writing into the parameter directly
            # mutated the shared mutable default dict, leaking the setting into
            # every later instance constructed without explicit tokenizer_args.
            tokenizer_args = dict(tokenizer_args)
            tokenizer_args['do_lower_case'] = do_lower_case

        self.xlm_roberta = XLMRobertaModel.from_pretrained(model_name_or_path, **model_args)
        self.tokenizer = XLMRobertaTokenizer.from_pretrained(model_name_or_path, **tokenizer_args)

        if max_seq_length > self.tokenizer.max_len_single_sentence:
            # Bug fix: str() is required — max_len_single_sentence is an int, and
            # "..." + int raised TypeError whenever this branch was taken.
            logging.warning("XLM-RoBERTa only allows a max_seq_length of "+str(self.tokenizer.max_len_single_sentence))
            max_seq_length = self.tokenizer.max_len_single_sentence
        self.max_seq_length = max_seq_length

    def forward(self, features):
        """Returns token_embeddings, cls_token"""
        #RoBERTa does not use token_type_ids
        output_states = self.xlm_roberta(**features)
        output_tokens = output_states[0]
        cls_tokens = output_tokens[:, 0, :]  # CLS token is first token
        features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})

        if self.xlm_roberta.config.output_hidden_states:
            hidden_states = output_states[2]
            features.update({'all_layer_embeddings': hidden_states})

        return features

    def get_word_embedding_dimension(self) -> int:
        return self.xlm_roberta.config.hidden_size

    def tokenize(self, text: str) -> List[int]:
        """
        Tokenizes a text and maps tokens to token-ids
        """
        return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        """
        Convert tokenized sentence in its embedding ids, segment ids and mask

        :param tokens:
            a tokenized sentence
        :param pad_seq_length:
            the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
        :return: embedding ids, segment ids and mask for the sentence
        """
        pad_seq_length = min(pad_seq_length, self.max_seq_length) + 2 #Add space for special tokens
        return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str):
        self.xlm_roberta.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)

        with open(os.path.join(output_path, 'sentence_xlm-roberta_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'sentence_xlm-roberta_config.json')) as fIn:
            config = json.load(fIn)
        return XLMRoBERTa(model_name_or_path=input_path, **config)
| 3,655 | 39.175824 | 167 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/T5.py | from torch import nn
from transformers import T5Model, T5Tokenizer
import json
from typing import List, Dict, Optional
import os
import numpy as np
import logging
class T5(nn.Module):
    """DEPRECATED: Please use models.Transformer instead.

    T5 model to generate token embeddings.
    Each token is mapped to an output vector of the T5 encoder.
    """
    def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, task_identifier: str = 'stsb sentence1: ', model_args: Dict = {}, tokenizer_args: Dict = {}):
        """
        :param model_name_or_path: model name or local path passed to ``from_pretrained``
        :param max_seq_length: maximum input length; capped at 512
        :param do_lower_case: if not None, forwarded to the tokenizer as ``do_lower_case``
        :param task_identifier: text prefix prepended to every input (T5 task prompt)
        :param model_args: extra keyword arguments for ``T5Model.from_pretrained``
        :param tokenizer_args: extra keyword arguments for ``T5Tokenizer.from_pretrained``
        """
        super(T5, self).__init__()
        self.config_keys = ['max_seq_length', 'do_lower_case', 'task_identifier']
        self.do_lower_case = do_lower_case

        if max_seq_length > 512:
            logging.warning("T5 only allows a max_seq_length of 512. Value will be set to 512")
            max_seq_length = 512
        self.max_seq_length = max_seq_length

        # Copy before mutating: writing into the shared default {} would leak
        # 'do_lower_case' into every later instantiation (mutable-default bug).
        tokenizer_args = dict(tokenizer_args)
        if self.do_lower_case is not None:
            tokenizer_args['do_lower_case'] = do_lower_case

        self.t5model = T5Model.from_pretrained(model_name_or_path, **model_args)
        self.tokenizer = T5Tokenizer.from_pretrained(model_name_or_path, **tokenizer_args)
        self.task_identifier = task_identifier

    def forward(self, features):
        """Returns token_embeddings, cls_token"""
        # Only the encoder stack is used; the T5 decoder is not needed for embeddings.
        output_states = self.t5model.encoder(input_ids=features['input_ids'], attention_mask=features['attention_mask'])
        output_tokens = output_states[0]
        cls_tokens = output_tokens[:, 0, :]  # first token serves as the 'CLS' representation
        features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens})

        if len(output_states) > 1:
            features.update({'all_layer_embeddings': output_states[1]})

        return features

    def get_word_embedding_dimension(self) -> int:
        return self.t5model.config.hidden_size

    def tokenize(self, text: str) -> List[int]:
        """
        Tokenizes a text (prefixed with the task identifier) and maps tokens to token-ids
        """
        return self.tokenizer.encode(self.task_identifier + text)

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        """
        Convert tokenized sentence in its embedding ids, segment ids and mask

        :param tokens:
            a tokenized sentence
        :param pad_seq_length:
            the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
        :return: embedding ids, segment ids and mask for the sentence
        """
        pad_seq_length = min(pad_seq_length, self.max_seq_length)
        return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str):
        self.t5model.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)

        with open(os.path.join(output_path, 'sentence_T5_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'sentence_T5_config.json')) as fIn:
            config = json.load(fIn)
        return T5(model_name_or_path=input_path, **config)
| 3,402 | 37.235955 | 206 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/RoBERTa.py | from torch import Tensor
from torch import nn
from transformers import RobertaModel, RobertaTokenizer
import json
from typing import Union, Tuple, List, Dict, Optional
import os
import numpy as np
import logging
class RoBERTa(nn.Module):
    """DEPRECATED: Please use models.Transformer instead.

    RoBERTa model to generate token embeddings.
    Each token is mapped to an output vector from RoBERTa.
    """
    def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, model_args: Dict = {}, tokenizer_args: Dict = {}):
        """
        :param model_name_or_path: model name or local path passed to ``from_pretrained``
        :param max_seq_length: maximum input length; capped at 512
        :param do_lower_case: if not None, forwarded to the tokenizer as ``do_lower_case``
        :param model_args: extra keyword arguments for ``RobertaModel.from_pretrained``
        :param tokenizer_args: extra keyword arguments for ``RobertaTokenizer.from_pretrained``
        """
        super(RoBERTa, self).__init__()
        self.config_keys = ['max_seq_length', 'do_lower_case']
        self.do_lower_case = do_lower_case

        if max_seq_length > 512:
            logging.warning("RoBERTa only allows a max_seq_length of 512 (514 with special tokens). Value will be set to 512")
            max_seq_length = 512
        self.max_seq_length = max_seq_length

        # Copy before mutating: writing into the shared default {} would leak
        # 'do_lower_case' into every later instantiation (mutable-default bug).
        tokenizer_args = dict(tokenizer_args)
        if self.do_lower_case is not None:
            tokenizer_args['do_lower_case'] = do_lower_case

        self.roberta = RobertaModel.from_pretrained(model_name_or_path, **model_args)
        self.tokenizer = RobertaTokenizer.from_pretrained(model_name_or_path, **tokenizer_args)

    def forward(self, features):
        """Returns token_embeddings, cls_token"""
        output_states = self.roberta(**features)
        output_tokens = output_states[0]
        cls_tokens = output_tokens[:, 0, :]  # CLS token is first token
        features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})

        if len(output_states) > 2:
            features.update({'all_layer_embeddings': output_states[2]})

        return features

    def get_word_embedding_dimension(self) -> int:
        return self.roberta.config.hidden_size

    def tokenize(self, text: str) -> List[int]:
        """
        Tokenizes a text and maps tokens to token-ids
        """
        return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        """
        Convert tokenized sentence in its embedding ids, segment ids and mask

        :param tokens:
            a tokenized sentence
        :param pad_seq_length:
            the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
        :return: embedding ids, segment ids and mask for the sentence
        """
        pad_seq_length = min(pad_seq_length, self.max_seq_length) + 2  # Add space for CLS + SEP token
        return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str):
        self.roberta.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)

        with open(os.path.join(output_path, 'sentence_roberta_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'sentence_roberta_config.json')) as fIn:
            config = json.load(fIn)
        return RoBERTa(model_name_or_path=input_path, **config)
| 3,445 | 37.719101 | 167 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/CamemBERT.py | from torch import Tensor
from torch import nn
from transformers import CamembertModel, CamembertTokenizer
import json
from typing import Union, Tuple, List, Dict, Optional
import os
import numpy as np
import logging
class CamemBERT(nn.Module):
    """DEPRECATED: Please use models.Transformer instead.

    CamemBERT model to generate token embeddings.
    Each token is mapped to an output vector from CamemBERT.
    """
    def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, model_args: Dict = {}, tokenizer_args: Dict = {}):
        """
        :param model_name_or_path: model name or local path passed to ``from_pretrained``
        :param max_seq_length: maximum input length; capped at 511
        :param do_lower_case: if not None, forwarded to the tokenizer as ``do_lower_case``
        :param model_args: extra keyword arguments for ``CamembertModel.from_pretrained``
        :param tokenizer_args: extra keyword arguments for ``CamembertTokenizer.from_pretrained``
        """
        super(CamemBERT, self).__init__()
        self.config_keys = ['max_seq_length', 'do_lower_case']
        self.do_lower_case = do_lower_case

        if max_seq_length > 511:
            logging.warning("CamemBERT only allows a max_seq_length of 511 (514 with special tokens). Value will be set to 511")
            max_seq_length = 511
        self.max_seq_length = max_seq_length

        # Copy before mutating: writing into the shared default {} would leak
        # 'do_lower_case' into every later instantiation (mutable-default bug).
        tokenizer_args = dict(tokenizer_args)
        if self.do_lower_case is not None:
            tokenizer_args['do_lower_case'] = do_lower_case

        self.camembert = CamembertModel.from_pretrained(model_name_or_path, **model_args)
        self.tokenizer = CamembertTokenizer.from_pretrained(model_name_or_path, **tokenizer_args)

    def forward(self, features):
        """Returns token_embeddings, cls_token"""
        # CamemBERT does not use token_type_ids
        output_states = self.camembert(**features)
        output_tokens = output_states[0]
        cls_tokens = output_tokens[:, 0, :]  # CLS token is first token
        features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})

        if self.camembert.config.output_hidden_states:
            hidden_states = output_states[2]
            features.update({'all_layer_embeddings': hidden_states})

        return features

    def get_word_embedding_dimension(self) -> int:
        return self.camembert.config.hidden_size

    def tokenize(self, text: str) -> List[int]:
        """
        Tokenizes a text and maps tokens to token-ids
        """
        return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        """
        Convert tokenized sentence in its embedding ids, segment ids and mask

        :param tokens:
            a tokenized sentence
        :param pad_seq_length:
            the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
        :return: embedding ids, segment ids and mask for the sentence
        """
        pad_seq_length = min(pad_seq_length, self.max_seq_length) + 3  # Add space for special tokens
        return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str):
        self.camembert.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)

        with open(os.path.join(output_path, 'sentence_camembert_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'sentence_camembert_config.json')) as fIn:
            config = json.load(fIn)
        return CamemBERT(model_name_or_path=input_path, **config)
| 3,584 | 38.395604 | 167 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/BERT.py | from torch import nn
from transformers import BertModel, BertTokenizer
import json
from typing import List, Dict, Optional
import os
import numpy as np
import logging
class BERT(nn.Module):
    """DEPRECATED: Please use models.Transformer instead.

    BERT model to generate token embeddings.
    Each token is mapped to an output vector from BERT.
    """
    def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, model_args: Dict = {}, tokenizer_args: Dict = {}):
        """
        :param model_name_or_path: model name or local path passed to ``from_pretrained``
        :param max_seq_length: maximum input length; capped at 510
        :param do_lower_case: if not None, forwarded to the tokenizer as ``do_lower_case``
        :param model_args: extra keyword arguments for ``BertModel.from_pretrained``
        :param tokenizer_args: extra keyword arguments for ``BertTokenizer.from_pretrained``
        """
        super(BERT, self).__init__()
        self.config_keys = ['max_seq_length', 'do_lower_case']
        self.do_lower_case = do_lower_case

        if max_seq_length > 510:
            logging.warning("BERT only allows a max_seq_length of 510 (512 with special tokens). Value will be set to 510")
            max_seq_length = 510
        self.max_seq_length = max_seq_length

        # Copy before mutating: writing into the shared default {} would leak
        # 'do_lower_case' into every later instantiation (mutable-default bug).
        tokenizer_args = dict(tokenizer_args)
        if self.do_lower_case is not None:
            tokenizer_args['do_lower_case'] = do_lower_case

        self.bert = BertModel.from_pretrained(model_name_or_path, **model_args)
        self.tokenizer = BertTokenizer.from_pretrained(model_name_or_path, **tokenizer_args)

    def forward(self, features):
        """Returns token_embeddings, cls_token"""
        output_states = self.bert(**features)
        output_tokens = output_states[0]
        cls_tokens = output_tokens[:, 0, :]  # CLS token is first token
        features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})

        if len(output_states) > 2:
            features.update({'all_layer_embeddings': output_states[2]})

        return features

    def get_word_embedding_dimension(self) -> int:
        return self.bert.config.hidden_size

    def tokenize(self, text: str) -> List[int]:
        """
        Tokenizes a text and maps tokens to token-ids
        """
        return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        """
        Convert tokenized sentence in its embedding ids, segment ids and mask

        :param tokens:
            a tokenized sentence
        :param pad_seq_length:
            the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
        :return: embedding ids, segment ids and mask for the sentence
        """
        pad_seq_length = min(pad_seq_length, self.max_seq_length) + 2  # Add space for CLS + SEP token
        return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str):
        self.bert.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)

        with open(os.path.join(output_path, 'sentence_bert_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'sentence_bert_config.json')) as fIn:
            config = json.load(fIn)
        return BERT(model_name_or_path=input_path, **config)
| 3,361 | 36.355556 | 167 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/XLNet.py | from torch import Tensor
from torch import nn
from transformers import XLNetModel, XLNetTokenizer
import json
from typing import Union, Tuple, List, Dict, Optional
import os
import numpy as np
class XLNet(nn.Module):
    """DEPRECATED: Please use models.Transformer instead.

    XLNet model to generate token embeddings.
    Each token is mapped to an output vector from XLNet.
    """
    def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, model_args: Dict = {}, tokenizer_args: Dict = {}):
        """
        :param model_name_or_path: model name or local path passed to ``from_pretrained``
        :param max_seq_length: maximum input sequence length
        :param do_lower_case: if not None, forwarded to the tokenizer as ``do_lower_case``
        :param model_args: extra keyword arguments for ``XLNetModel.from_pretrained``
        :param tokenizer_args: extra keyword arguments for ``XLNetTokenizer.from_pretrained``
        """
        super(XLNet, self).__init__()
        self.config_keys = ['max_seq_length', 'do_lower_case']
        self.max_seq_length = max_seq_length
        self.do_lower_case = do_lower_case

        # Copy before mutating: writing into the shared default {} would leak
        # 'do_lower_case' into every later instantiation (mutable-default bug).
        tokenizer_args = dict(tokenizer_args)
        if self.do_lower_case is not None:
            tokenizer_args['do_lower_case'] = do_lower_case

        self.xlnet = XLNetModel.from_pretrained(model_name_or_path, **model_args)
        self.tokenizer = XLNetTokenizer.from_pretrained(model_name_or_path, **tokenizer_args)
        self.cls_token_id = self.tokenizer.convert_tokens_to_ids([self.tokenizer.cls_token])[0]
        self.sep_token_id = self.tokenizer.convert_tokens_to_ids([self.tokenizer.sep_token])[0]

    def forward(self, features):
        """Returns token_embeddings, cls_token"""
        output_states = self.xlnet(**features)
        output_tokens = output_states[0]
        cls_tokens = output_tokens[:, -1, :]  # XLNet places the CLS token at the end of the sequence
        features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})

        if self.xlnet.config.output_hidden_states:
            hidden_states = output_states[2]
            features.update({'all_layer_embeddings': hidden_states})

        return features

    def get_word_embedding_dimension(self) -> int:
        return self.xlnet.config.d_model

    def tokenize(self, text: str) -> List[int]:
        """
        Tokenizes a text and maps tokens to token-ids
        """
        return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int) -> Dict[str, Tensor]:
        """
        Convert tokenized sentence in its embedding ids, segment ids and mask

        :param tokens:
            a tokenized sentence
        :param pad_seq_length:
            the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
        :return: embedding ids, segment ids and mask for the sentence
        """
        pad_seq_length = min(pad_seq_length, self.max_seq_length) + 3  # Add space for special tokens
        return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str):
        self.xlnet.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)

        with open(os.path.join(output_path, 'sentence_xlnet_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'sentence_xlnet_config.json')) as fIn:
            config = json.load(fIn)
        return XLNet(model_name_or_path=input_path, **config)
| 3,474 | 39.406977 | 167 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/WordWeights.py | import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
import logging
class WordWeights(nn.Module):
    """Scales token embeddings by a per-word weight, for example idf values.

    Tokens missing from the weight mapping (case-sensitive, then lower-cased
    lookup) fall back to ``unknown_word_weight``.
    """

    def __init__(self, vocab: List[str], word_weights: Dict[str, float], unknown_word_weight: float = 1):
        """
        :param vocab:
            Vocabulary of the tokenizer
        :param word_weights:
            Maps tokens to float weights by which their embeddings are multiplied.
            Does not have to cover the full vocab.
        :param unknown_word_weight:
            Fallback weight for vocab words absent from ``word_weights`` (e.g. rare words).
        """
        super(WordWeights, self).__init__()
        self.config_keys = ['vocab', 'word_weights', 'unknown_word_weight']
        self.vocab = vocab
        self.word_weights = word_weights
        self.unknown_word_weight = unknown_word_weight

        # Resolve one weight per vocab entry, counting the fallbacks.
        num_unknown_words = 0
        resolved_weights = []
        for token in vocab:
            if token in word_weights:
                resolved_weights.append(word_weights[token])
            elif token.lower() in word_weights:
                resolved_weights.append(word_weights[token.lower()])
            else:
                resolved_weights.append(unknown_word_weight)
                num_unknown_words += 1

        logging.info("{} of {} words without a weighting value. Set weight to {}".format(num_unknown_words, len(vocab), unknown_word_weight))

        # Store the weights as a 1-dim embedding table indexed by token id.
        self.emb_layer = nn.Embedding(len(vocab), 1)
        self.emb_layer.load_state_dict({'weight': torch.FloatTensor(resolved_weights).unsqueeze(1)})

    def forward(self, features: Dict[str, Tensor]):
        attention_mask = features['attention_mask']
        token_embeddings = features['token_embeddings']

        # Per-token scalar weight, masked so padding tokens contribute nothing.
        token_weights = self.emb_layer(features['input_ids']).squeeze(-1) * attention_mask.float()
        token_weights_sum = torch.sum(token_weights, 1)

        # Broadcast each scalar weight across the embedding dimension.
        expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
        weighted_embeddings = token_embeddings * expanded

        features.update({'token_embeddings': weighted_embeddings, 'token_weights_sum': token_weights_sum})
        return features

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path):
        with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path):
        with open(os.path.join(input_path, 'config.json')) as fIn:
            config = json.load(fIn)
        return WordWeights(**config)
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/WKPooling.py | import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
import numpy as np
class WKPooling(nn.Module):
    """
    Pooling based on the paper: "SBERT-WK: A Sentence Embedding Method By Dissecting BERT-based Word Models"
    https://arxiv.org/pdf/2002.06652.pdf

    Note: SBERT-WK uses QR decomposition. torch QR decomposition is currently extremely slow when run on GPU.
    Hence, the tensor is first transferred to the CPU before it is applied. This makes this pooling method rather slow
    """
    def __init__(self, word_embedding_dimension, layer_start: int = 4, context_window_size: int = 2):
        """
        :param word_embedding_dimension: dimensionality of the token embeddings (and of the pooled output)
        :param layer_start: hidden layers before this index are discarded before pooling
        :param context_window_size: number of neighbor tokens on each side used in unify_token
        """
        super(WKPooling, self).__init__()
        self.config_keys = ['word_embedding_dimension', 'layer_start', 'context_window_size']
        self.word_embedding_dimension = word_embedding_dimension
        # Output keeps the word-embedding dimensionality
        self.pooling_output_dimension = word_embedding_dimension
        self.layer_start = layer_start
        self.context_window_size = context_window_size

    def forward(self, features: Dict[str, Tensor]):
        """Pools 'all_layer_embeddings' into one 'sentence_embedding' per input sentence."""
        ft_all_layers = features['all_layer_embeddings']
        org_device = ft_all_layers[0].device
        # Stack layers -> (batch, num_layers, seq_len, dim), then drop the first layer_start layers
        all_layer_embedding = torch.stack(ft_all_layers).transpose(1,0)
        all_layer_embedding = all_layer_embedding[:, self.layer_start:, :, :]  # Start from 4th layers output

        # torch.qr is slow on GPU (see https://github.com/pytorch/pytorch/issues/22573). So compute it on CPU until issue is fixed
        all_layer_embedding = all_layer_embedding.cpu()

        attention_mask = features['attention_mask'].cpu().numpy()
        # Number of real (non-padding) tokens per sentence; the last one is excluded
        unmask_num = np.array([sum(mask) for mask in attention_mask]) - 1  # Not considering the last item
        embedding = []

        # One sentence at a time
        for sent_index in range(len(unmask_num)):
            sentence_feature = all_layer_embedding[sent_index, :, :unmask_num[sent_index], :]
            one_sentence_embedding = []
            # Process each token: build its 'unified word representation'
            for token_index in range(sentence_feature.shape[1]):
                token_feature = sentence_feature[:, token_index, :]
                # 'Unified Word Representation'
                token_embedding = self.unify_token(token_feature)
                one_sentence_embedding.append(token_embedding)

            # NOTE(review): this per-iteration assignment is overwritten by the final
            # 'sentence_embedding' update below — it looks like leftover code; confirm intent.
            features.update({'sentence_embedding': features['cls_token_embeddings']})

            one_sentence_embedding = torch.stack(one_sentence_embedding)
            sentence_embedding = self.unify_sentence(sentence_feature, one_sentence_embedding)
            embedding.append(sentence_embedding)

        # Move the pooled embeddings back to the original device
        output_vector = torch.stack(embedding).to(org_device)

        features.update({'sentence_embedding': output_vector})
        return features

    def unify_token(self, token_feature):
        """
        Unify Token Representation: combine a token's per-layer vectors into one
        vector, weighted by alignment and novelty scores derived from QR decomposition.
        """
        window_size = self.context_window_size

        alpha_alignment = torch.zeros(token_feature.size()[0], device=token_feature.device)
        alpha_novelty = torch.zeros(token_feature.size()[0], device=token_feature.device)

        for k in range(token_feature.size()[0]):
            # Context window of neighboring layer representations around layer k
            left_window = token_feature[k - window_size:k, :]
            right_window = token_feature[k + 1:k + window_size + 1, :]
            window_matrix = torch.cat([left_window, right_window, token_feature[k, :][None, :]])
            Q, R = torch.qr(window_matrix.T)

            r = R[:, -1]
            # Alignment: how well layer k agrees with its context window
            alpha_alignment[k] = torch.mean(self.norm_vector(R[:-1, :-1], dim=0), dim=1).matmul(R[:-1, -1]) / torch.norm(r[:-1])
            alpha_alignment[k] = 1 / (alpha_alignment[k] * window_matrix.size()[0] * 2)
            # Novelty: magnitude of the component orthogonal to the context window
            alpha_novelty[k] = torch.abs(r[-1]) / torch.norm(r)

        # Sum Norm
        alpha_alignment = alpha_alignment / torch.sum(alpha_alignment)  # Normalization Choice
        alpha_novelty = alpha_novelty / torch.sum(alpha_novelty)

        alpha = alpha_novelty + alpha_alignment
        alpha = alpha / torch.sum(alpha)  # Normalize

        out_embedding = torch.mv(token_feature.t(), alpha)
        return out_embedding

    def norm_vector(self, vec, p=2, dim=0):
        """
        Implements the normalize() function from sklearn
        """
        vec_norm = torch.norm(vec, p=p, dim=dim)
        return vec.div(vec_norm.expand_as(vec))

    def unify_sentence(self, sentence_feature, one_sentence_embedding):
        """
        Unify Sentence By Token Importance: weight each token's unified vector by
        the variance of its cross-layer similarity pattern.
        """
        sent_len = one_sentence_embedding.size()[0]

        var_token = torch.zeros(sent_len, device=one_sentence_embedding.device)
        for token_index in range(sent_len):
            token_feature = sentence_feature[:, token_index, :]
            sim_map = self.cosine_similarity_torch(token_feature)
            var_token[token_index] = torch.var(sim_map.diagonal(-1))

        # Normalize importances to sum to one
        var_token = var_token / torch.sum(var_token)

        sentence_embedding = torch.mv(one_sentence_embedding.t(), var_token)
        return sentence_embedding

    def cosine_similarity_torch(self, x1, x2=None, eps=1e-8):
        """Pairwise cosine similarity matrix; eps guards against division by zero."""
        x2 = x1 if x2 is None else x2
        w1 = x1.norm(p=2, dim=1, keepdim=True)
        w2 = w1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
        return torch.mm(x1, x2.t()) / (w1 * w2.t()).clamp(min=eps)

    def get_sentence_embedding_dimension(self):
        return self.pooling_output_dimension

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path):
        with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path):
        with open(os.path.join(input_path, 'config.json')) as fIn:
            config = json.load(fIn)

        return WKPooling(**config)
| 5,864 | 40.595745 | 130 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/ALBERT.py | from torch import Tensor
from torch import nn
from transformers import AlbertModel, AlbertTokenizer
import json
from typing import Union, Tuple, List, Dict, Optional
import os
import numpy as np
import logging
class ALBERT(nn.Module):
    """DEPRECATED: Please use models.Transformer instead.

    ALBERT model to generate token embeddings.
    Each token is mapped to an output vector from ALBERT.
    """
    def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, model_args: Dict = {}, tokenizer_args: Dict = {}):
        """
        :param model_name_or_path: model name or local path passed to ``from_pretrained``
        :param max_seq_length: maximum input length; capped at 510
        :param do_lower_case: if not None, forwarded to the tokenizer as ``do_lower_case``
        :param model_args: extra keyword arguments for ``AlbertModel.from_pretrained``
        :param tokenizer_args: extra keyword arguments for ``AlbertTokenizer.from_pretrained``
        """
        super(ALBERT, self).__init__()
        self.config_keys = ['max_seq_length', 'do_lower_case']
        self.do_lower_case = do_lower_case

        if max_seq_length > 510:
            # Bug fix: the original message said "BERT" (copy-paste error).
            logging.warning("ALBERT only allows a max_seq_length of 510 (512 with special tokens). Value will be set to 510")
            max_seq_length = 510
        self.max_seq_length = max_seq_length

        # Copy before mutating: writing into the shared default {} would leak
        # 'do_lower_case' into every later instantiation (mutable-default bug).
        tokenizer_args = dict(tokenizer_args)
        if self.do_lower_case is not None:
            tokenizer_args['do_lower_case'] = do_lower_case

        self.albert = AlbertModel.from_pretrained(model_name_or_path, **model_args)
        self.tokenizer = AlbertTokenizer.from_pretrained(model_name_or_path, **tokenizer_args)

    def forward(self, features):
        """Returns token_embeddings, cls_token"""
        output_states = self.albert(**features)
        output_tokens = output_states[0]
        cls_tokens = output_tokens[:, 0, :]  # CLS token is first token
        features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})

        if self.albert.config.output_hidden_states:
            hidden_states = output_states[2]
            features.update({'all_layer_embeddings': hidden_states})

        return features

    def get_word_embedding_dimension(self) -> int:
        return self.albert.config.hidden_size

    def tokenize(self, text: str) -> List[int]:
        """
        Tokenizes a text and maps tokens to token-ids
        """
        return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        """
        Convert tokenized sentence in its embedding ids, segment ids and mask

        :param tokens:
            a tokenized sentence
        :param pad_seq_length:
            the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
        :return: embedding ids, segment ids and mask for the sentence
        """
        pad_seq_length = min(pad_seq_length, self.max_seq_length) + 3  # Add space for special tokens
        return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str):
        self.albert.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)

        with open(os.path.join(output_path, 'sentence_albert_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path: str):
        with open(os.path.join(input_path, 'sentence_albert_config.json')) as fIn:
            config = json.load(fIn)
        return ALBERT(model_name_or_path=input_path, **config)
| 3,482 | 37.7 | 167 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/Dense.py | import torch
from torch import Tensor
from torch import nn
from torch import functional as F
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
from ..util import fullname, import_from_string
class Dense(nn.Module):
    """Fully-connected feed-forward layer with a non-linearity.

    Projects a fixed-size sentence embedding to another dimensionality and applies
    an activation function, e.g. to build deep averaging networks (DAN).

    :param in_features: Size of the input dimension
    :param out_features: Output size
    :param bias: Add a bias vector
    :param activation_function: Pytorch activation function applied on output
    """
    def __init__(self, in_features: int, out_features: int, bias: bool = True, activation_function=nn.Tanh()):
        super(Dense, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.activation_function = activation_function
        self.linear = nn.Linear(in_features, out_features, bias=bias)

    def forward(self, features: Dict[str, Tensor]):
        # Project the pooled sentence vector, then squash it through the activation.
        projected = self.linear(features['sentence_embedding'])
        features['sentence_embedding'] = self.activation_function(projected)
        return features

    def get_sentence_embedding_dimension(self) -> int:
        return self.out_features

    def save(self, output_path):
        # Persist the layer configuration alongside the learned parameters.
        config = {'in_features': self.in_features, 'out_features': self.out_features, 'bias': self.bias, 'activation_function': fullname(self.activation_function)}
        with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
            json.dump(config, fOut)
        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))

    @staticmethod
    def load(input_path):
        with open(os.path.join(input_path, 'config.json')) as fIn:
            config = json.load(fIn)

        # Re-instantiate the activation class from its fully-qualified name.
        config['activation_function'] = import_from_string(config['activation_function'])()
        dense_layer = Dense(**config)
        dense_layer.load_state_dict(torch.load(os.path.join(input_path, 'pytorch_model.bin'), map_location=torch.device('cpu')))
        return dense_layer
| 2,116 | 40.509804 | 175 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/BoW.py | import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
import logging
import numpy as np
from .tokenizer import WhitespaceTokenizer
class BoW(nn.Module):
    """Derives fixed-size sentence embeddings with a Bag-of-Words (BoW) model.

    The output vector has the size of the vocabulary; supplying per-word weights
    allows the generation of tf-idf style vectors.
    """

    def __init__(self, vocab: List[str], word_weights: Dict[str, float] = {}, unknown_word_weight: float = 1, cumulative_term_frequency: bool = True):
        super(BoW, self).__init__()
        vocab = list(set(vocab))  # Ensure vocab is unique
        self.config_keys = ['vocab', 'word_weights', 'unknown_word_weight', 'cumulative_term_frequency']
        self.vocab = vocab
        self.word_weights = word_weights
        self.unknown_word_weight = unknown_word_weight
        self.cumulative_term_frequency = cumulative_term_frequency

        # Resolve one weight per vocab index (exact match, then lower-cased,
        # then the fallback weight), counting the fallbacks.
        num_unknown_words = 0
        self.weights = []
        for token in vocab:
            if token in word_weights:
                self.weights.append(word_weights[token])
            elif token.lower() in word_weights:
                self.weights.append(word_weights[token.lower()])
            else:
                self.weights.append(unknown_word_weight)
                num_unknown_words += 1

        logging.info("{} out of {} words without a weighting value. Set weight to {}".format(num_unknown_words, len(vocab), unknown_word_weight))

        self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
        self.sentence_embedding_dimension = len(vocab)

    def forward(self, features: Dict[str, Tensor]):
        # The sentence embedding is already built in get_sentence_features.
        return features

    def tokenize(self, text: str) -> List[int]:
        return self.tokenizer.tokenize(text)

    def get_sentence_embedding_dimension(self):
        return self.sentence_embedding_dimension

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        # Accumulate (or set) each token's weight at its vocabulary index.
        vector = np.zeros(self.get_sentence_embedding_dimension(), dtype=np.float32)
        for token_idx in tokens:
            if self.cumulative_term_frequency:
                vector[token_idx] += self.weights[token_idx]
            else:
                vector[token_idx] = self.weights[token_idx]
        return {'sentence_embedding': torch.tensor([vector], dtype=torch.float)}

    def get_config_dict(self):
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path):
        with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path):
        with open(os.path.join(input_path, 'config.json')) as fIn:
            config = json.load(fIn)
        return BoW(**config)
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/LASER.py | import torch
from torch import nn
from typing import List
import os
import json
class LASER(nn.Module):
    """
    Implementation of LASER
    Paper: Mikel Artetxe and Holger Schwenk, Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond arXiv, Dec 26 2018.
    Code: https://github.com/facebookresearch/LASER

    Loads a pre-trained LASER checkpoint (encoder hyper-parameters, weights and
    token dictionary) from ``model_path``. Note that forward/tokenize/
    get_sentence_features are stubs and not implemented yet.
    """
    def __init__(self, model_path):
        nn.Module.__init__(self)
        # Bugfix: remember the constructor argument and declare config_keys.
        # get_config_dict() (used by save()) iterates self.config_keys, which
        # was never defined before, so save() always raised AttributeError.
        self.model_path = model_path
        self.config_keys = ['model_path']

        state_dict = torch.load(model_path)
        self.encoder = LaserEncoder(**state_dict['params'])
        self.encoder.load_state_dict(state_dict['model'])

        self.dictionary = state_dict['dictionary']
        # Indices of the special symbols used for padding / end-of-sentence / unknowns.
        self.pad_index = self.dictionary['<pad>']
        self.eos_index = self.dictionary['</s>']
        self.unk_index = self.dictionary['<unk>']

    def forward(self, features):
        # Not implemented yet: encoding is not wired up to the SentenceTransformer pipeline.
        pass

    def get_word_embedding_dimension(self) -> int:
        # Not implemented yet.
        pass

    def tokenize(self, text: str) -> List[int]:
        # Not implemented yet.
        pass

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        # Not implemented yet.
        pass

    def save(self, output_path: str):
        """Persist the configuration and the model weights to output_path."""
        with open(os.path.join(output_path, 'laser_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)
        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))

    def get_config_dict(self):
        """Constructor arguments needed to re-create this module via load()."""
        return {key: self.__dict__[key] for key in self.config_keys}

    @staticmethod
    def load(input_path: str):
        """Re-create a LASER module persisted with save()."""
        with open(os.path.join(input_path, 'laser_config.json'), 'r') as fIn:
            config = json.load(fIn)
        weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
        model = LASER(**config)
        model.load_state_dict(weights)
        return model
class LaserEncoder(nn.Module):
    """LSTM sentence encoder as used by LASER.

    Embeds token ids, runs a (optionally bidirectional) LSTM over the
    sequence and max-pools the outputs over time into one fixed-size
    sentence embedding per batch entry.
    """
    def __init__(
        self, num_embeddings, padding_idx, embed_dim=320, hidden_size=512, num_layers=1, bidirectional=False,
        left_pad=True, padding_value=0.
    ):
        super().__init__()

        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.hidden_size = hidden_size

        self.padding_idx = padding_idx
        self.embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx=self.padding_idx)

        self.lstm = nn.LSTM(
            input_size=embed_dim,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional
        )
        self.left_pad = left_pad
        self.padding_value = padding_value

        # A bidirectional LSTM concatenates forward and backward states.
        self.output_units = hidden_size
        if bidirectional:
            self.output_units *= 2

    def forward(self, src_tokens, src_lengths):
        """Encode a batch of token ids.

        :param src_tokens: (batch, seq) LongTensor of token ids
        :param src_lengths: per-row sentence lengths, sorted descending
            (required by pack_padded_sequence without enforce_sorted)
        :return: dict with 'sentemb' (max-pooled sentence embeddings),
            'encoder_out' (outputs, final hiddens, final cells) and
            'encoder_padding_mask' (or None if there is no padding)
        """
        if self.left_pad:
            # convert left-padding to right-padding
            src_tokens = convert_padding_direction(
                src_tokens,
                self.padding_idx,
                left_to_right=True,
            )

        bsz, seqlen = src_tokens.size()

        # embed tokens
        x = self.embed_tokens(src_tokens)

        # B x T x C -> T x B x C (nn.LSTM default is time-major)
        x = x.transpose(0, 1)

        # pack embedded source tokens into a PackedSequence
        packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist())

        # apply LSTM, starting from zeroed hidden and cell states
        if self.bidirectional:
            state_size = 2 * self.num_layers, bsz, self.hidden_size
        else:
            state_size = self.num_layers, bsz, self.hidden_size
        h0 = x.data.new(*state_size).zero_()
        c0 = x.data.new(*state_size).zero_()
        packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))

        # unpack outputs and apply dropout
        x, _ = nn.utils.rnn.pad_packed_sequence(packed_outs, padding_value=self.padding_value)
        assert list(x.size()) == [seqlen, bsz, self.output_units]

        if self.bidirectional:
            # Fuse the forward/backward final states of each layer into one
            # (num_layers, bsz, 2*hidden) tensor.
            def combine_bidir(outs):
                return torch.cat([
                    torch.cat([outs[2 * i], outs[2 * i + 1]], dim=0).view(1, bsz, self.output_units)
                    for i in range(self.num_layers)
                ], dim=0)

            final_hiddens = combine_bidir(final_hiddens)
            final_cells = combine_bidir(final_cells)

        encoder_padding_mask = src_tokens.eq(self.padding_idx).t()

        # Set padded outputs to -inf so they are not selected by max-pooling
        padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1)
        if padding_mask.any():
            x = x.float().masked_fill_(padding_mask, float('-inf')).type_as(x)

        # Build the sentence embedding by max-pooling over the encoder outputs
        sentemb = x.max(dim=0)[0]

        return {
            'sentemb': sentemb,
            'encoder_out': (x, final_hiddens, final_cells),
            'encoder_padding_mask': encoder_padding_mask if encoder_padding_mask.any() else None
        }
####################################################################
###
### Functions from LASER
###
####################################################################

# get environment
# NOTE: this assert runs at import time, so importing this module fails
# immediately unless the LASER environment variable points at a checkout
# of https://github.com/facebookresearch/LASER.
assert os.environ.get('LASER'), 'Please set the enviornment variable LASER'

from subprocess import run, check_output, DEVNULL

# Shell-command fragments for the external preprocessing tools shipped
# with the LASER toolkit (fastBPE, the Moses tokenizer scripts, Mecab).
LASER = os.environ['LASER']
FASTBPE = LASER + '/tools-external/fastBPE/fast'
MOSES_BDIR = LASER + '/tools-external/moses-tokenizer/tokenizer/'
MOSES_TOKENIZER = MOSES_BDIR + 'tokenizer.perl -q -no-escape -threads 20 -l '
MOSES_LC = MOSES_BDIR + 'lowercase.perl'
NORM_PUNC = MOSES_BDIR + 'normalize-punctuation.perl -l '
DESCAPE = MOSES_BDIR + 'deescape-special-chars.perl'
REM_NON_PRINT_CHAR = MOSES_BDIR + 'remove-non-printing-char.perl'

# Romanization (Greek only)
ROMAN_LC = 'python3 ' + LASER + '/source/lib/romanize_lc.py -l '

# Mecab tokenizer for Japanese
MECAB = LASER + '/tools-external/mecab'
def Token(inp_fname, out_fname, lang='en',
          lower_case=True, romanize=False, descape=False,
          verbose=False, over_write=False, gzip=False):
    """Tokenize inp_fname into out_fname through the LASER shell pipeline
    (punctuation normalization, Moses tokenizer, jieba for Chinese, Mecab for
    Japanese, romanization/lower-casing).

    :param inp_fname: input text file (gzip-compressed if ``gzip`` is True)
    :param out_fname: output file; if it already exists, nothing is done
    :param lang: ISO language code (some iso3 codes are mapped to iso2)
    :param lower_case: must be True; all LASER models expect lower-cased input
    :param romanize: romanize the text (Greek only)
    :param descape: additionally de-escape special characters
    :param over_write: not implemented; must be False
    """
    assert lower_case, 'lower case is needed by all the models'
    assert not over_write, 'over-write is not yet implemented'
    if not os.path.isfile(out_fname):
        cat = 'zcat ' if gzip else 'cat '
        roman = lang if romanize else 'none'
        # handle some iso3 language codes
        if lang in ('cmn', 'wuu', 'yue'):
            lang = 'zh'
        # Bugfix: ('jpn') is just the string 'jpn', so the original test was a
        # substring check that also matched e.g. 'jp'. A one-element tuple
        # performs the intended exact membership test.
        if lang in ('jpn',):
            lang = 'ja'
        if verbose:
            # Bugfix: the format string had only four placeholders for five
            # arguments, so the '(romanized)' marker was silently dropped.
            print(' - Tokenizer: {} in language {} {} {} {}'
                  .format(os.path.basename(inp_fname), lang,
                          '(gzip)' if gzip else '',
                          '(de-escaped)' if descape else '',
                          '(romanized)' if romanize else ''))
        run(cat + inp_fname
            + '|' + REM_NON_PRINT_CHAR
            + '|' + NORM_PUNC + lang
            + ('|' + DESCAPE if descape else '')
            + '|' + MOSES_TOKENIZER + lang
            + ('| python3 -m jieba -d ' if lang == 'zh' else '')
            + ('|' + MECAB + '/bin/mecab -O wakati -b 50000 ' if lang == 'ja' else '')
            + '|' + ROMAN_LC + roman
            + '>' + out_fname,
            env=dict(os.environ, LD_LIBRARY_PATH=MECAB + '/lib'),
            shell=True)
    elif not over_write and verbose:
        # Bugfix: removed a stray, unused second format() argument.
        print(' - Tokenizer: {} exists already'
              .format(os.path.basename(out_fname)))
# TODO Do proper padding from the beginning
def convert_padding_direction(src_tokens, padding_idx, right_to_left=False, left_to_right=False):
    """Move the padding of a 2D batch of token ids to the other side.

    Exactly one of right_to_left / left_to_right must be True. Rows are
    rotated by their individual pad counts; batches that are already padded
    on the requested side (or contain no padding) are returned unchanged.
    """
    assert right_to_left ^ left_to_right
    pad_positions = src_tokens.eq(padding_idx)

    # Fast paths: nothing needs to move.
    if not pad_positions.any():
        return src_tokens
    if left_to_right and not pad_positions[:, 0].any():
        # already right padded
        return src_tokens
    if right_to_left and not pad_positions[:, -1].any():
        # already left padded
        return src_tokens

    seq_len = src_tokens.size(1)
    # Column indices 0..seq_len-1, broadcast to one row per batch entry.
    col_idx = buffered_arange(seq_len).type_as(src_tokens).expand_as(src_tokens)
    pads_per_row = pad_positions.long().sum(dim=1, keepdim=True)
    # Rotate every row by its own pad count; remainder wraps the indices.
    offset = -pads_per_row if right_to_left else pads_per_row
    gather_idx = torch.remainder(col_idx + offset, seq_len)
    return src_tokens.gather(1, gather_idx)


def buffered_arange(max):
    """Return torch.arange(max) as a slice of a lazily grown, shared buffer."""
    if not hasattr(buffered_arange, 'buf'):
        buffered_arange.buf = torch.LongTensor()
    buf = buffered_arange.buf
    if buf.numel() < max:
        # Grow (and fill) the cached buffer in place.
        torch.arange(max, out=buf)
    return buf[:max]
| 8,454 | 35.287554 | 155 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/Pooling.py | import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
class Pooling(nn.Module):
    """Performs pooling (max or mean) on the token embeddings.

    Using pooling, it generates from a variable sized sentence a fixed sized sentence embedding. This layer also allows to use the CLS token if it is returned by the underlying word embedding model.
    You can concatenate multiple poolings together.

    :param word_embedding_dimension: Dimensions for the word embeddings
    :param pooling_mode_cls_token: Use the first token (CLS token) as text representations
    :param pooling_mode_max_tokens: Use max in each dimension over all tokens.
    :param pooling_mode_mean_tokens: Perform mean-pooling
    :param pooling_mode_mean_sqrt_len_tokens: Perform mean-pooling, but divide by sqrt(input_length).
    """
    def __init__(self,
                 word_embedding_dimension: int,
                 pooling_mode_cls_token: bool = False,
                 pooling_mode_max_tokens: bool = False,
                 pooling_mode_mean_tokens: bool = True,
                 pooling_mode_mean_sqrt_len_tokens: bool = False,
                 ):
        super(Pooling, self).__init__()

        self.config_keys = ['word_embedding_dimension', 'pooling_mode_cls_token', 'pooling_mode_mean_tokens', 'pooling_mode_max_tokens', 'pooling_mode_mean_sqrt_len_tokens']

        self.word_embedding_dimension = word_embedding_dimension
        self.pooling_mode_cls_token = pooling_mode_cls_token
        self.pooling_mode_mean_tokens = pooling_mode_mean_tokens
        self.pooling_mode_max_tokens = pooling_mode_max_tokens
        self.pooling_mode_mean_sqrt_len_tokens = pooling_mode_mean_sqrt_len_tokens

        # Enabled modes are concatenated, so the output grows by one
        # word_embedding_dimension per active pooling strategy.
        pooling_mode_multiplier = sum([pooling_mode_cls_token, pooling_mode_max_tokens, pooling_mode_mean_tokens, pooling_mode_mean_sqrt_len_tokens])
        self.pooling_output_dimension = (pooling_mode_multiplier * word_embedding_dimension)

    def forward(self, features: Dict[str, Tensor]):
        """Pool the token embeddings into one fixed-size sentence embedding.

        Expects 'token_embeddings' (batch, seq, dim) and 'attention_mask'
        (batch, seq) in ``features``; 'cls_token_embeddings' is only required
        when CLS pooling is enabled. Adds 'sentence_embedding' to ``features``
        and returns the dict.
        """
        token_embeddings = features['token_embeddings']
        attention_mask = features['attention_mask']

        ## Pooling strategy
        output_vectors = []
        if self.pooling_mode_cls_token:
            # Bugfix: only read the CLS embedding when CLS pooling is active.
            # Previously features['cls_token_embeddings'] was accessed
            # unconditionally, which raised a KeyError for word embedding
            # models that do not provide a CLS token.
            output_vectors.append(features['cls_token_embeddings'])
        if self.pooling_mode_max_tokens:
            input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
            token_embeddings[input_mask_expanded == 0] = -1e9  # Set padding tokens to large negative value
            max_over_time = torch.max(token_embeddings, 1)[0]
            output_vectors.append(max_over_time)
        if self.pooling_mode_mean_tokens or self.pooling_mode_mean_sqrt_len_tokens:
            input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
            sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)

            # If tokens are weighted (by WordWeights layer), feature 'token_weights_sum' will be present
            if 'token_weights_sum' in features:
                sum_mask = features['token_weights_sum'].unsqueeze(-1).expand(sum_embeddings.size())
            else:
                sum_mask = input_mask_expanded.sum(1)

            # Avoid division by zero for fully-masked rows.
            sum_mask = torch.clamp(sum_mask, min=1e-9)

            if self.pooling_mode_mean_tokens:
                output_vectors.append(sum_embeddings / sum_mask)
            if self.pooling_mode_mean_sqrt_len_tokens:
                output_vectors.append(sum_embeddings / torch.sqrt(sum_mask))

        output_vector = torch.cat(output_vectors, 1)
        features.update({'sentence_embedding': output_vector})
        return features

    def get_sentence_embedding_dimension(self):
        return self.pooling_output_dimension

    def get_config_dict(self):
        """Constructor arguments needed to re-create this module."""
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path):
        with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path):
        with open(os.path.join(input_path, 'config.json')) as fIn:
            config = json.load(fIn)

        return Pooling(**config)
| 4,313 | 45.891304 | 198 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/LSTM.py | import torch
from torch import nn
from typing import List
import os
import json
class LSTM(nn.Module):
    """
    Bidirectional LSTM running over word embeddings.
    """

    def __init__(self, word_embedding_dimension: int, hidden_dim: int, num_layers: int = 1, dropout: float = 0, bidirectional: bool = True):
        nn.Module.__init__(self)
        self.config_keys = ['word_embedding_dimension', 'hidden_dim', 'num_layers', 'dropout', 'bidirectional']
        self.word_embedding_dimension = word_embedding_dimension
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional

        # A bidirectional LSTM concatenates forward and backward states.
        self.embeddings_dimension = hidden_dim * 2 if self.bidirectional else hidden_dim

        self.encoder = nn.LSTM(word_embedding_dimension, hidden_dim, num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, batch_first=True)

    def forward(self, features):
        """Run the LSTM over the padded token embeddings and update 'token_embeddings'."""
        token_embeddings = features['token_embeddings']
        # Guard against zero-length sentences, which pack_padded_sequence rejects.
        lengths = torch.clamp(features['sentence_lengths'], min=1)

        packed_input = nn.utils.rnn.pack_padded_sequence(token_embeddings, lengths, batch_first=True, enforce_sorted=False)
        packed_output, _ = self.encoder(packed_input)
        padded_output, _ = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True)

        features.update({'token_embeddings': padded_output})
        return features

    def get_word_embedding_dimension(self) -> int:
        return self.embeddings_dimension

    def tokenize(self, text: str) -> List[int]:
        raise NotImplementedError()

    def save(self, output_path: str):
        """Write configuration and weights to output_path."""
        with open(os.path.join(output_path, 'lstm_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)
        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))

    def get_config_dict(self):
        """Constructor arguments needed to re-create this module."""
        return {key: self.__dict__[key] for key in self.config_keys}

    @staticmethod
    def load(input_path: str):
        """Re-create an LSTM module saved with save()."""
        with open(os.path.join(input_path, 'lstm_config.json'), 'r') as fIn:
            config = json.load(fIn)

        weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
        model = LSTM(**config)
        model.load_state_dict(weights)
        return model
| 2,323 | 35.888889 | 155 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/models/DistilBERT.py | from torch import Tensor
from torch import nn
from transformers import DistilBertModel, DistilBertTokenizer
import json
from typing import Union, Tuple, List, Dict, Optional
import os
import numpy as np
import logging
class DistilBERT(nn.Module):
    """DEPRECATED: Please use models.Transformer instead.

    Wraps a pre-trained DistilBERT model so that every input token is mapped
    to its contextualized output vector.
    """

    def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, model_args: Dict = {}, tokenizer_args: Dict = {}):
        super(DistilBERT, self).__init__()
        self.config_keys = ['max_seq_length', 'do_lower_case']
        self.do_lower_case = do_lower_case

        # The position embeddings cover 512 positions; two are reserved for
        # the special tokens, leaving at most 510 regular tokens.
        if max_seq_length > 510:
            logging.warning("BERT only allows a max_seq_length of 510 (512 with special tokens). Value will be set to 510")
            max_seq_length = 510
        self.max_seq_length = max_seq_length

        if self.do_lower_case is not None:
            tokenizer_args['do_lower_case'] = do_lower_case

        self.bert = DistilBertModel.from_pretrained(model_name_or_path, **model_args)
        self.tokenizer = DistilBertTokenizer.from_pretrained(model_name_or_path, **tokenizer_args)

    def forward(self, features):
        """Returns token_embeddings, cls_token"""
        # DistilBERT does not use token_type_ids
        model_output = self.bert(**features)
        token_embeddings = model_output[0]
        # The embedding at the first position serves as the CLS representation.
        features.update({'token_embeddings': token_embeddings, 'cls_token_embeddings': token_embeddings[:, 0, :], 'attention_mask': features['attention_mask']})

        if len(model_output) > 1:
            features.update({'all_layer_embeddings': model_output[1]})

        return features

    def get_word_embedding_dimension(self) -> int:
        """Dimensionality of the produced token embeddings."""
        return self.bert.config.hidden_size

    def tokenize(self, text: str) -> List[int]:
        """
        Tokenizes a text and maps tokens to token-ids
        """
        return self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))

    def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
        """
        Convert tokenized sentence in its embedding ids, segment ids and mask

        :param tokens:
            a tokenized sentence
        :param pad_seq_length:
            the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
        :return: embedding ids, segment ids and mask for the sentence
        """
        # +2 reserves room for the special tokens the tokenizer adds.
        pad_seq_length = min(pad_seq_length, self.max_seq_length) + 2
        return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)

    def get_config_dict(self):
        """Constructor arguments needed to re-create this module."""
        config = {}
        for key in self.config_keys:
            config[key] = self.__dict__[key]
        return config

    def save(self, output_path: str):
        """Store model, tokenizer and configuration in output_path."""
        self.bert.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)

        with open(os.path.join(output_path, 'sentence_distilbert_config.json'), 'w') as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @staticmethod
    def load(input_path: str):
        """Re-create a DistilBERT module saved with save()."""
        with open(os.path.join(input_path, 'sentence_distilbert_config.json')) as fIn:
            config = json.load(fIn)
        return DistilBERT(model_name_or_path=input_path, **config)
| 3,511 | 38.022222 | 167 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/datasets/SentenceLabelDataset.py | from torch.utils.data import Dataset
from typing import List
import bisect
import torch
import logging
import numpy as np
from tqdm import tqdm
from .. import SentenceTransformer
from ..readers.InputExample import InputExample
from multiprocessing import Pool, cpu_count
import multiprocessing
class SentenceLabelDataset(Dataset):
    """
    Dataset for training with triplet loss.
    This dataset takes a list of sentences grouped by their label and uses this grouping to dynamically select a
    positive example from the same group and a negative example from the other sentences for a selected anchor sentence.

    This dataset should be used in combination with dataset_reader.LabelSentenceReader

    One iteration over this dataset selects every sentence as anchor once.

    This also uses smart batching like SentenceDataset.
    """

    def __init__(self, examples: List[InputExample], model: SentenceTransformer, provide_positive: bool = True,
                 provide_negative: bool = True,
                 parallel_tokenization: bool = True,
                 max_processes: int = 4,
                 chunk_size: int = 5000):
        """
        Converts input examples to a SentenceLabelDataset usable to train the model with
        SentenceTransformer.smart_batching_collate as the collate_fn for the DataLoader

        Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels
        and should be used in combination with dataset_reader.LabelSentenceReader.

        Labels with only one example are ignored.

        smart_batching_collate as collate_fn is required because it transforms the tokenized texts to the tensors.

        :param examples:
            the input examples for the training
        :param model
            the Sentence BERT model for the conversion
        :param provide_positive:
            set this to False, if you don't need a positive example (e.g. for BATCH_HARD_TRIPLET_LOSS).
        :param provide_negative:
            set this to False, if you don't need a negative example (e.g. for BATCH_HARD_TRIPLET_LOSS
            or MULTIPLE_NEGATIVES_RANKING_LOSS).
        :param parallel_tokenization
            If true, multiple processes will be started for the tokenization
        :param max_processes
            Maximum number of processes started for tokenization. Cannot be larger can cpu_count()
        :param chunk_size
            #chunk_size number of examples are send to each process. Larger values increase overall tokenization speed
        """
        self.model = model
        # Exclusive right borders of each label group inside grouped_inputs
        # (filled by convert_input_examples; used by __getitem__ via bisect).
        self.groups_right_border = []
        self.grouped_inputs = []
        self.grouped_labels = []
        self.num_labels = 0
        self.max_processes = min(max_processes, cpu_count())
        self.chunk_size = chunk_size
        self.parallel_tokenization = parallel_tokenization

        if self.parallel_tokenization:
            # Multi-process tokenization relies on fork(); fall back to
            # sequential tokenization when fork is not the start method.
            if multiprocessing.get_start_method() != 'fork':
                logging.info("Parallel tokenization is only available on Unix systems which allow to fork processes. Fall back to sequential tokenization")
                self.parallel_tokenization = False

        self.convert_input_examples(examples, model)

        # Index array over the grouped inputs; used for positive/negative sampling.
        self.idxs = np.arange(len(self.grouped_inputs))

        self.provide_positive = provide_positive
        self.provide_negative = provide_negative

    def convert_input_examples(self, examples: List[InputExample], model: SentenceTransformer):
        """
        Converts input examples to a SentenceLabelDataset.

        Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels
        and should be used in combination with dataset_reader.LabelSentenceReader.

        Labels with only one example are ignored.

        :param examples:
            the input examples for the training
        :param model
            the Sentence Transformer model for the conversion
        """
        inputs = []
        labels = []

        label_sent_mapping = {}
        too_long = 0
        label_type = None

        logging.info("Start tokenization")
        if not self.parallel_tokenization or self.max_processes == 1 or len(examples) <= self.chunk_size:
            tokenized_texts = [self.tokenize_example(example) for example in examples]
        else:
            logging.info("Use multi-process tokenization with {} processes".format(self.max_processes))
            # Move the model to CPU before forking so the workers do not
            # inherit GPU state.
            self.model.to('cpu')
            with Pool(self.max_processes) as p:
                tokenized_texts = list(p.imap(self.tokenize_example, examples, chunksize=self.chunk_size))

        # Group examples and labels
        # Add examples with the same label to the same dict
        for ex_index, example in enumerate(tqdm(examples, desc="Convert dataset")):
            if label_type is None:
                # Infer the label tensor dtype from the first example seen.
                if isinstance(example.label, int):
                    label_type = torch.long
                elif isinstance(example.label, float):
                    label_type = torch.float
            tokenized_text = tokenized_texts[ex_index][0]

            if hasattr(model, 'max_seq_length') and model.max_seq_length is not None and model.max_seq_length > 0 and len(tokenized_text) > model.max_seq_length:
                too_long += 1

            if example.label in label_sent_mapping:
                label_sent_mapping[example.label].append(ex_index)
            else:
                label_sent_mapping[example.label] = [ex_index]

            inputs.append(tokenized_text)
            labels.append(example.label)

        # Group sentences, such that sentences with the same label
        # are besides each other. Only take labels with at least 2 examples
        distinct_labels = list(label_sent_mapping.keys())
        for i in range(len(distinct_labels)):
            label = distinct_labels[i]
            if len(label_sent_mapping[label]) >= 2:
                self.grouped_inputs.extend([inputs[j] for j in label_sent_mapping[label]])
                self.grouped_labels.extend([labels[j] for j in label_sent_mapping[label]])
                self.groups_right_border.append(len(self.grouped_inputs))  # At which position does this label group / bucket end?
                self.num_labels += 1

        self.grouped_labels = torch.tensor(self.grouped_labels, dtype=label_type)
        logging.info("Num sentences: %d" % (len(self.grouped_inputs)))
        logging.info("Sentences longer than max_seqence_length: {}".format(too_long))
        logging.info("Number of labels with >1 examples: {}".format(len(distinct_labels)))

    def tokenize_example(self, example):
        """Tokenize one InputExample, reusing pre-tokenized texts when present."""
        if example.texts_tokenized is not None:
            return example.texts_tokenized

        return [self.model.tokenize(text) for text in example.texts]

    def __getitem__(self, item):
        """Return ([anchor, positive, negative], label) for the given anchor index.

        Positive/negative entries are empty lists when disabled via the
        provide_positive / provide_negative flags.
        """
        if not self.provide_positive and not self.provide_negative:
            return [self.grouped_inputs[item]], self.grouped_labels[item]

        # Anchor element
        anchor = self.grouped_inputs[item]

        # Check start and end position for this label in our list of grouped sentences
        group_idx = bisect.bisect_right(self.groups_right_border, item)
        left_border = 0 if group_idx == 0 else self.groups_right_border[group_idx - 1]
        right_border = self.groups_right_border[group_idx]

        if self.provide_positive:
            # Positive: another example from the same label group (excluding the anchor itself).
            positive_item_idx = np.random.choice(np.concatenate([self.idxs[left_border:item], self.idxs[item + 1:right_border]]))
            positive = self.grouped_inputs[positive_item_idx]
        else:
            positive = []

        if self.provide_negative:
            # Negative: any example outside the anchor's label group.
            negative_item_idx = np.random.choice(np.concatenate([self.idxs[0:left_border], self.idxs[right_border:]]))
            negative = self.grouped_inputs[negative_item_idx]
        else:
            negative = []

        return [anchor, positive, negative], self.grouped_labels[item]

    def __len__(self):
        return len(self.grouped_inputs)
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/datasets/SentencesDataset.py | from torch.utils.data import Dataset
from typing import List
import torch
from .. import SentenceTransformer
from ..readers.InputExample import InputExample
class SentencesDataset(Dataset):
    """
    Dataset for smart batching, that is each batch is only padded to its longest sequence instead of padding all
    sequences to the max length.
    The SentenceBertEncoder.smart_batching_collate is required for this to work.
    SmartBatchingDataset does *not* work without it.
    """

    def __init__(self,
                 examples: List[InputExample],
                 model: SentenceTransformer
                 ):
        """
        Create a new SentencesDataset with the tokenized texts and the labels as Tensor

        :param examples
            A list of sentence.transformers.readers.InputExample
        :param model:
            SentenceTransformerModel
        """
        self.model = model
        self.examples = examples
        # Integer labels become class ids (long), everything else a float target.
        self.label_type = torch.long if isinstance(self.examples[0].label, int) else torch.float

    def __getitem__(self, item):
        example = self.examples[item]
        label = torch.tensor(example.label, dtype=self.label_type)

        # Tokenize lazily on first access and cache the result on the example.
        if example.texts_tokenized is None:
            example.texts_tokenized = [self.model.tokenize(text) for text in example.texts]

        return example.texts_tokenized, label

    def __len__(self):
        return len(self.examples)
| 1,443 | 34.219512 | 115 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/datasets/EncodeDataset.py | from torch.utils.data import Dataset
from typing import List, Union
from .. import SentenceTransformer
class EncodeDataset(Dataset):
    def __init__(self,
                 sentences: Union[List[str], List[int]],
                 model: SentenceTransformer,
                 is_tokenized: bool = True):
        """
        EncodeDataset is used by SentenceTransformer.encode method. It just stores
        the input texts and returns a tokenized version of it.
        """
        self.model = model
        self.sentences = sentences
        self.is_tokenized = is_tokenized

    def __getitem__(self, item):
        sentence = self.sentences[item]
        # Already-tokenized input is passed through untouched.
        if self.is_tokenized:
            return sentence
        return self.model.tokenize(sentence)

    def __len__(self):
        return len(self.sentences)
| 777 | 28.923077 | 103 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/datasets/ParallelSentencesDataset.py | from torch.utils.data import Dataset
import logging
import gzip
from queue import Queue
from .. import SentenceTransformer
from typing import List
import random
class ParallelSentencesDataset(Dataset):
"""
This dataset reader can be used to read-in parallel sentences, i.e., it reads in a file with tab-seperated sentences with the same
sentence in different languages. For example, the file can look like this (EN\tDE\tES):
hello world hallo welt hola mundo
second sentence zweiter satz segunda oración
The sentence in the first column will be mapped to a sentence embedding using the given the embedder. For example,
embedder is a mono-lingual sentence embedding method for English. The sentences in the other languages will also be
mapped to this English sentence embedding.
When getting a sample from the dataset, we get one sentence with the according sentence embedding for this sentence.
teacher_model can be any class that implement an encode function. The encode function gets a list of sentences and
returns a list of sentence embeddings
"""
def __init__(self, student_model: SentenceTransformer, teacher_model: SentenceTransformer, batch_size: int = 8, use_embedding_cache: bool = True):
"""
Parallel sentences dataset reader to train student model given a teacher model
:param student_model: Student sentence embedding model that should be trained
:param teacher_model: Teacher model, that provides the sentence embeddings for the first column in the dataset file
"""
self.student_model = student_model
self.teacher_model = teacher_model
self.datasets = []
self.datasets_iterator = []
self.datasets_tokenized = []
self.dataset_indices = []
self.copy_dataset_indices = []
self.cache = []
self.batch_size = batch_size
self.use_embedding_cache = use_embedding_cache
self.embedding_cache = {}
self.num_sentences = 0
def load_data(self, filepath: str, weight: int = 100, max_sentences: int = None, max_sentence_length: int = 128):
"""
Reads in a tab-seperated .txt/.csv/.tsv or .gz file. The different columns contain the different translations of the sentence in the first column
:param filepath: Filepath to the file
:param weight: If more that one dataset is loaded with load_data: With which frequency should data be sampled from this dataset?
:param max_sentences: Max number of lines to be read from filepath
:param max_sentence_length: Skip the example if one of the sentences is has more characters than max_sentence_length
:param batch_size: Size for encoding parallel sentences
:return:
"""
logging.info("Load "+filepath)
parallel_sentences = []
with gzip.open(filepath, 'rt', encoding='utf8') if filepath.endswith('.gz') else open(filepath, encoding='utf8') as fIn:
count = 0
for line in fIn:
sentences = line.strip().split("\t")
if max_sentence_length is not None and max_sentence_length > 0 and max([len(sent) for sent in sentences]) > max_sentence_length:
continue
parallel_sentences.append(sentences)
count += 1
if max_sentences is not None and max_sentences > 0 and count >= max_sentences:
break
self.add_dataset(parallel_sentences, weight=weight, max_sentences=max_sentences, max_sentence_length=max_sentence_length)
def add_dataset(self, parallel_sentences: List[List[str]], weight: int = 1000, max_sentences: int = None, max_sentence_length: int = 128):
sentences_map = {}
for sentences in parallel_sentences:
if max_sentence_length is not None and max_sentence_length > 0 and max([len(sent) for sent in sentences]) > max_sentence_length:
continue
source_sentence = sentences[0]
if source_sentence not in sentences_map:
sentences_map[source_sentence] = set()
for sent in sentences:
sentences_map[source_sentence].add(sent)
if max_sentences is not None and max_sentences > 0 and len(sentences_map) >= max_sentences:
break
if len(sentences_map) == 0:
return
self.num_sentences += sum([len(sentences_map[sent]) for sent in sentences_map])
dataset_id = len(self.datasets)
self.datasets.append(list(sentences_map.items()))
self.datasets_iterator.append(0)
self.datasets_tokenized.append(False)
self.dataset_indices.extend([dataset_id] * weight)
def generate_data(self):
source_sentences_list = []
target_sentences_list = []
for data_idx in self.dataset_indices:
src_sentence, trg_sentences = self.next_entry(data_idx)
source_sentences_list.append(src_sentence)
target_sentences_list.append(trg_sentences)
#Generate embeddings
src_embeddings = self.get_embeddings(source_sentences_list)
for src_embedding, trg_sentences in zip(src_embeddings, target_sentences_list):
for trg_sentence in trg_sentences:
self.cache.append([[trg_sentence], src_embedding])
random.shuffle(self.cache)
def next_entry(self, data_idx):
source, target_sentences = self.datasets[data_idx][self.datasets_iterator[data_idx]]
if not self.datasets_tokenized[data_idx]:
target_sentences = [self.student_model.tokenize(sent) for sent in target_sentences]
self.datasets[data_idx][self.datasets_iterator[data_idx]] = [source, target_sentences]
self.datasets_iterator[data_idx] += 1
if self.datasets_iterator[data_idx] >= len(self.datasets[data_idx]): #Restart iterator
self.datasets_iterator[data_idx] = 0
self.datasets_tokenized[data_idx] = True
random.shuffle(self.datasets[data_idx])
return source, target_sentences
def get_embeddings(self, sentences):
if not self.use_embedding_cache:
return self.teacher_model.encode(sentences, batch_size=self.batch_size, show_progress_bar=False, convert_to_numpy=False)
#Use caching
new_sentences = []
for sent in sentences:
if sent not in self.embedding_cache:
new_sentences.append(sent)
if len(new_sentences) > 0:
new_embeddings = self.teacher_model.encode(new_sentences, batch_size=self.batch_size, show_progress_bar=False, convert_to_numpy=False)
for sent, embedding in zip(new_sentences, new_embeddings):
self.embedding_cache[sent] = embedding
return [self.embedding_cache[sent] for sent in sentences]
def __len__(self):
    # Total number of target sentences aggregated over all added datasets
    # (accumulated in add_dataset); this defines the epoch length.
    return self.num_sentences
def __getitem__(self, idx):
    # NOTE: ``idx`` is ignored; examples are served from an internal cache
    # that is lazily refilled (and shuffled) whenever it runs empty.
    if len(self.cache) == 0:
        self.generate_data()
    return self.cache.pop()
# ===== sentence_transformers/datasets/sampler/LabelSampler.py =====
"""
This file contains sampler functions, that can be used to sample mini-batches with specific properties.
"""
from torch.utils.data import Sampler
import numpy as np
from ...datasets import SentenceLabelDataset
class LabelSampler(Sampler):
    """
    This sampler is used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS
    or MULTIPLE_NEGATIVES_RANKING_LOSS which require multiple or only one sample from one label per batch.

    It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.

    Labels with fewer than n unique samples are ignored.
    This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.

    This *DOES NOT* check if there are more labels than the batch is large or if the batch size is divisible
    by the samples drawn per label.
    """
    def __init__(self, data_source: SentenceLabelDataset, samples_per_label: int = 5,
                 with_replacement: bool = False):
        """
        Creates a LabelSampler for a SentenceLabelDataset.

        :param data_source:
            the dataset from which samples are drawn
        :param samples_per_label:
            the number of consecutive, random and unique samples drawn per label
        :param with_replacement:
            if this is True, the same sample can be drawn in multiple draws (the selection is not
            reduced by previously drawn samples), although never twice within the same draw.
            if this is False, each sample is drawn at most once per pass over all labels.
        """
        super().__init__(data_source)
        self.data_source = data_source
        self.samples_per_label = samples_per_label
        # Labels are identified by index; shuffled so each pass visits them in random order.
        self.label_range = np.arange(data_source.num_labels)
        # borders[i] is the exclusive right border of label i's contiguous sample range.
        self.borders = data_source.groups_right_border
        self.with_replacement = with_replacement
        np.random.shuffle(self.label_range)

    def __iter__(self):
        label_idx = 0
        count = 0
        already_seen = {}
        while count < len(self.data_source):
            label = self.label_range[label_idx]
            if label not in already_seen:
                already_seen[label] = set()
            # Samples of this label occupy the index range [left_border, right_border).
            left_border = 0 if label == 0 else self.borders[label-1]
            right_border = self.borders[label]
            if self.with_replacement:
                selection = np.arange(left_border, right_border)
            else:
                selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
            if len(selection) >= self.samples_per_label:
                for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
                    count += 1
                    already_seen[label].add(element_idx)
                    yield element_idx
            label_idx += 1
            if label_idx >= len(self.label_range):
                # Completed one pass over all labels: reset state and reshuffle.
                label_idx = 0
                already_seen = {}
                np.random.shuffle(self.label_range)

    def __len__(self):
        return len(self.data_source)
# ===== sentence_transformers/losses/CosineSimilarityLoss.py =====
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
    """
    Trains the model so that the cosine similarity of two sentence embeddings
    matches a gold similarity score.

    Each InputExample must hold two texts and a float label.  The texts are
    encoded to embeddings u and v, and the loss
    ``loss_fct(cos_score_transformation(cosine_sim(u, v)), label)`` is minimized.
    With the defaults this is ``||input_label - cosine_sim(u, v)||_2``.

    :param model: SentenceTranformer model
    :param loss_fct: Which pytorch loss function should be used to compare the cosine_similartiy(u,v) with the input_label? By default, MSE: ||input_label - cosine_sim(u,v)||_2
    :param cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity. By default, the identify function is used (i.e. no change).

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, InputExample, losses

        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
            InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.CosineSimilarityLoss(model=model)
    """
    def __init__(self, model: SentenceTransformer, loss_fct = nn.MSELoss(), cos_score_transformation=nn.Identity()):
        super(CosineSimilarityLoss, self).__init__()
        self.model = model
        self.loss_fct = loss_fct
        self.cos_score_transformation = cos_score_transformation

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        # Encode both sides of the pair to sentence embeddings.
        reps = [self.model(features)['sentence_embedding'] for features in sentence_features]
        similarity = torch.cosine_similarity(reps[0], reps[1])
        predicted_scores = self.cos_score_transformation(similarity)
        return self.loss_fct(predicted_scores, labels.view(-1))
# ===== sentence_transformers/losses/MSELoss.py =====
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
class MSELoss(nn.Module):
    """
    Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
    is used when extending sentence embeddings to new languages as described in our publication
    Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813

    For an example, see the documentation on extending language models to new languages.

    :param model: the (student) model whose sentence embeddings are trained to
        match the target embeddings passed as ``labels`` to :meth:`forward`.
    """
    def __init__(self, model):
        super(MSELoss, self).__init__()
        self.model = model
        # Create the criterion once here instead of re-instantiating it on
        # every forward() call, as the old implementation did.
        self.loss_fct = nn.MSELoss()

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        """
        :param sentence_features: input features; only the first element is encoded.
        :param labels: target embeddings with the same shape as the model output.
        :return: scalar MSE between the model embeddings and the targets.
        """
        rep = self.model(sentence_features[0])['sentence_embedding']
        return self.loss_fct(rep, labels)
# ===== sentence_transformers/losses/TripletLoss.py =====
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
    """
    The metric for the triplet loss
    """
    # NOTE: function-valued attributes in an Enum body are treated as methods,
    # not converted to enum members, so these lambdas are accessed as plain
    # callables, e.g. ``TripletDistanceMetric.EUCLIDEAN(x, y)``.
    COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
    EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
    MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
    """
    Triplet loss over (anchor, positive, negative) sentence triplets.

    The loss pulls the anchor towards the positive while pushing it away from
    the negative, computing:

        loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)

    Margin is an important hyperparameter and needs to be tuned respectively.

    For further details, see: https://en.wikipedia.org/wiki/Triplet_loss

    :param model: SentenceTransformerModel
    :param distance_metric: Function to compute distance between two embeddings. The class TripletDistanceMetric contains common distance metrices that can be used.
    :param triplet_margin: The negative should be at least this much further away from the anchor than the positive.

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
        from sentence_transformers.readers import InputExample

        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['Anchor 1', 'Positive 1', 'Negative 1']),
            InputExample(texts=['Anchor 2', 'Positive 2', 'Negative 2'])]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.TripletLoss(model=model)
    """
    def __init__(self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5):
        super(TripletLoss, self).__init__()
        self.model = model
        self.distance_metric = distance_metric
        self.triplet_margin = triplet_margin

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        # Encode anchor, positive and negative sentences; exactly three are expected.
        embeddings = [self.model(features)['sentence_embedding'] for features in sentence_features]
        anchor, positive, negative = embeddings

        dist_to_positive = self.distance_metric(anchor, positive)
        dist_to_negative = self.distance_metric(anchor, negative)
        hinge = F.relu(dist_to_positive - dist_to_negative + self.triplet_margin)
        return hinge.mean()
# ===== sentence_transformers/losses/BatchHardSoftMarginTripletLoss.py =====
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchHardSoftMarginTripletLoss(BatchHardTripletLoss):
    """
    BatchHardSoftMarginTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
    triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. The labels
    must be integers, with same label indicating sentences from the same class. You train dataset
    must contain at least 2 examples per label class. The margin is computed automatically.

    Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
    Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
    Blog post: https://omoindrot.github.io/triplet-loss

    :param model: SentenceTransformer model
    :param distance_metric: Function that returns a distance between two emeddings. The class SiameseDistanceMetric contains pre-defined metrices that can be used

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
        from sentence_transformers.readers import InputExample

        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
            InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.BatchHardSoftMarginTripletLoss(model=model)
    """
    def __init__(self, model: SentenceTransformer, distance_metric=BatchHardTripletLossDistanceFunction.eucledian_distance):
        super(BatchHardSoftMarginTripletLoss, self).__init__(model)
        self.sentence_embedder = model
        self.distance_metric = distance_metric

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        # Only reps[0] is used: each example is one sentence plus its class label.
        reps = [self.sentence_embedder(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
        return self.batch_hard_triplet_soft_margin_loss(labels, reps[0])

    # Hard Triplet Loss with Soft Margin
    # Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
    def batch_hard_triplet_soft_margin_loss(self, labels: Tensor, embeddings: Tensor) -> Tensor:
        """Build the triplet loss over a batch of embeddings.
        For each anchor, we get the hardest positive and hardest negative to form a triplet.

        Args:
            labels: labels of the batch, of size (batch_size,)
            embeddings: tensor of shape (batch_size, embed_dim)

        Returns:
            Label_Sentence_Triplet: scalar tensor containing the triplet loss
        """
        # Get the pairwise distance matrix
        pairwise_dist = self.distance_metric(embeddings)

        # For each anchor, get the hardest positive
        # First, we need to get a mask for every valid positive (they should have same label)
        mask_anchor_positive = BatchHardTripletLoss.get_anchor_positive_triplet_mask(labels).float()

        # We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
        anchor_positive_dist = mask_anchor_positive * pairwise_dist

        # shape (batch_size, 1)
        hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)

        # For each anchor, get the hardest negative
        # First, we need to get a mask for every valid negative (they should have different labels)
        mask_anchor_negative = BatchHardTripletLoss.get_anchor_negative_triplet_mask(labels).float()

        # We add the maximum value in each row to the invalid negatives (label(a) == label(n))
        max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)
        anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)

        # shape (batch_size,)
        hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)

        # Combine biggest d(a, p) and smallest d(a, n) into final triplet loss with soft margin,
        # i.e. log(1 + exp(d_pos - d_neg)) instead of the hard hinge below:
        #tl = hardest_positive_dist - hardest_negative_dist + margin
        #tl[tl < 0] = 0
        tl = torch.log1p(torch.exp(hardest_positive_dist - hardest_negative_dist))
        triplet_loss = tl.mean()

        return triplet_loss
# ===== sentence_transformers/losses/BatchHardTripletLoss.py =====
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchHardTripletLossDistanceFunction:
    """
    Collection of distance functions that can be used with
    Batch[All/Hard/SemiHard]TripletLoss.
    """
    @staticmethod
    def cosine_distance(embeddings):
        """
        Compute the 2D matrix of cosine distances (1-cosine_similarity) between all embeddings.
        """
        return 1 - util.pytorch_cos_sim(embeddings, embeddings)

    @staticmethod
    def eucledian_distance(embeddings, squared=False):
        """
        Compute the 2D matrix of eucledian distances between all the embeddings.

        Args:
            embeddings: tensor of shape (batch_size, embed_dim)
            squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
                     If false, output is the pairwise euclidean distance matrix.

        Returns:
            pairwise_distances: tensor of shape (batch_size, batch_size)
        """
        # Gram matrix of dot products; its diagonal holds the squared L2 norms,
        # which is numerically stabler than computing norms separately (the
        # diagonal of the result will be exactly 0).
        gram = torch.matmul(embeddings, embeddings.t())
        squared_norms = torch.diag(gram)

        # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2, via broadcasting.
        distances = squared_norms.unsqueeze(0) - 2.0 * gram + squared_norms.unsqueeze(1)

        # Rounding errors may produce tiny negative values; clip them to zero.
        distances = torch.clamp(distances, min=0.0)

        if squared:
            return distances

        # sqrt has an infinite gradient at exactly 0 (e.g. on the diagonal), so
        # shift zero entries by a small epsilon before taking the root and zero
        # them out again afterwards.
        zero_mask = distances.eq(0).float()
        distances = (1.0 - zero_mask) * torch.sqrt(distances + zero_mask * 1e-16)
        return distances
class BatchHardTripletLoss(nn.Module):
    """
    BatchHardTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
    triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. It then looks
    for the hardest positive and the hardest negatives.
    The labels must be integers, with same label indicating sentences from the same class. You train dataset
    must contain at least 2 examples per label class. The margin is computed automatically.

    Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
    Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
    Blog post: https://omoindrot.github.io/triplet-loss

    :param model: SentenceTransformer model
    :param distance_metric: Function that returns a distance between two emeddings. The class SiameseDistanceMetric contains pre-defined metrices that can be used

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, losses
        from sentence_transformers.readers import InputExample

        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
            InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.BatchHardTripletLoss(model=model)
    """
    def __init__(self, model: SentenceTransformer, distance_metric = BatchHardTripletLossDistanceFunction.eucledian_distance, margin: float = 5):
        super(BatchHardTripletLoss, self).__init__()
        self.sentence_embedder = model
        self.triplet_margin = margin
        self.distance_metric = distance_metric

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        # Only reps[0] is used: each example is one sentence plus its class label.
        reps = [self.sentence_embedder(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
        return self.batch_hard_triplet_loss(labels, reps[0])

    # Hard Triplet Loss
    # Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
    # Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
    # Blog post: https://omoindrot.github.io/triplet-loss
    def batch_hard_triplet_loss(self, labels: Tensor, embeddings: Tensor) -> Tensor:
        """Build the triplet loss over a batch of embeddings.
        For each anchor, we get the hardest positive and hardest negative to form a triplet.

        Args:
            labels: labels of the batch, of size (batch_size,)
            embeddings: tensor of shape (batch_size, embed_dim)

        Returns:
            Label_Sentence_Triplet: scalar tensor containing the triplet loss
        """
        # Get the pairwise distance matrix
        pairwise_dist = self.distance_metric(embeddings)

        # For each anchor, get the hardest positive
        # First, we need to get a mask for every valid positive (they should have same label)
        mask_anchor_positive = BatchHardTripletLoss.get_anchor_positive_triplet_mask(labels).float()

        # We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
        anchor_positive_dist = mask_anchor_positive * pairwise_dist

        # shape (batch_size, 1)
        hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)

        # For each anchor, get the hardest negative
        # First, we need to get a mask for every valid negative (they should have different labels)
        mask_anchor_negative = BatchHardTripletLoss.get_anchor_negative_triplet_mask(labels).float()

        # We add the maximum value in each row to the invalid negatives (label(a) == label(n))
        max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)
        anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)

        # shape (batch_size,)
        hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)

        # Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
        tl = hardest_positive_dist - hardest_negative_dist + self.triplet_margin
        tl[tl < 0] = 0
        triplet_loss = tl.mean()

        return triplet_loss

    @staticmethod
    def get_triplet_mask(labels):
        """Return a 3D mask where mask[a, p, n] is True iff the triplet (a, p, n) is valid.
        A triplet (i, j, k) is valid if:
            - i, j, k are distinct
            - labels[i] == labels[j] and labels[i] != labels[k]

        Args:
            labels: tf.int32 `Tensor` with shape [batch_size]
        """
        # Check that i, j and k are distinct
        indices_equal = torch.eye(labels.size(0), device=labels.device).bool()
        indices_not_equal = ~indices_equal
        i_not_equal_j = indices_not_equal.unsqueeze(2)
        i_not_equal_k = indices_not_equal.unsqueeze(1)
        j_not_equal_k = indices_not_equal.unsqueeze(0)
        distinct_indices = (i_not_equal_j & i_not_equal_k) & j_not_equal_k

        # Check the label pattern: labels[i] == labels[j] and labels[i] != labels[k]
        label_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
        i_equal_j = label_equal.unsqueeze(2)
        i_equal_k = label_equal.unsqueeze(1)
        valid_labels = ~i_equal_k & i_equal_j

        return valid_labels & distinct_indices

    @staticmethod
    def get_anchor_positive_triplet_mask(labels):
        """Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.

        Args:
            labels: tf.int32 `Tensor` with shape [batch_size]

        Returns:
            mask: tf.bool `Tensor` with shape [batch_size, batch_size]
        """
        # Check that i and j are distinct
        indices_equal = torch.eye(labels.size(0), device=labels.device).bool()
        indices_not_equal = ~indices_equal

        # Check if labels[i] == labels[j]
        # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
        labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)

        return labels_equal & indices_not_equal

    @staticmethod
    def get_anchor_negative_triplet_mask(labels):
        """Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.

        Args:
            labels: tf.int32 `Tensor` with shape [batch_size]

        Returns:
            mask: tf.bool `Tensor` with shape [batch_size, batch_size]
        """
        # Check if labels[i] != labels[k]
        # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
        return ~(labels.unsqueeze(0) == labels.unsqueeze(1))
# ===== sentence_transformers/losses/MultipleNegativesRankingLoss.py =====
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class MultipleNegativesRankingLoss(nn.Module):
    """
    Ranking loss with in-batch negatives.

    A batch consists of sentence pairs (a_1, b_1), ..., (a_n, b_n) where each
    (a_i, b_i) is assumed to be a positive pair and (a_i, b_j) for i != j a
    negative pair.  For every a_i, all other b_j serve as the n-1 negative
    examples, and the negative log-likelihood of softmax-normalized dot-product
    scores is minimized.

    This loss works great for retrieval setups with positive pairs
    (e.g. (query, relevant_doc)), since it samples n-1 negative docs randomly
    in each batch; performance usually increases with larger batch sizes.

    For more information, see: https://arxiv.org/pdf/1705.00652.pdf
    (Efficient Natural Language Response Suggestion for Smart Reply, Section 4.4)

    The error function is equivalent to::

        scores = torch.matmul(embeddings_a, embeddings_b.t())
        labels = torch.tensor(range(len(scores)), dtype=torch.long).to(self.model.device) #Example a[i] should match with b[i]
        cross_entropy_loss = nn.CrossEntropyLoss()
        return cross_entropy_loss(scores, labels)

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
        from sentence_transformers.readers import InputExample

        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['Anchor 1', 'Positive 1']),
            InputExample(texts=['Anchor 2', 'Positive 2'])]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.MultipleNegativesRankingLoss(model=model)
    """
    def __init__(self, model: SentenceTransformer):
        super(MultipleNegativesRankingLoss, self).__init__()
        self.model = model

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        # Encode both columns of the batch; exactly two feature sets are expected.
        embeddings = [self.model(features)['sentence_embedding'] for features in sentence_features]
        anchors, positives = embeddings
        return self.multiple_negatives_ranking_loss(anchors, positives)

    def multiple_negatives_ranking_loss(self, embeddings_a: Tensor, embeddings_b: Tensor):
        """
        :param embeddings_a:
            Tensor of shape (batch_size, embedding_dim)
        :param embeddings_b:
            Tensor of shape (batch_size, embedding_dim)

        :return:
            The scalar loss
        """
        # Dot-product score of every a_i against every b_j.
        scores = torch.matmul(embeddings_a, embeddings_b.t())
        # Mean positive score (diagonal) vs. mean log-partition of each row:
        # their difference is the mean cross-entropy against labels 0..n-1.
        positive_mean = torch.mean(torch.diag(scores))
        log_partition_mean = torch.mean(torch.logsumexp(scores, dim=1))
        return log_partition_mean - positive_mean
# ===== sentence_transformers/losses/BatchAllTripletLoss.py =====
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchAllTripletLoss(nn.Module):
    """
    BatchAllTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
    triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. The labels
    must be integers, with same label indicating sentences from the same class. You train dataset
    must contain at least 2 examples per label class.

    | Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
    | Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
    | Blog post: https://omoindrot.github.io/triplet-loss

    :param model: SentenceTransformer model
    :param distance_metric: Function that returns a distance between two emeddings. The class SiameseDistanceMetric contains pre-defined metrices that can be used
    :param margin: Negative samples should be at least margin further apart from the anchor than the positive.

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, losses
        from sentence_transformers.readers import InputExample

        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
            InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.BatchAllTripletLoss(model=model)
    """
    def __init__(self, model: SentenceTransformer, distance_metric=BatchHardTripletLossDistanceFunction.eucledian_distance, margin: float = 5):
        super(BatchAllTripletLoss, self).__init__()
        self.sentence_embedder = model
        self.triplet_margin = margin
        self.distance_metric = distance_metric

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        # Only reps[0] is used: each example is one sentence plus its class label.
        reps = [self.sentence_embedder(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
        return self.batch_all_triplet_loss(labels, reps[0])

    def batch_all_triplet_loss(self, labels, embeddings):
        """Build the triplet loss over a batch of embeddings.
        We generate all the valid triplets and average the loss over the positive ones.

        Args:
            labels: labels of the batch, of size (batch_size,)
            embeddings: tensor of shape (batch_size, embed_dim)

        Returns:
            Label_Sentence_Triplet: scalar tensor containing the triplet loss
        """
        # Get the pairwise distance matrix
        pairwise_dist = self.distance_metric(embeddings)

        anchor_positive_dist = pairwise_dist.unsqueeze(2)
        anchor_negative_dist = pairwise_dist.unsqueeze(1)

        # Compute a 3D tensor of size (batch_size, batch_size, batch_size)
        # triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
        # Uses broadcasting where the 1st argument has shape (batch_size, batch_size, 1)
        # and the 2nd (batch_size, 1, batch_size)
        triplet_loss = anchor_positive_dist - anchor_negative_dist + self.triplet_margin

        # Put to zero the invalid triplets
        # (where label(a) != label(p) or label(n) == label(a) or a == p)
        mask = BatchHardTripletLoss.get_triplet_mask(labels)
        triplet_loss = mask.float() * triplet_loss

        # Remove negative losses (i.e. the easy triplets)
        triplet_loss[triplet_loss < 0] = 0

        # Count number of positive triplets (where triplet_loss > 0)
        valid_triplets = triplet_loss[triplet_loss > 1e-16]
        num_positive_triplets = valid_triplets.size(0)
        num_valid_triplets = mask.sum()
        # NOTE: computed but unused; kept for parity with the reference implementation.
        fraction_positive_triplets = num_positive_triplets / (num_valid_triplets.float() + 1e-16)

        # Get final mean triplet loss over the positive valid triplets
        triplet_loss = triplet_loss.sum() / (num_positive_triplets + 1e-16)

        return triplet_loss
# ===== sentence_transformers/losses/BatchSemiHardTripletLoss.py =====
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchSemiHardTripletLoss(nn.Module):
"""
BatchSemiHardTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. It then looks
for the semi hard positives and negatives.
The labels must be integers, with same label indicating sentences from the same class. You train dataset
must contain at least 2 examples per label class. The margin is computed automatically.
Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
Blog post: https://omoindrot.github.io/triplet-loss
:param model: SentenceTransformer model
:param distance_metric: Function that returns a distance between two emeddings. The class SiameseDistanceMetric contains pre-defined metrices that can be used
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.BatchSemiHardTripletLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric = BatchHardTripletLossDistanceFunction.eucledian_distance, margin: float = 5):
    """
    :param model: SentenceTransformer model used to embed the sentences
    :param distance_metric: function returning the pairwise distance matrix for a batch of embeddings
    :param margin: margin for the triplet loss
    """
    super(BatchSemiHardTripletLoss, self).__init__()
    self.sentence_embedder = model
    self.margin = margin
    self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
    """Embed the sentences and delegate to the batch semi-hard triplet loss."""
    # Only reps[0] is used: each example is one sentence plus its class label.
    reps = [self.sentence_embedder(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
    return self.batch_semi_hard_triplet_loss(labels, reps[0])
# Semi-Hard Triplet Loss
# Based on: https://github.com/tensorflow/addons/blob/master/tensorflow_addons/losses/triplet.py#L71
# Paper: FaceNet: A Unified Embedding for Face Recognition and Clustering: https://arxiv.org/pdf/1503.03832.pdf
def batch_semi_hard_triplet_loss(self, labels: Tensor, embeddings: Tensor) -> Tensor:
    """Build the triplet loss over a batch of embeddings.
    We generate all the valid triplets and average the loss over the positive ones.

    Args:
        labels: labels of the batch, of size (batch_size,)
        embeddings: tensor of shape (batch_size, embed_dim)

    Returns:
        Label_Sentence_Triplet: scalar tensor containing the triplet loss
    """
    # Turn labels into a (batch_size, 1) column so broadcasting yields pairwise comparisons.
    labels = labels.unsqueeze(1)

    pdist_matrix = self.distance_metric(embeddings)

    adjacency = labels == labels.t()      # True where the pair shares a label
    adjacency_not = ~adjacency            # True where the labels differ

    batch_size = torch.numel(labels)
    pdist_matrix_tile = pdist_matrix.repeat([batch_size, 1])

    # For each (anchor, positive) pair, mark negatives that are further away than the positive.
    mask = adjacency_not.repeat([batch_size, 1]) & (pdist_matrix_tile > torch.reshape(pdist_matrix.t(), [-1, 1]))
    # NOTE(review): ``keepdims`` is the NumPy-style alias of torch's ``keepdim`` —
    # confirm it is accepted by the torch version in use.
    mask_final = torch.reshape(torch.sum(mask, 1, keepdims=True) > 0.0, [batch_size, batch_size])
    mask_final = mask_final.t()

    # negatives_outside: the smallest masked distance, taken per (anchor, positive) row
    # (the classic semi-hard negative: further than the positive, but closest such one).
    negatives_outside = torch.reshape(BatchSemiHardTripletLoss._masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
    negatives_outside = negatives_outside.t()

    # negatives_inside: largest negative distance overall, used when no semi-hard negative exists.
    negatives_inside = BatchSemiHardTripletLoss._masked_maximum(pdist_matrix, adjacency_not)
    negatives_inside = negatives_inside.repeat([1, batch_size])

    semi_hard_negatives = torch.where(mask_final, negatives_outside, negatives_inside)

    loss_mat = (pdist_matrix - semi_hard_negatives) + self.margin

    # Valid positives: same label but not the same element (diagonal removed).
    mask_positives = adjacency.float().to(labels.device) - torch.eye(batch_size, device=labels.device)
    mask_positives = mask_positives.to(labels.device)
    num_positives = torch.sum(mask_positives)

    # Hinge each positive's loss at zero, then average over the valid positives.
    triplet_loss = torch.sum(torch.max(loss_mat * mask_positives, torch.tensor([0.0], device=labels.device))) / num_positives

    return triplet_loss
@staticmethod
def _masked_minimum(data, mask, dim=1):
axis_maximums, _ = data.max(dim, keepdims=True)
masked_minimums = (data - axis_maximums) * mask
masked_minimums, _ = masked_minimums.min(dim, keepdims=True)
masked_minimums += axis_maximums
return masked_minimums
@staticmethod
def _masked_maximum(data, mask, dim=1):
axis_minimums, _ = data.min(dim, keepdims=True)
masked_maximums = (data - axis_minimums) * mask
masked_maximums, _ = masked_maximums.max(dim, keepdims=True)
masked_maximums += axis_minimums
return masked_maximums
| 5,631 | 48.840708 | 162 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/losses/OnlineContrastiveLoss.py | from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from .ContrastiveLoss import SiameseDistanceMetric
from sentence_transformers.SentenceTransformer import SentenceTransformer
class OnlineContrastiveLoss(nn.Module):
    """
    Online contrastive loss. Like ContrastiveLoss, but the loss is computed only over
    hard positive pairs (positives that are far apart) and hard negative pairs
    (negatives that are close). This often performs better than plain ContrastiveLoss.

    :param model: SentenceTransformer model
    :param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
    :param margin: Negative samples (label == 0) should have a distance of at least the margin value.

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
        from sentence_transformers.readers import InputExample

        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
            InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0)]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.OnlineContrastiveLoss(model=model)
    """

    def __init__(self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5):
        super(OnlineContrastiveLoss, self).__init__()
        self.model = model
        self.distance_metric = distance_metric
        self.margin = margin

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False):
        embs = [self.model(features)['sentence_embedding'] for features in sentence_features]
        dists = self.distance_metric(embs[0], embs[1])
        neg_dists = dists[labels == 0]
        pos_dists = dists[labels == 1]
        # Hard negatives: closer than the farthest positive; hard positives: farther
        # than the closest negative (falling back to the other set's mean when a set
        # is too small to provide a threshold).
        hard_negatives = neg_dists[neg_dists < (pos_dists.max() if len(pos_dists) > 1 else neg_dists.mean())]
        hard_positives = pos_dists[pos_dists > (neg_dists.min() if len(neg_dists) > 1 else pos_dists.mean())]
        positive_loss = hard_positives.pow(2).sum()
        negative_loss = F.relu(self.margin - hard_negatives).pow(2).sum()
        return positive_loss + negative_loss
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/losses/ContrastiveLoss.py | from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
    """
    The metric for the contrastive loss

    NOTE(review): lambdas defined inside an Enum body are treated as methods rather
    than enum members, so these attributes resolve to plain callables — which is
    exactly how the losses in this file use them (e.g. as a `distance_metric`
    default that is called directly). Intentional quirk; do not "fix" into values.
    """
    EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)  # L2 distance
    MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)  # L1 distance
    COSINE_DISTANCE = lambda x, y: 1-F.cosine_similarity(x, y)  # 1 - cosine similarity
class ContrastiveLoss(nn.Module):
    """
    Contrastive loss. Takes two texts and a binary label. When label == 1 the
    distance between the two embeddings is minimized; when label == 0 it is pushed
    beyond the margin.
    Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

    :param model: SentenceTransformer model
    :param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
    :param margin: Negative samples (label == 0) should have a distance of at least the margin value.
    :param size_average: Average by the size of the mini-batch.

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses
        from sentence_transformers.readers import InputExample

        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
            InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0)]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.ContrastiveLoss(model=model)
    """

    def __init__(self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5, size_average:bool = True):
        super(ContrastiveLoss, self).__init__()
        self.model = model
        self.distance_metric = distance_metric
        self.margin = margin
        self.size_average = size_average

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        embeddings = [self.model(features)['sentence_embedding'] for features in sentence_features]
        assert len(embeddings) == 2
        anchor, other = embeddings
        distances = self.distance_metric(anchor, other)
        # Positives pull together (squared distance); negatives push past the margin.
        positive_part = labels.float() * distances.pow(2)
        negative_part = (1 - labels).float() * F.relu(self.margin - distances).pow(2)
        losses = 0.5 * (positive_part + negative_part)
        return losses.mean() if self.size_average else losses.sum()
| 2,794 | 44.080645 | 162 | py |
KoSentenceBERT-SKT | KoSentenceBERT-SKT-main/sentence_transformers/losses/SoftmaxLoss.py | import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
class SoftmaxLoss(nn.Module):
    """
    This loss was used in the SBERT publication (https://arxiv.org/abs/1908.10084) to
    train the SentenceTransformer model on NLI data. It puts a softmax classifier on
    top of the (concatenated) outputs of two transformer networks.

    :param model: SentenceTransformer model
    :param sentence_embedding_dimension: Dimension of your sentence embeddings
    :param num_labels: Number of different labels
    :param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
    :param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
    :param concatenation_sent_multiplication: Add u*v for the softmax classifier?

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, losses
        from sentence_transformers.readers import InputExample

        model = SentenceTransformer('distilbert-base-nli-mean-tokens')
        train_examples = [InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
            InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.SoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
    """
    def __init__(self,
                 model: SentenceTransformer,
                 sentence_embedding_dimension: int,
                 num_labels: int,
                 concatenation_sent_rep: bool = True,
                 concatenation_sent_difference: bool = True,
                 concatenation_sent_multiplication: bool = False):
        super(SoftmaxLoss, self).__init__()
        self.model = model
        self.num_labels = num_labels
        self.concatenation_sent_rep = concatenation_sent_rep
        self.concatenation_sent_difference = concatenation_sent_difference
        self.concatenation_sent_multiplication = concatenation_sent_multiplication

        # Width of the classifier input depends on which feature combinations are enabled.
        num_vectors_concatenated = (2 if concatenation_sent_rep else 0) \
            + (1 if concatenation_sent_difference else 0) \
            + (1 if concatenation_sent_multiplication else 0)
        logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
        self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
        reps = [self.model(features)['sentence_embedding'] for features in sentence_features]
        rep_a, rep_b = reps

        parts = []
        if self.concatenation_sent_rep:
            parts.extend([rep_a, rep_b])
        if self.concatenation_sent_difference:
            parts.append(torch.abs(rep_a - rep_b))
        if self.concatenation_sent_multiplication:
            parts.append(rep_a * rep_b)

        output = self.classifier(torch.cat(parts, 1))
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            return loss_fct(output, labels.view(-1))
        # Inference mode: hand back the raw embeddings and the classifier logits.
        return reps, output
torch-adaptive-imle | torch-adaptive-imle-main/cli/synth-cli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch as t
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from torch import Tensor
from imle.ste import ste as my_ste
from imle.imle import imle as my_imle
from imle.aimle import aimle as my_aimle
from imle.target import TargetDistribution, AdaptiveTargetDistribution
from imle.noise import SumOfGammaNoiseDistribution, GumbelNoiseDistribution
from aaai23.synth import distributions, utils, sfe
FIGSIZE = (3.2, 2.5)
# set the colormap and centre the colorbar
class MidpointNormalize(colors.Normalize):
    """
    Colorbar normalisation that anchors a prescribed midpoint at 0.5, so diverging
    colormaps spread evenly to either side of it.
    e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1.
        # Masked values / edge cases are deliberately not handled beyond NaN masking.
        anchors_x = [self.vmin, self.midpoint, self.vmax]
        anchors_y = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors_x, anchors_y), np.isnan(value))
def optim_loop(min_objective, true_objective, lr, momentum_factor, n_steps, theta0, debug_print_loss=False):
    """
    Run SGD-with-momentum on a torch parameter initialised from `theta0`.

    :param min_objective: differentiable objective actually minimised each step
    :param true_objective: objective evaluated for reporting only (no gradient step)
    :param lr: learning rate
    :param momentum_factor: SGD momentum
    :param n_steps: number of optimisation steps
    :param theta0: initial parameter value (numpy array)
    :param debug_print_loss: print the stochastic objective at every step
    :return: (per-step stochastic objective values, per-step true objective values)
    """
    theta = t.from_numpy(theta0).float().requires_grad_(True)
    optimizer = t.optim.SGD([theta], lr, momentum=momentum_factor)
    stoch_curve, true_curve = [], []
    for _ in range(n_steps):
        optimizer.zero_grad()
        loss = min_objective(theta)
        if debug_print_loss:
            print(loss)
        # Record both curves *before* the parameter update.
        stoch_curve.append(loss.detach().numpy())
        true_curve.append(true_objective(theta).detach().numpy())
        loss.backward()
        optimizer.step()
    return stoch_curve, true_curve
def experiment(min_obj, ture_objective, lr, theta0, momentum=0.9, steps=100, n_rp=50, do_plot=True, postprocess=None):
    """
    Repeat the optimisation `n_rp` times and collect the true-objective curves.

    Note: the `ture_objective` parameter name (sic) is kept for caller compatibility;
    it is the true/expected objective forwarded to `optim_loop`.
    """
    curves = []
    for _ in range(n_rp):
        _stoch_curve, true_curve = optim_loop(min_obj, ture_objective, lr, momentum, steps, theta0)
        if postprocess:
            true_curve = postprocess(true_curve)
        curves.append(true_curve)
    if do_plot:
        plt.plot(np.mean(curves, axis=0))
    return curves
def plot_mean_std(histories, names, xs=None):
    """
    Plot the mean curve of each history with a +/- one-standard-deviation band.

    :param histories: list of lists of curves (repetitions x steps)
    :param names: legend label per history
    :param xs: optional shared x-axis values; defaults to range(len(curve))
    """
    means = [np.mean(np.array(his), axis=0) for his in histories]
    std_devs = [np.std(np.array(his), axis=0) for his in histories]
    for h, st, nm in zip(means, std_devs, names):
        x_axis = xs if xs else list(range(len(h)))
        # BUG FIX: was plt.plot(xs, h, ...), which passes None (and crashes)
        # whenever xs is omitted — the computed x_axis fallback was never used.
        line = plt.plot(x_axis, h, label=nm)
        plt.fill_between(x_axis, h - st, h + st, alpha=0.5, color=line[0].get_color())
def do_plots_exp(histories, names, savename=None, figsize=FIGSIZE, min_value_of_exp=None):
    """
    Plot mean +/- std of the optimality gap (history minus the optimum) per method.

    :param histories: list of lists of curves (repetitions x steps)
    :param names: legend label per history
    :param savename: if given, the figure is also saved to this path
    :param figsize: matplotlib figure size
    :param min_value_of_exp: optimum of the expected objective, subtracted from every
        curve. BUG FIX: the default None is now treated as 0.0 instead of raising a
        TypeError on subtraction.
    """
    plt.figure(figsize=figsize)
    offset = 0.0 if min_value_of_exp is None else min_value_of_exp
    means = [np.mean(np.array(his) - offset, axis=0) for his in histories]
    std_devs = [np.std(np.array(his), axis=0) for his in histories]
    for h, st, nm in zip(means, std_devs, names):
        x_axis = list(range(len(h)))
        line = plt.plot(h, label=nm)
        plt.fill_between(x_axis, h - st, h + st, alpha=0.5, color=line[0].get_color())
    plt.legend(loc=0)
    plt.ylim((0., 3.))
    plt.xlim((0, 99))
    plt.xlabel('Optimization steps')
    plt.ylabel('Optimality gap')
    if savename:
        print('Saving plots ..', savename)
        plt.savefig(savename, bbox_inches='tight')
def toy_exp_v2(n, k,
               imle_sog_hyp, imle_gum_hyp,
               aimle_sym_sog_hyp, aimle_sym_gum_hyp,
               aimle_adapt_sog_hyp, aimle_adapt_gum_hyp,
               aimle_sym_adapt_sog_hyp, aimle_sym_adapt_gum_hyp,
               ste_sog_hyp, ste_gum_hyp,
               sfe_hyp, n_rep=50):
    """
    Synthetic top-k experiment comparing STE, I-MLE, AI-MLE (symmetric, adaptive and
    symmetric-adaptive) and SFE estimators, each with Sum-of-Gamma and Gumbel
    perturbations. Saves learning-curve plots to 'SYNTH_v2.pdf' and 'SYNTH2.pdf'.

    Each *_hyp argument is a dict with keys "lr" and "lmd" (grid-search winners).

    BUG FIX: several single-instance wrapper functions previously delegated to the
    WRONG *_batched function (copy-paste errors), so curves labelled "AI-MLE"/"STE"
    were in fact produced by the I-MLE estimator; each wrapper now calls its own
    batched variant. The second plot also zipped 6 histories with 5 labels, silently
    dropping the last curve — the missing label is added.
    """
    rng = np.random.RandomState(0)
    theta = rng.randn(n)
    topk = distributions.TopK(n, k)
    b_t = t.abs(t.from_numpy(rng.randn(n)).float())
    print(b_t)
    sorted_bt = np.sort(b_t.detach().numpy())
    # Optimum of the expected objective, used to report the optimality gap.
    min_value_of_exp = np.sum((sorted_bt[:k])**2) + np.sum((sorted_bt[k:] - 1)**2)
    print(min_value_of_exp)

    def objective(z):
        return ((z - b_t) ** 2).sum()

    # Expected value of the loss under the top-k distribution.
    full_obj = lambda _th: utils.expect_obj(topk, _th, objective)

    exp = lambda strategy, lr, n_rp=n_rep, steps=100: experiment(
        lambda _th: ((strategy(_th) - b_t)**2).sum(),
        full_obj,
        lr,
        theta,
        steps=steps,
        n_rp=n_rp
    )

    # ----------------------------------------------------------------- I-MLE
    target_distribution_sog = TargetDistribution(alpha=1.0, beta=imle_sog_hyp["lmd"], do_gradient_scaling=True)
    target_distribution_gum = TargetDistribution(alpha=1.0, beta=imle_gum_hyp["lmd"], do_gradient_scaling=True)

    sog_distribution = SumOfGammaNoiseDistribution(k=k, nb_iterations=10)
    gum_distribution = GumbelNoiseDistribution()

    @my_imle(target_distribution=target_distribution_sog, noise_distribution=sog_distribution, nb_samples=1,
             theta_noise_temperature=1.0, target_noise_temperature=1.0)
    def imle_topk_sog_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def imle_topk_sog(theta: Tensor) -> Tensor:
        return imle_topk_sog_batched(theta.view(1, -1)).view(-1)

    my_imle_pam_sog_lcs = exp(imle_topk_sog, imle_sog_hyp["lr"])

    @my_imle(target_distribution=target_distribution_gum, noise_distribution=gum_distribution, nb_samples=1,
             theta_noise_temperature=1.0, target_noise_temperature=1.0)
    def imle_topk_gum_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def imle_topk_gum(theta: Tensor) -> Tensor:
        return imle_topk_gum_batched(theta.view(1, -1)).view(-1)

    my_imle_pam_gum_lcs = exp(imle_topk_gum, imle_gum_hyp["lr"])

    # ----------------------------------------------------- AI-MLE (symmetric)
    @my_aimle(target_distribution=target_distribution_sog, noise_distribution=sog_distribution, nb_samples=1,
              theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=True)
    def aimle_sym_topk_sog_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def aimle_sym_topk_sog(theta: Tensor) -> Tensor:
        # BUG FIX: previously called imle_topk_sog_batched
        return aimle_sym_topk_sog_batched(theta.view(1, -1)).view(-1)

    my_aimle_sym_pam_sog_lcs = exp(aimle_sym_topk_sog, aimle_sym_sog_hyp["lr"])

    @my_aimle(target_distribution=target_distribution_gum, noise_distribution=gum_distribution, nb_samples=1,
              theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=True)
    def aimle_sym_topk_gum_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def aimle_sym_topk_gum(theta: Tensor) -> Tensor:
        return aimle_sym_topk_gum_batched(theta.view(1, -1)).view(-1)

    my_aimle_sym_pam_gum_lcs = exp(aimle_sym_topk_gum, aimle_sym_gum_hyp["lr"])

    # ------------------------------------------------------ AI-MLE (adaptive)
    target_distribution_sog_a = AdaptiveTargetDistribution(initial_beta=aimle_adapt_sog_hyp["lmd"])
    target_distribution_gum_a = AdaptiveTargetDistribution(initial_beta=aimle_adapt_gum_hyp["lmd"])

    @my_aimle(target_distribution=target_distribution_sog_a, noise_distribution=sog_distribution, nb_samples=1,
              theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=False)
    def aimle_adapt_topk_sog_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def aimle_adapt_topk_sog(theta: Tensor) -> Tensor:
        # BUG FIX: previously called imle_topk_sog_batched
        return aimle_adapt_topk_sog_batched(theta.view(1, -1)).view(-1)

    my_aimle_adapt_pam_sog_lcs = exp(aimle_adapt_topk_sog, aimle_adapt_sog_hyp["lr"])

    @my_aimle(target_distribution=target_distribution_gum_a, noise_distribution=gum_distribution, nb_samples=1,
              theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=False)
    def aimle_adapt_topk_gum_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def aimle_adapt_topk_gum(theta: Tensor) -> Tensor:
        # BUG FIX: previously called aimle_sym_topk_gum_batched
        return aimle_adapt_topk_gum_batched(theta.view(1, -1)).view(-1)

    my_aimle_adapt_pam_gum_lcs = exp(aimle_adapt_topk_gum, aimle_adapt_gum_hyp["lr"])

    # -------------------------------------------- AI-MLE (symmetric adaptive)
    target_distribution_sog_a = AdaptiveTargetDistribution(initial_beta=aimle_sym_adapt_sog_hyp["lmd"])
    target_distribution_gum_a = AdaptiveTargetDistribution(initial_beta=aimle_sym_adapt_gum_hyp["lmd"])

    @my_aimle(target_distribution=target_distribution_sog_a, noise_distribution=sog_distribution, nb_samples=1,
              theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=True)
    def aimle_sym_adapt_topk_sog_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def aimle_sym_adapt_topk_sog(theta: Tensor) -> Tensor:
        # BUG FIX: previously called imle_topk_sog_batched
        return aimle_sym_adapt_topk_sog_batched(theta.view(1, -1)).view(-1)

    my_aimle_sym_adapt_pam_sog_lcs = exp(aimle_sym_adapt_topk_sog, aimle_sym_adapt_sog_hyp["lr"])

    @my_aimle(target_distribution=target_distribution_gum_a, noise_distribution=gum_distribution, nb_samples=1,
              theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=True)
    def aimle_sym_adapt_topk_gum_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def aimle_sym_adapt_topk_gum(theta: Tensor) -> Tensor:
        # BUG FIX: previously called aimle_sym_topk_gum_batched
        return aimle_sym_adapt_topk_gum_batched(theta.view(1, -1)).view(-1)

    my_aimle_sym_adapt_pam_gum_lcs = exp(aimle_sym_adapt_topk_gum, aimle_sym_adapt_gum_hyp["lr"])

    # ------------------------------------------------------------------- STE
    @my_ste(noise_distribution=sog_distribution, noise_temperature=1.0)
    def ste_topk_sog_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def ste_topk_sog(theta: Tensor) -> Tensor:
        # BUG FIX: previously called imle_topk_sog_batched
        return ste_topk_sog_batched(theta.view(1, -1)).view(-1)

    my_ste_pam_sog_lcs = exp(ste_topk_sog, ste_sog_hyp["lr"])

    @my_ste(noise_distribution=gum_distribution, noise_temperature=1.0)
    def ste_topk_gum_batched(thetas: Tensor) -> Tensor:
        return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

    def ste_topk_gum(theta: Tensor) -> Tensor:
        # BUG FIX: previously called imle_topk_gum_batched
        return ste_topk_gum_batched(theta.view(1, -1)).view(-1)

    my_ste_pam_gum_lcs = exp(ste_topk_gum, ste_gum_hyp["lr"])

    do_plots_exp(
        [
            # my_ste_pam_sog_lcs,
            my_ste_pam_gum_lcs,
            # my_imle_pam_sog_lcs,
            my_imle_pam_gum_lcs,
            # my_aimle_sym_pam_sog_lcs,
            my_aimle_sym_pam_gum_lcs,
            # my_aimle_adapt_pam_sog_lcs,
            my_aimle_adapt_pam_gum_lcs,
            # my_aimle_sym_adapt_pam_sog_lcs,
            my_aimle_sym_adapt_pam_gum_lcs
        ], [
            # 'STE PaM (SoG)',
            'STE PaM (Gum)',
            # 'I-MLE PaM (SoG)',
            'I-MLE PaM (Gum)',
            # 'AI-MLE Sym PaM (SoG)',
            'AI-MLE Sym PaM (Gum)',
            # 'AI-MLE Adapt PaM (SoG)',
            'AI-MLE Adapt PaM (Gum)',
            # 'AI-MLE Sym Adapt PaM (SoG)',
            'AI-MLE Sym Adapt PaM (Gum)'
        ], savename='SYNTH_v2.pdf', figsize=(4, 3), min_value_of_exp=min_value_of_exp)

    # SFE baseline (hyperparameters from sensitivity_sfe): 10x the steps, 1/5 the reps.
    sfe_full = sfe.sfe(topk.sample_f(np.random.RandomState(0)), objective, topk.grad_log_p(topk.marginals))
    sfe_full_lcs = exp(sfe_full, sfe_hyp["lr"], n_rp=n_rep//5, steps=1000)
    ary_sfe = np.array(sfe_full_lcs)

    # final plot!
    do_plots_exp(
        [
            ary_sfe[:, ::10],
            my_ste_pam_gum_lcs,
            my_imle_pam_gum_lcs,
            my_aimle_sym_pam_gum_lcs,
            my_aimle_adapt_pam_gum_lcs,
            my_aimle_sym_adapt_pam_gum_lcs
        ], [
            'SFE (steps x 10)',
            'STE PaM (Gum)',
            'I-MLE PaM (Gum)',
            'AI-MLE Sym PaM (Gum)',
            'AI-MLE Adapt PaM (Gum)',
            # BUG FIX: sixth label added so the last curve is no longer dropped by zip()
            'AI-MLE Sym Adapt PaM (Gum)'
        ], savename='SYNTH2.pdf', min_value_of_exp=min_value_of_exp)
def sensibility_imle_v2(n, k, n_rep=20):
    """
    Grid search over learning rate and target-distribution beta (lambda) for I-MLE
    with Sum-of-Gamma and Gumbel perturbations on the synthetic top-k problem.

    Returns the best {"lr", "lmd", "loss"} dicts for (SoG, Gumbel), judged by the
    final mean optimality gap.

    BUG FIX: the Gumbel wrapper previously delegated to imle_topk_sog_batched, so
    the "Gumbel" column of the grid search actually evaluated SoG noise.
    """
    rng = np.random.RandomState(0)
    theta = rng.randn(n)
    topk = distributions.TopK(n, k)
    b_t = t.abs(t.from_numpy(rng.randn(n)).float())
    sorted_bt = np.sort(b_t.detach().numpy())
    min_value_of_exp = np.sum((sorted_bt[:k])**2) + np.sum((sorted_bt[k:] - 1)**2)

    def objective(z):
        return ((z - b_t)**2).sum()

    full_obj = lambda _th: utils.expect_obj(topk, _th, objective)

    def pp(_his):
        # A final value below the optimum signals a degenerate run; penalise it.
        if _his[-1] - min_value_of_exp < 0.:
            _his[-1] = 5.
        return _his

    exp = lambda strategy, lr, n_rp=n_rep, steps=100: experiment(
        lambda _th: ((strategy(_th) - b_t)**2).sum(),
        full_obj,
        lr, theta, steps=steps, n_rp=n_rp, do_plot=False,
        postprocess=pp
    )

    n_lr, n_lbd = 5, 6
    search_grid_lr = np.linspace(0.5, 1., num=n_lr)
    search_grid_lambda = np.linspace(0.5, 3., num=n_lbd)

    res_sog_mean, res_sog_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))
    res_gum_mean, res_gum_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))

    best_sog, sog_hyp = None, None
    best_gum, gum_hyp = None, None

    for i, lr in enumerate(search_grid_lr):
        for j, lmd in enumerate(search_grid_lambda):
            target_distribution = TargetDistribution(alpha=1.0, beta=lmd, do_gradient_scaling=True)
            sog_distribution = SumOfGammaNoiseDistribution(k=k, nb_iterations=10)
            gum_distribution = GumbelNoiseDistribution()

            @my_imle(target_distribution=target_distribution, noise_distribution=sog_distribution, nb_samples=1,
                     theta_noise_temperature=1.0, target_noise_temperature=1.0)
            def imle_topk_sog_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

            def imle_topk_sog(theta: Tensor) -> Tensor:
                return imle_topk_sog_batched(theta.view(1, -1)).view(-1)

            @my_imle(target_distribution=target_distribution, noise_distribution=gum_distribution, nb_samples=1,
                     theta_noise_temperature=1.0, target_noise_temperature=1.0)
            def imle_topk_gum_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

            def imle_topk_gum(theta: Tensor) -> Tensor:
                # BUG FIX: previously called imle_topk_sog_batched (copy-paste)
                return imle_topk_gum_batched(theta.view(1, -1)).view(-1)

            imle_sog_lcs = exp(imle_topk_sog, lr)
            imle_gum_lcs = exp(imle_topk_gum, lr)

            res_sog_mean[i, j] = np.mean(np.array(imle_sog_lcs) - min_value_of_exp, axis=0)[-1]
            res_sog_std[i, j] = np.std(np.array(imle_sog_lcs), axis=0)[-1]
            if best_sog is None or res_sog_mean[i, j] < best_sog:
                best_sog = res_sog_mean[i, j]
                sog_hyp = {"lr": lr, "lmd": lmd, "loss": best_sog}

            res_gum_mean[i, j] = np.mean(np.array(imle_gum_lcs) - min_value_of_exp, axis=0)[-1]
            res_gum_std[i, j] = np.std(np.array(imle_gum_lcs), axis=0)[-1]
            if best_gum is None or res_gum_mean[i, j] < best_gum:
                best_gum = res_gum_mean[i, j]
                gum_hyp = {"lr": lr, "lmd": lmd, "loss": best_gum}

            print(f'XXX lr: {lr} lmd: {lmd} res_sog {res_sog_mean[i, j]:.5f} res_gum {res_gum_mean[i, j]:.5f}')
    return sog_hyp, gum_hyp
def sensibility_aimle_v2(n, k, is_sym, n_rep=20):
    """
    Grid search over learning rate and target-distribution beta (lambda) for AI-MLE
    (symmetric perturbation toggled by `is_sym`) with Sum-of-Gamma and Gumbel
    perturbations on the synthetic top-k problem.

    Returns the best {"lr", "lmd", "loss"} dicts for (SoG, Gumbel).

    BUG FIX: the Gumbel wrapper previously delegated to imle_topk_sog_batched, so
    the "Gumbel" column of the grid search actually evaluated SoG noise.
    """
    rng = np.random.RandomState(0)
    theta = rng.randn(n)
    topk = distributions.TopK(n, k)
    b_t = t.abs(t.from_numpy(rng.randn(n)).float())
    sorted_bt = np.sort(b_t.detach().numpy())
    min_value_of_exp = np.sum((sorted_bt[:k])**2) + np.sum((sorted_bt[k:] - 1)**2)

    def objective(z):
        return ((z - b_t)**2).sum()

    full_obj = lambda _th: utils.expect_obj(topk, _th, objective)

    def pp(_his):
        # A final value below the optimum signals a degenerate run; penalise it.
        if _his[-1] - min_value_of_exp < 0.:
            _his[-1] = 5.
        return _his

    exp = lambda strategy, lr, n_rp=n_rep, steps=100: experiment(
        lambda _th: ((strategy(_th) - b_t)**2).sum(),
        full_obj,
        lr, theta, steps=steps, n_rp=n_rp, do_plot=False,
        postprocess=pp
    )

    n_lr, n_lbd = 5, 6
    search_grid_lr = np.linspace(0.5, 1., num=n_lr)
    search_grid_lambda = np.linspace(0.5, 3., num=n_lbd)

    res_sog_mean, res_sog_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))
    res_gum_mean, res_gum_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))

    best_sog, sog_hyp = None, None
    best_gum, gum_hyp = None, None

    for i, lr in enumerate(search_grid_lr):
        for j, lmd in enumerate(search_grid_lambda):
            target_distribution = TargetDistribution(alpha=1.0, beta=lmd, do_gradient_scaling=True)
            sog_distribution = SumOfGammaNoiseDistribution(k=k, nb_iterations=10)
            gum_distribution = GumbelNoiseDistribution()

            @my_aimle(target_distribution=target_distribution, noise_distribution=sog_distribution, nb_samples=1,
                      theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=is_sym)
            def imle_topk_sog_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

            def imle_topk_sog(theta: Tensor) -> Tensor:
                return imle_topk_sog_batched(theta.view(1, -1)).view(-1)

            @my_aimle(target_distribution=target_distribution, noise_distribution=gum_distribution, nb_samples=1,
                      theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=is_sym)
            def imle_topk_gum_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

            def imle_topk_gum(theta: Tensor) -> Tensor:
                # BUG FIX: previously called imle_topk_sog_batched (copy-paste)
                return imle_topk_gum_batched(theta.view(1, -1)).view(-1)

            imle_sog_lcs = exp(imle_topk_sog, lr)
            imle_gum_lcs = exp(imle_topk_gum, lr)

            res_sog_mean[i, j] = np.mean(np.array(imle_sog_lcs) - min_value_of_exp, axis=0)[-1]
            res_sog_std[i, j] = np.std(np.array(imle_sog_lcs), axis=0)[-1]
            if best_sog is None or res_sog_mean[i, j] < best_sog:
                best_sog = res_sog_mean[i, j]
                sog_hyp = {"lr": lr, "lmd": lmd, "loss": best_sog}

            res_gum_mean[i, j] = np.mean(np.array(imle_gum_lcs) - min_value_of_exp, axis=0)[-1]
            res_gum_std[i, j] = np.std(np.array(imle_gum_lcs), axis=0)[-1]
            if best_gum is None or res_gum_mean[i, j] < best_gum:
                best_gum = res_gum_mean[i, j]
                gum_hyp = {"lr": lr, "lmd": lmd, "loss": best_gum}

            print(f'XXX lr: {lr} lmd: {lmd} res_sog {res_sog_mean[i, j]:.5f} res_gum {res_gum_mean[i, j]:.5f}')
    return sog_hyp, gum_hyp
def sensibility_aimle_adapt_v2(n, k, n_rep=20):
    """
    Grid search over the learning rate (lambda fixed at the adaptive initial beta 0.0)
    for adaptive-target AI-MLE with Sum-of-Gamma and Gumbel perturbations on the
    synthetic top-k problem. Returns the best {"lr", "lmd", "loss"} dicts for
    (SoG, Gumbel).

    NOTE(review): this function is re-defined immediately below with an extra
    `is_sym` parameter, so this definition is dead code (shadowed at import time).
    """
    rng = np.random.RandomState(0)
    theta = rng.randn(n)
    topk = distributions.TopK(n, k)
    b_t = t.abs(t.from_numpy(rng.randn(n)).float())
    sorted_bt = np.sort(b_t.detach().numpy())
    # Optimum of the expected objective, used to report the optimality gap.
    min_value_of_exp = np.sum((sorted_bt[:k])**2) + np.sum((sorted_bt[k:] - 1)**2)
    def objective(z):
        return ((z - b_t)**2).sum()
    # Expected value of the loss under the top-k distribution.
    full_obj = lambda _th: utils.expect_obj(topk, _th, objective)
    def pp(_his):
        # A final value below the optimum signals a degenerate run; penalise it.
        if _his[-1] - min_value_of_exp < 0.:  # then it's all lost
            _his[-1] = 5.
        return _his
    exp = lambda strategy, lr, n_rp=n_rep, steps=100: experiment(
        lambda _th: ((strategy(_th) - b_t)**2).sum(),
        full_obj,
        lr, theta, steps=steps, n_rp=n_rp, do_plot=False,
        postprocess=pp
    )
    n_lr, n_lbd = 5, 1
    search_grid_lr = np.linspace(0.5, 1., num=n_lr)
    search_grid_lambda = np.array([0.0])  # np.linspace(0.5, 3., num=n_lbd)
    res_sog_mean, res_sog_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))
    res_gum_mean, res_gum_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))
    best_sog, sog_hyp = None, None
    best_gum, gum_hyp = None, None
    for i, lr in enumerate(search_grid_lr):
        for j, lmd in enumerate(search_grid_lambda):
            target_distribution = AdaptiveTargetDistribution(initial_beta=lmd)
            sog_distribution = SumOfGammaNoiseDistribution(k=k, nb_iterations=10)
            gum_distribution = GumbelNoiseDistribution()
            @my_aimle(target_distribution=target_distribution, noise_distribution=sog_distribution, nb_samples=1,
                      theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=False)
            def imle_topk_sog_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])
            def imle_topk_sog(theta: Tensor) -> Tensor:
                return imle_topk_sog_batched(theta.view(1, -1)).view(-1)
            @my_aimle(target_distribution=target_distribution, noise_distribution=gum_distribution, nb_samples=1,
                      theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=False)
            def imle_topk_gum_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])
            def imle_topk_gum(theta: Tensor) -> Tensor:
                return imle_topk_gum_batched(theta.view(1, -1)).view(-1)
            imle_sog_lcs = exp(imle_topk_sog, lr)
            imle_gum_lcs = exp(imle_topk_gum, lr)
            # Track the final-step mean optimality gap; keep the best hyperparameters.
            res_sog_mean[i, j] = np.mean(np.array(imle_sog_lcs) - min_value_of_exp, axis=0)[-1]
            res_sog_std[i, j] = np.std(np.array(imle_sog_lcs), axis=0)[-1]
            if best_sog is None or res_sog_mean[i, j] < best_sog:
                best_sog = res_sog_mean[i, j]
                sog_hyp = {"lr": lr, "lmd": lmd, "loss": best_sog}
            res_gum_mean[i, j] = np.mean(np.array(imle_gum_lcs) - min_value_of_exp, axis=0)[-1]
            res_gum_std[i, j] = np.std(np.array(imle_gum_lcs), axis=0)[-1]
            if best_gum is None or res_gum_mean[i, j] < best_gum:
                best_gum = res_gum_mean[i, j]
                gum_hyp = {"lr": lr, "lmd": lmd, "loss": best_gum}
            print(f'XXX lr: {lr} lmd: {lmd} res_sog {res_sog_mean[i, j]:.5f} res_gum {res_gum_mean[i, j]:.5f}')
    return sog_hyp, gum_hyp
def sensibility_aimle_adapt_v2(n, k, is_sym, n_rep=20):
    """Grid-search the learning rate for AIMLE with an adaptive target
    distribution on the synthetic top-k problem.

    Args:
        n: dimensionality of the parameter vector theta.
        k: number of selected components in the top-k distribution.
        is_sym: use the symmetric (central) perturbation if True.
        n_rep: repetitions per hyper-parameter configuration.

    Returns:
        (sog_hyp, gum_hyp): best {"lr", "lmd", "loss"} dicts found for
        Sum-of-Gamma and Gumbel noise, respectively.
    """
    rng = np.random.RandomState(0)
    theta = rng.randn(n)
    topk = distributions.TopK(n, k)
    b_t = t.abs(t.from_numpy(rng.randn(n)).float())
    # Analytic minimum of the expected objective; used to shift learning curves.
    sorted_bt = np.sort(b_t.detach().numpy())
    min_value_of_exp = np.sum((sorted_bt[:k])**2) + np.sum((sorted_bt[k:] - 1)**2)

    def objective(z):
        return ((z - b_t)**2).sum()

    full_obj = lambda _th: utils.expect_obj(topk, _th, objective)

    def pp(_his):
        # Penalise final losses that (impossibly) dip below the true minimum.
        if _his[-1] - min_value_of_exp < 0.:  # then it's all lost
            _his[-1] = 5.
        return _his

    exp = lambda strategy, lr, n_rp=n_rep, steps=100: experiment(
        lambda _th: ((strategy(_th) - b_t)**2).sum(),
        full_obj,
        lr, theta, steps=steps, n_rp=n_rp, do_plot=False,
        postprocess=pp
    )
    n_lr, n_lbd = 5, 1
    search_grid_lr = np.linspace(0.5, 1., num=n_lr)
    search_grid_lambda = np.array([0.0])  # np.linspace(0.5, 3., num=n_lbd)
    res_sog_mean, res_sog_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))
    res_gum_mean, res_gum_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))
    best_sog, sog_hyp = None, None
    best_gum, gum_hyp = None, None
    for i, lr in enumerate(search_grid_lr):
        for j, lmd in enumerate(search_grid_lambda):
            # NOTE(review): the adaptive target distribution is shared between
            # the SoG and Gumbel estimators, so both update its internal state
            # — confirm this is intended.
            target_distribution = AdaptiveTargetDistribution(initial_beta=lmd)
            sog_distribution = SumOfGammaNoiseDistribution(k=k, nb_iterations=10)
            gum_distribution = GumbelNoiseDistribution()

            @my_aimle(target_distribution=target_distribution, noise_distribution=sog_distribution, nb_samples=1,
                      theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=is_sym)
            def imle_topk_sog_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

            def imle_topk_sog(theta: Tensor) -> Tensor:
                return imle_topk_sog_batched(theta.view(1, -1)).view(-1)

            @my_aimle(target_distribution=target_distribution, noise_distribution=gum_distribution, nb_samples=1,
                      theta_noise_temperature=1.0, target_noise_temperature=1.0, symmetric_perturbation=is_sym)
            def imle_topk_gum_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

            def imle_topk_gum(theta: Tensor) -> Tensor:
                # BUG FIX: this previously delegated to imle_topk_sog_batched,
                # so the "Gumbel" run silently re-evaluated the SoG estimator
                # (compare the non-adaptive variant, which calls the gum one).
                return imle_topk_gum_batched(theta.view(1, -1)).view(-1)

            imle_sog_lcs = exp(imle_topk_sog, lr)
            imle_gum_lcs = exp(imle_topk_gum, lr)
            res_sog_mean[i, j] = np.mean(np.array(imle_sog_lcs) - min_value_of_exp, axis=0)[-1]
            res_sog_std[i, j] = np.std(np.array(imle_sog_lcs), axis=0)[-1]
            if best_sog is None or res_sog_mean[i, j] < best_sog:
                best_sog = res_sog_mean[i, j]
                sog_hyp = {"lr": lr, "lmd": lmd, "loss": best_sog}
            res_gum_mean[i, j] = np.mean(np.array(imle_gum_lcs) - min_value_of_exp, axis=0)[-1]
            res_gum_std[i, j] = np.std(np.array(imle_gum_lcs), axis=0)[-1]
            if best_gum is None or res_gum_mean[i, j] < best_gum:
                best_gum = res_gum_mean[i, j]
                gum_hyp = {"lr": lr, "lmd": lmd, "loss": best_gum}
            print(f'XXX lr: {lr} lmd: {lmd} res_sog {res_sog_mean[i, j]:.5f} res_gum {res_gum_mean[i, j]:.5f}')
    return sog_hyp, gum_hyp
def sensibility_ste_v2(n, k, n_rep=20):
    """Grid-search the learning rate for the straight-through estimator (STE)
    on the synthetic top-k problem, for Sum-of-Gamma and Gumbel noise.

    Args:
        n: dimensionality of the parameter vector theta.
        k: number of selected components in the top-k distribution.
        n_rep: repetitions per hyper-parameter configuration.

    Returns:
        (sog_hyp, gum_hyp): best {"lr", "lmd", "loss"} dicts found for
        Sum-of-Gamma and Gumbel noise, respectively.
    """
    rng = np.random.RandomState(0)
    theta = rng.randn(n)
    topk = distributions.TopK(n, k)
    b_t = t.abs(t.from_numpy(rng.randn(n)).float())
    # Analytic minimum of the expected objective; used to shift learning curves.
    sorted_bt = np.sort(b_t.detach().numpy())
    min_value_of_exp = np.sum((sorted_bt[:k])**2) + np.sum((sorted_bt[k:] - 1)**2)

    def objective(z):
        return ((z - b_t)**2).sum()

    full_obj = lambda _th: utils.expect_obj(topk, _th, objective)

    def pp(_his):
        # Penalise final losses that (impossibly) dip below the true minimum.
        if _his[-1] - min_value_of_exp < 0.:  # then it's all lost
            _his[-1] = 5.
        return _his

    exp = lambda strategy, lr, n_rp=n_rep, steps=100: experiment(
        lambda _th: ((strategy(_th) - b_t)**2).sum(),
        full_obj,
        lr, theta, steps=steps, n_rp=n_rp, do_plot=False,
        postprocess=pp
    )
    n_lr, n_lbd = 10, 1
    search_grid_lr = np.exp(np.linspace(np.log(0.001), np.log(.2), num=n_lr))
    search_grid_lambda = np.linspace(0.5, 3., num=n_lbd)
    res_ste_mean, res_ste_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))
    res_ste_g_mean, res_ste_g_std = np.zeros((n_lr, n_lbd)), np.zeros((n_lr, n_lbd))
    best_sog, sog_hyp = None, None
    best_gum, gum_hyp = None, None
    for i, lr in enumerate(search_grid_lr):
        for j, lmd in enumerate(search_grid_lambda):
            sog_distribution = SumOfGammaNoiseDistribution(k=k, nb_iterations=10)
            gum_distribution = GumbelNoiseDistribution()

            @my_ste(noise_distribution=sog_distribution, noise_temperature=1.0)
            def ste_topk_sog_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

            def ste_topk_sog(theta: Tensor) -> Tensor:
                return ste_topk_sog_batched(theta.view(1, -1)).view(-1)

            @my_ste(noise_distribution=gum_distribution, noise_temperature=1.0)
            def ste_topk_gum_batched(thetas: Tensor) -> Tensor:
                return t.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])

            def ste_topk_gum(theta: Tensor) -> Tensor:
                # BUG FIX: this previously delegated to ste_topk_sog_batched,
                # so the "Gumbel" run silently re-evaluated the SoG estimator.
                return ste_topk_gum_batched(theta.view(1, -1)).view(-1)

            ste_pam_lcs = exp(ste_topk_sog, lr)
            ste_pam_g_lcs = exp(ste_topk_gum, lr)
            res_ste_mean[i, j] = np.mean(np.array(ste_pam_lcs) - min_value_of_exp, axis=0)[-1]
            if best_sog is None or res_ste_mean[i, j] < best_sog:
                best_sog = res_ste_mean[i, j]
                sog_hyp = {"lr": lr, "lmd": lmd, "loss": best_sog}
            res_ste_g_mean[i, j] = np.mean(np.array(ste_pam_g_lcs) - min_value_of_exp, axis=0)[-1]
            if best_gum is None or res_ste_g_mean[i, j] < best_gum:
                best_gum = res_ste_g_mean[i, j]
                gum_hyp = {"lr": lr, "lmd": lmd, "loss": best_gum}
            print(f'XXX lr {lr:.5f} lmd {lmd} ste_mean {res_ste_mean[i, j]:.5f} ste_g_mean {res_ste_g_mean[i, j]:.5f}')
    return sog_hyp, gum_hyp
def sensibility_sfe(n, k, n_rep=20):
    """Grid-search the learning rate for the score-function estimator (SFE)
    on the synthetic top-k problem and return the best hyper-parameters.

    Returns a dict with the best {"lr", "lmd", "loss"} found.
    """
    rng = np.random.RandomState(0)
    theta = rng.randn(n)
    topk = distributions.TopK(n, k)
    b_t = t.abs(t.from_numpy(rng.randn(n)).float())
    print(b_t)
    # Analytic minimum of the expected objective; used to shift learning curves.
    sorted_bt = np.sort(b_t.detach().numpy())
    min_value_of_exp = np.sum(sorted_bt[:k] ** 2) + np.sum((sorted_bt[k:] - 1) ** 2)
    print(min_value_of_exp)

    def objective(z):
        return ((z - b_t) ** 2).sum()

    def full_obj(_th):
        return utils.expect_obj(topk, _th, objective)

    def postproc(history):
        # Penalise final losses that (impossibly) dip below the true minimum.
        if history[-1] - min_value_of_exp < 0.:  # then it's all lost
            history[-1] = 5.
        return history

    def run(strategy, lr, n_rp=n_rep, steps=100):
        return experiment(
            lambda _th: ((strategy(_th) - b_t) ** 2).sum(),
            full_obj,
            lr, theta, steps=steps, n_rp=n_rp, do_plot=False,
            postprocess=postproc)

    n_lr, n_lbd = 10, 1
    search_grid_lr = np.exp(np.linspace(np.log(0.0001), np.log(.1), num=n_lr))
    search_grid_lambda = np.linspace(0.5, 3., num=n_lbd)
    res_sfe_mean = np.zeros((n_lr, n_lbd))
    res_sfe_std = np.zeros((n_lr, n_lbd))
    best, hyp = None, None
    for i, lr in enumerate(search_grid_lr):
        for j, lmd in enumerate(search_grid_lambda):
            print(i, j)
            sfe_full = sfe.sfe(topk.sample_f(np.random.RandomState(0)),
                               objective, topk.grad_log_p(topk.marginals))
            curves = np.array(run(sfe_full, lr, n_rp=n_rep, steps=1000))
            res_sfe_mean[i, j] = np.mean(curves - min_value_of_exp, axis=0)[-1]
            res_sfe_std[i, j] = np.std(curves, axis=0)[-1]
            if best is None or res_sfe_mean[i, j] < best:
                best = res_sfe_mean[i, j]
                hyp = {"lr": lr, "lmd": lmd, "loss": best}
    return hyp
if __name__ == '__main__':
    def start_process():
        # Thread-pool initializer; nothing to set up per worker.
        pass

    def sensibility(model):
        """Dispatch the hyper-parameter search for one estimator family.

        Returns the best hyper-parameter record(s) for *model*.
        """
        if model in {'imle'}:
            res = sensibility_imle_v2(10, 5, n_rep=100)
            print('Best IMLE hyp:', res)
        elif model in {'aimle_sym'}:
            res = sensibility_aimle_v2(10, 5, is_sym=True, n_rep=100)
            print('Best AIMLE sym hyp:', res)
        elif model in {'aimle_adapt'}:
            res = sensibility_aimle_adapt_v2(10, 5, is_sym=False, n_rep=100)
            print('Best AIMLE adapt hyp:', res)
        elif model in {'aimle_sym_adapt'}:
            res = sensibility_aimle_adapt_v2(10, 5, is_sym=True, n_rep=100)
            print('Best AIMLE sym adapt hyp:', res)
        elif model in {'ste'}:
            res = sensibility_ste_v2(10, 5, n_rep=100)
            print('Best STE hyp:', res)
        elif model in {'sfe'}:
            res = sensibility_sfe(10, 5, n_rep=100)
            print('Best SFE hyp:', res)
        else:
            assert False, f'{model} not supported'
        return res

    import os
    import json
    from multiprocessing.pool import ThreadPool

    # Cache search results on disk so re-runs only compute missing estimators.
    cache_path = 'synth_cache.json'
    cache = dict()
    if os.path.exists(cache_path):
        with open(cache_path, 'r') as f:
            cache = json.load(f)
    pool = ThreadPool(processes=32, initializer=start_process)
    all_keys = ['imle', 'aimle_sym', 'ste', 'sfe', 'aimle_adapt', 'aimle_sym_adapt']
    missing_keys = sorted({k for k in all_keys} - {k for k in cache})
    hyp_lst = pool.map(sensibility, missing_keys)
    for key, hyp in zip(missing_keys, hyp_lst):
        cache[key] = hyp
    with open(cache_path, 'w') as f:
        json.dump(cache, f)
    imle_sog_hyp, imle_gum_hyp = cache['imle']
    aimle_sym_sog_hyp, aimle_sym_gum_hyp = cache['aimle_sym']
    ste_sog_hyp, ste_gum_hyp = cache['ste']
    sfe_hyp = cache['sfe']
    aimle_adapt_sog_hyp, aimle_adapt_gum_hyp = cache['aimle_adapt']
    aimle_sym_adapt_sog_hyp, aimle_sym_adapt_gum_hyp = cache['aimle_sym_adapt']
    toy_exp_v2(10, 5, n_rep=100,
               imle_sog_hyp=imle_sog_hyp, imle_gum_hyp=imle_gum_hyp,
               aimle_sym_sog_hyp=aimle_sym_sog_hyp, aimle_sym_gum_hyp=aimle_sym_gum_hyp,
               aimle_adapt_sog_hyp=aimle_adapt_sog_hyp, aimle_adapt_gum_hyp=aimle_adapt_gum_hyp,
               # BUG FIX: the non-symmetric adaptive hyper-parameters were
               # passed here, leaving aimle_sym_adapt_{sog,gum}_hyp unused.
               aimle_sym_adapt_sog_hyp=aimle_sym_adapt_sog_hyp, aimle_sym_adapt_gum_hyp=aimle_sym_adapt_gum_hyp,
               ste_sog_hyp=ste_sog_hyp, ste_gum_hyp=ste_gum_hyp,
               sfe_hyp=sfe_hyp)
| 37,646 | 38.75396 | 119 | py |
torch-adaptive-imle | torch-adaptive-imle-main/cli/gradient-samples-cli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import torch
import numpy as np
from torch import Tensor, nn
from imle.ste import ste as ste
from imle.imle import imle as imle
from imle.aimle import aimle as aimle
from imle.target import BaseTargetDistribution, TargetDistribution, AdaptiveTargetDistribution
from imle.noise import BaseNoiseDistribution, SumOfGammaNoiseDistribution, GumbelNoiseDistribution
from aaai23.synth import distributions, utils, sfe2 as sfe
import argparse
from tqdm import tqdm
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing.pool import Pool
from multiprocessing import freeze_support, Manager
from typing import Optional
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
def objective(z: Tensor, b_t: Tensor) -> Tensor:
    """Sum of squared differences between *z* and the target *b_t*.

    *z* may contain several stacked samples (its flattened length being a
    multiple of ``b_t``'s length); the squared error is then summed over
    every sample.
    """
    if len(z.shape) > len(b_t.shape):
        z = z.view(-1)
    dim = b_t.shape[0]
    if z.shape[0] <= dim:
        return ((z - b_t) ** 2).sum()
    # Several samples stacked along the first axis: broadcast b_t across them.
    per_sample = z.view(z.shape[0] // dim, dim)
    diff = per_sample - b_t.view(1, -1)
    return (diff ** 2).sum()
def true_gradient_fun(topk: distributions.TopK,
                      theta_t: Tensor,
                      b_t: Tensor) -> Tensor:
    """Exact gradient of the expected objective w.r.t. theta, obtained by
    autograd over the full enumeration of top-k states."""
    param = nn.Parameter(theta_t, requires_grad=True)
    # Expected value of the loss under p(z | theta), computed exactly.
    expected_loss = utils.expect_obj(topk, param, lambda z_: objective(z_, b_t))
    expected_loss.backward()
    return param.grad
def imle_gradient_fun(topk: distributions.TopK,
                      theta_t: Tensor,
                      b_t: Tensor,
                      target_distribution: BaseTargetDistribution,
                      noise_distribution: BaseNoiseDistribution,
                      noise_temperature: float,
                      nb_samples: int,
                      nb_marginal_samples: int) -> Tensor:
    """Single-step IMLE estimate of the objective's gradient w.r.t. theta."""
    @imle(theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
          target_distribution=target_distribution, noise_distribution=noise_distribution,
          nb_samples=nb_samples, nb_marginal_samples=nb_marginal_samples)
    def batched_map(thetas: Tensor) -> Tensor:
        return topk.map_2d(thetas)

    param = nn.Parameter(theta_t, requires_grad=True)
    # Run the differentiable MAP solver on a single-row batch.
    z = batched_map(param.view(1, -1)).view(-1)
    objective(z, b_t).backward()
    return param.grad
def aimle_gradient_fun(topk: distributions.TopK,
                       theta_t: Tensor,
                       b_t: Tensor,
                       target_distribution: BaseTargetDistribution,
                       noise_distribution: BaseNoiseDistribution,
                       noise_temperature: float,
                       nb_samples: int,
                       nb_marginal_samples: int,
                       is_symmetric: bool,
                       warmup_steps: int = 0) -> Tensor:
    """AIMLE estimate of the objective's gradient w.r.t. theta.

    Optional warm-up backward passes let an adaptive target distribution
    calibrate its state before the measured step; their gradients are
    discarded.
    """
    @aimle(theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
           target_distribution=target_distribution, noise_distribution=noise_distribution,
           nb_samples=nb_samples, nb_marginal_samples=nb_marginal_samples,
           symmetric_perturbation=is_symmetric)
    def batched_map(thetas: Tensor) -> Tensor:
        return topk.map_2d(thetas)

    def estimator(theta: Tensor) -> Tensor:
        return batched_map(theta.view(1, -1)).view(-1)

    param = nn.Parameter(theta_t, requires_grad=True)
    # Warm-up: backprop (updating any adaptive state), then drop the gradient.
    for _ in range(warmup_steps):
        objective(estimator(param), b_t).backward()
        param.grad = None
    objective(estimator(param), b_t).backward()
    return param.grad
def ste_gradient_fun(topk: distributions.TopK,
                     theta_t: Tensor,
                     b_t: Tensor,
                     noise_distribution: BaseNoiseDistribution,
                     noise_temperature: float,
                     nb_samples: int) -> Tensor:
    """Straight-through estimate of the objective's gradient w.r.t. theta."""
    @ste(noise_temperature=noise_temperature, noise_distribution=noise_distribution, nb_samples=nb_samples)
    def batched_map(thetas: Tensor) -> Tensor:
        return topk.map_2d(thetas)

    param = nn.Parameter(theta_t, requires_grad=True)
    z = batched_map(param.view(1, -1)).view(-1)
    objective(z, b_t).backward()
    return param.grad
def sfe_gradient_fun(topk: distributions.TopK,
                     theta_t: Tensor,
                     b_t: Tensor,
                     nb_samples: Optional[int]) -> Tensor:
    """Score-function (REINFORCE) estimate of the objective's gradient."""
    loss_fn = lambda z_: objective(z_, b_t)
    estimator = sfe.sfe(topk.sample_f(np.random.RandomState(0)),
                        loss_fn, topk.grad_log_p(topk.marginals), nb_samples)
    param = nn.Parameter(theta_t, requires_grad=True)
    loss_fn(estimator(param)).backward()
    return param.grad
# def main(argv):
argv = sys.argv[1:]
# freeze_support()
# Command-line configuration for the gradient-similarity experiment.
parser = argparse.ArgumentParser('Gradient Estimation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-k', action='store', type=int, default=2)
parser.add_argument('-n', action='store', type=int, default=5)
parser.add_argument('-o', action='store', type=str, default='gradient-samples.pdf')
parser.add_argument('--min-samples', action='store', type=int, default=1)
parser.add_argument('--max-samples', action='store', type=int, default=100)
parser.add_argument('--samples', type=int, nargs='+', default=[])
parser.add_argument('--imle-lambdas', type=float, nargs='+', default=[])
parser.add_argument('--nb-marginal-samples', type=int, nargs='+', default=[1])
parser.add_argument('--seeds', '-s', action='store', type=int, default=64)
parser.add_argument('--warmup-steps', action='store', type=int, default=100)
parser.add_argument('--processes', '-p', action='store', type=int, default=0)
args = parser.parse_args(argv)
# Prefer CUDA when available; new tensors then default to the GPU.
device = torch.device('cpu')
if torch.cuda.is_available():
    torch.set_default_tensor_type(torch.cuda.FloatTensor)
    device = torch.device('cuda')
print('Device: ', device)
torch.set_num_threads(32)
n = args.n
# k = 5
k = args.k
# Exhaustive top-k distribution over n items (enumerates all states).
topk = distributions.TopK(n, k, device=device)
print(f'Possible states: {topk.states.shape}')
# theta_lst = []
# lambdas = np.linspace(args.min_lambda, args.max_lambda, num=args.nb_lambdas)
# Sample counts to evaluate; defaults to the full [min_samples, max_samples] range.
samples = args.samples
if len(samples) < 1:
    samples = list(range(args.min_samples, args.max_samples + 1))
# Cosine similarity between two gradient vectors, flattened to single rows.
pdist_batch = nn.CosineSimilarity(dim=1, eps=1e-6)
pdist = lambda x, y: pdist_batch(x.view(1, -1), y.view(1, -1))[0]
def run_experiment(i: int, lmd_mse_dict, mutex) -> int:
    """Run one seed of the gradient-similarity experiment.

    Draws theta and the target b_t from seed *i*, computes the exact
    gradient of the expected objective, and — for every sample count,
    marginal-sample count and IMLE lambda in the configured grids — appends
    the cosine similarity between the exact gradient and each estimator's
    gradient to the shared dict *lmd_mse_dict* (appends guarded by *mutex*).
    Returns 0 on completion.
    """
    rng = np.random.RandomState(i)
    theta = rng.randn(n)
    b_t = torch.abs(torch.from_numpy(rng.randn(n)).float().to(device))
    theta_t = torch.tensor(theta, dtype=torch.float, requires_grad=False, device=device)
    # theta_lst += [theta_t]
    # Ground-truth gradient, computed once per seed.
    true_gradient = true_gradient_fun(topk, theta_t, b_t)
    # noise_distribution = SumOfGammaNoiseDistribution(k=k, nb_iterations=10)
    noise_distribution = GumbelNoiseDistribution()
    for s in samples:
        print(f'Processing {s} ..')
        for ms in args.nb_marginal_samples:
            for lmd in args.imle_lambdas:
                # IMLE with a fixed target distribution (forward perturbation).
                target_distribution = TargetDistribution(alpha=1.0, beta=lmd, do_gradient_scaling=True)
                imle_gradient = imle_gradient_fun(topk, theta_t, b_t,
                                                  target_distribution=target_distribution,
                                                  noise_distribution=noise_distribution,
                                                  noise_temperature=1.0, nb_samples=s, nb_marginal_samples=ms)
                sim = pdist(true_gradient, imle_gradient)
                with mutex:
                    lmd_mse_dict['Method'] += [f'IMLE (Forward, $\\lambda = {lmd}$)']
                    lmd_mse_dict['Samples'] += [s]
                    lmd_mse_dict['Cosine Similarity'] += [sim.item()]
                    lmd_mse_dict['Seed'] += [i]
                # IMLE with a symmetric (central) perturbation.
                imle_sym_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                       target_distribution=target_distribution,
                                                       noise_distribution=noise_distribution,
                                                       noise_temperature=1.0, nb_samples=s, nb_marginal_samples=ms,
                                                       is_symmetric=True)
                sim = pdist(true_gradient, imle_sym_gradient)
                with mutex:
                    lmd_mse_dict['Method'] += [f'IMLE (Central, $\\lambda = {lmd}$)']
                    lmd_mse_dict['Samples'] += [s]
                    lmd_mse_dict['Cosine Similarity'] += [sim.item()]
                    lmd_mse_dict['Seed'] += [i]
            # AIMLE (adaptive target distribution), forward perturbation.
            adaptive_target_distribution = AdaptiveTargetDistribution(initial_beta=0.0,
                                                                      beta_update_momentum=0.0,
                                                                      beta_update_step=1e-3)
            aimle_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                target_distribution=adaptive_target_distribution,
                                                noise_distribution=noise_distribution,
                                                noise_temperature=1.0, nb_samples=s, nb_marginal_samples=ms,
                                                is_symmetric=False, warmup_steps=args.warmup_steps)
            sim = pdist(true_gradient, aimle_gradient)
            with mutex:
                lmd_mse_dict['Method'] += [f'AIMLE (Forward)']
                lmd_mse_dict['Samples'] += [s]
                lmd_mse_dict['Cosine Similarity'] += [sim.item()]
                lmd_mse_dict['Seed'] += [i]
            # AIMLE (adaptive target distribution), central perturbation.
            adaptive_target_distribution = AdaptiveTargetDistribution(initial_beta=0.0,
                                                                      beta_update_momentum=0.0,
                                                                      beta_update_step=1e-3)
            aimle_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                target_distribution=adaptive_target_distribution,
                                                noise_distribution=noise_distribution,
                                                noise_temperature=1.0, nb_samples=s, nb_marginal_samples=ms,
                                                is_symmetric=True, warmup_steps=args.warmup_steps)
            sim = pdist(true_gradient, aimle_gradient)
            with mutex:
                lmd_mse_dict['Method'] += [f'AIMLE (Central)']
                lmd_mse_dict['Samples'] += [s]
                lmd_mse_dict['Cosine Similarity'] += [sim.item()]
                lmd_mse_dict['Seed'] += [i]
            # Momentum variants of AIMLE — currently disabled.
            if False:
                adaptive_target_distribution = AdaptiveTargetDistribution(initial_beta=0.0,
                                                                          beta_update_momentum=0.9,
                                                                          beta_update_step=1e-3)
                aimle_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                    target_distribution=adaptive_target_distribution,
                                                    noise_distribution=noise_distribution,
                                                    noise_temperature=1.0, nb_samples=s, nb_marginal_samples=ms,
                                                    is_symmetric=False, warmup_steps=args.warmup_steps)
                sim = pdist(true_gradient, aimle_gradient)
                with mutex:
                    lmd_mse_dict['Method'] += [f'AIMLE Mom ($\\mu = {ms}$)']
                    lmd_mse_dict['Samples'] += [s]
                    lmd_mse_dict['Cosine Similarity'] += [sim.item()]
                    lmd_mse_dict['Seed'] += [i]
                adaptive_target_distribution = AdaptiveTargetDistribution(initial_beta=0.0,
                                                                          beta_update_momentum=0.9,
                                                                          beta_update_step=1e-3)
                aimle_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                    target_distribution=adaptive_target_distribution,
                                                    noise_distribution=noise_distribution,
                                                    noise_temperature=1.0, nb_samples=s, nb_marginal_samples=ms,
                                                    is_symmetric=True, warmup_steps=args.warmup_steps)
                sim = pdist(true_gradient, aimle_gradient)
                with mutex:
                    lmd_mse_dict['Method'] += [f'AIMLE Mom Sym ($\\mu = {ms}$)']
                    lmd_mse_dict['Samples'] += [s]
                    lmd_mse_dict['Cosine Similarity'] += [sim.item()]
                    lmd_mse_dict['Seed'] += [i]
            # Straight-through estimator baseline.
            ste_gradient = ste_gradient_fun(topk, theta_t, b_t,
                                            noise_distribution=noise_distribution,
                                            noise_temperature=1.0, nb_samples=s)
            sim = pdist(true_gradient, ste_gradient)
            with mutex:
                lmd_mse_dict['Method'] += [f'STE']
                lmd_mse_dict['Samples'] += [s]
                lmd_mse_dict['Cosine Similarity'] += [sim.item()]
                lmd_mse_dict['Seed'] += [i]
            # Score-function estimator baseline.
            sfe_gradient = sfe_gradient_fun(topk, theta_t, b_t, s)
            sim = pdist(true_gradient, sfe_gradient)
            with mutex:
                lmd_mse_dict['Method'] += [f'SFE']
                lmd_mse_dict['Samples'] += [s]
                lmd_mse_dict['Cosine Similarity'] += [sim.item()]
                lmd_mse_dict['Seed'] += [i]
    return 0
def main():
    """Collect per-seed results (optionally in parallel) and plot the
    sample-count vs. cosine-similarity curves to ``args.o``."""
    # Shared, lock-protected result store usable from worker processes.
    manager = Manager()
    md = manager.dict()
    md['Method'] = []
    md['Samples'] = []
    md['Cosine Similarity'] = []
    md['Seed'] = []
    mutex = manager.Lock()
    keys = [(i, md, mutex) for i in list(range(args.seeds))]
    if args.processes > 0:
        pool = Pool(processes=args.processes)
        pool.starmap(run_experiment, keys)
    else:
        # Sequential fallback when no worker processes are requested.
        for entry in keys:
            run_experiment(*entry)
    # Copy out of the manager proxy into a plain dict.
    lmd_mse_dict = dict()
    for k, v in md.items():
        lmd_mse_dict[k] = v
    import math
    # Plot on a log10 x-axis: replace sample counts by their exponents.
    samples_field = lmd_mse_dict['Samples']
    lmd_mse_dict['Samples'] = [int(math.log10(s)) for s in samples_field]
    df = pd.DataFrame.from_dict(lmd_mse_dict)
    # Drop the symmetric ("Sym") variants from the plotted methods.
    all_method_lst = sorted({m for m in lmd_mse_dict['Method']})
    filter_method_lst = [m for m in all_method_lst if 'Sym' in m]
    df = df[~df['Method'].isin(filter_method_lst)]
    # sns.set(rc={'figure.figsize': (12, 8)})
    from matplotlib import rcParams
    # figure size in inches
    rcParams['figure.figsize'] = 8, 4
    rcParams['font.size'] = 13
    g = sns.lineplot(x='Samples',
                     y="Cosine Similarity",
                     hue="Method",
                     data=df)
    g.set(title='Number of samples $\\times$ Similarity to the true gradient')
    # Relabel the log10 ticks as powers of ten.
    ticks = [int(math.log10(s)) for s in samples]
    g.set_xticks(ticks)
    g.set_xticklabels([f'$10^{t}$' for t in ticks])
    plt.xlim(ticks[0], ticks[-1])
    plt.ylim(0.0, 1.0)
    g.set_xlabel("Number of Samples")
    g.set_ylabel("Cosine Similarity")
    plt.grid()
    # handles, labels = g.get_legend_handles_labels()
    # g.legend(handles=handles[1:], labels=labels[1:])
    g.legend_.set_title(None)
    # plt.show()
    plt.savefig(args.o, bbox_inches='tight')
    print(lmd_mse_dict)
    print('Done!')
if __name__ == '__main__':
    # Configure logging to stdout; freeze_support() keeps multiprocessing
    # working in frozen/Windows builds.
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    freeze_support()
    main()
| 16,797 | 38.995238 | 115 | py |
torch-adaptive-imle | torch-adaptive-imle-main/cli/warcraft-cli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
from logging import WARNING
import warnings
import numpy as np
import psutil
import ray
import re
import torch
import getpass
from aaai23.maprop.logger import Logger
from aaai23.maprop.utils import set_seed, save_metrics_params, update_params_from_cmdline, save_settings_to_json
import aaai23.maprop.warcraft_shortest_path.data_utils as warcraft_shortest_path_data
from aaai23.maprop.utils import load_json
from aaai23.maprop.warcraft_shortest_path.trainers import BaselineTrainer, DijkstraOnFull
from aaai23.maprop.warcraft_shortest_path.maprop import DijkstraMAP
# torch.autograd.set_detect_anomaly(True)
# Print full arrays without truncation when dumping numpy data for debugging.
np.set_printoptions(threshold=sys.maxsize, linewidth=sys.maxsize, precision=3)
warnings.filterwarnings("ignore")
def get_trainer(trainer_name):
    """Resolve a trainer class from its configuration name.

    Raises KeyError for names that are not registered.
    """
    registry = {}
    registry["Baseline"] = BaselineTrainer
    registry["DijkstraOnFull"] = DijkstraOnFull
    registry["DijkstraMAP"] = DijkstraMAP
    return registry[trainer_name]
# Maps problem type -> dataset loading function.
dataset_loaders = {
    "warcraft_shortest_path": warcraft_shortest_path_data.load_dataset
}
# Maps problem type -> trainer factory (trainer name -> trainer class).
trainer_loaders = {
    "warcraft_shortest_path": get_trainer
}
# Settings every experiment configuration must define.
required_top_level_params = [
    "model_dir",
    "seed",
    "loader_params",
    "problem_type",
    "trainer_name",
    "trainer_params",
    "num_epochs",
    "evaluate_every",
    "save_visualizations"
]
# Settings that may appear but are not mandatory.
optional_top_level_params = ["num_cpus", "use_ray", "default_json", "id", "fast_mode", "fast_forward_training"]


def verify_top_level_params(**kwargs):
    """Validate the top-level experiment settings.

    Raises ValueError on the first unknown key, then on the first missing
    required key.
    """
    known = set(required_top_level_params) | set(optional_top_level_params)
    for name in kwargs:
        if name not in known:
            raise ValueError("Unknown top_level argument: {}".format(name))
    for name in required_top_level_params:
        if name not in kwargs:
            raise ValueError("Missing required argument: {}".format(name))
def custom_parser(args):
    """Build the experiment parameter dict from a JSON file plus command-line
    overrides.

    ``args[1]`` is the path to the base JSON configuration; every later
    argument must have the form ``dotted.key=value`` and overrides (or
    creates) the addressed nested entry, coercing the value to the type of
    the existing entry when one exists.
    """
    cmd_params = load_json(args[1])
    cmd_params["trainer_params"]["use_cuda"] = torch.cuda.is_available()
    if torch.cuda.is_available():
        # Log GPU and host information for reproducibility.
        import subprocess
        result = subprocess.run(['nvidia-smi'], stdout=subprocess.PIPE)
        print(result.stdout.decode('utf-8'))
        result = subprocess.run(['hostname'], stdout=subprocess.PIPE)
        print(result.stdout.decode('utf-8'))
    cmd_params["num_cpus"] = os.cpu_count()
    for arg in args[2:]:
        assert '=' in arg, 'each arg apart from json needs to be in the form a=b'
        key, value = arg.split('=')
        key_entries = key.split('.')
        # Booleans are recognised textually before any numeric parsing.
        if value in {'true', 'True'}:
            value = True
        if value in {'false', 'False'}:
            value = False
        # Walk (creating as needed) the nested dict addressed by the dotted key.
        sub_param = cmd_params
        for key_entry in key_entries[:-1]:
            if key_entry not in sub_param:
                sub_param[key_entry] = dict()
            sub_param = sub_param[key_entry]
        # Prefer the type of an existing entry; otherwise infer from the value.
        inferred_type = None
        if key_entries[-1] in sub_param:
            inferred_type = type(sub_param[key_entries[-1]])
        if inferred_type is None:
            print(f'No existing entry for {key}')
            if isinstance(value, str):
                # NOTE(review): negative integers fall through to the float
                # branch here (e.g. "-5" becomes -5.0) — confirm this is intended.
                if all(char.isdigit() for char in value):
                    value = int(value)
                elif re.match(r'^-?\d+(?:\.\d+)?$', value) is not None:
                    value = float(value)
            inferred_type = type(value)
        print(f'{sub_param}[{key_entries[-1]}] = {inferred_type}({value})')
        sub_param[key_entries[-1]] = inferred_type(value)
    return cmd_params
def main():
    """Entry point: parse params, build the data loaders and trainer, train
    with periodic evaluation, save metrics, and shut ray down if started."""
    params = update_params_from_cmdline(verbose=True, custom_parser=custom_parser)
    os.makedirs(params.model_dir, exist_ok=True)
    save_settings_to_json(params, params.model_dir)
    num_cpus = params.get("num_cpus", psutil.cpu_count(logical=True))
    use_ray = params.get("use_ray", False)
    fast_forward_training = params.get("fast_forward_training", False)
    if use_ray:
        # Per-user/per-PID temp dir avoids clashes on shared machines.
        ray.init(
            num_cpus=num_cpus,
            logging_level=WARNING,
            ignore_reinit_error=True,
            log_to_driver=False,
            _temp_dir=f'/tmp/{getpass.getuser()}-{os.getpid()}-ray',
            **params.get("ray_params", {})
        )
    set_seed(params.seed)
    Logger.configure(params.model_dir, "tensorboard")
    dataset_loader = dataset_loaders[params.problem_type]
    train_iterator, test_iterator, metadata = dataset_loader(**params.loader_params)
    trainer_class = trainer_loaders[params.problem_type](params.trainer_name)
    fast_mode = params.get("fast_mode", False)
    # if params.trainer_name in {'Baseline'}:
    #     del params.trainer_params['mode']
    trainer = trainer_class(
        train_iterator=train_iterator,
        test_iterator=test_iterator,
        metadata=metadata,
        fast_mode=fast_mode,
        **params.trainer_params
    )
    train_results = {}
    for i in range(params.num_epochs):
        # Evaluate every `evaluate_every` epochs (including epoch 0).
        if i % params.evaluate_every == 0:
            print('EVALUATING')
            eval_results = trainer.evaluate()
            print(eval_results)
        train_results = trainer.train_epoch()
        # Optionally stop early once training accuracy saturates.
        if train_results["train_accuracy"] > 0.999 and fast_forward_training:
            print(f'Reached train accuracy of {train_results["train_accuracy"]}. Fast forwarding.')
            break
    print('EVALUATING')
    eval_results = trainer.evaluate()
    print(eval_results)
    train_results = train_results or {}
    save_metrics_params(params=params, metrics={**eval_results, **train_results})
    if use_ray:
        ray.shutdown()
    print('DONE')
# Script entry point.
if __name__ == "__main__":
    main()
| 5,638 | 28.52356 | 112 | py |
torch-adaptive-imle | torch-adaptive-imle-main/cli/nri-cli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import sys
import json
import itertools
import math
import time
import argparse
import pickle
import os
from functools import partial
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from torch.optim import lr_scheduler
from nri.modules import MLPEncoder, MLPDecoder
from nri.utils import get_experiments_folder, get_experiment_name, map_estimator
from nri.utils import load_data, encode_onehot
from nri.utils import maybe_make_logits_symmetric
from nri.utils import nll_gaussian, kl_categorical_uniform, kl_gumbel
from nri.utils import sampling_edge_metrics
from nri.utils import sample_indep_edges
from nri.core.spanning_tree import sample_tree_from_logits
from nri.core.topk import sample_topk_from_logits
from imle.aimle import aimle
from imle.ste import ste
from imle.target import BaseTargetDistribution, TargetDistribution, AdaptiveTargetDistribution
from imle.noise import BaseNoiseDistribution, SumOfGammaNoiseDistribution, GumbelNoiseDistribution
from typing import Optional, Tuple, List
from torch import Tensor
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
# High-precision tensor printing and autograd anomaly detection for debugging.
torch.set_printoptions(precision=32)
torch.autograd.set_detect_anomaly(True)
def name_to_distribution(distribution_name: str, logits: Tensor) -> Optional[BaseNoiseDistribution]:
    """Instantiate the noise distribution named by *distribution_name*.

    n is recovered from the number of logits per instance by inverting
    n * (n - 1) = logits.size(1).
    """
    num_nodes = int(0.5 * (1 + np.sqrt(4 * logits.size(1) + 1)))
    if distribution_name in {'none'}:
        return None
    if distribution_name in {'sog'}:
        return SumOfGammaNoiseDistribution(k=num_nodes, nb_iterations=10, device=logits.device)
    if distribution_name in {'gumbel'}:
        return GumbelNoiseDistribution(device=logits.device)
    assert False, f'Noise model not supported: {distribution_name}'
def sample_edges_imle(logits: Tensor,
                      args,
                      is_eval: bool,
                      target_distribution: BaseTargetDistribution) -> Tuple[List[Tensor], List[Tensor]]:
    """Differentiably sample an edge structure from *logits* via I-MLE.

    Wraps the MAP estimator with AIMLE or STE depending on ``args.method``;
    noise temperatures are set to zero during evaluation. Returns singleton
    lists holding the sampled edges and the corresponding edge weights
    (the logits rearranged back into the same layout as *logits*).
    """
    edges, edge_weights = [], []
    # Recover the node count n from the n * (n - 1) off-diagonal entries.
    n = int(0.5 * (1 + np.sqrt(4 * logits.size(1) + 1)))
    noise_distribution = name_to_distribution('sog', logits)
    if args.method in {'aimle'}:
        @aimle(target_distribution=target_distribution,
               noise_distribution=noise_distribution,
               nb_samples=1 if is_eval else args.imle_samples,
               theta_noise_temperature=0.0 if is_eval else args.imle_noise_temperature,
               target_noise_temperature=0.0 if is_eval else args.imle_noise_temperature,
               symmetric_perturbation=args.aimle_symmetric)
        def differentiable_map_estimator(logits_: Tensor) -> Tensor:
            res = map_estimator(logits_, args.use_cpp_for_sampling)
            return res
    elif args.method in {'ste'}:
        @ste(noise_distribution=noise_distribution, noise_temperature=args.imle_noise_temperature)
        def differentiable_map_estimator(logits_: Tensor) -> Tensor:
            res = map_estimator(logits_, args.use_cpp_for_sampling)
            return res
    else:
        assert False, f'Unknown method: {args.method}'
    ss_edges = differentiable_map_estimator(logits)
    if args.edge_types == 2:
        # Two edge types: prepend the complementary "no edge" channel.
        null_edges = 1.0 - ss_edges
        # ss_edges is [B, 90, 2]
        ss_edges = torch.cat((null_edges, ss_edges), dim=2)
    # Make sampled edge weights into adj matrix format.
    reshaped_logits = logits.view(-1, n, n - 1)
    reshaped_logits = reshaped_logits.transpose(1, 2)  # (bs, n-1, n)
    vertices = torch.triu_indices(n - 1, n, offset=1)
    edge_logits = reshaped_logits[:, vertices[0], vertices[1]]
    edge_weights_ = edge_logits
    # Write each upper-triangular weight into both directed slots so the
    # resulting matrix is symmetric under the (n-1, n) indexing scheme.
    edge_weights_reshaped = torch.zeros_like(reshaped_logits)
    edge_weights_reshaped[:, vertices[0], vertices[1]] = edge_weights_
    edge_weights_reshaped[:, vertices[1] - 1, vertices[0]] = edge_weights_
    ss_edge_weights = edge_weights_reshaped.transpose(1, 2).contiguous().view(logits.shape)
    edges.append(ss_edges)
    edge_weights.append(ss_edge_weights)
    return edges, edge_weights
def main(argv):
    """Train or evaluate a latent-graph (NRI-style) encoder/decoder.

    Gradients through the discrete graph sample are estimated either with a
    Stochastic Softmax Trick ('sst'), standard/adaptive I-MLE ('aimle'), or a
    straight-through estimator ('ste'), selected via ``--method``.

    :param argv: raw CLI argument list, typically ``sys.argv[1:]``.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): type=eval on boolean flags executes arbitrary CLI input;
    # argparse's BooleanOptionalAction would be a safer choice.
    parser.add_argument("--cuda", type=eval, default=False, choices=[True, False],
                        help="Enables CUDA training.")
    parser.add_argument("--seed", type=int, default=42, help="Random seed.")
    parser.add_argument("--mode", type=str, default="train",
                        choices=["train", "eval"],
                        help="Whether to train or evaluate.")
    parser.add_argument("--num_iterations", type=int, default=50000,
                        help="Number of iterations to train.")
    parser.add_argument("--eval_every", type=int, default=500,
                        help="Number of training steps in-between evaluating.")
    parser.add_argument("--batch_size", type=int, default=128,
                        help="Number of samples per batch.")
    parser.add_argument("--eval_batch_size", type=int, default=100,
                        help="Number of samples per batch for eval on validation.")
    parser.add_argument("--temp", type=float, default=5.0, help="Temperature.")
    parser.add_argument("--lr", type=float, default=0.0003,
                        help="Initial learning rate.")
    parser.add_argument("--lr_decay", type=int, default=200,
                        help="After how epochs to decay LR by a factor of gamma.")
    parser.add_argument("--gamma", type=float, default=0.5,
                        help="LR decay factor.")
    parser.add_argument("--enc_weight_decay", type=float, default=0.0,
                        help="Weight decay for AdamW.")
    parser.add_argument("--dec_weight_decay", type=float, default=0.0,
                        help="Weight decay for AdamW.")
    parser.add_argument("--encoder_hidden", type=int, default=256,
                        help="Number of hidden units.")
    parser.add_argument("--decoder_hidden", type=int, default=256,
                        help="Number of hidden units.")
    parser.add_argument("--num_vertices", type=int, default=10,
                        help="Number of vertices in the graph.")
    parser.add_argument("--encoder_dropout", type=float, default=0.0,
                        help="Dropout rate (1 - keep probability).")
    parser.add_argument("--decoder_dropout", type=float, default=0.0,
                        help="Dropout rate (1 - keep probability).")
    parser.add_argument("--factor", type=eval, default=True,
                        choices=[True, False],
                        help="Enables factor graph model.")
    parser.add_argument("--suffix", type=str, default="_novar_1skip_10t_1r_graph10",
                        help="Suffix for training data.")
    parser.add_argument("--edge_types", type=int, default=2, choices=[1, 2],
                        help="The number of edge types to infer. Must be <= 2.")
    parser.add_argument("--dims", type=int, default=2,
                        help="The number of input dimensions.")
    parser.add_argument("--timesteps", type=int, default=10,
                        help="The number of time steps per sample.")
    parser.add_argument("--prediction_steps", type=int, default=10, metavar="N",
                        help="Num steps to predict before re-using teacher forcing.")
    parser.add_argument("--num_rounds", type=int, default=1,
                        help="Num message passing rounds in decoder per timestep.")
    parser.add_argument("--skip_first", type=eval, default=False, choices=[True, False],
                        help="Skip first edge type in decoder, i.e. it represents no-edge.")
    parser.add_argument("--var", type=float, default=5e-5, help="Output variance.")
    parser.add_argument("--hard", type=eval, default=False, choices=[True, False],
                        help="Uses discrete samples in training forward pass.")
    parser.add_argument("--st", type=eval, default=False, choices=[True, False],
                        help="Uses discrete samples in training forward pass.")
    parser.add_argument("--sst", type=str, default="tree",
                        choices=["indep", "tree", "topk"],
                        help="Stochastic Softmax Tricks")
    parser.add_argument("--relaxation", type=str, default="exp_family_entropy",
                        help="Relaxation for SST.")
    parser.add_argument("--max_range", type=float, default=np.inf,
                        help="Max range of logits for spanning tree sst.")
    parser.add_argument("--eps_for_finitediff", type=float, default=1e-2,
                        help="Epsilon for finite difference for topk.")
    parser.add_argument("--use_gumbels_for_kl", type=eval, default=True,
                        choices=[True, False],
                        help="Whether to compute KL wrt U (gumbels) instead of X.")
    parser.add_argument("--use_nvil", type=eval, default=False,
                        choices=[True, False], help="Whether to use NVIL.")
    parser.add_argument("--use_reinforce", type=eval, default=False,
                        choices=[True, False], help="Whether to use REINFORCE.")
    parser.add_argument("--num_samples", type=int, default=1,
                        help="Num. samples for gradient estimation.")
    parser.add_argument("--reinforce_baseline", type=str, default="ema",
                        choices=["ema", "batch", "multi_sample"],
                        help="Choice of baseline for REINFORCE.")
    parser.add_argument("--ema_for_loss", type=float, default=0.99,
                        help="EMA coefficient for NVIL or REINFORCE.")
    parser.add_argument("--use_cpp_for_sampling", type=eval, default=False,
                        choices=[True, False],
                        help=("Whether to use C++ Kruskal's when sampling for "
                              "spanning tree sst."))
    parser.add_argument("--use_cpp_for_edge_metric", type=eval, default=False,
                        choices=[True, False],
                        help=("Whether to use C++ Kruskal's when computing edge "
                              "metrics for spanning tree sst."))
    parser.add_argument("--edge_metric_num_samples", type=int, default=1,
                        help="Num. samples when computing edge metrics.")
    parser.add_argument("--log_edge_metric_train", type=eval, default=False,
                        choices=[True, False],
                        help="Whether to compute and log edge metrics on train.")
    parser.add_argument("--log_edge_metric_val", type=eval, default=True,
                        choices=[True, False],
                        help="Whether to compute and log edge metrics on valid.")
    parser.add_argument("--eval_edge_metric_bs", type=int, default=10000,
                        help="Batch size for computing edge metrics during eval.")
    parser.add_argument("--symmeterize_logits", type=eval, default=True,
                        choices=[True, False],
                        help="Whether to make the encoder output edge symmetric.")
    parser.add_argument("--experiments_folder", type=str, default=None,
                        help=("Name of folder for experiment group."
                              "Set this for evaluation (mode == 'eval'."))
    parser.add_argument("--experiment_name", type=str, default=None,
                        help="Name of experiment.")
    parser.add_argument("--save_best_model", type=eval, default=True,
                        choices=[True, False],
                        help="Whether to save the checkpoint for the best model.")
    parser.add_argument("--add_timestamp", type=eval, default=True,
                        choices=[True, False],
                        help="Whether to add timestamp to experiments folder.")
    parser.add_argument("--verbose", type=eval, default=False, choices=[True, False])
    parser.add_argument("--max-steps", type=int, default=None)
    parser.add_argument("--method", type=str, default="sst", choices=['sst', 'aimle', 'ste'])
    parser.add_argument('--aimle-symmetric', action='store_true', default=False)
    parser.add_argument('--aimle-target', type=str, choices=['standard', 'adaptive'], default='standard')
    parser.add_argument('--imle-lambda', action='store', type=float, default=0.0)
    parser.add_argument('--imle-lambda-update-step', action='store', type=float, default=1e-4)
    parser.add_argument('--imle-noise', type=str, choices=['none', 'sog', 'gumbel'], default='sog')
    parser.add_argument('--imle-noise-temperature', action='store', type=float, default=0.0)
    parser.add_argument('--imle-samples', action='store', type=int, default=1)
    parser.add_argument('--gradient-scaling', action='store_true', default=False)
    args = parser.parse_args(argv)
    print(args)
    # Build the I-MLE target distribution (consumed by the 'aimle' method).
    if args.aimle_target in {'standard'}:
        target_distribution = TargetDistribution(alpha=1.0,
                                                 beta=args.imle_lambda,
                                                 do_gradient_scaling=args.gradient_scaling)
    elif args.aimle_target in {'adaptive'}:
        target_distribution = AdaptiveTargetDistribution(initial_alpha=1.0,
                                                         initial_beta=args.imle_lambda,
                                                         beta_update_step=args.imle_lambda_update_step)
    else:
        assert False, f'Do not know how to handle {args.aimle_target} as target distribution'
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    device = "cuda" if args.cuda else "cpu"
    # Check arguments.
    if args.sst != "indep":
        assert args.use_gumbels_for_kl
    if args.use_nvil or args.use_reinforce:
        assert not (args.use_nvil and args.use_reinforce)
        assert args.hard
    if args.use_reinforce and args.reinforce_baseline == "multi_sample":
        assert args.num_samples > 1
    if args.mode == "eval":
        assert args.experiments_folder is not None
    if args.mode == "train":
        # Experiments are organized such that there is a main experiments folder
        # which contains experiments that share the same training configuration
        # (same SST, relaxation, etc...) except for the hyperparameters.
        # First, set up main experiments folder.
        experiments_folder = (args.experiments_folder if args.experiments_folder else get_experiments_folder(args))
        if not os.path.exists("experiments"):
            os.makedirs("experiments")
        if not os.path.exists(os.path.join("experiments", experiments_folder)):
            os.makedirs(os.path.join("experiments", experiments_folder))
        # Set up the folder for specific hyperparameter settings.
        experiment_name = get_experiment_name(args)
        experiment_folder = os.path.join("experiments", experiments_folder, experiment_name)
        if not os.path.exists(experiment_folder):
            os.makedirs(experiment_folder)
        # Save args in experiment folder.
        config = {k: v for (k, v) in vars(args).items()}
        with open(os.path.join(experiment_folder, "train_config.json"), "w") as f:
            json.dump(config, f, indent=2)
        # Get ready to save model.
        encoder_file = os.path.join(experiment_folder, "encoder.pt")
        decoder_file = os.path.join(experiment_folder, "decoder.pt")
        log_file = os.path.join(experiment_folder, "log.txt")
        # NOTE(review): this handle is never explicitly closed; it stays open
        # for the process lifetime.
        log = open(log_file, "w")
    # Setup up training, validation, and test data.
    train_loader, valid_loader, test_loader, num_train, num_valid, num_test = \
        load_data(args.batch_size, args.eval_batch_size, args.suffix)
    num_complete_batches, leftover = divmod(num_train, args.batch_size)
    num_batches_per_epoch = num_complete_batches + bool(leftover)
    # Make sure eval batch size divides validation set, since we assume this
    # when computing eval metrics.
    eval_edge_metric_bs = (args.eval_batch_size if not args.eval_edge_metric_bs
                           else args.eval_edge_metric_bs)
    assert num_valid % args.eval_batch_size == 0
    assert num_valid % eval_edge_metric_bs == 0
    if args.mode == "eval":
        assert num_test % args.eval_batch_size == 0
        assert num_test % eval_edge_metric_bs == 0
    # Generate off-diagonal interaction graph
    off_diag = np.ones([args.num_vertices, args.num_vertices]) - np.eye(args.num_vertices)
    rel_rec = np.array(encode_onehot(np.where(off_diag)[0]), dtype=np.float32)
    rel_send = np.array(encode_onehot(np.where(off_diag)[1]), dtype=np.float32)
    rel_rec = torch.FloatTensor(rel_rec)
    rel_send = torch.FloatTensor(rel_send)
    encoder = MLPEncoder(args.timesteps * args.dims, args.encoder_hidden,
                         (args.edge_types if args.sst == "indep" else 1),
                         args.encoder_dropout, args.factor,
                         args.use_nvil, num_edges=rel_rec.size(0), n=args.num_vertices,
                         num_timesteps=args.timesteps, num_dims=args.dims)
    decoder = MLPDecoder(n_in_node=args.dims,
                         edge_types=args.edge_types,
                         msg_hid=args.decoder_hidden,
                         msg_out=args.decoder_hidden,
                         n_hid=args.decoder_hidden,
                         do_prob=args.decoder_dropout,
                         skip_first=args.skip_first,
                         num_rounds=args.num_rounds)
    if args.enc_weight_decay > 0.0 or args.dec_weight_decay > 0.0:
        print("Using AdamW.")
        optimizer = optim.AdamW([
            {"params": encoder.parameters(), "weight_decay": args.enc_weight_decay},
            {"params": decoder.parameters(), "weight_decay": args.dec_weight_decay}],
            lr=args.lr)
    else:
        print("Using Adam.")
        optimizer = optim.Adam(
            list(encoder.parameters()) + list(decoder.parameters()), lr=args.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.lr_decay,
                                    gamma=args.gamma)
    # Setup sampling function and probability calculation function for tree prior.
    if args.sst == "tree":
        # Check that the output of the encode is made to be symmetric.
        assert args.symmeterize_logits is not None
        sample_edges = partial(sample_tree_from_logits, edge_types=args.edge_types,
                               relaxation=args.relaxation, max_range=args.max_range,
                               use_cpp=args.use_cpp_for_sampling)
    elif args.sst == "topk":
        # Check that the output of the encode is made to be symmetric.
        assert args.symmeterize_logits is not None
        sample_edges = partial(sample_topk_from_logits, k=(args.num_vertices - 1),
                               edge_types=args.edge_types, relaxation=args.relaxation,
                               eps=args.eps_for_finitediff)
    elif args.sst == "indep":
        # sample_edges = gumbel_softmax
        sample_edges = partial(
            sample_indep_edges, is_edgesymmetric=args.symmeterize_logits)
    else:
        raise ValueError(f"Stochastic Softmax Trick type {args.sst} is not valid!")

    def compute_kl(logits):
        """KL term of the ELBO for the configured SST/prior combination."""
        if args.sst == "indep" and not args.use_gumbels_for_kl:
            probs = F.softmax(logits, dim=-1)
            return kl_categorical_uniform(probs, args.num_vertices, args.edge_types)
        else:
            return kl_gumbel(logits, args.num_vertices)

    get_sampling_metrics = partial(
        sampling_edge_metrics, sst=args.sst, n=args.num_vertices,
        num_samples=args.edge_metric_num_samples,
        is_edgesymmetric=args.symmeterize_logits, use_cpp=args.use_cpp_for_edge_metric)
    if args.cuda:
        encoder.to("cuda")
        decoder.to("cuda")
        rel_rec = rel_rec.to("cuda")
        rel_send = rel_send.to("cuda")
    rel_rec = Variable(rel_rec)
    rel_send = Variable(rel_send)

    def train():
        """Run the training loop.

        Returns (measurements, best_elbo, best_step, best_epoch); also saves
        the best checkpoint and appends to the experiment log as a side effect.
        """
        itercount = itertools.count()
        num_epochs = math.ceil(args.num_iterations / num_batches_per_epoch)
        best_elbo = -np.inf
        best_step, best_epoch = 0, 0
        # Exponential moving average of negative log-likelihood for
        # NVIL and REINFORCE.
        loss_nll_ema = 0.0
        measurements = {
            # Measurements on training set.
            "train_steps": [], "nll_train": [],
            "acc_train": [], "precision_train": [], "recall_train": [],
            "elbo_train": [], "tf_elbo_train": [], "kl_train": [],
            "mse_train": [],
            # Measurements on validation set.
            "val_steps": [], "nll_val": [],
            "acc_val": [], "precision_val": [], "recall_val": [],
            "elbo_val": [], "kl_val": [], "mse_val": [],
        }
        encoder.train()
        decoder.train()
        start_time = time.time()
        for epoch in range(num_epochs):
            for batch_idx, (data, relations) in enumerate(train_loader):
                if args.max_steps is not None and batch_idx > args.max_steps:
                    break
                i = next(itercount)
                if args.cuda:
                    data, relations = data.to("cuda"), relations.to("cuda")
                data, relations = Variable(data), Variable(relations)
                optimizer.zero_grad()
                # data is [B, 10, 10, 2]
                logits, nvil_baseline = encoder(data, rel_rec, rel_send)
                # logits is [B, 90, 1]
                logits = maybe_make_logits_symmetric(logits, args.symmeterize_logits)
                if args.method in {'sst'}:
                    edges = []
                    edge_weights = []
                    for _ in range(args.num_samples):
                        # ss stands for single sample.
                        ss_edges, ss_edge_weights = sample_edges(logits, tau=args.temp, hard=args.hard,
                                                                 hard_with_grad=args.st)
                        # ss_edges: [B, 90, 2]
                        # ss_edge_weights: [B, 90, 1]
                        edges.append(ss_edges)
                        edge_weights.append(ss_edge_weights)
                elif args.method in {'aimle', 'ste'}:
                    edges, edge_weights = sample_edges_imle(logits, args, is_eval=False,
                                                            target_distribution=target_distribution)
                # Edges and edge_weights are of shape
                # (num_samples * bs, (n - 1) * n, edge_types).
                edges = torch.cat(edges)
                edge_weights = torch.cat(edge_weights)
                if args.use_nvil or args.use_reinforce:
                    edges = edges.detach()
                    edge_weights = edge_weights.detach()
                # Repeat data to account for multiple samples.
                data = data.repeat(args.num_samples, *([1] * len(data.shape[1:])))
                output = decoder(data, edges, rel_rec, rel_send, args.prediction_steps)
                target = data[:, :, 1:, :]
                loss_nll = nll_gaussian(output, target, args.var)
                # this is needed by IMLE if we draw multiple samples
                batch_size = args.num_samples * logits.shape[0]
                batch_size_imle = loss_nll.shape[0]
                nb_samples = batch_size_imle // batch_size
                orig_logits_shp = logits.shape
                if nb_samples > 1:
                    logits_imle = logits.reshape(batch_size, 1, -1).repeat(1, nb_samples, 1)
                    logits_imle = logits_imle.view([batch_size_imle] + list(logits.shape)[1:])
                    logits = logits_imle
                # Reshape to take into account num_samples.
                # NOTE(review): after the IMLE replication above this view only
                # holds when args.num_samples == 1 — verify for multi-sample runs.
                loss_nll = loss_nll.view(args.num_samples, logits.size(0))
                # Unsqueeze to take into account num_samples.
                loss_kl = compute_kl(logits).unsqueeze(0)
                # Make sure all losses are consistently divided by num_vertices.
                # print('DIVIDING BY', (orig_logits_shp[0] * args.num_samples))
                loss = (loss_nll + loss_kl).sum() / (orig_logits_shp[0] * args.num_samples)
                if args.verbose is True:
                    print(f'Epoch {epoch}\tBatch {batch_idx}\tLoss: {loss.item():.5f}')
                measurements["train_steps"].append(i)
                if args.log_edge_metric_train:
                    acc, precision, recall = get_sampling_metrics(logits, relations)
                    best_idx = np.argmax(precision)
                    measurements["acc_train"].append(acc[best_idx])
                    measurements["precision_train"].append(precision[best_idx])
                    measurements["recall_train"].append(recall[best_idx])
                # this is needed by IMLE if we draw multiple samples
                batch_size = target.shape[0]
                batch_size_imle = output.shape[0]
                nb_samples = batch_size_imle // batch_size
                if nb_samples > 1:
                    target_imle = target.reshape(batch_size, 1, -1).repeat(1, nb_samples, 1)
                    target_imle = target_imle.view([batch_size_imle] + list(target.shape)[1:])
                    target = target_imle
                mse_loss = F.mse_loss(output, target).item()
                measurements["mse_train"].append(mse_loss)
                measurements["nll_train"].append(loss_nll.mean().item())
                measurements["kl_train"].append(loss_kl.mean().item())
                measurements["elbo_train"].append(-1.0 * loss.item())
                # Get decoder output using teacher forcing. We do this to get a
                # comparable elbo measurement with evaluation results, since we use
                # teacherforcing when evaluating on the validation and test set.
                with torch.inference_mode():
                    # Here, tf stands for teacher forcing.
                    tf_output = decoder(data, edges, rel_rec, rel_send, 1)
                    tf_nll = nll_gaussian(tf_output, target, args.var)
                    tf_nll = tf_nll.view(args.num_samples, logits.size(0))
                    tf_loss = (tf_nll + loss_kl).sum() / (logits.size(0) * args.num_samples)
                    tf_elbo = -1.0 * tf_loss
                    measurements["tf_elbo_train"].append(tf_elbo.item())
                if args.use_nvil or args.use_reinforce:
                    # Compute log p with respect to U.
                    edge_weights = edge_weights.view(args.num_samples, *logits.shape)
                    if args.use_gumbels_for_kl:
                        logprob = (
                            -(edge_weights - logits.unsqueeze(0)) -
                            torch.exp(-(edge_weights - logits.unsqueeze(0))))
                    else:
                        # Compute log p with respect to X. This only makes sense
                        # when args.sst == 'indep'.
                        edges = edges.view(args.num_samples, *logits.shape)
                        logprob = torch.log(torch.sum(
                            F.softmax(logits, dim=-1).unsqueeze(0) * edges,
                            axis=-1, keepdim=True))
                    logprob = logprob.sum(-1).sum(-1)
                    # Exponential moving average on the loss.
                    # If ema coeff is 0 then baseline is also 0.
                    if args.ema_for_loss > 0.0:
                        loss_nll_ema = (
                            args.ema_for_loss * loss_nll_ema +
                            (1.0 - args.ema_for_loss) * loss_nll.mean()).detach()
                    else:
                        loss_nll_ema = 0.0
                    if args.use_nvil:
                        nvil_baseline = nvil_baseline.unsqueeze(0)
                        baseline_loss = ((
                            (loss_nll - loss_nll_ema).detach() - nvil_baseline) ** 2)
                        nvil_loss = (
                            loss_nll +
                            (loss_nll - loss_nll_ema - nvil_baseline).detach() * logprob +
                            baseline_loss / args.num_vertices
                        )
                        nvil_loss = (nvil_loss + loss_kl).sum() / (logits.size(0) * args.num_samples)
                        nvil_loss.backward()
                        optimizer.step()
                    else:  # REINFORCE
                        # Compute the baseline.
                        if args.reinforce_baseline == "ema":
                            baseline = loss_nll_ema
                        elif args.reinforce_baseline == "batch":
                            # Use the mean of the whole batch.
                            # Compute mean over each sample separately.
                            baseline = loss_nll.mean()
                        elif args.reinforce_baseline == "multi_sample":
                            baseline = loss_nll.mean(0).unsqueeze(0)  # (1, bs)
                        reinforce_loss = loss_nll + (loss_nll - baseline).detach() * logprob
                        # Divide by (num_samples - 1) in the multi-sample case for
                        # an unbiased estimate.
                        reinforce_loss = reinforce_loss.sum(0) / (
                            (args.num_samples - 1) if args.reinforce_baseline == "multi_sample"
                            else args.num_samples
                        )
                        reinforce_loss = (reinforce_loss + loss_kl).sum() / logits.size(0)
                        reinforce_loss.backward()
                        optimizer.step()
                else:
                    loss.backward()
                    optimizer.step()
                # Evaluate every args.eval_every steps.
                if i % args.eval_every == 0 and args.eval_every >= 0:
                    train_time = time.time() - start_time
                    start_time = time.time()
                    eval_start_time = time.time()
                    measurements["val_steps"].append(i)
                    nlls, mses = [], []
                    accs, precisions, recalls = [], [], []
                    kls, elbos = [], []
                    logits_list_for_eval, relations_list_for_eval = [], []
                    encoder.eval()
                    decoder.eval()
                    # NOTE(review): this loop reuses (and clobbers) the outer
                    # loop's batch_idx/data/relations names.
                    for batch_idx, (data, relations) in enumerate(valid_loader):
                        if args.cuda:
                            data, relations = data.to("cuda"), relations.to("cuda")
                        data, relations = Variable(data), Variable(relations)
                        logits, baseline = encoder(data, rel_rec, rel_send)
                        logits = maybe_make_logits_symmetric(logits, args.symmeterize_logits)
                        if args.method in {'sst'}:
                            edges, _ = sample_edges(logits, tau=args.temp, hard=True)
                        elif args.method in {'aimle', 'ste'}:
                            edges, _ = sample_edges_imle(logits,
                                                         args,
                                                         is_eval=True,
                                                         target_distribution=target_distribution)
                            edges = edges[0]
                        # validation output uses teacher forcing.
                        output = decoder(data, edges, rel_rec, rel_send, 1)
                        target = data[:, :, 1:, :]
                        loss_nll = nll_gaussian(output, target, args.var).mean()
                        loss_kl = compute_kl(logits).mean()
                        # Since computing the edge metrics can be done with a
                        # much bigger batch size the eval batch size (for obtaining
                        # encoder and decoder outputs), we might want to collect
                        # the encoder output logits and compute edge metics
                        # with a bigger batch size outside the eval loop.
                        if args.log_edge_metric_val and eval_edge_metric_bs == args.eval_batch_size:
                            acc, precision, recall = get_sampling_metrics(logits, relations)
                            accs.append(acc)
                            precisions.append(precision)
                            recalls.append(recall)
                        elif args.log_edge_metric_val and eval_edge_metric_bs != args.eval_batch_size:
                            logits_list_for_eval.append(logits.to("cpu").detach().numpy())
                            relations_list_for_eval.append(relations.to("cpu").detach().numpy())
                        # this is needed by IMLE if we draw multiple samples
                        batch_size = target.shape[0]
                        batch_size_imle = output.shape[0]
                        nb_samples = batch_size_imle // batch_size
                        if nb_samples > 1:
                            target_imle = target.reshape(batch_size, 1, -1).repeat(1, nb_samples, 1)
                            target_imle = target_imle.view([batch_size_imle] + list(target.shape)[1:])
                            target = target_imle
                        mses.append(F.mse_loss(output, target).item())
                        nlls.append(loss_nll.item())
                        kls.append(loss_kl.item())
                        elbos.append(-1.0 * (loss_nll + loss_kl).item())
                    # Compute edge metrics with a bigger batch size separately
                    # from the eval loop. For spanning tree SST, doing this is
                    # faster only when the batched pytorch version of Kruskal's is
                    # used. The C++ Kruskal's is faster for small batch sizes
                    # (for example, when evaluating a bigger graph where we can
                    # only fit a small batch size for the encoder and decoder.)
                    if args.log_edge_metric_val and eval_edge_metric_bs != args.eval_batch_size:
                        logits_for_eval = torch.tensor(np.vstack(logits_list_for_eval)).to(device)
                        relations_for_eval = torch.tensor(np.vstack(relations_list_for_eval)).to(device)
                        for sub_idx in range(int(logits_for_eval.size(0) / eval_edge_metric_bs)):
                            logits_ = logits_for_eval[
                                sub_idx * eval_edge_metric_bs: (sub_idx + 1) * eval_edge_metric_bs]
                            relations_ = relations_for_eval[
                                sub_idx * eval_edge_metric_bs: (sub_idx + 1) * eval_edge_metric_bs]
                            acc, precision, recall = get_sampling_metrics(logits_, relations_)
                            accs.append(acc)
                            precisions.append(precision)
                            recalls.append(recall)
                    measurements["nll_val"].append(np.mean(nlls))
                    measurements["kl_val"].append(np.mean(kls))
                    measurements["elbo_val"].append(np.mean(elbos))
                    measurements["mse_val"].append(np.mean(mses))
                    accs = np.mean(accs, axis=0)
                    precisions = np.mean(precisions, axis=0)
                    recalls = np.mean(recalls, axis=0)
                    best_idx = np.argmax(precisions)
                    measurements["acc_val"].append(accs[best_idx])
                    measurements["precision_val"].append(precisions[best_idx])
                    measurements["recall_val"].append(recalls[best_idx])
                    eval_time = time.time() - eval_start_time
                    print(
                        "{}/{} iterations in {:0.2f}s; ".format(
                            i, args.num_iterations, train_time) +
                        "Eval in {:0.2f} sec".format(eval_time), flush=True)
                    measurements_str = (
                        "Iteration {} (Epoch {}) ".format(i, epoch) +
                        "nll_train: {:.10f} ".format(measurements["nll_train"][-1]) +
                        "kl_train: {:.10f} ".format(measurements["kl_train"][-1]) +
                        "elbo_train: {:.10f} ".format(measurements["elbo_train"][-1]) +
                        "tf_elbo_train: {:.10f} ".format(measurements["tf_elbo_train"][-1]) +
                        "mse_train: {:.10f} ".format(measurements["mse_train"][-1]) +
                        ("acc_train: {:.10f} ".format(measurements["acc_train"][-1]) +
                         "precision_train: {:.10f} ".format(measurements["precision_train"][-1]) +
                         "recall_train: {:.10f} ".format(measurements["recall_train"][-1])
                         if args.log_edge_metric_train else "") +
                        "nll_val: {:.10f} ".format(measurements["nll_val"][-1]) +
                        "kl_val: {:.10f} ".format(measurements["kl_val"][-1]) +
                        "elbo_val: {:.10f} ".format(measurements["elbo_val"][-1]) +
                        "mse_val: {:.10f} ".format(measurements["mse_val"][-1]) +
                        ("acc_val: {:.10f} ".format(measurements["acc_val"][-1]) +
                         "precision_val: {:.10f} ".format(measurements["precision_val"][-1]) +
                         "recall_val: {:.10f} ".format(measurements["recall_val"][-1])
                         if args.log_edge_metric_val else "")
                    )
                    print(measurements_str)
                    print(measurements_str, file=log)
                    log.flush()
                    if args.save_best_model and measurements["elbo_val"][-1] > best_elbo:
                        torch.save(encoder.state_dict(), encoder_file)
                        torch.save(decoder.state_dict(), decoder_file)
                        print("Best model so far, saving...")
                    if measurements["elbo_val"][-1] > best_elbo:
                        best_elbo = measurements["elbo_val"][-1]
                        best_step, best_epoch = i, epoch
                    encoder.train()
                    decoder.train()
            scheduler.step()
        return measurements, best_elbo, best_step, best_epoch

    def test():
        """Score every saved trial under --experiments_folder on valid/test.

        Returns a nested dict {split: {metric: np.ndarray over trials}}.
        """
        measurements = {
            "valid": {
                "elbo": [], "acc": [], "precision": [], "recall": []},
            "test": {
                "elbo": [], "acc": [], "precision": [], "recall": []}
        }
        idx = 0
        for exp in os.listdir(os.path.join("experiments", args.experiments_folder)):
            trial_path = os.path.join("experiments", args.experiments_folder, exp)
            print('trial_path 1', trial_path, os.path.os.path.isdir(trial_path))
            print('trial_path 2', os.path.join(trial_path, "train_and_val_measurements.pkl"), os.path.exists(os.path.join(trial_path, "train_and_val_measurements.pkl")))
            # Skip entries that are not finished trial folders.
            if not os.path.os.path.isdir(trial_path):
                continue
            if not os.path.exists(os.path.join(trial_path, "train_and_val_measurements.pkl")):
                continue
            start = time.time()
            try:
                encoder_file = os.path.join(trial_path, "encoder.pt")
                encoder.load_state_dict(
                    torch.load(encoder_file, map_location=torch.device(device)))
                decoder_file = os.path.join(trial_path, "decoder.pt")
                decoder.load_state_dict(
                    torch.load(decoder_file, map_location=torch.device(device)))
            except Exception as ex:
                # NOTE(review): a failed checkpoint load is only logged; the
                # trial is still evaluated with the previous weights.
                print('Exception', ex)
                print('encoder', encoder)
                print('decoder', decoder)
            for dataset in ["valid", "test"]:
                dataloader = valid_loader if dataset == "valid" else test_loader
                elbos = []
                logits_list = []
                relations_list = []
                accs_list, precisions_list, recalls_list = [], [], []
                for batch_idx, (data, relations) in enumerate(dataloader):
                    data = data[:, :, :args.timesteps, :]
                    if args.cuda:
                        data, relations = data.to("cuda"), relations.to("cuda")
                    data, relations = Variable(data), Variable(relations)
                    logits, _ = encoder(data, rel_rec, rel_send)
                    logits = maybe_make_logits_symmetric(logits, args.symmeterize_logits)
                    if args.method in {'sst'}:
                        edges, _ = sample_edges(logits, tau=args.temp, hard=True)
                    elif args.method in {'aimle', 'ste'}:
                        edges, _ = sample_edges_imle(logits,
                                                     args,
                                                     is_eval=True,
                                                     target_distribution=target_distribution)
                        edges = edges[0]
                    # validation output uses teacher forcing.
                    output = decoder(data, edges, rel_rec, rel_send, 1)
                    target = data[:, :, 1:, :]
                    loss_nll = nll_gaussian(output, target, args.var).mean()
                    loss_kl = compute_kl(logits).mean()
                    elbos.append(-1.0 * (loss_nll + loss_kl).item())
                    logits_list.append(logits.to("cpu").detach().numpy())
                    relations_list.append(relations.to("cpu").detach().numpy())
                logits_for_eval = torch.tensor(np.vstack(logits_list)).to(device)
                relations_for_eval = torch.tensor(np.vstack(relations_list)).to(device)
                for sub_idx in range(int(logits_for_eval.size(0) / eval_edge_metric_bs)):
                    logits_ = logits_for_eval[
                        sub_idx * eval_edge_metric_bs: (sub_idx + 1) * eval_edge_metric_bs]
                    relations_ = relations_for_eval[
                        sub_idx * eval_edge_metric_bs: (sub_idx + 1) * eval_edge_metric_bs]
                    accs, precisions, recalls = get_sampling_metrics(logits_, relations_)
                    accs_list.append(accs)
                    precisions_list.append(precisions)
                    recalls_list.append(recalls)
                print(f"{dataset} trial {idx} for {args.experiments_folder} took {time.time() - start}s.")
                measurements[dataset]["elbo"].append(np.mean(elbos))
                accs = np.mean(accs_list, axis=0)
                precisions = np.mean(precisions_list, axis=0)
                recalls = np.mean(recalls_list, axis=0)
                best_idx = np.argmax(precisions)
                measurements[dataset]["acc"].append(accs[best_idx])
                measurements[dataset]["precision"].append(precisions[best_idx])
                measurements[dataset]["recall"].append(recalls[best_idx])
            idx += 1
        all_measurements = {}
        for dataset in measurements:
            all_measurements[dataset] = {}
            for k, v in measurements[dataset].items():
                all_measurements[dataset][k] = np.array(v)
        return all_measurements

    if args.mode == "train":
        # Train model
        train_and_val_measurements, best_elbo, best_step, best_epoch = train()
        print("Optimization Finished!")
        print("Best Epoch: {:04d}; best step: {:04d}".format(best_epoch, best_step))
        print("Best Epoch: {:04d}; best step: {:04d}".format(best_epoch, best_step), file=log)
        log.flush()
        # Save measurements.
        meas_fname = "train_and_val_measurements.pkl"
        with open(os.path.join(experiment_folder, meas_fname), "wb") as f:
            pickle.dump(train_and_val_measurements, f)
        # import shutil
        # shutil.rmtree(experiment_folder)
    else:
        all_measurements = test()
        # import pdb;pdb.set_trace()
        print('XXX all_measurements', all_measurements)
        print("Saving data.")
        data_path = os.path.join(
            "experiments", args.experiments_folder, "data_for_bootstrapping.pkl")
        with open(data_path, "wb") as f:
            pickle.dump(all_measurements, f)
if __name__ == '__main__':
    # Log INFO-level messages to stdout and dispatch the CLI args to main().
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    main(sys.argv[1:])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import numpy as np
import argparse
import torch
from torch import optim, Tensor
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
from imle.imle import imle
from imle.aimle import aimle
from imle.ste import ste
from imle.target import TargetDistribution, AdaptiveTargetDistribution
from imle.noise import BaseNoiseDistribution, SumOfGammaNoiseDistribution, GumbelNoiseDistribution
from imle.solvers import mathias_select_k
from sklearn.model_selection import train_test_split
from aaai23.torch.utils import set_seed, subset_precision
from aaai23.torch.modules import Model, ConcreteDistribution, SampleSubset, IMLETopK
from aaai23.utils import pad_sequences
from typing import Optional, Callable
import socket
import wandb
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
class DifferentiableSelectKModel(torch.nn.Module):
    """Dispatch between a differentiable and an exact select-k function.

    In training mode the differentiable surrogate ``diff_fun`` is applied to
    the logits; in eval mode the exact (non-differentiable) ``fun`` is used.
    """

    def __init__(self,
                 diff_fun: Callable[[Tensor], Tensor],
                 fun: Callable[[Tensor], Tensor]):
        super().__init__()
        # Differentiable relaxation used for backprop during training.
        self.diff_fun = diff_fun
        # Exact solver used at evaluation time.
        self.fun = fun

    def forward(self, logits: Tensor) -> Tensor:
        """Apply the mode-appropriate select-k function to ``logits``."""
        if self.training:
            return self.diff_fun(logits)
        return self.fun(logits)
def evaluate(model_eval: Model,
x_eval: np.ndarray,
y_eval: np.ndarray,
device: torch.device) -> float:
mse = torch.nn.MSELoss()
x_eval_t = torch.tensor(x_eval, dtype=torch.long, device=device)
y_eval_t = torch.tensor(y_eval, dtype=torch.float, device=device)
eval_dataset = TensorDataset(x_eval_t, y_eval_t)
eval_loader = DataLoader(eval_dataset, batch_size=100, shuffle=False)
with torch.inference_mode():
model_eval.eval()
p_eval_lst = []
for X, y in eval_loader:
p_eval_lst += model_eval(x=X).view(-1).tolist()
p_eval_t = torch.tensor(p_eval_lst, dtype=torch.float, requires_grad=False, device=device)
mse_value = mse(p_eval_t, y_eval_t)
return mse_value.item()
def main(argv):
    """Train and evaluate an L2X-style rationale model on BeerAdvocate reviews.

    Pipeline: parse CLI arguments; build the vocabulary and load train/heldout
    data plus pretrained word embeddings; then, for each rerun seed, construct
    the chosen differentiable select-k estimator (SST, IMLE, IMLE-top-k, AIMLE,
    STE, or SoftSub), train with Adam while checkpointing on best validation
    MSE, and finally report validation/test MSE and subset precision statistics
    aggregated over the reruns (logged to wandb).
    """
    parser = argparse.ArgumentParser('PyTorch I-MLE/BeerAdvocate', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--aspect', '-a', action='store', type=int, default=1, help='Aspect')
    parser.add_argument('--epochs', '-e', action='store', type=int, default=20, help='Epochs')
    parser.add_argument('--batch-size', '-b', action='store', type=int, default=40, help='Batch Size')
    parser.add_argument('--kernel-size', '-k', action='store', type=int, default=3, help='Kernel Size')
    parser.add_argument('--hidden-dims', '-H', action='store', type=int, default=250, help='Hidden Dimensions')
    parser.add_argument('--max-len', '-m', action='store', type=int, default=350, help='Maximum Sequence Length')
    parser.add_argument('--select-k', '-K', action='store', type=int, default=10, help='Select K')
    parser.add_argument("--checkpoint", "-c", action='store', type=str, default='models/model.pt')
    parser.add_argument("--reruns", "-r", action='store', type=int, default=10)
    parser.add_argument("--method", "-M", type=str, choices=['sst', 'imle', 'imletopk', 'aimle', 'ste', 'softsub'],
                        default='imle', help="Method (SST, IMLE, AIMLE, STE, SoftSub)")
    parser.add_argument('--aimle-symmetric', action='store_true', default=False)
    parser.add_argument('--aimle-target', type=str, choices=['standard', 'adaptive'], default='standard')
    parser.add_argument('--imle-noise', type=str, choices=['none', 'sog', 'gumbel'], default='sog')
    parser.add_argument('--imle-samples', action='store', type=int, default=1)
    parser.add_argument('--imle-input-temperature', action='store', type=float, default=0.0)
    parser.add_argument('--imle-output-temperature', action='store', type=float, default=10.0)
    parser.add_argument('--imle-lambda', action='store', type=float, default=1000.0)
    parser.add_argument('--aimle-beta-update-step', action='store', type=float, default=0.0001)
    parser.add_argument('--aimle-beta-update-momentum', action='store', type=float, default=0.0)
    parser.add_argument('--aimle-target-norm', action='store', type=float, default=1.0)
    parser.add_argument('--sst-temperature', action='store', type=float, default=0.1)
    parser.add_argument('--softsub-temperature', action='store', type=float, default=0.5)
    parser.add_argument('--ste-noise', type=str, choices=['none', 'sog', 'gumbel'], default='sog')
    parser.add_argument('--ste-temperature', action='store', type=float, default=0.0)
    parser.add_argument('--debug', '-D', action='store_true', default=False)
    parser.add_argument('--max-iterations', action='store', type=int, default=None)
    parser.add_argument('--gradient-scaling', action='store_true', default=False)
    args = parser.parse_args(argv)

    if args.debug is True:
        torch.autograd.set_detect_anomaly(True)

    hostname = socket.gethostname()
    print(f'Hostname: {hostname}')

    device = torch.device('cpu')
    if torch.cuda.is_available():
        device = torch.device('cuda')

    aspect = args.aspect
    input_path_train = "data/reviews.aspect" + str(aspect) + ".train.txt"
    input_path_validation = "data/reviews.aspect" + str(aspect) + ".heldout.txt"

    # Build the word -> id vocabulary from the training split.
    # Ids 0/1/2 are reserved for <PAD>/<START>/<UNK>, so real tokens start at 3.
    word_to_id = dict()
    token_id_counter = 3
    with open(input_path_train) as fin:
        for line in fin:
            y, sep, text = line.partition("\t")
            token_list = text.split(" ")
            for token in token_list:
                if token not in word_to_id:
                    word_to_id[token] = token_id_counter
                    token_id_counter = token_id_counter + 1
    word_to_id["<PAD>"] = 0
    word_to_id["<START>"] = 1
    word_to_id["<UNK>"] = 2

    # Set parameters:
    method_name = args.method
    maxlen = args.max_len
    batch_size = args.batch_size
    embedding_dims = 200
    kernel_size = args.kernel_size
    hidden_dims = args.hidden_dims
    epochs = args.epochs
    select_k = args.select_k  # Number of selected words by the methods
    checkpoint_path = args.checkpoint
    id_to_word = {value: key for key, value in word_to_id.items()}

    # Second pass over the training file: map tokens to ids and extract the
    # normalized [0, 1] rating for the requested aspect.
    X_train_list = []
    Y_train_list = []
    with open(input_path_train) as fin:
        for line in fin:
            y, sep, text = line.partition("\t")
            token_list = text.split(" ")
            tokenid_list = [word_to_id[token] for token in token_list]
            X_train_list.append(tokenid_list)
            y = [float(v) for v in y.split()]
            Y_train_list.append(y[aspect])

    X_train = pad_sequences(X_train_list, max_len=maxlen)
    Y_train = np.asarray(Y_train_list)
    X_train_t = torch.tensor(X_train, dtype=torch.long, device=device)
    Y_train_t = torch.tensor(Y_train, dtype=torch.float, device=device)
    train_dataset = TensorDataset(X_train_t, Y_train_t)

    print("Loading heldout data...")
    X_val_list = []
    Y_val_list = []
    with open(input_path_validation) as fin:
        for line in fin:
            y, sep, text = line.partition("\t")
            token_list = text.split(" ")
            # Tokens unseen at training time map to <UNK> (id 2).
            tokenid_list = [word_to_id.get(token, 2) for token in token_list]
            X_val_list.append(tokenid_list)
            y = [float(v) for v in y.split()]
            Y_val_list.append(y[aspect])

    X_val_both = pad_sequences(X_val_list, max_len=maxlen)
    Y_val_both = np.asarray(Y_val_list)

    # Load pretrained 200-d word embeddings.
    embeddings_index = {}
    with open("data/review+wiki.filtered.200.txt") as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    print('Found %s word vectors.' % len(embeddings_index))

    embedding_matrix = np.zeros((len(word_to_id) + 1, embedding_dims))
    for word, i in word_to_id.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector

    print('Creating model...')
    val_mse_lst = []
    test_mse_lst = []
    subset_precision_lst = []

    embedding_matrix_t = torch.tensor(embedding_matrix, dtype=torch.float, requires_grad=False, device=device)
    loss_function = torch.nn.MSELoss()
    loss_function_nored = torch.nn.MSELoss(reduction='none')

    # Iterate over several seeds to compute statistics across reruns.
    for seed in range(args.reruns):
        wandb.init(project="beeradv-aaai23", name=f'{method_name}-{seed}')
        wandb.config.update(args)
        wandb.config.update({'hostname': hostname, 'seed': seed})

        set_seed(seed, is_deterministic=True)

        # Create a new validation/test split per seed.
        X_val, X_test, Y_val, Y_test = train_test_split(X_val_both, Y_val_both, test_size=0.5, random_state=seed)

        print('Initialising the model ..')

        def name_to_distribution(distribution_name: str) -> Optional[BaseNoiseDistribution]:
            """Map a CLI noise name to a noise-distribution instance (or None)."""
            if distribution_name in {'none'}:
                noise_distribution = None
            elif distribution_name in {'sog'}:
                noise_distribution = SumOfGammaNoiseDistribution(k=select_k, nb_iterations=10, device=device)
            elif distribution_name in {'gumbel'}:
                noise_distribution = GumbelNoiseDistribution(device=device)
            else:
                assert False, f'Noise model not supported: {distribution_name}'
            return noise_distribution

        # Exact select-k solver used in evaluation mode.
        blackbox_function = lambda logits: mathias_select_k(logits, k=select_k)

        if method_name in {'imle'}:
            nb_samples = args.imle_samples
            imle_input_temp = args.imle_input_temperature
            imle_output_temp = args.imle_output_temperature
            imle_lambda = args.imle_lambda
            target_distribution = TargetDistribution(alpha=1.0, beta=imle_lambda, do_gradient_scaling=args.gradient_scaling)
            noise_distribution = name_to_distribution(args.imle_noise)

            @imle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
                  theta_noise_temperature=imle_input_temp, target_noise_temperature=imle_output_temp)
            def imle_select_k(logits: Tensor) -> Tensor:
                return mathias_select_k(logits, k=select_k)

            differentiable_select_k = DifferentiableSelectKModel(imle_select_k, blackbox_function)
        elif method_name in {'imletopk'}:
            IMLETopK.k = select_k
            IMLETopK.tau = args.imle_output_temperature
            IMLETopK.lambda_ = args.imle_lambda
            differentiable_select_k = DifferentiableSelectKModel(IMLETopK.apply, blackbox_function)
        elif method_name in {'aimle'}:
            nb_samples = args.imle_samples
            imle_input_temp = args.imle_input_temperature
            imle_output_temp = args.imle_output_temperature
            imle_lambda = args.imle_lambda

            if args.aimle_target in {'standard'}:
                target_distribution = TargetDistribution(alpha=1.0,
                                                         beta=imle_lambda,
                                                         do_gradient_scaling=args.gradient_scaling)
            elif args.aimle_target in {'adaptive'}:
                target_distribution = AdaptiveTargetDistribution(initial_alpha=1.0,
                                                                 initial_beta=imle_lambda,
                                                                 beta_update_step=args.aimle_beta_update_step,
                                                                 beta_update_momentum=args.aimle_beta_update_momentum,
                                                                 target_norm=args.aimle_target_norm)
            else:
                assert False, f'Do not know how to handle {args.aimle_target} as target distribution'

            noise_distribution = name_to_distribution(args.imle_noise)

            @aimle(target_distribution=target_distribution, noise_distribution=noise_distribution,
                   nb_samples=nb_samples,
                   theta_noise_temperature=imle_input_temp, target_noise_temperature=imle_output_temp,
                   symmetric_perturbation=args.aimle_symmetric)
            def aimle_select_k(logits: Tensor) -> Tensor:
                return mathias_select_k(logits, k=select_k)

            differentiable_select_k = DifferentiableSelectKModel(aimle_select_k, blackbox_function)
        elif method_name in {'ste'}:
            noise_distribution = name_to_distribution(args.ste_noise)

            @ste(noise_distribution=noise_distribution, noise_temperature=args.ste_temperature)
            def ste_select_k(logits: Tensor) -> Tensor:
                return mathias_select_k(logits, k=select_k)

            differentiable_select_k = DifferentiableSelectKModel(ste_select_k, blackbox_function)
        elif method_name in {'sst'}:
            tau = args.sst_temperature
            differentiable_select_k = ConcreteDistribution(tau=tau, k=select_k, device=device)
        elif method_name in {'softsub'}:
            tau = args.softsub_temperature
            differentiable_select_k = SampleSubset(tau=tau, k=select_k, device=device)
        else:
            assert False, f'Method not supported: {method_name}'

        model = Model(embedding_weights=embedding_matrix_t,
                      hidden_dims=hidden_dims,
                      kernel_size=kernel_size,
                      select_k=select_k,
                      differentiable_select_k=differentiable_select_k).to(device)

        # Print a per-parameter and per-module breakdown of the model size.
        print('Model:')
        group_name_to_nparams = dict()
        for name in model.state_dict():
            group_name = name.split('.')[0].strip()
            pt = model.state_dict()[name]
            print(f'\t{name}\t{pt.size()}\t{pt.numel()}')
            group_name_to_nparams[group_name] = group_name_to_nparams.get(group_name, 0) + pt.numel()

        print('Model modules:')
        for name, nparams in group_name_to_nparams.items():
            print(f'\t{name}\t{nparams}')

        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        optimizer = optim.Adam(model.parameters(), lr=0.001, eps=1e-7)

        st = time.time()
        best_val_mse = None
        for epoch_no in range(1, epochs + 1):
            epoch_loss_values = []
            for i, (X, y) in enumerate(train_loader):
                # Used for unit tests
                if args.max_iterations is not None and i > args.max_iterations:
                    break

                model.train()
                p = model(x=X).view(-1)

                # While y is [B], p is [B * S], where S is the number of samples
                # drawn by I-MLE during the forward pass; replicate y S times.
                nb_samples = p.shape[0] // y.shape[0]
                if p.shape[0] > y.shape[0]:
                    assert method_name in {'imle', 'aimle'} and args.imle_samples > 1, "p.shape and y.shape differ"
                    # Fix: reshape with the *actual* batch size (y.shape[0]) rather
                    # than the configured `batch_size` — the final batch of an epoch
                    # can be smaller (DataLoader has no drop_last), and reshaping
                    # with the configured value raised a RuntimeError there.
                    y = y.view(-1, 1)
                    y = y.repeat(1, nb_samples)
                    y = y.view(-1)

                # For the loss values, sum over S and aggregate (mean) over B.
                assert nb_samples > 0
                if nb_samples == 1:
                    loss = loss_function(p, y)
                else:
                    loss = loss_function_nored(p, y)
                    loss = loss.view(-1, nb_samples).sum(axis=1).mean(axis=0)

                loss_value = loss.item()
                if args.debug is True:
                    logger.info(f'Epoch {epoch_no}/{epochs}\tIteration {i + 1}\tLoss value: {loss_value:.4f}')
                epoch_loss_values += [loss_value]

                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

            loss_mean, loss_std = np.mean(epoch_loss_values), np.std(epoch_loss_values)
            logger.info(f'Epoch {epoch_no}/{epochs}\tLoss {loss_mean:.4f} ± {loss_std:.4f}')

            # Checkpointing: keep the weights with the best validation MSE so far.
            val_mse = evaluate(model, X_val, Y_val, device=device)
            test_mse = evaluate(model, X_test, Y_test, device=device)
            if best_val_mse is None or val_mse <= best_val_mse:
                print(f'Saving new checkpoint -- new best validation MSE: {val_mse:.5f}')
                torch.save({'model_state_dict': model.state_dict()}, checkpoint_path)
                best_val_mse = val_mse

            wandb.log({'seed': seed, 'val_mse': val_mse, 'test_mse': test_mse, 'loss_mean': loss_mean}, step=epoch_no)

        duration = time.time() - st
        print(f'[{seed}] Training time is {duration} ms')

        # Restore the best checkpoint (if any) before the final evaluation.
        if os.path.isfile(checkpoint_path):
            print(f'Loading checkpoint at {checkpoint_path} ..')
            checkpoint = torch.load(checkpoint_path)
            model.load_state_dict(checkpoint['model_state_dict'])

        val_mse = evaluate(model, X_val, Y_val, device=device) * 100.0
        print(f"[{seed}] Validation MSE: {val_mse:.5f}")
        val_mse_lst += [val_mse]

        test_mse = evaluate(model, X_test, Y_test, device=device) * 100.0
        print(f"[{seed}] Test MSE: {test_mse:.5f}")
        test_mse_lst += [test_mse]

        subset_prec = subset_precision(model, aspect, id_to_word, word_to_id, select_k,
                                       device=device, max_len=maxlen) * 100.0
        print(f"[{seed}] Subset precision: {subset_prec:.5f}")
        subset_precision_lst += [subset_prec]

        wandb.log({'best_val_mse': val_mse, 'best_test_mse': test_mse, 'best_subset_prec': subset_prec})
        wandb.finish()

    print(f'Final Subset Precision List: {np.mean(subset_precision_lst):.5f} ± {np.std(subset_precision_lst):.5f}')
    print(f'Final Validation MSE List: {np.mean(val_mse_lst):.5f} ± {np.std(val_mse_lst):.5f}')
    print(f'Final Test MSE List: {np.mean(test_mse_lst):.5f} ± {np.std(test_mse_lst):.5f}')

    if os.path.exists(checkpoint_path):
        os.remove(checkpoint_path)

    print('Experiment completed.')
if __name__ == '__main__':
    # Script entry point: log INFO-level messages to stdout and forward the
    # command-line arguments (without the program name) to main().
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    main(sys.argv[1:])
| 18,819 | 41.387387 | 124 | py |
torch-adaptive-imle | torch-adaptive-imle-main/cli/expected-sparsity-cli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This is an extended version of gradient-cli.py that supports AIMLE
# Remember to replace gradient-cli.py with this one
import os
import sys
import torch
import numpy as np
from torch import Tensor, nn
from aaai23.synth import distributions, utils, sfe2 as sfe
import argparse
from tqdm import tqdm
from typing import Optional
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
def objective(z: Tensor, b_t: Tensor) -> Tensor:
    """Summed squared error between a (possibly multi-sample) state z and target b_t.

    If z stacks several samples (its flattened length is a multiple of b_t's),
    the target is broadcast across the sample rows and the squared errors are
    summed over all samples and dimensions.
    """
    if len(z.shape) > len(b_t.shape):
        z = z.view(-1)
    dim = b_t.shape[0]
    if z.shape[0] > dim:
        # z holds several stacked samples: compare each row against the target.
        samples_2d = z.view(-1, dim)
        diffs_2d = samples_2d - b_t.view(1, dim)
        return (diffs_2d ** 2).sum()
    return ((z - b_t) ** 2).sum()
def true_gradient_fun(topk: distributions.TopK,
                      theta_t: Tensor,
                      b_t: Tensor) -> Tensor:
    """Exact gradient of E_{z ~ p(z; theta)}[objective(z, b_t)] w.r.t. theta.

    The expectation is computed via utils.expect_obj over the top-k
    distribution `topk`, so this is only tractable when the state space is
    small enough to enumerate.
    """
    objective_bt = lambda z_: objective(z_, b_t)
    # Expected value of the loss
    exact_obective = lambda _theta: utils.expect_obj(topk, _theta, objective_bt)
    theta_t_param = nn.Parameter(theta_t, requires_grad=True)
    loss = exact_obective(theta_t_param)
    loss.backward()
    return theta_t_param.grad
def sfe_gradient_fun(topk: distributions.TopK,
                     theta_t: Tensor,
                     b_t: Tensor,
                     nb_samples: Optional[int]) -> Tensor:
    """Score-function (REINFORCE-style) gradient estimate of the expected objective.

    `nb_samples` is the number of Monte Carlo samples passed to the sfe
    estimator. A fixed RandomState(0) makes the drawn samples reproducible
    across calls.
    """
    rs = np.random.RandomState(0)
    objective_bt = lambda z_: objective(z_, b_t)
    sfe_full = sfe.sfe(topk.sample_f(rs), objective_bt, topk.grad_log_p(topk.marginals), nb_samples)
    theta_t_param = nn.Parameter(theta_t, requires_grad=True)
    z = sfe_full(theta_t_param)
    loss = objective(z, b_t)
    loss.backward()
    return theta_t_param.grad
def main(argv):
    """Compute and print the exact expected-objective gradient for random top-k instances.

    For each seed, draws a random parameter vector theta and a random
    non-negative target b_t, computes the exact gradient of the expected
    objective over the enumerated top-k distribution, and prints it.
    """
    parser = argparse.ArgumentParser('Gradient Estimation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-k', action='store', type=int, default=1)
    parser.add_argument('-n', action='store', type=int, default=20)
    parser.add_argument('--seeds', '-s', action='store', type=int, default=64)
    args = parser.parse_args(argv)

    device = torch.device('cpu')
    if torch.cuda.is_available():
        # Also make newly-created tensors default to the GPU.
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        device = torch.device('cuda')
    torch.set_num_threads(16)

    n = args.n
    k = args.k
    topk = distributions.TopK(n, k, device=device)
    print(f'Possible states: {topk.states.shape}')

    # Note: the unused results dictionary and CosineSimilarity module that a
    # sibling script builds here were dead code in this variant and have been
    # removed.
    for i in tqdm(range(args.seeds), desc='Seed'):
        rng = np.random.RandomState(i)
        theta = rng.randn(n)
        # Target state: element-wise absolute value of a Gaussian draw.
        b_t = torch.abs(torch.from_numpy(rng.randn(n)).float().to(device))
        theta_t = torch.tensor(theta, dtype=torch.float, requires_grad=False, device=device)
        true_gradient = true_gradient_fun(topk, theta_t, b_t)
        print(true_gradient)

    print('Done!')
if __name__ == '__main__':
    # Script entry point: log INFO-level messages to stdout and forward the
    # command-line arguments (without the program name) to main().
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    main(sys.argv[1:])
| 3,417 | 27.247934 | 115 | py |
torch-adaptive-imle | torch-adaptive-imle-main/cli/gradient-sparsity-bias-cli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This is an extended version of gradient-cli.py that supports AIMLE
# Remember to replace gradient-cli.py with this one
import os
import sys
import torch
import numpy as np
from torch import Tensor, nn
import torch.nn.functional as F
from imle.ste import ste as ste
from imle.imle import imle as imle
from imle.aimle import aimle as aimle
from imle.target import BaseTargetDistribution, TargetDistribution, AdaptiveTargetDistribution
from imle.noise import BaseNoiseDistribution, SumOfGammaNoiseDistribution, GumbelNoiseDistribution
from aaai23.synth import distributions, utils, sfe2 as sfe
import argparse
from tqdm import tqdm
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from typing import Optional
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
torch.set_printoptions(profile="full", linewidth=512)
def objective(z: Tensor, b_t: Tensor) -> Tensor:
    """Summed squared error between a (possibly multi-sample) state z and target b_t.

    If z stacks several samples (its flattened length is a multiple of b_t's),
    the target is broadcast across the sample rows and the squared errors are
    summed over all samples and dimensions.
    """
    if len(z.shape) > len(b_t.shape):
        z = z.view(-1)
    dim = b_t.shape[0]
    if z.shape[0] > dim:
        # z holds several stacked samples: compare each row against the target.
        samples_2d = z.view(-1, dim)
        diffs_2d = samples_2d - b_t.view(1, dim)
        return (diffs_2d ** 2).sum()
    return ((z - b_t) ** 2).sum()
def true_gradient_fun(topk: distributions.TopK,
                      theta_t: Tensor,
                      b_t: Tensor) -> Tensor:
    """Exact gradient of E_{z ~ p(z; theta)}[objective(z, b_t)] w.r.t. theta.

    The expectation is computed via utils.expect_obj over the top-k
    distribution `topk`, so this is only tractable when the state space is
    small enough to enumerate.
    """
    objective_bt = lambda z_: objective(z_, b_t)
    # Expected value of the loss
    exact_obective = lambda _theta: utils.expect_obj(topk, _theta, objective_bt)
    theta_t_param = nn.Parameter(theta_t, requires_grad=True)
    loss = exact_obective(theta_t_param)
    loss.backward()
    return theta_t_param.grad
def imle_gradient_fun(topk: distributions.TopK,
                      theta_t: Tensor,
                      b_t: Tensor,
                      target_distribution: BaseTargetDistribution,
                      noise_distribution: BaseNoiseDistribution,
                      nois
def aimle_gradient_fun(topk: distributions.TopK,
                       theta_t: Tensor,
                       b_t: Tensor,
                       target_distribution: BaseTargetDistribution,
                       noise_distribution: BaseNoiseDistribution,
                       noise_temperature: float,
                       nb_samples: int,
                       nb_marginal_samples: int,
                       is_symmetric: bool,
                       warmup_steps: int = 0) -> Tensor:
    """Adaptive I-MLE (AIMLE) gradient estimate of the expected objective.

    Wraps the batched top-k MAP solver with the @aimle decorator so the
    backward pass produces the AIMLE gradient estimate (symmetric/central
    perturbation when `is_symmetric` is True).

    `warmup_steps` extra forward/backward passes are run first with their
    gradients discarded (grad reset to None); this lets an adaptive target
    distribution update its internal state before the returned gradient is
    measured.
    """
    @aimle(theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
           target_distribution=target_distribution, noise_distribution=noise_distribution,
           nb_samples=nb_samples, nb_marginal_samples=nb_marginal_samples, symmetric_perturbation=is_symmetric)
    def imle_topk_batched(thetas: Tensor) -> Tensor:
        # return torch.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])
        return topk.map_2d(thetas)

    def imle_topk(theta: Tensor) -> Tensor:
        # Lift the single theta vector to a batch of one, and flatten back.
        return imle_topk_batched(theta.view(1, -1)).view(-1)

    theta_t_param = nn.Parameter(theta_t, requires_grad=True)

    # Warm-up passes: accumulate and discard gradients so the (possibly
    # adaptive) target distribution can calibrate itself.
    for _ in range(warmup_steps):
        z = imle_topk(theta_t_param)
        loss = objective(z, b_t)
        loss.backward()
        theta_t_param.grad = None

    z = imle_topk(theta_t_param)
    loss = objective(z, b_t)
    loss.backward()
    return theta_t_param.grad
def ste_gradient_fun(topk: distributions.TopK,
                     theta_t: Tensor,
                     b_t: Tensor,
                     noise_distribution: BaseNoiseDistribution,
                     noise_temperature: float,
                     nb_samples: int) -> Tensor:
    """Straight-through estimator (STE) gradient of the expected objective.

    Wraps the batched top-k MAP solver with the @ste decorator so the
    backward pass uses the straight-through gradient.
    """
    @ste(noise_temperature=noise_temperature, noise_distribution=noise_distribution, nb_samples=nb_samples)
    def ste_topk_batched(thetas: Tensor) -> Tensor:
        # return torch.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])
        return topk.map_2d(thetas)

    def ste_topk(theta: Tensor) -> Tensor:
        # Lift the single theta vector to a batch of one, and flatten back.
        return ste_topk_batched(theta.view(1, -1)).view(-1)

    theta_t_param = nn.Parameter(theta_t, requires_grad=True)
    z = ste_topk(theta_t_param)
    loss = objective(z, b_t)
    loss.backward()
    return theta_t_param.grad
def sfe_gradient_fun(topk: distributions.TopK,
                     theta_t: Tensor,
                     b_t: Tensor,
                     nb_samples: Optional[int]) -> Tensor:
    """Score-function (REINFORCE-style) gradient estimate of the expected objective.

    `nb_samples` is the number of Monte Carlo samples passed to the sfe
    estimator. A fixed RandomState(0) makes the drawn samples reproducible
    across calls.
    """
    rs = np.random.RandomState(0)
    objective_bt = lambda z_: objective(z_, b_t)
    sfe_full = sfe.sfe(topk.sample_f(rs), objective_bt, topk.grad_log_p(topk.marginals), nb_samples)
    theta_t_param = nn.Parameter(theta_t, requires_grad=True)
    z = sfe_full(theta_t_param)
    loss = objective(z, b_t)
    loss.backward()
    return theta_t_param.grad
def gs_gradient_fun(theta_t: Tensor,
                    b_t: Tensor,
                    nb_samples: Optional[int],
                    tau: float = 1.0,
                    hard: bool = True) -> Tensor:
    """Gumbel-Softmax gradient estimate, averaged over `nb_samples` draws.

    theta is tiled into a [nb_samples, N] batch so each row yields an
    independent relaxed sample; the per-row gradients are averaged over the
    batch dimension. `hard` selects the straight-through (discretized) variant.
    """
    # Tile theta into a [B, N] batch of identical rows.
    batched_theta = theta_t.view(1, -1).repeat(nb_samples, 1)
    batched_param = nn.Parameter(batched_theta, requires_grad=True)
    relaxed = F.gumbel_softmax(batched_param, tau=tau, hard=hard)
    objective(relaxed, b_t).backward()
    return batched_param.grad.mean(0)
def main(argv):
    """Compare discrete-gradient estimators against the exact gradient on random top-k instances.

    For each seed: compute the exact expected-objective gradient by
    enumeration, then the gradient estimate of each configured method
    (IMLE forward/central, AIMLE forward/central, STE, SFE, Gumbel-Softmax
    soft/hard), record the cosine similarity to the exact gradient in a
    long-format results table, and optionally plot similarity vs lambda
    with seaborn, saving the figure to `args.o`.
    """
    parser = argparse.ArgumentParser('Gradient Estimation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-k', action='store', type=int, default=10)
    parser.add_argument('-n', action='store', type=int, default=20)
    parser.add_argument('-o', action='store', type=str, default=None)
    parser.add_argument('--min-lambda', action='store', type=float, default=0.0)
    parser.add_argument('--max-lambda', action='store', type=float, default=100.0)
    parser.add_argument('--nb-lambdas', action='store', type=int, default=1001)
    parser.add_argument('--lambdas', type=float, nargs='+', default=[])
    parser.add_argument('--imle-samples', type=int, nargs='+', default=[])
    parser.add_argument('--nb-marginal-samples', action='store', type=int, default=1)
    # The following methods are not sensitive to lambdas
    parser.add_argument('--aimle-samples', type=int, nargs='+', default=[])
    parser.add_argument('--ste-samples', type=int, nargs='+', default=[])
    parser.add_argument('--sfe-samples', type=int, nargs='+', default=[])
    parser.add_argument('--gs-samples', type=int, nargs='+', default=[])
    parser.add_argument('--seeds', '-s', action='store', type=int, default=64)
    parser.add_argument('--warmup-steps', action='store', type=int, default=100)
    parser.add_argument('--momentum', action='store', type=float, default=0.0)
    parser.add_argument('--target', action='store', type=float, default=1.0)
    parser.add_argument('--tau', action='store', type=float, default=1.0)
    parser.add_argument('--threshold', action='store', type=float, default=1e-8)
    args = parser.parse_args(argv)

    device = torch.device('cpu')
    if torch.cuda.is_available():
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        device = torch.device('cuda')
    torch.set_num_threads(16)

    n = args.n
    k = args.k
    ms = args.nb_marginal_samples

    topk = distributions.TopK(n, k, device=device)
    print(f'Possible states: {topk.states.shape}')

    # Either an explicit list of lambda values, or a linspace over the range.
    if len(args.lambdas) > 0:
        lambdas = np.array(args.lambdas)
    else:
        lambdas = np.linspace(args.min_lambda, args.max_lambda, num=args.nb_lambdas)

    warmup_steps = args.warmup_steps

    # noise_distribution = SumOfGammaNoiseDistribution(k=k, nb_iterations=10)
    noise_distribution = GumbelNoiseDistribution()

    # Long-format results table, later converted to a DataFrame for plotting.
    lmd_mse_dict = {
        'Method': [],
        '$\\lambda$': [],
        'Cosine Similarity': [],
        'Seed': []
    }

    pdist_ = nn.CosineSimilarity(dim=1, eps=1e-6)
    # Cosine similarity between two flat gradient vectors, as a 0-dim tensor.
    pdist = lambda x, y: pdist_(x.view(1, -1), y.view(1, -1)).view(-1)[0]

    for i in range(args.seeds):
        print(f'Processing seed {i} ..')
        rng = np.random.RandomState(i)
        theta = rng.randn(n)
        # Target state: element-wise absolute value of a Gaussian draw.
        b_t = torch.abs(torch.from_numpy(rng.randn(n)).float().to(device))
        theta_t = torch.tensor(theta, dtype=torch.float, requires_grad=False, device=device)

        # Reference: exact gradient of the expected objective.
        true_gradient = true_gradient_fun(topk, theta_t, b_t)

        # IMLE is sensitive to lambda, so it is evaluated for every lambda value.
        for lmd in lambdas:
            target_distribution = TargetDistribution(alpha=1.0, beta=lmd, do_gradient_scaling=True)

            for s in args.imle_samples:
                imle_gradient = imle_gradient_fun(topk, theta_t, b_t,
                                                  target_distribution=target_distribution,
                                                  noise_distribution=noise_distribution,
                                                  noise_temperature=1.0,
                                                  nb_samples=s, nb_marginal_samples=ms)
                dist = pdist(true_gradient, imle_gradient)

                lmd_mse_dict['Method'] += [f'IMLE (Forward, $S={s}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]

                # Disabled diagnostic: gradient sparsity of a single-sample estimate.
                if False:
                    imle_gradient = imle_gradient_fun(topk, theta_t, b_t,
                                                      target_distribution=target_distribution,
                                                      noise_distribution=noise_distribution,
                                                      noise_temperature=1.0,
                                                      nb_samples=1, nb_marginal_samples=ms)
                    sparsity = float((torch.abs(imle_gradient) < args.threshold).sum().item()) / float(imle_gradient.shape[0])

                    lmd_mse_dict['Method'] += [f'IMLE ($S={s}$, $\\mu = {ms}$) Sparsity']
                    lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                    lmd_mse_dict['Cosine Similarity'] += [sparsity]
                    lmd_mse_dict['Seed'] += [i]

                # Central (symmetric-perturbation) IMLE variant.
                imle_sym_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                       target_distribution=target_distribution,
                                                       noise_distribution=noise_distribution,
                                                       noise_temperature=1.0,
                                                       nb_samples=s, nb_marginal_samples=ms,
                                                       is_symmetric=True)
                dist = pdist(true_gradient, imle_sym_gradient)

                lmd_mse_dict['Method'] += [f'IMLE (Central, $S={s}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]

                # Disabled diagnostic: sparsity of the central single-sample estimate.
                if False:
                    imle_sym_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                           target_distribution=target_distribution,
                                                           noise_distribution=noise_distribution,
                                                           noise_temperature=1.0,
                                                           nb_samples=1,
                                                           nb_marginal_samples=ms,
                                                           is_symmetric=True)
                    sparsity = float((torch.abs(imle_sym_gradient) < args.threshold).sum().item()) / float(imle_gradient.shape[0])

                    lmd_mse_dict['Method'] += [f'IMLE Sym ({s}, $\\mu = {ms}$) Sparsity']
                    lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                    lmd_mse_dict['Cosine Similarity'] += [sparsity]
                    lmd_mse_dict['Seed'] += [i]

        # The remaining methods do not depend on lambda: each is computed once
        # per seed, and its result is replicated across all lambda values so it
        # shows up as a horizontal line in the per-lambda plot.
        for s in args.aimle_samples:
            adaptive_target_distribution = AdaptiveTargetDistribution(initial_beta=0.0,
                                                                      beta_update_momentum=args.momentum,
                                                                      beta_update_step=1e-3,
                                                                      target_norm=args.target)
            aimle_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                target_distribution=adaptive_target_distribution,
                                                noise_distribution=noise_distribution,
                                                noise_temperature=1.0,
                                                nb_samples=s,
                                                nb_marginal_samples=ms,
                                                is_symmetric=False,
                                                warmup_steps=warmup_steps)
            dist = pdist(true_gradient, aimle_gradient)

            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'AIMLE (Forward, $S={s}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]

            # Fresh adaptive target distribution for the central variant.
            adaptive_target_distribution = AdaptiveTargetDistribution(initial_beta=0.0,
                                                                      beta_update_momentum=args.momentum,
                                                                      beta_update_step=1e-3,
                                                                      target_norm=args.target)
            aimle_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                target_distribution=adaptive_target_distribution,
                                                noise_distribution=noise_distribution,
                                                noise_temperature=1.0,
                                                nb_samples=s,
                                                nb_marginal_samples=ms,
                                                is_symmetric=True,
                                                warmup_steps=warmup_steps)
            dist = pdist(true_gradient, aimle_gradient)

            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'AIMLE (Central $S={s}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]

        for s in args.ste_samples:
            ste_gradient = ste_gradient_fun(topk, theta_t, b_t,
                                            noise_distribution=noise_distribution,
                                            noise_temperature=1.0, nb_samples=s)
            dist = pdist(true_gradient, ste_gradient)

            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'STE ($S={s}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]

        for s in args.sfe_samples:
            sfe_gradient = sfe_gradient_fun(topk, theta_t, b_t, s)
            dist = pdist(true_gradient, sfe_gradient)

            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'SFE ($S={s}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]

        for s in args.gs_samples:
            # Soft (relaxed) Gumbel-Softmax.
            gs_gradient = gs_gradient_fun(theta_t, b_t,
                                          nb_samples=s,
                                          tau=args.tau,
                                          hard=False)
            dist = pdist(true_gradient, gs_gradient)

            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'Gumbel-Softmax ($S={s}$, $\\tau = {args.tau}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]

            # Hard (straight-through) Gumbel-Softmax.
            gs_gradient = gs_gradient_fun(theta_t, b_t,
                                          nb_samples=s,
                                          tau=args.tau,
                                          hard=True)
            dist = pdist(true_gradient, gs_gradient)

            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'Gumbel-Softmax (Hard, $S={s}$, $\\tau = {args.tau}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]

    # Plot cosine similarity vs lambda and save to the requested output path.
    if args.o is not None:
        df = pd.DataFrame.from_dict(lmd_mse_dict)

        all_methods_set = {m for m in lmd_mse_dict['Method']}

        # filter_method_lst = ['SFE (1)', 'SFE (10)']
        # filter_method_lst = [m for m in all_methods_set if 'IMLE' in m]
        filter_method_lst = []

        df = df[~df['Method'].isin(filter_method_lst)]

        import matplotlib as mpl
        xdim, ydim = 8, 3
        mpl.rcParams['figure.figsize'] = xdim, ydim
        mpl.rcParams['font.size'] = 13

        g = sns.lineplot(x='$\\lambda$',
                         y="Cosine Similarity",
                         hue="Method",
                         data=df)
        g.set(title='Value of $\\lambda$ $\\times$ Similarity to the true gradient')

        plt.grid()
        plt.xlim(lambdas[0], lambdas[-1])
        plt.ylim(0.0, 1.0)

        # handles, labels = g.get_legend_handles_labels()
        # g.legend(handles=handles[1:], labels=labels[1:])
        g.legend_.set_title(None)

        # plt.show()
        plt.savefig(args.o, bbox_inches='tight')

    print('Done!')
if __name__ == '__main__':
    # Script entry point: log INFO-level messages to stdout and forward the
    # command-line arguments (without the program name) to main().
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    main(sys.argv[1:])
| 18,995 | 41.591928 | 130 | py |
torch-adaptive-imle | torch-adaptive-imle-main/cli/gradient-cli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This is an extended version of gradient-cli.py that supports AIMLE
# Remember to replace gradient-cli.py with this one
import os
import sys
import torch
import numpy as np
from torch import Tensor, nn
from imle.ste import ste as ste
from imle.imle import imle as imle
from imle.aimle import aimle as aimle
from imle.target import BaseTargetDistribution, TargetDistribution, AdaptiveTargetDistribution
from imle.noise import BaseNoiseDistribution, SumOfGammaNoiseDistribution, GumbelNoiseDistribution
from aaai23.synth import distributions, utils, sfe2 as sfe
import argparse
from tqdm import tqdm
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from typing import Optional
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
def objective(z: Tensor, b_t: Tensor) -> Tensor:
    """Summed squared error between a (possibly multi-sample) state z and target b_t.

    If z stacks several samples (its flattened length is a multiple of b_t's),
    the target is broadcast across the sample rows and the squared errors are
    summed over all samples and dimensions.
    """
    if len(z.shape) > len(b_t.shape):
        z = z.view(-1)
    dim = b_t.shape[0]
    if z.shape[0] > dim:
        # z holds several stacked samples: compare each row against the target.
        samples_2d = z.view(-1, dim)
        diffs_2d = samples_2d - b_t.view(1, dim)
        return (diffs_2d ** 2).sum()
    return ((z - b_t) ** 2).sum()
def true_gradient_fun(topk: distributions.TopK,
                      theta_t: Tensor,
                      b_t: Tensor) -> Tensor:
    """Exact gradient of E_{z ~ p(z; theta)}[objective(z, b_t)] w.r.t. theta.

    The expectation is computed via utils.expect_obj over the top-k
    distribution `topk`, so this is only tractable when the state space is
    small enough to enumerate.
    """
    objective_bt = lambda z_: objective(z_, b_t)
    # Expected value of the loss
    exact_obective = lambda _theta: utils.expect_obj(topk, _theta, objective_bt)
    theta_t_param = nn.Parameter(theta_t, requires_grad=True)
    loss = exact_obective(theta_t_param)
    loss.backward()
    return theta_t_param.grad
def imle_gradient_fun(topk: distributions.TopK,
                      theta_t: Tensor,
                      b_t: Tensor,
                      target_distribution: BaseTargetDistribution,
                      noise_distribution: BaseNoiseDistribution,
                      noise_temperature: float,
                      nb_samples: int,
                      nb_marginal_samples: int) -> Tensor:
    """Single-step I-MLE gradient estimate of the objective w.r.t. theta."""
    @imle(theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
          target_distribution=target_distribution, noise_distribution=noise_distribution,
          nb_samples=nb_samples, nb_marginal_samples=nb_marginal_samples)
    def batched_map(thetas: Tensor) -> Tensor:
        # MAP solver applied row-wise over the batch of (perturbed) thetas.
        rows = [topk.map(thetas[row]) for row in range(thetas.shape[0])]
        return torch.stack(rows)

    theta_param = nn.Parameter(theta_t, requires_grad=True)
    z = batched_map(theta_param.view(1, -1)).view(-1)
    loss = objective(z, b_t)
    loss.backward()
    return theta_param.grad
def aimle_gradient_fun(topk: distributions.TopK,
                       theta_t: Tensor,
                       b_t: Tensor,
                       target_distribution: BaseTargetDistribution,
                       noise_distribution: BaseNoiseDistribution,
                       noise_temperature: float,
                       nb_samples: int,
                       nb_marginal_samples: int,
                       is_symmetric: bool,
                       warmup_steps: int = 0) -> Tensor:
    """Adaptive I-MLE gradient estimate of the objective w.r.t. theta.

    Runs `warmup_steps` throwaway forward/backward passes first so that an
    adaptive target distribution can calibrate its internal state (e.g. beta)
    before the measured gradient step; only the final step's gradient is kept.
    """
    @aimle(theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
          target_distribution=target_distribution, noise_distribution=noise_distribution,
          nb_samples=nb_samples, nb_marginal_samples=nb_marginal_samples, symmetric_perturbation=is_symmetric)
    def imle_topk_batched(thetas: Tensor) -> Tensor:
        # MAP solver applied row-wise over the batch of (perturbed) thetas.
        return torch.stack([topk.map(thetas[i]) for i in range(thetas.shape[0])])
    def imle_topk(theta: Tensor) -> Tensor:
        # Wrap the 1-D theta as a single-row batch and flatten the result back.
        return imle_topk_batched(theta.view(1, -1)).view(-1)
    theta_t_param = nn.Parameter(theta_t, requires_grad=True)
    for _ in range(warmup_steps):
        z = imle_topk(theta_t_param)
        loss = objective(z, b_t)
        loss.backward()
        # Discard warmup gradients; the backward pass is run only for the
        # target distribution's adaptive state updates.
        theta_t_param.grad = None
    z = imle_topk(theta_t_param)
    loss = objective(z, b_t)
    loss.backward()
    return theta_t_param.grad
def ste_gradient_fun(topk: distributions.TopK,
                     theta_t: Tensor,
                     b_t: Tensor,
                     noise_distribution: BaseNoiseDistribution,
                     noise_temperature: float,
                     nb_samples: int) -> Tensor:
    """Straight-through estimator gradient of the objective w.r.t. theta."""
    @ste(noise_temperature=noise_temperature, noise_distribution=noise_distribution, nb_samples=nb_samples)
    def batched_map(thetas: Tensor) -> Tensor:
        # MAP solver applied row-wise over the batch of (perturbed) thetas.
        rows = [topk.map(thetas[row]) for row in range(thetas.shape[0])]
        return torch.stack(rows)

    theta_param = nn.Parameter(theta_t, requires_grad=True)
    z = batched_map(theta_param.view(1, -1)).view(-1)
    loss = objective(z, b_t)
    loss.backward()
    return theta_param.grad
def sfe_gradient_fun(topk: distributions.TopK,
                     theta_t: Tensor,
                     b_t: Tensor,
                     nb_samples: Optional[int]) -> Tensor:
    """Score-function (REINFORCE) gradient estimate of the objective w.r.t. theta."""
    rs = np.random.RandomState(0)

    def loss_of_state(z_):
        return objective(z_, b_t)

    estimator = sfe.sfe(topk.sample_f(rs), loss_of_state, topk.grad_log_p(topk.marginals), nb_samples)
    theta_param = nn.Parameter(theta_t, requires_grad=True)
    z = estimator(theta_param)
    loss = objective(z, b_t)
    loss.backward()
    return theta_param.grad
def main(argv):
    """CLI entry point: compare gradient estimators (I-MLE, AIMLE, STE, SFE)
    against the exact gradient over a range of target-distribution lambdas,
    and save a cosine-similarity plot to args.o.

    Methods that do not depend on lambda (AIMLE/STE/SFE) are computed once per
    seed and replicated across all lambdas so they appear as flat lines.
    """
    parser = argparse.ArgumentParser('Gradient Estimation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-k', action='store', type=int, default=10)
    parser.add_argument('-n', action='store', type=int, default=20)
    parser.add_argument('-o', action='store', type=str, default='gradient.pdf')
    parser.add_argument('--min-lambda', action='store', type=float, default=0.0)
    parser.add_argument('--max-lambda', action='store', type=float, default=100.0)
    parser.add_argument('--nb-lambdas', action='store', type=int, default=1001)
    parser.add_argument('--imle-samples', type=int, nargs='+', default=[])
    parser.add_argument('--nb-marginal-samples', action='store', type=int, default=1)
    # The following methods are not sensitive to lambdas
    parser.add_argument('--aimle-samples', type=int, nargs='+', default=[])
    parser.add_argument('--ste-samples', type=int, nargs='+', default=[])
    parser.add_argument('--sfe-samples', type=int, nargs='+', default=[])
    parser.add_argument('--seeds', '-s', action='store', type=int, default=64)
    parser.add_argument('--warmup-steps', action='store', type=int, default=100)
    parser.add_argument('--momentum', action='store', type=float, default=0.0)
    parser.add_argument('--target', action='store', type=float, default=1.0)
    args = parser.parse_args(argv)
    device = torch.device('cpu')
    if torch.cuda.is_available():
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        device = torch.device('cuda')
    torch.set_num_threads(16)
    n = args.n
    k = args.k
    ms = args.nb_marginal_samples
    topk = distributions.TopK(n, k, device=device)
    print(f'Possible states: {topk.states.shape}')
    lambdas = np.linspace(args.min_lambda, args.max_lambda, num=args.nb_lambdas)
    warmup_steps = args.warmup_steps
    # noise_distribution = SumOfGammaNoiseDistribution(k=k, nb_iterations=10)
    noise_distribution = GumbelNoiseDistribution()
    # Long-format columns for the seaborn line plot below.
    lmd_mse_dict = {
        'Method': [],
        '$\\lambda$': [],
        'Cosine Similarity': [],
        'Seed': []
    }
    # NOTE(review): dim=1 assumes the gradient tensors compared below are 2-d;
    # confirm against the torch version in use.
    pdist = nn.CosineSimilarity(dim=1, eps=1e-6)
    for i in tqdm(range(args.seeds), desc='Seed'):
        rng = np.random.RandomState(i)
        theta = rng.randn(n)
        b_t = torch.abs(torch.from_numpy(rng.randn(n)).float().to(device))
        theta_t = torch.tensor(theta, dtype=torch.float, requires_grad=False, device=device)
        true_gradient = true_gradient_fun(topk, theta_t, b_t)
        for lmd in tqdm(lambdas, desc='Lambda', leave=False):
            target_distribution = TargetDistribution(alpha=1.0, beta=lmd, do_gradient_scaling=True)
            for s in args.imle_samples:
                imle_gradient = imle_gradient_fun(topk, theta_t, b_t,
                                                  target_distribution=target_distribution,
                                                  noise_distribution=noise_distribution,
                                                  noise_temperature=1.0,
                                                  nb_samples=s, nb_marginal_samples=ms)
                dist = pdist(true_gradient, imle_gradient)
                lmd_mse_dict['Method'] += [f'IMLE ({s}, $\\mu = {ms}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]
                imle_sym_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                       target_distribution=target_distribution,
                                                       noise_distribution=noise_distribution,
                                                       noise_temperature=1.0,
                                                       nb_samples=s,
                                                       nb_marginal_samples=ms,
                                                       is_symmetric=True)
                dist = pdist(true_gradient, imle_sym_gradient)
                lmd_mse_dict['Method'] += [f'IMLE Sym ({s}, $\\mu = {ms}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]
        for s in args.aimle_samples:
            adaptive_target_distribution = AdaptiveTargetDistribution(initial_beta=0.0,
                                                                      beta_update_momentum=args.momentum,
                                                                      beta_update_step=1e-3,
                                                                      target_norm=args.target)
            aimle_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                target_distribution=adaptive_target_distribution,
                                                noise_distribution=noise_distribution,
                                                noise_temperature=1.0,
                                                nb_samples=s,
                                                nb_marginal_samples=ms,
                                                is_symmetric=False,
                                                warmup_steps=warmup_steps)
            dist = pdist(true_gradient, aimle_gradient)
            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'AIMLE ({s}, $\\mu = {ms}$)']
                # Fix: store a Python float (was the raw numpy scalar `lmd`),
                # consistent with every other branch in this function.
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]
            adaptive_target_distribution = AdaptiveTargetDistribution(initial_beta=0.0,
                                                                      beta_update_momentum=args.momentum,
                                                                      beta_update_step=1e-3,
                                                                      target_norm=args.target)
            aimle_gradient = aimle_gradient_fun(topk, theta_t, b_t,
                                                target_distribution=adaptive_target_distribution,
                                                noise_distribution=noise_distribution,
                                                noise_temperature=1.0,
                                                nb_samples=s,
                                                nb_marginal_samples=ms,
                                                is_symmetric=True,
                                                warmup_steps=warmup_steps)
            dist = pdist(true_gradient, aimle_gradient)
            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'AIMLE Sym ({s}, $\\mu = {ms}$)']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]
        for s in args.ste_samples:
            ste_gradient = ste_gradient_fun(topk, theta_t, b_t,
                                            noise_distribution=noise_distribution,
                                            noise_temperature=1.0, nb_samples=s)
            dist = pdist(true_gradient, ste_gradient)
            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'STE ({s})']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]
        for s in args.sfe_samples:
            sfe_gradient = sfe_gradient_fun(topk, theta_t, b_t, s)
            dist = pdist(true_gradient, sfe_gradient)
            for lmd in lambdas:
                lmd_mse_dict['Method'] += [f'SFE ({s})']
                lmd_mse_dict['$\\lambda$'] += [lmd.item()]
                lmd_mse_dict['Cosine Similarity'] += [dist.item()]
                lmd_mse_dict['Seed'] += [i]
    df = pd.DataFrame.from_dict(lmd_mse_dict)
    all_methods_set = {m for m in lmd_mse_dict['Method']}
    # filter_method_lst = ['SFE (1)', 'SFE (10)']
    # filter_method_lst = [m for m in all_methods_set if 'IMLE' in m]
    filter_method_lst = []
    df = df[~df['Method'].isin(filter_method_lst)]
    sns.lineplot(x='$\\lambda$',
                 y="Cosine Similarity",
                 hue="Method",
                 data=df)
    # plt.xlim(lambdas[0], lambdas[-1])
    # plt.ylim(0.0, 1.0)
    # plt.show()
    plt.savefig(args.o, bbox_inches='tight')
    print('Done!')
# Script entry point: log to stdout at INFO level and hand CLI args to main().
if __name__ == '__main__':
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    main(sys.argv[1:])
| 14,339 | 40.325648 | 115 | py |
torch-adaptive-imle | torch-adaptive-imle-main/cli/vae-cli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, optim, Tensor
from imle.imle import imle
from imle.aimle import aimle
from imle.ste import ste
from imle.target import TargetDistribution, AdaptiveTargetDistribution
from imle.noise import BaseNoiseDistribution, SumOfGammaNoiseDistribution, GumbelNoiseDistribution
from imle.solvers import mathias_select_k
from aaai23.torch.utils import set_seed
from aaai23.torch.dvae.modules import DiscreteVAE
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import argparse
import socket
import wandb
from typing import Tuple, Callable, Optional
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
class DifferentiableSelectKModel(nn.Module):
    """Switches between a differentiable relaxation (training mode) and the
    exact black-box solver (evaluation mode) of the same select-k mapping."""

    def __init__(self,
                 diff_fun: Callable[[Tensor], Tensor],
                 fun: Callable[[Tensor], Tensor]):
        super().__init__()
        self.diff_fun = diff_fun
        self.fun = fun

    def forward(self, logits: Tensor) -> Tensor:
        # nn.Module.train()/eval() toggles self.training.
        selected = self.diff_fun if self.training else self.fun
        return selected(logits)
def gumbel_loss(logits_2d: Tensor,
                rec_2d: Tensor,
                x_2d: Tensor,
                m: int,
                n: int,
                reduction: str = 'mean') -> Tuple[Tensor, int]:
    """ELBO-style discrete-VAE loss: per-image reconstruction BCE (summed over
    pixels and code samples) plus the KL divergence between the code
    distribution and a uniform prior over n values.

    Returns the (reduced) loss and the number of code samples per input.
    """
    batch_size = logits_2d.shape[0]
    input_size = x_2d.shape[1]

    if reduction == 'mean':
        def reduce_fn(t, dim):
            return torch.mean(t, dim=dim)
    elif reduction == 'sum':
        def reduce_fn(t, dim):
            return torch.sum(t, dim=dim)
    elif reduction == 'none':
        def reduce_fn(t, dim):
            return t
    else:
        assert False, f'Unknown reduction function: {reduction}'

    # rec_2d is [B * S, H * W]: tile x_2d S times so the shapes line up.
    nb_samples = rec_2d.shape[0] // x_2d.shape[0]
    x_tiled = x_2d.view(batch_size, 1, input_size).repeat(1, nb_samples, 1)
    x_tiled = x_tiled.view(batch_size * nb_samples, input_size)

    # Per-pixel BCE, summed over pixels -- [B * S]
    bce = torch.nn.BCEWithLogitsLoss(reduction='none')(rec_2d, x_tiled).sum(dim=1)
    # XXX: arguably the sample dimension should always be summed here and the
    # reduction applied only over the batch dimension.
    if nb_samples > 1:
        # Sum over the sample dimension -- [B]
        bce = bce.view(-1, nb_samples).sum(dim=1)
    bce = reduce_fn(bce, 0)

    # KL(q_y || uniform(1/n)) per code position -- [B, M, N]
    logits_3d = logits_2d.view(batch_size, m, n)
    q_y = torch.softmax(logits_3d, dim=-1)
    log_q_y = torch.log(q_y + 1e-20)
    kl_3d = q_y * (log_q_y - np.log(1.0 / n))
    # Sum KL over code positions, then reduce over the batch.
    kl = reduce_fn(kl_3d.sum(dim=(1, 2)), 0)

    return bce + kl, nb_samples
def main(argv):
    """Train a discrete VAE on MNIST whose latent code is a k-hot selection,
    comparing Gumbel-Softmax, I-MLE, AIMLE and STE gradient estimators.

    Side effects: downloads MNIST into ./mnist-data and logs metrics to
    Weights & Biases.
    """
    parser = argparse.ArgumentParser('PyTorch I-MLE/DVAE', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--batch-size', '-b', action='store', type=int, default=100, help='Batch Size')
    parser.add_argument('--input-dim', action='store', type=int, default=28 * 28)
    parser.add_argument('--epochs', '-e', action='store', type=int, default=100)
    parser.add_argument('--select-k', '-K', action='store', type=int, default=10, help='Select K')
    parser.add_argument('--code-m', action='store', type=int, default=20)
    parser.add_argument('--code-n', action='store', type=int, default=20)
    parser.add_argument("--method", "-M", type=str, choices=['gumbel-softmax', 'imle', 'aimle', 'ste'], default='imle')
    # Gumbel SoftMax
    parser.add_argument('--anneal-rate', action='store', type=float, default=0.00003)
    parser.add_argument('--init-temperature', action='store', type=float, default=1.0)
    parser.add_argument('--min-temperature', action='store', type=float, default=0.5)
    parser.add_argument('--hard', action='store_true', default=False)
    # AIMLE
    parser.add_argument('--aimle-symmetric', action='store_true', default=False)
    parser.add_argument('--aimle-target', type=str, choices=['standard', 'adaptive'], default='standard')
    parser.add_argument('--aimle-beta-update-step', action='store', type=float, default=0.0001)
    parser.add_argument('--aimle-beta-update-momentum', action='store', type=float, default=0.0)
    parser.add_argument('--aimle-target-norm', action='store', type=float, default=1.0)
    # IMLE
    parser.add_argument('--imle-noise', type=str, choices=['none', 'sog', 'gumbel'], default='sog')
    parser.add_argument('--imle-samples', action='store', type=int, default=1)
    parser.add_argument('--imle-temperature', action='store', type=float, default=10.0)
    parser.add_argument('--imle-lambda', action='store', type=float, default=10.0)
    # STE
    parser.add_argument('--ste-noise', type=str, choices=['none', 'sog', 'gumbel'], default='sog')
    parser.add_argument('--ste-temperature', action='store', type=float, default=0.0)
    parser.add_argument('--gradient-scaling', action='store_true', default=False)
    parser.add_argument('--seed', action='store', type=int, default=0)
    args = parser.parse_args(argv)
    batch_size = args.batch_size
    input_dim = args.input_dim
    code_m = args.code_m
    code_n = args.code_n
    nb_epochs = args.epochs
    anneal_rate = args.anneal_rate
    init_temperature = args.init_temperature
    min_temperature = args.min_temperature
    hard = args.hard
    set_seed(args.seed)
    hostname = socket.gethostname()
    logger.info(f'Hostname: {hostname}')
    wandb.init(project="aimle-dvae", name=f'{args.method}')
    wandb.config.update(args)
    wandb.config.update({'hostname': hostname, 'seed': args.seed})
    device = torch.device('cpu')
    if torch.cuda.is_available():
        device = torch.device('cuda')
    mnist_train_ds = datasets.MNIST('./mnist-data',
                                    train=True,
                                    download=True,
                                    transform=transforms.Compose([
                                        transforms.ToTensor()
                                    ]))
    train_loader = DataLoader(mnist_train_ds, batch_size=batch_size, shuffle=True)
    mnist_test_ds = datasets.MNIST('./mnist-data',
                                   train=False,
                                   download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor()
                                   ]))
    test_loader = DataLoader(mnist_test_ds, batch_size=batch_size, shuffle=True)
    model = DiscreteVAE(input_dim=input_dim, n=code_n, m=code_m).to(device)
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    # Gumbel-Softmax temperature; annealed during training (see batch loop).
    tau = init_temperature
    if args.aimle_target in {'standard'}:
        target_distribution = TargetDistribution(alpha=1.0,
                                                 beta=args.imle_lambda,
                                                 do_gradient_scaling=args.gradient_scaling)
    elif args.aimle_target in {'adaptive'}:
        target_distribution = AdaptiveTargetDistribution(initial_alpha=1.0,
                                                         initial_beta=args.imle_lambda,
                                                         beta_update_step=args.aimle_beta_update_step,
                                                         beta_update_momentum=args.aimle_beta_update_momentum,
                                                         target_norm=args.aimle_target_norm)
    else:
        assert False, f'Do not know how to handle {args.aimle_target} as target distribution'
    def name_to_distribution(distribution_name: str) -> Optional[BaseNoiseDistribution]:
        # Maps a CLI noise name to a noise-distribution instance (None disables noise).
        if distribution_name in {'none'}:
            noise_distribution = None
        elif distribution_name in {'sog'}:
            noise_distribution = SumOfGammaNoiseDistribution(k=args.select_k, nb_iterations=10, device=device)
        elif distribution_name in {'gumbel'}:
            noise_distribution = GumbelNoiseDistribution(device=device)
        else:
            assert False, f'Noise model not supported: {distribution_name}'
        return noise_distribution
    batch_idx = 0
    for epoch in range(1, nb_epochs + 1):
        epoch_loss_values = []
        # this is only needed for the standard Gumbel softmax trick
        ### tau = np.maximum(tau * np.exp(- anneal_rate * epoch), min_temperature)
        # Exact top-k solver used at evaluation time by DifferentiableSelectKModel.
        blackbox_function = lambda logits: mathias_select_k(logits, k=args.select_k)
        if args.method in {'gumbel-softmax'}:
            gumbel_softmax = lambda logits_m_2d: F.gumbel_softmax(logits_m_2d, tau=tau, hard=hard)
            code_generator = DifferentiableSelectKModel(gumbel_softmax, blackbox_function)
        elif args.method in {'imle'}:
            noise_distribution = name_to_distribution(args.imle_noise)
            @imle(target_distribution=target_distribution,
                  noise_distribution=noise_distribution,
                  nb_samples=args.imle_samples,
                  theta_noise_temperature=args.imle_temperature,
                  target_noise_temperature=args.imle_temperature)
            def imle_select_k(logits: Tensor) -> Tensor:
                return mathias_select_k(logits, k=args.select_k)
            code_generator = DifferentiableSelectKModel(imle_select_k, blackbox_function)
        elif args.method in {'aimle'}:
            noise_distribution = name_to_distribution(args.imle_noise)
            @aimle(target_distribution=target_distribution,
                   noise_distribution=noise_distribution,
                   nb_samples=args.imle_samples,
                   theta_noise_temperature=args.imle_temperature,
                   target_noise_temperature=args.imle_temperature,
                   symmetric_perturbation=args.aimle_symmetric)
            def aimle_select_k(logits: Tensor) -> Tensor:
                return mathias_select_k(logits, k=args.select_k)
            code_generator = DifferentiableSelectKModel(aimle_select_k, blackbox_function)
        elif args.method in {'ste'}:
            noise_distribution = name_to_distribution(args.ste_noise)
            @ste(noise_distribution=noise_distribution,
                 noise_temperature=args.ste_temperature,
                 nb_samples=args.imle_samples)
            def ste_select_k(logits: Tensor) -> Tensor:
                return mathias_select_k(logits, k=args.select_k)
            code_generator = DifferentiableSelectKModel(ste_select_k, blackbox_function)
        else:
            assert False, f'Unknown method: {args.method}'
        for X, _ in train_loader:
            batch_idx += 1
            if batch_idx % 1000 == 0:
                # Anneal the Gumbel-Softmax temperature every 1000 batches.
                tau = np.maximum(init_temperature * np.exp(- anneal_rate * batch_idx), min_temperature)
                if args.method in {'gumbel-softmax'}:
                    gumbel_softmax = lambda logits_m_2d: F.gumbel_softmax(logits_m_2d, tau=tau, hard=hard)
                    code_generator = DifferentiableSelectKModel(gumbel_softmax, blackbox_function)
            model.train()
            X = X.to(device)
            # [B, 1, H, W]
            batch_shape = X.shape
            batch_size_ = batch_shape[0]
            assert batch_shape[1] == 1
            assert batch_shape[2] == 28
            assert batch_shape[3] == 28
            # [B, H * W]
            x_2d = X.view(batch_size_, -1)
            flat_input_size = x_2d.shape[1]
            assert x_2d.shape[0] == batch_size_
            assert x_2d.shape[1] == flat_input_size == 28 * 28
            # [B, M * N], [B * S, H * W]
            logits_2d, rec_2d = model(x_2d, code_generator=code_generator)
            nb_samples = rec_2d.shape[0] // batch_size_
            assert logits_2d.shape[0] == batch_size_
            assert logits_2d.shape[1] == code_m * code_n
            assert rec_2d.shape[0] == batch_size_ * nb_samples
            assert rec_2d.shape[1] == flat_input_size
            loss, _ = gumbel_loss(logits_2d=logits_2d, rec_2d=rec_2d, x_2d=x_2d, m=code_m, n=code_n, reduction='mean')
            loss_value = loss.item()
            epoch_loss_values += [loss_value]
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        loss_mean, loss_std = np.mean(epoch_loss_values), np.std(epoch_loss_values)
        beta_value_str = 'None' if target_distribution is None else f'{target_distribution.beta:.5f}'
        logger.info(f'Epoch {epoch}/{nb_epochs}\t'
                    f'Training Loss: {loss_mean:.4f} ± {loss_std:.4f}\t'
                    f'Temperature: {tau:.5f}\tBeta: {beta_value_str}')
        if args.method in {'gumbel-softmax'}:
            # Use hard samples for evaluation regardless of the training setting.
            gumbel_softmax = lambda logits_m_2d: F.gumbel_softmax(logits_m_2d, tau=tau, hard=True)
            code_generator = DifferentiableSelectKModel(gumbel_softmax, blackbox_function)
        test_loss = 0.0
        nb_instances = 0.0
        with torch.inference_mode():
            for X, _ in test_loader:
                # NOTE(review): this keeps the model in train mode during
                # evaluation (dropout etc. stay active, and code_generator
                # uses its differentiable branch); presumably model.eval()
                # was intended -- confirm before changing.
                model.train()
                X = X.to(device)
                # [B, 1, H, W]
                batch_shape = X.shape
                batch_size_ = batch_shape[0]
                # [B, H * W]
                x_2d = X.view(batch_size_, -1)
                # [B, M * N], [B, H * W]
                logits_2d, rec_2d = model(x_2d, code_generator=code_generator)
                loss, nb_samples = gumbel_loss(logits_2d=logits_2d,
                                               rec_2d=rec_2d,
                                               x_2d=x_2d,
                                               m=code_m,
                                               n=code_n,
                                               reduction='sum')
                test_loss += loss.item() / nb_samples
                nb_instances += batch_size_
        logger.info(f'Average Test Loss: {test_loss / nb_instances:.5f}')
        wandb.log({'loss': loss_mean, 'tau': tau, 'test_loss': test_loss / nb_instances}, step=epoch)
    logger.info(f'Experiment completed.')
# Script entry point: log to stdout at INFO level and hand CLI args to main().
if __name__ == '__main__':
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    main(sys.argv[1:])
| 14,808 | 38.80914 | 119 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/torch/modules.py | # -*- coding: utf-8 -*-
import torch
from torch import nn, Tensor
from torch.distributions.gamma import Gamma
from torch.distributions import Uniform
import math
from typing import Optional, Tuple, Callable
import logging
logger = logging.getLogger(__name__)
def init(layer: nn.Module):
    """Initialize a Conv1d/Linear layer in place: Xavier-uniform weights with
    ReLU gain, zero bias.

    Raises:
        TypeError: if the layer type is not supported.
    """
    if isinstance(layer, (nn.Conv1d, nn.Linear)):
        torch.nn.init.xavier_uniform_(layer.weight, gain=nn.init.calculate_gain('relu'))
        torch.nn.init.zeros_(layer.bias)
    else:
        # Bug fix: the original `assert f'...'` asserted a non-empty string,
        # which is always truthy and therefore never fired.
        raise TypeError(f'Do not know how to deal with {type(layer)}')
class GumbelSelector(torch.nn.Module):
    """Convolutional scorer that assigns one selection logit per token.

    Architecture: a shared first conv layer feeding (a) a global max-pooled
    branch and (b) a local two-conv branch; the two branches are concatenated
    and fused by two 1x1 convolutions into per-token logits.
    """
    def __init__(self,
                 embedding_weights: Tensor,
                 kernel_size: int):
        super().__init__()
        # Vocabulary size and embedding width come from the pretrained matrix.
        self.nb_words = embedding_weights.shape[0]
        self.embedding_dim = embedding_weights.shape[1]
        # Frozen pretrained embeddings.
        self.embeddings = nn.Embedding.from_pretrained(embedding_weights, freeze=True)
        self.first_layer = nn.Conv1d(in_channels=self.embedding_dim, out_channels=100,
                                     kernel_size=(kernel_size,), padding='same', stride=(1,))
        init(self.first_layer)
        self.global_layer = nn.Linear(in_features=100, out_features=100, bias=True)
        init(self.global_layer)
        self.local_layer_1 = nn.Conv1d(in_channels=100, out_channels=100,
                                       kernel_size=(kernel_size,), padding='same', stride=(1,))
        init(self.local_layer_1)
        self.local_layer_2 = nn.Conv1d(in_channels=100, out_channels=100,
                                       kernel_size=(kernel_size,), padding='same', stride=(1,))
        init(self.local_layer_2)
        self.final_layer_1 = nn.Conv1d(in_channels=200, out_channels=100,
                                       kernel_size=(1,), padding='same', stride=(1,))
        init(self.final_layer_1)
        self.final_layer_2 = nn.Conv1d(in_channels=100, out_channels=1,
                                       kernel_size=(1,), padding='same', stride=(1,))
        init(self.final_layer_2)
        self.activation = nn.ReLU()
        self.dropout = nn.Dropout(p=0.2)
    def forward(self,
                x: Tensor) -> Tensor:
        """Map token ids x of shape [B, T] to selection logits of shape [B, 1, T]."""
        # [B, T] -> [B, T, E]
        x_emb = self.embeddings(x)
        batch_size = x_emb.shape[0]
        seq_len = x_emb.shape[1]
        emb_size = x_emb.shape[2]
        # [B, E, T] -- Conv1d expects channels before the sequence dimension.
        x_emb = torch.transpose(x_emb, 1, 2)  # x_emb
        assert x_emb.shape == torch.Size([batch_size, emb_size, seq_len])
        # [B, 100, T] -- shared representation feeding both branches.
        first_rep = self.first_layer(x_emb)
        first_rep = self.activation(first_rep)  # A
        hidden_size = first_rep.shape[1]  # 100
        assert first_rep.shape == torch.Size([batch_size, hidden_size, seq_len])
        # [B, 100] -- global branch: max-pool over time, then a linear layer.
        global_info, _ = torch.max(input=first_rep, dim=2)  # B
        global_info = self.global_layer(global_info)
        global_info = self.activation(global_info)  # C
        # print('[B, 100]', global_info.shape)
        assert global_info.shape == torch.Size([batch_size, hidden_size])
        # [B, 100, T] -- local branch: two further convolutions.
        local_info = self.local_layer_1(first_rep)
        local_info = self.activation(local_info)  # B'
        local_info = self.local_layer_2(local_info)
        local_info = self.activation(local_info)  # C'
        # print('[B, 100, 350]', local_info.shape)
        assert local_info.shape == torch.Size([batch_size, hidden_size, seq_len])
        assert global_info.shape == torch.Size([batch_size, hidden_size])
        # Broadcast the global vector across all T positions.
        global_info_3d = global_info.view(batch_size, hidden_size, 1).repeat(1, 1, seq_len)
        assert global_info_3d.shape == torch.Size([batch_size, hidden_size, seq_len])
        # [B, 200, T] -- concatenate global and local features per position.
        final_rep = torch.cat((global_info_3d, local_info), dim=1)  # D
        assert final_rep.shape == torch.Size([batch_size, hidden_size * 2, seq_len])
        final_rep = self.dropout(final_rep)
        # [B, 100, T]
        final_rep = self.final_layer_1(final_rep)
        final_rep = self.activation(final_rep)  # E
        assert final_rep.shape == torch.Size([batch_size, hidden_size, seq_len])
        # [B, 1, T] -- one logit per token.
        final_rep = self.final_layer_2(final_rep)  # F
        assert final_rep.shape == torch.Size([batch_size, 1, seq_len])
        return final_rep
class IMLETopK(torch.autograd.Function):
    """I-MLE top-k layer as a custom autograd Function.

    Forward returns a k-hot vector of the top-k entries of the noise-perturbed
    logits; backward estimates the gradient as the difference between the
    forward sample and a MAP sample at target logits shifted by lambda_ * dy,
    reusing the same noise (implicit maximum-likelihood estimation).

    Hyper-parameters are class attributes shared by every call site.
    """
    # Number of entries to select.
    k: int = 10
    # Noise temperature.
    tau: float = 10.0
    # Target-distribution step size used in backward.
    lambda_: float = 1000.0
    @staticmethod
    def sample_gumbel_k(shape: torch.Size,
                        k: int,
                        tau: float,
                        device: torch.device) -> Tensor:
        """Draw noise of the given shape: ten Gamma(1/k, t/k) samples (t=1..10)
        summed, shifted by -log(10) and scaled by tau/k -- the Sum-of-Gamma
        construction (cf. SumOfGammaNoiseDistribution elsewhere in this repo)."""
        sog = 0.0
        for t in [i + 1.0 for i in range(0, 10)]:
            concentration = torch.tensor(1.0 / k, dtype=torch.float, device=device)
            rate = torch.tensor(t / k, dtype=torch.float, device=device)
            gamma = Gamma(concentration=concentration, rate=rate)
            sample = gamma.sample(sample_shape=shape).to(device)
            sog = sog + sample
        sog = sog - math.log(10.0)
        sog = tau * (sog / k)
        return sog
    @staticmethod
    def sample(logits: Tensor,
               k: int,
               tau: float,
               samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
        """Perturb logits with noise (drawing fresh noise if `samples` is None)
        and return the k-hot top-k indicator plus the noise that was used."""
        if samples is None:
            samples = IMLETopK.sample_gumbel_k(logits.shape, k, tau, device=logits.device)
        samples = samples.to(logits.device)
        gumbel_softmax_sample = logits + samples
        scores, _ = torch.topk(gumbel_softmax_sample, k, sorted=True)
        # Per-row threshold: the k-th largest perturbed score.
        thr_2d = scores[:, -1].view(-1, 1)
        z = (gumbel_softmax_sample >= thr_2d).float()
        return z, samples
    @staticmethod
    def forward(ctx,
                logits: Tensor):
        """Return a perturb-and-MAP k-hot sample; stash tensors for backward."""
        z, sample = IMLETopK.sample(logits, IMLETopK.k, IMLETopK.tau)
        ctx.save_for_backward(logits, z, sample)
        return z
    @staticmethod
    def backward(ctx,
                 dy: Tensor):
        """I-MLE gradient: forward sample minus the MAP sample at the target
        logits (logits shifted along -dy), with the same noise reused."""
        logits, z, sample = ctx.saved_tensors
        target_logits = logits - (IMLETopK.lambda_ * dy)
        map_dy, _ = IMLETopK.sample(target_logits, IMLETopK.k, IMLETopK.tau, sample)
        grad = z - map_dy
        return grad
class PredictionModel(torch.nn.Module):
    """Predicts a score in [0, 1] from the average embedding of the k tokens
    kept by a selection mask."""

    def __init__(self,
                 embedding_weights: Tensor,
                 hidden_dims: int,
                 select_k: int):
        super().__init__()
        self.nb_words = embedding_weights.shape[0]
        self.embedding_dim = embedding_weights.shape[1]
        self.hidden_dims = hidden_dims
        # Stored as float: used as the divisor when averaging selected embeddings.
        self.select_k = float(select_k)
        # Frozen pretrained embeddings.
        self.embeddings = nn.Embedding.from_pretrained(embedding_weights, freeze=True)
        self.layer_1 = nn.Linear(in_features=self.embedding_dim, out_features=self.hidden_dims, bias=True)
        init(self.layer_1)
        self.layer_2 = nn.Linear(in_features=self.hidden_dims, out_features=1, bias=True)
        init(self.layer_2)
        self.activation = nn.ReLU()
        self.output_activation = nn.Sigmoid()

    def forward(self,
                x: Tensor,
                mask: Tensor) -> Tensor:
        # [B, T] -> [B, T, E], then zero out unselected tokens via the mask.
        masked_emb = self.embeddings(x) * mask
        # Average over the sequence, dividing by k rather than the full length.
        # res = torch.mean(res, dim=1)
        pooled = torch.sum(masked_emb, dim=1) / self.select_k
        # [B, H]
        hidden = self.activation(self.layer_1(pooled))
        # [B, 1]
        return self.output_activation(self.layer_2(hidden))
class Model(torch.nn.Module):
    """End-to-end rationale model: a GumbelSelector scores tokens, a
    differentiable top-k layer selects k of them, and a PredictionModel
    classifies from the masked embeddings."""
    def __init__(self,
                 embedding_weights: Tensor,
                 hidden_dims: int,
                 kernel_size: int,
                 select_k: int,
                 differentiable_select_k: Optional[Callable[[Tensor], Tensor]] = None):
        super().__init__()
        self.gumbel_selector = GumbelSelector(embedding_weights=embedding_weights, kernel_size=kernel_size)
        self.prediction_model = PredictionModel(embedding_weights=embedding_weights, hidden_dims=hidden_dims, select_k=select_k)
        # Differentiable relaxation of top-k selection; required in practice
        # (the None fallback below is deliberately unreachable).
        self.differentiable_select_k = differentiable_select_k
    def z(self, x: Tensor) -> Tensor:
        """Return the k-hot token-selection mask(s) for token ids x [B, T]."""
        # [B, 1, T]
        token_logits = self.gumbel_selector(x)
        token_logits = token_logits.to(x.device)
        # [B, T, 1]
        token_logits = token_logits.transpose(1, 2)
        batch_size_ = token_logits.shape[0]
        seq_len_ = token_logits.shape[1]
        assert token_logits.shape[2] == 1
        token_logits = token_logits.view(batch_size_, seq_len_)
        # [B, T]
        if self.differentiable_select_k is None:
            # Legacy IMLETopK path, kept but made unreachable on purpose.
            assert False, "This should never happen"
            token_selections = IMLETopK.apply(token_logits)
        else:
            token_selections = self.differentiable_select_k(token_logits)
        return token_selections
    def forward(self,
                x: Tensor) -> Tensor:
        """Predict a score in [0, 1] from token ids x [B, T]."""
        # [B, T]
        token_selections = self.z(x)
        # [B, T, 1]
        token_selections = torch.unsqueeze(token_selections, dim=-1)
        # Now, note that, while x is [B, T], token_selections is [B * S, T, 1],
        # where S is the number of samples drawn by I-MLE during the forward pass.
        # We may need to replicate x S times.
        batch_size = x.shape[0]
        seq_len = x.shape[1]
        assert token_selections.shape[1] == seq_len
        if token_selections.shape[0] > batch_size:
            nb_samples = token_selections.shape[0] // batch_size
            x = x.view(batch_size, 1, seq_len)
            x = x.repeat(1, nb_samples, 1)
            x = x.view(batch_size * nb_samples, seq_len)
        p = self.prediction_model(x=x, mask=token_selections)
        return p
class ConcreteDistribution(nn.Module):
    """Relaxed subset sampler: temperature-scaled softmax over Gumbel-perturbed
    logits while training, an exact hard top-k mask at evaluation time."""

    def __init__(self,
                 tau: float,
                 k: int,
                 eps: float = torch.finfo(torch.float).tiny,
                 device: Optional[torch.device] = None):
        super().__init__()
        self.tau = tau
        self.k = k
        self.eps = eps
        # Uniform base distribution on (eps, 1], used to derive the noise.
        low = torch.tensor(self.eps, dtype=torch.float, device=device)
        high = torch.tensor(1.0, dtype=torch.float, device=device)
        self.base_distribution = Uniform(low, high)

    def forward(self,
                logits: Tensor):
        # logits is [B, D]
        if self.training:
            # Derive noise from a uniform draw via log(-log(u)).
            u = self.base_distribution.sample(sample_shape=logits.shape).to(logits.device)
            noise = torch.log(- torch.log(u))
            # [B, D]: temperature-scaled softmax over the perturbed logits.
            return torch.softmax((noise + logits) / self.tau, dim=1)
        # Evaluation: exact k-hot mask of the k largest logits per row.
        kth_scores, _ = torch.topk(logits, self.k, sorted=True)
        threshold = kth_scores[:, -1].view(-1, 1)
        return (logits >= threshold).float()
class SampleSubset(nn.Module):
    """Relaxed k-subset sampling via k successive softmaxes over Gumbel-keyed
    logits while training; an exact hard top-k mask at evaluation time."""

    def __init__(self,
                 tau: float,
                 k: int,
                 eps: float = torch.finfo(torch.float).tiny,
                 device: Optional[torch.device] = None):
        super().__init__()
        self.tau = tau
        self.k = k
        self.eps = eps
        # Uniform base distribution on (eps, 1], used to derive the noise.
        low = torch.tensor(self.eps, dtype=torch.float, device=device)
        high = torch.tensor(1.0, dtype=torch.float, device=device)
        self.base_distribution = Uniform(low, high)

    def gumbel_keys(self, w: Tensor) -> Tensor:
        # Perturb each weight with independent log(-log(uniform)) noise.
        u = self.base_distribution.sample(sample_shape=w.shape).to(w.device)
        return w + torch.log(- torch.log(u))

    def continuous_topk(self,
                        w: Tensor,
                        k: int,
                        t: float) -> Tensor:
        # Accumulate k successive softmaxes, damping already-selected items.
        approximation = torch.zeros_like(w, dtype=torch.float, device=w.device)
        selections = []
        for _ in range(k):
            remaining = torch.clip(1.0 - approximation, min=self.eps)
            approximation = torch.softmax((w + torch.log(remaining)) / t, dim=-1)
            selections.append(approximation)
        # [B, D]: soft k-hot vector (each softmax contributes mass 1).
        return sum(selections)

    def forward(self,
                logits: Tensor):
        # logits is [B, D]
        if self.training:
            return self.continuous_topk(self.gumbel_keys(logits), self.k, self.tau)
        # Evaluation: exact k-hot mask of the k largest logits per row.
        kth_scores, _ = torch.topk(logits, self.k, sorted=True)
        threshold = kth_scores[:, -1].view(-1, 1)
        return (logits >= threshold).float()
| 12,902 | 34.254098 | 128 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/torch/utils.py | # -*- coding: utf-8 -*-
import json
import numpy as np
import random
import torch
from torch import nn, Tensor
from torch.distributions.gamma import Gamma
from torch.distributions import Uniform
import math
from aaai23.utils import pad_sequences
from typing import Optional, Tuple, Callable
import logging
logger = logging.getLogger(__name__)
def set_seed(seed: int, is_deterministic: bool = True):
    """Seed Python, NumPy and PyTorch (CPU and all CUDA devices) RNGs;
    optionally force deterministic cuDNN behaviour."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    if is_deterministic is True:
        # Trade cuDNN autotuning for reproducible convolution results.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    return
def subset_precision(model, aspect, id_to_word, word_to_id, select_k, device: torch.device, max_len: int = 350):
    """Precision of the model's selected tokens against human aspect annotations.

    Reads ``data/annotations.json`` (one JSON review per line), runs
    ``model.z(...)`` on each review to obtain a 0/1 token-selection mask, and
    counts how many selected non-padding tokens fall inside any annotated span
    for ``aspect``. Returns correct_selected / total_selected, where each
    review contributes at least ``select_k`` to the denominator.
    NOTE(review): assumes ``model.z`` returns a batch of 0/1 masks over token
    positions — confirm against the model implementation.
    """
    data = []
    num_annotated_reviews = 0
    with open("data/annotations.json") as fin:
        for line in fin:
            item = json.loads(line)
            data.append(item)
            num_annotated_reviews = num_annotated_reviews + 1
    selected_word_counter = 0
    correct_selected_counter = 0
    for anotr in range(num_annotated_reviews):
        ranges = data[anotr][str(aspect)]  # the aspect id
        text_list = data[anotr]['x']
        review_length = len(text_list)
        list_test = []
        # Unknown tokens map to id 0.
        tokenid_list = [word_to_id.get(token, 0) for token in text_list]
        list_test.append(tokenid_list)
        # X_test_subset = np.asarray(list_test)
        # X_test_subset = sequence.pad_sequences(X_test_subset, maxlen=350)
        X_test_subset = pad_sequences(list_test, max_len=max_len)
        X_test_subset_t = torch.tensor(X_test_subset, dtype=torch.long, device=device)
        with torch.inference_mode():
            model.eval()
            prediction = model.z(X_test_subset_t)
        # Zero out unselected token ids, then map the ids back to words.
        x_val_selected = prediction[0].cpu().numpy() * X_test_subset
        # [L,] — drop the left padding so index i aligns with the annotation spans.
        selected_words = np.vectorize(id_to_word.get)(x_val_selected)[0][-review_length:]
        selected_nonpadding_word_counter = 0
        for i, w in enumerate(selected_words):
            if w != '<PAD>':  # we are nice to the L2X approach by only considering selected non-pad tokens
                selected_nonpadding_word_counter = selected_nonpadding_word_counter + 1
                for r in ranges:
                    rl = list(r)
                    if i in range(rl[0], rl[1]):
                        correct_selected_counter = correct_selected_counter + 1
        # we make sure that we select at least 10 non-padding words
        # if we have more than select_k non-padding words selected, we allow it but count that in
        selected_word_counter = selected_word_counter + max(selected_nonpadding_word_counter, select_k)
    return correct_selected_counter / selected_word_counter
| 2,895 | 31.909091 | 112 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/torch/dvae/modules.py | # -*- coding: utf-8 -*-
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from typing import Callable, Tuple
import logging
logger = logging.getLogger(__name__)
def init(layer: nn.Module):
    """Initialize a Conv1d/Linear layer: Xavier-uniform weights (relu gain), zero bias.

    :param layer: the module to initialize in place.
    :raises ValueError: for unsupported layer types. (Bug fix: the original
        used ``assert <non-empty f-string>``, which is always truthy and
        therefore never fired.)
    """
    if isinstance(layer, nn.Conv1d) or isinstance(layer, nn.Linear):
        torch.nn.init.xavier_uniform_(layer.weight, gain=nn.init.calculate_gain('relu'))
        torch.nn.init.zeros_(layer.bias)
    else:
        raise ValueError(f'Do not know how to deal with {type(layer)}')
class Encoder(nn.Module):
    """3-layer MLP encoder: input_dim -> 512 -> 256 -> code_dim, ReLU hidden units."""

    def __init__(self,
                 input_dim: int,
                 code_dim: int):
        super().__init__()
        self.input_dim = input_dim
        self.code_dim = code_dim
        # Each layer is initialized right after creation (Xavier weights, zero bias).
        self.linear1 = torch.nn.Linear(self.input_dim, 512, bias=True)
        init(self.linear1)
        self.linear2 = torch.nn.Linear(512, 256, bias=True)
        init(self.linear2)
        self.linear3 = torch.nn.Linear(256, self.code_dim, bias=True)
        init(self.linear3)

    def forward(self, x: Tensor) -> Tensor:
        """Map [B, input_dim] inputs to [B, code_dim] codes (no output activation)."""
        hidden = F.relu(self.linear1(x))
        hidden = F.relu(self.linear2(hidden))
        return self.linear3(hidden)
class Decoder(nn.Module):
    """3-layer MLP decoder: code_dim -> 256 -> 512 -> input_dim, ReLU hidden units."""

    def __init__(self,
                 input_dim: int,
                 code_dim: int):
        super().__init__()
        self.input_dim = input_dim
        self.code_dim = code_dim
        # Mirror image of Encoder; layers initialized right after creation.
        self.linear1 = torch.nn.Linear(self.code_dim, 256, bias=True)
        init(self.linear1)
        self.linear2 = torch.nn.Linear(256, 512, bias=True)
        init(self.linear2)
        self.linear3 = torch.nn.Linear(512, self.input_dim, bias=True)
        init(self.linear3)

    def forward(self, x: Tensor) -> Tensor:
        """Map [B, code_dim] codes back to [B, input_dim] logits (no output activation)."""
        hidden = F.relu(self.linear1(x))
        hidden = F.relu(self.linear2(hidden))
        return self.linear3(hidden)
class DiscreteVAE(nn.Module):
    """Discrete VAE with a code of m categorical variables over n categories each.

    The encoder produces m*n logits; `code_generator` turns each group of n
    logits into a (relaxed or discrete) one-hot code; the decoder reconstructs
    the input from the concatenated codes.
    """

    def __init__(self,
                 input_dim: int,
                 n: int = 20,
                 m: int = 20):
        super().__init__()
        self.input_dim = input_dim
        self.n = n  # number of categories per latent variable
        self.m = m  # number of latent variables
        self.code_dim = self.m * self.n
        self.encoder = Encoder(input_dim=self.input_dim, code_dim=self.code_dim)
        self.decoder = Decoder(input_dim=self.input_dim, code_dim=self.code_dim)

    def forward(self,
                x: Tensor,
                code_generator: Callable[[Tensor], Tensor]) -> Tuple[Tensor, Tensor]:
        """Encode, sample codes via `code_generator`, decode.

        :param x: [B, H * W] flattened inputs.
        :param code_generator: maps [B * M, N] logits to codes; may return
            [B * M * S, N] when it draws S samples per input.
        :return: (logits [B, M*N], reconstruction [B*S, H*W]).
        """
        # x is # [B, H * W]
        batch_size = x.shape[0]
        # [B, M * N]
        logits_2d = self.encoder(x)
        assert len(logits_2d.shape) == 2
        assert logits_2d.shape[0] == batch_size
        assert logits_2d.shape[1] == self.m * self.n
        # [B * M, N]
        logits_m_2d = logits_2d.view(batch_size * self.m, self.n)
        # [B * M * S, N]
        code_m_2d = code_generator(logits_m_2d)
        # print(f'Expected: {batch_size * self.m} x {self.n}')
        # print(f'Got: {code_m_2d.shape}')
        # Note: if we are using *IMLE and nb_samples > 1,
        # code_generator may return a [B * S * M, N] tensor,
        # where S = nb_samples
        # [B, M, S, N] — S is inferred from the generator's output size.
        code_4d = code_m_2d.view(batch_size, self.m, -1, self.n)
        nb_samples = code_4d.shape[2]
        # [B, S, M, N]
        code_4d = torch.transpose(code_4d, 1, 2)
        # print(code_4d[0, 0])
        # [B * S, M * N]
        # code_2d = code_m_2d.view(batch_size * nb_samples, self.m * self.n)
        code_2d = code_4d.reshape(batch_size * nb_samples, self.m * self.n)
        assert len(code_2d.shape) == 2
        assert code_2d.shape[0] == batch_size * nb_samples
        assert code_2d.shape[1] == self.m * self.n
        # [B * S, H * W] — one reconstruction per sample.
        reconstruction = self.decoder(code_2d)
        assert x.shape[0] * nb_samples == reconstruction.shape[0]
        assert x.shape[1] == reconstruction.shape[1]
        return logits_2d, reconstruction
| 3,838 | 28.530769 | 88 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/synth/distributions.py | # -*- coding: utf-8 -*-
import itertools
import numpy as np
import torch
class DiscreteExpFamily:
    """Exponential family over a finite set of states: p(z) ∝ exp(<theta, z>)."""

    def __init__(self, m) -> None:
        """
        Base class for (constrained) exponential family distributions.
        When subclassing, one must at least implement the `states` function.
        :param m: dimensionality
        """
        super().__init__()
        self.m = m
        self._states = None  # lazy initialization of this

    @property
    def states(self):
        """ Returns a matrix of possible states (states in cal{C}), organized by rows"""
        raise NotImplementedError()

    @property
    def n_states(self):
        # Number of rows of the state matrix.
        return len(self.states)

    def weights(self, theta):
        """Vector of un-normalized weights"""
        # [n_states, m] @ [m] -> [n_states], e.g. [252, 10] @ [10] -> [252]
        return self.states @ theta

    def log_partition(self, theta):
        # scalar: log sum_{s} exp <theta, s>
        return torch.log(torch.sum(torch.exp(self.weights(theta))))

    def pmf(self, theta):
        """Probability mass (vector)"""
        # [n_states] - scalar
        # <theta, state> - log sum_{s'} exp <theta, s'>
        return torch.exp(self.weights(theta) - self.log_partition(theta))

    def marginals(self, theta):
        """Basic implementation for the marginal (aka the expected value of this random variable)"""
        return self.pmf(theta) @ self.states

    def sample(self, theta, rng=None):
        """
        Base implementation of (faithful) sampling
        """
        if rng is None:
            rng = np.random.RandomState()
        _pmft = self.pmf(theta)
        # print('YYY', _pmft)
        n_states = self.n_states
        # Draw a state index according to the exact pmf.
        indx_ch = rng.choice(list(range(n_states)), p=_pmft.detach().cpu().numpy())
        res = self.states[indx_ch]
        # print('XXX', res)
        return res

    def sample_f(self, rng):
        """
        functional version of sampling (useful for setting the random generator)
        """
        return lambda th: self.sample(th, rng)

    def map(self, theta):
        """
        Basic (inefficient) implementation of map function (returns 1 state)
        """
        return self.states[torch.argmax(self.weights(theta))]

    def perturb_and_map(self, noise_f):
        """Return a perturb-and-MAP sampler; the noise `eps` is cached on ctx when one is given."""
        def _pam(theta, ctx=None):
            if hasattr(ctx, 'eps'):
                # Reuse the noise saved by a previous call with the same ctx.
                eps = ctx.eps
            else:
                eps = torch.stack([noise_f() for _ in range(self.m)])
            if ctx is not None:
                try:
                    ctx.eps = eps
                except AttributeError:
                    print('Problems with ctx')
            theta_prime = theta + eps
            return self.map(theta_prime)
        return _pam

    def grad_log_p(self, mu_f=None):
        """Gradient of the log probability:
        \nabla log p(z, theta) = \nabla [<z, theta> - A(theta)] = z - mu(theta).
        Use `mu_f` for approximate computation, otherwise uses full marginals.
        Returns a function (to be used with the score function estimator)"""
        if mu_f is None:
            mu_f = self.marginals

        def _glp(theta, ctx=None):
            # mu_f takes a theta and returns the marginals
            mu_theta = mu_f(theta)  # here surely you don't want to use the same sample!
            assert hasattr(ctx, 'sample'), 'must save the forward value with ctx.sample!'
            return ctx.sample - mu_theta
        return _glp
class TopK(DiscreteExpFamily):
    """Exponential family over the k-hot vectors of dimension m (the k-subsets)."""

    def __init__(self, m, k, device=None) -> None:
        """
        :param m: dimensionality of the state vectors.
        :param k: subset size (number of ones per state).
        :param device: torch device for the state matrix and MAP outputs.
        """
        super().__init__(m)
        self.k = k
        self.device = device

    @property
    def states(self):
        """[C(m, k), m] matrix of all k-hot states, built lazily on first access."""
        # TODO implement an iterator version of this (with yield, so that it scales in memory)
        if self._states is None:
            # Bug fix: np.math was removed in NumPy 2.0 — use the stdlib instead.
            from math import comb
            n, k = self.m, self.k
            combs = list(itertools.combinations(range(n), k))
            n_states = len(combs)
            # Exact integer comparison (the old factorial division compared floats).
            assert n_states == comb(n, k)
            print('Number of possible states:', n_states)
            mat_x = np.zeros((len(combs), n))
            for i in range(n_states):
                mat_x[i, combs[i]] = 1.
            self._states = torch.from_numpy(mat_x).float().to(self.device)
        return self._states

    def map_2d(self, theta_2d):
        """Batched MAP: for each row of [B, m] scores, return its k-hot argmax state."""
        batch_size = theta_2d.shape[0]
        state_2d = torch.zeros((batch_size, self.m), device=self.device)
        # Indices of the k largest entries per row.
        ind1_2d = torch.argsort(theta_2d, descending=True).to(self.device)[:, :self.k]
        # Flattened (row, column) index pairs for advanced indexing.
        dim_0 = torch.arange(batch_size, device=self.device).view(-1, 1).repeat(1, self.k).view(-1)
        dim_1 = ind1_2d.reshape(-1)
        state_2d[dim_0, dim_1] = 1.
        return state_2d

    def map(self, theta):
        """Better implementation of map that uses argsort (probably not linear).
        Could do better... but this is fine atm"""
        state = torch.zeros(self.m, device=self.device)
        ind1 = torch.argsort(theta, descending=True)[:self.k]
        state[ind1] = 1.
        return state
if __name__ == '__main__':
    # Smoke test: 5-subsets of a 10-dimensional space with small random scores.
    topk = TopK(10, 5)
    ttheta = 0.1 * torch.randn(10)
    print(f'STATES ({topk.states.shape})', topk.states)
    # Three faithful (stochastic) samples from the distribution.
    print(topk.sample(ttheta))
    print(topk.sample(ttheta))
    print(topk.sample(ttheta))
    print()
    # MAP is deterministic, so the two calls print the same state.
    print(topk.map(ttheta))
    print(topk.map(ttheta))
    print(topk.marginals(ttheta))
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/synth/sfe.py | # -*- coding: utf-8 -*-
"""Score function estimator"""
import torch
from aaai23.synth.utils import _maybe_ctx_call
def sfe(sampler, loss_f, grad_log_p):
    """Build a score-function-estimator layer from a sampler, loss, and grad-log-p."""
    def apply_sfe(theta):
        return _SFE.apply(theta, sampler, loss_f, grad_log_p)
    return apply_sfe
# noinspection PyMethodOverriding
class _SFE(torch.autograd.Function):
@staticmethod
def forward(ctx, _theta, sample_strategy, loss, grad_log_p):
# z samples
# [10]
ctx.sample = _maybe_ctx_call(sample_strategy, ctx, _theta)
# θ
ctx.theta = _theta
# loss(z) = ((z - b_t) ** 2).sum()
ctx.loss = _maybe_ctx_call(loss, ctx, ctx.sample)
ctx.grad_log_p = grad_log_p
return ctx.sample
# Reminder: ∇θ 𝔼[ f(z) ] = 𝔼ₚ₍z;θ₎ [ f(z) ∇θ log p(z;θ) ]
@staticmethod
def backward(ctx, grad_output):
return ctx.loss * _maybe_ctx_call(ctx.grad_log_p, ctx, ctx.theta), None, None, None
| 958 | 28.060606 | 91 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/synth/utils.py | # -*- coding: utf-8 -*-
import inspect
import torch
import numpy as np
def expect_obj(dist, theta, obj):
    """
    Computes \mathbb{E}_{z\sim dist(z, theta)} [ obj(z) ] =
    = sum_{z in dist.states} dist(z) * obj(z)
    :param dist: distribution exposing pmf(theta) and a states matrix
    :param theta: natural parameters
    :param obj: callable evaluated on each state
    :return: scalar tensor with the exact expectation
    """
    # [n_states] probability of each state under theta.
    probs = dist.pmf(theta)
    # [n_states] weighted objective values p(state) * obj(state).
    contributions = [p_z * obj(z) for p_z, z in zip(probs, dist.states)]
    return torch.sum(torch.stack(contributions))
def sum_of_gamma_noise(k_gamma, tau=1., rng=None, s=10):
    """Return a 0-arg sampler of sum-of-Gamma noise (a truncated Gumbel approximation).

    :param k_gamma: shape parameter k of the construction.
    :param tau: temperature scaling of the returned noise.
    :param rng: optional numpy RandomState (a fresh one is created if None).
    :param s: number of Gamma terms in the truncated sum.
    """
    if rng is None:
        rng = np.random.RandomState()

    def _sample():
        draws = [rng.gamma(1.0 / k_gamma, k_gamma / (i + 1.0)) for i in range(s)]
        value = (tau / k_gamma) * (np.sum(draws) - np.log(s))
        return torch.tensor(value).float()

    return _sample
def gumbel_noise(tau=1., rng=None):
    """Return a 0-arg sampler of Gumbel(0, tau) noise as float32 scalar tensors."""
    if rng is None:
        rng = np.random.RandomState()

    def _sample():
        return torch.tensor(rng.gumbel(0, tau)).float()

    return _sample
if __name__ == '__main__':
    # Smoke test: draw a few sum-of-Gamma noise samples and stack three of them.
    sog_f = sum_of_gamma_noise(3., 1.)
    print(sog_f())
    print(sog_f())
    ppp = torch.stack([sog_f() for _ in range(3)])
    print(ppp)
def _maybe_ctx_call(func, ctx, theta):
args = inspect.getfullargspec(func).args
if 'ctx' in args:
return func(theta, ctx=ctx)
else:
return func(theta)
| 1,384 | 21.33871 | 82 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/synth/imle.py | # -*- coding: utf-8 -*-
"""Implicit maximum likelihood estimator (I-MLE)"""
import torch
from aaai23.synth.utils import _maybe_ctx_call
def imle_pid(lmd, sampler, use_fw_pass_for_mu_p=True, marginals_approx=None,
             normalized=False):
    """
    I-MLE ``layer'' whose target distribution comes from perturbation-based
    implicit differentiation.

    :param lmd: perturbation intensity (scalar)
    :param sampler: draws from the distribution (sample, perturb-and-MAP, or MAP);
        may be stochastic and called multiple times
    :param use_fw_pass_for_mu_p: if True (default) reuse the forward pass to
        approximate the marginals of p
    :param marginals_approx: optional marginal approximator; defaults to one
        call of `sampler`
    :param normalized: if True, scale the perturbation by ||theta||
        (default False; semantics still to be fully settled)
    :return: a callable backed by a `torch.autograd.Function` with forward and
        backward fully implemented
    """
    def layer(theta):
        return _IMLE_PID.apply(theta, lmd, sampler, use_fw_pass_for_mu_p,
                               marginals_approx, normalized)
    return layer
# noinspection PyMethodOverriding
class _IMLE_PID(torch.autograd.Function):
@staticmethod
def forward(ctx, theta, lmd, sample_strategy,
use_fw_pass_for_mu_p=True, marginals_approx=None,
normalized=False):
# save stuff for backward pass
ctx.ttheta = theta
ctx.normalized = normalized
ctx.use_fw_pass_for_mu_p = use_fw_pass_for_mu_p
ctx.lmd = lmd
ctx.sample_strategy = sample_strategy
ctx.marginal_approx = marginals_approx if marginals_approx is not None else sample_strategy
# actual forward
ctx.fw = _maybe_ctx_call(sample_strategy, ctx, theta)
return ctx.fw
@staticmethod
def backward(ctx, grad_outputs):
norm_theta = torch.norm(ctx.ttheta) if ctx.normalized else 1.
# perturbation-based q
mu_p = ctx.fw if ctx.use_fw_pass_for_mu_p else _maybe_ctx_call(ctx.marginal_approx, ctx, ctx.ttheta)
theta_prime = ctx.ttheta - ctx.lmd * norm_theta * grad_outputs
mu_q = _maybe_ctx_call(ctx.marginal_approx, ctx, theta_prime)
return mu_p - mu_q, None, None, None, None, None
| 2,351 | 41 | 118 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/synth/ste.py | # -*- coding: utf-8 -*-
"""Straight through estimator"""
import torch
def ste(sampler):
    """Build a straight-through layer around a (possibly non-differentiable) sampler."""
    def apply_ste(theta):
        return _StraightThroughEstimator.apply(theta, sampler)
    return apply_ste
# noinspection PyMethodOverriding
class _StraightThroughEstimator(torch.autograd.Function):
@staticmethod
def forward(ctx, theta, sampler):
return sampler(theta)
@staticmethod
def backward(ctx, grad_outputs):
return grad_outputs, None
| 436 | 18.863636 | 72 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/synth/sfe2.py | # -*- coding: utf-8 -*-
"""Score function estimator"""
import torch
from aaai23.synth.utils import _maybe_ctx_call
def sfe(sampler, loss_f, grad_log_p, nb_samples):
    """Build a (possibly multi-sample) score-function-estimator layer (see _SFE)."""
    def apply_sfe(theta):
        return _SFE.apply(theta, sampler, loss_f, grad_log_p, nb_samples)
    return apply_sfe
# noinspection PyMethodOverriding
class _SFE(torch.autograd.Function):
@staticmethod
def forward(ctx, _theta, sample_strategy, loss, grad_log_p, nb_samples):
# z samples
ctx.nb_samples = nb_samples
if ctx.nb_samples is None:
ctx.sample = _maybe_ctx_call(sample_strategy, ctx, _theta)
else:
ctx.sample = [_maybe_ctx_call(sample_strategy, ctx, _theta) for _ in range(nb_samples)]
# θ
ctx.theta = _theta
# loss(z) = ((z - b_t)**2).sum()
if ctx.nb_samples is None:
ctx.loss = _maybe_ctx_call(loss, ctx, ctx.sample)
else:
ctx.loss = [_maybe_ctx_call(loss, ctx, s) for s in ctx.sample]
ctx.grad_log_p = grad_log_p
res = ctx.sample
if ctx.nb_samples is not None:
res = torch.stack(res, dim=0)
return res
# Reminder: ∇θ 𝔼[ f(z) ] = 𝔼ₚ₍z;θ₎ [ f(z) ∇θ log p(z;θ) ]
@staticmethod
def backward(ctx, grad_output):
if ctx.nb_samples is None:
grad = ctx.loss * _maybe_ctx_call(ctx.grad_log_p, ctx, ctx.theta)
else:
grad = 0.0
all_samples = ctx.sample
for i, sample in enumerate(all_samples):
ctx.sample = sample
local_grad = ctx.loss[i] * _maybe_ctx_call(ctx.grad_log_p, ctx, ctx.theta)
grad = grad + local_grad
ctx.sample = all_samples
grad = grad / ctx.nb_samples
return grad, None, None, None, None
| 1,832 | 33.584906 | 99 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/maprop/utils.py | # -*- coding: utf-8 -*-
import os
import sys
import pickle
import random
import torch
import csv
import ray
import itertools
from collections import defaultdict, deque
import time
from functools import lru_cache
import ast
import collections
import json
from copy import deepcopy
from warnings import warn
import numpy as np
import inspect
import re
import shutil
import tempfile
from time import sleep
from pathlib2 import Path
CLUSTER_PARAM_FILE = 'param_choice.csv'
CLUSTER_METRIC_FILE = 'metrics.csv'
JSON_SETTINGS_FILE = 'settings.json'
JOB_INFO_FILE = 'job_info.csv'
STATUS_PICKLE_FILE = 'status.pickle'
FULL_DF_FILE = 'all_data.csv'
REDUCED_DF_FILE = 'reduced_data.csv'
STD_ENDING = '__std'
RESTART_PARAM_NAME = 'restarts'
JSON_FILE_KEY = 'default_json'
OBJECT_SEPARATOR = '.'
PARAM_TYPES = (bool, str, int, float, tuple, dict)
RESERVED_PARAMS = ('model_dir', 'id', 'iteration', RESTART_PARAM_NAME, 'cluster_job_id')
DISTR_BASE_COLORS = [(0.99, 0.7, 0.18), (0.7, 0.7, 0.9), (0.56, 0.692, 0.195), (0.923, 0.386, 0.209)]
class customdefaultdict(defaultdict):
    """defaultdict variant whose factory receives the missing key as an argument."""

    def __missing__(self, key):
        if not self.default_factory:
            # No factory: defer to defaultdict, which raises KeyError.
            return defaultdict.__missing__(self, key)
        dict.__setitem__(self, key, self.default_factory(key))
        return self[key]
@lru_cache(maxsize=128)
def cached_np_load(path, **kwargs):
    # Memoized np.load keyed on (path, kwargs); kwargs values must be hashable.
    # NOTE(review): the cached array object is shared across callers — mutating
    # the returned array would corrupt the cache; treat it as read-only.
    return np.load(path, **kwargs)
def efficient_from_numpy(x, device):
    """Convert a numpy array to a torch tensor on *device*.

    For non-CPU targets the host copy is made contiguous and pinned so the
    transfer can be asynchronous (non_blocking=True).
    """
    if device == 'cpu':
        return torch.from_numpy(x).cpu()
    pinned = torch.from_numpy(x).contiguous().pin_memory()
    return pinned.to(device=device, non_blocking=True)
class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric."""

    def __init__(self, name=None, fmt=":f"):
        self.name = name
        self.fmt = fmt  # format spec applied to the average in __str__
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = "{name} ({avg" + self.fmt + "})"
        return template.format(**self.__dict__)
def set_seed(seed):
    """Seed python, torch (CPU and CUDA) and numpy RNGs; no-op when seed is None."""
    if seed is None:
        return
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
def save_pickle(data, path):
    """Serialize *data* to *path* using pickle."""
    with open(path, "wb") as handle:
        pickle.dump(data, handle)
def load_pickle(path):
    """Deserialize and return the pickled object stored at *path*."""
    with open(path, "rb") as handle:
        return pickle.load(handle)
def concat_2d(arr):
    """Tile a [rows, cols, channels, h, w] grid of patches into full images.

    Returns an array of shape [channels, rows*h, cols*w]; patch (r, c) occupies
    grid block (r, c) of every channel image.
    """
    rows, columns, channels, height, width = arr.shape
    # [channels, rows, h, cols, w], then flatten the two spatial grid axes.
    tiled = arr.transpose(2, 0, 3, 1, 4)
    return tiled.reshape(channels, height * rows, width * columns)
class TrainingIterator(object):
    """Wraps a dict of equal-length numpy arrays into a shuffled mini-batch iterator."""

    def __init__(self, data_dict):
        # data_dict: name -> array; all arrays must share the leading dimension.
        self.data = data_dict
        zipped_data = list(zip(*data_dict.values()))
        # One structured-dtype field per input array, keeping each item's shape.
        self.dtype = [(key, "f4", value[0].shape) for key, value in data_dict.items()]
        # PyTorch works with 32-bit floats by default
        self.array = np.array(zipped_data, dtype=self.dtype)

    def get_epoch_iterator(self, batch_size, number_of_epochs, device='cpu', preload=False, shuffle=True):
        """Yield dicts of torch tensors, batch by batch, for the given number of epochs.

        preload=True keeps one batch ahead in a 2-slot deque (so the host->device
        copy of the next batch can overlap with consumption of the current one);
        shuffle=True reshuffles self.array in place at the start of every epoch.
        """
        def iterator():
            if preload:
                preload_deque = deque(maxlen=2)
            for i in range(number_of_epochs):
                if shuffle:
                    np.random.shuffle(self.array)
                # One extra slice index: the final slice may be empty and is skipped below.
                for j in range(1 + len(self.array) // batch_size):
                    numpy_batch = self.array[j * batch_size: (j + 1) * batch_size]
                    torch_batch = {key: efficient_from_numpy(numpy_batch[key], device=device) for key in
                                   numpy_batch.dtype.names}
                    if numpy_batch.size:
                        if j == 0 and preload:
                            # Prime the pipeline: buffer the first batch without yielding.
                            preload_deque.appendleft(torch_batch)
                            continue
                        if preload:
                            preload_deque.appendleft(torch_batch)
                            yield preload_deque.pop()
                        else:
                            yield torch_batch
            if preload:
                # Drain whatever is still buffered after the last epoch.
                while len(preload_deque) > 0:
                    yield preload_deque.pop()
        return iterator()
def grid_to_im_coordinate(grid_x, grid_y, grid_x_max, grid_y_max, im_width, im_height):
    """Map an integer grid cell (grid_x, grid_y) to the pixel center of that cell.

    :return: (im_x, im_y, x_spacing, y_spacing) where the spacings are the
        per-cell pixel extents along x and y.
    """
    x_spacing = im_width / grid_x_max
    y_spacing = im_height / grid_y_max
    im_x = x_spacing * (0.5 + grid_x)
    im_y = y_spacing * (0.5 + grid_y)
    return im_x, im_y, x_spacing, y_spacing
def maybe_parallelize(function, arg_list):
    """Map *function* over *arg_list*, using ray workers when ray is initialized."""
    if not ray.is_initialized():
        return [function(arg) for arg in arg_list]
    remote_fn = ray.remote(function)
    return ray.get([remote_fn.remote(arg) for arg in arg_list])
def optimizer_from_string(optimizer_name):
    """Resolve an optimizer name ('Adam' or 'SGD') to its torch optimizer class."""
    registry = {"Adam": torch.optim.Adam, "SGD": torch.optim.SGD}
    return registry[optimizer_name]
def all_accuracies(true_labels, suggested_labels, true_costs, is_valid_label_fn, num_thresholds, minimize=True):
    """Fraction of suggestions that are valid and within 10^-i of the optimal cost.

    Labels are 0-1 masks over true_costs; for each example the cost ratio
    suggested/optimal (inverted when maximizing) is compared against the
    thresholds 10^-1 .. 10^-num_thresholds. Returns a dict of per-threshold
    accuracies plus 'valid_acc' (fraction of valid suggestions).
    """
    num_examples = len(true_labels)
    valid = 0
    meets_threshold = [0] * num_thresholds
    for true_label, suggested_label, true_cost in zip(true_labels, suggested_labels, true_costs):
        if not is_valid_label_fn(suggested_label):
            continue
        valid += 1
        cost_ratio = np.sum(suggested_label * true_cost) / np.sum(true_label * true_cost)
        if not minimize:
            cost_ratio = 1.0 / cost_ratio
        #print(suggested_label, true_cost)
        #print(true_label, true_cost)
        assert cost_ratio > 0.99  # cost is not better than optimal...
        # A suggestion within a tighter threshold also counts for the looser ones.
        for i in range(len(meets_threshold)):
            if cost_ratio - 1.0 < 10.0 ** (-i - 1):
                meets_threshold[i] += 1
    threshold_dict = {f"below_{10. ** (1 - i)}_percent_acc": val / num_examples for i, val in
                      enumerate(meets_threshold)}
    threshold_dict['valid_acc'] = valid / num_examples
    return threshold_dict
def shorten_string(string, max_len):
    """Left-truncate *string* to at most max_len characters, prefixing '...'."""
    if len(string) <= max_len - 3:
        return string
    return '...' + string[-max_len + 3:]
def get_caller_file(depth=2):
    """Return the source filename of the stack frame *depth* levels above this call."""
    frame_record = inspect.stack()[depth]
    return frame_record[1]  # index 1 of a FrameInfo is the filename
def check_valid_name(string):
    """Validate a parameter name.

    :raises TypeError: when *string* is not a str.
    :raises ValueError: for reserved names, names ending in STD_ENDING,
        disallowed characters, or a leading/trailing '.'.
    """
    pat = '[A-Za-z0-9_.-]*$'
    if type(string) is not str:
        raise TypeError('Parameter \'{}\' not valid. String expected.'.format(string))
    if string in RESERVED_PARAMS:
        raise ValueError('Parameter name {} is reserved'.format(string))
    if string.endswith(STD_ENDING):
        raise ValueError('Parameter name \'{}\' not valid.'
                         'Ends with \'{}\' (may cause collisions)'.format(string, STD_ENDING))
    if not re.match(pat, string):
        raise ValueError('Parameter name \'{}\' not valid. Only \'[0-9][a-z][A-Z]_-.\' allowed.'.format(string))
    if string.startswith('.') or string.endswith('.'):
        raise ValueError('Parameter name \'{}\' not valid. \'.\' not allowed at start/end'.format(string))
def rm_dir_full(dir_name):
    """Best-effort recursive removal of *dir_name*, retrying once.

    The sleeps give slow (e.g. network) filesystems time to settle; a final
    failure only emits a warning, it never raises.
    """
    sleep(0.5)
    if os.path.exists(dir_name):
        shutil.rmtree(dir_name, ignore_errors=True)
    # filesystem is sometimes slow to response
    if os.path.exists(dir_name):
        sleep(1.0)
        shutil.rmtree(dir_name, ignore_errors=True)
    if os.path.exists(dir_name):
        warn(f'Removing of dir {dir_name} failed')
def create_dir(dir_name):
    """Create *dir_name* (including parents) if it does not exist.

    Uses exist_ok=True instead of the old check-then-create pattern, which had
    a race window where a concurrent creation made makedirs raise.
    """
    os.makedirs(dir_name, exist_ok=True)
def flatten_nested_string_dict(nested_dict, prepend=''):
    """Yield (flat_key, value) pairs, joining nesting levels with OBJECT_SEPARATOR.

    :raises TypeError: when any key is not a str.
    """
    for key, value in nested_dict.items():
        if type(key) is not str:
            raise TypeError('Only strings as keys expected')
        if not isinstance(value, dict):
            yield prepend + str(key), value
        else:
            yield from flatten_nested_string_dict(value, prepend=prepend + str(key) + OBJECT_SEPARATOR)
def save_dict_as_one_line_csv(dct, filename):
    """Write *dct* as a two-line CSV: a header row of keys and one row of values."""
    with open(filename, 'w') as handle:
        writer = csv.DictWriter(handle, fieldnames=dct.keys())
        writer.writeheader()
        writer.writerow(dct)
def get_sample_generator(samples, hyperparam_dict, distribution_list, extra_settings=None):
    """Build a generator of parameter settings from exactly one source.

    Exactly one of hyperparam_dict / distribution_list must be non-empty, and a
    distribution_list additionally requires *samples*. When extra_settings is
    given, its items are yielded before the generated ones.

    :raises TypeError: on ambiguous or incomplete argument combinations.
    """
    if bool(hyperparam_dict) == bool(distribution_list):
        raise TypeError('Exactly one of hyperparam_dict and distribution list must be provided')
    if distribution_list and not samples:
        raise TypeError('Number of samples not specified')
    if distribution_list:
        generator = distribution_list_sampler(distribution_list, samples)
    elif samples:
        assert hyperparam_dict
        generator = hyperparam_dict_samples(hyperparam_dict, samples)
    else:
        generator = hyperparam_dict_product(hyperparam_dict)
    if extra_settings is None:
        return generator
    return itertools.chain(extra_settings, generator)
def validate_hyperparam_dict(hyperparam_dict):
    """Check every entry's name and option types; raises on malformed entries."""
    for name, option_list in hyperparam_dict.items():
        check_valid_name(name)
        if type(option_list) is not list:
            raise TypeError('Entries in hyperparam dict must be type list (not {}: {})'.format(name, type(option_list)))
        for item in option_list:
            if not any(isinstance(item, allowed_type) for allowed_type in PARAM_TYPES):
                raise TypeError('Settings must from the following types: {}, not {}'.format(PARAM_TYPES, type(item)))
def hyperparam_dict_samples(hyperparam_dict, num_samples):
    """Yield num_samples nested dicts, each picking one random option per parameter."""
    validate_hyperparam_dict(hyperparam_dict)
    nested_items = [(name.split(OBJECT_SEPARATOR), options) for name, options in hyperparam_dict.items()]
    for _ in range(num_samples):
        drawn = [(key_path, random.choice(options)) for key_path, options in nested_items]
        yield nested_to_dict(drawn)
def hyperparam_dict_product(hyperparam_dict):
    """Yield every combination (Cartesian product) of the options as nested dicts."""
    validate_hyperparam_dict(hyperparam_dict)
    nested_items = [(name.split(OBJECT_SEPARATOR), options) for name, options in hyperparam_dict.items()]
    nested_names, option_lists = zip(*nested_items)
    for combination in itertools.product(*option_lists):
        yield nested_to_dict(zip(nested_names, combination))
def default_to_regular(d):
    """Recursively convert a (nested) defaultdict into plain dicts; other values pass through."""
    if not isinstance(d, defaultdict):
        return d
    return {k: default_to_regular(v) for k, v in d.items()}
def nested_to_dict(nested_items):
    """Build a nested plain dict from (key_path, value) pairs.

    Each key_path is a sequence of keys; intermediate levels are created on demand.
    """
    make_node = lambda: defaultdict(make_node)
    root = make_node()
    for key_path, value in nested_items:
        node = root
        for part in key_path[:-1]:
            node = node[part]
        node[key_path[-1]] = value
    return default_to_regular(root)
def distribution_list_sampler(distribution_list, num_samples):
    """Yield num_samples nested dicts drawn from the given distribution objects."""
    for distr in distribution_list:
        distr.prepare_samples(howmany=num_samples)
    for _ in range(num_samples):
        items = [(distr.param_name.split(OBJECT_SEPARATOR), distr.sample()) for distr in distribution_list]
        yield nested_to_dict(items)
home = str(Path.home())
def mkdtemp(prefix='cluster_utils', suffix=''):
    """Create a temp directory under ~/.cache named '<prefix>-<suffix>-...' and return its path.

    NOTE(review): assumes ~/.cache already exists — tempfile.mkdtemp does not
    create the parent directory.
    """
    new_prefix = prefix + ('' if not suffix else '-' + suffix + '-')
    return tempfile.mkdtemp(prefix=new_prefix, dir=os.path.join(home, '.cache'))
def temp_directory(prefix='cluster_utils', suffix=''):
    """Like mkdtemp, but returns a TemporaryDirectory that cleans itself up.

    NOTE(review): assumes ~/.cache already exists — the parent dir is not created.
    """
    new_prefix = prefix + ('' if not suffix else '-' + suffix + '-')
    return tempfile.TemporaryDirectory(prefix=new_prefix, dir=os.path.join(home, '.cache'))
class ParamDict(dict):
    """Immutable dict whose entries are also readable via attribute (dot) access."""

    __getattr__ = dict.__getitem__

    def __setattr__(self, key, value):
        raise TypeError("Setting object not mutable after settings are fixed!")

    def __delattr__(self, item):
        raise TypeError("Setting object not mutable after settings are fixed!")

    def __setitem__(self, key, value):
        raise TypeError("Setting object not mutable after settings are fixed!")

    def __deepcopy__(self, memo):
        """Deep-copy support: rebuild a ParamDict from deep-copied key/value pairs."""
        pairs = ((deepcopy(k, memo), deepcopy(v, memo)) for k, v in self.items())
        return ParamDict(pairs)

    def __repr__(self):
        # Pretty-printed, key-sorted JSON view of the settings.
        return json.dumps(self, indent=4, sort_keys=True)
def recursive_objectify(nested_dict):
    """Turn a nested dict into a nested ParamDict (deep copy; the input is untouched).

    Bug fix: ``collections.Mapping`` was removed in Python 3.10 — use
    ``collections.abc.Mapping`` instead.
    """
    from collections.abc import Mapping
    result = deepcopy(nested_dict)
    for k, v in result.items():
        if isinstance(v, Mapping):
            result[k] = recursive_objectify(v)
    return ParamDict(result)
class SafeDict(dict):
    """ A dict with prohibiting init from a list of pairs containing duplicates

    Bug fix: the duplicate check lived in a method named ``__ini__`` (typo), so
    it was never invoked; it is now ``__init__`` and actually runs.
    """

    def __init__(self, *args, **kwargs):
        # Only pair-list construction can carry duplicates; dicts are already unique.
        if args and args[0] and not isinstance(args[0], dict):
            keys, _ = zip(*args[0])
            duplicates = [item for item, count in collections.Counter(keys).items() if count > 1]
            if duplicates:
                raise TypeError("Keys {} repeated in json parsing".format(duplicates))
        super().__init__(*args, **kwargs)
def load_json(file):
    """ Safe load of a json file (doubled entries raise exception)"""
    with open(file, 'r') as handle:
        return json.load(handle, object_pairs_hook=SafeDict)
def update_recursive(d, u, defensive=False):
    """Recursively merge mapping *u* into *d* in place and return *d*.

    :param defensive: when True, raise KeyError for keys absent from *d*.
    Bug fix: ``collections.Mapping`` was removed in Python 3.10 — use
    ``collections.abc.Mapping`` instead.
    """
    from collections.abc import Mapping
    for k, v in u.items():
        if defensive and k not in d:
            raise KeyError("Updating a non-existing key")
        if isinstance(v, Mapping):
            # NOTE(review): `defensive` is not propagated into the recursion —
            # preserved from the original; confirm whether that is intentional.
            d[k] = update_recursive(d.get(k, {}), v)
        else:
            d[k] = v
    return d
def save_settings_to_json(setting_dict, model_dir):
    """Write the settings as pretty-printed, key-sorted JSON to model_dir/JSON_SETTINGS_FILE."""
    target = os.path.join(model_dir, JSON_SETTINGS_FILE)
    with open(target, 'w') as handle:
        handle.write(json.dumps(setting_dict, sort_keys=True, indent=4))
def save_metrics_params(metrics, params, save_dir=None):
    """Persist params (JSON + one-line CSV) and metrics (one-line CSV) to save_dir.

    Defaults to params.model_dir. Also records 'time_elapsed' since
    update_params_from_cmdline last ran, unless the metric already exists.
    NOTE(review): assumes update_params_from_cmdline was called first —
    otherwise start_time is None and the subtraction below raises.
    """
    if save_dir is None:
        save_dir = params.model_dir
    create_dir(save_dir)
    save_settings_to_json(params, save_dir)
    param_file = os.path.join(save_dir, CLUSTER_PARAM_FILE)
    flattened_params = dict(flatten_nested_string_dict(params))
    save_dict_as_one_line_csv(flattened_params, param_file)
    time_elapsed = time.time() - update_params_from_cmdline.start_time
    if 'time_elapsed' not in metrics.keys():
        metrics['time_elapsed'] = time_elapsed
    else:
        warn('\'time_elapsed\' metric already taken. Automatic time saving failed.')
    metric_file = os.path.join(save_dir, CLUSTER_METRIC_FILE)
    save_dict_as_one_line_csv(metrics, metric_file)
def is_json_file(cmd_line):
    """Return True iff *cmd_line* names an existing file; never raises.

    Bug fix: ``warn(msg, e)`` passed the exception *instance* as the warning
    *category*, which itself raises TypeError inside warn — the exception is
    now formatted into the message instead.
    """
    try:
        return os.path.isfile(cmd_line)
    except Exception as e:
        warn('JSON parsing suppressed exception: {}'.format(e))
        return False
def is_parseable_dict(cmd_line):
    """Return True iff *cmd_line* literal-evals to a dict; never raises.

    Bug fix: ``warn(msg, e)`` passed the exception *instance* as the warning
    *category*, which itself raises TypeError inside warn — the exception is
    now formatted into the message instead.
    """
    try:
        res = ast.literal_eval(cmd_line)
        return isinstance(res, dict)
    except Exception as e:
        warn('Dict literal eval suppressed exception: {}'.format(e))
        return False
def update_params_from_cmdline(cmd_line=None, default_params=None, custom_parser=None, verbose=True):
    """ Updates default settings based on command line input.

    :param cmd_line: Expecting (same format as) sys.argv
    :param default_params: Dictionary of default params
    :param custom_parser: callable that returns a dict of params on success
    and None on failure (suppress exceptions!)
    :param verbose: Boolean to determine if final settings are pretty printed
    :return: Immutable nested dict with (deep) dot access. Priority: default_params < default_json < cmd_line
    """
    if not cmd_line:
        cmd_line = sys.argv
    if default_params is None:
        default_params = {}
    # The single cmd-line argument may be a custom-parsable payload, a JSON
    # file path, or a python dict literal.
    if len(cmd_line) < 2:
        cmd_params = {}
    elif custom_parser and custom_parser(cmd_line):  # Custom parsing, typically for flags
        cmd_params = custom_parser(cmd_line)
    elif len(cmd_line) == 2 and is_json_file(cmd_line[1]):
        cmd_params = load_json(cmd_line[1])
    elif len(cmd_line) == 2 and is_parseable_dict(cmd_line[1]):
        cmd_params = ast.literal_eval(cmd_line[1])
    else:
        raise ValueError('Failed to parse command line')
    update_recursive(default_params, cmd_params)
    # A 'default_json' key pulls in a JSON settings file (which may itself
    # chain to another one via its own 'default_json' entry).
    if JSON_FILE_KEY in default_params:
        json_params = load_json(default_params[JSON_FILE_KEY])
        if 'default_json' in json_params:
            json_base = load_json(json_params[JSON_FILE_KEY])
        else:
            json_base = {}
        update_recursive(json_base, json_params)
        update_recursive(default_params, json_base)
    # Re-apply cmd-line params so they take priority over JSON-provided values.
    update_recursive(default_params, cmd_params)
    final_params = recursive_objectify(default_params)
    if verbose:
        print(final_params)
    # Timestamp used by save_metrics_params to report 'time_elapsed'.
    update_params_from_cmdline.start_time = time.time()
    return final_params


update_params_from_cmdline.start_time = None
| 16,722 | 32.114851 | 120 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/maprop/perturbations.py | # -*- coding: utf-8 -*-
"""Introduces differentiation via perturbations.
Example of usage:
@perturbed
def sign_or(x, axis=-1):
s = ((torch.sign(x) + 1) / 2.0).type(torch.bool)
result = torch.any(s, dim=-1)
return result.type(torch.float) * 2.0 - 1
Then sign_or is differentiable (unlike what it seems).
It is possible to specify the parameters of the perturbations using:
@perturbed(num_samples=1000, sigma=0.1, noise='gumbel')
...
The decorator can also be used directly as a function, for example:
soft_argsort = perturbed(torch.argsort, num_samples=200, sigma=0.01)
"""
import functools
import torch
from torch import Tensor
from torch.distributions.gumbel import Gumbel
from torch.distributions.normal import Normal
from typing import Optional
_GUMBEL = 'gumbel'
_NORMAL = 'normal'
SUPPORTED_NOISES = (_GUMBEL, _NORMAL)
def sample_noise_with_gradients(noise, shape):
    """Sample a noise tensor together with the gradient of its log-density.

    Args:
        noise: (str) one of SUPPORTED_NOISES ('gumbel' or 'normal').
        shape: shape of the tensor to sample.

    Returns:
        A (samples, gradients) tuple of tensors with the given shape. For
        normal noise the gradient equals the sample itself; for gumbel noise
        it is 1 - exp(-sample).

    Raises:
        ValueError: when the requested noise distribution is not supported.
    """
    if noise not in SUPPORTED_NOISES:
        raise ValueError('{} noise is not supported. Use one of [{}]'.format(
            noise, SUPPORTED_NOISES))
    if noise == _GUMBEL:
        draws = Gumbel(0.0, 1.0).sample(shape)
        grads = 1 - torch.exp(-draws)
    elif noise == _NORMAL:
        draws = Normal(0.0, 1.0).sample(shape)
        grads = draws
    return draws, grads
def perturbed(func=None,
              num_samples: int = 1000,
              sigma: float = 0.05,
              noise: str = _NORMAL,
              batched: bool = True,
              device: Optional[torch.device] = None):
    """Turns a function into a differentiable one via perturbations.

    The input function has to be the solution to a linear program for the trick
    to work. For instance the maximum function, the logical operators or the
    ranks can be expressed as solutions to some linear programs on some
    polytopes. If this condition is violated though, the result would not hold
    and there is no guarantee on the validity of the obtained gradients.

    This function can be used directly or as a decorator.

    Args:
      func: the function to be turned into a perturbed and differentiable one.
        Four I/O signatures for func are currently supported:
          If batched is True,
            (1) input [B, D1, ..., Dk], output [B, D1, ..., Dk], k >= 1
            (2) input [B, D1, ..., Dk], output [B], k >= 1
          If batched is False,
            (3) input [D1, ..., Dk], output [D1, ..., Dk], k >= 1
            (4) input [D1, ..., Dk], output [], k >= 1.
      num_samples: the number of samples to use for the expectation computation.
      sigma: the scale of the perturbation.
      noise: a string representing the noise distribution to be used to sample
        perturbations.
      batched: whether inputs to the perturbed function will have a leading
        batch dimension (True) or consist of a single example (False).
        Defaults to True.
      device: The device to create tensors on (cpu/gpu). If None given, it will
        default to gpu:0 if available, cpu otherwise.

    Returns:
      a function has the same signature as func but that can be back propagated.
    """
    # If device not supplied, auto detect
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # This is a trick to have the decorator work both with and without arguments.
    if func is None:
        return functools.partial(
            perturbed, num_samples=num_samples, sigma=sigma, noise=noise,
            batched=batched, device=device)

    @functools.wraps(func)
    def wrapper(input_tensor, *args):
        # The autograd.Function is defined per call so that forward/backward can
        # close over num_samples/sigma/batched/device without extra ctx plumbing.
        class PerturbedFunc(torch.autograd.Function):
            @staticmethod
            def forward(ctx,
                        input_tensor: Tensor,
                        *args):
                original_input_shape = input_tensor.shape
                if batched:
                    if not input_tensor.dim() >= 2:
                        raise ValueError('Batched inputs must have at least rank two')
                else:  # Adds dummy batch dimension internally.
                    input_tensor = input_tensor.unsqueeze(0)
                input_shape = input_tensor.shape  # [B, D1, ... Dk], k >= 1
                perturbed_input_shape = [num_samples] + list(input_shape)
                noises = sample_noise_with_gradients(noise, perturbed_input_shape)
                additive_noise, noise_gradient = tuple(
                    [noise.type(input_tensor.dtype) for noise in noises])
                additive_noise = additive_noise.to(device)
                noise_gradient = noise_gradient.to(device)
                # Monte-Carlo perturbation: theta + sigma * Z for each sample.
                perturbed_input = input_tensor.unsqueeze(0) + sigma * additive_noise
                # [N, B, D1, ..., Dk] -> [NB, D1, ..., Dk].
                flat_batch_dim_shape = [-1] + list(input_shape)[1:]
                perturbed_input = torch.reshape(perturbed_input, flat_batch_dim_shape)
                # Calls user-defined function in a perturbation agnostic manner.
                perturbed_output = func(perturbed_input, *args)
                # [NB, D1, ..., Dk] -> [N, B, D1, ..., Dk].
                perturbed_input = torch.reshape(perturbed_input, perturbed_input_shape)
                # Either
                #   (Default case): [NB, D1, ..., Dk] -> [N, B, D1, ..., Dk]
                # or
                #   (Full-reduce case) [NB] -> [N, B]
                perturbed_output_shape = [num_samples, -1] + list(perturbed_output.shape)[1:]
                perturbed_output = torch.reshape(perturbed_output, perturbed_output_shape)
                # Average over the N samples gives the smoothed forward value.
                forward_output = torch.mean(perturbed_output, dim=0)
                if not batched:  # Removes dummy batch dimension.
                    forward_output = forward_output[0]
                # Save context for backward pass
                ctx.save_for_backward(perturbed_input, perturbed_output, noise_gradient)
                ctx.original_input_shape = original_input_shape
                return forward_output

            @staticmethod
            def backward(ctx,
                         dy: Tensor):
                # Pull saved tensors
                original_input_shape = ctx.original_input_shape
                perturbed_input, perturbed_output, noise_gradient = ctx.saved_tensors
                output, noise_grad = perturbed_output, noise_gradient
                # Adds dummy feature/channel dimension internally.
                if perturbed_input.dim() > output.dim():
                    dy = dy.unsqueeze(-1)
                    output = output.unsqueeze(-1)
                # Adds dummy batch dimension internally.
                if not batched:
                    dy = dy.unsqueeze(0)
                # Flattens [D1, ..., Dk] to a single feat dim [D].
                flatten = lambda t: torch.reshape(t, (list(t.shape)[0], list(t.shape)[1], -1))
                dy = torch.reshape(dy, (list(dy.shape)[0], -1))  # (B, D)
                output = flatten(output)  # (N, B, D)
                noise_grad = flatten(noise_grad)  # (N, B, D)
                # Score-function style estimator: weight each sample's noise
                # gradient by <output_n, dy>, then average over the N samples.
                g = torch.einsum('nbd,nb->bd', noise_grad, torch.einsum('nbd,bd->nb', output, dy))
                g /= sigma * num_samples
                return torch.reshape(g, original_input_shape)

        return PerturbedFunc.apply(input_tensor, *args)

    return wrapper
| 8,022 | 40.569948 | 98 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/maprop/fenchel_young.py | # -*- coding: utf-8 -*-
"""Implementation of a Fenchel-Young loss using perturbation techniques."""
import torch
import torch.nn as nn
from torch import Tensor
from aaai23.maprop import perturbations
from typing import Callable, Optional
class PerturbedFunc(torch.autograd.Function):
    """Implementation of a Fenchel Young loss."""

    @staticmethod
    def forward(ctx,
                input_tensor: Tensor,
                y_true: Tensor,
                perturbed: Callable,
                batched: bool,
                maximize: bool, *args) -> Tensor:
        # Residual between the perturbed optimizer's output and the target.
        diff = perturbed(input_tensor, *args) - y_true.type(input_tensor.dtype)
        if not maximize:
            diff = -diff
        # Computes per-example loss for batched inputs.
        if batched:
            loss = torch.sum(torch.reshape(diff, [list(diff.shape)[0], -1]) ** 2, dim=-1)
        else:
            # Computes loss for unbatched inputs.
            loss = torch.sum(diff ** 2)
        # `diff` doubles as the gradient w.r.t. input_tensor (see backward).
        ctx.save_for_backward(diff)
        ctx.batched = batched
        return loss

    @staticmethod
    def backward(ctx,
                 dy: Tensor):
        diff, = ctx.saved_tensors
        batched = ctx.batched
        if batched:  # dy has shape (batch_size,) in this case.
            # Broadcast the per-example upstream gradient over feature dims.
            dy = torch.reshape(dy, [list(dy.shape)[0]] + (diff.dim() - 1) * [1])
        # Only input_tensor receives a gradient; the remaining forward
        # arguments (y_true, perturbed, batched, maximize) get None.
        return dy * diff, None, None, None, None
class FenchelYoungLoss(nn.Module):
    """Fenchel-Young loss computed with a perturbed optimizer (see
    `perturbations.perturbed`)."""

    def __init__(self,
                 func: Callable[[Tensor], Tensor] = None,
                 num_samples: int = 1000,
                 sigma: float = 0.01,
                 noise: str = perturbations._NORMAL,
                 batched: bool = True,
                 maximize: bool = True,
                 device: Optional[torch.device] = None):
        """Initializes the Fenchel-Young loss.

        Args:
          func: the function whose argmax is to be differentiated by perturbation.
          num_samples: (int) the number of perturbed inputs.
          sigma: (float) the amount of noise to be considered
          noise: (str) the noise distribution to be used to sample perturbations.
          batched: whether inputs to the func will have a leading batch dimension
            (True) or consist of a single example (False). Defaults to True.
          maximize: (bool) whether to maximize or to minimize the input function.
          device: The device to create tensors on (cpu/gpu). If None given, it will
            default to gpu:0 if available, cpu otherwise.
        """
        super().__init__()
        self._batched = batched
        self._maximize = maximize
        self.func = func
        # Smoothed, differentiable version of `func` used inside the loss.
        self.perturbed = perturbations.perturbed(
            func=func,
            num_samples=num_samples,
            sigma=sigma,
            noise=noise,
            batched=batched,
            device=device,
        )

    def forward(self, input_tensor, y_true, *args):
        """Return the (per-example, if batched) Fenchel-Young loss value."""
        return PerturbedFunc.apply(
            input_tensor, y_true, self.perturbed, self._batched, self._maximize, *args)
| 3,200 | 38.036585 | 110 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/maprop/models.py | # -*- coding: utf-8 -*-
from math import sqrt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
def get_model(model_name, out_features, in_channels, arch_params):
    """Build a model by name.

    Args:
        model_name: one of "ResNet18", "ConvNet", "MLP", "PureConvNet",
            "CombResnet18".
        out_features: number of output features of the network.
        in_channels: number of input channels expected by the first layer.
        arch_params: extra keyword arguments forwarded to the constructor.

    Raises:
        ValueError: if the model name is not recognized.
    """
    torchvision_models = {"ResNet18": torchvision.models.resnet18}
    local_models = {"ConvNet": ConvNet, "MLP": MLP, "PureConvNet": PureConvNet, "CombResnet18": CombRenset18}
    if model_name in torchvision_models:
        net = torchvision_models[model_name](pretrained=False, num_classes=out_features, **arch_params)
        # Hacking ResNets to expect 'in_channels' input channel (and not three)
        del net.conv1
        net.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
        return net
    if model_name in local_models:
        return local_models[model_name](out_features=out_features, in_channels=in_channels, **arch_params)
    raise ValueError(f"Model name {model_name} not recognized!")
def dim_after_conv2D(input_dim, stride, kernel_size):
    """Spatial size after a conv layer, using this project's convention
    ``(input_dim - kernel_size + 2) // stride``.

    NOTE(review): the textbook formula is ``(in - k + 2*p) // s + 1``; this
    helper matches the original code, not the textbook — confirm intent if
    it is ever used for shape inference.
    """
    shrunk = input_dim - kernel_size + 2
    return shrunk // stride
class CombRenset18(nn.Module):
    """Truncated ResNet18 (stem + layer1) that outputs a 2-D grid of values.

    The spatial output is max-pooled to a (sqrt(out_features), sqrt(out_features))
    grid and channels are averaged, so the forward pass returns one scalar per
    grid cell. NOTE: the class name keeps the original "Renset" typo because
    other code (e.g. a model registry) refers to it by this name.
    """

    def __init__(self, out_features, in_channels):
        # out_features must be a perfect square (grid of sqrt x sqrt cells).
        super().__init__()
        self.resnet_model = torchvision.models.resnet18(pretrained=False, num_classes=out_features)
        # Replace the stem conv so the network accepts `in_channels` inputs
        # instead of the default three RGB channels.
        del self.resnet_model.conv1
        self.resnet_model.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
        output_shape = (int(sqrt(out_features)), int(sqrt(out_features)))
        self.pool = nn.AdaptiveMaxPool2d(output_shape)
        #self.last_conv = nn.Conv2d(128, 1, kernel_size=1, stride=1)

    def forward(self, x):
        # Only the stem and the first residual stage are used; deeper stages
        # are intentionally disabled (see commented-out lines).
        x = self.resnet_model.conv1(x)
        x = self.resnet_model.bn1(x)
        x = self.resnet_model.relu(x)
        x = self.resnet_model.maxpool(x)
        x = self.resnet_model.layer1(x)
        #x = self.resnet_model.layer2(x)
        #x = self.resnet_model.layer3(x)
        #x = self.last_conv(x)
        x = self.pool(x)
        # Collapse the channel dimension -> [batch, grid, grid].
        x = x.mean(dim=1)
        return x
class ConvNet(torch.nn.Module):
def __init__(self, out_features, in_channels, kernel_size, stride, linear_layer_size, channels_1, channels_2):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=channels_1, kernel_size=kernel_size, stride=stride)
self.conv2 = nn.Conv2d(in_channels=channels_1, out_channels=channels_2, kernel_size=kernel_size, stride=stride)
output_shape = (4, 4)
self.pool = nn.AdaptiveAvgPool2d(output_shape)
self.fc1 = nn.Linear(in_features=output_shape[0] * output_shape[1] * channels_2, out_features=linear_layer_size)
self.fc2 = nn.Linear(in_features=linear_layer_size, out_features=out_features)
def forward(self, x):
batch_size = x.shape[0]
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = self.pool(x)
x = x.view(batch_size, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class MLP(torch.nn.Module):
    """Two-layer perceptron over flattened inputs of size in_channels*40*20."""

    def __init__(self, out_features, in_channels, hidden_layer_size):
        super().__init__()
        # Input spatial size is hard-wired to 40x20 in this project.
        flattened_size = in_channels * 40 * 20
        self.fc1 = nn.Linear(in_features=flattened_size, out_features=hidden_layer_size)
        self.fc2 = nn.Linear(in_features=hidden_layer_size, out_features=out_features)

    def forward(self, x):
        """Flatten, apply tanh hidden layer, return linear outputs."""
        flat = x.view(x.shape[0], -1)
        hidden = torch.tanh(self.fc1(flat))
        return self.fc2(hidden)
class PureConvNet(torch.nn.Module):
    """Fully-convolutional network producing a (sqrt(out_features))^2 grid.

    One or two conv layers, adaptive pooling to the output grid, then a 1x1
    conv collapsing the channels.
    """

    # torch.tanh replaces the deprecated F.tanh (same values, no warning).
    act_funcs = {"relu": F.relu, "tanh": torch.tanh, "identity": lambda x: x}

    def __init__(self, out_features, pooling, use_second_conv, kernel_size, in_channels,
                 channels_1=20, channels_2=20, act_func="relu"):
        """
        Args:
            out_features: total number of output cells; must be a perfect square.
            pooling: "average" or "max".
            use_second_conv: whether to apply the second conv layer.
            kernel_size: kernel size of the first two conv layers.
            in_channels: number of input channels.
            channels_1, channels_2: channel counts of the two conv layers.
            act_func: key into PureConvNet.act_funcs.
        """
        super().__init__()
        self.use_second_conv = use_second_conv
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=channels_1,
                               kernel_size=kernel_size, stride=1)
        self.conv2 = nn.Conv2d(in_channels=channels_1, out_channels=channels_2,
                               kernel_size=kernel_size, stride=1)
        output_shape = (int(sqrt(out_features)), int(sqrt(out_features)))
        if pooling == "average":
            self.pool = nn.AdaptiveAvgPool2d(output_shape)
        elif pooling == "max":
            self.pool = nn.AdaptiveMaxPool2d(output_shape)
        else:
            # Previously an unknown pooling left self.pool undefined and only
            # crashed later in forward(); fail fast instead.
            raise ValueError(f"Pooling {pooling} not recognized! Use 'average' or 'max'.")
        # 1x1 conv collapsing channels to a single output map.
        self.conv3 = nn.Conv2d(in_channels=channels_2 if use_second_conv else channels_1,
                               out_channels=1, kernel_size=1, stride=1)
        self.act_func = PureConvNet.act_funcs[act_func]

    def forward(self, x):
        """Return a [batch, 1, sqrt(out_features), sqrt(out_features)] map."""
        x = self.act_func(self.conv1(x))
        if self.use_second_conv:
            x = self.act_func(self.conv2(x))
        x = self.pool(x)
        x = self.conv3(x)
        return x
| 4,809 | 37.48 | 120 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/maprop/decorators.py | # -*- coding: utf-8 -*-
from itertools import chain
import torch
from abc import ABC, abstractmethod
from functools import update_wrapper, partial
class Decorator(ABC):
    """Base class for class-based function decorators.

    Subclasses implement __call__; __get__ makes the decorator work on
    methods by binding the instance as the first argument.
    """

    def __init__(self, f):
        self.func = f
        # updated=[] so that 'self' attributes are not overwritten
        update_wrapper(self, f, updated=[])

    @abstractmethod
    def __call__(self, *args, **kwargs):
        pass

    def __get__(self, instance, owner):
        # Descriptor protocol: produce a bound callable for method access.
        bound = partial(self.__call__, instance)
        update_wrapper(bound, self.func)
        return bound
def to_numpy(x):
    """Convert a torch tensor to a numpy array; return anything else unchanged."""
    if not isinstance(x, torch.Tensor):
        return x
    # Move to CPU and drop the autograd graph before converting.
    return x.cpu().detach().numpy()
# noinspection PyPep8Naming
class input_to_numpy(Decorator):
    """Decorator converting every positional and keyword argument to numpy."""

    def __call__(self, *args, **kwargs):
        converted_args = tuple(to_numpy(arg) for arg in args)
        converted_kwargs = {name: to_numpy(value) for name, value in kwargs.items()}
        return self.func(*converted_args, **converted_kwargs)
# noinspection PyPep8Naming
class output_to_numpy(Decorator):
    """Decorator converting tensor results (or tuples of them) to numpy."""

    def __call__(self, *args, **kwargs):
        result = self.func(*args, **kwargs)
        if isinstance(result, torch.Tensor):
            return to_numpy(result)
        if isinstance(result, tuple):
            # Convert each element; non-tensor elements pass through unchanged.
            return tuple(to_numpy(item) for item in result)
        return result
# noinspection PyPep8Naming
class none_if_missing_arg(Decorator):
    """Decorator short-circuiting to None when any argument is None."""

    def __call__(self, *args, **kwargs):
        if any(value is None for value in chain(args, kwargs.values())):
            return None
        return self.func(*args, **kwargs)
| 1,611 | 25.42623 | 103 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/maprop/warcraft_shortest_path/trainers.py | # -*- coding: utf-8 -*-
import random
import time
from abc import ABC, abstractmethod
import torch
from aaai23.maprop.blackbox.losses import HammingLoss
from aaai23.maprop.blackbox.dijkstra import ShortestPath
from aaai23.maprop.logger import Logger
from aaai23.maprop.models import get_model
from aaai23.maprop.utils import AverageMeter, optimizer_from_string, customdefaultdict
from aaai23.maprop.warcraft_shortest_path import metrics
import numpy as np
from collections import defaultdict
from torch.optim.lr_scheduler import MultiStepLR
from aaai23.maprop.warcraft_shortest_path.visualization import draw_paths_on_image
class ShortestPathAbstractTrainer(ABC):
    """Base trainer for Warcraft shortest-path experiments.

    Subclasses implement `build_model` and `forward_pass`; this class owns the
    optimizer, LR scheduling, epoch loops, metric aggregation and logging.
    """

    def __init__(
        self,
        *,
        train_iterator,
        test_iterator,
        metadata,
        use_cuda,
        batch_size,
        optimizer_name,
        optimizer_params,
        model_params,
        fast_mode,
        neighbourhood_fn,
        preload_batch,
        lr_milestone_1,
        lr_milestone_2,
        use_lr_scheduling
    ):
        """
        Args:
            train_iterator / test_iterator: objects exposing
                `get_epoch_iterator(...)` yielding dicts with keys
                "images", "labels", "true_weights".
            metadata: dict with at least "output_features", "num_channels"
                and a "denormalize" callable (used in `log`).
            use_cuda: move model and evaluation inputs to CUDA if True.
            batch_size: mini-batch size for both loops.
            optimizer_name / optimizer_params: resolved via
                `optimizer_from_string`.
            model_params: forwarded to the subclass's `build_model`.
            fast_mode: if True, process a single batch per epoch (smoke test).
            neighbourhood_fn: grid-neighbourhood name used by the solvers.
            preload_batch: forwarded to the iterators' `preload` option.
            lr_milestone_1 / lr_milestone_2: epochs for the LR decay steps.
            use_lr_scheduling: enable the MultiStepLR scheduler.
        """
        self.fast_mode = fast_mode
        self.use_cuda = use_cuda
        self.optimizer_params = optimizer_params
        self.batch_size = batch_size
        self.test_iterator = test_iterator
        self.train_iterator = train_iterator
        self.metadata = metadata
        # output_features is assumed to be a perfect square (square grid).
        self.grid_dim = int(np.sqrt(self.metadata["output_features"]))
        self.neighbourhood_fn = neighbourhood_fn
        self.preload_batch = preload_batch
        self.model = None
        self.build_model(**model_params)
        if self.use_cuda:
            self.model.to("cuda")
        self.optimizer = optimizer_from_string(optimizer_name)(self.model.parameters(), **optimizer_params)
        self.use_lr_scheduling = use_lr_scheduling
        if use_lr_scheduling:
            self.scheduler = MultiStepLR(self.optimizer, milestones=[lr_milestone_1, lr_milestone_2], gamma=0.1)
        self.epochs = 0
        self.train_logger = Logger(scope="training", default_output="tensorboard")
        self.val_logger = Logger(scope="validation", default_output="tensorboard")

    def train_epoch(self):
        """Run one training epoch; returns a dict of averaged train metrics."""
        self.epochs += 1
        batch_time = AverageMeter("Batch time")
        data_time = AverageMeter("Data time")
        cuda_time = AverageMeter("Cuda time")
        avg_loss = AverageMeter("Loss")
        avg_accuracy = AverageMeter("Accuracy")
        avg_perfect_accuracy = AverageMeter("Perfect Accuracy")
        avg_metrics = customdefaultdict(lambda k: AverageMeter("train_"+k))
        self.model.train()
        end = time.time()
        iterator = self.train_iterator.get_epoch_iterator(batch_size=self.batch_size, number_of_epochs=1, device='cuda' if self.use_cuda else 'cpu', preload=self.preload_batch)
        for i, data in enumerate(iterator):
            input, true_path, true_weights = data["images"], data["labels"], data["true_weights"]
            if i == 0:
                self.log(data, train=True)
            # NOTE(review): cuda_time measures the interval between these two
            # adjacent calls, i.e. effectively zero — likely leftover timing code.
            cuda_begin = time.time()
            cuda_time.update(time.time()-cuda_begin)
            # measure data loading time
            data_time.update(time.time() - end)
            loss, accuracy, last_suggestion = self.forward_pass(input, true_path, train=True, i=i,
                                                               true_weights=true_weights)
            suggested_path = last_suggestion["suggested_path"]
            batch_metrics = metrics.compute_metrics(true_paths=true_path, suggested_paths=suggested_path, true_vertex_costs=true_weights)
            # update batch metrics
            {avg_metrics[k].update(v, input.size(0)) for k, v in batch_metrics.items()}
            assert len(avg_metrics.keys()) > 0
            avg_loss.update(loss.item(), input.size(0))
            avg_accuracy.update(accuracy.item(), input.size(0))
            # compute gradient and do SGD step
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if self.fast_mode:
                break
        meters = [batch_time, data_time, cuda_time, avg_loss, avg_accuracy]
        meter_str = "\t".join([str(meter) for meter in meters])
        print(f"Epoch: {self.epochs}\t{meter_str}")
        if self.use_lr_scheduling:
            self.scheduler.step()
        self.train_logger.log(avg_loss.avg, "loss")
        self.train_logger.log(avg_accuracy.avg, "accuracy")
        for key, avg_metric in avg_metrics.items():
            self.train_logger.log(avg_metric.avg, key=key)
        return {
            "train_loss": avg_loss.avg,
            "train_accuracy": avg_accuracy.avg,
            **{"train_"+k: avg_metrics[k].avg for k in avg_metrics.keys()}
        }

    def evaluate(self):
        """Run one pass over the test iterator; returns averaged metrics."""
        avg_metrics = defaultdict(AverageMeter)
        self.model.eval()
        iterator = self.test_iterator.get_epoch_iterator(batch_size=self.batch_size, number_of_epochs=1, shuffle=False,
                                                         device='cuda' if self.use_cuda else 'cpu',
                                                         preload=self.preload_batch)
        for i, data in enumerate(iterator):
            input, true_path, true_weights = (
                data["images"].contiguous(),
                data["labels"].contiguous(),
                data["true_weights"].contiguous(),
            )
            if self.use_cuda:
                input = input.cuda()
                true_path = true_path.cuda()
            loss, accuracy, last_suggestion = self.forward_pass(input, true_path, train=False, i=i,
                                                               true_weights=true_weights)
            suggested_path = last_suggestion["suggested_path"]
            data.update(last_suggestion)
            if i == 0:
                # Visualize four random examples from the first batch.
                indices_in_batch = random.sample(range(self.batch_size), 4)
                for num, k in enumerate(indices_in_batch):
                    self.log(data, train=False, k=k, num=num)
            evaluated_metrics = metrics.compute_metrics(true_paths=true_path,
                                                        suggested_paths=suggested_path,
                                                        true_vertex_costs=true_weights)
            avg_metrics["loss"].update(loss.item(), input.size(0))
            avg_metrics["accuracy"].update(accuracy.item(), input.size(0))
            for key, value in evaluated_metrics.items():
                avg_metrics[key].update(value, input.size(0))
            if self.fast_mode:
                break
        for key, avg_metric in avg_metrics.items():
            self.val_logger.log(avg_metric.avg, key=key)
        avg_metrics_values = dict([(key, avg_metric.avg) for key, avg_metric in avg_metrics.items()])
        return avg_metrics_values

    @abstractmethod
    def build_model(self, **kwargs):
        """Construct self.model (called once from __init__ with model_params)."""
        pass

    @abstractmethod
    def forward_pass(self, input, true_shortest_paths, train, i, true_weights=None):
        """Return (loss, accuracy, last_suggestion) for one batch."""
        pass

    def log(self, data, train, k=None, num=None):
        """Log path visualizations for example `k` of the batch (eval only)."""
        logger = self.train_logger if train else self.val_logger
        if not train:
            image = self.metadata['denormalize'](data["images"][k]).squeeze().astype(np.uint8)
            suggested_path = data["suggested_path"][k].squeeze()
            labels = data["labels"][k].squeeze()
            # Scale the 0/1 masks to 3-channel 0/255 images.
            suggested_path_im = torch.ones((3, *suggested_path.shape))*255*suggested_path.cpu()
            labels_im = torch.ones((3, *labels.shape))*255*labels.cpu()
            image_with_path = draw_paths_on_image(image=image, true_path=labels, suggested_path=suggested_path, scaling_factor=10)
            logger.log(labels_im.data.numpy().astype(np.uint8), key=f"shortest_path_{num}", data_type="image")
            logger.log(suggested_path_im.data.numpy().astype(np.uint8), key=f"suggested_path_{num}", data_type="image")
            logger.log(image_with_path, key=f"full_input_with_path{num}", data_type="image")
class BaselineTrainer(ShortestPathAbstractTrainer):
    """Supervised baseline: predict the shortest-path mask directly with BCE,
    without any combinatorial solver in the loop."""

    def build_model(self, model_name, arch_params):
        """Build the underlying network via the model factory."""
        self.model = get_model(
            model_name, out_features=self.metadata["output_features"], in_channels=self.metadata["num_channels"], arch_params=arch_params
        )

    def forward_pass(self, input, true_shortest_paths, train, i, true_weights=None):
        """One batch: sigmoid outputs vs. flattened path mask under BCE.

        Returns:
            (loss, accuracy, last_suggestion) where accuracy is the fraction
            of true-path cells recovered by the rounded prediction and
            last_suggestion carries the rounded path mask.
        """
        output = self.model(input)
        output = torch.sigmoid(output)
        # (Removed leftover debug print that spammed stdout every batch.)
        label = true_shortest_paths
        flat_target = label.view(label.size()[0], -1)
        criterion = torch.nn.BCELoss()
        loss = criterion(output, flat_target).mean()
        accuracy = (output.round() * flat_target).sum() / flat_target.sum()
        suggested_path = output.view(label.shape).round()
        # No solver here, so there are no vertex costs to report.
        last_suggestion = {"vertex_costs": None, "suggested_path": suggested_path}
        return loss, accuracy, last_suggestion
class DijkstraOnFull(ShortestPathAbstractTrainer):
    """Trainer using blackbox-differentiated Dijkstra (ShortestPath.apply)
    on the full predicted weight grid, with a Hamming loss on the paths."""

    def __init__(self, *, l1_regconst, lambda_val, **kwargs):
        # l1_regconst: weight of the L1 penalty on predicted grid weights.
        # lambda_val: interpolation strength of the blackbox gradient.
        super().__init__(**kwargs)
        self.l1_regconst = l1_regconst
        self.lambda_val = lambda_val
        # self.solver = ShortestPath(lambda_val=lambda_val, neighbourhood_fn=self.neighbourhood_fn)
        self.loss_fn = HammingLoss()
        print("META:", self.metadata)

    def build_model(self, model_name, arch_params):
        """Build the weight-predicting network via the model factory."""
        self.model = get_model(
            model_name, out_features=self.metadata["output_features"], in_channels=self.metadata["num_channels"], arch_params=arch_params
        )

    def forward_pass(self, input, true_shortest_paths, train, i, true_weights=None):
        """One batch: predict weights, solve shortest path, Hamming loss."""
        output = self.model(input)
        # make grid weights positive
        output = torch.abs(output)
        # Reshape to square [batch, grid, grid] cost matrices.
        weights = output.reshape(-1, output.shape[-1], output.shape[-1])
        #if i == 0 and not train:
        #    print(output[0])
        assert len(weights.shape) == 3, f"{str(weights.shape)}"
        sp_fun = ShortestPath.apply
        # shortest_paths = self.solver(weights)
        shortest_paths = sp_fun(weights, self.lambda_val, self.neighbourhood_fn)
        loss = self.loss_fn(shortest_paths, true_shortest_paths)
        logger = self.train_logger if train else self.val_logger
        last_suggestion = {
            "suggested_weights": weights,
            "suggested_path": shortest_paths
        }
        # Paths are 0/1 masks, so a 0.5 threshold counts exact cell matches.
        accuracy = (torch.abs(shortest_paths - true_shortest_paths) < 0.5).to(torch.float32).mean()
        extra_loss = self.l1_regconst * torch.mean(output)
        loss += extra_loss
        return loss, accuracy, last_suggestion
| 10,765 | 37.45 | 176 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/maprop/warcraft_shortest_path/maprop.py | # -*- coding: utf-8 -*-
import numpy as np
import torch
from torch import Tensor
from aaai23.maprop.blackbox.losses import HammingLoss
from aaai23.maprop.warcraft_shortest_path.trainers import ShortestPathAbstractTrainer
from aaai23.maprop.blackbox.dijkstra import get_solver
from aaai23.maprop.utils import maybe_parallelize
from aaai23.maprop.models import get_model
from imle.aimle import aimle
from imle.ste import ste
from imle.target import TargetDistribution, AdaptiveTargetDistribution
from imle.noise import GumbelNoiseDistribution, SumOfGammaNoiseDistribution
from typing import Dict, Any, Callable
def translate_weights(weights: np.ndarray) -> np.ndarray:
    """Shift each instance's weights so they are non-negative.

    For every instance in the batch whose minimum is negative, that minimum is
    subtracted from all its entries; instances that are already non-negative
    are left unchanged.
    """
    original_shape = weights.shape
    flat = weights.reshape(original_shape[0], -1)
    per_instance_min = np.amin(flat, axis=-1)
    # Subtract only negative minima (min(m, 0)); broadcasting via transpose.
    shifted = flat.T - np.minimum(per_instance_min, 0).T
    return shifted.T.reshape(original_shape)
def sanitise_weights(weights: np.ndarray) -> np.ndarray:
    """Clamp negative weights to zero (Dijkstra requires non-negative costs)."""
    clipped = np.maximum(weights, 0)
    return clipped
def map_estimator(weights: Tensor,
                  neighbourhood_fn: str = "8-grid") -> Tensor:
    """MAP estimate: run Dijkstra on the (clipped) weight grids.

    Weights are detached, clipped to be non-negative, solved per instance
    (possibly in parallel) and the resulting 0/1 path masks are returned as a
    float tensor on the input's device.
    """
    weights_np = sanitise_weights(weights.detach().cpu().numpy())
    solver = get_solver(neighbourhood_fn)
    paths = np.asarray(maybe_parallelize(solver, arg_list=list(weights_np)))
    return torch.from_numpy(paths).float().to(weights.device)
class DijkstraMAP(ShortestPathAbstractTrainer):
    """Trainer backpropagating through the Dijkstra MAP estimator via
    I-MLE/AIMLE, STE, or no estimator at all, selected by `mode.method`."""

    def __init__(self, *, l1_regconst, lambda_val,
                 mode,
                 **kwargs):
        # l1_regconst: weight of the L1 penalty on predicted grid weights.
        # lambda_val: (initial) target-distribution beta / perturbation strength.
        # mode: config object with keys like 'method', 'target',
        #       'noise_distribution', 'noise_temperature', 'is_symmetric',
        #       'nb_samples' (checked lazily in forward_pass).
        super().__init__(**kwargs)
        self.l1_regconst = l1_regconst
        self.lambda_val = lambda_val
        self.mode = mode
        # Created lazily on the first training forward pass.
        self.target_distribution = None
        self.noise_distribution = None
        print(f'MAP-BACKPROP MODE: {self.mode}')

        def bb_dijkstra(_weights: Tensor) -> Tensor:
            # Non-differentiable batched Dijkstra on detached weights.
            _solver = get_solver(self.neighbourhood_fn)
            _weights_np = _weights.detach().cpu().numpy()
            _paths = np.asarray(maybe_parallelize(_solver, arg_list=list(_weights_np)))
            _res = torch.from_numpy(_paths).float().to(_weights.device)
            return _res

        self.bb_dijkstra = bb_dijkstra
        self.loss_fn = HammingLoss()
        if 'objective_type' in self.mode and self.mode.objective_type in {'cost', 'cost2'}:
            print(f'OBJECTIVE TYPE: {self.mode.objective_type}')
        print("META:", self.metadata)

    def build_model(self, model_name, arch_params):
        """Build the weight-predicting network via the model factory."""
        self.model = get_model(
            model_name, out_features=self.metadata["output_features"], in_channels=self.metadata["num_channels"], arch_params=arch_params
        )

    def forward_pass(self, input, true_shortest_paths, train, i, true_weights=None):
        """One batch: predict weights, differentiate through the MAP solver.

        During training, wraps `map_estimator` with the gradient estimator
        selected by `mode.method` ('imle', 'ste' or 'none'); during
        evaluation the plain MAP estimate is used.
        """
        output = self.model(input)
        # make grid weights positive
        output = torch.abs(output)
        # Reshape to square [batch, grid, grid] cost matrices.
        weights = output.reshape(-1, output.shape[-1], output.shape[-1])
        assert len(weights.shape) == 3, f"{str(weights.shape)}"
        is_training = self.model.training
        if is_training:
            assert 'target' in self.mode
            if self.target_distribution is None:
                if self.mode.target in {'standard'}:
                    self.target_distribution = TargetDistribution(alpha=1.0, beta=self.lambda_val, do_gradient_scaling=True)
                elif self.mode.target in {'adaptive'}:
                    assert 'lambda_update_step' in self.mode
                    lambda_update_step = self.mode.lambda_update_step
                    self.target_distribution = AdaptiveTargetDistribution(initial_beta=self.lambda_val,
                                                                         beta_update_step=lambda_update_step)
                else:
                    assert False, 'Missing target distribution'
            assert 'noise_distribution' in self.mode
            if self.noise_distribution is None:
                if self.mode.noise_distribution in {'gumbel'}:
                    self.noise_distribution = GumbelNoiseDistribution()
                elif self.mode.noise_distribution in {'sog'}:
                    # Sum-of-Gamma noise with k scaled to the grid height.
                    height_size = weights.shape[1]
                    k_ = int(height_size * 1.3)
                    self.noise_distribution = SumOfGammaNoiseDistribution(k=k_)
                elif self.mode.noise_distribution in {'none'}:
                    self.noise_distribution = None
                else:
                    assert False, 'Missing noise distribution'
            assert 'noise_temperature' in self.mode
            noise_temperature = self.mode.noise_temperature
            assert 'is_symmetric' in self.mode
            is_symmetric = self.mode.is_symmetric
            assert 'nb_samples' in self.mode
            nb_samples = self.mode.nb_samples
            assert 'method' in self.mode
            if self.mode.method in {'imle'}:
                # _is_minimization=True: shortest path minimizes total cost.
                @aimle(target_distribution=self.target_distribution, noise_distribution=self.noise_distribution,
                       theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
                       symmetric_perturbation=is_symmetric, nb_samples=nb_samples, _is_minimization=True)
                def diff_function(weights_t: Tensor) -> Tensor:
                    return map_estimator(weights_t, self.neighbourhood_fn)
            elif self.mode.method in {'ste'}:
                @ste(noise_distribution=self.noise_distribution, noise_temperature=noise_temperature,
                     nb_samples=nb_samples)
                def diff_function(weights_t: Tensor) -> Tensor:
                    return map_estimator(weights_t, self.neighbourhood_fn)
            elif self.mode.method in {'none'}:
                # Identity: no solver in the training graph at all.
                def diff_function(weights_t: Tensor) -> Tensor:
                    return weights_t
            else:
                assert False, f'Unknown method {self.mode.method}'
            shortest_paths = diff_function(weights)
        else:
            shortest_paths = map_estimator(weights, self.neighbourhood_fn)
        loss = self.loss_fn(shortest_paths, true_shortest_paths)
        logger = self.train_logger if train else self.val_logger
        last_suggestion = {
            "suggested_weights": weights,
            "suggested_path": shortest_paths
        }
        # Paths are 0/1 masks, so a 0.5 threshold counts exact cell matches.
        accuracy = (torch.abs(shortest_paths - true_shortest_paths) < 0.5).to(torch.float32).mean()
        extra_loss = self.l1_regconst * torch.mean(output)
        loss += extra_loss
        return loss, accuracy, last_suggestion
| 6,796 | 38.748538 | 137 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/maprop/blackbox/losses.py | # -*- coding: utf-8 -*-
import torch
class HammingLoss(torch.nn.Module):
    """Hamming distance between 0/1 masks: per-element disagreement, averaged
    over the batch dimension and summed over the remaining dimensions."""

    def forward(self, suggested, target):
        # 1 where exactly one of (suggested, target) is set, 0 where they agree.
        disagreement = suggested * (1.0 - target) + (1.0 - suggested) * target
        return disagreement.mean(dim=0).sum()
| 231 | 22.2 | 72 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/maprop/blackbox/dijkstra.py | # -*- coding: utf-8 -*-
import numpy as np
import heapq
import torch
from functools import partial
from aaai23.maprop.blackbox.utils import get_neighbourhood_func
from collections import namedtuple
from aaai23.maprop.utils import maybe_parallelize
DijkstraOutput = namedtuple("DijkstraOutput", ["shortest_path", "is_unique", "transitions"])
def dijkstra(matrix, neighbourhood_fn="8-grid", request_transitions=False):
    """Dijkstra over a grid of vertex costs from (0, 0) to (x_max-1, y_max-1).

    Args:
        matrix: 2-D array of non-negative vertex costs; a path's cost is the
            sum of the costs of the vertices it visits (both endpoints included).
        neighbourhood_fn: name of the grid neighbourhood (e.g. "8-grid").
        request_transitions: if True, also return the predecessor map.

    Returns:
        DijkstraOutput with the 0/1 shortest-path mask, a uniqueness flag
        (derived from counting equal-cost optimal paths), and the transitions
        dict (or None).
    """
    x_max, y_max = matrix.shape
    neighbors_func = partial(get_neighbourhood_func(neighbourhood_fn), x_max=x_max, y_max=y_max)

    costs = np.full_like(matrix, 1.0e10)
    costs[0][0] = matrix[0][0]
    num_path = np.zeros_like(matrix)
    num_path[0][0] = 1
    priority_queue = [(matrix[0][0], (0, 0))]
    certain = set()
    transitions = dict()
    while priority_queue:
        cur_cost, (cur_x, cur_y) = heapq.heappop(priority_queue)
        if (cur_x, cur_y) in certain:
            # Stale heap entry for an already-finalized vertex: skip it.
            # The original code fell through here ('pass'), re-relaxing the
            # neighbours and double-counting equal-cost paths, which could
            # make `is_unique` report False for unique optima.
            continue
        for x, y in neighbors_func(cur_x, cur_y):
            if (x, y) not in certain:
                if matrix[x][y] + costs[cur_x][cur_y] < costs[x][y]:
                    costs[x][y] = matrix[x][y] + costs[cur_x][cur_y]
                    heapq.heappush(priority_queue, (costs[x][y], (x, y)))
                    transitions[(x, y)] = (cur_x, cur_y)
                    num_path[x, y] = num_path[cur_x, cur_y]
                elif matrix[x][y] + costs[cur_x][cur_y] == costs[x][y]:
                    num_path[x, y] += 1
        certain.add((cur_x, cur_y))
    # Retrieve the path by walking the predecessor map back from the goal.
    cur_x, cur_y = x_max - 1, y_max - 1
    on_path = np.zeros_like(matrix)
    on_path[-1][-1] = 1
    while (cur_x, cur_y) != (0, 0):
        cur_x, cur_y = transitions[(cur_x, cur_y)]
        on_path[cur_x, cur_y] = 1.0

    is_unique = num_path[-1, -1] == 1

    if request_transitions:
        return DijkstraOutput(shortest_path=on_path, is_unique=is_unique, transitions=transitions)
    else:
        return DijkstraOutput(shortest_path=on_path, is_unique=is_unique, transitions=None)
def get_solver(neighbourhood_fn):
    """Return a callable mapping a cost matrix to its shortest-path mask."""
    def _solve(matrix):
        return dijkstra(matrix, neighbourhood_fn).shortest_path
    return _solve
class ShortestPath(torch.autograd.Function):
    """Blackbox-differentiable shortest path: exact Dijkstra forward, and a
    backward pass that re-solves on lambda-perturbed weights and returns the
    (negative, scaled) difference of the two solutions as the gradient."""

    @staticmethod
    def forward(ctx, weights, lambda_val, neighbourhood_fn="8-grid"):
        # weights: [batch, grid, grid] non-negative vertex costs.
        ctx.lambda_val = lambda_val
        ctx.neighbourhood_fn = neighbourhood_fn
        ctx.solver = get_solver(neighbourhood_fn)
        # Stash the detached numpy weights for the backward re-solve.
        ctx.weights = weights.detach().cpu().numpy()
        ctx.suggested_tours = np.asarray(maybe_parallelize(ctx.solver, arg_list=list(ctx.weights)))
        return torch.from_numpy(ctx.suggested_tours).float().to(weights.device)

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.shape == ctx.suggested_tours.shape
        grad_output_numpy = grad_output.detach().cpu().numpy()
        # Perturb weights in the direction of the incoming gradient, clipped
        # to stay non-negative, then solve again.
        weights_prime = np.maximum(ctx.weights + ctx.lambda_val * grad_output_numpy, 0.0)
        better_paths = np.asarray(maybe_parallelize(ctx.solver, arg_list=list(weights_prime)))
        # Finite-difference-style gradient of the solver's output.
        gradient = -(ctx.suggested_tours - better_paths) / ctx.lambda_val
        # No gradients for lambda_val or neighbourhood_fn.
        return torch.from_numpy(gradient).to(grad_output.device), None, None
| 3,187 | 35.643678 | 99 | py |
torch-adaptive-imle | torch-adaptive-imle-main/aaai23/tf/utils.py | # -*- coding: utf-8 -*-
import json
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Layer, Conv1D, GlobalMaxPooling1D, Embedding, Dense, Dropout
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing import sequence
import logging
logger = logging.getLogger(__name__)
class IMLESubsetkLayer(tf.keras.layers.Layer):
    """Keras layer selecting a k-hot subset with I-MLE gradients.

    Forward: k-hot mask from perturbed (train) or plain (test) logits.
    Backward: implicit MLE — the same stored noise sample is reused on the
    downstream-gradient-shifted logits, and the gradient is the difference of
    the two k-hot states.
    """

    def __init__(self, k, _tau=30.0, _lambda=1000.0):
        # k: subset size; _tau: noise temperature; _lambda: perturbation
        # strength used in the implicit-differentiation step.
        super(IMLESubsetkLayer, self).__init__()
        self.k = k
        self._tau = _tau
        self._lambda = _lambda
        # Last Sum-of-Gamma noise sample; reused in the backward pass.
        self.samples = None

    def sample_gumbel(self, shape, eps=1e-20):
        """Standard Gumbel(0, 1) sample via the inverse-CDF of a uniform."""
        U = tf.random.uniform(shape, minval=0, maxval=1)
        return -tf.math.log(-tf.math.log(U + eps) + eps)

    def sample_discrete(self, logits):
        """k-hot sample from Gumbel-perturbed logits (top-k thresholding)."""
        gumbel_softmax_sample = logits + self.sample_gumbel(tf.shape(logits))
        threshold = tf.expand_dims(tf.nn.top_k(gumbel_softmax_sample, self.k, sorted=True)[0][:, -1], -1)
        y = tf.cast(tf.greater_equal(gumbel_softmax_sample, threshold), tf.float32)
        return y

    @tf.function
    def sample_gumbel_k(self, shape):
        """Sum-of-Gamma noise whose per-k sum approximates Gumbel(0, 1)."""
        s = tf.map_fn(fn=lambda t: tf.random.gamma(shape, 1.0 / self.k, self.k / t),
                      elems=tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]))
        # now add the samples
        s = tf.reduce_sum(s, 0)
        # the log(m) term
        s = s - tf.math.log(10.0)
        # divide by k --> each s[c] has k samples whose sum is distributed as Gumbel(0, 1)
        s = self._tau * (s / self.k)
        return s

    # @tf.function
    def sample_discrete_2(self, logits):
        """k-hot sample with fresh Sum-of-Gamma noise; stores the noise."""
        self.samples = self.sample_gumbel_k(tf.shape(logits))
        gumbel_softmax_sample = logits + self.samples
        threshold = tf.expand_dims(tf.nn.top_k(gumbel_softmax_sample, self.k, sorted=True)[0][:, -1], -1)
        y = tf.cast(tf.greater_equal(gumbel_softmax_sample, threshold), tf.float32)
        return y

    # @tf.function
    def sample_discrete_2_reuse(self, logits):
        """k-hot sample reusing the previously stored noise (backward pass)."""
        gumbel_softmax_sample = logits + self.samples
        threshold = tf.expand_dims(tf.nn.top_k(gumbel_softmax_sample, self.k, sorted=True)[0][:, -1], -1)
        y = tf.cast(tf.greater_equal(gumbel_softmax_sample, threshold), tf.float32)
        return y

    @tf.custom_gradient
    def gumbel_topk_new(self, logits):
        """k-hot forward with the custom I-MLE gradient."""
        # we compute a map state for the distribution
        # we also store the sample for later
        z_train = self.sample_discrete_2(logits)
        threshold = tf.expand_dims(tf.nn.top_k(logits, self.k, sorted=True)[0][:, -1], -1)
        z_test = tf.cast(tf.greater_equal(logits, threshold), tf.float32)
        z_output = K.in_train_phase(z_train, z_test)

        def custom_grad(dy):
            # we perturb (implicit diff) and then resuse sample for perturb and MAP
            map_dy = self.sample_discrete_2_reuse(logits - (self._lambda * dy))
            # we now compute the gradients as the difference (I-MLE gradients)
            grad = tf.math.subtract(z_train, map_dy)
            # for the straight-through estimator, simply use the following line
            # return dy, k
            return grad

        return z_output, custom_grad

    def call(self, logits):
        """Apply the layer to logits of shape [batch, d, 1]."""
        logits = tf.squeeze(logits, -1)  # [batchsize, d]
        y = self.gumbel_topk_new(logits)
        y = tf.expand_dims(y, -1)  # [batchsize, d, 1]
        return y

    def get_config(self):
        cfg = super().get_config()
        return cfg
EPSILON = np.finfo(tf.float32.as_numpy_dtype).tiny  # smallest positive normal float32; guards log(0) below
def gumbel_keys(w):
    """Perturb every weight in *w* with independent noise for stochastic top-k.

    NOTE(review): the noise is log(-log(U)), i.e. the *negative* of a
    standard Gumbel(0, 1) draw (-log(-log(U))).  This matches the published
    L2X subset-sampling reference code — confirm the sign is intentional
    before changing it.
    """
    u = tf.random.uniform(tf.shape(w), minval=EPSILON, maxval=1.0)
    return w + tf.math.log(-tf.math.log(u))
def continuous_topk(w, k, t, separate=False):
    """Relaxed top-k selection: k successive softmaxes over *w*.

    After each softmax, the (softly) selected mass is damped via a log-mask
    so the next softmax concentrates on the remaining elements.  Returns the
    k individual soft one-hots when *separate* is true, otherwise their sum
    (a soft k-hot vector).
    """
    selections = []
    soft_onehot = tf.zeros_like(w, dtype=tf.float32)
    for _ in range(k):
        # down-weight entries already picked; EPSILON avoids log(0)
        w = w + tf.math.log(tf.maximum(1.0 - soft_onehot, EPSILON))
        soft_onehot = tf.nn.softmax(w / t, axis=-1)
        selections.append(soft_onehot)
    if separate:
        return selections
    return tf.reduce_sum(selections, 0)
def sample_subset(w, k, t=0.1):
    """Draw a relaxed (continuous) k-subset sample from weights *w*.

    Args:
        w (Tensor): per-element weights; in gumbel mode interpreted as
            log-probabilities
        k (int): number of elements in the subset sample
        t (float): softmax temperature for the relaxation
    """
    perturbed = gumbel_keys(w)
    return continuous_topk(perturbed, k, t)
class SampleSubset(Layer):
    """Keras layer for the continuous approximation of k-subset sampling.

    Training mode emits the relaxed (soft) k-hot scores; inference mode
    emits the hard k-hot mask of the top-k logits.
    """
    def __init__(self, tau0, k, **kwargs):
        self.tau0 = tau0  # softmax temperature of the relaxation
        self.k = k        # subset size
        super(SampleSubset, self).__init__(**kwargs)
    def call(self, logits):
        # logits: [batch, d, 1] -> [batch, d]
        flat = tf.squeeze(logits, 2)
        relaxed = sample_subset(flat, self.k, self.tau0)
        # hard top-k mask used at explanation/test time
        kth = tf.expand_dims(tf.nn.top_k(flat, self.k, sorted=True)[0][:, -1], -1)
        hard = tf.cast(tf.greater_equal(flat, kth), tf.float32)
        out = K.in_train_phase(relaxed, hard)
        return tf.expand_dims(out, -1)
    def compute_output_shape(self, input_shape):
        return input_shape
    def get_config(self):
        return super().get_config()
class Concatenate(Layer):
    """Tile a per-example global feature vector along the sequence axis and
    concatenate it with per-position local features."""
    def __init__(self, **kwargs):
        super(Concatenate, self).__init__(**kwargs)
    def call(self, inputs):
        global_feat, local_feat = inputs
        seq_len = int(local_feat.get_shape()[1])
        # [batch, dim] -> [batch, 1, dim] -> [batch, seq_len, dim]
        tiled = tf.tile(tf.expand_dims(global_feat, axis=-2), [1, seq_len, 1])
        return tf.concat([tiled, local_feat], axis=-1)
    def compute_output_shape(self, input_shapes):
        shape1, shape2 = input_shapes
        out = list(shape2)
        # feature dims add up; the sequence dim is unchanged
        out[-1] = int(out[-1]) + int(shape1[-1])
        out[-2] = int(out[-2])
        return tuple(out)
class Sample_Concrete(Layer):
    """Keras layer sampling Concrete / Gumbel-Softmax variables.

    Training mode returns a softmax over Gumbel-perturbed logits; inference
    mode returns the hard top-k mask of the raw logits.
    """
    def __init__(self, tau0, k, **kwargs):
        self.tau0 = tau0  # Gumbel-Softmax temperature
        self.k = k        # number of elements selected at test time
        super(Sample_Concrete, self).__init__(**kwargs)
    def call(self, logits):
        # logits: [batch_size, d, 1] -> [batch_size, 1, d]
        transposed = K.permute_dimensions(logits, (0, 2, 1))
        d = int(transposed.get_shape()[2])
        # Gumbel(0, 1) noise via the inverse-CDF trick
        u = tf.random.uniform(shape=tf.shape(transposed), minval=0.0, maxval=1.0)
        noise = - K.log(-K.log(u))
        relaxed = K.softmax((noise + transposed) / self.tau0)
        relaxed = K.max(relaxed, axis=1)  # drop the singleton axis -> [batch, d]
        # hard top-k mask for the test-time path
        flat = tf.reshape(logits, [-1, d])
        kth = tf.expand_dims(tf.nn.top_k(flat, self.k, sorted=True)[0][:, -1], -1)
        hard = tf.cast(tf.greater_equal(flat, kth), tf.float32)
        out = K.in_train_phase(relaxed, hard)
        return tf.expand_dims(out, -1)
    def get_config(self):
        return super().get_config()
    def compute_output_shape(self, input_shape):
        return input_shape
def construct_gumbel_selector(X_ph, num_words, embedding_dims, embedding_matrix, kernel_size, maxlen):
    """Build the L2X selector network that scores each token for selection.

    Returns a tensor of shape [batch, maxlen, 1] holding one selection logit
    per token position.
    """
    # frozen pretrained embeddings: [batch, maxlen] -> [batch, maxlen, embedding_dims]
    emb = Embedding(num_words, embedding_dims, weights=[embedding_matrix],
                    input_length=maxlen, trainable=False, name='emb_gumbel')(X_ph)
    # shared first convolution: [batch, maxlen, 100]
    first_layer = Conv1D(100, kernel_size, padding='same', activation='relu',
                         strides=1, name='conv1_gumbel')(emb)
    # global branch: max-pool over positions, then a dense summary -> [batch, 100]
    pooled = GlobalMaxPooling1D(name='new_global_max_pooling1d_1')(first_layer)
    global_info = Dense(100, name='new_dense_1', activation='relu')(pooled)
    # local branch: two further convolutions -> [batch, maxlen, 100]
    hidden = Conv1D(100, kernel_size, padding='same', activation='relu',
                    strides=1, name='conv2_gumbel')(first_layer)
    local_info = Conv1D(100, kernel_size, padding='same', activation='relu',
                        strides=1, name='conv3_gumbel')(hidden)
    # fuse the tiled global summary with the local features: [batch, maxlen, 200]
    combined = Concatenate()([global_info, local_info])
    dropped = Dropout(0.2, name='new_dropout_2')(combined)
    # 1x1 convolutions mix features per position: [batch, maxlen, 100]
    mixed = Conv1D(100, 1, padding='same', activation='relu', strides=1,
                   name='conv_last_gumbel')(dropped)
    # final linear 1x1 convolution yields the logits: [batch, maxlen, 1]
    logits_T = Conv1D(1, 1, padding='same', activation=None, strides=1,
                      name='conv4_gumbel')(mixed)
    return logits_T
def subset_precision(modelTestInput, aspect, id_to_word, word_to_id, select_k):
    """Precision of model-selected tokens against human rationale annotations.

    Runs the selector model on every annotated review and counts how many of
    the selected non-padding tokens fall inside any annotated span for the
    given aspect.  Returns correct_selections / total_selections.

    NOTE(review): assumes "data/annotations.json" holds one JSON object per
    line with keys 'x' (token list) and str(aspect) (a list of [start, end)
    ranges) — confirm against the actual annotation format.
    """
    data = []
    num_annotated_reviews = 0
    with open("data/annotations.json") as fin:
        for line in fin:
            item = json.loads(line)
            data.append(item)
            num_annotated_reviews = num_annotated_reviews + 1
    selected_word_counter = 0
    correct_selected_counter = 0
    for anotr in range(num_annotated_reviews):
        ranges = data[anotr][str(aspect)]  # the aspect id
        text_list = data[anotr]['x']
        review_length = len(text_list)
        list_test = []
        # unknown tokens map to id 0
        tokenid_list = [word_to_id.get(token, 0) for token in text_list]
        list_test.append(tokenid_list)
        X_test_subset = np.asarray(list_test)
        # pad/truncate to the model's fixed input length (pads on the left)
        X_test_subset = sequence.pad_sequences(X_test_subset, maxlen=350)
        # X_test_subset = pad_sequences(list_test, max_len=350)
        prediction = modelTestInput.predict(X_test_subset)
        # print(prediction.shape)
        prediction = tf.squeeze(prediction, -1)
        # import sys
        # sys.exit(0)
        # zero out the ids of unselected tokens; presumably prediction is a
        # hard 0/1 mask at inference time (K.in_train_phase) — TODO confirm
        x_val_selected = prediction[0] * X_test_subset
        # map ids back to words; the last review_length entries correspond to
        # the unpadded review because pad_sequences pads on the left
        selected_words = np.vectorize(id_to_word.get)(x_val_selected)[0][-review_length:]
        selected_nonpadding_word_counter = 0
        for i, w in enumerate(selected_words):
            if w != '<PAD>':  # we are nice to the L2X approach by only considering selected non-pad tokens
                selected_nonpadding_word_counter = selected_nonpadding_word_counter + 1
                for r in ranges:
                    rl = list(r)
                    if i in range(rl[0], rl[1]):
                        correct_selected_counter = correct_selected_counter + 1
        # we make sure that we select at least select_k non-padding words;
        # if we have more than select_k non-padding words selected, we allow it but count that in
        selected_word_counter = selected_word_counter + max(selected_nonpadding_word_counter, select_k)
    return correct_selected_counter / selected_word_counter
| 11,352 | 33.507599 | 108 | py |
torch-adaptive-imle | torch-adaptive-imle-main/tests/imle/test_imle.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import torch
from torch import nn, Tensor, Size
from imle.imle import imle
from imle.aimle import aimle
from imle.target import TargetDistribution
from imle.noise import BaseNoiseDistribution
from imle.solvers import select_k, mathias_select_k
from typing import Callable, Optional
import pytest
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
class ConstantNoiseDistribution(BaseNoiseDistribution):
    """Degenerate noise distribution: every sampled entry equals *constant*.

    Makes the IMLE/AIMLE perturbations deterministic, which lets the tests
    compare gradient estimators exactly.
    """
    def __init__(self,
                 constant: float = 0.0,
                 device: Optional[torch.device] = None):
        super().__init__()
        # stored as a scalar tensor so broadcasting covers any sample shape
        self.constant = torch.tensor(constant, dtype=torch.float, device=device)
        self.device = device
    def sample(self,
               shape: Size) -> Tensor:
        base = torch.zeros(size=shape, device=self.device)
        return base + self.constant
def _test_imle_v1(select_fun: Callable[[Tensor, int], Tensor], nb_samples: int):
    """AIMLE without symmetric perturbation must match IMLE exactly.

    Sweeps many random problem sizes and hyper-parameters with *no* noise
    (noise_distribution=None, unit temperature): the gradients from imle
    and non-symmetric aimle must be identical on every trial, while the
    symmetric aimle variant must disagree at least once across the sweep.
    """
    rs = np.random.RandomState(0)
    sym_mismatch_count = 0
    for i in range(2 ** 12):
        # random problem instance: input size, subset size k, target params
        input_size = rs.randint(32, 1024)
        k = rs.randint(1, input_size)
        alpha = rs.uniform(0, 10000)
        beta = rs.uniform(0, 10000)
        noise_temperature = 1.0
        target_distribution = TargetDistribution(alpha=alpha, beta=beta)
        noise_distribution = None
        @imle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
              theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature)
        def imle_select_k(logits: Tensor) -> Tensor:
            return select_fun(logits, k)
        @aimle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
               theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
               symmetric_perturbation=False)
        def aimle_select_k(logits: Tensor) -> Tensor:
            return select_fun(logits, k)
        @aimle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
               theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
               symmetric_perturbation=True)
        def aimle_select_k_sym(logits: Tensor) -> Tensor:
            return select_fun(logits, k)
        # the same initial parameters feed all three estimators
        init_np = rs.randn(1, input_size)
        linear = nn.Linear(input_size, 1)
        aimle_params = nn.Parameter(torch.tensor(init_np, dtype=torch.float, requires_grad=True), requires_grad=True)
        aimle_sym_params = nn.Parameter(torch.tensor(init_np, dtype=torch.float, requires_grad=True), requires_grad=True)
        imle_params = nn.Parameter(torch.tensor(init_np, dtype=torch.float, requires_grad=True), requires_grad=True)
        aimle_res = linear(aimle_select_k(aimle_params))
        aimle_sym_res = linear(aimle_select_k_sym(aimle_sym_params))
        imle_res = linear(imle_select_k(imle_params))
        res = aimle_res + imle_res + aimle_sym_res
        if nb_samples > 1:
            # without noise every sample must yield the same forward value
            value = res[0].item()
            for v in res:
                np.testing.assert_allclose(value, v.item(), atol=1e-3, rtol=1e-3)
        res = torch.sum(res)
        res.backward()
        diff = torch.sum(torch.abs(aimle_params.grad - imle_params.grad)).item()
        diff_sym = torch.sum(torch.abs(aimle_sym_params.grad - imle_params.grad)).item()
        assert diff < 1e-24
        sym_mismatch_count += 1 if diff_sym > 1e-24 else 0
    # the symmetric variant must disagree with IMLE on at least one trial
    assert sym_mismatch_count > 0
def _test_imle_v2(select_fun: Callable[[Tensor, int], Tensor], nb_samples: int):
    """Same comparison as _test_imle_v1, but with deterministic constant noise.

    Uses a ConstantNoiseDistribution and a random noise temperature, so the
    perturbation path is exercised while the estimators stay deterministic:
    imle and non-symmetric aimle gradients must still match exactly, and the
    symmetric aimle variant must disagree at least once.
    """
    rs = np.random.RandomState(0)
    sym_mismatch_count = 0
    for i in range(2 ** 12):
        input_size = rs.randint(32, 1024)
        k = rs.randint(1, input_size)
        alpha = rs.uniform(0, 10000)
        beta = rs.uniform(0, 10000)
        noise_temperature = rs.uniform(0, 100.0)
        target_distribution = TargetDistribution(alpha=alpha, beta=beta)
        # NOTE(review): uniform(-0.01, -0.01) always yields exactly -0.01;
        # perhaps uniform(-0.01, 0.01) was intended — confirm.
        noise_distribution = ConstantNoiseDistribution(constant=rs.uniform(-0.01, -0.01))
        # noise_distribution = ConstantNoiseDistribution(constant=0.0)
        @imle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
              theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature)
        def imle_select_k(logits: Tensor) -> Tensor:
            return select_fun(logits, k)
        @aimle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
               theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
               symmetric_perturbation=False)
        def aimle_select_k(logits: Tensor) -> Tensor:
            return select_fun(logits, k)
        @aimle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
               theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
               symmetric_perturbation=True)
        def aimle_select_k_sym(logits: Tensor) -> Tensor:
            return select_fun(logits, k)
        # the same initial parameters feed all three estimators
        init_np = rs.randn(1, input_size)
        linear = nn.Linear(input_size, 1)
        aimle_params = nn.Parameter(torch.tensor(init_np, dtype=torch.float, requires_grad=True), requires_grad=True)
        aimle_sym_params = nn.Parameter(torch.tensor(init_np, dtype=torch.float, requires_grad=True),
                                        requires_grad=True)
        imle_params = nn.Parameter(torch.tensor(init_np, dtype=torch.float, requires_grad=True), requires_grad=True)
        aimle_res = linear(aimle_select_k(aimle_params))
        aimle_sym_res = linear(aimle_select_k_sym(aimle_sym_params))
        imle_res = linear(imle_select_k(imle_params))
        res = aimle_res + imle_res + aimle_sym_res
        if nb_samples > 1:
            # constant noise: every sample must yield the same forward value
            value = res[0].item()
            for v in res:
                np.testing.assert_allclose(value, v.item(), atol=1e-3, rtol=1e-3)
        res = torch.sum(res)
        res.backward()
        diff = torch.sum(torch.abs(aimle_params.grad - imle_params.grad)).item()
        diff_sym = torch.sum(torch.abs(aimle_sym_params.grad - imle_params.grad)).item()
        # print(aimle_sym_params.grad)
        assert diff < 1e-24
        sym_mismatch_count += 1 if diff_sym > 1e-24 else 0
    # the symmetric variant must disagree with IMLE on at least one trial
    assert sym_mismatch_count > 0
def _test_imle_v3(select_fun: Callable[[Tensor, int], Tensor],
                  nb_samples: int,
                  batch_size: int):
    """Batched variant of _test_imle_v2.

    Same estimator comparison (imle vs aimle vs symmetric aimle) with
    deterministic constant noise, but with batch_size parameter rows, so
    the per-example sample-consistency check runs per batch row.
    """
    rs = np.random.RandomState(0)
    sym_mismatch_count = 0
    for i in range(2 ** 12):
        input_size = rs.randint(32, 1024)
        k = rs.randint(1, input_size)
        alpha = rs.uniform(0, 10000)
        beta = rs.uniform(0, 10000)
        noise_temperature = rs.uniform(0, 100.0)
        target_distribution = TargetDistribution(alpha=alpha, beta=beta)
        # NOTE(review): uniform(-0.01, -0.01) always yields exactly -0.01;
        # perhaps uniform(-0.01, 0.01) was intended — confirm.
        noise_distribution = ConstantNoiseDistribution(constant=rs.uniform(-0.01, -0.01))
        # noise_distribution = ConstantNoiseDistribution(constant=0.0)
        @imle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
              theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature)
        def imle_select_k(logits: Tensor) -> Tensor:
            return select_fun(logits, k)
        @aimle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
               theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
               symmetric_perturbation=False)
        def aimle_select_k(logits: Tensor) -> Tensor:
            return select_fun(logits, k)
        @aimle(target_distribution=target_distribution, noise_distribution=noise_distribution, nb_samples=nb_samples,
               theta_noise_temperature=noise_temperature, target_noise_temperature=noise_temperature,
               symmetric_perturbation=True)
        def aimle_select_k_sym(logits: Tensor) -> Tensor:
            return select_fun(logits, k)
        # batched initial parameters shared by all three estimators
        init_np = rs.randn(batch_size, input_size)
        linear = nn.Linear(input_size, 1)
        aimle_params = nn.Parameter(torch.tensor(init_np, dtype=torch.float, requires_grad=True), requires_grad=True)
        aimle_sym_params = nn.Parameter(torch.tensor(init_np, dtype=torch.float, requires_grad=True),
                                        requires_grad=True)
        imle_params = nn.Parameter(torch.tensor(init_np, dtype=torch.float, requires_grad=True), requires_grad=True)
        aimle_res = linear(aimle_select_k(aimle_params))
        aimle_sym_res = linear(aimle_select_k_sym(aimle_sym_params))
        imle_res = linear(imle_select_k(imle_params))
        res = aimle_res + imle_res + aimle_sym_res
        # reshape so each row groups the nb_samples values of one example
        res_2d = res.view(batch_size, nb_samples)
        if nb_samples > 1:
            # constant noise: within a row, every sample must agree
            for i in range(batch_size):
                value = res_2d[i, 0].item()
                for v in res_2d[i, :]:
                    np.testing.assert_allclose(value, v.item(), atol=1e-3, rtol=1e-3)
        res = torch.sum(res)
        res.backward()
        diff = torch.sum(torch.abs(aimle_params.grad - imle_params.grad)).item()
        diff_sym = torch.sum(torch.abs(aimle_sym_params.grad - imle_params.grad)).item()
        # print(aimle_sym_params.grad)
        assert diff < 1e-24
        sym_mismatch_count += 1 if diff_sym > 1e-24 else 0
    # the symmetric variant must disagree with IMLE on at least one trial
    assert sym_mismatch_count > 0
# pytest entry points: each wrapper pins one (solver, sample-count) combination.
def test_imle_v1a():
    _test_imle_v1(select_fun=select_k, nb_samples=1)
def test_imle_v1b():
    _test_imle_v1(select_fun=mathias_select_k, nb_samples=1)
def test_imle_v1c():
    _test_imle_v1(select_fun=mathias_select_k, nb_samples=10)
def test_imle_v2a():
    _test_imle_v2(select_fun=select_k, nb_samples=1)
def test_imle_v2b():
    _test_imle_v2(select_fun=mathias_select_k, nb_samples=1)
def test_imle_v2c():
    _test_imle_v2(select_fun=mathias_select_k, nb_samples=10)
def test_imle_v3a():
    _test_imle_v3(select_fun=select_k, nb_samples=5, batch_size=3)
def test_imle_v3b():
    _test_imle_v3(select_fun=mathias_select_k, nb_samples=5, batch_size=3)
def test_imle_v3c():
    # NOTE(review): identical to test_imle_v3b (same solver, nb_samples and
    # batch_size); by analogy with the v1c/v2c variants this was perhaps
    # meant to vary nb_samples — confirm the intent.
    _test_imle_v3(select_fun=mathias_select_k, nb_samples=5, batch_size=3)
if __name__ == '__main__':
    # run this module's tests directly through pytest
    pytest.main([__file__])
    # test_imle_v2a()
    # test_imle_v1c()
    # test_imle_v3a()
| 10,550 | 36.282686 | 121 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.