repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
flair | flair-master/flair/nn/dropout.py | import torch
class LockedDropout(torch.nn.Module):
    """Implementation of locked (or variational) dropout.

    Randomly drops out entire parameters in embedding space: one Bernoulli
    mask is drawn per sample (or per time step, if ``batch_first=False``) and
    reused across the other axis. Surviving activations are rescaled by
    1 / (1 - dropout_rate) so the expected magnitude is unchanged
    (inverted dropout).
    """

    def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False) -> None:
        """
        :param dropout_rate: probability of zeroing an embedding dimension.
        :param batch_first: if True, input is (batch, seq, features) and the mask
            is shared across the sequence axis; if False, input is (seq, batch, features).
        :param inplace: kept for API compatibility; only reported by extra_repr().
        """
        super().__init__()
        self.dropout_rate = dropout_rate
        self.batch_first = batch_first
        self.inplace = inplace

    def forward(self, x):
        # no-op in eval mode or when the rate is 0 (or otherwise falsy)
        if not self.training or not self.dropout_rate:
            return x

        # draw a Bernoulli keep-mask that is broadcast over one axis
        # (x.new_empty replaces the deprecated x.data.new(...) idiom)
        if not self.batch_first:
            m = x.new_empty(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout_rate)
        else:
            m = x.new_empty(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate)

        # rescale so E[mask * x] == x; detach() replaces the deprecated
        # torch.autograd.Variable(m, requires_grad=False) wrapper (a no-op since torch 0.4)
        mask = m.detach() / (1 - self.dropout_rate)
        mask = mask.expand_as(x)
        return mask * x

    def extra_repr(self):
        inplace_str = ", inplace" if self.inplace else ""
        return f"p={self.dropout_rate}{inplace_str}"
class WordDropout(torch.nn.Module):
    """Implementation of word dropout.

    Randomly drops out entire words (or characters) in embedding space:
    one Bernoulli draw per position zeroes the whole embedding vector.
    Unlike standard dropout, survivors are NOT rescaled.
    """

    def __init__(self, dropout_rate=0.05, inplace=False) -> None:
        """
        :param dropout_rate: probability of dropping an entire position.
        :param inplace: kept for API compatibility; only reported by extra_repr().
        """
        super().__init__()
        self.dropout_rate = dropout_rate
        self.inplace = inplace

    def forward(self, x):
        # no-op in eval mode or when the rate is 0 (or otherwise falsy)
        if not self.training or not self.dropout_rate:
            return x

        # keep-mask of shape (batch, seq, 1), broadcast over the feature axis;
        # x.new_empty + detach replace the deprecated x.data.new / Variable idioms
        mask = x.new_empty(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate).detach()
        return mask * x

    def extra_repr(self):
        inplace_str = ", inplace" if self.inplace else ""
        return f"p={self.dropout_rate}{inplace_str}"
| 1,747 | 29.666667 | 88 | py |
flair | flair-master/flair/nn/recurrent.py | from torch import nn
# Registry of supported recurrent layers: lowercase name -> (module class,
# number of hidden-state tensors the module carries; LSTM has hidden + cell
# state, GRU only a hidden state).
rnn_layers = {"lstm": (nn.LSTM, 2), "gru": (nn.GRU, 1)}


def create_recurrent_layer(layer_type, initial_size, hidden_size, nlayers, dropout=0, **kwargs):
    """Instantiate a recurrent layer from the registry.

    :param layer_type: "lstm" or "gru" (case-insensitive).
    :param initial_size: input feature size of the layer.
    :param hidden_size: hidden state size of the layer.
    :param nlayers: number of stacked recurrent layers.
    :param dropout: inter-layer dropout rate (ignored for a single layer).
    :return: tuple of (instantiated module, number of hidden-state tensors).
    """
    key = layer_type.lower()
    assert key in rnn_layers
    factory, state_count = rnn_layers[key]
    # dropout only applies between stacked layers; force it to zero for a
    # single layer (torch would otherwise warn)
    effective_dropout = 0 if nlayers == 1 else dropout
    layer = factory(initial_size, hidden_size, nlayers, dropout=effective_dropout, **kwargs)
    return layer, state_count
| 437 | 28.2 | 96 | py |
flair | flair-master/flair/nn/decoder.py | import logging
from typing import List, Optional
import torch
import flair
from flair.data import Dictionary, Sentence
from flair.embeddings import Embeddings
from flair.nn.distance import (
CosineDistance,
EuclideanDistance,
HyperbolicDistance,
LogitCosineDistance,
NegativeScaledDotProduct,
)
from flair.training_utils import store_embeddings
logger = logging.getLogger("flair")
class PrototypicalDecoder(torch.nn.Module):
    """Decoder that scores embeddings by (negative) distance to learned class prototypes.

    Each class is represented by one prototype vector. An input embedding is
    optionally projected into the prototype space, its distance to every
    prototype is computed, and the negated distances are returned as scores.
    """

    def __init__(
        self,
        num_prototypes: int,
        embeddings_size: int,
        prototype_size: Optional[int] = None,
        distance_function: str = "euclidean",
        use_radius: Optional[bool] = False,
        min_radius: Optional[int] = 0,
        unlabeled_distance: Optional[float] = None,
        unlabeled_idx: Optional[int] = None,
        learning_mode: Optional[str] = "joint",
        normal_distributed_initial_prototypes: bool = False,
    ) -> None:
        """Initializes a PrototypicalDecoder.

        :param num_prototypes: number of prototypes (one per class).
        :param embeddings_size: dimensionality of incoming embeddings.
        :param prototype_size: dimensionality of the prototype space; defaults to
            embeddings_size. If different, a linear projection is learned.
        :param distance_function: one of "euclidean", "cosine", "logit_cosine",
            "hyperbolic" or "dot_product".
        :param use_radius: if True, learn a per-prototype radius that rescales distances.
        :param min_radius: lower bound added to the softplus of the learned radius.
        :param unlabeled_distance: fixed distance assigned to the "unlabeled" prototype.
        :param unlabeled_idx: index of the "unlabeled" prototype; must be set iff
            unlabeled_distance is set.
        :param learning_mode: which parts receive gradients ("joint",
            "learn_only_prototypes", "learn_only_map_and_prototypes",
            "learn_only_embeddings_and_map").
        :param normal_distributed_initial_prototypes: initialize prototypes from a
            standard normal distribution instead of all-ones.
        :raises KeyError: if distance_function is not one of the supported names.
        """
        super().__init__()

        if not prototype_size:
            prototype_size = embeddings_size

        self.prototype_size = prototype_size

        # optional metric space decoder if prototypes have different length than embedding
        self.metric_space_decoder: Optional[torch.nn.Linear] = None
        if prototype_size != embeddings_size:
            self.metric_space_decoder = torch.nn.Linear(embeddings_size, prototype_size)
            torch.nn.init.xavier_uniform_(self.metric_space_decoder.weight)

        # create initial prototypes for all classes (all initial prototypes are a vector of all 1s)
        self.prototype_vectors = torch.nn.Parameter(torch.ones(num_prototypes, prototype_size), requires_grad=True)

        # if set, create initial prototypes from normal distribution
        if normal_distributed_initial_prototypes:
            self.prototype_vectors = torch.nn.Parameter(torch.normal(torch.zeros(num_prototypes, prototype_size)))

        # if set, use a learnable radius per prototype
        self.prototype_radii: Optional[torch.nn.Parameter] = None
        if use_radius:
            self.prototype_radii = torch.nn.Parameter(torch.ones(num_prototypes), requires_grad=True)

        self.min_radius = min_radius
        self.learning_mode = learning_mode

        assert (unlabeled_idx is None) == (
            unlabeled_distance is None
        ), "'unlabeled_idx' and 'unlabeled_distance' should either both be set or both not be set."

        self.unlabeled_idx = unlabeled_idx
        self.unlabeled_distance = unlabeled_distance

        self._distance_function = distance_function
        self.distance: Optional[torch.nn.Module] = None
        if distance_function.lower() == "hyperbolic":
            self.distance = HyperbolicDistance()
        elif distance_function.lower() == "cosine":
            self.distance = CosineDistance()
        elif distance_function.lower() == "logit_cosine":
            self.distance = LogitCosineDistance()
        elif distance_function.lower() == "euclidean":
            self.distance = EuclideanDistance()
        elif distance_function.lower() == "dot_product":
            self.distance = NegativeScaledDotProduct()
        else:
            raise KeyError(f"Distance function {distance_function} not found.")

        # all parameters will be pushed internally to the specified device
        self.to(flair.device)

    @property
    def num_prototypes(self):
        """Number of prototypes (equals the number of classes)."""
        return self.prototype_vectors.size(0)

    def forward(self, embedded):
        """Computes class scores as negated distances to all prototypes.

        :param embedded: tensor of input embeddings.
        :return: scores with one column per prototype (higher = closer).
        """
        if self.learning_mode == "learn_only_map_and_prototypes":
            embedded = embedded.detach()

        # decode embeddings into prototype space
        encoded = self.metric_space_decoder(embedded) if self.metric_space_decoder is not None else embedded

        prot = self.prototype_vectors
        radii = self.prototype_radii

        if self.learning_mode == "learn_only_prototypes":
            encoded = encoded.detach()

        if self.learning_mode == "learn_only_embeddings_and_map":
            prot = prot.detach()
            if radii is not None:
                radii = radii.detach()

        distance = self.distance(encoded, prot)

        if radii is not None:
            distance /= self.min_radius + torch.nn.functional.softplus(radii)

        # if unlabeled distance is set, mask out loss to unlabeled class prototype.
        # BUGFIX: compare against None (the init assert pairs unlabeled_idx and
        # unlabeled_distance both-set-or-both-None), so a configured distance of
        # 0.0 is honored instead of being skipped by a falsy check.
        if self.unlabeled_distance is not None:
            distance[..., self.unlabeled_idx] = self.unlabeled_distance

        scores = -distance

        return scores
class LabelVerbalizerDecoder(torch.nn.Module):
    """Decoder that scores inputs against embedded label verbalizations (bi-encoder / siamese idea).

    Usable for all classification tasks in flair: every label of the label
    dictionary is turned into a short text (e.g. "B-PER" becomes "begin PER"),
    the texts are embedded with the given label embeddings, and scores are the
    dot products between input embeddings and the label embeddings.

    Attributes:
        label_embedding: embeddings used to encode the verbalized labels.
        verbalized_labels: one Sentence per entry of the label dictionary.

    Example:
        label_dictionary = corpus.make_label_dictionary("ner")
        label_encoder = TransformerWordEmbeddings('bert-base-uncased')
        label_verbalizer_decoder = LabelVerbalizerDecoder(label_encoder, label_dictionary)
    """

    def __init__(self, label_embedding: Embeddings, label_dictionary: Dictionary):
        super().__init__()
        self.label_embedding = label_embedding
        self.verbalized_labels: List[Sentence] = self.verbalize_labels(label_dictionary)
        self.to(flair.device)

    @staticmethod
    def verbalize_labels(label_dictionary: Dictionary) -> List[Sentence]:
        """Takes a label dictionary and returns a list of sentences with verbalized labels.

        For span dictionaries, BIOES/BIO prefixes are spelled out ("B-" -> "begin",
        "I-" -> "inside", "E-" -> "ending", "S-" -> "single", "O" -> "outside");
        all other labels are used verbatim.

        Args:
            label_dictionary (flair.data.Dictionary): The label dictionary to verbalize.

        Returns:
            A list of sentences with verbalized labels.
        """
        prefix_words = {"B": "begin", "I": "inside", "E": "ending", "S": "single"}
        texts = []
        for byte_label, idx in label_dictionary.item2idx.items():
            decoded = byte_label.decode("utf-8")
            if not label_dictionary.span_labels:
                texts.append(decoded)
            elif decoded == "O":
                texts.append("outside")
            elif decoded[:2] in ("B-", "I-", "E-", "S-"):
                texts.append(prefix_words[decoded[0]] + " " + decoded.split("-")[1])
            else:
                texts.append(decoded)
        return [Sentence(text) for text in texts]

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Forward pass of the label verbalizer decoder.

        Args:
            inputs (torch.Tensor): The input tensor.

        Returns:
            The scores of the decoder (dot products against label embeddings).
        """
        # (re-)embed the label verbalizations during training, or whenever some
        # label embedding is missing (e.g. after storage mode cleared them)
        needs_embedding = self.training or not self.label_embedding._everything_embedded(self.verbalized_labels)
        if needs_embedding:
            self.label_embedding.embed(self.verbalized_labels)

        label_tensor = torch.stack([label.get_embedding() for label in self.verbalized_labels])

        if self.training:
            store_embeddings(self.verbalized_labels, "none")

        return torch.mm(inputs, label_tensor.T)
| 8,375 | 38.140187 | 140 | py |
flair | flair-master/flair/nn/distance/hyperbolic.py | """Hyperbolic distances implemented in pytorch.
This module was copied from the repository the following repository:
https://github.com/asappresearch/dynamic-classification
It contains the code from the paper "Metric Learning for Dynamic Text
Classification".
https://arxiv.org/abs/1911.01026
In case this file is modified, please consider contributing to the original
repository.
It was published under MIT License:
https://github.com/asappresearch/dynamic-classification/blob/master/LICENSE.md
Source: https://github.com/asappresearch/dynamic-classification/blob/55beb5a48406c187674bea40487c011e8fa45aab/distance/hyperbolic.py
"""
import torch
from torch import Tensor, nn
# numerical fuzz used to keep arguments inside valid domains
EPSILON = 1e-5


def arccosh(x):
    """Compute the arcosh, numerically stable."""
    # clamp into the domain [1 + eps, inf) before taking logarithms
    x = torch.clamp(x, min=1 + EPSILON)
    # log(x) + log1p(sqrt(x^2 - 1) / x) == log(x + sqrt(x^2 - 1)) == arcosh(x)
    return torch.log(x) + torch.log1p(torch.sqrt(x * x - 1) / x)
def mdot(x, y):
    """Compute the inner product (Minkowski bilinear form)."""
    # metric signature (-, +, +, ...): negate the first coordinate's contribution
    metric = x.new_ones(1, x.size(1))
    metric[0, 0] = -1
    weighted = metric * x * y
    return weighted.sum(1, keepdim=True)
def dist(x, y):
    """Get the hyperbolic distance between x and y."""
    # d(x, y) = arcosh(-<x, y>) with the Minkowski inner product
    neg_inner = -mdot(x, y)
    return arccosh(neg_inner)
def project(x):
    """Project onto the hyperboloid embedded in n+1 dimensions."""
    # prepend x_0 = sqrt(1 + ||x||^2), which places the point on the hyperboloid
    squared_norm = torch.sum(x * x, 1, keepdim=True)
    first_coord = torch.sqrt(1.0 + squared_norm)
    return torch.cat([first_coord, x], 1)
def log_map(x, y):
    """Perform the log step (map y into the tangent space at x)."""
    d = dist(x, y)
    tangent_direction = y - torch.cosh(d) * x
    scale = d / torch.sinh(d)
    return scale * tangent_direction
def norm(x):
    """Compute the norm (induced by the Minkowski inner product)."""
    self_inner = mdot(x, x)
    return torch.abs(self_inner).sqrt()
def exp_map(x, y):
    """Perform the exp step (map tangent vector y at x back to the manifold)."""
    # clamp the norm to avoid division by zero for near-zero tangent vectors
    magnitude = torch.clamp(norm(y), min=EPSILON)
    return torch.cosh(magnitude) * x + (torch.sinh(magnitude) / magnitude) * y
def loss(x, y):
    """Get the loss for the optimizer (sum of squared hyperbolic distances)."""
    squared = dist(x, y) ** 2
    return squared.sum()
class HyperbolicDistance(nn.Module):
    """Implement a HyperbolicDistance object."""

    def forward(self, mat_1: Tensor, mat_2: Tensor) -> Tensor:
        """Returns the squared hyperbolic distance between each element in mat_1 and each element in mat_2.

        Parameters
        ----------
        mat_1: torch.Tensor
            matrix of shape (n_1, n_features)
        mat_2: torch.Tensor
            matrix of shape (n_2, n_features)

        Returns:
        -------
        dist: torch.Tensor
            distance matrix of shape (n_1, n_2)
        """
        # Recover the implicit first (time-like) hyperboloid coordinate
        first_coord_1 = (1 + mat_1.pow(2).sum(dim=1, keepdim=True)).sqrt()
        first_coord_2 = (1 + mat_2.pow(2).sum(dim=1, keepdim=True)).sqrt()

        # Minkowski bilinear form: time-like part minus space-like part
        time_part = first_coord_1.mm(first_coord_2.t())  # n_1 x n_2
        space_part = mat_1[:, 1:].mm(mat_2[:, 1:].t())  # n_1 x n_2

        # arcosh of the (negated) bilinear form, squared
        return arccosh(time_part - space_part).pow(2)
class HyperbolicMean(nn.Module):
    """Compute the mean point in the hyperboloid model."""

    def forward(self, data: Tensor) -> Tensor:
        """Performs a forward pass through the network.

        Iteratively refines a mean of the input points on the hyperboloid
        using the module-level log/exp maps (Riemannian gradient descent).

        Parameters
        ----------
        data : torch.Tensor
            The input data, as a float tensor

        Returns:
        -------
        torch.Tensor
            The encoded output, as a float tensor
        """
        # fewer refinement iterations during training (speed); more at eval time
        n_iter = 5 if self.training else 100

        # Project the input data to n+1 dimensions
        projected = project(data)

        # initial guess: Euclidean mean, renormalized onto the hyperboloid
        mean = torch.mean(projected, 0, keepdim=True)
        mean = mean / norm(mean)

        r = 1e-2  # fixed step size for the gradient step
        for _i in range(n_iter):
            # gradient of the sum of squared distances at the current mean
            g = -2 * torch.mean(log_map(mean, projected), 0, keepdim=True)
            # step along the manifold, then renormalize to stay on it
            mean = exp_map(mean, -r * g)
            mean = mean / norm(mean)

        # The first dimension, is recomputed in the distance module
        return mean.squeeze()[1:]
| 3,718 | 25.949275 | 132 | py |
flair | flair-master/flair/nn/distance/euclidean.py | """Euclidean distances implemented in pytorch.
This module was copied from the repository the following repository:
https://github.com/asappresearch/dynamic-classification
It contains the code from the paper "Metric Learning for Dynamic Text
Classification".
https://arxiv.org/abs/1911.01026
In case this file is modified, please consider contributing to the original
repository.
It was published under MIT License:
https://github.com/asappresearch/dynamic-classification/blob/master/LICENSE.md
Source: https://github.com/asappresearch/dynamic-classification/blob/55beb5a48406c187674bea40487c011e8fa45aab/distance/euclidean.py
"""
import torch
from torch import Tensor, nn
class EuclideanDistance(nn.Module):
    """Implement a EuclideanDistance object."""

    def forward(self, mat_1: Tensor, mat_2: Tensor) -> Tensor:
        """Returns the squared euclidean distance between each element in mat_1 and each element in mat_2.

        Parameters
        ----------
        mat_1: torch.Tensor
            matrix of shape (n_1, n_features)
        mat_2: torch.Tensor
            matrix of shape (n_2, n_features)

        Returns:
        -------
        dist: torch.Tensor
            distance matrix of shape (n_1, n_2)
        """
        # broadcast to (n_1, n_2, n_features), square the pairwise differences
        # and reduce over the feature axis
        diffs = mat_1.unsqueeze(1) - mat_2.unsqueeze(0)
        return diffs.pow(2).sum(dim=-1)
class EuclideanMean(nn.Module):
    """Implement a EuclideanMean object."""

    def forward(self, data: Tensor) -> Tensor:
        """Performs a forward pass through the network.

        Parameters
        ----------
        data : torch.Tensor
            The input data, as a float tensor

        Returns:
        -------
        torch.Tensor
            The encoded output, as a float tensor
        """
        # arithmetic mean over the batch (first) axis
        return torch.mean(data, dim=0)
| 1,839 | 26.462687 | 131 | py |
flair | flair-master/flair/nn/distance/cosine.py | import torch
# Source: https://github.com/UKPLab/sentence-transformers/blob/master/sentence_transformers/util.py#L23
def dot_product(a: torch.Tensor, b: torch.Tensor, normalize=False):
    """Computes dot product for pairs of vectors.

    :param normalize: Vectors are normalized (leads to cosine similarity)
    :return: Matrix with res[i][j] = dot_product(a[i], b[j])
    """
    # promote 1-D inputs to single-row matrices
    if a.dim() == 1:
        a = a.unsqueeze(0)
    if b.dim() == 1:
        b = b.unsqueeze(0)
    if normalize:
        # L2-normalize rows so the dot product becomes cosine similarity
        a = torch.nn.functional.normalize(a, p=2, dim=1)
        b = torch.nn.functional.normalize(b, p=2, dim=1)
    return a @ b.t()
class CosineDistance(torch.nn.Module):
    """Distance module returning the negated cosine similarity matrix."""

    def forward(self, a, b):
        similarity = dot_product(a, b, normalize=True)
        return -similarity
class LogitCosineDistance(torch.nn.Module):
    """Rescales cosine similarity from [-1, 1] into [0, 1] and applies the logit."""

    def forward(self, a, b):
        rescaled = 0.5 - 0.5 * dot_product(a, b, normalize=True)
        return torch.logit(rescaled)
class NegativeScaledDotProduct(torch.nn.Module):
    """Dot product scaled by 1/sqrt(feature dimension), negated."""

    def forward(self, a, b):
        scale = torch.sqrt(torch.tensor(a.size(-1)))
        return -dot_product(a, b, normalize=False) / scale
| 1,129 | 27.974359 | 103 | py |
flair | flair-master/flair/models/text_regression_model.py | import logging
import typing
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair
import flair.embeddings
from flair.data import Corpus, Dictionary, Sentence, _iter_dataset
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings.base import load_embeddings
from flair.nn.model import ReduceTransformerVocabMixin
from flair.training_utils import MetricRegression, Result, store_embeddings
log = logging.getLogger("flair")
class TextRegressor(flair.nn.Model[Sentence], ReduceTransformerVocabMixin):
    """Regression model that predicts one continuous value per sentence.

    Each sentence is embedded with document embeddings and a single linear
    output unit maps the embedding to a scalar. Trained with summed mean
    squared error; evaluation reports MSE/MAE/Pearson/Spearman.
    """

    def __init__(
        self,
        document_embeddings: flair.embeddings.DocumentEmbeddings,
        label_name: str = "label",
    ) -> None:
        """Initializes a TextRegressor.

        :param document_embeddings: embeddings used to represent each sentence
        :param label_name: identifier of the label type that is predicted
        """
        super().__init__()

        self.document_embeddings: flair.embeddings.DocumentEmbeddings = document_embeddings
        self.label_name = label_name

        # single output unit: one continuous value per sentence
        self.decoder = nn.Linear(self.document_embeddings.embedding_length, 1)

        nn.init.xavier_uniform_(self.decoder.weight)

        # summed (not averaged) squared error; averaging happens in evaluate()
        self.loss_function = nn.MSELoss(reduction="sum")

        # auto-spawn on GPU if available
        self.to(flair.device)

    @property
    def label_type(self):
        # the label type this model predicts
        return self.label_name

    def _prepare_tensors(self, sentences: List[Sentence]) -> Tuple[torch.Tensor]:
        """Embeds the sentences and stacks their embeddings into a single tensor."""
        self.document_embeddings.embed(sentences)
        embedding_names = self.document_embeddings.get_names()
        text_embedding_list = [sentence.get_embedding(embedding_names).unsqueeze(0) for sentence in sentences]
        text_embedding_tensor = torch.cat(text_embedding_list, 0).to(flair.device)
        return (text_embedding_tensor,)

    def forward(self, *args: torch.Tensor) -> torch.Tensor:
        """Maps embedded sentences to scalar predictions of shape (n, 1)."""
        (text_embedding_tensor,) = args
        label_scores = self.decoder(text_embedding_tensor)
        return label_scores

    def forward_loss(self, sentences: List[Sentence]) -> Tuple[torch.Tensor, int]:
        """Computes the summed MSE loss for a batch; returns (loss, batch size)."""
        labels = self._labels_to_tensor(sentences)
        text_embedding_tensor = self._prepare_tensors(sentences)
        scores = self.forward(*text_embedding_tensor)
        return self.loss_function(scores.squeeze(1), labels), len(sentences)

    def _labels_to_tensor(self, sentences: List[Sentence]):
        """Collects the float-valued gold labels of all sentences into one tensor."""
        indices = [
            torch.tensor([float(label.value) for label in sentence.labels], dtype=torch.float) for sentence in sentences
        ]
        vec = torch.cat(indices, 0).to(flair.device)
        return vec

    def predict(
        self,
        sentences: Union[Sentence, List[Sentence]],
        mini_batch_size: int = 32,
        verbose: bool = False,
        label_name: Optional[str] = None,
        embedding_storage_mode="none",
    ) -> List[Sentence]:
        """Predicts a continuous value for each sentence and stores it as a label.

        :param sentences: a single sentence or a list of sentences
        :param mini_batch_size: number of sentences per inference batch
        :param verbose: if True, show a progress bar during inference
        :param label_name: label type under which predictions are stored
            (defaults to this model's label_name)
        :param embedding_storage_mode: storage mode passed to store_embeddings
        :return: the input sentences, annotated with predictions
        """
        if label_name is None:
            label_name = self.label_name if self.label_name is not None else "label"

        with torch.no_grad():
            if not isinstance(sentences, list):
                sentences = [sentences]

            if not sentences:
                return sentences

            Sentence.set_context_for_sentences(sentences)
            filtered_sentences = self._filter_empty_sentences(sentences)

            # sort by length to reduce padding within each batch
            reordered_sentences = sorted(filtered_sentences, key=lambda s: len(s), reverse=True)

            if len(reordered_sentences) == 0:
                return sentences

            dataloader = DataLoader(
                dataset=FlairDatapointDataset(reordered_sentences),
                batch_size=mini_batch_size,
            )
            # progress bar for verbosity
            if verbose:
                progress_bar = tqdm(dataloader)
                progress_bar.set_description("Batch inference")
                dataloader = progress_bar

            for batch in dataloader:
                # stop if all sentences are empty
                if not batch:
                    continue

                (sentence_tensor,) = self._prepare_tensors(batch)
                scores = self.forward(sentence_tensor)

                for sentence, score in zip(batch, scores.tolist()):
                    sentence.set_label(label_name, value=str(score[0]))

                # clearing token embeddings to save memory
                store_embeddings(batch, storage_mode=embedding_storage_mode)

            return sentences

    def forward_labels_and_loss(self, sentences: List[Sentence]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Returns (scores, loss) for a batch of labeled sentences."""
        labels = self._labels_to_tensor(sentences)
        text_embedding_tensor = self._prepare_tensors(sentences)
        scores = self.forward(*text_embedding_tensor)
        return scores, self.loss_function(scores.squeeze(1), labels)

    def evaluate(
        self,
        data_points: Union[List[Sentence], Dataset],
        gold_label_type: str,
        out_path: Optional[Union[str, Path]] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: Optional[List[str]] = None,
        gold_label_dictionary: Optional[Dictionary] = None,
        return_loss: bool = True,
        **kwargs,
    ) -> Result:
        """Evaluates the model on a dataset and computes regression metrics.

        :param data_points: sentences or a dataset to evaluate on
        :param gold_label_type: label type containing the gold values
        :param out_path: if set, per-sentence predictions are written to this file
        :param embedding_storage_mode: storage mode passed to store_embeddings
        :param mini_batch_size: batch size used during evaluation
        :param main_evaluation_metric: kept for API compatibility; unused here
        :param exclude_labels: kept for API compatibility; unused here
        :param gold_label_dictionary: kept for API compatibility; unused here
        :param return_loss: kept for API compatibility; the loss is always computed
        :return: Result with the Pearson correlation as main score
        """
        # BUGFIX: avoid a mutable default argument; normalize None to []
        exclude_labels = exclude_labels if exclude_labels is not None else []

        # read Dataset into data loader, if list of sentences passed, make Dataset first
        if not isinstance(data_points, Dataset):
            data_points = FlairDatapointDataset(data_points)
        data_loader = DataLoader(data_points, batch_size=mini_batch_size)

        with torch.no_grad():
            eval_loss = torch.zeros(1, device=flair.device)

            metric = MetricRegression("Evaluation")

            lines: List[str] = []
            total_count = 0
            for batch in data_loader:
                if isinstance(batch, Sentence):
                    batch = [batch]

                scores, loss = self.forward_labels_and_loss(batch)

                true_values = []
                for sentence in batch:
                    total_count += 1
                    for label in sentence.get_labels(gold_label_type):
                        true_values.append(float(label.value))

                results = scores[:, 0].cpu().tolist()

                eval_loss += loss

                metric.true.extend(true_values)
                metric.pred.extend(results)

                for sentence, prediction, true_value in zip(batch, results, true_values):
                    eval_line = f"{sentence.to_original_text()}\t{true_value}\t{prediction}\n"
                    lines.append(eval_line)

                store_embeddings(batch, embedding_storage_mode)

            # average the summed loss; guard against an empty dataset
            if total_count > 0:
                eval_loss /= total_count

            # TODO: not saving lines yet
            if out_path is not None:
                with open(out_path, "w", encoding="utf-8") as outfile:
                    outfile.write("".join(lines))

            detailed_result = (
                f"AVG: mse: {metric.mean_squared_error():.4f} - "
                f"mae: {metric.mean_absolute_error():.4f} - "
                f"pearson: {metric.pearsonr():.4f} - "
                f"spearman: {metric.spearmanr():.4f}"
            )

            result: Result = Result(
                main_score=metric.pearsonr(),
                detailed_results=detailed_result,
                scores={
                    "loss": eval_loss.item(),
                    "mse": metric.mean_squared_error(),
                    "mae": metric.mean_absolute_error(),
                    "pearson": metric.pearsonr(),
                    "spearman": metric.spearmanr(),
                },
            )

            return result

    def _get_state_dict(self):
        """Serializes the model, including embeddings and the label name."""
        model_state = {
            **super()._get_state_dict(),
            "document_embeddings": self.document_embeddings.save_embeddings(use_state_dict=False),
            "label_name": self.label_type,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Re-instantiates the model from a serialized state (see _get_state_dict)."""
        embeddings = state["document_embeddings"]
        if isinstance(embeddings, dict):
            embeddings = load_embeddings(embeddings)
        return super()._init_model_with_state_dict(
            state, document_embeddings=embeddings, label_name=state.get("label_name"), **kwargs
        )

    @staticmethod
    def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
        """Drops tokenless sentences and logs how many were removed."""
        filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
        if len(sentences) != len(filtered_sentences):
            log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.")
        return filtered_sentences

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TextRegressor":
        from typing import cast

        return cast("TextRegressor", super().load(model_path=model_path))

    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        # yields the token texts of every sentence (used for vocab reduction)
        for sentence in _iter_dataset(corpus.get_all_sentences()):
            yield [t.text for t in sentence]
| 9,039 | 36.201646 | 120 | py |
flair | flair-master/flair/models/pairwise_classification_model.py | import typing
from typing import List
import torch
import flair.embeddings
import flair.nn
from flair.data import Corpus, Sentence, TextPair, _iter_dataset
class TextPairClassifier(flair.nn.DefaultClassifier[TextPair, TextPair]):
    """Text Pair Classification Model for tasks such as Recognizing Textual Entailment, built upon TextClassifier.

    The model takes document embeddings and puts resulting text representation(s) into a linear layer to get the
    actual class label. We provide two ways to embed the DataPairs: Either by embedding both DataPoints
    and concatenating the resulting vectors ("embed_separately=True") or by concatenating the DataPoints and embedding
    the resulting vector ("embed_separately=False").
    """

    def __init__(
        self,
        embeddings: flair.embeddings.DocumentEmbeddings,
        label_type: str,
        embed_separately: bool = False,
        **classifierargs,
    ) -> None:
        """Initializes a TextPairClassifier.

        :param embeddings: embeddings used to embed each data point
        :param label_type: string identifier of the label type to predict
        :param embed_separately: if True, embed both texts separately and concatenate
            the two vectors; if False, join the texts with a separator and embed once
        :param classifierargs: forwarded to DefaultClassifier (e.g. label_dictionary,
            multi_label, multi_label_threshold, loss_weights)
        """
        super().__init__(
            **classifierargs,
            embeddings=embeddings,
            # separate embedding doubles the final representation size
            final_embedding_size=2 * embeddings.embedding_length if embed_separately else embeddings.embedding_length,
            should_embed_sentence=False,
        )

        self._label_type = label_type

        self.embed_separately = embed_separately

        if not self.embed_separately:
            # set separator to concatenate two sentences
            self.sep = " "
            if isinstance(
                self.embeddings,
                flair.embeddings.document.TransformerDocumentEmbeddings,
            ):
                if self.embeddings.tokenizer.sep_token:
                    # use the transformer's own separator token if it defines one
                    self.sep = " " + str(self.embeddings.tokenizer.sep_token) + " "
                else:
                    self.sep = " [SEP] "

        # auto-spawn on GPU if available
        self.to(flair.device)

    @property
    def label_type(self):
        # string identifier of the label type this model predicts
        return self._label_type

    def _get_data_points_from_sentence(self, sentence: TextPair) -> List[TextPair]:
        # the pair itself is the single classification data point
        return [sentence]

    def _get_embedding_for_data_point(self, prediction_data_point: TextPair) -> torch.Tensor:
        """Returns one embedding vector for the pair (see embed_separately)."""
        embedding_names = self.embeddings.get_names()
        if self.embed_separately:
            # embed both halves independently and concatenate the vectors
            self.embeddings.embed([prediction_data_point.first, prediction_data_point.second])
            return torch.cat(
                [
                    prediction_data_point.first.get_embedding(embedding_names),
                    prediction_data_point.second.get_embedding(embedding_names),
                ],
                0,
            )
        else:
            # join both texts with the separator and embed the joint sentence
            concatenated_sentence = Sentence(
                prediction_data_point.first.to_tokenized_string()
                + self.sep
                + prediction_data_point.second.to_tokenized_string(),
                use_tokenizer=False,
            )
            self.embeddings.embed(concatenated_sentence)
            return concatenated_sentence.get_embedding(embedding_names)

    def _get_state_dict(self):
        """Serializes the model, including embeddings and configuration."""
        model_state = {
            **super()._get_state_dict(),
            "document_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_dictionary": self.label_dictionary,
            "label_type": self.label_type,
            "embed_separately": self.embed_separately,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Re-instantiates the model from a serialized state (see _get_state_dict)."""
        return super()._init_model_with_state_dict(
            state,
            embeddings=state.get("document_embeddings"),
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type"),
            embed_separately=state.get("embed_separately"),
            **kwargs,
        )

    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        # yields the token texts of both pair halves (used for vocab reduction)
        for sentence_pair in _iter_dataset(corpus.get_all_sentences()):
            yield [t.text for t in sentence_pair.first]
            yield [t.text for t in sentence_pair.second]
| 4,628 | 38.905172 | 118 | py |
flair | flair-master/flair/models/word_tagger_model.py | import logging
from pathlib import Path
from typing import Any, Dict, List, Union
import torch
import flair.nn
from flair.data import Dictionary, Sentence, Span, Token
from flair.embeddings import TokenEmbeddings
log = logging.getLogger("flair")
def WordTagger(embeddings, tag_dictionary, tag_type, **classifierargs):
    """Deprecated factory kept for backward compatibility.

    Constructs and returns a TokenClassifier; use TokenClassifier directly.
    """
    from warnings import warn

    # use the proper warning category and point the warning at the caller's
    # line (the original emitted a generic UserWarning at this frame)
    warn(
        "The WordTagger class is deprecated after Flair version 0.12.2. Use TokenClassifier instead!",
        DeprecationWarning,
        stacklevel=2,
    )
    return TokenClassifier(
        embeddings=embeddings, label_dictionary=tag_dictionary, label_type=tag_type, **classifierargs
    )
class TokenClassifier(flair.nn.DefaultClassifier[Sentence, Token]):
"""This is a simple class of models that tags individual words in text."""
def __init__(
self,
embeddings: TokenEmbeddings,
label_dictionary: Dictionary,
label_type: str,
span_encoding: str = "BIOES",
**classifierargs,
) -> None:
"""Initializes a TokenClassifier.
:param embeddings: word embeddings used in tagger
:param tag_dictionary: dictionary of tags you want to predict
:param tag_type: string identifier for tag type
"""
# if the classifier predicts BIO/BIOES span labels, the internal label dictionary must be computed
if label_dictionary.span_labels:
internal_label_dictionary = self._create_internal_label_dictionary(label_dictionary, span_encoding)
else:
internal_label_dictionary = label_dictionary
super().__init__(
embeddings=embeddings,
label_dictionary=internal_label_dictionary,
final_embedding_size=embeddings.embedding_length,
**classifierargs,
)
# fields in case this is a span-prediction problem
self.span_prediction_problem = self._determine_if_span_prediction_problem(internal_label_dictionary)
self.span_encoding = span_encoding
# the label type
self._label_type: str = label_type
# all parameters will be pushed internally to the specified device
self.to(flair.device)
@staticmethod
def _create_internal_label_dictionary(label_dictionary, span_encoding):
internal_label_dictionary = Dictionary(add_unk=False)
for label in label_dictionary.get_items():
if label == "<unk>":
continue
internal_label_dictionary.add_item("O")
if span_encoding == "BIOES":
internal_label_dictionary.add_item("S-" + label)
internal_label_dictionary.add_item("B-" + label)
internal_label_dictionary.add_item("E-" + label)
internal_label_dictionary.add_item("I-" + label)
if span_encoding == "BIO":
internal_label_dictionary.add_item("B-" + label)
internal_label_dictionary.add_item("I-" + label)
return internal_label_dictionary
def _determine_if_span_prediction_problem(self, dictionary: Dictionary) -> bool:
return any(item.startswith(("B-", "S-", "I-")) for item in dictionary.get_items())
def _get_state_dict(self):
model_state = {
**super()._get_state_dict(),
"embeddings": self.embeddings.save_embeddings(use_state_dict=False),
"label_dictionary": self.label_dictionary,
"label_type": self.label_type,
}
return model_state
@classmethod
def _init_model_with_state_dict(cls, state, **kwargs):
return super()._init_model_with_state_dict(
state,
embeddings=state.get("embeddings"),
label_dictionary=state.get("label_dictionary"),
label_type=state.get("label_type"),
**kwargs,
)
def _get_embedding_for_data_point(self, prediction_data_point: Token) -> torch.Tensor:
names = self.embeddings.get_names()
return prediction_data_point.get_embedding(names)
def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Token]:
# special handling during training if this is a span prediction problem
if self.training and self.span_prediction_problem:
for token in sentence.tokens:
token.set_label(self.label_type, "O")
for span in sentence.get_spans(self.label_type):
span_label = span.get_label(self.label_type).value
if len(span) == 1:
if self.span_encoding == "BIOES":
span.tokens[0].set_label(self.label_type, "S-" + span_label)
elif self.span_encoding == "BIO":
span.tokens[0].set_label(self.label_type, "B-" + span_label)
else:
for token in span.tokens:
token.set_label(self.label_type, "I-" + span_label)
span.tokens[0].set_label(self.label_type, "B-" + span_label)
if self.span_encoding == "BIOES":
span.tokens[-1].set_label(self.label_type, "E-" + span_label)
return sentence.tokens
def _post_process_batch_after_prediction(self, batch, label_name):
if self.span_prediction_problem:
for sentence in batch:
# internal variables
previous_tag = "O-"
current_span: List[Token] = []
for token in sentence:
bioes_tag = token.get_label(label_name).value
# non-set tags are OUT tags
if bioes_tag == "" or bioes_tag == "O" or bioes_tag == "_":
bioes_tag = "O-"
# anything that is not OUT is IN
in_span = bioes_tag != "O-"
# does this prediction start a new span?
starts_new_span = False
if bioes_tag[:2] in {"B-", "S-"} or (
in_span
and previous_tag[2:] != bioes_tag[2:]
and (bioes_tag[:2] == "I-" or previous_tag[2:] == "S-")
):
# B- and S- always start new spans
# if the predicted class changes, I- starts a new span
# if the predicted class changes and S- was previous tag, start a new span
starts_new_span = True
# if an existing span is ended (either by reaching O or starting a new span)
if (starts_new_span or not in_span) and len(current_span) > 0:
sentence[current_span[0].idx - 1 : current_span[-1].idx].set_label(label_name, previous_tag[2:])
# reset for-loop variables for new span
current_span = []
if in_span:
current_span.append(token)
# remember previous tag
previous_tag = bioes_tag
token.remove_labels(label_name)
token.remove_labels(self.label_type)
# if there is a span at end of sentence, add it
if len(current_span) > 0:
sentence[current_span[0].idx - 1 : current_span[-1].idx].set_label(label_name, previous_tag[2:])
@property
def label_type(self):
return self._label_type
def _print_predictions(self, batch, gold_label_type):
lines = []
if self.span_prediction_problem:
for datapoint in batch:
# all labels default to "O"
for token in datapoint:
token.set_label("gold_bio", "O")
token.set_label("predicted_bio", "O")
# set gold token-level
for gold_label in datapoint.get_labels(gold_label_type):
gold_span: Span = gold_label.data_point
prefix = "B-"
for token in gold_span:
token.set_label("gold_bio", prefix + gold_label.value)
prefix = "I-"
# set predicted token-level
for predicted_label in datapoint.get_labels("predicted"):
predicted_span: Span = predicted_label.data_point
prefix = "B-"
for token in predicted_span:
token.set_label("predicted_bio", prefix + predicted_label.value)
prefix = "I-"
# now print labels in CoNLL format
for token in datapoint:
eval_line = (
f"{token.text} "
f"{token.get_label('gold_bio').value} "
f"{token.get_label('predicted_bio').value}\n"
)
lines.append(eval_line)
lines.append("\n")
else:
for datapoint in batch:
# print labels in CoNLL format
for token in datapoint:
eval_line = (
f"{token.text} "
f"{token.get_label(gold_label_type).value} "
f"{token.get_label('predicted').value}\n"
)
lines.append(eval_line)
lines.append("\n")
return lines
    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TokenClassifier":
        """Load a TokenClassifier from a file path, URL, or pre-loaded state dict.

        :param model_path: path/URL of the model file, or an already-loaded state dict
        :return: the loaded TokenClassifier
        """
        from typing import cast

        return cast("TokenClassifier", super().load(model_path=model_path))
| 9,672 | 40.337607 | 120 | py |
flair | flair-master/flair/models/pairwise_regression_model.py | import typing
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair.embeddings
import flair.nn
from flair.data import Corpus, Dictionary, Sentence, TextPair, _iter_dataset
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.nn.model import ReduceTransformerVocabMixin
from flair.training_utils import MetricRegression, Result, store_embeddings
class TextPairRegressor(flair.nn.Model[TextPair], ReduceTransformerVocabMixin):
    """Text Pair Regression Model for tasks such as Semantic Textual Similarity Benchmark.

    The model takes document embeddings and puts resulting text representation(s) into a linear layer to get the
    score. We provide two ways to embed the DataPairs: Either by embedding both DataPoints
    and concatenating the resulting vectors ("embed_separately=True") or by concatenating the DataPoints and embedding
    the resulting vector ("embed_separately=False").
    """

    def __init__(
        self,
        embeddings: flair.embeddings.DocumentEmbeddings,
        label_type: str,
        embed_separately: bool = False,
        dropout: float = 0.0,
        locked_dropout: float = 0.0,
        word_dropout: float = 0.0,
        decoder: Optional[torch.nn.Module] = None,
        **classifierargs,
    ) -> None:
        """Initialize the Text Pair Regression Model.

        :param embeddings: embeddings used to embed each data point
        :param label_type: name of the label type to predict
        :param embed_separately: if True, embed both texts separately and concatenate the vectors;
            otherwise join the texts with a separator token and embed once
        :param dropout: standard dropout rate applied to the pair embedding
        :param locked_dropout: locked (variational) dropout rate
        :param word_dropout: word dropout rate
        :param decoder: optional custom decoder module; defaults to a single linear layer
        """
        super().__init__()

        self.embeddings: flair.embeddings.DocumentEmbeddings = embeddings
        self.label_name = label_type
        self.embed_separately = embed_separately

        if not self.embed_separately:
            # set separator to concatenate two sentences
            self.sep = " "
            if isinstance(
                self.embeddings,
                flair.embeddings.document.TransformerDocumentEmbeddings,
            ):
                if self.embeddings.tokenizer.sep_token:
                    self.sep = " " + str(self.embeddings.tokenizer.sep_token) + " "
                else:
                    self.sep = " [SEP] "

        self.decoder: torch.nn.Module
        if decoder is None:
            # when embedding separately, the decoder sees both vectors concatenated
            self.decoder = nn.Linear(
                2 * embeddings.embedding_length if embed_separately else embeddings.embedding_length, 1
            )
            nn.init.xavier_uniform_(self.decoder.weight)
        else:
            self.decoder = decoder

        # init dropouts
        self.dropout: torch.nn.Dropout = torch.nn.Dropout(dropout)
        self.locked_dropout = flair.nn.LockedDropout(locked_dropout)
        self.word_dropout = flair.nn.WordDropout(word_dropout)

        self.loss_function = nn.MSELoss(reduction="sum")

        # auto-spawn on GPU if available
        self.to(flair.device)

    @property
    def label_type(self):
        """The label type this regressor predicts."""
        return self.label_name

    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        """Yield the token texts of both halves of every pair in the corpus."""
        for sentence_pair in _iter_dataset(corpus.get_all_sentences()):
            yield [t.text for t in sentence_pair.first]
            yield [t.text for t in sentence_pair.second]

    def forward_loss(self, pairs: List[TextPair]) -> Tuple[torch.Tensor, int]:
        """Compute the training loss for a batch of pairs.

        :return: tuple of (summed MSE loss, number of scored targets)
        """
        loss, num = self._forward_loss_and_scores(pairs=pairs, return_num=True, return_scores=False)
        assert isinstance(loss, torch.Tensor)
        assert isinstance(num, int)
        return loss, num

    def _empty_loss_result(self, return_num: bool, return_scores: bool) -> Tuple:
        """Zero-loss result with the same tuple shape as a regular forward pass.

        Used when a batch is empty after filtering, so that callers unpacking
        (loss, num, scores) do not break.
        """
        return_value: Tuple[Any, ...] = (torch.tensor(0.0, requires_grad=True, device=flair.device),)
        if return_num:
            return_value += (1,)
        if return_scores:
            return_value += (torch.zeros(0, device=flair.device),)
        return return_value

    def _forward_loss_and_scores(self, pairs: List[TextPair], return_num=True, return_scores=True) -> Tuple:
        """Run a forward pass and return a tuple of (loss[, num][, scores])."""
        # make a forward pass to produce embedded data points and labels
        pairs = [pair for pair in pairs if self._filter_data_point(pair)]

        if len(pairs) == 0:
            # FIX: previously returned a fixed 2-tuple here, which broke the
            # 3-way unpack in evaluate() when return_scores=True
            return self._empty_loss_result(return_num, return_scores)

        # get their gold labels as a tensor
        target_tensor = self._prepare_target_tensor(pairs)

        if target_tensor.size(0) == 0:
            return self._empty_loss_result(return_num, return_scores)

        # pass data points through network to get encoded data point tensor
        data_point_tensor = self._encode_data_points(pairs)

        # decode
        scores = self.decoder(data_point_tensor)[:, 0]

        # calculate the loss
        loss, num = self._calculate_loss(scores, target_tensor)

        return_value: Tuple[Any, ...] = (loss,)

        if return_num:
            return_value += (num,)

        if return_scores:
            return_value += (scores,)

        return return_value

    def _calculate_loss(self, scores: torch.Tensor, target_tensor: torch.Tensor) -> Tuple[torch.Tensor, int]:
        """Summed MSE between predicted scores and gold targets, plus target count."""
        return self.loss_function(scores, target_tensor), target_tensor.size(0)

    def _prepare_target_tensor(self, pairs: List[TextPair]):
        """Collect the numeric gold labels of all pairs into one float tensor."""
        target_values = [
            torch.tensor([float(label.value) for label in pair.get_labels(self.label_name)], dtype=torch.float)
            for pair in pairs
        ]

        return torch.cat(target_values, 0).to(flair.device)

    def _filter_data_point(self, pair: TextPair) -> bool:
        """Keep only non-empty pairs."""
        return len(pair) > 0

    def _encode_data_points(self, data_points: List[TextPair]):
        """Embed a batch of pairs and apply the configured dropouts.

        :return: tensor of shape (batch size, embedding length)
        """
        # get a tensor of data points
        data_point_tensor = torch.stack([self._get_embedding_for_data_point(data_point) for data_point in data_points])

        # do dropout
        data_point_tensor = data_point_tensor.unsqueeze(1)
        data_point_tensor = self.dropout(data_point_tensor)
        data_point_tensor = self.locked_dropout(data_point_tensor)
        data_point_tensor = self.word_dropout(data_point_tensor)
        data_point_tensor = data_point_tensor.squeeze(1)

        return data_point_tensor

    def _get_embedding_for_data_point(self, prediction_data_point: TextPair) -> torch.Tensor:
        """Embed one pair, either as two separate documents or as one joined text."""
        embedding_names = self.embeddings.get_names()
        if self.embed_separately:
            self.embeddings.embed([prediction_data_point.first, prediction_data_point.second])
            return torch.cat(
                [
                    prediction_data_point.first.get_embedding(embedding_names),
                    prediction_data_point.second.get_embedding(embedding_names),
                ],
                0,
            )
        else:
            # join both texts with the separator token and embed once
            concatenated_sentence = Sentence(
                prediction_data_point.first.to_tokenized_string()
                + self.sep
                + prediction_data_point.second.to_tokenized_string(),
                use_tokenizer=False,
            )
            self.embeddings.embed(concatenated_sentence)
            return concatenated_sentence.get_embedding(embedding_names)

    def _get_state_dict(self):
        """Serialize model state, including the embeddings and decoder."""
        model_state = {
            **super()._get_state_dict(),
            "document_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_type": self.label_type,
            "embed_separately": self.embed_separately,
            "dropout": self.dropout.p,
            "word_dropout": self.word_dropout.dropout_rate,
            "locked_dropout": self.locked_dropout.dropout_rate,
            "decoder": self.decoder,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Re-instantiate the model from a serialized state dict."""
        # add DefaultClassifier arguments
        # NOTE(review): the state key "document_embeddings" is forwarded under that
        # name while __init__ takes "embeddings" — confirm the base class maps it.
        for arg in [
            "document_embeddings",
            "label_type",
            "embed_separately",
            "dropout",
            "word_dropout",
            "locked_dropout",
            "decoder",
        ]:
            if arg not in kwargs and arg in state:
                kwargs[arg] = state[arg]

        return super()._init_model_with_state_dict(state, **kwargs)

    def predict(
        self,
        pairs: Union[TextPair, List[TextPair]],
        mini_batch_size: int = 32,
        verbose: bool = False,
        label_name: Optional[str] = None,
        embedding_storage_mode="none",
    ) -> List[TextPair]:
        """Predict regression scores for text pairs and store them as labels.

        :param pairs: a single pair or a list of pairs to score
        :param mini_batch_size: batch size for inference
        :param verbose: if True, show a progress bar
        :param label_name: label type under which predictions are stored
            (defaults to the model's label type)
        :param embedding_storage_mode: where to keep embeddings after prediction
            ("none", "cpu" or "gpu")
        :return: the input pairs, with prediction labels attached
        """
        if label_name is None:
            label_name = self.label_name if self.label_name is not None else "label"

        with torch.no_grad():
            if isinstance(pairs, list):
                if len(pairs) == 0:
                    return []
            else:
                pairs = [pairs]

            filtered_pairs = [pair for pair in pairs if self._filter_data_point(pair)]

            if len(filtered_pairs) == 0:
                return pairs

            reordered_pairs = sorted(filtered_pairs, key=lambda pair: len(pair.first) + len(pair.second), reverse=True)

            dataloader = DataLoader(
                dataset=FlairDatapointDataset(reordered_pairs),
                batch_size=mini_batch_size,
            )

            # progress bar for verbosity
            if verbose:
                progress_bar = tqdm(dataloader)
                progress_bar.set_description("Batch inference")
                dataloader = progress_bar

            for batch in dataloader:
                # stop if all sentences are empty
                if not batch:
                    continue

                # FIX: encode only the current batch (previously all pairs were
                # re-encoded per batch, misaligning scores with batch entries)
                data_point_tensor = self._encode_data_points(batch)

                scores = self.decoder(data_point_tensor)

                for sentence, score in zip(batch, scores.tolist()):
                    sentence.set_label(label_name, value=str(score[0]))

                # clearing token embeddings to save memory
                store_embeddings(batch, storage_mode=embedding_storage_mode)

        return pairs

    def evaluate(
        self,
        data_points: Union[List[TextPair], Dataset],
        gold_label_type: str,
        out_path: Union[str, Path, None] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
        return_loss: bool = True,
        **kwargs,
    ) -> Result:
        """Evaluate the model on pairs with gold scores.

        Computes MSE, MAE, Pearson and Spearman correlation; optionally writes
        per-pair predictions to out_path.

        :return: a Result object with the correlation/error metrics
        """
        # read Dataset into data loader, if list of sentences passed, make Dataset first
        if not isinstance(data_points, Dataset):
            data_points = FlairDatapointDataset(data_points)

        data_loader = DataLoader(data_points, batch_size=mini_batch_size)

        with torch.no_grad():
            eval_loss = torch.zeros(1, device=flair.device)

            metric = MetricRegression("Evaluation")

            if out_path is not None:
                out_file = open(out_path, "w", encoding="utf-8")  # noqa: SIM115

            total_count = 0

            try:
                for batch in data_loader:
                    if isinstance(batch, Sentence):
                        batch = [batch]

                    loss, num, scores = self._forward_loss_and_scores(batch, return_scores=True)

                    true_values = []
                    for sentence in batch:
                        total_count += 1
                        for label in sentence.get_labels(gold_label_type):
                            true_values.append(float(label.value))

                    results = scores.cpu().tolist()

                    eval_loss += loss

                    metric.true.extend(true_values)
                    metric.pred.extend(results)

                    if out_path is not None:
                        for pair, prediction, true_value in zip(batch, results, true_values):
                            eval_line = "{}\t{}\t{}\t{}\n".format(
                                pair.first.to_original_text(), pair.second.to_original_text(), true_value, prediction
                            )
                            out_file.write(eval_line)

                    store_embeddings(batch, embedding_storage_mode)
            finally:
                # close the output file even if evaluation raises
                if out_path is not None:
                    out_file.close()

            eval_loss /= total_count

            detailed_result = (
                f"AVG: mse: {metric.mean_squared_error():.4f} - "
                f"mae: {metric.mean_absolute_error():.4f} - "
                f"pearson: {metric.pearsonr():.4f} - "
                f"spearman: {metric.spearmanr():.4f}"
            )

            scores = {
                "loss": eval_loss.item(),
                "mse": metric.mean_squared_error(),
                "mae": metric.mean_absolute_error(),
                "pearson": metric.pearsonr(),
                "spearman": metric.spearmanr(),
            }

            if main_evaluation_metric[0] in ("correlation", "other"):
                main_score = scores[main_evaluation_metric[1]]
            else:
                main_score = scores["spearman"]

            return Result(
                main_score=main_score,
                detailed_results=detailed_result,
                scores=scores,
            )
| 12,876 | 36.324638 | 119 | py |
flair | flair-master/flair/models/sequence_tagger_model.py | import logging
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from urllib.error import HTTPError
import torch
import torch.nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from tqdm import tqdm
import flair.nn
from flair.data import Dictionary, Label, Sentence, Span, get_spans_from_bio
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import TokenEmbeddings
from flair.file_utils import cached_path, unzip_file
from flair.models.sequence_tagger_utils.crf import CRF
from flair.models.sequence_tagger_utils.viterbi import ViterbiDecoder, ViterbiLoss
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
class SequenceTagger(flair.nn.Classifier[Sentence]):
    def __init__(
        self,
        embeddings: TokenEmbeddings,
        tag_dictionary: Dictionary,
        tag_type: str,
        use_rnn: bool = True,
        rnn: Optional[torch.nn.RNN] = None,
        rnn_type: str = "LSTM",
        tag_format: str = "BIOES",
        hidden_size: int = 256,
        rnn_layers: int = 1,
        bidirectional: bool = True,
        use_crf: bool = True,
        reproject_embeddings: bool = True,
        dropout: float = 0.0,
        word_dropout: float = 0.05,
        locked_dropout: float = 0.5,
        train_initial_hidden_state: bool = False,
        loss_weights: Optional[Dict[str, float]] = None,
        init_from_state_dict: bool = False,
        allow_unk_predictions: bool = False,
    ) -> None:
        """Sequence Tagger class for predicting labels for single tokens. Can be parameterized by several attributes.

        In case of multitask learning, pass shared embeddings or shared rnn into respective attributes.

        :param embeddings: Embeddings to use during training and prediction
        :param tag_dictionary: Dictionary containing all tags from corpus which can be predicted
        :param tag_type: type of tag which is going to be predicted in case a corpus has multiple annotations
        :param use_rnn: If true, use a RNN, else Linear layer.
        :param rnn: (Optional) Takes a torch.nn.Module as parameter by which you can pass a shared RNN between
            different tasks.
        :param rnn_type: Specifies the RNN type to use, default is 'LSTM', can choose between 'GRU' and 'RNN' as well.
        :param tag_format: span encoding scheme to use internally, either "BIOES" or "BIO"
        :param hidden_size: Hidden size of RNN layer
        :param rnn_layers: number of RNN layers
        :param bidirectional: If True, RNN becomes bidirectional
        :param use_crf: If True, use a Conditional Random Field for prediction, else linear map to tag space.
        :param reproject_embeddings: If True, add a linear layer on top of embeddings, if you want to imitate
            fine tune non-trainable embeddings.
        :param dropout: If > 0, then use dropout.
        :param word_dropout: If > 0, then use word dropout.
        :param locked_dropout: If > 0, then use locked dropout.
        :param train_initial_hidden_state: if True, trains initial hidden state of RNN
        :param loss_weights: Dictionary of weights for labels for the loss function
            (if any label's weight is unspecified it will default to 1.0)
        :param init_from_state_dict: Indicator whether we are loading a model from state dict
            since we need to transform previous models' weights into CRF instance weights
        :param allow_unk_predictions: if True, the span label dictionary keeps an <unk> entry so
            the model can predict it
        """
        super().__init__()

        # ----- Create the internal tag dictionary -----
        self.tag_type = tag_type
        self.tag_format = tag_format.upper()
        if init_from_state_dict:
            self.label_dictionary = tag_dictionary
        else:
            # span-labels need special encoding (BIO or BIOES)
            if tag_dictionary.span_labels:
                # the big question is whether the label dictionary should contain an UNK or not
                # without UNK, we cannot evaluate on data that contains labels not seen in test
                # with UNK, the model learns less well if there are no UNK examples
                self.label_dictionary = Dictionary(add_unk=allow_unk_predictions)
                assert self.tag_format in ["BIOES", "BIO"]
                for label in tag_dictionary.get_items():
                    if label == "<unk>":
                        continue
                    self.label_dictionary.add_item("O")
                    if self.tag_format == "BIOES":
                        self.label_dictionary.add_item("S-" + label)
                        self.label_dictionary.add_item("B-" + label)
                        self.label_dictionary.add_item("E-" + label)
                        self.label_dictionary.add_item("I-" + label)
                    if self.tag_format == "BIO":
                        self.label_dictionary.add_item("B-" + label)
                        self.label_dictionary.add_item("I-" + label)
            else:
                self.label_dictionary = tag_dictionary

        # is this a span prediction problem?
        self.predict_spans = self._determine_if_span_prediction_problem(self.label_dictionary)

        self.tagset_size = len(self.label_dictionary)
        log.info(f"SequenceTagger predicts: {self.label_dictionary}")

        # ----- Embeddings -----
        self.embeddings = embeddings
        embedding_dim: int = embeddings.embedding_length

        # ----- Initial loss weights parameters -----
        self.weight_dict = loss_weights
        self.loss_weights = self._init_loss_weights(loss_weights) if loss_weights else None

        # ----- RNN specific parameters -----
        # when a shared RNN is passed in, its geometry overrides the constructor arguments
        self.use_rnn = use_rnn
        self.rnn_type = rnn_type if not rnn else rnn._get_name()
        self.hidden_size = hidden_size if not rnn else rnn.hidden_size
        self.rnn_layers = rnn_layers if not rnn else rnn.num_layers
        self.bidirectional = bidirectional if not rnn else rnn.bidirectional

        # ----- Conditional Random Field parameters -----
        self.use_crf = use_crf
        # Previously trained models have been trained without an explicit CRF, thus it is required to check
        # whether we are loading a model from state dict in order to skip or add START and STOP token
        if use_crf and not init_from_state_dict and not self.label_dictionary.start_stop_tags_are_set():
            self.label_dictionary.set_start_stop_tags()
            self.tagset_size += 2

        # ----- Dropout parameters -----
        # dropouts
        # remove word dropout if there is no contact over the sequence dimension.
        if not use_crf and not use_rnn:
            word_dropout = 0.0

        self.use_dropout: float = dropout
        self.use_word_dropout: float = word_dropout
        self.use_locked_dropout: float = locked_dropout

        if dropout > 0.0:
            self.dropout = torch.nn.Dropout(dropout)

        if word_dropout > 0.0:
            self.word_dropout = flair.nn.WordDropout(word_dropout)

        if locked_dropout > 0.0:
            self.locked_dropout = flair.nn.LockedDropout(locked_dropout)

        # ----- Model layers -----
        self.reproject_embeddings = reproject_embeddings
        if self.reproject_embeddings:
            self.embedding2nn = torch.nn.Linear(embedding_dim, embedding_dim)

        # ----- RNN layer -----
        if use_rnn:
            # If shared RNN provided, else create one for model
            self.rnn: torch.nn.RNN = (
                rnn
                if rnn
                else self.RNN(
                    rnn_type,
                    rnn_layers,
                    hidden_size,
                    bidirectional,
                    rnn_input_dim=embedding_dim,
                )
            )

            num_directions = 2 if self.bidirectional else 1
            hidden_output_dim = self.rnn.hidden_size * num_directions

            # Whether to train initial hidden state
            self.train_initial_hidden_state = train_initial_hidden_state
            if self.train_initial_hidden_state:
                (
                    self.hs_initializer,
                    self.lstm_init_h,
                    self.lstm_init_c,
                ) = self._init_initial_hidden_state(num_directions)

            # final linear map to tag space
            self.linear = torch.nn.Linear(hidden_output_dim, len(self.label_dictionary))
        else:
            self.linear = torch.nn.Linear(embedding_dim, len(self.label_dictionary))
            self.train_initial_hidden_state = False

        # the loss function is Viterbi if using CRF, else regular Cross Entropy Loss
        self.loss_function = (
            ViterbiLoss(self.label_dictionary)
            if use_crf
            else torch.nn.CrossEntropyLoss(weight=self.loss_weights, reduction="sum")
        )

        # if using CRF, we also require a CRF and a Viterbi decoder
        if use_crf:
            self.crf = CRF(self.label_dictionary, self.tagset_size, init_from_state_dict)
            self.viterbi_decoder = ViterbiDecoder(self.label_dictionary)

        self.to(flair.device)
    @property
    def label_type(self):
        """The tag type this tagger predicts (e.g. the annotation layer name)."""
        return self.tag_type
def _init_loss_weights(self, loss_weights: Dict[str, float]) -> torch.Tensor:
"""Initializes the loss weights based on given dictionary.
:param loss_weights: dictionary - contains loss weights
"""
n_classes = len(self.label_dictionary)
weight_list = [1.0 for _ in range(n_classes)]
for i, tag in enumerate(self.label_dictionary.get_items()):
if tag in loss_weights:
weight_list[i] = loss_weights[tag]
return torch.tensor(weight_list).to(flair.device)
def _init_initial_hidden_state(self, num_directions: int):
"""Initializes hidden states given the number of directions in RNN.
:param num_directions: Number of directions in RNN.
"""
hs_initializer = torch.nn.init.xavier_normal_
lstm_init_h = torch.nn.Parameter(
torch.randn(self.rnn.num_layers * num_directions, self.hidden_size),
requires_grad=True,
)
lstm_init_c = torch.nn.Parameter(
torch.randn(self.rnn.num_layers * num_directions, self.hidden_size),
requires_grad=True,
)
return hs_initializer, lstm_init_h, lstm_init_c
@staticmethod
def RNN(
rnn_type: str,
rnn_layers: int,
hidden_size: int,
bidirectional: bool,
rnn_input_dim: int,
) -> torch.nn.RNN:
"""Static wrapper function returning an RNN instance from PyTorch.
:param rnn_type: Type of RNN from torch.nn
:param rnn_layers: number of layers to include
:param hidden_size: hidden size of RNN cell
:param bidirectional: If True, RNN cell is bidirectional
:param rnn_input_dim: Input dimension to RNN cell
"""
if rnn_type in ["LSTM", "GRU", "RNN"]:
RNN = getattr(torch.nn, rnn_type)(
rnn_input_dim,
hidden_size,
num_layers=rnn_layers,
dropout=0.0 if rnn_layers == 1 else 0.5,
bidirectional=bidirectional,
batch_first=True,
)
else:
raise Exception(f"Unknown RNN type: {rnn_type}. Please use either LSTM, GRU or RNN.")
return RNN
def forward_loss(self, sentences: List[Sentence]) -> Tuple[torch.Tensor, int]:
# if there are no sentences, there is no loss
if len(sentences) == 0:
return torch.tensor(0.0, dtype=torch.float, device=flair.device, requires_grad=True), 0
sentences = sorted(sentences, key=len, reverse=True)
gold_labels = self._prepare_label_tensor(sentences)
sentence_tensor, lengths = self._prepare_tensors(sentences)
# forward pass to get scores
scores = self.forward(sentence_tensor, lengths)
# calculate loss given scores and labels
return self._calculate_loss(scores, gold_labels)
def _prepare_tensors(self, data_points: Union[List[Sentence], Sentence]) -> Tuple[torch.Tensor, torch.LongTensor]:
sentences = [data_points] if not isinstance(data_points, list) else data_points
self.embeddings.embed(sentences)
# make a zero-padded tensor for the whole sentence
lengths, sentence_tensor = self._make_padded_tensor_for_batch(sentences)
return sentence_tensor, lengths
    def forward(self, sentence_tensor: torch.Tensor, lengths: torch.LongTensor):
        """Forward propagation through network.

        :param sentence_tensor: A tensor representing the batch of sentences.
        :param lengths: A IntTensor representing the lengths of the respective sentences.
        :return: if using a CRF, a tuple (crf features, lengths, transitions); otherwise a
            flat tensor of per-token tag scores with padding removed.
        """
        # dropouts are applied to the embeddings before reprojection and the RNN
        if self.use_dropout:
            sentence_tensor = self.dropout(sentence_tensor)
        if self.use_word_dropout:
            sentence_tensor = self.word_dropout(sentence_tensor)
        if self.use_locked_dropout:
            sentence_tensor = self.locked_dropout(sentence_tensor)

        if self.reproject_embeddings:
            sentence_tensor = self.embedding2nn(sentence_tensor)

        if self.use_rnn:
            # pack to skip computation on padding positions
            packed = pack_padded_sequence(sentence_tensor, lengths, batch_first=True)
            rnn_output, hidden = self.rnn(packed)
            sentence_tensor, output_lengths = pad_packed_sequence(rnn_output, batch_first=True)

            # dropout is applied a second time on the RNN output (word dropout is not)
            if self.use_dropout:
                sentence_tensor = self.dropout(sentence_tensor)
            if self.use_locked_dropout:
                sentence_tensor = self.locked_dropout(sentence_tensor)

        # linear map to tag space
        features = self.linear(sentence_tensor)

        # Depending on whether we are using CRF or a linear layer, scores is either:
        # -- A tensor of shape (batch size, sequence length, tagset size, tagset size) for CRF
        # -- A tensor of shape (aggregated sequence length for all sentences in batch, tagset size) for linear layer
        if self.use_crf:
            features = self.crf(features)
            scores = (features, lengths, self.crf.transitions)
        else:
            scores = self._get_scores_from_features(features, lengths)

        return scores
def _calculate_loss(self, scores: torch.Tensor, labels: torch.LongTensor) -> Tuple[torch.Tensor, int]:
if labels.size(0) == 0:
return torch.tensor(0.0, requires_grad=True, device=flair.device), 1
return self.loss_function(scores, labels), len(labels)
    def _make_padded_tensor_for_batch(self, sentences: List[Sentence]) -> Tuple[torch.LongTensor, torch.Tensor]:
        """Pack the token embeddings of a batch into one zero-padded tensor.

        :param sentences: already-embedded sentences
        :return: tuple of (per-sentence lengths, tensor of shape
            (batch size, longest sentence length, embedding length))
        """
        names = self.embeddings.get_names()
        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        longest_token_sequence_in_batch: int = max(lengths)
        # one shared zero tensor is sliced for padding instead of allocating per sentence
        pre_allocated_zero_tensor = torch.zeros(
            self.embeddings.embedding_length * longest_token_sequence_in_batch,
            dtype=torch.float,
            device=flair.device,
        )
        all_embs = []
        for sentence in sentences:
            all_embs += [emb for token in sentence for emb in token.get_each_embedding(names)]
            nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)

            if nb_padding_tokens > 0:
                t = pre_allocated_zero_tensor[: self.embeddings.embedding_length * nb_padding_tokens]
                all_embs.append(t)

        sentence_tensor = torch.cat(all_embs).view(
            [
                len(sentences),
                longest_token_sequence_in_batch,
                self.embeddings.embedding_length,
            ]
        )
        return torch.LongTensor(lengths), sentence_tensor
@staticmethod
def _get_scores_from_features(features: torch.Tensor, lengths: torch.Tensor):
"""Remove paddings to get a smaller tensor.
Trims current batch tensor in shape (batch size, sequence length, tagset size)
in such a way that all pads are going to be removed.
:param features: torch.tensor containing all features from forward propagation
:param lengths: length from each sentence in batch in order to trim padding tokens
"""
features_formatted = []
for feat, length in zip(features, lengths):
features_formatted.append(feat[:length])
scores = torch.cat(features_formatted)
return scores
    def _get_gold_labels(self, sentences: List[Sentence]) -> List[str]:
        """Extracts gold labels from each sentence.

        For span prediction problems, span labels are converted to per-token
        BIO/BIOES tags; otherwise the token labels are read directly. The result
        is one flat list of tag strings covering all tokens of all sentences.

        :param sentences: List of sentences in batch
        """
        # spans need to be encoded as token-level predictions
        if self.predict_spans:
            all_sentence_labels = []
            for sentence in sentences:
                sentence_labels = ["O"] * len(sentence)
                for label in sentence.get_labels(self.label_type):
                    span: Span = label.data_point
                    if self.tag_format == "BIOES":
                        if len(span) == 1:
                            # single-token spans get the S- tag
                            sentence_labels[span[0].idx - 1] = "S-" + label.value
                        else:
                            # token .idx is 1-based, hence the -1 offsets into the list
                            sentence_labels[span[0].idx - 1] = "B-" + label.value
                            sentence_labels[span[-1].idx - 1] = "E-" + label.value
                            for i in range(span[0].idx, span[-1].idx - 1):
                                sentence_labels[i] = "I-" + label.value
                    else:
                        sentence_labels[span[0].idx - 1] = "B-" + label.value
                        for i in range(span[0].idx, span[-1].idx):
                            sentence_labels[i] = "I-" + label.value
                all_sentence_labels.extend(sentence_labels)
            labels = all_sentence_labels

        # all others are regular labels for each token
        else:
            labels = [token.get_label(self.label_type, "O").value for sentence in sentences for token in sentence]

        return labels
def _prepare_label_tensor(self, sentences: List[Sentence]):
gold_labels = self._get_gold_labels(sentences)
labels = torch.tensor(
[self.label_dictionary.get_idx_for_item(label) for label in gold_labels],
dtype=torch.long,
device=flair.device,
)
return labels
    def predict(
        self,
        sentences: Union[List[Sentence], Sentence],
        mini_batch_size: int = 32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
        force_token_predictions: bool = False,
    ):
        """Predicts labels for current batch with CRF or Softmax.

        :param sentences: List of sentences in batch
        :param mini_batch_size: batch size for test data
        :param return_probabilities_for_all_classes: Whether to return probabilities for all classes
        :param verbose: whether to use progress bar
        :param label_name: which label to predict (defaults to this tagger's tag type)
        :param return_loss: whether to return loss value
        :param embedding_storage_mode: determines where to store embeddings - can be "gpu", "cpu" or None.
        :param force_token_predictions: if True, add token-level labels even for span prediction problems
        :return: (overall loss, label count) if return_loss is True, else None; predictions
            are attached to the sentences in place
        """
        if label_name is None:
            label_name = self.tag_type

        with torch.no_grad():
            if not sentences:
                return sentences

            # make sure it's a list
            if not isinstance(sentences, list) and not isinstance(sentences, flair.data.Dataset):
                sentences = [sentences]

            Sentence.set_context_for_sentences(cast(List[Sentence], sentences))

            # filter empty sentences
            sentences = [sentence for sentence in sentences if len(sentence) > 0]

            # reverse sort all sequences by their length
            reordered_sentences = sorted(sentences, key=len, reverse=True)

            if len(reordered_sentences) == 0:
                return sentences

            dataloader = DataLoader(
                dataset=FlairDatapointDataset(reordered_sentences),
                batch_size=mini_batch_size,
            )
            # progress bar for verbosity
            if verbose:
                dataloader = tqdm(dataloader, desc="Batch inference")

            overall_loss = torch.zeros(1, device=flair.device)
            label_count = 0
            for batch in dataloader:
                # stop if all sentences are empty
                if not batch:
                    continue

                # get features from forward propagation
                sentence_tensor, lengths = self._prepare_tensors(batch)
                features = self.forward(sentence_tensor, lengths)

                # remove previously predicted labels of this type
                for sentence in batch:
                    sentence.remove_labels(label_name)

                # if return_loss, get loss value
                if return_loss:
                    gold_labels = self._prepare_label_tensor(batch)
                    loss = self._calculate_loss(features, gold_labels)
                    overall_loss += loss[0]
                    label_count += loss[1]

                # make predictions
                if self.use_crf:
                    predictions, all_tags = self.viterbi_decoder.decode(
                        features, return_probabilities_for_all_classes, batch
                    )
                else:
                    predictions, all_tags = self._standard_inference(
                        features, batch, return_probabilities_for_all_classes
                    )

                # add predictions to Sentence
                for sentence, sentence_predictions in zip(batch, predictions):
                    # BIOES-labels need to be converted to spans
                    if self.predict_spans and not force_token_predictions:
                        sentence_tags = [label[0] for label in sentence_predictions]
                        sentence_scores = [label[1] for label in sentence_predictions]
                        predicted_spans = get_spans_from_bio(sentence_tags, sentence_scores)
                        for predicted_span in predicted_spans:
                            span: Span = sentence[predicted_span[0][0] : predicted_span[0][-1] + 1]
                            span.add_label(label_name, value=predicted_span[2], score=predicted_span[1])

                    # token-labels can be added directly ("O" and legacy "_" predictions are skipped)
                    else:
                        for token, label in zip(sentence.tokens, sentence_predictions):
                            if label[0] in ["O", "_"]:
                                continue
                            token.add_label(typename=label_name, value=label[0], score=label[1])

                # all_tags will be empty if all_tag_prob is set to False, so the for loop will be avoided
                for sentence, sent_all_tags in zip(batch, all_tags):
                    for token, token_all_tags in zip(sentence.tokens, sent_all_tags):
                        token.add_tags_proba_dist(label_name, token_all_tags)

                store_embeddings(sentences, storage_mode=embedding_storage_mode)

            if return_loss:
                return overall_loss, label_count
            return None
    def _standard_inference(self, features: torch.Tensor, batch: List[Sentence], probabilities_for_all_classes: bool):
        """Softmax over emission scores from forward propagation.

        :param features: sentence tensor from forward propagation (one row per token)
        :param batch: list of sentence
        :param probabilities_for_all_classes: whether to return score for each tag in tag dictionary
        :return: tuple of (per-sentence lists of (tag, score) predictions, per-token
            probability distributions, or an empty list if not requested)
        """
        softmax_batch = F.softmax(features, dim=1).cpu()
        scores_batch, prediction_batch = torch.max(softmax_batch, dim=1)
        predictions = []
        all_tags = []

        # the flat token-level tensors are consumed sentence by sentence
        for sentence in batch:
            scores = scores_batch[: len(sentence)]
            predictions_for_sentence = prediction_batch[: len(sentence)]
            predictions.append(
                [
                    (self.label_dictionary.get_item_for_index(prediction), score.item())
                    for token, score, prediction in zip(sentence, scores, predictions_for_sentence)
                ]
            )
            # trim the consumed rows off the front
            scores_batch = scores_batch[len(sentence) :]
            prediction_batch = prediction_batch[len(sentence) :]

        if probabilities_for_all_classes:
            lengths = [len(sentence) for sentence in batch]
            all_tags = self._all_scores_for_token(batch, softmax_batch, lengths)

        return predictions, all_tags
def _all_scores_for_token(self, sentences: List[Sentence], scores: torch.Tensor, lengths: List[int]):
"""Returns all scores for each tag in tag dictionary.
:param scores: Scores for current sentence.
"""
scores = scores.numpy()
tokens = [token for sentence in sentences for token in sentence]
prob_all_tags = [
[
Label(token, self.label_dictionary.get_item_for_index(score_id), score)
for score_id, score in enumerate(score_dist)
]
for score_dist, token in zip(scores, tokens)
]
prob_tags_per_sentence = []
previous = 0
for length in lengths:
prob_tags_per_sentence.append(prob_all_tags[previous : previous + length])
previous = length
return prob_tags_per_sentence
def _get_state_dict(self):
"""Returns the state dictionary for this model."""
model_state = {
**super()._get_state_dict(),
"embeddings": self.embeddings.save_embeddings(use_state_dict=False),
"hidden_size": self.hidden_size,
"tag_dictionary": self.label_dictionary,
"tag_format": self.tag_format,
"tag_type": self.tag_type,
"use_crf": self.use_crf,
"use_rnn": self.use_rnn,
"rnn_layers": self.rnn_layers,
"use_dropout": self.use_dropout,
"use_word_dropout": self.use_word_dropout,
"use_locked_dropout": self.use_locked_dropout,
"rnn_type": self.rnn_type,
"reproject_embeddings": self.reproject_embeddings,
"weight_dict": self.weight_dict,
"train_initial_hidden_state": self.train_initial_hidden_state,
}
return model_state
    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Instantiate a SequenceTagger from a serialized state dictionary.

        :param state: state produced by ``_get_state_dict`` (possibly written by an
            older Flair version).
        :param kwargs: constructor overrides forwarded to the base implementation.
        """
        # models serialized by older Flair versions stored the CRF transition matrix
        # under the key "transitions"; remap it to the current "crf.transitions"
        if state["use_crf"] and "transitions" in state["state_dict"]:
            state["state_dict"]["crf.transitions"] = state["state_dict"]["transitions"]
            del state["state_dict"]["transitions"]
        # `state.get(key, default)` keeps checkpoints loadable that predate a key
        return super()._init_model_with_state_dict(
            state,
            embeddings=state.get("embeddings"),
            tag_dictionary=state.get("tag_dictionary"),
            tag_format=state.get("tag_format", "BIOES"),
            tag_type=state.get("tag_type"),
            use_crf=state.get("use_crf"),
            use_rnn=state.get("use_rnn"),
            rnn_layers=state.get("rnn_layers"),
            hidden_size=state.get("hidden_size"),
            dropout=state.get("use_dropout", 0.0),
            word_dropout=state.get("use_word_dropout", 0.0),
            locked_dropout=state.get("use_locked_dropout", 0.0),
            rnn_type=state.get("rnn_type", "LSTM"),
            reproject_embeddings=state.get("reproject_embeddings", True),
            loss_weights=state.get("weight_dict"),
            init_from_state_dict=True,
            train_initial_hidden_state=state.get("train_initial_hidden_state", False),
            **kwargs,
        )
    @staticmethod
    def _fetch_model(model_name) -> str:
        """Resolve a model name to a local model file path, downloading if necessary.

        Resolution order (mirroring the branches below): an existing local file is
        returned as-is; well-known short names are remapped to Hugging Face hub
        repositories; other short names map to direct download URLs and are cached;
        the @redewiegergabe zip models get special unzip handling; any other name is
        treated as a Hugging Face repo id (optionally suffixed with "@revision").

        :param model_name: short model key, local file path, or HF repo id.
        :return: path of the (downloaded/cached) model file.
        """
        # core Flair models on Huggingface ModelHub
        huggingface_model_map = {
            "ner": "flair/ner-english",
            "ner-fast": "flair/ner-english-fast",
            "ner-ontonotes": "flair/ner-english-ontonotes",
            "ner-ontonotes-fast": "flair/ner-english-ontonotes-fast",
            # Large NER models,
            "ner-large": "flair/ner-english-large",
            "ner-ontonotes-large": "flair/ner-english-ontonotes-large",
            "de-ner-large": "flair/ner-german-large",
            "nl-ner-large": "flair/ner-dutch-large",
            "es-ner-large": "flair/ner-spanish-large",
            # Multilingual NER models
            "ner-multi": "flair/ner-multi",
            "multi-ner": "flair/ner-multi",
            "ner-multi-fast": "flair/ner-multi-fast",
            # English POS models
            "upos": "flair/upos-english",
            "upos-fast": "flair/upos-english-fast",
            "pos": "flair/pos-english",
            "pos-fast": "flair/pos-english-fast",
            # Multilingual POS models
            "pos-multi": "flair/upos-multi",
            "multi-pos": "flair/upos-multi",
            "pos-multi-fast": "flair/upos-multi-fast",
            "multi-pos-fast": "flair/upos-multi-fast",
            # English SRL models
            "frame": "flair/frame-english",
            "frame-fast": "flair/frame-english-fast",
            # English chunking models
            "chunk": "flair/chunk-english",
            "chunk-fast": "flair/chunk-english-fast",
            # Language-specific NER models
            "ar-ner": "megantosh/flair-arabic-multi-ner",
            "ar-pos": "megantosh/flair-arabic-dialects-codeswitch-egy-lev",
            "da-ner": "flair/ner-danish",
            "de-ner": "flair/ner-german",
            "de-ler": "flair/ner-german-legal",
            "de-ner-legal": "flair/ner-german-legal",
            "fr-ner": "flair/ner-french",
            "nl-ner": "flair/ner-dutch",
            "ner-ukrainian": "dchaplinsky/flair-uk-ner",
            # Language-specific POS models
            "pos-ukrainian": "dchaplinsky/flair-uk-pos",
        }
        hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
        hunflair_paper_path = hu_path + "/hunflair_smallish_models"
        hunflair_main_path = hu_path + "/hunflair_allcorpus_models"
        # short names that resolve to direct downloads from the HU-Berlin server
        hu_model_map = {
            # English NER models
            "ner": "/".join([hu_path, "ner", "en-ner-conll03-v0.4.pt"]),
            "ner-pooled": "/".join([hu_path, "ner-pooled", "en-ner-conll03-pooled-v0.5.pt"]),
            "ner-fast": "/".join([hu_path, "ner-fast", "en-ner-fast-conll03-v0.4.pt"]),
            "ner-ontonotes": "/".join([hu_path, "ner-ontonotes", "en-ner-ontonotes-v0.4.pt"]),
            "ner-ontonotes-fast": "/".join([hu_path, "ner-ontonotes-fast", "en-ner-ontonotes-fast-v0.4.pt"]),
            # Multilingual NER models
            "ner-multi": "/".join([hu_path, "multi-ner", "quadner-large.pt"]),
            "multi-ner": "/".join([hu_path, "multi-ner", "quadner-large.pt"]),
            "ner-multi-fast": "/".join([hu_path, "multi-ner-fast", "ner-multi-fast.pt"]),
            # English POS models
            "upos": "/".join([hu_path, "upos", "en-pos-ontonotes-v0.4.pt"]),
            "upos-fast": "/".join([hu_path, "upos-fast", "en-upos-ontonotes-fast-v0.4.pt"]),
            "pos": "/".join([hu_path, "pos", "en-pos-ontonotes-v0.5.pt"]),
            "pos-fast": "/".join([hu_path, "pos-fast", "en-pos-ontonotes-fast-v0.5.pt"]),
            # Multilingual POS models
            "pos-multi": "/".join([hu_path, "multi-pos", "pos-multi-v0.1.pt"]),
            "multi-pos": "/".join([hu_path, "multi-pos", "pos-multi-v0.1.pt"]),
            "pos-multi-fast": "/".join([hu_path, "multi-pos-fast", "pos-multi-fast.pt"]),
            "multi-pos-fast": "/".join([hu_path, "multi-pos-fast", "pos-multi-fast.pt"]),
            # English SRL models
            "frame": "/".join([hu_path, "frame", "en-frame-ontonotes-v0.4.pt"]),
            "frame-fast": "/".join([hu_path, "frame-fast", "en-frame-ontonotes-fast-v0.4.pt"]),
            "frame-large": "/".join([hu_path, "frame-large", "frame-large.pt"]),
            # English chunking models
            "chunk": "/".join([hu_path, "chunk", "en-chunk-conll2000-v0.4.pt"]),
            "chunk-fast": "/".join([hu_path, "chunk-fast", "en-chunk-conll2000-fast-v0.4.pt"]),
            # Danish models
            "da-pos": "/".join([hu_path, "da-pos", "da-pos-v0.1.pt"]),
            "da-ner": "/".join([hu_path, "NER-danish", "da-ner-v0.1.pt"]),
            # German models
            "de-pos": "/".join([hu_path, "de-pos", "de-pos-ud-hdt-v0.5.pt"]),
            "de-pos-tweets": "/".join([hu_path, "de-pos-tweets", "de-pos-twitter-v0.1.pt"]),
            "de-ner": "/".join([hu_path, "de-ner", "de-ner-conll03-v0.4.pt"]),
            "de-ner-germeval": "/".join([hu_path, "de-ner-germeval", "de-ner-germeval-0.4.1.pt"]),
            "de-ler": "/".join([hu_path, "de-ner-legal", "de-ner-legal.pt"]),
            "de-ner-legal": "/".join([hu_path, "de-ner-legal", "de-ner-legal.pt"]),
            # French models
            "fr-ner": "/".join([hu_path, "fr-ner", "fr-ner-wikiner-0.4.pt"]),
            # Dutch models
            "nl-ner": "/".join([hu_path, "nl-ner", "nl-ner-bert-conll02-v0.8.pt"]),
            "nl-ner-rnn": "/".join([hu_path, "nl-ner-rnn", "nl-ner-conll02-v0.5.pt"]),
            # Malayalam models
            "ml-pos": "https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/malayalam-xpos-model.pt",
            "ml-upos": "https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/malayalam-upos-model.pt",
            # Portuguese models
            "pt-pos-clinical": "/".join(
                [
                    hu_path,
                    "pt-pos-clinical",
                    "pucpr-flair-clinical-pos-tagging-best-model.pt",
                ]
            ),
            # Keyphase models
            "keyphrase": "/".join([hu_path, "keyphrase", "keyphrase-en-scibert.pt"]),
            "negation-speculation": "/".join([hu_path, "negation-speculation", "negation-speculation-model.pt"]),
            # Biomedical models
            "hunflair-paper-cellline": "/".join([hunflair_paper_path, "cellline", "hunflair-celline-v1.0.pt"]),
            "hunflair-paper-chemical": "/".join([hunflair_paper_path, "chemical", "hunflair-chemical-v1.0.pt"]),
            "hunflair-paper-disease": "/".join([hunflair_paper_path, "disease", "hunflair-disease-v1.0.pt"]),
            "hunflair-paper-gene": "/".join([hunflair_paper_path, "gene", "hunflair-gene-v1.0.pt"]),
            "hunflair-paper-species": "/".join([hunflair_paper_path, "species", "hunflair-species-v1.0.pt"]),
            "hunflair-cellline": "/".join([hunflair_main_path, "cellline", "hunflair-celline-v1.0.pt"]),
            "hunflair-chemical": "/".join([hunflair_main_path, "huner-chemical", "hunflair-chemical-full-v1.0.pt"]),
            "hunflair-disease": "/".join([hunflair_main_path, "huner-disease", "hunflair-disease-full-v1.0.pt"]),
            "hunflair-gene": "/".join([hunflair_main_path, "huner-gene", "hunflair-gene-full-v1.0.pt"]),
            "hunflair-species": "/".join([hunflair_main_path, "huner-species", "hunflair-species-full-v1.1.pt"]),
        }
        cache_dir = Path("models")
        get_from_model_hub = False
        # check if model name is a valid local file
        if Path(model_name).exists():
            model_path = model_name
        # check if model key is remapped to HF key - if so, print out information
        elif model_name in huggingface_model_map:
            # get mapped name
            hf_model_name = huggingface_model_map[model_name]
            # use mapped name instead
            model_name = hf_model_name
            get_from_model_hub = True
        # if not, check if model key is remapped to direct download location. If so, download model
        elif model_name in hu_model_map:
            model_path = cached_path(hu_model_map[model_name], cache_dir=cache_dir)
        # special handling for the taggers by the @redewiegergabe project (TODO: move to model hub)
        elif model_name == "de-historic-indirect":
            model_file = flair.cache_root / cache_dir / "indirect" / "final-model.pt"
            if not model_file.exists():
                cached_path(
                    "http://www.redewiedergabe.de/models/indirect.zip",
                    cache_dir=cache_dir,
                )
                unzip_file(
                    flair.cache_root / cache_dir / "indirect.zip",
                    flair.cache_root / cache_dir,
                )
            model_path = str(flair.cache_root / cache_dir / "indirect" / "final-model.pt")
        elif model_name == "de-historic-direct":
            model_file = flair.cache_root / cache_dir / "direct" / "final-model.pt"
            if not model_file.exists():
                cached_path(
                    "http://www.redewiedergabe.de/models/direct.zip",
                    cache_dir=cache_dir,
                )
                unzip_file(
                    flair.cache_root / cache_dir / "direct.zip",
                    flair.cache_root / cache_dir,
                )
            model_path = str(flair.cache_root / cache_dir / "direct" / "final-model.pt")
        elif model_name == "de-historic-reported":
            model_file = flair.cache_root / cache_dir / "reported" / "final-model.pt"
            if not model_file.exists():
                cached_path(
                    "http://www.redewiedergabe.de/models/reported.zip",
                    cache_dir=cache_dir,
                )
                unzip_file(
                    flair.cache_root / cache_dir / "reported.zip",
                    flair.cache_root / cache_dir,
                )
            model_path = str(flair.cache_root / cache_dir / "reported" / "final-model.pt")
        elif model_name == "de-historic-free-indirect":
            model_file = flair.cache_root / cache_dir / "freeIndirect" / "final-model.pt"
            if not model_file.exists():
                cached_path(
                    "http://www.redewiedergabe.de/models/freeIndirect.zip",
                    cache_dir=cache_dir,
                )
                unzip_file(
                    flair.cache_root / cache_dir / "freeIndirect.zip",
                    flair.cache_root / cache_dir,
                )
            model_path = str(flair.cache_root / cache_dir / "freeIndirect" / "final-model.pt")
        # for all other cases (not local file or special download location), use HF model hub
        else:
            get_from_model_hub = True
        # if not a local file, get from model hub
        if get_from_model_hub:
            hf_model_name = "pytorch_model.bin"
            revision = "main"
            # "repo@revision" selects a specific branch/tag/commit on the hub
            if "@" in model_name:
                model_name_split = model_name.split("@")
                revision = model_name_split[-1]
                model_name = model_name_split[0]
            # use model name as subfolder
            model_folder = model_name.split("/", maxsplit=1)[1] if "/" in model_name else model_name
            # Lazy import
            from huggingface_hub.file_download import hf_hub_download
            try:
                model_path = hf_hub_download(
                    repo_id=model_name,
                    filename=hf_model_name,
                    revision=revision,
                    library_name="flair",
                    library_version=flair.__version__,
                    cache_dir=flair.cache_root / "models" / model_folder,
                )
            except HTTPError:
                # output information
                log.error("-" * 80)
                log.error(
                    f"ERROR: The key '{model_name}' was neither found on the ModelHub nor is this a valid path to a file on your system!"
                )
                log.error(" -> Please check https://huggingface.co/models?filter=flair for all available models.")
                log.error(" -> Alternatively, point to a model file on your local drive.")
                log.error("-" * 80)
                Path(flair.cache_root / "models" / model_folder).rmdir()  # remove folder again if not valid
                raise
        return model_path
    def _generate_model_card(self, repo_id):
        """Build the default Hugging Face model card (README.md content) for this tagger.

        :param repo_id: hub repository id, embedded into the usage example below.
        """
        # NOTE: the returned markdown is written verbatim as README.md; the string
        # is deliberately left-aligned so no Python indentation leaks into the card
        return f"""---
tags:
- flair
- token-classification
- sequence-tagger-model
---
### Demo: How to use in Flair
Requires:
- **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("{repo_id}")
# make example sentence
sentence = Sentence("On September 1st George won 1 dollar while watching Game of Thrones.")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
    print(entity)
```"""
    def push_to_hub(
        self,
        repo_id: str,
        token: Optional[str] = None,
        private: Optional[bool] = None,
        commit_message: str = "Add new SequenceTagger model.",
    ):
        """Uploads the Sequence Tagger model to a Hugging Face Hub repository.

        :param repo_id: A namespace (user or an organization) and a repo name separated by a `/`.
        :param token: An authentication token (See https://huggingface.co/settings/token).
        :param private: Whether the repository is private.
        :param commit_message: Message to commit while pushing.
        :return: The url of the repository.
        """
        # Lazy import
        from huggingface_hub import create_repo, model_info, upload_folder
        # exist_ok makes repeated pushes to the same repo idempotent
        repo_url = create_repo(
            repo_id=repo_id,
            token=token,
            private=private,
            exist_ok=True,
        )
        # stage everything in a temp dir and upload it as one commit
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_path = Path(tmp_dir)
            # Save model weight
            local_model_path = tmp_path / "pytorch_model.bin"
            self.save(local_model_path)
            # Determine if model card already exists (never overwrite a user-written README)
            info = model_info(repo_id, use_auth_token=token)
            write_readme = all(f.rfilename != "README.md" for f in info.siblings)
            # Generate and save model card
            if write_readme:
                model_card_content = self._generate_model_card(repo_id)
                readme_path = tmp_path / "README.md"
                with readme_path.open("w", encoding="utf-8") as f:
                    f.write(model_card_content)
            # Upload files
            upload_folder(
                repo_id=repo_id,
                folder_path=tmp_path,
                path_in_repo="",
                token=token,
                commit_message=commit_message,
            )
        return repo_url
@staticmethod
def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
if len(sentences) != len(filtered_sentences):
log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.")
return filtered_sentences
def _determine_if_span_prediction_problem(self, dictionary: Dictionary) -> bool:
return any(item.startswith(("B-", "S-", "I-")) for item in dictionary.get_items())
    def _print_predictions(self, batch, gold_label_type):
        """Format gold vs. predicted labels of a batch as CoNLL-style evaluation lines.

        For span prediction, gold and predicted spans are first projected onto
        token-level BIO tags so that each output line reads
        "<token> <gold> <predicted>"; sentences are separated by blank lines.
        """
        lines = []
        if self.predict_spans:
            for datapoint in batch:
                # all labels default to "O"
                for token in datapoint:
                    token.set_label("gold_bio", "O")
                    token.set_label("predicted_bio", "O")
                # set gold token-level
                for gold_label in datapoint.get_labels(gold_label_type):
                    gold_span: Span = gold_label.data_point
                    prefix = "B-"
                    for token in gold_span:
                        token.set_label("gold_bio", prefix + gold_label.value)
                        # only the first token of a span gets "B-"; the rest get "I-"
                        prefix = "I-"
                # set predicted token-level
                for predicted_label in datapoint.get_labels("predicted"):
                    predicted_span: Span = predicted_label.data_point
                    prefix = "B-"
                    for token in predicted_span:
                        token.set_label("predicted_bio", prefix + predicted_label.value)
                        prefix = "I-"
                # now print labels in CoNLL format
                for token in datapoint:
                    eval_line = (
                        f"{token.text} "
                        f"{token.get_label('gold_bio').value} "
                        f"{token.get_label('predicted_bio').value}\n"
                    )
                    lines.append(eval_line)
                lines.append("\n")
        else:
            # token-level tagging: gold and prediction can be read off directly
            for datapoint in batch:
                # print labels in CoNLL format
                for token in datapoint:
                    eval_line = (
                        f"{token.text} "
                        f"{token.get_label(gold_label_type).value} "
                        f"{token.get_label('predicted').value}\n"
                    )
                    lines.append(eval_line)
                lines.append("\n")
        return lines
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "SequenceTagger":
from typing import cast
return cast("SequenceTagger", super().load(model_path=model_path))
| 45,746 | 43.500973 | 137 | py |
flair | flair-master/flair/models/multitask_model.py | import logging
import random
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import flair.nn
from flair.data import DT, Dictionary, Sentence
from flair.file_utils import cached_path
from flair.nn import Classifier
from flair.training_utils import Result
log = logging.getLogger("flair")
class MultitaskModel(flair.nn.Classifier):
    """Multitask Model class which acts as wrapper for creating custom multitask models.

    Takes different tasks as input, parameter sharing is done by objects in flair,
    i.e. creating a Embedding Layer and passing it to two different Models, will
    result in a hard parameter-shared embedding layer. The abstract class takes care
    of calling the correct forward propagation and loss function of the respective
    model.
    """

    def __init__(
        self,
        models: List[flair.nn.Classifier],
        task_ids: Optional[List[str]] = None,
        loss_factors: Optional[List[float]] = None,
        use_all_tasks: bool = False,
    ) -> None:
        """Instantiates the MultiTaskModel.

        :param models: child classifiers, one per task (parameter sharing happens via
            embedding objects passed into several of them)
        :param task_ids: optional explicit task names; defaults to "Task_0", "Task_1", ...
        :param loss_factors: optional weight of each task in the summed loss (default 1.0 each)
        :param use_all_tasks: if True, a sentence annotated for several tasks contributes
            to all of them during training; otherwise one of its tasks is sampled at random
        """
        super().__init__()

        task_ids_internal: List[str] = task_ids if task_ids else [f"Task_{i}" for i in range(len(models))]

        self.tasks: Dict[str, flair.nn.Classifier] = {}
        self.loss_factors: Dict[str, float] = {}
        self.use_all_tasks = use_all_tasks

        if not loss_factors:
            loss_factors = [1.0] * len(models)

        for task_id, model, loss_factor in zip(task_ids_internal, models, loss_factors):
            # register each child model as a submodule so its parameters are tracked
            self.add_module(task_id, model)
            self.tasks[task_id] = model
            self.loss_factors[task_id] = loss_factor

            # the multi task model has several label types; the last task's is kept as default
            self._label_type = model.label_type
        self.to(flair.device)

    def forward(self, *args) -> torch.Tensor:
        raise NotImplementedError("`forward` is not used for multitask learning")

    def _prepare_tensors(self, data_points: List[DT]) -> Tuple[torch.Tensor, ...]:
        raise NotImplementedError("`_prepare_tensors` is not used for multitask learning")

    def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> Tuple[torch.Tensor, int]:
        """Calls the respective forward loss of each model and sums them weighted by their loss factors.

        :param sentences: batch of sentences
        :return: tuple of the weighted summed loss and the total prediction count
        """
        batch_split = self.split_batch_to_task_ids(sentences, all_tasks=self.use_all_tasks)
        loss = torch.tensor(0.0, device=flair.device)
        count = 0
        for task_id, split in batch_split.items():
            task_loss, task_count = self.tasks[task_id].forward_loss([sentences[i] for i in split])
            loss += self.loss_factors[task_id] * task_loss
            count += task_count
        return loss, count

    def predict(
        self,
        sentences,
        **predictargs,
    ):
        # each task annotates the sentences under its own label type
        for task in self.tasks.values():
            task.predict(sentences, **predictargs)

    @staticmethod
    def split_batch_to_task_ids(sentences: Union[List[Sentence], Sentence], all_tasks: bool = False) -> Dict:
        """Splits a batch of sentences to its respective model.

        If single sentence is assigned to several tasks (i.e. same corpus but different tasks), then the model
        assignment for this batch is randomly chosen.

        :param sentences: batch of sentences
        :param all_tasks: use all tasks of each sentence. If deactivated, a random task will be sampled
        :return: Key-value pairs as (task_id, list of sentences ids in batch)
        """
        batch_to_task_mapping: Dict[str, List[int]] = {}
        for sentence_id, sentence in enumerate(sentences):
            if all_tasks:
                multitask_ids = sentence.get_labels("multitask_id")
            else:
                multitask_ids = [random.choice(sentence.get_labels("multitask_id"))]
            for multitask_id in multitask_ids:
                # setdefault replaces the previous redundant in / not-in double check
                batch_to_task_mapping.setdefault(multitask_id.value, []).append(sentence_id)
        return batch_to_task_mapping

    def evaluate(
        self,
        data_points,
        gold_label_type: str,
        out_path: Optional[Union[str, Path]] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: Optional[List[str]] = None,
        gold_label_dictionary: Optional[Dictionary] = None,
        return_loss: bool = True,
        evaluate_all: bool = True,
        **evalargs,
    ) -> Result:
        """Evaluates the model. Returns a Result object containing evaluation results and a loss value.

        :param data_points: batch of sentences
        :param gold_label_type: if evaluate_all is False, the task id of the single task to evaluate
        :param embedding_storage_mode: One of 'none' (all embeddings are deleted and freshly recomputed),
            'cpu' (embeddings are stored on CPU) or 'gpu' (embeddings are stored on GPU)
        :param mini_batch_size: size of batches
        :param exclude_labels: labels to exclude from evaluation (default: none)
        :param evaluate_all: choose if all tasks should be evaluated, or a single one, depending on gold_label_type
        :return: Result object aggregating the scores of the evaluated task(s)
        """
        # fix of the mutable-default-argument pitfall (the default used to be a shared
        # list `exclude_labels=[]`); normalize None to a fresh list per call instead
        if exclude_labels is None:
            exclude_labels = []

        if not evaluate_all:
            if gold_label_type not in self.tasks:
                raise ValueError(
                    "evaluating a single task on a multitask model requires 'gold_label_type' to be a valid task."
                )
            data = [
                dp
                for dp in data_points
                if any(label.value == gold_label_type for label in dp.get_labels("multitask_id"))
            ]
            return self.tasks[gold_label_type].evaluate(
                data,
                gold_label_type=self.tasks[gold_label_type].label_type,
                out_path=out_path,
                embedding_storage_mode=embedding_storage_mode,
                mini_batch_size=mini_batch_size,
                main_evaluation_metric=main_evaluation_metric,
                exclude_labels=exclude_labels,
                gold_label_dictionary=gold_label_dictionary,
                return_loss=return_loss,
                **evalargs,
            )

        batch_split = self.split_batch_to_task_ids(data_points, all_tasks=True)

        # aggregate loss, main score and reports across all tasks
        loss = torch.tensor(0.0, device=flair.device)
        main_score = 0.0
        all_detailed_results = ""
        all_classification_report: Dict[str, Dict[str, Any]] = {}

        for task_id, split in batch_split.items():
            result = self.tasks[task_id].evaluate(
                data_points=[data_points[i] for i in split],
                gold_label_type=self.tasks[task_id].label_type,
                out_path=f"{out_path}_{task_id}.txt" if out_path is not None else None,
                embedding_storage_mode=embedding_storage_mode,
                mini_batch_size=mini_batch_size,
                main_evaluation_metric=main_evaluation_metric,
                exclude_labels=exclude_labels,
                gold_label_dictionary=gold_label_dictionary,
                return_loss=return_loss,
                **evalargs,
            )

            log.info(
                f"{task_id} - {self.tasks[task_id]._get_name()} - "
                f"loss: {result.loss} - {main_evaluation_metric[1]} "
                f"({main_evaluation_metric[0]}) {round(result.main_score, 4)}"
            )

            loss += result.loss
            main_score += result.main_score
            all_detailed_results += (
                50 * "-"
                + "\n\n"
                + task_id
                + " - "
                + "Label type: "
                + self.tasks[task_id].label_type
                + "\n\n"
                + result.detailed_results
            )
            all_classification_report[task_id] = result.classification_report

        # macro-average the per-task losses and main scores
        scores = {"loss": loss.item() / len(batch_split)}

        return Result(
            main_score=main_score / len(batch_split),
            detailed_results=all_detailed_results,
            scores=scores,
            classification_report=all_classification_report,
        )

    def _get_state_dict(self):
        """Returns the state dict of the multitask model which has multiple models underneath.

        :return model_state: model state for the multitask model
        """
        initial_model_state = super()._get_state_dict()
        # each child serializes itself under "model_states"; the shared top-level
        # "state_dict" would duplicate those weights, so it is cleared
        initial_model_state["state_dict"] = {}
        model_state = {
            **initial_model_state,
            "model_states": {task: model._get_state_dict() for task, model in self.tasks.items()},
            "loss_factors": [self.loss_factors[task] for task in self.tasks],
            "use_all_tasks": self.use_all_tasks,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Initializes the model based on given state dict."""
        models = []
        tasks = []
        loss_factors = state["loss_factors"]
        for task, task_state in state["model_states"].items():
            models.append(Classifier.load(task_state))
            tasks.append(task)
        model = cls(
            models=models,
            task_ids=tasks,
            loss_factors=loss_factors,
            use_all_tasks=state.get("use_all_tasks", False),
            **kwargs,
        )
        return model

    @property
    def label_type(self):
        # label type of the task registered last (see __init__)
        return self._label_type

    @staticmethod
    def _fetch_model(model_name) -> str:
        """Map well-known multitask model names to download URLs and cache them locally."""
        model_map = {}
        hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"

        # biomedical models
        model_map["bioner"] = "/".join([hu_path, "bioner", "hunflair.pt"])
        model_map["hunflair"] = "/".join([hu_path, "bioner", "hunflair.pt"])
        model_map["hunflair-paper"] = "/".join([hu_path, "bioner", "hunflair-paper.pt"])

        # entity linker
        model_map["linker"] = "/".join([hu_path, "zelda", "v2", "zelda-v2.pt"])
        model_map["zelda"] = "/".join([hu_path, "zelda", "v2", "zelda-v2.pt"])

        cache_dir = Path("models")
        if model_name in model_map:
            model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
        return model_name

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "MultitaskModel":
        """Load a MultitaskModel from a file path or state dictionary."""
        from typing import cast

        return cast("MultitaskModel", super().load(model_path=model_path))
| 10,773 | 38.756458 | 115 | py |
flair | flair-master/flair/models/entity_linker_model.py | import logging
import re
from functools import lru_cache
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Union
from unicodedata import category
import torch
import flair.embeddings
import flair.nn
from flair.data import Dictionary, Sentence, Span
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class CandidateGenerator:
    """Given a string, the CandidateGenerator returns possible target classes as candidates."""

    def __init__(self, candidates: Union[str, Dict], backoff: bool = True) -> None:
        """Initializes the generator.

        :param candidates: either the string "zelda" (downloads the ZELDA candidate lists)
            or a mapping from mention strings to iterables of candidate classes.
        :param backoff: if True, lookups become robust by lower-casing mentions,
            stripping punctuation and collapsing repeated spaces.
        """
        # internal candidate lists of generator
        self.mention_to_candidates_map: Dict = {}

        # load Zelda candidates if so passed
        if isinstance(candidates, str) and candidates.lower() == "zelda":
            zelda_path: str = "https://flair.informatik.hu-berlin.de/resources/datasets/zelda"
            zelda_candidates = cached_path(f"{zelda_path}/zelda_mention_entities_counter.pickle", cache_dir="datasets")
            import pickle

            with open(zelda_candidates, "rb") as handle:
                mention_entities_counter = pickle.load(handle)

            # create candidate lists
            candidate_lists = {}
            for mention in mention_entities_counter:
                candidate_lists[mention] = list(mention_entities_counter[mention].keys())

            self.mention_to_candidates_map = candidate_lists

        elif isinstance(candidates, dict):
            self.mention_to_candidates_map = candidates

        # if backoff is enabled, fold all mentions to their normalized (backoff) form
        self.backoff = backoff
        if self.backoff:
            # create a new dictionary for lower cased mentions
            lowercased_mention_to_candidates_map: Dict = {}

            # go through each mention and its candidates; the loop variable is named
            # `candidate_list` so it no longer shadows the constructor parameter
            for mention, candidate_list in self.mention_to_candidates_map.items():
                backoff_mention = self._make_backoff_string(mention)
                # check if backoff mention already seen. If so, merge candidates. Else, create new entry.
                if backoff_mention in lowercased_mention_to_candidates_map:
                    current_candidates = lowercased_mention_to_candidates_map[backoff_mention]
                    lowercased_mention_to_candidates_map[backoff_mention] = set(current_candidates).union(
                        candidate_list
                    )
                else:
                    lowercased_mention_to_candidates_map[backoff_mention] = candidate_list

            # set lowercased version as map
            self.mention_to_candidates_map = lowercased_mention_to_candidates_map

    @staticmethod
    @lru_cache(maxsize=50000)
    def _make_backoff_string(mention: str) -> str:
        # normalize a mention: lower-case, drop Unicode punctuation, collapse spaces.
        # NOTE: cached as a staticmethod on purpose — lru_cache on an instance method
        # would include `self` in the cache key and keep every generator alive (B019)
        backoff_mention = mention.lower()
        backoff_mention = "".join(ch for ch in backoff_mention if category(ch)[0] not in "P")
        backoff_mention = re.sub(" +", " ", backoff_mention)
        return backoff_mention

    def get_candidates(self, mention: str) -> Set[str]:
        """Given a mention, this method returns a set of candidate classes."""
        if self.backoff:
            mention = self._make_backoff_string(mention)

        return set(self.mention_to_candidates_map[mention]) if mention in self.mention_to_candidates_map else set()
class EntityLinker(flair.nn.DefaultClassifier[Sentence, Span]):
    """Entity Linking Model.

    The model expects text/sentences with annotated entity mentions and predicts entities to these mentions.
    To this end a word embedding is used to embed the sentences and the embedding of the entity mention goes through a linear layer to get the actual class label.
    The model is able to predict '<unk>' for entity mentions that the model can not confidently match to any of the known labels.
    """

    def __init__(
        self,
        embeddings: flair.embeddings.TokenEmbeddings,
        label_dictionary: Dictionary,
        pooling_operation: str = "first_last",
        label_type: str = "nel",
        candidates: Optional[CandidateGenerator] = None,
        **classifierargs,
    ) -> None:
        """Initializes an EntityLinker.

        :param embeddings: embeddings used to embed the words/sentences
        :param label_dictionary: dictionary that gives ids to all classes. Should contain <unk>
        :param pooling_operation: either 'average', 'first', 'last' or 'first_last'. Specifies the way of how text representations of entity mentions (with more than one word) are handled.
            E.g. 'average' means that as text representation we take the average of the embeddings of the words in the mention. 'first_last' concatenates
            the embedding of the first and the embedding of the last word.
        :param label_type: name of the label you use.
        :param candidates: optional CandidateGenerator; if given, predictions are restricted to each mention's candidate set (see _mask_scores).
        """
        super().__init__(
            embeddings=embeddings,
            label_dictionary=label_dictionary,
            # first_last concatenates two token embeddings, so the classifier input is twice as wide
            final_embedding_size=embeddings.embedding_length * 2
            if pooling_operation == "first_last"
            else embeddings.embedding_length,
            **classifierargs,
        )
        self.pooling_operation = pooling_operation
        self._label_type = label_type
        # dispatch table mapping the pooling name to its span-aggregation function
        cases: Dict[str, Callable[[Span, List[str]], torch.Tensor]] = {
            "average": self.emb_mean,
            "first": self.emb_first,
            "last": self.emb_last,
            "first_last": self.emb_firstAndLast,
        }
        if pooling_operation not in cases:
            raise KeyError('pooling_operation has to be one of "average", "first", "last" or "first_last"')
        self.aggregated_embedding = cases[pooling_operation]
        self.candidates = candidates
        self.to(flair.device)

    def emb_first(self, span: Span, embedding_names):
        # span representation = embedding of the first token
        return span.tokens[0].get_embedding(embedding_names)

    def emb_last(self, span: Span, embedding_names):
        # span representation = embedding of the last token
        return span.tokens[-1].get_embedding(embedding_names)

    def emb_firstAndLast(self, span: Span, embedding_names):
        # span representation = concatenation of first and last token embeddings
        return torch.cat(
            (span.tokens[0].get_embedding(embedding_names), span.tokens[-1].get_embedding(embedding_names)), 0
        )

    def emb_mean(self, span, embedding_names):
        # span representation = mean over all token embeddings in the span
        return torch.mean(torch.stack([token.get_embedding(embedding_names) for token in span], 0), 0)

    def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:
        # each annotated entity mention (span) is one classification data point
        return sentence.get_spans(self.label_type)

    def _filter_data_point(self, data_point: Sentence) -> bool:
        # skip sentences without any mention of the configured label type
        return bool(data_point.get_labels(self.label_type))

    def _get_embedding_for_data_point(self, prediction_data_point: Span) -> torch.Tensor:
        # pool token embeddings of the mention with the configured operation
        return self.aggregated_embedding(prediction_data_point, self.embeddings.get_names())

    def _get_state_dict(self):
        """Serializes the model together with its hyperparameters."""
        model_state = {
            **super()._get_state_dict(),
            "word_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_type": self.label_type,
            "label_dictionary": self.label_dictionary,
            "pooling_operation": self.pooling_operation,
            "loss_weights": self.weight_dict,
            "candidates": self.candidates,
        }
        return model_state

    def _print_predictions(self, batch, gold_label_type):
        # one evaluation line per annotated span: "mention" / gold --> predicted (✓/❌)
        lines = []
        for datapoint in batch:
            eval_line = f"\n{datapoint.to_original_text()}\n"
            for span in datapoint.get_spans(gold_label_type):
                symbol = "✓" if span.get_label(gold_label_type).value == span.get_label("predicted").value else "❌"
                eval_line += (
                    f' - "{span.text}" / {span.get_label(gold_label_type).value}'
                    f' --> {span.get_label("predicted").value} ({symbol})\n'
                )
            lines.append(eval_line)
        return lines

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        # remap state dict for models serialized with Flair <= 0.11.3
        import re

        state_dict = state["state_dict"]
        for key in list(state_dict.keys()):
            state_dict[re.sub("^word_embeddings\\.", "embeddings.", key)] = state_dict.pop(key)
        # `state.get(...)` with defaults keeps older checkpoints loadable
        return super()._init_model_with_state_dict(
            state,
            embeddings=state.get("word_embeddings"),
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type"),
            pooling_operation=state.get("pooling_operation"),
            loss_weights=state.get("loss_weights", {"<unk>": 0.3}),
            candidates=state.get("candidates", None),
            **kwargs,
        )

    @property
    def label_type(self):
        # annotation layer this model reads gold labels from and writes predictions to
        return self._label_type

    def _mask_scores(self, scores: torch.Tensor, data_points: List[Span]):
        """Restricts class scores to each mention's candidate set (if a generator is configured).

        Non-candidate classes are set to -inf so they can never win the argmax.
        """
        if not self.candidates:
            return scores
        # start from an all -inf matrix and copy over only the candidate columns;
        # the multiplication produces a non-leaf tensor, so in-place writes are allowed
        masked_scores = -torch.inf * torch.ones(scores.size(), requires_grad=True, device=flair.device)
        for idx, span in enumerate(data_points):
            # get the candidates
            candidate_set = self.candidates.get_candidates(span.text)
            # during training, add the gold value as candidate
            if self.training:
                candidate_set.add(span.get_label(self.label_type).value)
            # <unk> is always a valid prediction (mention not linkable)
            candidate_set.add("<unk>")
            indices_of_candidates = [self.label_dictionary.get_idx_for_item(candidate) for candidate in candidate_set]
            masked_scores[idx, indices_of_candidates] = scores[idx, indices_of_candidates]
        return masked_scores

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "EntityLinker":
        from typing import cast

        # the base class performs the loading; the cast only narrows the static type
        return cast("EntityLinker", super().load(model_path=model_path))
| 9,722 | 41.458515 | 188 | py |
flair | flair-master/flair/models/text_classification_model.py | import logging
from pathlib import Path
from typing import Any, Dict, List, Union
import torch
import flair.embeddings
import flair.nn
from flair.data import Sentence
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class TextClassifier(flair.nn.DefaultClassifier[Sentence, Sentence]):
    """Text Classification Model.

    The model takes word embeddings, puts them into an RNN to obtain a text
    representation, and puts the text representation in the end into a linear
    layer to get the actual class label. The model can handle single and multi
    class data sets.
    """

    def __init__(
        self,
        embeddings: flair.embeddings.DocumentEmbeddings,
        label_type: str,
        **classifierargs,
    ) -> None:
        """Initializes a TextClassifier.

        :param embeddings: embeddings used to embed each data point
        :param label_type: name of the label type that this classifier predicts
        :param classifierargs: keyword arguments forwarded to DefaultClassifier, e.g.
            label_dictionary (dictionary of labels you want to predict), multi_label
            (auto-detected by default, but you can set this to True to force multi-label
            prediction or False to force single-label prediction), multi_label_threshold
            (if multi-label you can set the threshold to make predictions) and
            loss_weights (dictionary of weights for labels for the loss function; if any
            label's weight is unspecified it will default to 1.0)
        """
        super().__init__(
            **classifierargs,
            embeddings=embeddings,
            final_embedding_size=embeddings.embedding_length,
        )
        self._label_type = label_type
        # auto-spawn on GPU if available
        self.to(flair.device)

    def _get_embedding_for_data_point(self, prediction_data_point: Sentence) -> torch.Tensor:
        # classification uses the document-level embedding of the whole sentence
        embedding_names = self.embeddings.get_names()
        return prediction_data_point.get_embedding(embedding_names)

    def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Sentence]:
        # each sentence is itself the single data point to classify
        return [sentence]

    def _get_state_dict(self):
        """Returns the state dictionary, including everything needed to re-instantiate the model."""
        model_state = {
            **super()._get_state_dict(),
            "document_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_dictionary": self.label_dictionary,
            "label_type": self.label_type,
            "multi_label": self.multi_label,
            "multi_label_threshold": self.multi_label_threshold,
            "weight_dict": self.weight_dict,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Re-instantiates a TextClassifier from a serialized state dictionary."""
        import re

        # remap state dict for models serialized with Flair <= 0.11.3
        state_dict = state["state_dict"]
        for key in list(state_dict.keys()):
            state_dict[re.sub("^document_embeddings\\.", "embeddings.", key)] = state_dict.pop(key)
        return super()._init_model_with_state_dict(
            state,
            embeddings=state.get("document_embeddings"),
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type"),
            multi_label=state.get("multi_label"),
            multi_label_threshold=state.get("multi_label_threshold", 0.5),
            loss_weights=state.get("weight_dict"),
            **kwargs,
        )

    @staticmethod
    def _fetch_model(model_name) -> str:
        """Maps well-known model names to download URLs and fetches them; other names pass through unchanged."""
        model_map = {}
        hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
        model_map["de-offensive-language"] = "/".join(
            [hu_path, "de-offensive-language", "germ-eval-2018-task-1-v0.8.pt"]
        )
        # English sentiment models
        model_map["sentiment"] = "/".join(
            [
                hu_path,
                "sentiment-curated-distilbert",
                "sentiment-en-mix-distillbert_4.pt",
            ]
        )
        model_map["en-sentiment"] = "/".join(
            [
                hu_path,
                "sentiment-curated-distilbert",
                "sentiment-en-mix-distillbert_4.pt",
            ]
        )
        model_map["sentiment-fast"] = "/".join(
            [hu_path, "sentiment-curated-fasttext-rnn", "sentiment-en-mix-ft-rnn_v8.pt"]
        )
        # Communicative Functions Model
        model_map["communicative-functions"] = "/".join([hu_path, "comfunc", "communicative-functions.pt"])
        cache_dir = Path("models")
        if model_name in model_map:
            model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
        return model_name

    @property
    def label_type(self):
        """The label type that this model predicts."""
        return self._label_type

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TextClassifier":
        """Loads a TextClassifier from a file path, URL or state dictionary."""
        from typing import cast

        return cast("TextClassifier", super().load(model_path=model_path))
| 4,864 | 34.510949 | 114 | py |
flair | flair-master/flair/models/relation_extractor_model.py | import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import torch
import flair.embeddings
import flair.nn
from flair.data import Relation, Sentence
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class RelationExtractor(flair.nn.DefaultClassifier[Sentence, Relation]):
    """Relation extraction model that classifies ordered pairs of entity spans within a sentence."""

    def __init__(
        self,
        embeddings: flair.embeddings.TokenEmbeddings,
        label_type: str,
        entity_label_type: str,
        entity_pair_filters: Optional[List[Tuple[str, str]]] = None,
        pooling_operation: str = "first_last",
        train_on_gold_pairs_only: bool = False,
        **classifierargs,
    ) -> None:
        """Initializes a RelationClassifier.

        :param embeddings: token embeddings used to embed the entity spans
        :param label_type: name of the relation label type that this model predicts
        :param entity_label_type: name of the label type under which entity spans are annotated
        :param entity_pair_filters: if given, only entity pairs whose (first tag, second tag)
            combination appears in this list are considered
        :param pooling_operation: 'first_last' concatenates the first and last token embedding
            of each span; any other value uses only each span's first token embedding
        :param train_on_gold_pairs_only: Set true to not train to predict no relation.
        :param classifierargs: keyword arguments forwarded to DefaultClassifier, e.g.
            loss_weights (dictionary of weights for labels for the loss function; if any
            label's weight is unspecified it will default to 1.0)
        """
        # pooling operation to get embeddings for entites
        self.pooling_operation = pooling_operation
        relation_representation_length = 2 * embeddings.embedding_length
        if self.pooling_operation == "first_last":
            relation_representation_length *= 2
        super().__init__(
            embeddings=embeddings,
            final_embedding_size=relation_representation_length,
            **classifierargs,
        )
        # set embeddings
        self.embeddings: flair.embeddings.TokenEmbeddings = embeddings
        # set relation and entity label types
        self._label_type = label_type
        self.entity_label_type = entity_label_type
        self.train_on_gold_pairs_only = train_on_gold_pairs_only
        # whether to use gold entity pairs, and whether to filter entity pairs by type
        if entity_pair_filters is not None:
            self.entity_pair_filters: Optional[Set[Tuple[str, str]]] = set(entity_pair_filters)
        else:
            self.entity_pair_filters = None
        self.to(flair.device)

    def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Relation]:
        """Enumerates candidate Relation data points from all ordered pairs of distinct entity spans."""
        entity_pairs = []
        entity_spans = sentence.get_spans(self.entity_label_type)
        for span_1 in entity_spans:
            for span_2 in entity_spans:
                if span_1 == span_2:
                    continue
                # filter entity pairs according to their tags if set
                if (
                    self.entity_pair_filters is not None
                    and (
                        span_1.get_label(self.entity_label_type).value,
                        span_2.get_label(self.entity_label_type).value,
                    )
                    not in self.entity_pair_filters
                ):
                    continue
                relation = Relation(span_1, span_2)
                # optionally skip "no relation" ("O") pairs during training
                if self.training and self.train_on_gold_pairs_only and relation.get_label(self.label_type).value == "O":
                    continue
                entity_pairs.append(relation)
        return entity_pairs

    def _get_embedding_for_data_point(self, prediction_data_point: Relation) -> torch.Tensor:
        """Builds the relation representation by concatenating span token embeddings per the pooling operation."""
        span_1 = prediction_data_point.first
        span_2 = prediction_data_point.second
        embedding_names = self.embeddings.get_names()
        if self.pooling_operation == "first_last":
            return torch.cat(
                [
                    span_1.tokens[0].get_embedding(embedding_names),
                    span_1.tokens[-1].get_embedding(embedding_names),
                    span_2.tokens[0].get_embedding(embedding_names),
                    span_2.tokens[-1].get_embedding(embedding_names),
                ]
            )
        else:
            return torch.cat(
                [span_1.tokens[0].get_embedding(embedding_names), span_2.tokens[0].get_embedding(embedding_names)]
            )

    def _print_predictions(self, batch, gold_label_type):
        """Formats gold vs. predicted relation labels of a batch for evaluation logs."""
        lines = []
        for datapoint in batch:
            eval_line = f"\n{datapoint.to_original_text()}\n"
            for relation in datapoint.get_relations(gold_label_type):
                symbol = (
                    "✓" if relation.get_label(gold_label_type).value == relation.get_label("predicted").value else "❌"
                )
                eval_line += (
                    f' - "{relation.text}"\t{relation.get_label(gold_label_type).value}'
                    f' --> {relation.get_label("predicted").value} ({symbol})\n'
                )
            lines.append(eval_line)
        return lines

    def _get_state_dict(self):
        """Returns the state dictionary, including everything needed to re-instantiate the model."""
        model_state = {
            **super()._get_state_dict(),
            "embeddings": self.embeddings.save_embeddings(use_state_dict=False),
            "label_dictionary": self.label_dictionary,
            "label_type": self.label_type,
            "entity_label_type": self.entity_label_type,
            "weight_dict": self.weight_dict,
            "pooling_operation": self.pooling_operation,
            "entity_pair_filters": self.entity_pair_filters,
            "train_on_gold_pairs_only": self.train_on_gold_pairs_only,
        }
        return model_state

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Re-instantiates a RelationExtractor from a serialized state dictionary."""
        return super()._init_model_with_state_dict(
            state,
            embeddings=state.get("embeddings"),
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type"),
            entity_label_type=state.get("entity_label_type"),
            loss_weights=state.get("weight_dict"),
            pooling_operation=state.get("pooling_operation"),
            entity_pair_filters=state.get("entity_pair_filters"),
            train_on_gold_pairs_only=state.get("train_on_gold_pairs_only", False),
            **kwargs,
        )

    @property
    def label_type(self):
        """The relation label type that this model predicts."""
        return self._label_type

    @staticmethod
    def _fetch_model(model_name) -> str:
        """Maps well-known model names to download URLs and fetches them; other names pass through unchanged."""
        model_map = {}
        hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
        model_map["relations"] = "/".join([hu_path, "relations", "relations-v11.pt"])
        cache_dir = Path("models")
        if model_name in model_map:
            model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
        return model_name

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "RelationExtractor":
        """Loads a RelationExtractor from a file path, URL or state dictionary."""
        from typing import cast

        return cast("RelationExtractor", super().load(model_path=model_path))
| 6,828 | 37.801136 | 120 | py |
flair | flair-master/flair/models/tars_model.py | import logging
import typing
from abc import ABC
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import minmax_scale
from tqdm import tqdm
import flair
from flair.data import Corpus, Dictionary, Sentence, Span
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import (
TokenEmbeddings,
TransformerDocumentEmbeddings,
TransformerWordEmbeddings,
)
from flair.file_utils import cached_path
from flair.models.sequence_tagger_model import SequenceTagger
from flair.models.text_classification_model import TextClassifier
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
class FewshotClassifier(flair.nn.Classifier[Sentence], ABC):
    """Base class for TARS few-shot models that can manage and switch between multiple prediction tasks."""

    def __init__(self) -> None:
        # currently active task name; task attributes are stored per task name
        self._current_task = None
        self._task_specific_attributes: Dict[str, Dict[str, Any]] = {}
        # label -> {other label -> similarity}, populated before each training epoch
        self.label_nearest_map = None
        self.tars_model: flair.nn.Classifier[Sentence]
        self.separator: str

        super().__init__()

    def forward_loss(self, data_points: Union[List[Sentence], Sentence]) -> Tuple[torch.Tensor, int]:
        """Computes the loss on TARS-reformatted <label, text> pairs built from the input sentences."""
        if not isinstance(data_points, list):
            data_points = [data_points]

        # Transform input data into TARS format
        sentences = self._get_tars_formatted_sentences(data_points)

        loss, count = self.tars_model.forward_loss(sentences)
        return loss, count

    @property
    def tars_embeddings(self):
        raise NotImplementedError

    def _get_tars_formatted_sentence(self, label, sentence):
        raise NotImplementedError

    def _get_tars_formatted_sentences(self, sentences: List[Sentence]):
        """Creates one TARS <label, text> sentence per relevant label for each input sentence.

        During training with negative sampling, each sentence is paired with its gold labels
        plus sampled negative labels; otherwise it is paired with every label of the task.
        """
        label_text_pairs = []
        all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
        for sentence in sentences:
            label_text_pairs_for_sentence = []
            if self.training and self.num_negative_labels_to_sample is not None:
                # deduplicate gold labels while preserving order
                positive_labels = list(
                    OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)])
                )
                sampled_negative_labels = self._get_nearest_labels_for(positive_labels)
                for label in positive_labels:
                    label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
                for label in sampled_negative_labels:
                    label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
            else:
                for label in all_labels:
                    label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
            label_text_pairs.extend(label_text_pairs_for_sentence)

        return label_text_pairs

    def _get_nearest_labels_for(self, labels):
        """Samples negative labels for the given positive labels, weighted by label-name similarity."""
        # if there are no labels, return a random sample as negatives
        if not labels:
            tags = self.get_current_label_dictionary().get_items()
            import random

            sample = random.sample(tags, k=self.num_negative_labels_to_sample)
            return sample

        already_sampled_negative_labels = set()

        # otherwise, go through all labels
        for label in labels:
            plausible_labels = []
            plausible_label_probabilities = []
            for plausible_label in self.label_nearest_map[label]:
                # never sample a gold label or one already chosen as negative
                if plausible_label in already_sampled_negative_labels or plausible_label in labels:
                    continue
                plausible_labels.append(plausible_label)
                plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label])

            # make sure the probabilities always sum up to 1
            plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64")
            plausible_label_probabilities += 1e-08
            plausible_label_probabilities /= np.sum(plausible_label_probabilities)

            if len(plausible_labels) > 0:
                num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels))
                sampled_negative_labels = np.random.default_rng().choice(
                    plausible_labels,
                    num_samples,
                    replace=False,
                    p=plausible_label_probabilities,
                )
                already_sampled_negative_labels.update(sampled_negative_labels)

        return already_sampled_negative_labels

    def train(self, mode=True):
        """Populate label similarity map based on cosine similarity before running epoch.

        If the `num_negative_labels_to_sample` is set to an integer value then before starting
        each epoch the model would create a similarity measure between the label names based
        on cosine distances between their BERT encoded embeddings.
        """
        if mode and self.num_negative_labels_to_sample is not None:
            self._compute_label_similarity_for_current_epoch()
        super().train(mode)

    def _compute_label_similarity_for_current_epoch(self):
        """Compute the similarity between all labels for better sampling of negatives."""
        # get and embed all labels by making a Sentence object that contains only the label text
        all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
        label_sentences = [Sentence(label) for label in all_labels]

        self.tars_embeddings.eval()  # TODO: check if this is necessary
        self.tars_embeddings.embed(label_sentences)
        self.tars_embeddings.train()

        # get each label embedding and scale between 0 and 1
        if isinstance(self.tars_embeddings, TokenEmbeddings):
            encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences]
        else:
            encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences]
        normalized_encoding = minmax_scale(encodings_np)

        # compute similarity matrix
        similarity_matrix = cosine_similarity(normalized_encoding)

        # the higher the similarity, the greater the chance that a label is
        # sampled as negative example
        negative_label_probabilities = {}
        for row_index, label in enumerate(all_labels):
            negative_label_probabilities[label] = {}
            for column_index, other_label in enumerate(all_labels):
                if label != other_label:
                    negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index]
        self.label_nearest_map = negative_label_probabilities

    def get_current_label_dictionary(self):
        """Returns the label dictionary of the currently active task."""
        return self._task_specific_attributes[self._current_task]["label_dictionary"]

    def get_current_label_type(self):
        """Returns the label type of the currently active task."""
        return self._task_specific_attributes[self._current_task]["label_type"]

    def is_current_task_multi_label(self):
        """Returns whether the currently active task is a multi-label problem."""
        return self._task_specific_attributes[self._current_task]["multi_label"]

    def add_and_switch_to_new_task(
        self,
        task_name: str,
        label_dictionary: Union[List, Set, Dictionary, str],
        label_type: str,
        multi_label: bool = True,
        force_switch: bool = False,
    ):
        """Adds a new task to an existing TARS model.

        Sets necessary attributes and finally 'switches' to the new task. Parameters are similar to the constructor
        except for model choice, batch size and negative sampling. This method does not store the resultant model onto
        disk.

        :param task_name: a string depicting the name of the task
        :param label_dictionary: dictionary of the labels you want to predict
        :param label_type: string to identify the label type ('ner', 'sentiment', etc.)
        :param multi_label: whether this task is a multi-label prediction problem
        :param force_switch: if True, will overwrite existing task with same name
        """
        if task_name in self._task_specific_attributes and not force_switch:
            log.warning("Task `%s` already exists in TARS model. Switching to it.", task_name)
        else:
            # make label dictionary if no Dictionary object is passed
            if isinstance(label_dictionary, Dictionary):
                label_dictionary = label_dictionary.get_items()
            if isinstance(label_dictionary, str):
                label_dictionary = [label_dictionary]

            # prepare dictionary of tags (without B- I- prefixes and without UNK)
            tag_dictionary = Dictionary(add_unk=False)
            for tag in label_dictionary:
                if tag in ("<unk>", "O"):
                    continue
                # strip BIOES-style prefix ("B-", "I-", ...) if present
                if len(tag) > 1 and tag[1] == "-":
                    tag = tag[2:]
                tag_dictionary.add_item(tag)

            self._task_specific_attributes[task_name] = {
                "label_dictionary": tag_dictionary,
                "label_type": label_type,
                "multi_label": multi_label,
            }

        self.switch_to_task(task_name)

    def list_existing_tasks(self) -> Set[str]:
        """Lists existing tasks in the loaded TARS model on the console."""
        return set(self._task_specific_attributes.keys())

    def switch_to_task(self, task_name):
        """Switches to a task which was previously added."""
        if task_name not in self._task_specific_attributes:
            log.error(
                "Provided `%s` does not exist in the model. Consider calling `add_and_switch_to_new_task` first.",
                task_name,
            )
        else:
            self._current_task = task_name

    def _drop_task(self, task_name):
        """Removes a task from the model; the currently active task cannot be dropped."""
        if task_name in self._task_specific_attributes:
            if self._current_task == task_name:
                log.error(
                    "`%s` is the current task. Switch to some other task before dropping this.",
                    task_name,
                )
            else:
                self._task_specific_attributes.pop(task_name)
        else:
            log.warning("No task exists with the name `%s`.", task_name)

    @staticmethod
    def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
        """Drops sentences without tokens and logs how many were removed."""
        filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
        if len(sentences) != len(filtered_sentences):
            log.warning("Ignore %d sentence(s) with no tokens.", len(sentences) - len(filtered_sentences))
        return filtered_sentences

    @property
    def label_type(self):
        """The label type of the currently active task."""
        return self.get_current_label_type()

    def predict_zero_shot(
        self,
        sentences: Union[List[Sentence], Sentence],
        candidate_label_set: Union[List[str], Set[str], str],
        multi_label: bool = True,
    ):
        """Make zero shot predictions from the TARS model.

        :param sentences: input sentence objects to classify
        :param candidate_label_set: set of candidate labels
        :param multi_label: indicates whether multi-label or single class prediction. Defaults to True.
        """
        # check if candidate_label_set is empty
        if candidate_label_set is None or len(candidate_label_set) == 0:
            log.warning("Provided candidate_label_set is empty")
            return

        # make list if only one candidate label is passed
        if isinstance(candidate_label_set, str):
            candidate_label_set = {candidate_label_set}

        # create label dictionary
        label_dictionary = Dictionary(add_unk=False)
        for label in candidate_label_set:
            label_dictionary.add_item(label)

        # note current task
        existing_current_task = self._current_task

        # create a temporary task
        self.add_and_switch_to_new_task(
            task_name="ZeroShot",
            label_dictionary=label_dictionary,
            label_type="-".join(label_dictionary.get_items()),
            multi_label=multi_label,
            force_switch=True,  # overwrite any older configuration
        )

        try:
            # make zero shot predictions
            self.predict(sentences)
        finally:
            # switch to the pre-existing task even if prediction fails
            self.switch_to_task(existing_current_task)
            self._drop_task("ZeroShot")

    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        """Yields tokens used by the base model plus all label names and the separator token."""
        yield from super().get_used_tokens(corpus)
        for label in self.get_current_label_dictionary().idx2item:
            yield [label.decode("utf-8")]
        yield [self.separator]

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "FewshotClassifier":
        """Loads a FewshotClassifier from a file path, URL or state dictionary."""
        from typing import cast

        return cast("FewshotClassifier", super().load(model_path=model_path))
class TARSTagger(FewshotClassifier):
    """TARS model for sequence tagging.

    In the backend, the model uses a BERT based 5-class sequence labeler which given a <label, text> pair predicts the
    probability for each word to belong to one of the BIOES classes. The input data is a usual Sentence object which
    is inflated by the model internally before pushing it through the transformer stack of BERT.
    """

    static_label_type = "tars_label"

    def __init__(
        self,
        task_name: Optional[str] = None,
        label_dictionary: Optional[Dictionary] = None,
        label_type: Optional[str] = None,
        embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased",
        num_negative_labels_to_sample: int = 2,
        prefix: bool = True,
        **tagger_args,
    ) -> None:
        """Initializes a TarsTagger.

        :param task_name: a string depicting the name of the task
        :param label_dictionary: dictionary of labels you want to predict
        :param label_type: string to identify the label type ('ner', etc.)
        :param embeddings: name of the pre-trained transformer model e.g.,
            'bert-base-uncased' etc
        :param num_negative_labels_to_sample: number of negative labels to sample for each
            positive labels against a sentence during training. Defaults to 2 negative
            labels for each positive label. The model would sample all the negative labels
            if None is passed. That slows down the training considerably.
        :param prefix: if True the label text is prepended to the sentence, otherwise appended
        """
        super().__init__()

        if isinstance(embeddings, str):
            embeddings = TransformerWordEmbeddings(
                model=embeddings,
                fine_tune=True,
                layers="-1",
                layer_mean=False,
            )

        # prepare TARS dictionary
        tars_dictionary = Dictionary(add_unk=False)
        tars_dictionary.add_item("entity")
        tars_dictionary.span_labels = True

        # initialize a bare-bones sequence tagger
        self.tars_model: SequenceTagger = SequenceTagger(
            hidden_size=123,
            embeddings=embeddings,
            tag_dictionary=tars_dictionary,
            tag_type=self.static_label_type,
            use_crf=False,
            use_rnn=False,
            reproject_embeddings=False,
            **tagger_args,
        )

        # transformer separator
        self.separator = str(self.tars_embeddings.tokenizer.sep_token)
        if self.tars_embeddings.tokenizer._bos_token:
            self.separator += str(self.tars_embeddings.tokenizer.bos_token)

        self.prefix = prefix
        self.num_negative_labels_to_sample = num_negative_labels_to_sample

        if task_name and label_dictionary and label_type:
            # Store task specific labels since TARS can handle multiple tasks
            self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
        else:
            log.info(
                "TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
                "before training this model"
            )

    def _get_tars_formatted_sentence(self, label, sentence):
        """Builds the TARS sentence '<label> <sep> <text>' (or text-first) with entity spans marked for `label`."""
        original_text = sentence.to_tokenized_string()
        label_text_pair = (
            f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
        )
        # token offset introduced by the prepended label + separator
        label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" "))

        # make a tars sentence where all labels are O by default
        tars_sentence = Sentence(label_text_pair, use_tokenizer=False)

        for entity_label in sentence.get_labels(self.label_type):
            if entity_label.value == label:
                new_span = Span(
                    [tars_sentence.get_token(token.idx + label_length) for token in entity_label.data_point]
                )
                new_span.add_label(self.static_label_type, value="entity")
        tars_sentence.copy_context_from_sentence(sentence)
        return tars_sentence

    def _get_state_dict(self):
        """Returns the state dictionary, including task-specific attributes of all registered tasks."""
        model_state = {
            **super()._get_state_dict(),
            "current_task": self._current_task,
            "tag_type": self.get_current_label_type(),
            "tag_dictionary": self.get_current_label_dictionary(),
            "tars_embeddings": self.tars_model.embeddings.save_embeddings(use_state_dict=False),
            "num_negative_labels_to_sample": self.num_negative_labels_to_sample,
            "prefix": self.prefix,
            "task_specific_attributes": self._task_specific_attributes,
        }
        return model_state

    @staticmethod
    def _fetch_model(model_name) -> str:
        """Maps the 'tars-ner' shorthand to its download URL; other names pass through unchanged."""
        if model_name == "tars-ner":
            cache_dir = Path("models")
            model_name = cached_path(
                "https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt",
                cache_dir=cache_dir,
            )
        return model_name

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Re-instantiates a TARSTagger from a serialized state dictionary."""
        tars_embeddings = state.get("tars_embeddings")
        if tars_embeddings is None:
            # models serialized with older Flair versions stored the whole inner model instead
            tars_model = state["tars_model"]
            tars_embeddings = tars_model.embeddings

        # init new TARS classifier
        model = super()._init_model_with_state_dict(
            state,
            task_name=state.get("current_task"),
            label_dictionary=state.get("tag_dictionary"),
            label_type=state.get("tag_type"),
            embeddings=tars_embeddings,
            num_negative_labels_to_sample=state.get("num_negative_labels_to_sample"),
            prefix=state.get("prefix"),
            **kwargs,
        )

        # set all task information
        model._task_specific_attributes = state["task_specific_attributes"]
        return model

    @property
    def tars_embeddings(self):
        """The transformer word embeddings of the internal sequence tagger."""
        return self.tars_model.embeddings

    def predict(
        self,
        sentences: Union[List[Sentence], Sentence],
        mini_batch_size=32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
        most_probable_first: bool = True,
    ):
        """Predict sequence tags for Named Entity Recognition task.

        Args:
            sentences: a Sentence or a List of Sentence
            mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
                up to a point when it has no more effect.
            return_probabilities_for_all_classes: if True, all classes will be added with their respective confidences.
            verbose: set to True to display a progress bar
            return_loss: set to True to also compute the loss
            label_name: set this to change the name of the label type that is predicted
            embedding_storage_mode: default is 'none' which doesn't store the embeddings in RAM. Only set to 'cpu' or 'gpu'
                if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
                'gpu' to store embeddings in GPU memory.
            most_probable_first: if True, nested predictions will be removed, if False all predictions will be returned,
                including overlaps
        """
        if label_name is None:
            label_name = self.get_current_label_type()

        if not sentences:
            return sentences

        if not isinstance(sentences, list):
            sentences = [sentences]

        Sentence.set_context_for_sentences(sentences)

        # sort longest first to minimize padding within each mini-batch
        reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)

        dataloader = DataLoader(
            dataset=FlairDatapointDataset(reordered_sentences),
            batch_size=mini_batch_size,
        )

        # progress bar for verbosity
        if verbose:
            dataloader = tqdm(dataloader)

        all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]

        overall_loss = 0
        overall_count = 0
        with torch.no_grad():
            for batch in dataloader:
                batch = self._filter_empty_sentences(batch)
                # stop if all sentences are empty
                if not batch:
                    continue

                # build one TARS sentence per <label, sentence> combination
                tars_sentences: List[Sentence] = []
                all_labels_to_sentence: List[Dict[str, Sentence]] = []
                for sentence in batch:
                    # always remove tags first
                    sentence.remove_labels(label_name)
                    labels_to_sentence: Dict[str, Sentence] = {}
                    for label in all_labels:
                        tars_sentence = self._get_tars_formatted_sentence(label, sentence)
                        tars_sentences.append(tars_sentence)
                        labels_to_sentence[label] = tars_sentence
                    all_labels_to_sentence.append(labels_to_sentence)

                loss_and_count = self.tars_model.predict(
                    tars_sentences,
                    label_name=label_name,
                    mini_batch_size=mini_batch_size,
                    return_loss=return_loss,
                )

                if return_loss:
                    overall_loss += loss_and_count[0].item()
                    overall_count += loss_and_count[1]

                # go through each sentence in the batch
                for sentence, labels_to_sentence in zip(batch, all_labels_to_sentence):
                    # always remove tags first
                    sentence.remove_labels(label_name)

                    # collect all spans detected across the per-label TARS sentences
                    all_detected = {}
                    for label, tars_sentence in labels_to_sentence.items():
                        for predicted in tars_sentence.get_labels(label_name):
                            predicted.set_value(label, predicted.score)
                            all_detected[predicted] = predicted.score

                    if most_probable_first:
                        import operator

                        already_set_indices: List[int] = []

                        # greedily keep the highest-scoring spans first (sort then reverse,
                        # matching the original tie order)
                        sorted_spans = sorted(all_detected.items(), key=operator.itemgetter(1))
                        sorted_spans.reverse()
                        for detected_label, _score in sorted_spans:
                            # token offset introduced by the prepended label + separator
                            label_length = (
                                0
                                if not self.prefix
                                else len(detected_label.value.split(" ")) + len(self.separator.split(" "))
                            )

                            # determine whether tokens in this span already have a label
                            tag_this = True
                            for token in detected_label.data_point:
                                corresponding_token = sentence.get_token(token.idx - label_length)
                                if corresponding_token is None or corresponding_token.idx in already_set_indices:
                                    tag_this = False
                                    break

                            # only add if all tokens have no label
                            if tag_this:
                                # make and add a corresponding predicted span
                                predicted_span = Span(
                                    [sentence.get_token(token.idx - label_length) for token in detected_label.data_point]
                                )
                                predicted_span.add_label(
                                    label_name, value=detected_label.value, score=detected_label.score
                                )

                                # set indices so that no token can be tagged twice
                                already_set_indices.extend(token.idx for token in predicted_span)

                # clearing token embeddings to save memory
                store_embeddings(batch, storage_mode=embedding_storage_mode)

        if return_loss:
            return overall_loss, overall_count
        return None

    def _print_predictions(self, batch, gold_label_type):
        """Formats token-level gold vs. predicted BIO tags of a batch in CoNLL style for evaluation logs."""
        lines = []
        if self.tars_model.predict_spans:
            for datapoint in batch:
                # all labels default to "O"
                for token in datapoint:
                    token.set_label("gold_bio", "O")
                    token.set_label("predicted_bio", "O")

                # set gold token-level
                for gold_label in datapoint.get_labels(gold_label_type):
                    gold_span: Span = gold_label.data_point
                    prefix = "B-"
                    for token in gold_span:
                        token.set_label("gold_bio", prefix + gold_label.value)
                        prefix = "I-"

                # set predicted token-level
                for predicted_label in datapoint.get_labels("predicted"):
                    predicted_span: Span = predicted_label.data_point
                    prefix = "B-"
                    for token in predicted_span:
                        token.set_label("predicted_bio", prefix + predicted_label.value)
                        prefix = "I-"

                # now print labels in CoNLL format
                for token in datapoint:
                    eval_line = (
                        f"{token.text} "
                        f"{token.get_label('gold_bio').value} "
                        f"{token.get_label('predicted_bio').value}\n"
                    )
                    lines.append(eval_line)
                lines.append("\n")
        return lines

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TARSTagger":
        """Loads a TARSTagger from a file path, URL or state dictionary."""
        from typing import cast

        return cast("TARSTagger", super().load(model_path=model_path))
class TARSClassifier(FewshotClassifier):
"""TARS model for text classification.
In the backend, the model uses a BERT based binary text classifier which given a <label, text> pair predicts the
probability of two classes "True", and "False". The input data is a usual Sentence object which is inflated
by the model internally before pushing it through the transformer stack of BERT.
"""
static_label_type = "tars_label"
LABEL_MATCH = "YES"
LABEL_NO_MATCH = "NO"
    def __init__(
        self,
        task_name: Optional[str] = None,
        label_dictionary: Optional[Dictionary] = None,
        label_type: Optional[str] = None,
        embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased",
        num_negative_labels_to_sample: int = 2,
        prefix: bool = True,
        **tagger_args,
    ) -> None:
        """Initializes a TarsClassifier.

        :param task_name: a string depicting the name of the task
        :param label_dictionary: dictionary of labels you want to predict
        :param label_type: string to identify the label type ('sentiment', etc.)
        :param embeddings: name of the pre-trained transformer model e.g.,
            'bert-base-uncased' etc
        :param num_negative_labels_to_sample: number of negative labels to sample for each
            positive labels against a sentence during training. Defaults to 2 negative
            labels for each positive label. The model would sample all the negative labels
            if None is passed. That slows down the training considerably.
        :param prefix: if True the label text is prepended to the sentence, otherwise appended
        :param tagger_args: forwarded to the internal TextClassifier, e.g. multi_label
            (auto-detected by default, but you can set this to True to force multi-label
            prediction or False to force single-label prediction), multi_label_threshold
            (if multi-label you can set the threshold to make predictions) and beta
            (parameter for F-beta score for evaluation and training annealing)
        """
        super().__init__()

        if isinstance(embeddings, str):
            embeddings = TransformerDocumentEmbeddings(
                model=embeddings,
                fine_tune=True,
                layers="-1",
                layer_mean=False,
            )

        # prepare TARS dictionary
        tars_dictionary = Dictionary(add_unk=False)
        tars_dictionary.add_item(self.LABEL_NO_MATCH)
        tars_dictionary.add_item(self.LABEL_MATCH)

        # initialize a bare-bones sequence tagger
        self.tars_model = TextClassifier(
            embeddings=embeddings,
            label_dictionary=tars_dictionary,
            label_type=self.static_label_type,
            **tagger_args,
        )

        # transformer separator
        self.separator = str(self.tars_embeddings.tokenizer.sep_token)
        if self.tars_embeddings.tokenizer._bos_token:
            self.separator += str(self.tars_embeddings.tokenizer.bos_token)

        self.prefix = prefix
        self.num_negative_labels_to_sample = num_negative_labels_to_sample

        if task_name and label_dictionary and label_type:
            # Store task specific labels since TARS can handle multiple tasks
            self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
        else:
            log.info(
                "TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
                "before training this model"
            )

        # if True, underscores in label names are replaced by spaces before matching (see _clean)
        self.clean_up_labels = True
def _clean(self, label_value: str) -> str:
if self.clean_up_labels:
return label_value.replace("_", " ")
else:
return label_value
def _get_tars_formatted_sentence(self, label, sentence):
label = self._clean(label)
original_text = sentence.to_tokenized_string()
label_text_pair = (
f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
)
sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())]
tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH
tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label)
tars_sentence.copy_context_from_sentence(sentence)
return tars_sentence
def _get_state_dict(self):
model_state = {
**super()._get_state_dict(),
"current_task": self._current_task,
"tars_embeddings": self.tars_model.embeddings.save_embeddings(use_state_dict=False),
"num_negative_labels_to_sample": self.num_negative_labels_to_sample,
"task_specific_attributes": self._task_specific_attributes,
}
if self._current_task is not None:
model_state.update(
{
"label_type": self.get_current_label_type(),
"label_dictionary": self.get_current_label_dictionary(),
}
)
return model_state
    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Reconstruct a TARSClassifier from a serialized state dict.

        Handles backward compatibility with checkpoints written by Flair <= 0.11.3,
        which stored the inner model's embeddings under ``document_embeddings``
        instead of ``embeddings``.
        """
        # get the serialized embeddings
        tars_embeddings = state.get("tars_embeddings")

        # fallback for old checkpoints: take the embeddings from the serialized inner model object
        if tars_embeddings is None:
            tars_model = state["tars_model"]
            if hasattr(tars_model, "embeddings"):
                tars_embeddings = tars_model.embeddings
            else:
                tars_embeddings = tars_model.document_embeddings

        # remap state dict for models serialized with Flair <= 0.11.3
        import re

        state_dict = state["state_dict"]
        for key in list(state_dict.keys()):
            state_dict[re.sub("^tars_model.document_embeddings\\.", "tars_model.embeddings.", key)] = state_dict.pop(
                key
            )

        # init new TARS classifier
        model: TARSClassifier = super()._init_model_with_state_dict(
            state,
            task_name=state["current_task"],
            label_dictionary=state.get("label_dictionary"),
            label_type=state.get("label_type", "default_label"),
            embeddings=tars_embeddings,
            num_negative_labels_to_sample=state.get("num_negative_labels_to_sample"),
            **kwargs,
        )

        # set all task information
        model._task_specific_attributes = state.get("task_specific_attributes")

        return model
@staticmethod
def _fetch_model(model_name) -> str:
model_map = {}
hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"])
cache_dir = Path("models")
if model_name in model_map:
model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
return model_name
@property
def tars_embeddings(self):
return self.tars_model.embeddings
    def predict(
        self,
        sentences: Union[List[Sentence], Sentence],
        mini_batch_size=32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
        label_threshold: float = 0.5,
        multi_label: Optional[bool] = None,
        force_label: bool = False,
    ):
        """Predict sentences on the Text Classification task.

        Each candidate label of the current task is paired with each sentence and scored by
        the underlying binary TARS model; labels scoring above the threshold (or only the
        single best label in single-label mode) are attached to the sentence.

        Args:
            sentences: a Sentence or a List of Sentence
            mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
                up to a point when it has no more effect.
            return_probabilities_for_all_classes: if True, all classes will be added with their respective confidences.
            verbose: set to True to display a progress bar
            label_name: set this to change the name of the label type that is predicted
            return_loss: set to True to also compute the loss
            embedding_storage_mode: default is 'none' which doesn't store the embeddings in RAM. Only set to 'cpu' or 'gpu' if
                you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
                'gpu' to store embeddings in GPU memory.
            label_threshold: when multi_label, specify the threshold when a class is considered as predicted.
            multi_label: if True multiple labels can be predicted. Defaults to the setting of the configured task.
            force_label: when multilabel is active, you can force to always get at least one prediction.
        """
        if label_name is None:
            label_name = self.get_current_label_type()

        if multi_label is None:
            multi_label = self.is_current_task_multi_label()

        # in single-label mode the highest-scoring label wins regardless of threshold
        if not multi_label:
            label_threshold = 0.0

        if not sentences:
            return sentences

        if isinstance(sentences, Sentence):
            sentences = [sentences]

        Sentence.set_context_for_sentences(sentences)

        # sort by length to reduce padding inside each mini-batch
        reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)

        dataloader = DataLoader(
            dataset=FlairDatapointDataset(reordered_sentences),
            batch_size=mini_batch_size,
        )

        # progress bar for verbosity
        if verbose:
            progressbar = tqdm(dataloader)
            progressbar.set_description("Batch inference")
            dataloader = progressbar

        overall_loss = 0
        overall_count = 0

        all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]

        with torch.no_grad():
            for batch in dataloader:
                batch = self._filter_empty_sentences(batch)
                # stop if all sentences are empty
                if not batch:
                    continue

                # pair every candidate label with every sentence in the batch
                tars_sentences: List[Sentence] = []
                all_labels_to_sentence: List[Dict[str, Sentence]] = []
                for sentence in batch:
                    # always remove tags first
                    sentence.remove_labels(label_name)
                    labels_to_sentence: Dict[str, Sentence] = {}
                    for label in all_labels:
                        tars_sentence = self._get_tars_formatted_sentence(label, sentence)
                        tars_sentences.append(tars_sentence)
                        labels_to_sentence[label] = tars_sentence
                    all_labels_to_sentence.append(labels_to_sentence)

                loss_and_count = self.tars_model.predict(
                    tars_sentences,
                    label_name=label_name,
                    mini_batch_size=mini_batch_size,
                    return_loss=return_loss,
                )

                if return_loss:
                    overall_loss += loss_and_count[0].item()
                    overall_count += loss_and_count[1]

                # go through each sentence in the batch
                for sentence, labels_to_sentence in zip(batch, all_labels_to_sentence):
                    # always remove tags first
                    sentence.remove_labels(label_name)

                    best_value = ""
                    best_score = 0.0
                    for label, tars_sentence in labels_to_sentence.items():
                        # add all labels that according to TARS match the text and are above threshold
                        predicted_tars_label = tars_sentence.get_label(label_name)
                        # a NO_MATCH prediction with score s corresponds to a MATCH score of 1 - s
                        score = (
                            predicted_tars_label.score
                            if predicted_tars_label.value == self.LABEL_MATCH
                            else 1 - predicted_tars_label.score
                        )
                        if score > label_threshold:
                            # do not add labels below confidence threshold
                            sentence.add_label(label_name, label, score)

                        if score > best_score:
                            best_score = score
                            best_value = label

                    # only use label with the highest confidence if enforcing single-label predictions
                    # add the label with the highest score even if below the threshold if force label is activated.
                    if not multi_label or (multi_label and force_label and len(sentence.get_labels(label_name)) == 0):
                        # remove previously added labels and only add the best label
                        sentence.remove_labels(label_name)
                        sentence.add_label(
                            typename=label_name,
                            value=best_value,
                            score=best_score,
                        )

                # clearing token embeddings to save memory
                store_embeddings(batch, storage_mode=embedding_storage_mode)

        if return_loss:
            return overall_loss, overall_count
        return None
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "TARSClassifier":
from typing import cast
return cast("TARSClassifier", super().load(model_path=model_path))
| 40,613 | 40.956612 | 126 | py |
flair | flair-master/flair/models/lemmatizer_model.py | import logging
from math import inf
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
import flair.embeddings
import flair.nn
from flair.data import Dictionary, Sentence, Token
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.training_utils import Result, store_embeddings
log = logging.getLogger("flair")
class Lemmatizer(flair.nn.Classifier[Sentence]):
    def __init__(
        self,
        embeddings: Optional[flair.embeddings.TokenEmbeddings] = None,
        label_type: str = "lemma",
        rnn_input_size: int = 50,
        rnn_hidden_size: int = 256,
        rnn_layers: int = 2,
        encode_characters: bool = True,
        char_dict: Union[str, Dictionary] = "common-chars-lemmatizer",
        max_sequence_length_dependent_on_input: bool = True,
        max_sequence_length: int = 20,
        use_attention: bool = True,
        beam_size: int = 1,
        start_symbol_for_encoding: bool = True,
        end_symbol_for_encoding: bool = True,
        bidirectional_encoding: bool = True,
    ) -> None:
        """Initializes a Lemmatizer model.

        The model consists of a decoder and an encoder. The encoder is either a RNN-cell (torch.nn.GRU)
        or a Token-Embedding from flair if an embedding is handed to the constructor (token_embedding).
        The output of the encoder is used as the initial hidden state to the decoder, which is an RNN-cell (GRU)
        that predicts the lemma of the given token one letter at a time.
        Note that one can use data in which only those words are annotated that differ from their lemma or data
        in which all words are annotated with a (maybe equal) lemma.

        :param embeddings: Embedding used to encode sentence
        :param label_type: Name of the gold labels to use.
        :param rnn_input_size: Input size of the RNN('s). Each letter of a token is represented by a hot-one-vector
            over the given character dictionary. This vector is transformed to a input_size vector with a linear layer.
        :param rnn_hidden_size: size of the hidden state of the RNN('s).
        :param rnn_layers: Number of stacked RNN cells
        :param encode_characters: whether to encode the token characters with the encoder RNN
        :param char_dict: Dictionary of characters the model is able to process. The dictionary must contain <unk> for
            the handling of unknown characters. If None, a standard dictionary will be loaded. One can either hand
            over a path to a dictionary or the dictionary itself.
        :param max_sequence_length_dependent_on_input: If set to True, the maximum length of a decoded sequence in
            the prediction depends on the sentences you want to lemmatize. To be precise the maximum length is
            computed as the length of the longest token in the sentences plus one.
        :param max_sequence_length: If set to True and max_sequence_length_dependend_on_input is False a fixed
            maximum length for the decoding will be used for all sentences.
        :param use_attention: whether to use attention. Only sensible if encoding via RNN
        :param beam_size: Number of hypothesis used when decoding the output of the RNN. Only used in prediction.
        :param start_symbol_for_encoding: add a start symbol <S> in front of each token before encoding it
        :param end_symbol_for_encoding: append an end symbol <E> to each token before encoding it
        :param bidirectional_encoding: whether the encoder RNN is bidirectional
        """
        super().__init__()

        self._label_type = label_type
        self.beam_size = beam_size
        self.max_sequence_length = max_sequence_length
        self.dependent_on_input = max_sequence_length_dependent_on_input
        self.start_symbol = start_symbol_for_encoding
        self.end_symbol = end_symbol_for_encoding
        self.bi_encoding = bidirectional_encoding
        self.rnn_hidden_size = rnn_hidden_size

        # whether to encode characters and whether to use attention (attention can only be used if chars are encoded)
        self.encode_characters = encode_characters
        self.use_attention = use_attention
        if not self.encode_characters:
            self.use_attention = False

        # character dictionary for decoding and encoding
        self.char_dictionary = char_dict if isinstance(char_dict, Dictionary) else Dictionary.load(char_dict)

        # make sure <unk> is in dictionary for handling of unknown characters
        if not self.char_dictionary.add_unk:
            raise KeyError("<unk> must be contained in char_dict")

        # add special symbols to dictionary if necessary and save respective indices
        self.dummy_index = self.char_dictionary.add_item("<>")
        self.start_index = self.char_dictionary.add_item("<S>")
        self.end_index = self.char_dictionary.add_item("<E>")

        # ---- ENCODER ----
        # encoder character embeddings
        self.encoder_character_embedding = nn.Embedding(len(self.char_dictionary), rnn_input_size)

        # encoder pre-trained embeddings
        self.encoder_embeddings = embeddings

        # input size of the projection to the decoder's initial hidden state depends on
        # which encodings are active (token embeddings, character RNN, bidirectionality)
        hidden_input_size = 0
        if embeddings:
            hidden_input_size += embeddings.embedding_length
        if encode_characters:
            hidden_input_size += rnn_hidden_size
        if encode_characters and bidirectional_encoding:
            hidden_input_size += rnn_hidden_size
        self.emb_to_hidden = nn.Linear(hidden_input_size, rnn_hidden_size)

        # encoder RNN
        self.encoder_rnn = nn.GRU(
            input_size=rnn_input_size,
            hidden_size=self.rnn_hidden_size,
            batch_first=True,
            num_layers=rnn_layers,
            bidirectional=self.bi_encoding,
        )

        # additional encoder linear layer if bidirectional encoding
        if self.bi_encoding:
            self.bi_hidden_states_to_hidden_size: Optional[nn.Linear] = nn.Linear(
                2 * self.rnn_hidden_size, self.rnn_hidden_size, bias=False
            )
        else:
            self.bi_hidden_states_to_hidden_size = None

        # ---- DECODER ----
        # decoder: linear layers to transform vectors to and from alphabet_size
        self.decoder_character_embedding = nn.Embedding(len(self.char_dictionary), rnn_input_size)

        # when using attention we concatenate attention outcome and decoder hidden states
        self.character_decoder = nn.Linear(
            2 * self.rnn_hidden_size if self.use_attention else self.rnn_hidden_size,
            len(self.char_dictionary),
        )

        # decoder RNN
        self.rnn_input_size = rnn_input_size
        self.rnn_layers = rnn_layers

        self.decoder_rnn = nn.GRU(
            input_size=rnn_input_size,
            hidden_size=self.rnn_hidden_size,
            batch_first=True,
            num_layers=rnn_layers,
        )

        # loss and softmax
        self.loss = nn.CrossEntropyLoss(reduction="sum")
        self.softmax = nn.Softmax(dim=2)

        self.to(flair.device)
@property
def label_type(self):
return self._label_type
def words_to_char_indices(
self,
tokens: List[str],
end_symbol=True,
start_symbol=False,
padding_in_front=False,
seq_length=None,
):
"""For a given list of strings this function creates index vectors that represent the characters of the strings.
Each string is represented by sequence_length (maximum string length + entries for special symbold) many
indices representing characters in self.char_dict.
One can manually set the vector length with the parameter seq_length, though the vector length is always
at least maximum string length in the list.
:param end_symbol: add self.end_index at the end of each representation
:param start_symbol: add self.start_index in front of of each representation
:param padding_in_front: whether to fill up with self.dummy_index in front or in back of strings
"""
# add additional columns for special symbols if necessary
c = int(end_symbol) + int(start_symbol)
max_length = max(len(token) for token in tokens) + c
sequence_length = max_length if not seq_length else max(seq_length, max_length)
# initialize with dummy symbols
tensor = self.dummy_index * torch.ones(len(tokens), sequence_length, dtype=torch.long).to(flair.device)
for i in range(len(tokens)):
dif = sequence_length - (len(tokens[i]) + c)
shift = 0
if padding_in_front:
shift += dif
if start_symbol:
tensor[i][0 + shift] = self.start_index
if end_symbol:
tensor[i][len(tokens[i]) + int(start_symbol) + shift] = self.end_index
for index, letter in enumerate(tokens[i]):
tensor[i][index + int(start_symbol) + shift] = self.char_dictionary.get_idx_for_item(letter)
return tensor
def forward_pass(self, sentences: Union[List[Sentence], Sentence]):
if isinstance(sentences, Sentence):
sentences = [sentences]
# encode inputs
initial_hidden_states, all_encoder_outputs = self.encode(sentences)
# get labels (we assume each token has a lemma label)
labels = [token.get_label(self._label_type).value for sentence in sentences for token in sentence]
# get char indices for labels of sentence
# (batch_size, max_sequence_length) batch_size = #words in sentence,
# max_sequence_length = length of longest label of sentence + 1
decoder_input_indices = self.words_to_char_indices(
labels, start_symbol=True, end_symbol=False, padding_in_front=False
)
# get char embeddings
# (batch_size,max_sequence_length,input_size), i.e. replaces char indices with vectors of length input_size
output_vectors, _ = self.decode(decoder_input_indices, initial_hidden_states, all_encoder_outputs)
return output_vectors, labels
    def decode(self, decoder_input_indices, initial_hidden_states, all_encoder_outputs):
        """Run the decoder RNN (optionally with attention) over the given character indices.

        :param decoder_input_indices: character indices of shape (batch_size, sequence_length)
        :param initial_hidden_states: initial hidden state for the decoder RNN
        :param all_encoder_outputs: per-position encoder outputs used for attention; may be
            None when attention is disabled
        :return: tuple of (scores over the character alphabet, final decoder hidden state)
        """
        # take decoder input and initial hidden and pass through RNN
        input_tensor = self.decoder_character_embedding(decoder_input_indices)
        output, hidden = self.decoder_rnn(input_tensor, initial_hidden_states)

        # if all encoder outputs are provided, use attention
        if self.use_attention:
            # attention weights over encoder positions: softmax over dot-product scores
            attention_coeff = torch.softmax(torch.matmul(all_encoder_outputs, torch.transpose(output, 1, 2)), dim=1)

            # take convex combinations of encoder hidden states as new output using the computed attention coefficients
            attention_output = torch.transpose(
                torch.matmul(torch.transpose(all_encoder_outputs, 1, 2), attention_coeff),
                1,
                2,
            )

            output = torch.cat((output, attention_output), dim=2)

        # transform output to vectors of size len(char_dict) -> (batch_size, max_sequence_length, alphabet_size)
        output_vectors = self.character_decoder(output)

        return output_vectors, hidden
def _prepare_tensors(self, sentences: List[Sentence]) -> Tuple[Optional[torch.Tensor], ...]:
# get all tokens
tokens = [token for sentence in sentences for token in sentence]
# encode input characters by sending them through RNN
if self.encode_characters:
# get one-hots for characters and add special symbols / padding
encoder_input_indices = self.words_to_char_indices(
[token.text for token in tokens],
start_symbol=self.start_symbol,
end_symbol=self.end_symbol,
padding_in_front=False,
)
# determine length of each token
extra = 0
if self.start_symbol:
extra += 1
if self.end_symbol:
extra += 1
lengths = torch.tensor([len(token.text) + extra for token in tokens], device=flair.device)
else:
encoder_input_indices = None
lengths = None
if self.encoder_embeddings:
# embed sentences
self.encoder_embeddings.embed(sentences)
# create initial hidden state tensor for batch (num_layers, batch_size, hidden_size)
token_embedding_hidden = torch.stack(
self.rnn_layers * [torch.stack([token.get_embedding() for token in tokens])]
)
else:
token_embedding_hidden = None
return encoder_input_indices, lengths, token_embedding_hidden
    def forward(
        self,
        encoder_input_indices: Optional[torch.Tensor],
        lengths: Optional[torch.Tensor],
        token_embedding_hidden: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Encode a batch of tokens from the tensors produced by ``_prepare_tensors``.

        :param encoder_input_indices: padded character index matrix, or None when character
            encoding is disabled
        :param lengths: effective length of each token (including special symbols), or None
        :param token_embedding_hidden: stacked pre-trained token embeddings, or None
        :return: tuple of (initial hidden state for the decoder, masked encoder outputs or None)
        """
        # variable to store initial hidden states for decoder
        initial_hidden_for_decoder = []

        # encode input characters by sending them through RNN
        if encoder_input_indices is not None and lengths is not None:
            input_vectors = self.encoder_character_embedding(encoder_input_indices)

            # pack the padded batch so the RNN skips the padding positions
            packed_sequence = torch.nn.utils.rnn.pack_padded_sequence(
                input_vectors,
                lengths,
                enforce_sorted=False,
                batch_first=True,
            )
            encoding_flat, initial_hidden_states = self.encoder_rnn(packed_sequence)
            encoder_outputs, lengths = torch.nn.utils.rnn.pad_packed_sequence(encoding_flat, batch_first=True)

            # since bidirectional rnn is only used in encoding we need to project outputs to hidden_size of decoder
            if self.bi_encoding and self.bi_hidden_states_to_hidden_size is not None:
                encoder_outputs = self.bi_hidden_states_to_hidden_size(encoder_outputs)

                # concatenate the final hidden states of the encoder. These will be projected to hidden_size of
                # decoder later with self.emb_to_hidden
                conditions = torch.cat(2 * [torch.eye(self.rnn_layers).bool()])
                bi_states = [initial_hidden_states[conditions[:, i], :, :] for i in range(self.rnn_layers)]
                initial_hidden_states = torch.stack([torch.cat((b[0, :, :], b[1, :, :]), dim=1) for b in bi_states])

            initial_hidden_for_decoder.append(initial_hidden_states)

            # mask out vectors that correspond to a dummy symbol (TODO: check attention masking)
            mask = torch.cat(
                (self.rnn_hidden_size * [(encoder_input_indices == self.dummy_index).unsqueeze(2)]),
                dim=2,
            )
            all_encoder_outputs: Optional[torch.Tensor] = torch.where(
                mask, torch.tensor(0.0, device=flair.device), encoder_outputs
            )
        else:
            all_encoder_outputs = None

        # use token embedding as initial hidden state for decoder
        if token_embedding_hidden is not None:
            initial_hidden_for_decoder.append(token_embedding_hidden)

        # concatenate everything together and project to appropriate size for decoder
        initial_hidden = self.emb_to_hidden(torch.cat(initial_hidden_for_decoder, dim=2))

        return initial_hidden, all_encoder_outputs
def encode(self, sentences: List[Sentence]):
tensors = self._prepare_tensors(sentences)
return self.forward(*tensors)
    def encode_token(self, token: Token):
        """Encode a single token, returning the decoder's initial hidden state and encoder outputs.

        Single-token analogue of the batch encoding path; no padding is needed since
        the token is processed on its own.
        """
        # variable to store initial hidden states for decoder
        initial_hidden_for_decoder = []
        all_encoder_outputs = None

        # encode input characters by sending them through RNN
        if self.encode_characters:
            # note that we do not need to fill up with dummy symbols since we process each token seperately
            encoder_input_indices = self.words_to_char_indices(
                [token.text], start_symbol=self.start_symbol, end_symbol=self.end_symbol
            )
            # embed character one-hots
            input_vector = self.encoder_character_embedding(encoder_input_indices)
            # send through encoder RNN (produces initial hidden for decoder)
            all_encoder_outputs, initial_hidden_states = self.encoder_rnn(input_vector)

            # since bidirectional rnn is only used in encoding we need to project outputs to hidden_size of decoder
            if self.bi_encoding and self.bi_hidden_states_to_hidden_size is not None:
                # project 2*hidden_size to hidden_size
                all_encoder_outputs = self.bi_hidden_states_to_hidden_size(all_encoder_outputs)

                # concatenate the final hidden states of the encoder. These will be projected to hidden_size of decoder
                # later with self.emb_to_hidden
                conditions = torch.cat(2 * [torch.eye(self.rnn_layers).bool()])
                bi_states = [initial_hidden_states[conditions[:, i], :, :] for i in range(self.rnn_layers)]
                initial_hidden_states = torch.stack([torch.cat((b[0, :, :], b[1, :, :]), dim=1) for b in bi_states])

            initial_hidden_for_decoder.append(initial_hidden_states)

        # use token embedding as initial hidden state for decoder
        if self.encoder_embeddings:
            # create initial hidden state tensor for batch (num_layers, batch_size, hidden_size)
            token_embedding_hidden = torch.stack(self.rnn_layers * [token.get_embedding()]).unsqueeze(1)
            initial_hidden_for_decoder.append(token_embedding_hidden)

        # concatenate everything together and project to appropriate size for decoder
        initial_hidden_for_decoder = self.emb_to_hidden(torch.cat(initial_hidden_for_decoder, dim=2))

        return initial_hidden_for_decoder, all_encoder_outputs
def _calculate_loss(self, scores, labels):
# score vector has to have a certain format for (2d-)loss fct (batch_size, alphabet_size, 1, max_seq_length)
scores_in_correct_format = scores.permute(0, 2, 1).unsqueeze(2)
# create target vector (batch_size, max_label_seq_length + 1)
target = self.words_to_char_indices(labels, start_symbol=False, end_symbol=True, padding_in_front=False)
target.unsqueeze_(1) # (batch_size, 1, max_label_seq_length + 1)
return self.loss(scores_in_correct_format, target), len(labels)
def forward_loss(self, sentences: Union[List[Sentence], Sentence]) -> Tuple[torch.Tensor, int]:
scores, labels = self.forward_pass(sentences)
return self._calculate_loss(scores, labels)
def predict(
self,
sentences: Union[List[Sentence], Sentence],
mini_batch_size: int = 16,
return_probabilities_for_all_classes: bool = False,
verbose: bool = False,
label_name="predicted",
return_loss=False,
embedding_storage_mode="none",
):
"""Predict lemmas of words for a given (list of) sentence(s).
:param sentences: sentences to predict
:param label_name: label name used for predicted lemmas
:param mini_batch_size: number of tokens that are send through the RNN simultaneously, assuming batching_in_rnn
is set to True
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
:param return_loss: whether or not to compute and return loss. Setting it to True only makes sense if labels
are provided
:param verbose: If True, lemmatized sentences will be printed in the console.
"""
if isinstance(sentences, Sentence):
sentences = [sentences]
Sentence.set_context_for_sentences(sentences)
# filter empty sentences
sentences = [sentence for sentence in sentences if len(sentence) > 0]
if len(sentences) == 0:
return sentences
# max length of the predicted sequences
if not self.dependent_on_input:
max_length = self.max_sequence_length
else:
max_length = max([len(token.text) + 1 for sentence in sentences for token in sentence])
# for printing
line_to_print = ""
overall_loss = 0.0
number_tokens_in_total = 0
with torch.no_grad():
dataloader = DataLoader(dataset=FlairDatapointDataset(sentences), batch_size=mini_batch_size)
for batch in dataloader:
# stop if all sentences are empty
if not batch:
continue
# remove previously predicted labels of this type
for sentence in batch:
for token in sentence:
token.remove_labels(label_name)
# create list of tokens in batch
tokens_in_batch = [token for sentence in batch for token in sentence]
number_tokens = len(tokens_in_batch)
number_tokens_in_total += number_tokens
# encode inputs
hidden, all_encoder_outputs = self.encode(batch)
# create input for first pass (batch_size, 1, input_size), first letter is special character <S>
# sequence length is always set to one in prediction
input_indices = self.start_index * torch.ones(
number_tokens, dtype=torch.long, device=flair.device
).unsqueeze(1)
# option 1: greedy decoding
if self.beam_size == 1:
# predictions
predicted: List[List[int]] = [[] for _ in range(number_tokens)]
for _decode_step in range(max_length):
# decode next character
output_vectors, hidden = self.decode(input_indices, hidden, all_encoder_outputs)
log_softmax_probs = torch.nn.functional.log_softmax(output_vectors, dim=2)
# pick top beam size many outputs with highest probabilities
input_indices = log_softmax_probs.argmax(dim=2)
for i in range(number_tokens):
if len(predicted[i]) > 0 and predicted[i][-1] == self.end_index:
continue
predicted[i].append(input_indices[i].item())
for t_id, token in enumerate(tokens_in_batch):
predicted_lemma = "".join(
self.char_dictionary.get_item_for_index(idx) if idx != self.end_index else ""
for idx in predicted[t_id]
)
token.set_label(typename=label_name, value=predicted_lemma)
# option 2: beam search
else:
output_vectors, hidden = self.decode(input_indices, hidden, all_encoder_outputs)
# out_probs = self.softmax(output_vectors).squeeze(1)
log_softmax_probs = torch.nn.functional.log_softmax(output_vectors, dim=2).squeeze(1)
# make sure no dummy symbol <> or start symbol <S> is predicted
log_softmax_probs[:, self.dummy_index] = -inf
log_softmax_probs[:, self.start_index] = -inf
# pick top beam size many outputs with highest probabilities
# probabilities, leading_indices = out_probs.topk(self.beam_size, 1) # max prob along dimension 1
log_probabilities, leading_indices = log_softmax_probs.topk(self.beam_size, 1)
# leading_indices and probabilities have size (batch_size, beam_size)
# keep scores of beam_size many hypothesis for each token in the batch
scores = log_probabilities.view(-1, 1)
# stack all leading indices of all hypothesis and corresponding hidden states in two tensors
leading_indices = leading_indices.view(-1, 1) # this vector goes through RNN in each iteration
hidden_states_beam = torch.stack(self.beam_size * [hidden], dim=2).view(
self.rnn_layers, -1, self.rnn_hidden_size
)
# save sequences so far
sequences = torch.tensor([[i.item()] for i in leading_indices], device=flair.device)
# keep track of how many hypothesis were completed for each token
n_completed = [0 for _ in range(number_tokens)] # cpu
final_candidates: List[List[Tuple[torch.Tensor, float]]] = [[] for _ in range(number_tokens)] # cpu
# if all_encoder_outputs returned, expand them to beam size (otherwise keep this as None)
batched_encoding_output = (
torch.stack(self.beam_size * [all_encoder_outputs], dim=1).view(
self.beam_size * number_tokens, -1, self.rnn_hidden_size
)
if self.use_attention
else None
)
for _j in range(1, max_length):
output_vectors, hidden_states_beam = self.decode(
leading_indices, hidden_states_beam, batched_encoding_output
)
# decode with log softmax
out_log_probs = torch.nn.functional.log_softmax(output_vectors, dim=2)
# make sure no dummy symbol <> or start symbol <S> is predicted
out_log_probs[:, 0, self.dummy_index] = -inf
out_log_probs[:, 0, self.start_index] = -inf
log_probabilities, index_candidates = out_log_probs.topk(self.beam_size, 2)
log_probabilities.squeeze_(1)
index_candidates.squeeze_(1)
# check if an end symbol <E> has been predicted and, in that case, set hypothesis aside
end_symbols = (index_candidates == self.end_index).nonzero(as_tuple=False)
for tuple in end_symbols:
# if the sequence is already ended, do not record as candidate
if sequences[tuple[0], -1].item() == self.end_index:
continue
# index of token in in list tokens_in_batch
token_number = torch.div(tuple[0], self.beam_size, rounding_mode="trunc")
# print(token_number)
seq = sequences[tuple[0], :] # hypothesis sequence
# hypothesis score
score = (scores[tuple[0]] + log_probabilities[tuple[0], tuple[1]]) / (len(seq) + 1)
final_candidates[token_number].append((seq, score.item()))
# TODO: remove token if number of completed hypothesis exceeds given value
n_completed[token_number] += 1
# set score of corresponding entry to -inf so it will not be expanded
log_probabilities[tuple[0], tuple[1]] = -inf
# get leading_indices for next expansion
# find highest scoring hypothesis among beam_size*beam_size possible ones for each token
# take beam_size many copies of scores vector and add scores of possible new extensions
# size (beam_size*batch_size, beam_size)
hypothesis_scores = torch.cat(self.beam_size * [scores], dim=1) + log_probabilities
# print(hypothesis_scores)
# reshape to vector of size (batch_size, beam_size*beam_size),
# each row contains beam_size*beam_size scores of the new possible hypothesis
hypothesis_scores_per_token = hypothesis_scores.view(number_tokens, self.beam_size**2)
# print(hypothesis_scores_per_token)
# choose beam_size best for each token - size (batch_size, beam_size)
(
best_scores,
indices_per_token,
) = hypothesis_scores_per_token.topk(self.beam_size, 1)
# out of indices_per_token we now need to recompute the original indices of the hypothesis in
# a list of length beam_size*batch_size
# where the first three inidices belong to the first token, the next three to the second token,
# and so on
beam_numbers: List[int] = []
seq_numbers: List[int] = []
for i, row in enumerate(indices_per_token):
beam_numbers.extend(i * self.beam_size + index.item() // self.beam_size for index in row)
seq_numbers.extend(index.item() % self.beam_size for index in row)
# with these indices we can compute the tensors for the next iteration
# expand sequences with corresponding index
sequences = torch.cat(
(
sequences[beam_numbers],
index_candidates[beam_numbers, seq_numbers].unsqueeze(1),
),
dim=1,
)
# add log-probabilities to the scores
scores = scores[beam_numbers] + log_probabilities[beam_numbers, seq_numbers].unsqueeze(1)
# save new leading indices
leading_indices = index_candidates[beam_numbers, seq_numbers].unsqueeze(1)
# save corresponding hidden states
hidden_states_beam = hidden_states_beam[:, beam_numbers, :]
# it may happen that no end symbol <E> is predicted for a token in all of the max_length iterations
# in that case we append one of the final seuqences without end symbol to the final_candidates
best_scores, indices = scores.view(number_tokens, -1).topk(1, 1)
for j, (score, index) in enumerate(zip(best_scores.squeeze(1), indices.squeeze(1))):
if len(final_candidates[j]) == 0:
beam = j * self.beam_size + index.item()
final_candidates[j].append((sequences[beam, :], score.item() / max_length))
# get best final hypothesis for each token
output_sequences = []
for candidate in final_candidates:
l_ordered = sorted(candidate, key=lambda tup: tup[1], reverse=True)
output_sequences.append(l_ordered[0])
# get characters from index sequences and add predicted label to token
for i, out_seq in enumerate(output_sequences):
predicted_lemma = ""
for idx in out_seq[0]:
predicted_lemma += self.char_dictionary.get_item_for_index(idx)
line_to_print += predicted_lemma
line_to_print += " "
tokens_in_batch[i].add_tag(tag_type=label_name, tag_value=predicted_lemma)
if return_loss:
overall_loss += self.forward_loss(batch)[0].item()
store_embeddings(batch, storage_mode=embedding_storage_mode)
if verbose:
log.info(line_to_print)
if return_loss:
return overall_loss, number_tokens_in_total
return None
def _get_state_dict(self):
model_state = {
**super()._get_state_dict(),
"embeddings": self.encoder_embeddings.save_embeddings(use_state_dict=False),
"rnn_input_size": self.rnn_input_size,
"rnn_hidden_size": self.rnn_hidden_size,
"rnn_layers": self.rnn_layers,
"char_dict": self.char_dictionary,
"label_type": self._label_type,
"beam_size": self.beam_size,
"max_sequence_length": self.max_sequence_length,
"dependent_on_input": self.dependent_on_input,
"use_attention": self.use_attention,
"encode_characters": self.encode_characters,
"start_symbol": self.start_symbol,
"end_symbol": self.end_symbol,
"bidirectional_encoding": self.bi_encoding,
}
return model_state
@classmethod
def _init_model_with_state_dict(cls, state, **kwargs):
return super()._init_model_with_state_dict(
state,
embeddings=state.get("embeddings"),
encode_characters=state.get("encode_characters"),
rnn_input_size=state.get("rnn_input_size"),
rnn_hidden_size=state.get("rnn_hidden_size"),
rnn_layers=state.get("rnn_layers"),
char_dict=state.get("char_dict"),
label_type=state.get("label_type"),
beam_size=state.get("beam_size"),
max_sequence_length_dependent_on_input=state.get("dependent_on_input"),
max_sequence_length=state.get("max_sequence_length"),
use_attention=state.get("use_attention"),
start_symbol_for_encoding=state.get("start_symbol"),
end_symbol_for_encoding=state.get("end_symbol"),
bidirectional_encoding=state.get("bidirectional_encoding"),
**kwargs,
)
def _print_predictions(self, batch, gold_label_type):
lines = []
for sentence in batch:
eval_line = (
f" - Text: {' '.join([token.text for token in sentence])}\n"
f" - Gold-Lemma: {' '.join([token.get_label(gold_label_type).value for token in sentence])}\n"
f" - Predicted: {' '.join([token.get_label('predicted').value for token in sentence])}\n\n"
)
lines.append(eval_line)
return lines
def evaluate(self, *args, **kwargs) -> Result:
# Overwrites evaluate of parent class to remove the "by class" printout
result = super().evaluate(*args, **kwargs)
result.detailed_results = result.detailed_results.split("\n\n")[0]
return result
| 34,759 | 48.026798 | 120 | py |
flair | flair-master/flair/models/language_model.py | import math
from pathlib import Path
from typing import List, Optional, Tuple, Union
import torch
from torch import logsumexp, nn
from torch.optim import Optimizer
import flair
from flair.data import Dictionary
from flair.nn.recurrent import create_recurrent_layer
class LanguageModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
    def __init__(
        self,
        dictionary: Dictionary,
        is_forward_lm: bool,
        hidden_size: int,
        nlayers: int,
        embedding_size: int = 100,
        nout=None,
        document_delimiter: str = "\n",
        dropout=0.1,
        recurrent_type="LSTM",
        has_decoder=True,
    ) -> None:
        """Initialize the character-level language model.

        Args:
            dictionary: Character dictionary mapping characters to indices.
            is_forward_lm: If True, the model reads text left-to-right; otherwise reversed.
            hidden_size: Number of hidden units per recurrent layer.
            nlayers: Number of stacked recurrent layers.
            embedding_size: Size of the character embedding vectors.
            nout: If set, RNN outputs are projected down to this size before decoding.
            document_delimiter: Delimiter string between documents in the training data.
            dropout: Dropout probability applied to embeddings and RNN outputs.
            recurrent_type: Recurrent cell type passed to `create_recurrent_layer` (e.g. "LSTM", "GRU").
            has_decoder: If False, no decoder layer is created (embedding-only usage).
        """
        super().__init__()
        self.dictionary = dictionary
        self.document_delimiter = document_delimiter
        self.is_forward_lm: bool = is_forward_lm
        self.dropout = dropout
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.nlayers = nlayers
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(len(dictionary), embedding_size)
        # state_count is the number of hidden-state tensors the RNN uses (2 for LSTM, 1 for GRU)
        self.rnn, self.state_count = create_recurrent_layer(
            recurrent_type, embedding_size, hidden_size, nlayers, dropout
        )
        self.recurrent_type = recurrent_type
        self.hidden = None
        self.nout = nout
        if nout is not None:
            # optional down-projection of RNN outputs; note that `hidden_size` is
            # rebound here so the decoder below gets the projected size as input
            self.proj: Optional[nn.Linear] = nn.Linear(hidden_size, nout)
            self.initialize(self.proj.weight)
            hidden_size = nout
        else:
            self.proj = None
        if has_decoder:
            self.decoder: Optional[nn.Linear] = nn.Linear(hidden_size, len(dictionary))
        else:
            self.decoder = None
        self.init_weights()
        # auto-spawn on GPU if available
        self.to(flair.device)
def init_weights(self):
initrange = 0.1
self.encoder.weight.detach().uniform_(-initrange, initrange)
if self.decoder is not None:
self.decoder.bias.detach().fill_(0)
self.decoder.weight.detach().uniform_(-initrange, initrange)
    def set_hidden(self, hidden):
        # Overwrite the model's stored hidden state (e.g. to carry state across calls).
        self.hidden = hidden
    def forward(self, input, hidden, ordered_sequence_lengths=None, decode=True):
        """Run character indices through embedding, RNN and (optionally) the decoder.

        Args:
            input: Character index tensor; time-major (sequence first) — callers such as
                `get_representation` transpose batches before calling. TODO confirm shape contract.
            hidden: Tuple of hidden-state tensors (one entry for GRU-style cells, two for LSTM).
            ordered_sequence_lengths: Unused; kept for interface compatibility.
            decode: If True, also compute decoder logits over the character vocabulary.

        Returns:
            (decoded, output, hidden) if `decode` is True, else (output, hidden).
        """
        encoded = self.encoder(input)
        emb = self.drop(encoded)
        if hasattr(self.rnn, "flatten_parameters"):
            self.rnn.flatten_parameters()
        # single-state cells (e.g. GRU) expect a bare tensor instead of a tuple
        if len(hidden) == 1:
            output, h = self.rnn(emb, hidden[0])
            hidden = (h,)
        else:
            output, hidden = self.rnn(emb, hidden)
        if self.proj is not None:
            output = self.proj(output)
        output = self.drop(output)
        if decode:
            decoded = self.decoder(output)
            return (
                decoded,
                output,
                hidden,
            )
        else:
            return output, hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).detach()
return tuple(
weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach() for _ in range(self.state_count)
)
    def get_representation(
        self,
        strings: List[str],
        start_marker: str,
        end_marker: str,
        chars_per_chunk: int = 512,
    ):
        """Compute hidden-state representations for a batch of strings.

        Each string is wrapped in `start_marker`/`end_marker` (and reversed first for a
        backward LM), then processed in chunks of at most `chars_per_chunk` characters,
        carrying the hidden state across chunks, to bound peak memory usage.

        Args:
            strings: Batch of raw strings (e.g. sentence texts).
            start_marker: Marker prepended to every string.
            end_marker: Marker appended to every string.
            chars_per_chunk: Maximum characters processed per forward pass.

        Returns:
            Tensor of RNN outputs, time-major, concatenated over all chunks.
        """
        len_longest_str: int = len(max(strings, key=len))
        # pad strings with whitespaces to longest sentence
        padded_strings: List[str] = []
        for string in strings:
            if not self.is_forward_lm:
                string = string[::-1]
            padded = f"{start_marker}{string}{end_marker}"
            padded_strings.append(padded)
        # cut up the input into chunks of max charlength = chunk_size
        chunks = []
        splice_begin = 0
        longest_padded_str: int = len_longest_str + len(start_marker) + len(end_marker)
        for splice_end in range(chars_per_chunk, longest_padded_str, chars_per_chunk):
            chunks.append([text[splice_begin:splice_end] for text in padded_strings])
            splice_begin = splice_end
        # final (possibly shorter) chunk up to the padded length
        chunks.append([text[splice_begin:longest_padded_str] for text in padded_strings])
        hidden = self.init_hidden(len(chunks[0]))
        padding_char_index = self.dictionary.get_idx_for_item(" ")
        batches: List[torch.Tensor] = []
        # push each chunk through the RNN language model
        for chunk in chunks:
            len_longest_chunk: int = len(max(chunk, key=len))
            sequences_as_char_indices: List[List[int]] = []
            for string in chunk:
                char_indices = self.dictionary.get_idx_for_items(list(string))
                # right-pad shorter strings with the whitespace index
                char_indices += [padding_char_index] * (len_longest_chunk - len(string))
                sequences_as_char_indices.append(char_indices)
            t = torch.tensor(sequences_as_char_indices, dtype=torch.long).to(device=flair.device, non_blocking=True)
            batches.append(t)
        output_parts = []
        for batch in batches:
            # transpose to time-major layout expected by the RNN; hidden state is
            # carried over between chunks so representations stay contextual
            batch = batch.transpose(0, 1)
            rnn_output, hidden = self.forward(batch, hidden, decode=False)
            output_parts.append(rnn_output)
        # concatenate all chunks to make final output
        output = torch.cat(output_parts)
        return output
    def get_output(self, text: str):
        # NOTE(review): despite its name, this returns the detached final *hidden state*,
        # not the decoder output (`prediction`/`rnn_output` are discarded). Also,
        # `input_vector` is never moved to flair.device — presumably only exercised on
        # CPU; confirm before using with a GPU-resident model.
        char_indices = [self.dictionary.get_idx_for_item(char) for char in text]
        input_vector = torch.LongTensor([char_indices]).transpose(0, 1)
        hidden = self.init_hidden(1)
        prediction, rnn_output, hidden = self.forward(input_vector, hidden)
        return self.repackage_hidden(hidden)
def repackage_hidden(self, h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if type(h) == torch.Tensor:
return h.clone().detach()
else:
return tuple(self.repackage_hidden(v) for v in h)
@staticmethod
def initialize(matrix):
in_, out_ = matrix.size()
stdv = math.sqrt(3.0 / (in_ + out_))
matrix.detach().uniform_(-stdv, stdv)
    @classmethod
    def load_language_model(cls, model_file: Union[Path, str], has_decoder=True):
        """Load a serialized `LanguageModel` from `model_file` and return it in eval mode.

        Args:
            model_file: Path to a file saved via `save` / `save_checkpoint`.
            has_decoder: If False, the decoder layer is dropped even when present
                in the serialized state (embedding-only usage).

        SECURITY NOTE: torch.load unpickles arbitrary objects — only load model
        files from trusted sources.
        """
        state = torch.load(str(model_file), map_location=flair.device)
        # fall back to defaults for fields absent in older serialized models
        document_delimiter = state.get("document_delimiter", "\n")
        has_decoder = state.get("has_decoder", True) and has_decoder
        model = cls(
            dictionary=state["dictionary"],
            is_forward_lm=state["is_forward_lm"],
            hidden_size=state["hidden_size"],
            nlayers=state["nlayers"],
            embedding_size=state["embedding_size"],
            nout=state["nout"],
            document_delimiter=document_delimiter,
            dropout=state["dropout"],
            recurrent_type=state.get("recurrent_type", "lstm"),
            has_decoder=has_decoder,
        )
        # non-strict loading when the decoder is dropped, since decoder weights then have no target
        model.load_state_dict(state["state_dict"], strict=has_decoder)
        model.eval()
        model.to(flair.device)
        return model
@classmethod
def load_checkpoint(cls, model_file: Union[Path, str]):
state = torch.load(str(model_file), map_location=flair.device)
epoch = state["epoch"] if "epoch" in state else None
split = state["split"] if "split" in state else None
loss = state["loss"] if "loss" in state else None
document_delimiter = state.get("document_delimiter", "\n")
optimizer_state_dict = state.get("optimizer_state_dict")
model = cls(
dictionary=state["dictionary"],
is_forward_lm=state["is_forward_lm"],
hidden_size=state["hidden_size"],
nlayers=state["nlayers"],
embedding_size=state["embedding_size"],
nout=state["nout"],
document_delimiter=document_delimiter,
dropout=state["dropout"],
recurrent_type=state.get("recurrent_type", "lstm"),
)
model.load_state_dict(state["state_dict"])
model.eval()
model.to(flair.device)
return {
"model": model,
"epoch": epoch,
"split": split,
"loss": loss,
"optimizer_state_dict": optimizer_state_dict,
}
    def save_checkpoint(
        self,
        file: Union[Path, str],
        optimizer: Optimizer,
        epoch: int,
        split: int,
        loss: float,
    ):
        """Persist a resumable training checkpoint to `file`.

        Stores the same model state as `save` plus the optimizer state and the
        training progress markers (epoch, split, loss) needed to resume training.
        """
        model_state = {
            "state_dict": self.state_dict(),
            "dictionary": self.dictionary,
            "is_forward_lm": self.is_forward_lm,
            "hidden_size": self.hidden_size,
            "nlayers": self.nlayers,
            "embedding_size": self.embedding_size,
            "nout": self.nout,
            "document_delimiter": self.document_delimiter,
            "dropout": self.dropout,
            "optimizer_state_dict": optimizer.state_dict(),
            "epoch": epoch,
            "split": split,
            "loss": loss,
            "recurrent_type": self.recurrent_type,
            "has_decoder": self.decoder is not None,
        }
        # pickle protocol 4 supports large objects and is readable from Python 3.4+
        torch.save(model_state, str(file), pickle_protocol=4)
def save(self, file: Union[Path, str]):
model_state = {
"state_dict": self.state_dict(),
"dictionary": self.dictionary,
"is_forward_lm": self.is_forward_lm,
"hidden_size": self.hidden_size,
"nlayers": self.nlayers,
"embedding_size": self.embedding_size,
"nout": self.nout,
"document_delimiter": self.document_delimiter,
"dropout": self.dropout,
"recurrent_type": self.recurrent_type,
"has_decoder": self.decoder is not None,
}
torch.save(model_state, str(file), pickle_protocol=4)
def generate_text(
self,
prefix: str = "\n",
number_of_characters: int = 1000,
temperature: float = 1.0,
break_on_suffix=None,
) -> Tuple[str, float]:
if prefix == "":
prefix = "\n"
with torch.no_grad():
characters = []
idx2item = self.dictionary.idx2item
# initial hidden state
hidden = self.init_hidden(1)
if len(prefix) > 1:
char_tensors = []
for character in prefix[:-1]:
char_tensors.append(
torch.tensor(self.dictionary.get_idx_for_item(character)).unsqueeze(0).unsqueeze(0)
)
input = torch.cat(char_tensors).to(flair.device)
prediction, _, hidden = self.forward(input, hidden)
input = torch.tensor(self.dictionary.get_idx_for_item(prefix[-1])).unsqueeze(0).unsqueeze(0)
log_prob = torch.zeros(1, device=flair.device)
for _i in range(number_of_characters):
input = input.to(flair.device)
# get predicted weights
prediction, _, hidden = self.forward(input, hidden)
prediction = prediction.squeeze().detach()
decoder_output = prediction
# divide by temperature
prediction = prediction.div(temperature)
# to prevent overflow problem with small temperature values, substract largest value from all
# this makes a vector in which the largest value is 0
max = torch.max(prediction)
prediction -= max
# compute word weights with exponential function
word_weights = prediction.exp().cpu()
# try sampling multinomial distribution for next character
try:
word_idx = torch.multinomial(word_weights, 1)[0]
except: # noqa: E722 TODO: figure out exception type
word_idx = torch.tensor(0)
# print(word_idx)
prob = decoder_output[word_idx] - logsumexp(decoder_output, dim=0)
log_prob += prob
input = word_idx.detach().unsqueeze(0).unsqueeze(0)
word = idx2item[word_idx].decode("UTF-8")
characters.append(word)
if break_on_suffix is not None and "".join(characters).endswith(break_on_suffix):
break
text = prefix + "".join(characters)
log_prob_float = log_prob.item()
log_prob_float /= len(characters)
if not self.is_forward_lm:
text = text[::-1]
return text, -log_prob_float
def calculate_perplexity(self, text: str) -> float:
if not self.is_forward_lm:
text = text[::-1]
# input ids
input = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[:-1]]).unsqueeze(1)
input = input.to(flair.device)
# push list of character IDs through model
hidden = self.init_hidden(1)
prediction, _, hidden = self.forward(input, hidden)
# the target is always the next character
targets = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[1:]])
targets = targets.to(flair.device)
# use cross entropy loss to compare output of forward pass with targets
cross_entroy_loss = torch.nn.CrossEntropyLoss()
loss = cross_entroy_loss(prediction.view(-1, len(self.dictionary)), targets).item()
# exponentiate cross-entropy loss to calculate perplexity
perplexity = math.exp(loss)
return perplexity
    def __getstate__(self):
        """Return a pickle state containing only weights and constructor arguments."""
        # "document_delimiter" property may be missing in some older pre-trained models
        self.document_delimiter = getattr(self, "document_delimiter", "\n")
        # serialize the language models and the constructor arguments (but nothing else)
        model_state = {
            "state_dict": self.state_dict(),
            "dictionary": self.dictionary,
            "is_forward_lm": self.is_forward_lm,
            "hidden_size": self.hidden_size,
            "nlayers": self.nlayers,
            "embedding_size": self.embedding_size,
            "nout": self.nout,
            "document_delimiter": self.document_delimiter,
            "dropout": self.dropout,
            "recurrent_type": self.recurrent_type,
            "has_decoder": self.decoder is not None,
        }
        return model_state
    def __setstate__(self, d):
        """Restore a pickled model, supporting both new-style and legacy pickle states.

        New-style states (produced by `__getstate__`) carry a "state_dict" key and are
        restored by rebuilding the model from constructor arguments; legacy states are
        full `__dict__` snapshots and are delegated to the parent after filling in
        attributes that older models lack.
        """
        # special handling for deserializing language models
        if "state_dict" in d:
            # re-initialize language model with constructor arguments
            language_model = LanguageModel(
                dictionary=d["dictionary"],
                is_forward_lm=d["is_forward_lm"],
                hidden_size=d["hidden_size"],
                nlayers=d["nlayers"],
                embedding_size=d["embedding_size"],
                nout=d["nout"],
                document_delimiter=d["document_delimiter"],
                dropout=d["dropout"],
                recurrent_type=d.get("recurrent_type", "lstm"),
                has_decoder=d.get("has_decoder", True),
            )
            # non-strict loading when the decoder is absent from the state
            language_model.load_state_dict(d["state_dict"], strict=d.get("has_decoder", True))
            # copy over state dictionary to self
            for key in language_model.__dict__:
                self.__dict__[key] = language_model.__dict__[key]
            # set the language model to eval() by default (this is necessary since FlairEmbeddings "protect" the LM
            # in their "self.train()" method)
            self.eval()
        else:
            # legacy pickle: fill in attributes introduced after the model was saved
            if "recurrent_type" not in d:
                d["recurrent_type"] = "lstm"
            if "state_count" not in d:
                d["state_count"] = 2
            super().__setstate__(d)
    def _apply(self, fn):
        """Apply `fn` to all child modules, patching legacy RNN modules on the way.

        NOTE(review): this override does not call super()._apply; it assumes all
        parameters live in child modules (encoder/rnn/proj/decoder) — TODO confirm.
        """
        # models that were serialized using torch versions older than 1.4.0 lack the _flat_weights_names attribute
        # check if this is the case and if so, set it
        for child_module in self.children():
            if isinstance(child_module, torch.nn.RNNBase) and not hasattr(child_module, "_flat_weights_names"):
                _flat_weights_names = []
                num_direction = 2 if child_module.__dict__["bidirectional"] else 1
                for layer in range(child_module.__dict__["num_layers"]):
                    for direction in range(num_direction):
                        # reverse-direction weights carry a "_reverse" suffix, matching torch's naming
                        suffix = "_reverse" if direction == 1 else ""
                        param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"]
                        if child_module.__dict__["bias"]:
                            param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"]
                        param_names = [x.format(layer, suffix) for x in param_names]
                        _flat_weights_names.extend(param_names)
                child_module._flat_weights_names = _flat_weights_names
            child_module._apply(fn)
| 17,001 | 35.021186 | 117 | py |
flair | flair-master/flair/models/relation_classifier_model.py | import itertools
import logging
import typing
from abc import ABC, abstractmethod
from pathlib import Path
from typing import (
Any,
Dict,
Iterator,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import torch
from torch.utils.data.dataset import Dataset
import flair
from flair.data import (
Corpus,
Dictionary,
Label,
Relation,
Sentence,
Span,
Token,
_iter_dataset,
)
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import DocumentEmbeddings, TransformerDocumentEmbeddings
from flair.tokenization import SpaceTokenizer
# Module-level logger shared by this file.
logger: logging.Logger = logging.getLogger("flair")
class EncodedSentence(Sentence):
    """A marker type expressing that a sentence is already encoded for the relation classifier.

    For inference, i.e. `predict` and `evaluate`, the relation classifier internally encodes
    the sentences. Therefore, these functions work with the regular flair sentence objects.
    """
class EncodingStrategy(ABC):
    """The encoding of the head and tail entities in a sentence with a relation annotation."""

    # Control tokens inserted by this strategy; subclasses override as needed.
    special_tokens: Set[str] = set()

    def __init__(self, add_special_tokens: bool = False) -> None:
        # If True, the special tokens are registered with the transformer tokenizer
        # (see RelationClassifier.__init__).
        self.add_special_tokens = add_special_tokens

    @abstractmethod
    def encode_head(self, head_span: Span, label: Label) -> str:
        """Returns the encoded string representation of the head span.

        In multi-token head encodings, tokens are separated by a space.
        """
        ...

    @abstractmethod
    def encode_tail(self, tail_span: Span, label: Label) -> str:
        """Returns the encoded string representation of the tail span.

        In multi-token tail encodings, tokens are separated by a space.
        """
        ...
class EntityMask(EncodingStrategy):
    """Encoding strategy that replaces the head and tail entity spans with fixed mask tokens.

    Example:
    -------
    For the `founded_by` relation from `ORG` to `PER` and
    the sentence "Larry Page and Sergey Brin founded Google .",
    the encoded sentences and relations are
    - "[TAIL] and Sergey Brin founded [HEAD]" -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and [TAIL] founded [HEAD]" -> Relation(head='Google', tail='Sergey Brin').
    """

    special_tokens: Set[str] = {"[HEAD]", "[TAIL]"}

    def encode_head(self, head_span: Span, label: Label) -> str:
        # The entire head span collapses to a single mask token.
        return "[HEAD]"

    def encode_tail(self, tail_span: Span, label: Label) -> str:
        # The entire tail span collapses to a single mask token.
        return "[TAIL]"
class TypedEntityMask(EncodingStrategy):
    """Encoding strategy that replaces head and tail spans with mask tokens carrying the entity label.

    Example:
    -------
    For the `founded_by` relation from `ORG` to `PER` and
    the sentence "Larry Page and Sergey Brin founded Google .",
    the encoded sentences and relations are
    - "[TAIL-PER] and Sergey Brin founded [HEAD-ORG]" -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and [TAIL-PER] founded [HEAD-ORG]" -> Relation(head='Google', tail='Sergey Brin').
    """

    def encode_head(self, head: Span, label: Label) -> str:
        # Mask token includes the head entity's label, e.g. "[HEAD-ORG]".
        return "[HEAD-" + label.value + "]"

    def encode_tail(self, tail: Span, label: Label) -> str:
        # Mask token includes the tail entity's label, e.g. "[TAIL-PER]".
        return "[TAIL-" + label.value + "]"
class EntityMarker(EncodingStrategy):
    """Encoding strategy that surrounds the head and tail entity spans with marker tokens.

    Example:
    -------
    For the `founded_by` relation from `ORG` to `PER` and
    the sentence "Larry Page and Sergey Brin founded Google .",
    the encoded sentences and relations are
    - "[HEAD] Larry Page [/HEAD] and Sergey Brin founded [TAIL] Google [/TAIL]"
      -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and [HEAD] Sergey Brin [/HEAD] founded [TAIL] Google [/TAIL]"
      -> Relation(head='Google', tail='Sergey Brin').
    """

    special_tokens: Set[str] = {"[HEAD]", "[/HEAD]", "[TAIL]", "[/TAIL]"}

    def encode_head(self, head: Span, label: Label) -> str:
        # Keep the span text, space-joined, wrapped in opening/closing head markers.
        words = " ".join(token.text for token in head)
        return "[HEAD] " + words + " [/HEAD]"

    def encode_tail(self, tail: Span, label: Label) -> str:
        # Keep the span text, space-joined, wrapped in opening/closing tail markers.
        words = " ".join(token.text for token in tail)
        return "[TAIL] " + words + " [/TAIL]"
class TypedEntityMarker(EncodingStrategy):
    """Encoding strategy that surrounds head and tail spans with markers carrying the entity label.

    Example:
    -------
    For the `founded_by` relation from `ORG` to `PER` and
    the sentence "Larry Page and Sergey Brin founded Google .",
    the encoded sentences and relations are
    - "[HEAD-PER] Larry Page [/HEAD-PER] and Sergey Brin founded [TAIL-ORG] Google [/TAIL-ORG]"
      -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and [HEAD-PER] Sergey Brin [/HEAD-PER] founded [TAIL-ORG] Google [/TAIL-ORG]"
      -> Relation(head='Google', tail='Sergey Brin').
    """

    def encode_head(self, head: Span, label: Label) -> str:
        words = " ".join(token.text for token in head)
        tag = label.value
        return f"[HEAD-{tag}] {words} [/HEAD-{tag}]"

    def encode_tail(self, tail: Span, label: Label) -> str:
        words = " ".join(token.text for token in tail)
        tag = label.value
        return f"[TAIL-{tag}] {words} [/TAIL-{tag}]"
class EntityMarkerPunct(EncodingStrategy):
    """An alternate version of :class:`EntityMarker` that uses punctuation as control tokens.

    Example:
    -------
    For the `founded_by` relation from `ORG` to `PER` and
    the sentence "Larry Page and Sergey Brin founded Google .",
    the encoded sentences and relations are
    - "@ Larry Page @ and Sergey Brin founded # Google #" -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and @ Sergey Brin @ founded # Google #" -> Relation(head='Google', tail='Sergey Brin').
    """

    def encode_head(self, head: Span, label: Label) -> str:
        # Head spans are delimited by "@" on both sides.
        words = " ".join(token.text for token in head)
        return f"@ {words} @"

    def encode_tail(self, tail: Span, label: Label) -> str:
        # Tail spans are delimited by "#" on both sides.
        words = " ".join(token.text for token in tail)
        return f"# {words} #"
class TypedEntityMarkerPunct(EncodingStrategy):
    """An alternate version of `class`:TypedEntityMarker: with punctuations as control tokens.

    Example:
    -------
    For the `founded_by` relation from `ORG` to `PER` and
    the sentence "Larry Page and Sergey Brin founded Google .",
    the encoded sentences and relations are
    - "@ * PER * Larry Page @ and Sergey Brin founded # ^ ORG ^ Google #"
      -> Relation(head='Google', tail='Larry Page') and
    - "Larry Page and @ * PER * Sergey Brin @ founded # ^ ORG ^ Google #"
      -> Relation(head='Google', tail='Sergey Brin').

    (Docstring fix: the tail encoding uses "^" as the type delimiter — see `encode_tail` —
    not "*" as the previous example claimed.)
    """

    def encode_head(self, head: Span, label: Label) -> str:
        space_tokenized_text: str = " ".join(token.text for token in head)
        return f"@ * {label.value} * {space_tokenized_text} @"

    def encode_tail(self, tail: Span, label: Label) -> str:
        space_tokenized_text: str = " ".join(token.text for token in tail)
        return f"# ^ {label.value} ^ {space_tokenized_text} #"
class _Entity(NamedTuple):
    """A `_Entity` encapsulates either a relation's head or a tail span, including its label.

    This class serves as an internal helper class.
    """

    # The entity mention in the sentence
    span: Span
    # The entity's label (e.g. its NER tag), as selected via `entity_label_types`
    label: Label
# TODO: This closely shadows the RelationExtractor name. Maybe we need a better name here,
#       e.g. MaskedRelationClassifier?
#       It depends on whether this relation classification architecture should replace the
#       existing one or be offered as an alternative.
class RelationClassifier(flair.nn.DefaultClassifier[EncodedSentence, EncodedSentence]):
"""Relation Classifier to predict the relation between two entities.
---- Task ----
Relation Classification (RC) is the task of identifying the semantic relation between two entities in a text.
In contrast to (end-to-end) Relation Extraction (RE), RC requires pre-labelled entities.
Example:
-------
For the `founded_by` relation from `ORG` (head) to `PER` (tail) and the sentence
"Larry Page and Sergey Brin founded Google .", we extract the relations
- founded_by(head='Google', tail='Larry Page') and
- founded_by(head='Google', tail='Sergey Brin').
---- Architecture ----
The Relation Classifier Model builds upon a text classifier.
The model generates an encoded sentence for each entity pair
in the cross product of all entities in the original sentence.
In the encoded representation, the entities in the current entity pair are masked/marked with control tokens.
(For an example, see the docstrings of different encoding strategies, e.g. :class:`TypedEntityMarker`.)
Then, for each encoded sentence, the model takes its document embedding and puts the resulting
text representation(s) through a linear layer to get the class relation label.
The implemented encoding strategies are taken from this paper by Zhou et al.: https://arxiv.org/abs/2102.01373
Note: Currently, the model has no multi-label support.
"""
    def __init__(
        self,
        embeddings: DocumentEmbeddings,
        label_dictionary: Dictionary,
        label_type: str,
        entity_label_types: Union[str, Sequence[str], Dict[str, Optional[Set[str]]]],
        entity_pair_labels: Optional[Set[Tuple[str, str]]] = None,
        entity_threshold: Optional[float] = None,
        cross_augmentation: bool = True,
        encoding_strategy: EncodingStrategy = TypedEntityMarker(),
        zero_tag_value: str = "O",
        allow_unk_tag: bool = True,
        **classifierargs,
    ) -> None:
        """Initializes a `RelationClassifier`.

        :param embeddings: The document embeddings used to embed each sentence
        :param label_dictionary: A Dictionary containing all predictable labels from the corpus
        :param label_type: The label type which is going to be predicted, in case a corpus has multiple annotations
        :param entity_label_types: A label type or sequence of label types of the required relation entities.
                                   You can also specify a label filter in a dictionary with the label type as key and
                                   the valid entity labels as values in a set.
                                   E.g. to use only 'PER' and 'ORG' labels from a NER-tagger: `{'ner': {'PER', 'ORG'}}`.
                                   To use all labels from 'ner', pass 'ner'.
        :param entity_pair_labels: A set of valid relation entity pair combinations, used as relation candidates.
                                   Specify valid entity pairs in a set of tuples of labels (<HEAD>, <TAIL>).
                                   E.g. for the `born_in` relation, only relations from 'PER' to 'LOC' make sense.
                                   Here, relations from 'PER' to 'PER' are not meaningful, so
                                   it is advised to specify the `entity_pair_labels` as `{('PER', 'ORG')}`.
                                   This setting may help to reduce the number of relation candidates.
                                   Leaving this parameter as `None` (default) disables the relation-candidate-filter,
                                   i.e. the model classifies the relation for each entity pair
                                   in the cross product of *all* entity pairs (inefficient).
        :param entity_threshold: Only pre-labelled entities above this threshold are taken into account by the model.
        :param cross_augmentation: If `True`, use cross augmentation to transform `Sentence`s into `EncodedSentenece`s.
                                   When cross augmentation is enabled, the transformation functions,
                                   e.g. `transform_corpus`, generate an encoded sentence for each entity pair
                                   in the cross product of all entities in the original sentence.
                                   When disabling cross augmentation, the transform functions only generate
                                   encoded sentences for each gold relation annotation in the original sentence.
        :param encoding_strategy: An instance of a class conforming the :class:`EncodingStrategy` protocol
        :param zero_tag_value: The label to use for out-of-class relations
        :param allow_unk_tag: If `False`, removes `<unk>` from the passed label dictionary, otherwise do nothing.
        :param classifierargs: The remaining parameters passed to the underlying `DefaultClassifier`
        """
        # NOTE(review): `encoding_strategy=TypedEntityMarker()` is a mutable default argument
        # shared across instances; the strategies visible in this file hold no per-call state,
        # but confirm before adding state to an EncodingStrategy subclass.
        # Set label type and prepare label dictionary
        self._label_type = label_type
        self._zero_tag_value = zero_tag_value
        self._allow_unk_tag = allow_unk_tag
        # Rebuild the dictionary so the zero tag is always present and `<unk>` handling is explicit
        modified_label_dictionary: Dictionary = Dictionary(add_unk=self._allow_unk_tag)
        modified_label_dictionary.add_item(self._zero_tag_value)
        for label in label_dictionary.get_items():
            if label != "<unk>":
                modified_label_dictionary.add_item(label)
        # Initialize super default classifier
        super().__init__(
            embeddings=embeddings,
            label_dictionary=modified_label_dictionary,
            final_embedding_size=embeddings.embedding_length,
            **classifierargs,
        )
        # Normalize `entity_label_types` into its dictionary form {label_type: valid_labels_or_None}
        if isinstance(entity_label_types, str):
            self.entity_label_types: Dict[str, Optional[Set[str]]] = {entity_label_types: None}
        elif isinstance(entity_label_types, Sequence):
            self.entity_label_types = {entity_label_type: None for entity_label_type in entity_label_types}
        else:
            self.entity_label_types = entity_label_types
        self.entity_pair_labels = entity_pair_labels
        self.entity_threshold = entity_threshold
        self.cross_augmentation = cross_augmentation
        self.encoding_strategy = encoding_strategy
        # Add the special tokens from the encoding strategy
        if (
            self.encoding_strategy.add_special_tokens
            and self.encoding_strategy.special_tokens
            and isinstance(self.embeddings, TransformerDocumentEmbeddings)
        ):
            special_tokens: List[str] = list(self.encoding_strategy.special_tokens)
            tokenizer = self.embeddings.tokenizer
            tokenizer.add_special_tokens({"additional_special_tokens": special_tokens})
            self.embeddings.model.resize_token_embeddings(len(tokenizer))
            logger.info(
                f"{self.__class__.__name__}: "
                f"Added {', '.join(special_tokens)} as additional special tokens to {self.embeddings.name}"
            )
        # Auto-spawn on GPU, if available
        self.to(flair.device)
def _valid_entities(self, sentence: Sentence) -> Iterator[_Entity]:
"""Yields all valid entities, filtered under the specification of `self.entity_label_types`.
:param sentence: A flair `Sentence` object with entity annotations
:return: Valid entities as `_Entity`
"""
for label_type, valid_labels in self.entity_label_types.items():
for entity_span in sentence.get_spans(type=label_type):
entity_label: Label = entity_span.get_label(label_type=label_type)
# Only use entities labelled with the specified labels for each label type
if valid_labels is not None and entity_label.value not in valid_labels:
continue
# Only use entities above the specified threshold
if self.entity_threshold is not None and entity_label.score <= self.entity_threshold:
continue
yield _Entity(span=entity_span, label=entity_label)
def _entity_pair_permutations(
self,
sentence: Sentence,
) -> Iterator[Tuple[_Entity, _Entity, Optional[str]]]:
"""Yields all valid entity pair permutations (relation candidates).
If the passed sentence contains relation annotations,
the relation gold label will be yielded along with the participating entities.
The permutations are constructed by a filtered cross-product
under the specification of `self.entity_label_types` and `self.entity_pair_labels`.
:param sentence: A flair `Sentence` object with entity annotations
:yields: Tuples of (HEAD, TAIL, gold_label).
The head and tail `_Entity`s have span references to the passed sentence.
"""
valid_entities: List[_Entity] = list(self._valid_entities(sentence))
# Use a dictionary to find gold relation annotations for a given entity pair
relation_to_gold_label: Dict[str, str] = {
relation.unlabeled_identifier: relation.get_label(self.label_type, zero_tag_value=self.zero_tag_value).value
for relation in sentence.get_relations(self.label_type)
}
# Yield head and tail entity pairs from the cross product of all entities
for head, tail in itertools.product(valid_entities, repeat=2):
# Remove identity relation entity pairs
if head.span is tail.span:
continue
# Remove entity pairs with labels that do not match any
# of the specified relations in `self.entity_pair_labels`
if (
self.entity_pair_labels is not None
and (head.label.value, tail.label.value) not in self.entity_pair_labels
):
continue
# Obtain gold label, if existing
original_relation: Relation = Relation(first=head.span, second=tail.span)
gold_label: Optional[str] = relation_to_gold_label.get(original_relation.unlabeled_identifier)
yield head, tail, gold_label
    def _encode_sentence(
        self,
        head: _Entity,
        tail: _Entity,
        gold_label: Optional[str] = None,
    ) -> EncodedSentence:
        """Returns a new `Sentence` object with masked/marked head and tail spans according to the encoding strategy.

        If provided, the encoded sentence also has the corresponding gold label annotation from `self.label_type`.

        :param head: The head `_Entity`
        :param tail: The tail `_Entity`
        :param gold_label: An optional gold label of the induced relation by the head and tail entity
        :return: The `EncodedSentence` (with gold annotations)
        """
        # Some sanity checks
        original_sentence: Sentence = head.span.sentence
        assert original_sentence is tail.span.sentence, "The head and tail need to come from the same sentence."
        # Pre-compute non-leading head and tail tokens for entity masking
        non_leading_head_tokens: List[Token] = head.span.tokens[1:]
        non_leading_tail_tokens: List[Token] = tail.span.tokens[1:]
        # We can not use the plaintext of the head/tail span in the sentence as the mask/marker
        # since there may be multiple occurrences of the same entity mentioned in the sentence.
        # Therefore, we use the span's position in the sentence.
        # Identity (`is`) comparisons below are deliberate: they pin the exact token objects
        # of this occurrence, not tokens that merely have equal text.
        encoded_sentence_tokens: List[str] = []
        for token in original_sentence:
            if token is head.span[0]:
                # leading head token -> emit the whole encoded head in its place
                encoded_sentence_tokens.append(self.encoding_strategy.encode_head(head.span, head.label))
            elif token is tail.span[0]:
                # leading tail token -> emit the whole encoded tail in its place
                encoded_sentence_tokens.append(self.encoding_strategy.encode_tail(tail.span, tail.label))
            elif all(
                token is not non_leading_entity_token
                for non_leading_entity_token in itertools.chain(non_leading_head_tokens, non_leading_tail_tokens)
            ):
                # ordinary token outside both spans; non-leading span tokens are dropped,
                # as the leading token already carries the full span encoding
                encoded_sentence_tokens.append(token.text)
        # Create masked sentence
        encoded_sentence: EncodedSentence = EncodedSentence(
            " ".join(encoded_sentence_tokens), use_tokenizer=SpaceTokenizer()
        )
        if gold_label is not None:
            # Add gold relation annotation as sentence label
            # Using the sentence label instead of annotating a separate `Relation` object is easier to manage since,
            # during prediction, the forward pass does not need any knowledge about the entities in the sentence.
            encoded_sentence.add_label(typename=self.label_type, value=gold_label, score=1.0)
        encoded_sentence.copy_context_from_sentence(original_sentence)
        return encoded_sentence
def _encode_sentence_for_inference(
    self,
    sentence: Sentence,
) -> Iterator[Tuple[EncodedSentence, Relation]]:
    """Create encoded sentence / relation pairs for inference.

    For every valid (head, tail) entity-pair permutation of the given sentence, yields a newly
    created encoded sentence (annotated with the gold relation label, or `self.zero_tag_value`
    when the pair has no annotated relation) together with a `Relation` object whose head and
    tail spans reference the *original* sentence, so predictions can be transferred back.

    Important properties:
        - Every encoded sentence contains exactly one encoded head and tail entity token and
          therefore exactly one induced relation annotation.
        - The yielded relations point into the passed sentence, not the encoded copy.

    :param sentence: A flair `Sentence` object with entity annotations
    :return: Encoded sentences annotated with their gold relation and
             the corresponding relation in the original sentence
    """
    for head, tail, gold_label in self._entity_pair_permutations(sentence):
        # Pairs without an annotated relation are labelled with the "no relation" tag
        label_value: str = self.zero_tag_value if gold_label is None else gold_label
        encoded: EncodedSentence = self._encode_sentence(head=head, tail=tail, gold_label=label_value)
        yield encoded, Relation(first=head.span, second=tail.span)
def _encode_sentence_for_training(self, sentence: Sentence) -> Iterator[EncodedSentence]:
    """Create encoded sentences for training.

    Same pair enumeration as `self._encode_sentence_for_inference`, with two differences:
    the original-sentence `Relation` reference is not returned, and unannotated entity pairs
    are only kept when `self.cross_augmentation` is enabled (labelled `self.zero_tag_value`).
    """
    for head, tail, gold_label in self._entity_pair_permutations(sentence):
        if gold_label is None:
            if not self.cross_augmentation:
                # Skip generated data points that do not express an originally annotated relation
                continue
            gold_label = self.zero_tag_value

        yield self._encode_sentence(head=head, tail=tail, gold_label=gold_label)
def transform_sentence(self, sentences: Union[Sentence, List[Sentence]]) -> List[EncodedSentence]:
    """Transforms sentences into encoded sentences specific to the `RelationClassifier`.

    For more information on the internal sentence transformation procedure,
    see the :class:`RelationClassifier` architecture and
    the different :class:`EncodingStrategy` variants docstrings.

    :param sentences: A (list) of sentence(s) to transform
    :return: A list of encoded sentences specific to the `RelationClassifier`
    """
    # Normalize single-sentence input into a list
    sentence_list: List[Sentence] = sentences if isinstance(sentences, list) else [sentences]

    encoded: List[EncodedSentence] = []
    for original in sentence_list:
        encoded.extend(self._encode_sentence_for_training(original))
    return encoded
def transform_dataset(self, dataset: Dataset[Sentence]) -> FlairDatapointDataset[EncodedSentence]:
    """Transforms a dataset into a dataset containing encoded sentences specific to the `RelationClassifier`.

    The returned dataset is stored in memory.
    For more information on the internal sentence transformation procedure,
    see the :class:`RelationClassifier` architecture and
    the different :class:`EncodingStrategy` variants docstrings.

    :param dataset: A dataset of sentences to transform
    :return: A dataset of encoded sentences specific to the `RelationClassifier`
    """
    # Iterate the dataset one sentence at a time and unwrap the single-element batches
    loader: DataLoader = DataLoader(dataset, batch_size=1)
    plain_sentences: List[Sentence] = [batch[0] for batch in loader]
    return FlairDatapointDataset(self.transform_sentence(plain_sentences))
def transform_corpus(self, corpus: Corpus[Sentence]) -> Corpus[EncodedSentence]:
    """Transforms a corpus into a corpus containing encoded sentences specific to the `RelationClassifier`.

    The splits of the returned corpus are stored in memory.
    For more information on the internal sentence transformation procedure,
    see the :class:`RelationClassifier` architecture and
    the different :class:`EncodingStrategy` variants docstrings.

    :param corpus: A corpus of sentences to transform
    :return: A corpus of encoded sentences specific to the `RelationClassifier`
    """

    def _transform_split(split):
        # Encode a corpus split if present; missing splits stay missing
        return self.transform_dataset(split) if split is not None else None

    # If we sample missing splits, the encoded sentences that correspond to the same original sentences
    # may get distributed into different splits. For training purposes, this is always undesired.
    return Corpus(
        train=_transform_split(corpus.train),
        dev=_transform_split(corpus.dev),
        test=_transform_split(corpus.test),
        name=corpus.name,
        sample_missing_splits=False,
    )
def _get_embedding_for_data_point(self, prediction_data_point: EncodedSentence) -> torch.Tensor:
    """Return the stored embedding vector of an already-embedded encoded sentence."""
    embedding_names: List[str] = self.embeddings.get_names()
    return prediction_data_point.get_embedding(embedding_names)

def _get_data_points_from_sentence(self, sentence: EncodedSentence) -> List[EncodedSentence]:
    """Returns the encoded sentences to which labels are added.

    To encode sentences, use the `transform` function of the `RelationClassifier`.

    :param sentence: A single data point; must already be an `EncodedSentence`
    :return: A one-element list containing the sentence itself
    :raises ValueError: if the sentence was not transformed into an `EncodedSentence`
    """
    # Ensure that all sentences are encoded properly
    if not isinstance(sentence, EncodedSentence):
        raise ValueError(
            "Some of the passed sentences are not encoded "
            "to be compatible with the relation classifier's forward pass.\n"
            "Did you transform your raw sentences into encoded sentences? "
            "Use the\n"
            "\t- transform_sentence\n"
            "\t- transform_dataset\n"
            "\t- transform_corpus\n"
            "functions to transform you data first. "
            "When using the ModelTrainer to train a relation classification model, "
            "be sure to pass a transformed corpus:\n"
            "WRONG: trainer: ModelTrainer = ModelTrainer(model=model, corpus=corpus)\n"
            "CORRECT: trainer: ModelTrainer = ModelTrainer(model=model, corpus=model.transform_corpus(corpus))"
        )
    return [sentence]
def predict(
    self,
    sentences: Union[List[Sentence], List[EncodedSentence], Sentence, EncodedSentence],
    mini_batch_size: int = 32,
    return_probabilities_for_all_classes: bool = False,
    verbose: bool = False,
    label_name: Optional[str] = None,
    return_loss: bool = False,
    embedding_storage_mode: str = "none",
) -> Optional[Tuple[torch.Tensor, int]]:
    """Predicts the class labels for the given sentence(s).

    Standard `Sentence` objects and `EncodedSentences` specific to the `RelationClassifier` are allowed as input.
    The (relation) labels are directly added to the sentences. Mixing encoded and non-encoded
    sentences in a single call is not supported and raises a ValueError.

    :param sentences: A list of (encoded) sentences.
    :param mini_batch_size: The mini batch size to use
    :param return_probabilities_for_all_classes: Return probabilities for all classes instead of only best predicted
    :param verbose: Set to display a progress bar
    :param return_loss: Set to return loss
    :param label_name: Set to change the predicted label type name
    :param embedding_storage_mode: The default is 'none', which is always best.
        Only set to 'cpu' or 'gpu' if you wish to predict
        and keep the generated embeddings in CPU or GPU memory, respectively.
    :return: The loss and the total number of classes, if `return_loss` is set
    """
    prediction_label_type: str = self.label_type if label_name is None else label_name

    if not isinstance(sentences, list):
        sentences = [sentences]

    loss: Optional[Tuple[torch.Tensor, int]]
    encoded_sentences: List[EncodedSentence]
    if all(isinstance(sentence, EncodedSentence) for sentence in sentences):
        # Deal with the case where all sentences are encoded sentences
        # mypy does not infer the type of "sentences" restricted by the if statement
        encoded_sentences = cast(List[EncodedSentence], sentences)
        loss = super().predict(
            encoded_sentences,
            mini_batch_size=mini_batch_size,
            return_probabilities_for_all_classes=return_probabilities_for_all_classes,
            verbose=verbose,
            label_name=prediction_label_type,
            return_loss=return_loss,
            embedding_storage_mode=embedding_storage_mode,
        )

    elif all(not isinstance(sentence, EncodedSentence) for sentence in sentences):
        # Deal with the case where all sentences are standard (non-encoded) sentences:
        # encode them on-the-fly, keeping a reference from each encoded sentence to the
        # original-sentence Relation object so predictions can be transferred back.
        Sentence.set_context_for_sentences(cast(List[Sentence], sentences))
        sentences_with_relation_reference: List[Tuple[EncodedSentence, Relation]] = list(
            itertools.chain.from_iterable(self._encode_sentence_for_inference(sentence) for sentence in sentences)
        )

        encoded_sentences = [x[0] for x in sentences_with_relation_reference]
        loss = super().predict(
            encoded_sentences,
            mini_batch_size=mini_batch_size,
            return_probabilities_for_all_classes=return_probabilities_for_all_classes,
            verbose=verbose,
            label_name=prediction_label_type,
            return_loss=return_loss,
            embedding_storage_mode=embedding_storage_mode,
        )

        # For each encoded sentence, transfer its prediction onto the original relation
        for encoded_sentence, original_relation in sentences_with_relation_reference:
            for label in encoded_sentence.get_labels(prediction_label_type):
                original_relation.add_label(prediction_label_type, value=label.value, score=label.score)

    else:
        raise ValueError("All passed sentences must be either uniformly encoded or not.")

    return loss if return_loss else None
def _get_state_dict(self) -> Dict[str, Any]:
    """Serialize this classifier: base-class state plus all constructor configuration."""
    # Copy the base-class state so we never mutate the dict returned by super()
    model_state: Dict[str, Any] = dict(super()._get_state_dict())
    model_state.update(
        embeddings=self.embeddings.save_embeddings(use_state_dict=False),
        label_dictionary=self.label_dictionary,
        label_type=self.label_type,
        entity_label_types=self.entity_label_types,
        entity_pair_labels=self.entity_pair_labels,
        entity_threshold=self.entity_threshold,
        cross_augmentation=self.cross_augmentation,
        encoding_strategy=self.encoding_strategy,
        zero_tag_value=self.zero_tag_value,
        allow_unk_tag=self.allow_unk_tag,
    )
    return model_state
@classmethod
def _init_model_with_state_dict(cls, state: Dict[str, Any], **kwargs):
    """Re-create a classifier instance from a state produced by `_get_state_dict`."""
    # Constructor arguments that are serialized 1:1 in the state dictionary
    init_keys = (
        "embeddings",
        "label_dictionary",
        "label_type",
        "entity_label_types",
        "entity_pair_labels",
        "entity_threshold",
        "cross_augmentation",
        "encoding_strategy",
        "zero_tag_value",
        "allow_unk_tag",
    )
    forwarded = {key: state[key] for key in init_keys}
    return super()._init_model_with_state_dict(state, **forwarded, **kwargs)
@property
def label_type(self) -> str:
    """The label type this classifier predicts (the relation label name)."""
    return self._label_type

@property
def zero_tag_value(self) -> str:
    """The tag value assigned to entity pairs that carry no annotated relation."""
    return self._zero_tag_value

@property
def allow_unk_tag(self) -> bool:
    # NOTE(review): presumably controls whether an '<unk>' entry exists in the
    # label dictionary — confirm against the constructor (outside this view).
    return self._allow_unk_tag
def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
    """Yields all token sequences used by this model, including the synthetic
    marker/mask strings the encoding strategy produces for annotated spans."""
    yield from super().get_used_tokens(corpus)
    for sentence in _iter_dataset(corpus.get_all_sentences()):
        # NOTE(review): spans are looked up under self.label_type (the relation label
        # type) rather than an entity label type — confirm this matches how spans are
        # annotated at encoding time.
        for span in sentence.get_spans(self.label_type):
            yield self.encoding_strategy.encode_head(span, span.get_label(self.label_type)).split(" ")
            yield self.encoding_strategy.encode_tail(span, span.get_label(self.label_type)).split(" ")
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "RelationClassifier":
    """Load a `RelationClassifier` from a file path, URL, or state dictionary.

    Narrows the base-class return type to `RelationClassifier` for callers.
    The previously present function-local `from typing import cast` was redundant:
    `cast` is already imported at module level (it is used without a local import
    elsewhere in this class, e.g. in `predict`).
    """
    return cast("RelationClassifier", super().load(model_path=model_path))
| 34,165 | 45.995873 | 120 | py |
flair | flair-master/flair/models/sequence_tagger_utils/viterbi.py | from typing import Tuple
import numpy as np
import torch
import torch.nn
from torch.nn.functional import softmax
from torch.nn.utils.rnn import pack_padded_sequence
import flair
from flair.data import Dictionary, Label, List, Sentence
START_TAG: str = "<START>"
STOP_TAG: str = "<STOP>"
class ViterbiLoss(torch.nn.Module):
    """Calculates the loss for each sequence up to its length t.

    Implements the negative log-likelihood of a linear-chain CRF:
    log-sum-exp over all paths minus the score of the gold path.
    """

    def __init__(self, tag_dictionary: Dictionary) -> None:
        """Create an instance of the Viterbi loss.

        :param tag_dictionary: tag_dictionary of task
        """
        super().__init__()
        self.tag_dictionary = tag_dictionary
        self.tagset_size = len(tag_dictionary)
        self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)
        self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)

    def forward(self, features_tuple: tuple, targets: torch.Tensor) -> torch.Tensor:
        """Forward propagation of Viterbi Loss.

        :param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),
            lengths of sentences in batch, transitions from CRF
        :param targets: true tags for sentences which will be converted to matrix indices.
        :return: summed Viterbi Loss over all data points
        """
        features, lengths, transitions = features_tuple

        batch_size = features.size(0)
        seq_len = features.size(1)

        # Convert flat targets into per-sentence sequences and 1-dim CRF matrix indices
        targets, targets_matrix_indices = self._format_targets(targets, lengths)
        targets_matrix_indices = torch.tensor(targets_matrix_indices, dtype=torch.long).unsqueeze(2).to(flair.device)

        # scores_at_targets[range(features.shape[0]), lengths.values -1]
        # Squeeze crf scores matrices in 1-dim shape and gather scores at targets by matrix indices
        scores_at_targets = torch.gather(features.view(batch_size, seq_len, -1), 2, targets_matrix_indices)
        # pack_padded_sequence drops the padded positions; [0] is the packed data tensor
        scores_at_targets = pack_padded_sequence(scores_at_targets, lengths, batch_first=True)[0]
        # Transition score from each sentence's last gold tag into the STOP tag
        transitions_to_stop = transitions[
            np.repeat(self.stop_tag, features.shape[0]),
            [target[length - 1] for target, length in zip(targets, lengths)],
        ]
        # Total score of the gold paths (emissions + transitions, incl. transition to STOP)
        gold_score = scores_at_targets.sum() + transitions_to_stop.sum()

        # Forward algorithm: accumulate log-sum-exp scores over all paths, per current tag
        scores_upto_t = torch.zeros(batch_size, self.tagset_size, device=flair.device)

        for t in range(max(lengths)):
            batch_size_t = sum(
                [length > t for length in lengths]
            )  # since batch is ordered, we can save computation time by reducing our effective batch_size

            if t == 0:
                # Initially, get scores from <start> tag to all other tags
                scores_upto_t[:batch_size_t] = (
                    scores_upto_t[:batch_size_t] + features[:batch_size_t, t, :, self.start_tag]
                )
            else:
                # We add scores at current timestep to scores accumulated up to previous timestep, and log-sum-exp
                # Remember, the cur_tag of the previous timestep is the prev_tag of this timestep
                scores_upto_t[:batch_size_t] = self._log_sum_exp(
                    features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t].unsqueeze(1), dim=2
                )

        # Close all paths with a transition into the STOP tag and reduce to a scalar
        all_paths_scores = self._log_sum_exp(scores_upto_t + transitions[self.stop_tag].unsqueeze(0), dim=1).sum()

        # Negative log-likelihood of the gold paths
        viterbi_loss = all_paths_scores - gold_score

        return viterbi_loss

    @staticmethod
    def _log_sum_exp(tensor, dim):
        """Calculates the log-sum-exponent of a tensor's dimension in a numerically stable way.

        :param tensor: tensor
        :param dim: dimension to calculate log-sum-exp of
        :return: log-sum-exp
        """
        # Subtract the per-slice maximum before exponentiating to avoid overflow
        m, _ = torch.max(tensor, dim)
        m_expanded = m.unsqueeze(dim).expand_as(tensor)
        return m + torch.log(torch.sum(torch.exp(tensor - m_expanded), dim))

    def _format_targets(self, targets: torch.Tensor, lengths: torch.IntTensor):
        """Formats targets into matrix indices.

        CRF scores contain per sentence, per token a (tagset_size x tagset_size) matrix, containing emission score for
        token j + transition prob from previous token i. Means, if we think of our rows as "to tag" and our columns
        as "from tag", the matrix in cell [10,5] would contain the emission score for tag 10 + transition score
        from previous tag 5 and could directly be addressed through the 1-dim indices (10 + tagset_size * 5) = 70,
        if our tagset consists of 12 tags.

        :param targets: targets as in tag dictionary
        :param lengths: lengths of sentences in batch
        """
        targets_per_sentence = []

        # Split the flat target list into one sub-list per sentence, using the given lengths
        targets_list = targets.tolist()
        for cut in lengths:
            targets_per_sentence.append(targets_list[:cut])
            targets_list = targets_list[cut:]

        # Pad every sentence's targets with STOP tags up to the batch's maximum length
        for t in targets_per_sentence:
            t += [self.tag_dictionary.get_idx_for_item(STOP_TAG)] * (int(lengths.max().item()) - len(t))

        # First index pairs each sentence's first tag with START; subsequent indices pair
        # tag i ("from") with tag i+1 ("to"), flattened as (to + tagset_size * from)
        matrix_indices = [
            [self.tag_dictionary.get_idx_for_item(START_TAG) + (s[0] * self.tagset_size)]
            + [s[i] + (s[i + 1] * self.tagset_size) for i in range(0, len(s) - 1)]
            for s in targets_per_sentence
        ]

        return targets_per_sentence, matrix_indices
class ViterbiDecoder:
    """Decodes a given sequence using the Viterbi algorithm."""

    def __init__(self, tag_dictionary: Dictionary) -> None:
        """Initialize the Viterbi Decoder.

        :param tag_dictionary: Dictionary of tags for sequence labeling task
        """
        self.tag_dictionary = tag_dictionary
        self.tagset_size = len(tag_dictionary)
        self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)
        self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)

    def decode(
        self, features_tuple: tuple, probabilities_for_all_classes: bool, sentences: List[Sentence]
    ) -> Tuple[List, List]:
        """Decoding function returning the most likely sequence of tags.

        :param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),
            lengths of sentence in batch, transitions of CRF
        :param probabilities_for_all_classes: whether to return probabilities for all tags
        :param sentences: list of the original sentences (used for per-token score reporting)
        :return: decoded sequences
        """
        features, lengths, transitions = features_tuple
        all_tags = []

        batch_size = features.size(0)
        seq_len = features.size(1)

        # Create a tensor to hold accumulated sequence scores at each current tag
        scores_upto_t = torch.zeros(batch_size, seq_len + 1, self.tagset_size).to(flair.device)
        # Create a tensor to hold back-pointers
        # i.e., indices of the previous_tag that corresponds to maximum accumulated score at current tag
        # Let pads be the <end> tag index, since that was the last tag in the decoded sequence
        backpointers = (
            torch.ones((batch_size, seq_len + 1, self.tagset_size), dtype=torch.long, device=flair.device)
            * self.stop_tag
        )

        for t in range(seq_len):
            batch_size_t = sum([length > t for length in lengths])  # effective batch size (sans pads) at this timestep
            # Sentences that end exactly at this timestep
            terminates = [i for i, length in enumerate(lengths) if length == t + 1]

            if t == 0:
                # First token: scores come from the START tag only
                scores_upto_t[:batch_size_t, t] = features[:batch_size_t, t, :, self.start_tag]
                backpointers[:batch_size_t, t, :] = (
                    torch.ones((batch_size_t, self.tagset_size), dtype=torch.long) * self.start_tag
                )
            else:
                # We add scores at current timestep to scores accumulated up to previous timestep, and
                # choose the previous timestep that corresponds to the max. accumulated score for each current timestep
                scores_upto_t[:batch_size_t, t], backpointers[:batch_size_t, t, :] = torch.max(
                    features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t, t - 1].unsqueeze(1), dim=2
                )

            # If sentence is over, add transition to STOP-tag
            if terminates:
                scores_upto_t[terminates, t + 1], backpointers[terminates, t + 1, :] = torch.max(
                    scores_upto_t[terminates, t].unsqueeze(1) + transitions[self.stop_tag].unsqueeze(0), dim=2
                )

        # Decode/trace best path backwards
        decoded = torch.zeros((batch_size, backpointers.size(1)), dtype=torch.long, device=flair.device)
        pointer = torch.ones((batch_size, 1), dtype=torch.long, device=flair.device) * self.stop_tag

        for t in list(reversed(range(backpointers.size(1)))):
            decoded[:, t] = torch.gather(backpointers[:, t, :], 1, pointer).squeeze(1)
            pointer = decoded[:, t].unsqueeze(1)

        # Sanity check
        assert torch.equal(
            decoded[:, 0], torch.ones((batch_size), dtype=torch.long, device=flair.device) * self.start_tag
        )

        # remove start-tag and backscore to stop-tag
        scores_upto_t = scores_upto_t[:, :-1, :]
        decoded = decoded[:, 1:]

        # Max + Softmax to get confidence score for predicted label and append label to each token
        scores = softmax(scores_upto_t, dim=2)
        confidences = torch.max(scores, dim=2)

        tags = []
        for tag_seq, tag_seq_conf, length_seq in zip(decoded, confidences.values, lengths):
            tags.append(
                [
                    (self.tag_dictionary.get_item_for_index(tag), conf.item())
                    for tag, conf in list(zip(tag_seq, tag_seq_conf))[:length_seq]
                ]
            )

        if probabilities_for_all_classes:
            all_tags = self._all_scores_for_token(scores.cpu(), lengths, sentences)

        return tags, all_tags

    def _all_scores_for_token(self, scores: torch.Tensor, lengths: torch.IntTensor, sentences: List[Sentence]):
        """Returns all scores for each tag in tag dictionary.

        :param scores: Scores for current sentence.
        :param lengths: lengths of sentences in batch
        :param sentences: the original sentences (tokens become Label anchors)
        """
        scores = scores.numpy()
        prob_tags_per_sentence = []
        for scores_sentence, length, sentence in zip(scores, lengths, sentences):
            # Drop padded positions before building per-token label lists
            scores_sentence = scores_sentence[:length]
            prob_tags_per_sentence.append(
                [
                    [
                        Label(token, self.tag_dictionary.get_item_for_index(score_id), score)
                        for score_id, score in enumerate(score_dist)
                    ]
                    for score_dist, token in zip(scores_sentence, sentence)
                ]
            )
        return prob_tags_per_sentence
| 10,765 | 44.046025 | 119 | py |
flair | flair-master/flair/models/sequence_tagger_utils/crf.py | import torch
import flair
START_TAG: str = "<START>"
STOP_TAG: str = "<STOP>"
class CRF(torch.nn.Module):
    """Conditional Random Field layer.

    Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod).
    Classifier which predicts single tag / class / label for given word based on not just the word,
    but also on previous seen annotations.
    """

    def __init__(self, tag_dictionary, tagset_size: int, init_from_state_dict: bool) -> None:
        """Initialize the Conditional Random Field.

        :param tag_dictionary: tag dictionary in order to find ID for start and stop tags
        :param tagset_size: number of tag from tag dictionary
        :param init_from_state_dict: whether we load pretrained model from state dict
        """
        super().__init__()
        self.tagset_size = tagset_size
        # Learnable transition scores, addressed as transitions[to_tag, from_tag]
        self.transitions = torch.nn.Parameter(torch.randn(tagset_size, tagset_size))
        if not init_from_state_dict:
            # Fresh model (no pretrained weights): forbid transitions *into* the START
            # tag and *out of* the STOP tag by assigning a large negative score.
            start_idx = tag_dictionary.get_idx_for_item(START_TAG)
            stop_idx = tag_dictionary.get_idx_for_item(STOP_TAG)
            self.transitions.detach()[start_idx, :] = -10000
            self.transitions.detach()[:, stop_idx] = -10000
        self.to(flair.device)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """Forward propagation of Conditional Random Field.

        :param features: output from RNN / Linear layer in shape (batch size, seq len, hidden size)
        :return: CRF scores (emission scores for each token + transitions prob from previous state) in
            shape (batch_size, seq len, tagset size, tagset size)
        """
        num_sentences, num_tokens = features.shape[:2]
        # Broadcast each token's emission scores over the "from tag" axis, then add the
        # transition matrix so that crf_scores[b, t, i, j] = emission(i) + transition(i <- j)
        expanded_emissions = features.unsqueeze(-1).expand(
            num_sentences, num_tokens, self.tagset_size, self.tagset_size
        )
        return expanded_emissions + self.transitions.unsqueeze(0).unsqueeze(0)
| 2,171 | 41.588235 | 119 | py |
flair | flair-master/flair/embeddings/document.py | import logging
from typing import Any, Dict, List, Optional, Union, cast
import torch
from sklearn.feature_extraction.text import TfidfVectorizer
from torch.nn import RNNBase
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import flair
from flair.data import Sentence
from flair.embeddings.base import (
DocumentEmbeddings,
load_embeddings,
register_embeddings,
)
from flair.embeddings.token import FlairEmbeddings, StackedEmbeddings, TokenEmbeddings
from flair.embeddings.transformer import (
TransformerEmbeddings,
TransformerOnnxDocumentEmbeddings,
)
from flair.nn import LockedDropout, WordDropout
log = logging.getLogger("flair")
@register_embeddings
class TransformerDocumentEmbeddings(DocumentEmbeddings, TransformerEmbeddings):
    # ONNX export wrapper class used when converting this embedding to ONNX
    onnx_cls = TransformerOnnxDocumentEmbeddings

    def __init__(
        self,
        model: str = "bert-base-uncased",  # set parameters with different default values
        layers: str = "-1",
        layer_mean: bool = False,
        is_token_embedding: bool = False,
        **kwargs,
    ) -> None:
        """Bidirectional transformer embeddings of words from various transformer architectures.

        :param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
            options)
        :param layers: string indicating which layers to take for embedding (-1 is topmost layer)
        :param layer_mean: If True, uses a scalar mix of layers as embedding
        :param is_token_embedding: If True, the embedding additionally produces token-level embeddings
        :param kwargs: forwarded to :class:`TransformerEmbeddings` (e.g. ``cls_pooling`` — pooling strategy
            for combining token level embeddings, one of 'cls', 'max', 'mean' — and ``fine_tune``, which
            allows the transformer to be fine-tuned during training)
        """
        # Delegates to TransformerEmbeddings with is_document_embedding pinned to True
        TransformerEmbeddings.__init__(
            self,
            model=model,
            layers=layers,
            layer_mean=layer_mean,
            is_token_embedding=is_token_embedding,
            is_document_embedding=True,
            **kwargs,
        )

    @classmethod
    def create_from_state(cls, **state):
        """Re-create the embedding from a serialized parameter dict."""
        # this parameter is fixed (always True for document embeddings), so drop it
        del state["is_document_embedding"]
        return cls(**state)
@register_embeddings
class DocumentPoolEmbeddings(DocumentEmbeddings):
    """Document embeddings obtained by pooling (mean/max/min) the token embeddings of a sentence."""

    def __init__(
        self,
        embeddings: Union[TokenEmbeddings, List[TokenEmbeddings]],
        fine_tune_mode: str = "none",
        pooling: str = "mean",
    ) -> None:
        """The constructor takes a list of embeddings to be combined.

        :param embeddings: a list of token embeddings
        :param fine_tune_mode: if set to "linear" a trainable layer is added, if set to
            "nonlinear", a nonlinearity is added as well. Set this to make the pooling trainable.
        :param pooling: a string which can any value from ['mean', 'max', 'min']
        :raises ValueError: if `pooling` is not one of 'mean', 'max' or 'min'
        """
        super().__init__()

        # Validate the pooling operation up front, before any layers are constructed.
        # BUGFIX: this previously interpolated the non-existent attribute `self.mode`
        # into the message, raising an AttributeError instead of the intended ValueError.
        if pooling not in ["min", "max", "mean"]:
            raise ValueError(f"Pooling operation for {pooling!r} is not defined")
        self.pooling = pooling

        # Normalize the embeddings argument into a single StackedEmbeddings
        if isinstance(embeddings, StackedEmbeddings):
            embeddings = embeddings.embeddings
        elif isinstance(embeddings, TokenEmbeddings):
            embeddings = [embeddings]
        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
        self.__embedding_length = self.embeddings.embedding_length

        # optional fine-tuning on top of embedding layer
        self.fine_tune_mode = fine_tune_mode
        if self.fine_tune_mode in ["nonlinear", "linear"]:
            # Identity-initialized linear reprojection of the pooled space
            self.embedding_flex = torch.nn.Linear(self.embedding_length, self.embedding_length, bias=False)
            self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))

        if self.fine_tune_mode in ["nonlinear"]:
            self.embedding_flex_nonlinear = torch.nn.ReLU()
            self.embedding_flex_nonlinear_map = torch.nn.Linear(self.embedding_length, self.embedding_length)

        self.to(flair.device)

        self.name: str = f"document_{self.pooling}"
        self.eval()

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def embed(self, sentences: Union[List[Sentence], Sentence]):
        """Add embeddings to every sentence in the given list of sentences.

        If embeddings are already added, updates only if embeddings are non-static.
        """
        # if only one sentence is passed, convert to list of sentence
        if isinstance(sentences, Sentence):
            sentences = [sentences]

        # Embed tokens first, then pool per sentence
        self.embeddings.embed(sentences)

        for sentence in sentences:
            word_embeddings = torch.cat([token.get_embedding().unsqueeze(0) for token in sentence.tokens], dim=0).to(
                flair.device
            )

            # Optional trainable reprojection before pooling
            if self.fine_tune_mode in ["nonlinear", "linear"]:
                word_embeddings = self.embedding_flex(word_embeddings)

            if self.fine_tune_mode in ["nonlinear"]:
                word_embeddings = self.embedding_flex_nonlinear(word_embeddings)
                word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)

            if self.pooling == "mean":
                pooled_embedding = torch.mean(word_embeddings, 0)
            elif self.pooling == "max":
                pooled_embedding, _ = torch.max(word_embeddings, 0)
            elif self.pooling == "min":
                pooled_embedding, _ = torch.min(word_embeddings, 0)

            sentence.set_embedding(self.name, pooled_embedding)

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        # All work is done in embed(); nothing to add here
        pass

    def extra_repr(self):
        return f"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}"

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "DocumentPoolEmbeddings":
        """Re-create the embedding from a serialized parameter dict."""
        embeddings = cast(StackedEmbeddings, load_embeddings(params.pop("embeddings"))).embeddings
        return cls(embeddings=embeddings, **params)

    def to_params(self) -> Dict[str, Any]:
        """Serialize constructor configuration (wrapped embeddings are saved recursively)."""
        return {
            "pooling": self.pooling,
            "fine_tune_mode": self.fine_tune_mode,
            "embeddings": self.embeddings.save_embeddings(False),
        }
@register_embeddings
class DocumentTFIDFEmbeddings(DocumentEmbeddings):
    """Document embeddings given by the dense TF-IDF vectorization of the document text."""

    def __init__(
        self,
        train_dataset: List[Sentence],
        vectorizer: Optional[TfidfVectorizer] = None,
        **vectorizer_params,
    ) -> None:
        """The constructor for DocumentTFIDFEmbeddings.

        :param train_dataset: the train dataset which will be used to construct a vectorizer
        :param vectorizer: an optional, already fitted TfidfVectorizer (mutually exclusive
            with a non-empty `train_dataset`)
        :param vectorizer_params: parameters given to Scikit-learn's TfidfVectorizer constructor
        :raises ValueError: if both a fitted vectorizer and a non-empty train dataset are given
        """
        super().__init__()
        import numpy as np

        if vectorizer is not None:
            self.vectorizer = vectorizer
            if len(train_dataset) > 0:
                raise ValueError("Cannot initialize document tfidf embeddings with a vectorizer and with a dataset")
        else:
            self.vectorizer = TfidfVectorizer(dtype=np.float32, **vectorizer_params)
            self.vectorizer.fit([s.to_original_text() for s in train_dataset])

        # One embedding dimension per vocabulary term
        self.__embedding_length: int = len(self.vectorizer.vocabulary_)

        self.to(flair.device)

        self.name: str = "document_tfidf"
        self.eval()

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def embed(self, sentences: Union[List[Sentence], Sentence]):
        """Add embeddings to every sentence in the given list of sentences."""
        # if only one sentence is passed, convert to list of sentence
        if isinstance(sentences, Sentence):
            sentences = [sentences]

        raw_sentences = [s.to_original_text() for s in sentences]
        # Densify via `.toarray()`: the `.A` shorthand used previously is deprecated and
        # unavailable on scipy's newer sparse-array interface.
        tfidf_vectors = torch.from_numpy(self.vectorizer.transform(raw_sentences).toarray())

        for sentence_id, sentence in enumerate(sentences):
            sentence.set_embedding(self.name, tfidf_vectors[sentence_id])

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        # All work is done in embed(); nothing to add here
        pass

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "DocumentTFIDFEmbeddings":
        """Re-create the embedding from a serialized, already-fitted vectorizer."""
        return cls(train_dataset=[], vectorizer=params["vectorizer"])

    def to_params(self) -> Dict[str, Any]:
        """Serialize the fitted vectorizer (the only state this embedding needs)."""
        return {
            "vectorizer": self.vectorizer,
        }
@register_embeddings
class DocumentRNNEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: Optional[int] = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
rnn_type="GRU",
fine_tune: bool = True,
) -> None:
"""Instantiates an RNN that works upon some token embeddings.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the rnn
:param rnn_layers: the number of layers for the rnn
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the rnn or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
:param rnn_type: 'GRU' or 'LSTM'
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.rnn_type = rnn_type
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.static_embeddings = not fine_tune
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
self.word_reprojection_map = torch.nn.Linear(self.length_of_all_token_embeddings, self.embeddings_dimension)
# bidirectional RNN on top of embedding layer
if rnn_type == "LSTM":
self.rnn: RNNBase = torch.nn.LSTM(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
batch_first=True,
)
else:
self.rnn = torch.nn.GRU(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
batch_first=True,
)
self.name = "document_" + self.rnn._get_name()
# dropouts
self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None
self.locked_dropout = LockedDropout(locked_dropout) if locked_dropout > 0.0 else None
self.word_dropout = WordDropout(word_dropout) if word_dropout > 0.0 else None
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
self.eval()
@property
def embedding_length(self) -> int:
return self.__embedding_length
    def _add_embeddings_internal(self, sentences: List[Sentence]):
        """Embed a batch of sentences by pushing their token embeddings through the RNN.

        Pipeline: embed tokens -> pad to batch max length -> (dropout) -> (reprojection)
        -> packed RNN -> (dropout) -> one document vector per sentence, stored under
        ``self.name`` on each Sentence.

        If embeddings are already added, update only if embeddings are non-static.
        """
        # TODO: remove in future versions
        # backward compatibility: models serialized before these dropout attributes
        # existed lack them entirely, so default them to None
        if not hasattr(self, "locked_dropout"):
            self.locked_dropout = None
        if not hasattr(self, "word_dropout"):
            self.word_dropout = None

        self.rnn.zero_grad()

        # embed words in the sentence
        self.embeddings.embed(sentences)

        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        longest_token_sequence_in_batch: int = max(lengths)

        # one shared zero tensor is sliced for all padding positions (avoids
        # allocating a fresh zero tensor per sentence)
        pre_allocated_zero_tensor = torch.zeros(
            self.embeddings.embedding_length * longest_token_sequence_in_batch,
            dtype=torch.float,
            device=flair.device,
        )

        # flatten all token embeddings (plus padding) into one long vector,
        # then view it as [batch, max_len, emb_dim]
        all_embs: List[torch.Tensor] = []
        for sentence in sentences:
            all_embs += [emb for token in sentence for emb in token.get_each_embedding()]
            nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)
            if nb_padding_tokens > 0:
                t = pre_allocated_zero_tensor[: self.embeddings.embedding_length * nb_padding_tokens]
                all_embs.append(t)
        sentence_tensor = torch.cat(all_embs).view(
            [
                len(sentences),
                longest_token_sequence_in_batch,
                self.embeddings.embedding_length,
            ]
        )

        # before-RNN dropout
        if self.dropout:
            sentence_tensor = self.dropout(sentence_tensor)
        if self.locked_dropout:
            sentence_tensor = self.locked_dropout(sentence_tensor)
        if self.word_dropout:
            sentence_tensor = self.word_dropout(sentence_tensor)

        # reproject if set
        if self.reproject_words:
            sentence_tensor = self.word_reprojection_map(sentence_tensor)

        # push through RNN; packing lets the RNN skip the padding positions
        packed = pack_padded_sequence(sentence_tensor, lengths, enforce_sorted=False, batch_first=True)  # type: ignore[arg-type]
        rnn_out, hidden = self.rnn(packed)
        outputs, output_lengths = pad_packed_sequence(rnn_out, batch_first=True)

        # after-RNN dropout
        if self.dropout:
            outputs = self.dropout(outputs)
        if self.locked_dropout:
            outputs = self.locked_dropout(outputs)

        # extract embeddings from RNN: the last valid timestep per sentence; for
        # bidirectional RNNs, the first timestep is concatenated as well
        for sentence_no, length in enumerate(lengths):
            last_rep = outputs[sentence_no, length - 1]
            embedding = last_rep
            if self.bidirectional:
                first_rep = outputs[sentence_no, 0]
                embedding = torch.cat([first_rep, last_rep], 0)
            if self.static_embeddings:
                # static embeddings are treated as fixed features: cut the graph
                embedding = embedding.detach()
            sentence = sentences[sentence_no]
            sentence.set_embedding(self.name, embedding)
    def _apply(self, fn):
        """Apply ``fn`` to child modules, patching old serialized RNNs on the way.

        Models that were serialized using torch versions older than 1.4.0 lack the
        ``_flat_weights_names`` attribute on their RNN modules; reconstruct it here
        (mirroring the naming scheme torch itself uses: weight_ih_l{layer}{suffix},
        weight_hh_l{layer}{suffix}, plus bias variants, with a ``_reverse`` suffix
        for the backward direction) so that ``fn`` (e.g. moving to a device) works.
        """
        for child_module in self.children():
            if isinstance(child_module, torch.nn.RNNBase) and not hasattr(child_module, "_flat_weights_names"):
                _flat_weights_names = []

                # NOTE: reads config via __dict__ to bypass Module attribute lookup
                num_direction = 2 if child_module.__dict__["bidirectional"] else 1
                for layer in range(child_module.__dict__["num_layers"]):
                    for direction in range(num_direction):
                        suffix = "_reverse" if direction == 1 else ""
                        param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"]
                        if child_module.__dict__["bias"]:
                            param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"]
                        param_names = [x.format(layer, suffix) for x in param_names]
                        _flat_weights_names.extend(param_names)

                child_module._flat_weights_names = _flat_weights_names

            child_module._apply(fn)
def to_params(self):
# serialize the language models and the constructor arguments (but nothing else)
model_state = {
"embeddings": self.embeddings.save_embeddings(False),
"hidden_size": self.rnn.hidden_size,
"rnn_layers": self.rnn.num_layers,
"reproject_words": self.reproject_words,
"reproject_words_dimension": self.embeddings_dimension,
"bidirectional": self.bidirectional,
"dropout": self.dropout.p if self.dropout is not None else 0.0,
"word_dropout": self.word_dropout.p if self.word_dropout is not None else 0.0,
"locked_dropout": self.locked_dropout.p if self.locked_dropout is not None else 0.0,
"rnn_type": self.rnn_type,
"fine_tune": not self.static_embeddings,
}
return model_state
@classmethod
def from_params(cls, params: Dict[str, Any]) -> "DocumentRNNEmbeddings":
stacked_embeddings = load_embeddings(params["embeddings"])
assert isinstance(stacked_embeddings, StackedEmbeddings)
return cls(
embeddings=stacked_embeddings.embeddings,
hidden_size=params["hidden_size"],
rnn_layers=params["rnn_layers"],
reproject_words=params["reproject_words"],
reproject_words_dimension=params["reproject_words_dimension"],
bidirectional=params["bidirectional"],
dropout=params["dropout"],
word_dropout=params["word_dropout"],
locked_dropout=params["locked_dropout"],
rnn_type=params["rnn_type"],
fine_tune=params["fine_tune"],
)
    def __setstate__(self, d):
        """Restore a pickled instance by rebuilding it from serialized constructor args.

        ``d`` is the dict produced at pickling time (constructor arguments plus,
        optionally, a ``state_dict`` with module weights).
        """
        # re-initialize language model with constructor arguments
        language_model = DocumentRNNEmbeddings(
            embeddings=d["embeddings"],
            hidden_size=d["hidden_size"],
            rnn_layers=d["rnn_layers"],
            reproject_words=d["reproject_words"],
            reproject_words_dimension=d["reproject_words_dimension"],
            bidirectional=d["bidirectional"],
            dropout=d["dropout"],
            word_dropout=d["word_dropout"],
            locked_dropout=d["locked_dropout"],
            rnn_type=d["rnn_type"],
            fine_tune=d["fine_tune"],
        )

        # special handling for deserializing language models: restore trained weights
        if "state_dict" in d:
            language_model.load_state_dict(d["state_dict"])

        # copy over state dictionary to self (self was created via __reduce__/__new__
        # without __init__, so it adopts the freshly built instance's attributes)
        for key in language_model.__dict__:
            self.__dict__[key] = language_model.__dict__[key]

        # set the language model to eval() by default (this is necessary since FlairEmbeddings "protect" the LM
        # in their "self.train()" method)
        self.eval()
@register_embeddings
class DocumentLMEmbeddings(DocumentEmbeddings):
    """Document embeddings taken from the hidden states of FlairEmbeddings language models.

    For a forward LM the state at the last token is used, for a backward LM the state
    at the first token; the vectors of all given LMs are stored per sentence.
    """

    def __init__(self, flair_embeddings: List[FlairEmbeddings]) -> None:
        """Init with a list of FlairEmbeddings whose LM states represent the document."""
        super().__init__()

        self.embeddings = flair_embeddings
        self.name = "document_lm"

        # IMPORTANT: add embeddings as torch modules so their parameters are registered
        for i, embedding in enumerate(flair_embeddings):
            self.add_module(f"lm_embedding_{i}", embedding)
            if not embedding.static_embeddings:
                self.static_embeddings = False

        self._embedding_length: int = sum(embedding.embedding_length for embedding in flair_embeddings)
        self.eval()

    @property
    def embedding_length(self) -> int:
        return self._embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        for embedding in self.embeddings:
            embedding.embed(sentences)

            # iterate over sentences
            for sentence in sentences:
                # if its a forward LM, take last state
                if embedding.is_forward_lm:
                    sentence.set_embedding(
                        embedding.name,
                        sentence[len(sentence) - 1]._embeddings[embedding.name],
                    )
                else:
                    sentence.set_embedding(embedding.name, sentence[0]._embeddings[embedding.name])

        return sentences

    def get_names(self) -> List[str]:
        # BUG FIX: the previous check looked for the literal key "__names" in
        # self.__dict__, but name mangling stores the attribute under
        # "_DocumentLMEmbeddings__names", so the cache never hit and the list was
        # recomputed on every call. EAFP on the mangled attribute fixes the caching.
        try:
            return self.__names
        except AttributeError:
            self.__names = [name for embedding in self.embeddings for name in embedding.get_names()]
            return self.__names

    def to_params(self) -> Dict[str, Any]:
        return {"flair_embeddings": [embedding.save_embeddings(False) for embedding in self.embeddings]}

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "DocumentLMEmbeddings":
        return cls([cast(FlairEmbeddings, load_embeddings(embedding)) for embedding in params["flair_embeddings"]])
@register_embeddings
class SentenceTransformerDocumentEmbeddings(DocumentEmbeddings):
    """Document embeddings computed with models from the sentence-transformers library."""

    def __init__(
        self,
        model: str = "bert-base-nli-mean-tokens",
        batch_size: int = 1,
    ) -> None:
        """Instantiates a document embedding using the SentenceTransformer Embeddings.

        :param model: string name of models from SentencesTransformer Class
        :param batch_size: int number of sentences to processed in one batch
        """
        super().__init__()

        try:
            from sentence_transformers import SentenceTransformer
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "sentence-transformers" is not installed!')
            log.warning('To use Sentence Transformers, please first install with "pip install sentence-transformers"')
            log.warning("-" * 100)
            # BUG FIX: the import error was previously swallowed with `pass`, which
            # led to a confusing NameError on the line below; re-raise it instead.
            raise

        self.model_name = model
        self.model = SentenceTransformer(
            model, cache_folder=str(flair.cache_root / "embeddings" / "sentence-transformer")
        )
        self.name = "sentence-transformers-" + str(model)
        self.batch_size = batch_size
        self.static_embeddings = True
        self.eval()

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        # process the sentences in chunks of `batch_size`
        for start in range(0, len(sentences), self.batch_size):
            self._add_embeddings_to_sentences(sentences[start : start + self.batch_size])
        return sentences

    def _add_embeddings_to_sentences(self, sentences: List[Sentence]):
        # convert to plain strings, embedded in a list for the encode function
        sentences_plain_text = [sentence.to_plain_string() for sentence in sentences]

        embeddings = self.model.encode(sentences_plain_text, convert_to_numpy=False)
        for sentence, embedding in zip(sentences, embeddings):
            sentence.set_embedding(self.name, embedding)

    @property
    def embedding_length(self) -> int:
        """Returns the length of the embedding vector."""
        return self.model.get_sentence_embedding_dimension()

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "SentenceTransformerDocumentEmbeddings":
        return cls(**params)

    def to_params(self) -> Dict[str, Any]:
        return {
            "model": self.model_name,
            "batch_size": self.batch_size,
        }
@register_embeddings
class DocumentCNNEmbeddings(DocumentEmbeddings):
    """Document embedding from a 1D-CNN with max-over-time pooling over token embeddings."""

    def __init__(
        self,
        embeddings: List[TokenEmbeddings],
        kernels=((100, 3), (100, 4), (100, 5)),
        reproject_words: bool = True,
        reproject_words_dimension: Optional[int] = None,
        dropout: float = 0.5,
        word_dropout: float = 0.0,
        locked_dropout: float = 0.0,
        fine_tune: bool = True,
    ) -> None:
        """Instantiates a CNN that works uppons some token embeddings.

        :param embeddings: a list of token embeddings
        :param kernels: list of (number of kernels, kernel size)
        :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
            layer before putting them into the rnn or not
        :param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
            dimension as before will be taken.
        :param dropout: the dropout value to be used
        :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
        :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
        :param fine_tune: if False, token embeddings are detached and treated as static features
        """
        super().__init__()

        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
        self.length_of_all_token_embeddings: int = self.embeddings.embedding_length

        self.kernels = kernels
        self.reproject_words = reproject_words

        self.static_embeddings = not fine_tune

        self.embeddings_dimension: int = self.length_of_all_token_embeddings
        if self.reproject_words and reproject_words_dimension is not None:
            self.embeddings_dimension = reproject_words_dimension

        if self.reproject_words:
            self.word_reprojection_map: Optional[torch.nn.Linear] = torch.nn.Linear(
                self.length_of_all_token_embeddings, self.embeddings_dimension
            )
            torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
        else:
            self.word_reprojection_map = None

        # CNN: one Conv1d per (num_kernels, kernel_size) pair; the document vector is
        # the concatenation of the pooled outputs of all convolutions
        self.__embedding_length: int = sum(kernel_num for kernel_num, kernel_size in self.kernels)
        self.convs = torch.nn.ModuleList(
            [
                torch.nn.Conv1d(self.embeddings_dimension, kernel_num, kernel_size)
                for kernel_num, kernel_size in self.kernels
            ]
        )
        self.pool = torch.nn.AdaptiveMaxPool1d(1)

        self.name = "document_cnn"

        # dropouts
        self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None
        self.locked_dropout = LockedDropout(locked_dropout) if locked_dropout > 0.0 else None
        self.word_dropout = WordDropout(word_dropout) if word_dropout > 0.0 else None

        self.to(flair.device)

        # inputs must be padded to at least the width of the widest kernel
        self.min_sequence_length = max(kernel_size for _, kernel_size in self.kernels)
        self.eval()

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        """Add embeddings to all sentences in the given list of sentences.

        If embeddings are already added, update only if embeddings are non-static.
        """
        # TODO: remove in future versions
        # backward compatibility: older serialized models lack these attributes
        if not hasattr(self, "locked_dropout"):
            self.locked_dropout = None
        if not hasattr(self, "word_dropout"):
            self.word_dropout = None

        self.zero_grad()  # is it necessary?

        # embed words in the sentence
        self.embeddings.embed(sentences)

        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        padding_length: int = max(max(lengths), self.min_sequence_length)

        # one shared zero tensor is sliced for all padding positions
        pre_allocated_zero_tensor = torch.zeros(
            self.embeddings.embedding_length * padding_length,
            dtype=torch.float,
            device=flair.device,
        )

        all_embs: List[torch.Tensor] = []
        for sentence in sentences:
            all_embs += [emb for token in sentence for emb in token.get_each_embedding()]
            nb_padding_tokens = padding_length - len(sentence)

            if nb_padding_tokens > 0:
                t = pre_allocated_zero_tensor[: self.embeddings.embedding_length * nb_padding_tokens]
                all_embs.append(t)

        sentence_tensor = torch.cat(all_embs).view(
            [
                len(sentences),
                padding_length,
                self.embeddings.embedding_length,
            ]
        )

        # before-CNN dropout
        if self.dropout:
            sentence_tensor = self.dropout(sentence_tensor)
        if self.locked_dropout:
            sentence_tensor = self.locked_dropout(sentence_tensor)
        if self.word_dropout:
            sentence_tensor = self.word_dropout(sentence_tensor)

        # reproject if set
        if self.word_reprojection_map is not None:
            sentence_tensor = self.word_reprojection_map(sentence_tensor)

        # push CNN: Conv1d expects [batch, channels, time], hence the permute
        x = sentence_tensor
        x = x.permute(0, 2, 1)

        rep = [self.pool(torch.nn.functional.relu(conv(x))) for conv in self.convs]
        outputs = torch.cat(rep, 1)
        outputs = outputs.reshape(outputs.size(0), -1)

        # after-CNN dropout
        if self.dropout:
            outputs = self.dropout(outputs)
        if self.locked_dropout:
            outputs = self.locked_dropout(outputs)

        # extract embeddings from CNN
        for sentence_no, _length in enumerate(lengths):
            embedding = outputs[sentence_no]

            if self.static_embeddings:
                embedding = embedding.detach()

            sentence = sentences[sentence_no]
            sentence.set_embedding(self.name, embedding)

    def _apply(self, fn):
        for child_module in self.children():
            child_module._apply(fn)

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "DocumentCNNEmbeddings":
        embeddings = cast(StackedEmbeddings, load_embeddings(params.pop("embeddings"))).embeddings
        return cls(embeddings=embeddings, **params)

    def to_params(self) -> Dict[str, Any]:
        """Serialize the constructor arguments needed to re-create this embedding."""
        return {
            "embeddings": self.embeddings.save_embeddings(False),
            "kernels": self.kernels,
            "reproject_words": self.reproject_words,
            "reproject_words_dimension": self.embeddings_dimension,
            "dropout": 0.0 if self.dropout is None else self.dropout.p,
            # BUG FIX: WordDropout stores its rate as `dropout_rate` (it has no `p`
            # attribute), so the old `self.word_dropout.p` raised AttributeError —
            # note the locked_dropout line below already used the correct attribute.
            "word_dropout": 0.0 if self.word_dropout is None else self.word_dropout.dropout_rate,
            "locked_dropout": 0.0 if self.locked_dropout is None else self.locked_dropout.dropout_rate,
            "fine_tune": not self.static_embeddings,
        }
| 30,169 | 38.080311 | 129 | py |
flair | flair-master/flair/embeddings/base.py | import inspect
import logging
from abc import abstractmethod
from typing import Any, Dict, Generic, List, Sequence, Type, Union
import torch
from torch.nn import Parameter, ParameterList
import flair
from flair.data import DT, Sentence
log = logging.getLogger("flair")
class Embeddings(torch.nn.Module, Generic[DT]):
    """Abstract base class for all embeddings. Every new type of embedding must implement these methods."""

    embeddings_name: str  # class-variable referring to the "class embedding name"

    def __init__(self) -> None:
        """Set some attributes that would otherwise result in errors. Overwrite these in your embedding class."""
        if not hasattr(self, "name"):
            self.name: str = "unnamed_embedding"
        if not hasattr(self, "static_embeddings"):
            # embeddings that are identical in each epoch can be marked static for efficiency
            self.static_embeddings = False
        super().__init__()

    @property
    @abstractmethod
    def embedding_length(self) -> int:
        """Returns the length of the embedding vector."""
        raise NotImplementedError

    @property
    @abstractmethod
    def embedding_type(self) -> str:
        raise NotImplementedError

    def embed(self, data_points: Union[DT, List[DT]]) -> List[DT]:
        """Add embeddings to all words in a list of sentences.

        If embeddings are already added, updates only if embeddings are non-static.
        """
        # a single data point is wrapped into a one-element list
        batch = data_points if isinstance(data_points, list) else [data_points]
        if not self._everything_embedded(batch):
            self._add_embeddings_internal(batch)
        return batch

    def _everything_embedded(self, data_points: Sequence[DT]) -> bool:
        # true iff every data point already carries an embedding under this name
        for data_point in data_points:
            if self.name not in data_point._embeddings:
                return False
        return True

    @abstractmethod
    def _add_embeddings_internal(self, sentences: List[DT]):
        """Private method for adding embeddings to all words in a list of sentences."""

    def get_names(self) -> List[str]:
        """Returns a list of embedding names.

        In most cases, it is just a list with one item, namely the name of
        this embedding. But in some cases, the embedding is made up by different embeddings (StackedEmbedding).
        Then, the list contains the names of all embeddings in the stack.
        """
        return [self.name]

    def get_named_embeddings_dict(self) -> Dict:
        return {self.name: self}

    @staticmethod
    def get_instance_parameters(locals: dict) -> dict:
        """Filter a locals() snapshot down to the caller's __init__ parameters (plus __class__)."""
        class_definition = locals.get("__class__")
        wanted = set(inspect.signature(class_definition.__init__).parameters)  # type: ignore[misc]
        wanted.remove("self")
        wanted.add("__class__")
        return {attribute: value for attribute, value in locals.items() if attribute in wanted}

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "Embeddings":
        raise NotImplementedError

    def to_params(self) -> Dict[str, Any]:
        raise NotImplementedError

    @classmethod
    def load_embedding(cls, params: Dict[str, Any]):
        """Rebuild an embedding from saved params, restoring module weights when present."""
        saved_weights = params.pop("state_dict", None)
        embedding = cls.from_params(params)
        if saved_weights is not None:
            embedding.load_state_dict(saved_weights)
        return embedding

    def save_embeddings(self, use_state_dict: bool = True):
        """Serialize this embedding into a params dict consumable by load_embeddings()."""
        params = self.to_params()
        if use_state_dict:
            params["state_dict"] = self.state_dict()
        params["__cls__"] = type(self).embeddings_name
        return params
class ScalarMix(torch.nn.Module):
    """Mixes several tensors by a learned weighting.

    Computes a parameterised scalar mixture of N tensors.
    This method was proposed by Liu et al. (2019) in the paper:
    "Linguistic Knowledge and Transferability of Contextual Representations" (https://arxiv.org/abs/1903.08855)

    The implementation is copied and slightly modified from the allennlp repository and is licensed under Apache 2.0.
    It can be found under:
    https://github.com/allenai/allennlp/blob/master/allennlp/modules/scalar_mix.py.
    """

    def __init__(self, mixture_size: int, trainable: bool = False) -> None:
        """Inits scalar mix implementation.

        ``mixture = gamma * sum(s_k * tensor_k)`` where ``s = softmax(w)``, with ``w`` and ``gamma`` scalar parameters.
        :param mixture_size: size of mixtures (usually the number of layers)
        """
        super().__init__()
        self.mixture_size = mixture_size

        # one scalar weight per tensor, initialised to 0 (uniform after softmax)
        weights = []
        for _ in range(mixture_size):
            weights.append(
                Parameter(
                    torch.zeros(1, dtype=torch.float, device=flair.device),
                    requires_grad=trainable,
                )
            )
        self.scalar_parameters = ParameterList(weights)

        # global scale factor, initialised to 1
        self.gamma = Parameter(
            torch.ones(1, dtype=torch.float, device=flair.device),
            requires_grad=trainable,
        )

    def forward(self, tensors: List[torch.Tensor]) -> torch.Tensor:
        """Forward pass of scalar mix.

        Computes a weighted average of the ``tensors``. The input tensors an be any shape
        with at least two dimensions, but must all be the same shape.

        :param tensors: list of input tensors
        :return: computed weighted average of input tensors
        """
        if len(tensors) != self.mixture_size:
            log.error(
                "{} tensors were passed, but the module was initialized to mix {} tensors.".format(
                    len(tensors), self.mixture_size
                )
            )

        normed_weights = torch.nn.functional.softmax(torch.cat(list(self.scalar_parameters)), dim=0)
        per_tensor_weights = torch.split(normed_weights, split_size_or_sections=1)
        weighted = [weight * tensor for weight, tensor in zip(per_tensor_weights, tensors)]
        return self.gamma * sum(weighted)
class DocumentEmbeddings(Embeddings[Sentence]):
    """Abstract base class for all document-level embeddings. Every new type of document embedding must implement these methods."""

    @property
    def embedding_type(self) -> str:
        # document embeddings produce one vector per Sentence (not per Token)
        return "sentence-level"
class TokenEmbeddings(Embeddings[Sentence]):
    """Abstract base class for all token-level embeddings. Every new type of word embedding must implement these methods."""

    @property
    def embedding_type(self) -> str:
        return "word-level"

    def _everything_embedded(self, data_points: Sequence[Sentence]) -> bool:
        # embedded only if every token of every sentence carries this embedding
        return all(
            self.name in token._embeddings
            for sentence in data_points
            for token in sentence.tokens
        )
# Global registry mapping an embedding's (serialization) name to its class;
# populated by the @register_embeddings decorator and consumed by load_embeddings().
EMBEDDING_CLASSES: Dict[str, Type[Embeddings]] = {}
def register_embeddings(*args):
    """Class decorator that registers an Embeddings subclass in EMBEDDING_CLASSES.

    Usable either bare (``@register_embeddings`` — registers under the class name)
    or with an explicit name (``@register_embeddings("Name")``).
    """
    chosen_name = None

    def _register(cls):
        nonlocal chosen_name
        if chosen_name is None:
            chosen_name = cls.__name__
        cls.embeddings_name = chosen_name
        EMBEDDING_CLASSES[chosen_name] = cls
        return cls

    # bare usage: the decorated class itself is the single argument
    if len(args) == 1 and callable(args[0]):
        return _register(args[0])
    # named usage: first argument is the registration name
    if len(args) > 0:
        chosen_name = args[0]
    return _register
def load_embeddings(params: Dict[str, Any]) -> Embeddings:
    """Re-instantiate an embedding from a dict produced by ``save_embeddings``."""
    # "__cls__" names the registered class; the remaining params are its constructor args
    target_class = EMBEDDING_CLASSES[params.pop("__cls__")]
    return target_class.load_embedding(params)
| 7,959 | 33.912281 | 131 | py |
flair | flair-master/flair/embeddings/legacy.py | import logging
import re
from abc import abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import torch
from deprecated import deprecated
from transformers import (
AlbertModel,
AlbertTokenizer,
BertModel,
BertTokenizer,
CamembertModel,
CamembertTokenizer,
GPT2Model,
GPT2Tokenizer,
OpenAIGPTModel,
OpenAIGPTTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaModel,
RobertaTokenizer,
XLMModel,
XLMRobertaModel,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetModel,
XLNetTokenizer,
)
import flair
from flair.data import Sentence, Token
from flair.embeddings.base import ScalarMix
from flair.embeddings.document import DocumentEmbeddings
from flair.embeddings.token import StackedEmbeddings, TokenEmbeddings
from flair.file_utils import cached_path
from flair.nn import LockedDropout, WordDropout
log = logging.getLogger("flair")
class ELMoEmbeddings(TokenEmbeddings):
    """Contextual word embeddings using word-level LM, as proposed in Peters et al., 2018.

    ELMo word vectors can be constructed by combining layers in different ways.
    Default is to concatene the top 3 layers in the LM.
    """

    def __init__(
        self,
        model: str = "original",
        options_file: Optional[str] = None,
        weight_file: Optional[str] = None,
        embedding_mode: str = "all",
    ) -> None:
        """Initialize ELMo embeddings.

        :param model: one of "original", "small", "medium", "large"/"5.5B", "pt"/"portuguese", "pubmed"
        :param options_file: explicit options URL/path (overrides ``model`` together with ``weight_file``)
        :param weight_file: explicit weights URL/path (overrides ``model`` together with ``options_file``)
        :param embedding_mode: "all" (concatenate 3 LM layers), "top" (last layer) or "average"
        """
        super().__init__()
        self.instance_parameters = self.get_instance_parameters(locals=locals())

        try:
            import allennlp.commands.elmo
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "allennlp" is not installed!')
            log.warning('To use ELMoEmbeddings, please first install with "pip install allennlp==0.9.0"')
            log.warning("-" * 100)
            # BUG FIX: the import error was previously swallowed with `pass`, which
            # led to a confusing NameError a few lines below; re-raise it instead.
            raise

        assert embedding_mode in ["all", "top", "average"]

        self.name = f"elmo-{model}-{embedding_mode}"
        self.static_embeddings = True

        if not options_file or not weight_file:
            # the default model for ELMo is the 'original' model, which is very large
            options_file = allennlp.commands.elmo.DEFAULT_OPTIONS_FILE
            weight_file = allennlp.commands.elmo.DEFAULT_WEIGHT_FILE
            # alternatively, a small, medium or portuguese model can be selected by passing the appropriate mode name
            if model == "small":
                options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json"
                weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5"
            if model == "medium":
                options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json"
                weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"
            if model in ["large", "5.5B"]:
                options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json"
                weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5"
            if model == "pt" or model == "portuguese":
                options_file = (
                    "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json"
                )
                weight_file = (
                    "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5"
                )
            if model == "pubmed":
                options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json"
                weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5"

        if embedding_mode == "all":
            self.embedding_mode_fn = self.use_layers_all
        elif embedding_mode == "top":
            self.embedding_mode_fn = self.use_layers_top
        elif embedding_mode == "average":
            self.embedding_mode_fn = self.use_layers_average

        # put on Cuda if available
        from flair import device

        if device.type == "cuda":
            cuda_device = device.index
        elif device.type == "cpu":
            cuda_device = -1
        else:
            cuda_device = 0

        self.ee = allennlp.commands.elmo.ElmoEmbedder(
            options_file=options_file, weight_file=weight_file, cuda_device=cuda_device
        )

        # embed a dummy sentence to determine embedding_length
        dummy_sentence: Sentence = Sentence([Token("hello")])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0][0].get_embedding())

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def use_layers_all(self, x):
        # concatenate all three LM layers into one long vector
        return torch.cat(x, 0)

    def use_layers_top(self, x):
        # only the topmost LM layer
        return x[-1]

    def use_layers_average(self, x):
        # element-wise mean over the three LM layers
        return torch.mean(torch.stack(x), 0)

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        # ELMoEmbeddings before Release 0.5 did not set self.embedding_mode_fn
        if not getattr(self, "embedding_mode_fn", None):
            self.embedding_mode_fn = self.use_layers_all

        sentence_words: List[List[str]] = []
        for sentence in sentences:
            sentence_words.append([token.text for token in sentence])

        embeddings = self.ee.embed_batch(sentence_words)

        for i, sentence in enumerate(sentences):
            sentence_embeddings = embeddings[i]

            for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
                elmo_embedding_layers = [
                    torch.FloatTensor(sentence_embeddings[0, token_idx, :]),
                    torch.FloatTensor(sentence_embeddings[1, token_idx, :]),
                    torch.FloatTensor(sentence_embeddings[2, token_idx, :]),
                ]
                word_embedding = self.embedding_mode_fn(elmo_embedding_layers)
                token.set_embedding(self.name, word_embedding)

        return sentences

    def extra_repr(self):
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name

    def __setstate__(self, state):
        # after unpickling, re-pin the allennlp embedder (and its LSTM states)
        # to the currently configured flair device
        self.__dict__ = state

        if re.fullmatch(r"cuda:[0-9]+", str(flair.device)):
            cuda_device = int(str(flair.device).split(":")[-1])
        elif str(flair.device) == "cpu":
            cuda_device = -1
        else:
            cuda_device = 0

        self.ee.cuda_device = cuda_device

        self.ee.elmo_bilm.to(device=flair.device)
        self.ee.elmo_bilm._elmo_lstm._states = tuple(
            [state.to(flair.device) for state in self.ee.elmo_bilm._elmo_lstm._states]
        )
class CharLMEmbeddings(TokenEmbeddings):
"""Contextual string embeddings of words, as proposed in Akbik et al., 2018."""
@deprecated(version="0.4", reason="Use 'FlairEmbeddings' instead.")
def __init__(
self,
model: str,
detach: bool = True,
use_cache: bool = False,
cache_directory: Optional[Path] = None,
) -> None:
"""Initializes contextual string embeddings using a character-level language model.
:param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'
depending on which character language model is desired.
:param detach: if set to False, the gradient will propagate into the language model. this dramatically slows down
training and often leads to worse results, so not recommended.
:param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will
not allow re-use of once computed embeddings that do not fit into memory
:param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache
is written to the provided directory.
"""
super().__init__()
cache_dir = Path("embeddings")
# multilingual forward (English, German, French, Italian, Dutch, Polish)
if model.lower() == "multi-forward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-forward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# multilingual backward (English, German, French, Italian, Dutch, Polish)
elif model.lower() == "multi-backward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-backward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-forward
elif model.lower() == "news-forward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-v0.2rc.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-backward
elif model.lower() == "news-backward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-v0.2rc.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-forward
elif model.lower() == "news-forward-fast":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-1024-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-backward
elif model.lower() == "news-backward-fast":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-1024-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-english-forward
elif model.lower() == "mix-forward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-forward-v0.2rc.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# mix-english-backward
elif model.lower() == "mix-backward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-backward-v0.2rc.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# mix-german-forward
elif model.lower() == "german-forward" or model.lower() == "de-forward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-forward-v0.2rc.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# mix-german-backward
elif model.lower() == "german-backward" or model.lower() == "de-backward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-backward-v0.2rc.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# common crawl Polish forward
elif model.lower() == "polish-forward" or model.lower() == "pl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-forward-v0.2.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# common crawl Polish backward
elif model.lower() == "polish-backward" or model.lower() == "pl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-backward-v0.2.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Slovenian forward
elif model.lower() == "slovenian-forward" or model.lower() == "sl-forward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-forward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# Slovenian backward
elif model.lower() == "slovenian-backward" or model.lower() == "sl-backward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-backward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# Bulgarian forward
elif model.lower() == "bulgarian-forward" or model.lower() == "bg-forward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-forward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# Bulgarian backward
elif model.lower() == "bulgarian-backward" or model.lower() == "bg-backward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-backward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# Dutch forward
elif model.lower() == "dutch-forward" or model.lower() == "nl-forward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-forward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# Dutch backward
elif model.lower() == "dutch-backward" or model.lower() == "nl-backward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-backward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# Swedish forward
elif model.lower() == "swedish-forward" or model.lower() == "sv-forward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-forward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# Swedish backward
elif model.lower() == "swedish-backward" or model.lower() == "sv-backward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-backward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# French forward
elif model.lower() == "french-forward" or model.lower() == "fr-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-forward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# French backward
elif model.lower() == "french-backward" or model.lower() == "fr-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-backward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Czech forward
elif model.lower() == "czech-forward" or model.lower() == "cs-forward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-forward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# Czech backward
elif model.lower() == "czech-backward" or model.lower() == "cs-backward":
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-backward-v0.1.pt"
)
model = cached_path(base_path, cache_dir=cache_dir)
# Portuguese forward
elif model.lower() == "portuguese-forward" or model.lower() == "pt-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-forward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Portuguese backward
elif model.lower() == "portuguese-backward" or model.lower() == "pt-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-backward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
elif not Path(model).exists():
raise ValueError(f'The given model "{model}" is not available or is not a valid path.')
self.name = str(model)
self.static_embeddings = detach
from flair.models import LanguageModel
self.lm = LanguageModel.load_language_model(model)
self.detach = detach
self.is_forward_lm: bool = self.lm.is_forward_lm
# initialize cache if use_cache set
self.cache = None
if use_cache:
cache_path = (
Path(f"{self.name}-tmp-cache.sqllite")
if not cache_directory
else cache_directory / f"{self.name}-tmp-cache.sqllite"
)
from sqlitedict import SqliteDict
self.cache = SqliteDict(str(cache_path), autocommit=True)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence(["hello"])
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())
# set to eval mode
self.eval()
def train(self, mode=True):
pass
def __getstate__(self):
# Copy the object's state from self.__dict__ which contains
# all our instance attributes. Always use the dict.copy()
# method to avoid modifying the original state.
state = self.__dict__.copy()
# Remove the unpicklable entries.
state["cache"] = None
return state
    @property
    def embedding_length(self) -> int:
        """Length of the produced embedding vector.

        Determined once in __init__ by embedding a dummy sentence.
        """
        return self.__embedding_length
    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Compute character-LM embeddings for every token in the given sentences.

        Sentences found in the SQLite-backed cache (keyed by their tokenized
        string) are populated from it; the rest are run through the language
        model and, if a cache exists, written back to it.

        :param sentences: sentences whose tokens receive embeddings
        :return: the same sentences, with embeddings set on each token
        """
        # if cache is used, try setting embeddings from cache first
        if "cache" in self.__dict__ and self.cache is not None:
            # try populating embeddings from cache
            all_embeddings_retrieved_from_cache: bool = True
            for sentence in sentences:
                key = sentence.to_tokenized_string()
                embeddings = self.cache.get(key)
                if not embeddings:
                    all_embeddings_retrieved_from_cache = False
                    break
                else:
                    for token, embedding in zip(sentence, embeddings):
                        token.set_embedding(self.name, torch.FloatTensor(embedding))
            if all_embeddings_retrieved_from_cache:
                return sentences
        # if this is not possible, use LM to generate embedding. First, get text sentences
        text_sentences = [sentence.to_tokenized_string() for sentence in sentences]
        start_marker = "\n"
        end_marker = " "
        # get hidden states from language model
        all_hidden_states_in_lm = self.lm.get_representation(
            text_sentences, start_marker, end_marker, self.chars_per_chunk
        )
        # take first or last hidden states from language model as word representation
        for i, sentence in enumerate(sentences):
            sentence_text = sentence.to_tokenized_string()
            # character offsets into the marker-wrapped sentence text:
            # forward LM reads the state at a token's last character,
            # backward LM the state at its first character
            offset_forward: int = len(start_marker)
            offset_backward: int = len(sentence_text) + len(start_marker)
            for token in sentence.tokens:
                offset_forward += len(token.text)
                offset = offset_forward if self.is_forward_lm else offset_backward
                embedding = all_hidden_states_in_lm[offset, i, :]
                # if self.tokenized_lm or token.whitespace_after:
                offset_forward += 1
                offset_backward -= 1
                offset_backward -= len(token.text)
                token.set_embedding(self.name, embedding)
        # write freshly computed embeddings back to the cache, if one is used
        if "cache" in self.__dict__ and self.cache is not None:
            for sentence in sentences:
                self.cache[sentence.to_tokenized_string()] = [
                    token._embeddings[self.name].tolist() for token in sentence
                ]
        return sentences
    def __str__(self) -> str:
        """Return the model name/path this embedding was created from."""
        return self.name
class XLNetEmbeddings(TokenEmbeddings):
    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "xlnet-large-cased",
        layers: str = "1",
        pooling_operation: str = "first_last",
        use_scalar_mix: bool = False,
    ) -> None:
        """Word-level embeddings from an XLNet model (Yang et al., 2019).

        :param pretrained_model_name_or_path: name or path of the XLNet model
        :param layers: comma-separated list of layer indices to use
        :param pooling_operation: how subword vectors are pooled into one token vector
        :param use_scalar_mix: if True, combine the selected layers via scalar mix
        """
        super().__init__()
        self.name = pretrained_model_name_or_path
        self.tokenizer = XLNetTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = XLNetModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.layers: List[int] = list(map(int, layers.split(",")))
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # Embed one dummy sentence to discover the embedding dimensionality.
        dummy = Sentence(["hello"])
        embedded = self.embed(dummy)
        self.__embedding_length: int = len(embedded[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        """Dimensionality of the produced token embeddings."""
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        return _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )

    def extra_repr(self):
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
class XLMEmbeddings(TokenEmbeddings):
    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "xlm-mlm-en-2048",
        layers: str = "1",
        pooling_operation: str = "first_last",
        use_scalar_mix: bool = False,
    ) -> None:
        """Word-level embeddings from an XLM model (Guillaume et al., 2019).

        :param pretrained_model_name_or_path: name or path of the XLM model
        :param layers: comma-separated list of layer indices to use
        :param pooling_operation: how subword vectors are pooled into one token vector
        :param use_scalar_mix: if True, combine the selected layers via scalar mix
        """
        super().__init__()
        self.name = pretrained_model_name_or_path
        self.tokenizer = XLMTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = XLMModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.layers: List[int] = list(map(int, layers.split(",")))
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # Embed one dummy sentence to discover the embedding dimensionality.
        dummy = Sentence(["hello"])
        embedded = self.embed(dummy)
        self.__embedding_length: int = len(embedded[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        """Dimensionality of the produced token embeddings."""
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        return _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )

    def extra_repr(self):
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
class OpenAIGPTEmbeddings(TokenEmbeddings):
    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "openai-gpt",
        layers: str = "1",
        pooling_operation: str = "first_last",
        use_scalar_mix: bool = False,
    ) -> None:
        """Word-level embeddings from an OpenAI GPT model (Radford et al., 2018).

        :param pretrained_model_name_or_path: name or path of the OpenAI GPT model
        :param layers: comma-separated list of layer indices to use
        :param pooling_operation: how subword vectors are pooled into one token vector
        :param use_scalar_mix: if True, combine the selected layers via scalar mix
        """
        super().__init__()
        self.name = pretrained_model_name_or_path
        self.tokenizer = OpenAIGPTTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = OpenAIGPTModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.layers: List[int] = list(map(int, layers.split(",")))
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # Embed one dummy sentence to discover the embedding dimensionality.
        dummy = Sentence(["hello"])
        embedded = self.embed(dummy)
        self.__embedding_length: int = len(embedded[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        """Dimensionality of the produced token embeddings."""
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        # GPT has no special BOS/EOS tokens, so none are passed here.
        return _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
        )

    def extra_repr(self):
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
class OpenAIGPT2Embeddings(TokenEmbeddings):
    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "gpt2-medium",
        layers: str = "1",
        pooling_operation: str = "first_last",
        use_scalar_mix: bool = False,
    ) -> None:
        """Word-level embeddings from an OpenAI GPT-2 model (Radford et al., 2019).

        :param pretrained_model_name_or_path: name or path of the OpenAI GPT-2 model
        :param layers: comma-separated list of layer indices to use
        :param pooling_operation: how subword vectors are pooled into one token vector
        :param use_scalar_mix: if True, combine the selected layers via scalar mix
        """
        super().__init__()
        self.name = pretrained_model_name_or_path
        self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = GPT2Model.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.layers: List[int] = list(map(int, layers.split(",")))
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # Embed one dummy sentence to discover the embedding dimensionality.
        dummy = Sentence(["hello"])
        embedded = self.embed(dummy)
        self.__embedding_length: int = len(embedded[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        """Dimensionality of the produced token embeddings."""
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        # GPT-2 uses the same token for both sentence boundaries.
        return _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<|endoftext|>",
            eos_token="<|endoftext|>",
        )
class RoBERTaEmbeddings(TokenEmbeddings):
    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "roberta-base",
        layers: str = "-1",
        pooling_operation: str = "first",
        use_scalar_mix: bool = False,
    ) -> None:
        """Word-level embeddings from a RoBERTa model (Liu et al., 2019).

        :param pretrained_model_name_or_path: name or path of the RoBERTa model
        :param layers: comma-separated list of layer indices to use
        :param pooling_operation: how subword vectors are pooled into one token vector
        :param use_scalar_mix: if True, combine the selected layers via scalar mix
        """
        super().__init__()
        self.name = pretrained_model_name_or_path
        self.tokenizer = RobertaTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = RobertaModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.layers: List[int] = list(map(int, layers.split(",")))
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # Embed one dummy sentence to discover the embedding dimensionality.
        dummy = Sentence(["hello"])
        embedded = self.embed(dummy)
        self.__embedding_length: int = len(embedded[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        """Dimensionality of the produced token embeddings."""
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        return _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )
class CamembertEmbeddings(TokenEmbeddings):
    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "camembert-base",
        layers: str = "-1",
        pooling_operation: str = "first",
        use_scalar_mix: bool = False,
    ) -> None:
        """CamemBERT, a Tasty French Language Model, as proposed by Martin et al. 2019.

        :param pretrained_model_name_or_path: name or path of CamemBERT model
        :param layers: comma-separated list of layers
        :param pooling_operation: defines pooling operation for subwords
        :param use_scalar_mix: defines the usage of scalar mix for specified layer(s).
        """
        super().__init__()
        self.tokenizer = CamembertTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = CamembertModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.name = pretrained_model_name_or_path
        self.layers: List[int] = [int(layer) for layer in layers.split(",")]
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # embed a dummy sentence to determine the embedding length
        dummy_sentence: Sentence = Sentence(["hello"])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())

    def __getstate__(self):
        # the tokenizer is not picklable; it is re-created in __setstate__
        state = self.__dict__.copy()
        state["tokenizer"] = None
        return state

    def __setstate__(self, d):
        super().__setstate__(d)
        # Re-create the tokenizer dropped in __getstate__. Some legacy state
        # dicts prefix the model name with a numeric id
        # (e.g. "1-camembert-base" -> "camembert-base"); strip that prefix
        # only when the first dash-separated component really is a number, so
        # names that merely contain a digit somewhere (e.g. a custom model
        # path) are left untouched.
        name_parts = self.name.split("-")
        if len(name_parts) > 1 and name_parts[0].isdigit():
            self.tokenizer = CamembertTokenizer.from_pretrained("-".join(name_parts[1:]))
        else:
            self.tokenizer = CamembertTokenizer.from_pretrained(self.name)

    @property
    def embedding_length(self) -> int:
        """Returns the length of the embedding vector."""
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        sentences = _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )
        return sentences
class XLMRobertaEmbeddings(TokenEmbeddings):
    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        pretrained_model_name_or_path: str = "xlm-roberta-large",
        layers: str = "-1",
        pooling_operation: str = "first",
        use_scalar_mix: bool = False,
    ) -> None:
        """XLM-RoBERTa as proposed by Conneau et al. 2019.

        :param pretrained_model_name_or_path: name or path of XLM-R model
        :param layers: comma-separated list of layers
        :param pooling_operation: defines pooling operation for subwords
        :param use_scalar_mix: defines the usage of scalar mix for specified layer(s).
        """
        super().__init__()
        self.tokenizer = XLMRobertaTokenizer.from_pretrained(pretrained_model_name_or_path)
        self.model = XLMRobertaModel.from_pretrained(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            output_hidden_states=True,
        )
        self.name = pretrained_model_name_or_path
        self.layers: List[int] = [int(layer) for layer in layers.split(",")]
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.static_embeddings = True
        # embed a dummy sentence to determine the embedding length
        dummy_sentence: Sentence = Sentence(["hello"])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())

    def __getstate__(self):
        # the tokenizer is not picklable; it is re-created in __setstate__
        state = self.__dict__.copy()
        state["tokenizer"] = None
        return state

    def __setstate__(self, d):
        super().__setstate__(d)
        # Re-create the tokenizer dropped in __getstate__. Some legacy state
        # dicts prefix the model name with a numeric id
        # (e.g. "1-xlm-roberta-large" -> "xlm-roberta-large"); strip that
        # prefix only when the first dash-separated component really is a
        # number — unconditionally stripping would turn a plain
        # "xlm-roberta-large" into the wrong model name "roberta-large".
        name_parts = self.name.split("-")
        if len(name_parts) > 1 and name_parts[0].isdigit():
            self.tokenizer = XLMRobertaTokenizer.from_pretrained("-".join(name_parts[1:]))
        else:
            self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.name)

    @property
    def embedding_length(self) -> int:
        """Returns the length of the embedding vector."""
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        self.model.to(flair.device)
        self.model.eval()
        sentences = _get_transformer_sentence_embeddings(
            sentences=sentences,
            tokenizer=self.tokenizer,
            model=self.model,
            name=self.name,
            layers=self.layers,
            pooling_operation=self.pooling_operation,
            use_scalar_mix=self.use_scalar_mix,
            bos_token="<s>",
            eos_token="</s>",
        )
        return sentences
def _extract_embeddings(
    hidden_states: List[torch.FloatTensor],
    layers: List[int],
    pooling_operation: str,
    subword_start_idx: int,
    subword_end_idx: int,
    use_scalar_mix: bool = False,
) -> List[torch.FloatTensor]:
    """Pool the hidden states of one token's subwords, once per requested layer.

    :param hidden_states: per-layer hidden states of the model (batch dim first)
    :param layers: indices of the layers to extract from
    :param pooling_operation: one of 'first', 'last', 'first_last' or 'mean'
    :param subword_start_idx: index of the token's first subword
    :param subword_end_idx: index one past the token's last subword
    :param use_scalar_mix: if True, reduce the per-layer vectors to a single
        scalar-mix combination
    :return: one pooled embedding per selected layer (a single vector when
        scalar mix is used)
    """
    pooled_per_layer: List[torch.FloatTensor] = []
    for layer_index in layers:
        # hidden states of this token's subwords in the requested layer
        span = hidden_states[layer_index][0][subword_start_idx:subword_end_idx]
        if pooling_operation == "first_last":
            pooled = torch.cat([span[0], span[-1]])
        elif pooling_operation == "last":
            pooled = span[-1]
        elif pooling_operation == "mean":
            pooled = torch.mean(span, dim=0)
        else:
            # default: 'first'
            pooled = span[0]
        pooled_per_layer.append(pooled)
    if use_scalar_mix:
        mixer = ScalarMix(mixture_size=len(pooled_per_layer))
        pooled_per_layer = [mixer(pooled_per_layer)]
    return pooled_per_layer
def _build_token_subwords_mapping(sentence: Sentence, tokenizer: PreTrainedTokenizer) -> Tuple[Dict[int, int], str]:
    """Map each token index to its subword count and build the tokenizer input string.

    Tokens the tokenizer cannot split are replaced by its unknown token and
    counted as a single subword.

    :param sentence: input sentence
    :param tokenizer: Transformers tokenization object
    :return: dictionary of token index to corresponding number of subwords, tokenized string
    """
    subword_counts: Dict[int, int] = {}
    pieces: List[str] = []
    for token in sentence.tokens:
        subwords = tokenizer.tokenize(token.text)
        if subwords:
            pieces.append(token.text)
            subword_counts[token.idx] = len(subwords)
        else:
            pieces.append(tokenizer.unk_token)
            subword_counts[token.idx] = 1
    return subword_counts, " ".join(pieces)
def _build_token_subwords_mapping_gpt2(
    sentence: Sentence, tokenizer: PreTrainedTokenizer
) -> Tuple[Dict[int, int], str]:
    """Map each token index to its subword count, for GPT-2/RoBERTa tokenizers.

    These tokenizers mark word-initial pieces with a special ``Ġ`` symbol
    that only appears when the token follows other text, so every token
    except the first is tokenized behind a dummy word whose piece is then
    discarded.

    :param sentence: input sentence
    :param tokenizer: Transformers tokenization object
    :return: dictionary of token index to corresponding number of subwords, tokenized string
    """
    subword_counts: Dict[int, int] = {}
    pieces: List[str] = []
    for token in sentence.tokens:
        if token.idx == 1:
            subwords = tokenizer.tokenize(token.text)
        else:
            # tokenize behind a dummy word, then drop the dummy's piece
            subwords = tokenizer.tokenize("X " + token.text)[1:]
        if subwords:
            pieces.append(token.text)
            subword_counts[token.idx] = len(subwords)
        else:
            pieces.append(tokenizer.unk_token)
            subword_counts[token.idx] = 1
    return subword_counts, " ".join(pieces)
def _get_transformer_sentence_embeddings(
    sentences: List[Sentence],
    tokenizer: PreTrainedTokenizer,
    model: PreTrainedModel,
    name: str,
    layers: List[int],
    pooling_operation: str,
    use_scalar_mix: bool,
    bos_token: Optional[str] = None,
    eos_token: Optional[str] = None,
) -> List[Sentence]:
    """Builds sentence embeddings for Transformer-based architectures.

    :param sentences: input sentences
    :param tokenizer: tokenization object
    :param model: model object
    :param name: name of the Transformer-based model
    :param layers: list of layers
    :param pooling_operation: defines pooling operation for subword extraction
    :param use_scalar_mix: defines the usage of scalar mix for specified layer(s)
    :param bos_token: defines begin of sentence token (used for left padding)
    :param eos_token: defines end of sentence token (used for right padding)
    :return: list of sentences (each token of a sentence is now embedded).
    """
    with torch.no_grad():
        for sentence in sentences:
            token_subwords_mapping: Dict[int, int] = {}
            # GPT-2/RoBERTa tokenizers need special handling of the
            # word-initial marker; XLM-R ("xlm" in name) uses the generic path
            if ("gpt2" in name or "roberta" in name) and "xlm" not in name:
                (
                    token_subwords_mapping,
                    tokenized_string,
                ) = _build_token_subwords_mapping_gpt2(sentence=sentence, tokenizer=tokenizer)
            else:
                (
                    token_subwords_mapping,
                    tokenized_string,
                ) = _build_token_subwords_mapping(sentence=sentence, tokenizer=tokenizer)
            subwords = tokenizer.tokenize(tokenized_string)
            # offset skips the BOS token (if any) when indexing hidden states
            offset = 0
            if bos_token:
                subwords = [bos_token, *subwords]
                offset = 1
            if eos_token:
                subwords = [*subwords, eos_token]
            indexed_tokens = tokenizer.convert_tokens_to_ids(subwords)
            tokens_tensor = torch.tensor([indexed_tokens])
            tokens_tensor = tokens_tensor.to(flair.device)
            # last element of the model output tuple holds the hidden states
            # of all layers (models were loaded with output_hidden_states=True)
            hidden_states = model(tokens_tensor)[-1]
            # pool each token's subword states and attach the embedding
            for token in sentence.tokens:
                len_subwords = token_subwords_mapping[token.idx]
                subtoken_embeddings = _extract_embeddings(
                    hidden_states=hidden_states,
                    layers=layers,
                    pooling_operation=pooling_operation,
                    subword_start_idx=offset,
                    subword_end_idx=offset + len_subwords,
                    use_scalar_mix=use_scalar_mix,
                )
                offset += len_subwords
                final_subtoken_embedding = torch.cat(subtoken_embeddings)
                token.set_embedding(name, final_subtoken_embedding)
    return sentences
class BertEmbeddings(TokenEmbeddings):
    @deprecated(
        version="0.4.5",
        reason="Use 'TransformerWordEmbeddings' for all transformer-based word embeddings",
    )
    def __init__(
        self,
        bert_model_or_path: str = "bert-base-uncased",
        layers: str = "-1,-2,-3,-4",
        pooling_operation: str = "first",
        use_scalar_mix: bool = False,
    ) -> None:
        """Bidirectional transformer embeddings of words, as proposed in Devlin et al., 2018.

        :param bert_model_or_path: name of BERT model ('') or directory path containing custom model, configuration file
            and vocab file (names of three files should be - config.json, pytorch_model.bin/model.chkpt, vocab.txt)
        :param layers: string indicating which layers to take for embedding
        :param pooling_operation: how to get from token piece embeddings to token embedding. Either pool them and take
            the average ('mean') or use first word piece embedding as token embedding ('first')
        :param use_scalar_mix: if True, combine the selected layers via a scalar mix instead of concatenation
        """
        super().__init__()
        if "distilbert" in bert_model_or_path:
            try:
                from transformers import DistilBertModel, DistilBertTokenizer
            except ImportError:
                log.warning("-" * 100)
                log.warning("ATTENTION! To use DistilBert, please first install a recent version of transformers!")
                log.warning("-" * 100)
                # re-raise instead of falling through: without the import, the
                # lines below would fail with a confusing NameError
                raise
            self.tokenizer = DistilBertTokenizer.from_pretrained(bert_model_or_path)
            self.model = DistilBertModel.from_pretrained(
                pretrained_model_name_or_path=bert_model_or_path,
                output_hidden_states=True,
            )
        elif "albert" in bert_model_or_path:
            self.tokenizer = AlbertTokenizer.from_pretrained(bert_model_or_path)
            self.model = AlbertModel.from_pretrained(
                pretrained_model_name_or_path=bert_model_or_path,
                output_hidden_states=True,
            )
        else:
            self.tokenizer = BertTokenizer.from_pretrained(bert_model_or_path)
            self.model = BertModel.from_pretrained(
                pretrained_model_name_or_path=bert_model_or_path,
                output_hidden_states=True,
            )
        self.layer_indexes = [int(x) for x in layers.split(",")]
        self.pooling_operation = pooling_operation
        self.use_scalar_mix = use_scalar_mix
        self.name = str(bert_model_or_path)
        self.static_embeddings = True

    class BertInputFeatures:
        """Private helper class for holding BERT-formatted features."""

        def __init__(
            self,
            unique_id,
            tokens,
            input_ids,
            input_mask,
            input_type_ids,
            token_subtoken_count,
        ) -> None:
            self.unique_id = unique_id
            self.tokens = tokens
            self.input_ids = input_ids
            self.input_mask = input_mask
            self.input_type_ids = input_type_ids
            self.token_subtoken_count = token_subtoken_count

    def _convert_sentences_to_features(
        self, sentences, max_sequence_length: int
    ) -> List["BertEmbeddings.BertInputFeatures"]:
        """Convert sentences into padded, WordPiece-tokenized BERT input features.

        :param sentences: sentences to convert
        :param max_sequence_length: length of the longest tokenized sentence in the batch
        :return: one BertInputFeatures object per sentence
        """
        # reserve two positions for the [CLS] and [SEP] markers
        max_sequence_length = max_sequence_length + 2
        features: List[BertEmbeddings.BertInputFeatures] = []
        for sentence_index, sentence in enumerate(sentences):
            bert_tokenization: List[str] = []
            # remember how many word pieces each original token produced
            token_subtoken_count: Dict[int, int] = {}
            for token in sentence:
                subtokens = self.tokenizer.tokenize(token.text)
                bert_tokenization.extend(subtokens)
                token_subtoken_count[token.idx] = len(subtokens)
            if len(bert_tokenization) > max_sequence_length - 2:
                bert_tokenization = bert_tokenization[0 : (max_sequence_length - 2)]
            tokens = []
            input_type_ids = []
            tokens.append("[CLS]")
            input_type_ids.append(0)
            for token in bert_tokenization:
                tokens.append(token)
                input_type_ids.append(0)
            tokens.append("[SEP]")
            input_type_ids.append(0)
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_sequence_length:
                input_ids.append(0)
                input_mask.append(0)
                input_type_ids.append(0)
            features.append(
                BertEmbeddings.BertInputFeatures(
                    unique_id=sentence_index,
                    tokens=tokens,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    input_type_ids=input_type_ids,
                    token_subtoken_count=token_subtoken_count,
                )
            )
        return features

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Add embeddings to all words in a list of sentences. If embeddings are already added,
        updates only if embeddings are non-static.
        """
        # first, find longest sentence in batch
        longest_sentence_in_batch: int = len(
            max(
                [self.tokenizer.tokenize(sentence.to_tokenized_string()) for sentence in sentences],
                key=len,
            )
        )
        # prepare id maps for BERT model
        features = self._convert_sentences_to_features(sentences, longest_sentence_in_batch)
        all_input_ids = torch.LongTensor([f.input_ids for f in features]).to(flair.device)
        all_input_masks = torch.LongTensor([f.input_mask for f in features]).to(flair.device)
        # put encoded batch through BERT model to get all hidden states of all encoder layers
        self.model.to(flair.device)
        self.model.eval()
        all_encoder_layers = self.model(all_input_ids, attention_mask=all_input_masks)[-1]
        with torch.no_grad():
            for sentence_index, sentence in enumerate(sentences):
                feature = features[sentence_index]
                # get aggregated embeddings for each BERT-subtoken in sentence
                subtoken_embeddings = []
                for token_index, _ in enumerate(feature.tokens):
                    all_layers = []
                    for layer_index in self.layer_indexes:
                        layer_output = all_encoder_layers[int(layer_index)][sentence_index]
                        all_layers.append(layer_output[token_index])
                    if self.use_scalar_mix:
                        sm = ScalarMix(mixture_size=len(all_layers))
                        sm_embeddings = sm(all_layers)
                        all_layers = [sm_embeddings]
                    subtoken_embeddings.append(torch.cat(all_layers))
                # map subtoken embeddings back onto the original tokens;
                # token_idx starts behind the [CLS] marker
                token_idx = 0
                for token in sentence:
                    token_idx += 1
                    if self.pooling_operation == "first":
                        # use first subword embedding if pooling operation is 'first'
                        token.set_embedding(self.name, subtoken_embeddings[token_idx])
                    else:
                        # otherwise, do a mean over all subwords in token
                        embeddings = subtoken_embeddings[
                            token_idx : token_idx + feature.token_subtoken_count[token.idx]
                        ]
                        embeddings = [embedding.unsqueeze(0) for embedding in embeddings]
                        mean = torch.mean(torch.cat(embeddings, dim=0), dim=0)
                        token.set_embedding(self.name, mean)
                    token_idx += feature.token_subtoken_count[token.idx] - 1
        return sentences

    @property
    def embedding_length(self) -> int:
        """Returns the length of the embedding vector.

        Without scalar mix the selected layers are concatenated; with scalar
        mix they are combined into a single hidden-size vector.
        """
        return (
            len(self.layer_indexes) * self.model.config.hidden_size
            if not self.use_scalar_mix
            else self.model.config.hidden_size
        )
class DocumentMeanEmbeddings(DocumentEmbeddings):
    @deprecated(
        version="0.3.1",
        reason="The functionality of this class is moved to 'DocumentPoolEmbeddings'",
    )
    def __init__(self, token_embeddings: List[TokenEmbeddings]) -> None:
        """Embed a document as the mean of its token embeddings.

        :param token_embeddings: token-level embeddings to stack and average
        """
        super().__init__()
        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=token_embeddings)
        self.name: str = "document_mean"
        self.__embedding_length: int = self.embeddings.embedding_length
        self.to(flair.device)

    @property
    def embedding_length(self) -> int:
        """Dimensionality of the document embedding (equals the stacked token embedding length)."""
        return self.__embedding_length

    def embed(self, sentences: Union[List[Sentence], Sentence]):
        """Add a mean-pooled document embedding to each sentence.

        Token embeddings are (re-)computed only when at least one sentence
        does not yet carry this embedding.
        """
        # accept a single sentence as well as a list of sentences
        if type(sentences) is Sentence:
            sentences = [sentences]
        if any(self.name not in sentence._embeddings.keys() for sentence in sentences):
            self.embeddings.embed(sentences)
        for sentence in sentences:
            token_vectors = [token.get_embedding().unsqueeze(0) for token in sentence.tokens]
            stacked = torch.cat(token_vectors, dim=0).to(flair.device)
            sentence.set_embedding(self.name, torch.mean(stacked, 0))

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        # token embeddings are produced by the stacked embeddings inside embed()
        pass
class DocumentLSTMEmbeddings(DocumentEmbeddings):
    @deprecated(
        version="0.4",
        reason="The functionality of this class is moved to 'DocumentRNNEmbeddings'",
    )
    def __init__(
        self,
        embeddings: List[TokenEmbeddings],
        hidden_size=128,
        rnn_layers=1,
        reproject_words: bool = True,
        reproject_words_dimension: Optional[int] = None,
        bidirectional: bool = False,
        dropout: float = 0.5,
        word_dropout: float = 0.0,
        locked_dropout: float = 0.0,
    ) -> None:
        """The constructor takes a list of embeddings to be combined.

        :param embeddings: a list of token embeddings
        :param hidden_size: the number of hidden states in the lstm
        :param rnn_layers: the number of layers for the lstm
        :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
            layer before putting them into the lstm or not
        :param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
            dimension as before will be taken.
        :param bidirectional: boolean value, indicating whether to use a bidirectional lstm or not
        :param dropout: the dropout value to be used
        :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
        :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used.
        """
        super().__init__()
        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
        self.reproject_words = reproject_words
        self.bidirectional = bidirectional
        self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
        self.name = "document_lstm"
        self.static_embeddings = False
        self.__embedding_length: int = hidden_size
        if self.bidirectional:
            # NOTE(review): factor 4 presumably covers two directions times
            # first/last hidden states — confirm against the embed() logic
            self.__embedding_length *= 4
        self.embeddings_dimension: int = self.length_of_all_token_embeddings
        if self.reproject_words and reproject_words_dimension is not None:
            self.embeddings_dimension = reproject_words_dimension
        # bidirectional LSTM on top of embedding layer
        self.word_reprojection_map = torch.nn.Linear(self.length_of_all_token_embeddings, self.embeddings_dimension)
        # NOTE: despite the class name, a GRU is instantiated here (legacy behavior)
        self.rnn = torch.nn.GRU(
            self.embeddings_dimension,
            hidden_size,
            num_layers=rnn_layers,
            bidirectional=self.bidirectional,
        )
        # dropouts: locked dropout takes precedence over standard dropout
        if locked_dropout > 0.0:
            self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
        else:
            self.dropout = torch.nn.Dropout(dropout)
        self.use_word_dropout: bool = word_dropout > 0.0
        if self.use_word_dropout:
            self.word_dropout = WordDropout(word_dropout)
        torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
        self.to(flair.device)
    @property
    def embedding_length(self) -> int:
        """Length of one document embedding vector (4x hidden_size when bidirectional, else hidden_size)."""
        return self.__embedding_length
    def embed(self, sentences: Union[List[Sentence], Sentence]):
        """Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
        only if embeddings are non-static.

        :param sentences: a single Sentence or a list of Sentences to embed in place
        """
        if type(sentences) is Sentence:
            sentences = [sentences]

        self.rnn.zero_grad()

        # NOTE(review): sorts the caller's list in place. pack_padded_sequence below
        # requires lengths in descending order, which this sort establishes.
        sentences.sort(key=lambda x: len(x), reverse=True)

        # compute token-level embeddings first; they are read back per token below
        self.embeddings.embed(sentences)

        # first, sort sentences by number of tokens
        longest_token_sequence_in_batch: int = len(sentences[0])

        all_sentence_tensors = []
        lengths: List[int] = []

        # go through each sentence in batch
        for _i, sentence in enumerate(sentences):
            lengths.append(len(sentence.tokens))

            word_embeddings = []

            for token, _token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
                word_embeddings.append(token.get_embedding().unsqueeze(0))

            # PADDING: pad shorter sentences out with zero vectors
            for _add in range(longest_token_sequence_in_batch - len(sentence.tokens)):
                word_embeddings.append(
                    torch.zeros(self.length_of_all_token_embeddings, dtype=torch.float).unsqueeze(0).to(flair.device)
                )

            word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)

            sentence_states = word_embeddings_tensor

            # ADD TO SENTENCE LIST: add the representation
            all_sentence_tensors.append(sentence_states.unsqueeze(1))

        # --------------------------------------------------------------------
        # GET REPRESENTATION FOR ENTIRE BATCH
        # --------------------------------------------------------------------
        # resulting shape: (max_tokens, batch, embedding_dim)
        sentence_tensor = torch.cat(all_sentence_tensors, 1)

        # --------------------------------------------------------------------
        # FF PART
        # --------------------------------------------------------------------
        # use word dropout if set
        if self.use_word_dropout:
            sentence_tensor = self.word_dropout(sentence_tensor)

        if self.reproject_words:
            sentence_tensor = self.word_reprojection_map(sentence_tensor)

        sentence_tensor = self.dropout(sentence_tensor)

        packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)

        self.rnn.flatten_parameters()

        lstm_out, hidden = self.rnn(packed)

        outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)

        outputs = self.dropout(outputs)

        # --------------------------------------------------------------------
        # EXTRACT EMBEDDINGS FROM LSTM
        # --------------------------------------------------------------------
        for sentence_no, length in enumerate(lengths):
            # last valid time step of the recurrence for this sentence
            last_rep = outputs[length - 1, sentence_no]

            embedding = last_rep
            if self.bidirectional:
                # first time step additionally carries the backward direction; concatenate
                first_rep = outputs[0, sentence_no]
                embedding = torch.cat([first_rep, last_rep], 0)

            sentence = sentences[sentence_no]
            sentence.set_embedding(self.name, embedding)
    def _add_embeddings_internal(self, sentences: List[Sentence]):
        # intentionally a no-op: this embedding implements all its logic in embed() directly
        pass
class ELMoTransformerEmbeddings(TokenEmbeddings):
    """Contextual word embeddings using word-level Transformer-based LM, as proposed in Peters et al., 2018.

    Deprecated: models of this type can currently neither be saved nor loaded.
    """

    @deprecated(
        version="0.4.2",
        reason="Not possible to load or save ELMo Transformer models. @stefan-it is working on it.",
    )
    def __init__(self, model_file: str) -> None:
        """Initialize from a trained allennlp bidirectional LM archive.

        :param model_file: path to the allennlp language-model archive file
        :raises ModuleNotFoundError: if the optional dependency "allennlp" is not installed
        """
        super().__init__()

        try:
            from allennlp.data.token_indexers.elmo_indexer import (
                ELMoTokenCharactersIndexer,
            )
            from allennlp.modules.token_embedders.bidirectional_language_model_token_embedder import (
                BidirectionalLanguageModelTokenEmbedder,
            )
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "allennlp" is not installed!')
            log.warning(
                "To use ELMoTransformerEmbeddings, please first install a recent version from https://github.com/allenai/allennlp"
            )
            log.warning("-" * 100)
            # fix: re-raise instead of falling through, which previously crashed a
            # few lines below with a confusing NameError
            raise

        self.name = "elmo-transformer"
        self.static_embeddings = True
        self.lm_embedder = BidirectionalLanguageModelTokenEmbedder(
            archive_file=model_file,
            dropout=0.2,
            bos_eos_tokens=("<S>", "</S>"),
            remove_bos_eos=True,
            requires_grad=False,
        )
        self.lm_embedder = self.lm_embedder.to(device=flair.device)
        self.vocab = self.lm_embedder._lm.vocab
        self.indexer = ELMoTokenCharactersIndexer()

        # embed a dummy sentence to determine embedding_length
        dummy_sentence: Sentence = Sentence(["hello"])
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0].get_token(1).get_embedding())

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        # Avoid conflicts with flair's Token class
        import allennlp.data.tokenizers.token as allen_nlp_token

        indexer = self.indexer
        vocab = self.vocab

        for sentence in sentences:
            # convert each token into the character-index representation expected by the LM
            character_indices = indexer.tokens_to_indices(
                [allen_nlp_token.Token(token.text) for token in sentence], vocab, "elmo"
            )["elmo"]

            indices_tensor = torch.LongTensor([character_indices])
            indices_tensor = indices_tensor.to(device=flair.device)
            embeddings = self.lm_embedder(indices_tensor)[0].detach().cpu().numpy()

            for token_idx, token in enumerate(sentence.tokens):
                embedding = embeddings[token_idx]
                word_embedding = torch.FloatTensor(embedding)
                token.set_embedding(self.name, word_embedding)

        return sentences

    def extra_repr(self):
        return f"model={self.name}"

    def __str__(self) -> str:
        return self.name
| 63,878 | 39.099812 | 174 | py |
flair | flair-master/flair/embeddings/image.py | import logging
from typing import Any, Dict, List, Optional
import torch
import torch.nn.functional as F
from torch.nn import (
AdaptiveAvgPool2d,
AdaptiveMaxPool2d,
Conv2d,
Dropout2d,
Linear,
MaxPool2d,
Parameter,
ReLU,
Sequential,
TransformerEncoder,
TransformerEncoderLayer,
)
import flair
from flair.data import Image
from flair.embeddings.base import Embeddings, register_embeddings
log = logging.getLogger("flair")
class ImageEmbeddings(Embeddings[Image]):
    """Abstract base for all embeddings that operate on Image data points."""

    @property
    def embedding_type(self) -> str:
        """Image embeddings are always attached at the image level."""
        return "image-level"

    def to_params(self) -> Dict[str, Any]:
        """Serialize via pickle-style state, as implementation details are not obvious."""
        state = self.__getstate__()  # type: ignore[operator]
        return state

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "Embeddings":
        """Rebuild an instance from pickle-style state without running __init__."""
        instance = cls.__new__(cls)
        instance.__setstate__(params)
        return instance
@register_embeddings
class IdentityImageEmbeddings(ImageEmbeddings):
    """Pass-through embedding: loads each image from its URL/path and applies the given transforms."""

    def __init__(self, transforms) -> None:
        """:param transforms: callable (e.g. torchvision transform pipeline) applied to each loaded image."""
        import PIL as pythonimagelib

        self.PIL = pythonimagelib
        self.name = "Identity"
        self.transforms = transforms
        # unknown until an image is embedded; asserted in embedding_length
        self.__embedding_length: Optional[int] = None
        self.static_embeddings = True
        super().__init__()

    def _add_embeddings_internal(self, images: List[Image]):
        for img in images:
            pil_image = self.PIL.Image.open(img.imageURL)
            # force the lazy PIL loader to read the file now
            pil_image.load()
            img.set_embedding(self.name, self.transforms(pil_image))

    @property
    def embedding_length(self) -> int:
        assert self.__embedding_length is not None
        return self.__embedding_length

    def __str__(self) -> str:
        return self.name
@register_embeddings
class PrecomputedImageEmbeddings(ImageEmbeddings):
    """Look up precomputed image embeddings from a URL -> tensor dictionary.

    Images whose URL has no entry receive a zero vector of the same length.
    """

    def __init__(self, url2tensor_dict, name) -> None:
        """:param url2tensor_dict: non-empty mapping from image URL to its precomputed embedding tensor
        :param name: name under which the embeddings are stored on the Image objects
        """
        self.url2tensor_dict = url2tensor_dict
        self.name = name
        # all tensors are assumed to share one length; read it from an arbitrary
        # entry without materializing the whole values list (fix)
        self.__embedding_length = len(next(iter(self.url2tensor_dict.values())))
        self.static_embeddings = True
        super().__init__()

    def _add_embeddings_internal(self, images: List[Image]):
        for image in images:
            # single dict lookup instead of membership test plus indexing
            tensor = self.url2tensor_dict.get(image.imageURL)
            if tensor is None:
                tensor = torch.zeros(self.__embedding_length, device=flair.device)
            image.set_embedding(self.name, tensor)

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def __str__(self) -> str:
        return self.name
@register_embeddings
class NetworkImageEmbeddings(ImageEmbeddings):
    """Image embeddings from a torchvision convnet backbone (resnet50 or mobilenet_v2).

    The classification head is removed; the pooled feature vector is the embedding.
    """

    def __init__(self, name, pretrained=True, transforms=None) -> None:
        """:param name: backbone identifier, one of "resnet50" or "mobilenet_v2"
        :param pretrained: if True, load ImageNet weights and normalize inputs accordingly
        :param transforms: optional list of extra transforms applied before ToTensor
        :raises ModuleNotFoundError: if torchvision is not installed
        :raises ValueError: if `name` is not a supported backbone
        """
        super().__init__()

        try:
            import torchvision
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "torchvision" is not installed!')
            log.warning('To use convnets pretraned on ImageNet, please first install with "pip install torchvision"')
            log.warning("-" * 100)
            # fix: re-raise instead of continuing, which previously crashed below
            # with a confusing NameError on `torchvision`
            raise

        # per-backbone: (constructor, head-removal function, embedding length)
        model_info = {
            "resnet50": (torchvision.models.resnet50, lambda x: list(x)[:-1], 2048),
            "mobilenet_v2": (
                torchvision.models.mobilenet_v2,
                lambda x: list(x)[:-1] + [torch.nn.AdaptiveAvgPool2d((1, 1))],
                1280,
            ),
        }

        transforms = [] if transforms is None else transforms
        transforms += [torchvision.transforms.ToTensor()]
        if pretrained:
            # standard ImageNet normalization constants
            imagenet_mean = [0.485, 0.456, 0.406]
            imagenet_std = [0.229, 0.224, 0.225]
            transforms += [torchvision.transforms.Normalize(mean=imagenet_mean, std=imagenet_std)]
        self.transforms = torchvision.transforms.Compose(transforms)

        if name in model_info:
            model_constructor = model_info[name][0]
            model_features = model_info[name][1]
            embedding_length = model_info[name][2]

            net = model_constructor(pretrained=pretrained)
            modules = model_features(net.children())
            self.features = torch.nn.Sequential(*modules)

            self.__embedding_length = embedding_length

            self.name = name
        else:
            # ValueError is more precise than bare Exception and still caught by
            # any existing `except Exception` handlers
            raise ValueError(f"Image embeddings {name} not available.")

    def _add_embeddings_internal(self, images: List[Image]):
        image_tensor = torch.stack([self.transforms(image.data) for image in images])
        image_embeddings = self.features(image_tensor)
        # backbones ending in pooling return (batch, dim, 1, 1); flatten those
        image_embeddings = (
            image_embeddings.view(image_embeddings.shape[:2]) if image_embeddings.dim() == 4 else image_embeddings
        )
        if image_embeddings.dim() != 2:
            raise Exception(f"Unknown embedding shape of length {image_embeddings.dim()}")
        for image_id, image in enumerate(images):
            image.set_embedding(self.name, image_embeddings[image_id])

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def __str__(self) -> str:
        return self.name
@register_embeddings
class ConvTransformNetworkImageEmbeddings(ImageEmbeddings):
    """Trainable image embedding: a configurable convnet, optionally followed by a
    transformer encoder over the feature-map pixels with learned 2-D positional
    encodings and a <cls> token whose final state becomes the image embedding.
    """

    def __init__(self, feats_in, convnet_parms, posnet_parms, transformer_parms) -> None:
        """:param feats_in: number of input channels of the first conv layer
        :param convnet_parms: conv configuration; expected keys (lists aligned per layer):
            "dropout", "n_feats_out", "kernel_sizes", "strides", "groups", plus
            "pool_layers_map" (layer-index string -> pool kernel), "adaptive_pool_func"
            ("max"/"avg") and "output_size" -- assumed shapes; TODO confirm against callers
        :param posnet_parms: positional-encoding net config ("nonlinear", "n_hidden"),
            or None to disable the transformer part
        :param transformer_parms: dict with "n_blocks" and "transformer_encoder_parms",
            or None to disable the transformer part
        """
        super().__init__()

        adaptive_pool_func_map = {"max": AdaptiveMaxPool2d, "avg": AdaptiveAvgPool2d}

        # first block: optional dropout, conv, ReLU
        convnet_arch: List[Any] = [] if convnet_parms["dropout"][0] <= 0 else [Dropout2d(convnet_parms["dropout"][0])]
        convnet_arch.extend(
            [
                Conv2d(
                    in_channels=feats_in,
                    out_channels=convnet_parms["n_feats_out"][0],
                    kernel_size=convnet_parms["kernel_sizes"][0],
                    padding=convnet_parms["kernel_sizes"][0][0] // 2,
                    stride=convnet_parms["strides"][0],
                    groups=convnet_parms["groups"][0],
                ),
                ReLU(),
            ]
        )
        if "0" in convnet_parms["pool_layers_map"]:
            convnet_arch.append(MaxPool2d(kernel_size=convnet_parms["pool_layers_map"]["0"]))

        # remaining conv blocks, chaining n_feats_out[i-1] -> n_feats_out[i]
        for layer_id, (kernel_size, n_in, n_out, groups, stride, dropout) in enumerate(
            zip(
                convnet_parms["kernel_sizes"][1:],
                convnet_parms["n_feats_out"][:-1],
                convnet_parms["n_feats_out"][1:],
                convnet_parms["groups"][1:],
                convnet_parms["strides"][1:],
                convnet_parms["dropout"][1:],
            )
        ):
            if dropout > 0:
                convnet_arch.append(Dropout2d(dropout))
            convnet_arch.append(
                Conv2d(
                    in_channels=n_in,
                    out_channels=n_out,
                    kernel_size=kernel_size,
                    padding=kernel_size[0] // 2,
                    stride=stride,
                    groups=groups,
                )
            )
            convnet_arch.append(ReLU())
            if str(layer_id + 1) in convnet_parms["pool_layers_map"]:
                convnet_arch.append(MaxPool2d(kernel_size=convnet_parms["pool_layers_map"][str(layer_id + 1)]))
        # final adaptive pooling to the fixed spatial output size
        convnet_arch.append(
            adaptive_pool_func_map[convnet_parms["adaptive_pool_func"]](output_size=convnet_parms["output_size"])
        )
        self.conv_features = Sequential(*convnet_arch)
        conv_feat_dim = convnet_parms["n_feats_out"][-1]

        if posnet_parms is not None and transformer_parms is not None:
            self.use_transformer = True
            # small MLP (or single linear layer) mapping (row, col) -> positional encoding
            if posnet_parms["nonlinear"]:
                posnet_arch = [
                    Linear(2, posnet_parms["n_hidden"]),
                    ReLU(),
                    Linear(posnet_parms["n_hidden"], conv_feat_dim),
                ]
            else:
                posnet_arch = [Linear(2, conv_feat_dim)]
            self.position_features = Sequential(*posnet_arch)
            transformer_layer = TransformerEncoderLayer(
                d_model=conv_feat_dim, **transformer_parms["transformer_encoder_parms"]
            )
            self.transformer = TransformerEncoder(transformer_layer, num_layers=transformer_parms["n_blocks"])
            # <cls> token initially set to 1/D, so it attends to all image features equally
            self.cls_token = Parameter(torch.ones(conv_feat_dim, 1) / conv_feat_dim)
            self._feat_dim = conv_feat_dim
        else:
            self.use_transformer = False
            # without the transformer, the flattened pooled feature map is the embedding
            self._feat_dim = convnet_parms["output_size"][0] * convnet_parms["output_size"][1] * conv_feat_dim

    def forward(self, x):
        """Map an image batch to embeddings of shape [b, embedding_length]."""
        x = self.conv_features(x)  # [b, d, h, w]
        b, d, h, w = x.shape
        if self.use_transformer:
            # add positional encodings
            y = torch.stack(
                [
                    torch.cat([torch.arange(h).unsqueeze(1)] * w, dim=1),
                    torch.cat([torch.arange(w).unsqueeze(0)] * h, dim=0),
                ]
            )  # [2, h, w]
            y = y.view([2, h * w]).transpose(1, 0)  # [h*w, 2]
            y = y.type(torch.float32).to(flair.device)
            y = self.position_features(y).transpose(1, 0).view([d, h, w])  # [h*w, d] => [d, h, w]
            y = y.unsqueeze(dim=0)  # [1, d, h, w]
            x = x + y  # [b, d, h, w] + [1, d, h, w] => [b, d, h, w]
            # reshape the pixels into the sequence
            x = x.view([b, d, h * w])  # [b, d, h*w]
            # layer norm after convolution and positional encodings
            x = F.layer_norm(x.permute([0, 2, 1]), (d,)).permute([0, 2, 1])
            # add <cls> token
            x = torch.cat([x, torch.stack([self.cls_token] * b)], dim=2)  # [b, d, h*w+1]
            # transformer requires input in the shape [h*w+1, b, d]
            x = (
                x.view([b * d, h * w + 1]).transpose(1, 0).view([h * w + 1, b, d])
            )  # [b, d, h*w+1] => [b*d, h*w+1] => [h*w+1, b*d] => [h*w+1, b, d]
            x = self.transformer(x)  # [h*w+1, b, d]
            # the output is an embedding of <cls> token
            x = x[-1, :, :]  # [b, d]
        else:
            x = x.view([-1, self._feat_dim])
            x = F.layer_norm(x, (self._feat_dim,))
        return x

    def _add_embeddings_internal(self, images: List[Image]):
        image_tensor = torch.stack([image.data for image in images])
        image_embeddings = self.forward(image_tensor)
        for image_id, image in enumerate(images):
            image.set_embedding(self.name, image_embeddings[image_id])

    @property
    def embedding_length(self):
        return self._feat_dim

    def __str__(self) -> str:
        # NOTE(review): `self.name` is never assigned in this class -- presumably set
        # externally or by a base class; verify before relying on __str__ or
        # _add_embeddings_internal.
        return self.name
| 10,902 | 37.663121 | 118 | py |
flair | flair-master/flair/embeddings/transformer.py | import inspect
import os
import random
import re
import tempfile
import warnings
import zipfile
from abc import abstractmethod
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
import torch
from torch.jit import ScriptModule
from transformers import (
CONFIG_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoModel,
AutoTokenizer,
FeatureExtractionMixin,
LayoutLMTokenizer,
LayoutLMTokenizerFast,
LayoutLMv2FeatureExtractor,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.tokenization_utils_base import LARGE_INTEGER
from transformers.utils import PaddingStrategy
import flair
from flair.data import Sentence, Token, log
from flair.embeddings.base import (
DocumentEmbeddings,
Embeddings,
TokenEmbeddings,
register_embeddings,
)
SENTENCE_BOUNDARY_TAG: str = "[FLERT]"
@torch.jit.script_if_tracing
def pad_sequence_embeddings(all_hidden_states: List[torch.Tensor]) -> torch.Tensor:
    """Stack per-sentence embedding matrices into one zero-padded batch tensor.

    :param all_hidden_states: one (num_tokens_i, embedding_dim) tensor per sentence
    :return: tensor of shape (batch_size, max_num_tokens, embedding_dim)
    """
    embedding_length = all_hidden_states[0].shape[1]
    longest_token_sequence_in_batch = 0
    # explicit loop instead of max() keeps the function TorchScript-compatible
    for hidden_states in all_hidden_states:
        if hidden_states.shape[0] > longest_token_sequence_in_batch:
            longest_token_sequence_in_batch = hidden_states.shape[0]
    # one shared zero block, sliced per sentence, avoids per-sentence padding allocations
    pre_allocated_zero_tensor = torch.zeros(
        embedding_length * longest_token_sequence_in_batch,
        dtype=torch.float,
        device=flair.device,
    )
    all_embs = []
    for hidden_states in all_hidden_states:
        all_embs.append(hidden_states.view(-1))
        nb_padding_tokens = longest_token_sequence_in_batch - hidden_states.shape[0]
        if nb_padding_tokens > 0:
            all_embs.append(pre_allocated_zero_tensor[: embedding_length * nb_padding_tokens])
    return torch.cat(all_embs).view(len(all_hidden_states), longest_token_sequence_in_batch, embedding_length)
@torch.jit.script_if_tracing
def truncate_hidden_states(hidden_states: torch.Tensor, input_ids: torch.Tensor) -> torch.Tensor:
    """Cut the last dimension of `hidden_states` down to the sequence length of `input_ids`."""
    sequence_length = input_ids.size(1)
    return hidden_states[:, :, :sequence_length]
@torch.jit.script_if_tracing
def combine_strided_tensors(
    hidden_states: torch.Tensor,
    overflow_to_sample_mapping: torch.Tensor,
    half_stride: int,
    max_length: int,
    default_value: int,
) -> torch.Tensor:
    """Stitch overlapping strided windows back into one tensor per original sentence.

    Long sentences are split into overlapping windows of `max_length` subtokens;
    `overflow_to_sample_mapping[i]` gives the sentence index of window i. For
    multi-window sentences the overlapping regions are dropped so each subtoken
    position appears once. Positions beyond a sentence's real length keep
    `default_value`.

    :param hidden_states: (num_windows, max_length) ids or (num_windows, max_length, dim) states
    :param overflow_to_sample_mapping: (num_windows,) window -> sentence index
    :param half_stride: half of the tokenizer stride used when splitting
    :param max_length: the tokenizer's model_max_length
    :param default_value: fill value for unused positions (e.g. pad_token_id)
    :return: (num_sentences, combined_length[, dim]) tensor
    """
    _, counts = torch.unique(overflow_to_sample_mapping, sorted=True, return_counts=True)
    sentence_count = int(overflow_to_sample_mapping.max().item() + 1)
    # longest possible reassembly: one full window plus the middle of each extra window
    token_count = max_length + (max_length - 2) * int(counts.max().item() - 1)
    if hidden_states.dim() == 2:
        sentence_hidden_states = torch.zeros(
            (sentence_count, token_count), device=flair.device, dtype=hidden_states.dtype
        )
    else:
        sentence_hidden_states = torch.zeros(
            (sentence_count, token_count, hidden_states.shape[2]), device=flair.device, dtype=hidden_states.dtype
        )
    sentence_hidden_states += default_value
    for sentence_id in torch.arange(0, sentence_hidden_states.shape[0]):
        selected_sentences = hidden_states[overflow_to_sample_mapping == sentence_id]
        if selected_sentences.size(0) > 1:
            start_part = selected_sentences[0, : half_stride + 1]
            mid_part = selected_sentences[:, half_stride + 1 : max_length - 1 - half_stride]
            mid_part = torch.reshape(mid_part, (mid_part.shape[0] * mid_part.shape[1],) + mid_part.shape[2:])
            end_part = selected_sentences[selected_sentences.shape[0] - 1, max_length - half_stride - 1 :]
            sentence_hidden_state = torch.cat((start_part, mid_part, end_part), dim=0)
            # fix: reuse the concatenation computed above instead of building it twice
            sentence_hidden_states[sentence_id, : sentence_hidden_state.shape[0]] = sentence_hidden_state
        else:
            sentence_hidden_states[sentence_id, : selected_sentences.shape[1]] = selected_sentences[0, :]
    return sentence_hidden_states
@torch.jit.script_if_tracing
def fill_masked_elements(
    all_token_embeddings: torch.Tensor,
    sentence_hidden_states: torch.Tensor,
    mask: torch.Tensor,
    word_ids: torch.Tensor,
    lengths: torch.LongTensor,
):
    """Copy, per sentence, the subtoken states selected by `mask` into the token-embedding buffer.

    :param all_token_embeddings: output buffer (batch, max_tokens, dim), written in place
    :param sentence_hidden_states: subtoken states (batch, max_subtokens, dim)
    :param mask: boolean (batch, max_subtokens) subtoken selection
    :param word_ids: (batch, max_subtokens) token index per subtoken; entries < 0 are excluded
    :param lengths: (batch,) number of real tokens per sentence
    :return: `all_token_embeddings` (mutated in place)
    """
    for i in torch.arange(int(all_token_embeddings.shape[0])):
        # keep only masked, non-special subtokens and pad out tokens that got no subtoken
        r = insert_missing_embeddings(sentence_hidden_states[i][mask[i] & (word_ids[i] >= 0)], word_ids[i], lengths[i])
        all_token_embeddings[i, : lengths[i], :] = r
    return all_token_embeddings
@torch.jit.script_if_tracing
def insert_missing_embeddings(
    token_embeddings: torch.Tensor, word_id: torch.Tensor, length: torch.LongTensor
) -> torch.Tensor:
    """Ensure one embedding row per token by inserting zero vectors at token
    indices that received no subtoken embedding.

    :param token_embeddings: embeddings of the tokens that do have subtokens
    :param word_id: token index associated with each kept subtoken embedding
    :param length: total number of tokens the result must cover
    :return: tensor with exactly `length` leading rows (zero rows for missing tokens)
    """
    # in some cases we need to insert zero vectors for tokens without embedding.
    if token_embeddings.shape[0] == 0:
        # no subtoken produced any embedding: return all zeros; the rank-specific
        # branches are kept explicit for TorchScript compatibility
        if token_embeddings.dim() == 2:
            token_embeddings = torch.zeros(
                int(length), token_embeddings.shape[1], dtype=token_embeddings.dtype, device=token_embeddings.device
            )
        elif token_embeddings.dim() == 3:
            token_embeddings = torch.zeros(
                int(length),
                token_embeddings.shape[1],
                token_embeddings.shape[2],
                dtype=token_embeddings.dtype,
                device=token_embeddings.device,
            )
        elif token_embeddings.dim() == 4:
            token_embeddings = torch.zeros(
                int(length),
                token_embeddings.shape[1],
                token_embeddings.shape[2],
                token_embeddings.shape[3],
                dtype=token_embeddings.dtype,
                device=token_embeddings.device,
            )
    elif token_embeddings.shape[0] < length:
        # fix: the zero row's shape/dtype/device never change across iterations,
        # so allocate it once outside the loop instead of on every pass
        zero_vector = torch.zeros_like(token_embeddings[:1])
        for _id in torch.arange(int(length)):
            if not (word_id == _id).any():
                token_embeddings = torch.cat(
                    (
                        token_embeddings[:_id],
                        zero_vector,
                        token_embeddings[_id:],
                    ),
                    dim=0,
                )
    return token_embeddings
@torch.jit.script_if_tracing
def fill_mean_token_embeddings(
    all_token_embeddings: torch.Tensor,
    sentence_hidden_states: torch.Tensor,
    word_ids: torch.Tensor,
    token_lengths: torch.Tensor,
):
    """Write each token's embedding as the mean of its subtoken hidden states.

    Fills `all_token_embeddings` in place and returns it. Explicit loops are kept
    for TorchScript compatibility.
    """
    for sentence_idx in torch.arange(all_token_embeddings.shape[0]):
        for token_idx in torch.arange(token_lengths[sentence_idx]):  # type: ignore[call-overload]
            subtoken_states = sentence_hidden_states[sentence_idx][word_ids[sentence_idx] == token_idx]
            # a token without any subtoken yields a NaN mean; map those to zero
            all_token_embeddings[sentence_idx, token_idx, :] = torch.nan_to_num(subtoken_states.mean(dim=0))
    return all_token_embeddings
@torch.jit.script_if_tracing
def document_mean_pooling(sentence_hidden_states: torch.Tensor, sentence_lengths: torch.Tensor):
    """Mean-pool each sentence's hidden states over its real (unpadded) length.

    :param sentence_hidden_states: (batch, seq_len, dim) hidden states, right-padded
    :param sentence_lengths: (batch,) number of valid positions per sentence
    :return: (batch, dim) tensor of per-sentence mean vectors
    """
    # allocate on the input's device so the row assignment below cannot fail
    # with a CPU/GPU device mismatch
    result = torch.zeros(
        sentence_hidden_states.shape[0], sentence_hidden_states.shape[2], device=sentence_hidden_states.device
    )
    for i in torch.arange(sentence_hidden_states.shape[0]):
        result[i] = sentence_hidden_states[i, : sentence_lengths[i]].mean(dim=0)
    # bug fix: the result was previously computed but never returned
    return result
@torch.jit.script_if_tracing
def document_max_pooling(sentence_hidden_states: torch.Tensor, sentence_lengths: torch.Tensor):
    """Max-pool each sentence's hidden states over its real (unpadded) length.

    :param sentence_hidden_states: (batch, seq_len, dim) hidden states, right-padded
    :param sentence_lengths: (batch,) number of valid positions per sentence
    :return: (batch, dim) tensor of per-sentence element-wise maxima
    """
    # allocate on the input's device to avoid a CPU/GPU mismatch on assignment
    result = torch.zeros(
        sentence_hidden_states.shape[0], sentence_hidden_states.shape[2], device=sentence_hidden_states.device
    )
    for i in torch.arange(sentence_hidden_states.shape[0]):
        result[i], _ = sentence_hidden_states[i, : sentence_lengths[i]].max(dim=0)
    # bug fix: the result was previously computed but never returned
    return result
def _legacy_reconstruct_word_ids(
    embedding: "TransformerBaseEmbeddings", flair_tokens: List[List[str]]
) -> List[List[Optional[int]]]:
    """Re-derive, for each subtoken, the index of the flair token it came from.

    Fallback for tokenizers that do not expose word alignment: re-tokenizes each
    sentence, aligns the subtoken texts with the original tokens, and pads all
    per-sentence id lists to the batch maximum with None.
    """
    word_ids_list = []
    max_len = 0
    for tokens in flair_tokens:
        token_texts = embedding.tokenizer.tokenize(" ".join(tokens), is_split_into_words=True)
        token_ids = cast(List[int], embedding.tokenizer.convert_tokens_to_ids(token_texts))
        expanded_token_ids = embedding.tokenizer.build_inputs_with_special_tokens(token_ids)
        # splice the special tokens added by the tokenizer back into the subtoken
        # text list so both sequences line up position by position
        j = 0
        for _i, token_id in enumerate(token_ids):
            while expanded_token_ids[j] != token_id:
                token_texts.insert(j, embedding.tokenizer.convert_ids_to_tokens(expanded_token_ids[j]))
                j += 1
            j += 1
        # trailing special tokens (e.g. a final separator)
        while j < len(expanded_token_ids):
            token_texts.insert(j, embedding.tokenizer.convert_ids_to_tokens(expanded_token_ids[j]))
            j += 1
        if not embedding.allow_long_sentences and embedding.truncate:
            token_texts = token_texts[: embedding.tokenizer.model_max_length]
        reconstruct = _reconstruct_word_ids_from_subtokens(embedding, tokens, token_texts)
        word_ids_list.append(reconstruct)
        reconstruct_len = len(reconstruct)
        if reconstruct_len > max_len:
            max_len = reconstruct_len
    for _word_ids in word_ids_list:
        # padding
        _word_ids.extend([None] * (max_len - len(_word_ids)))
    return word_ids_list
def remove_special_markup(text: str):
    """Strip subword markup added by common tokenizer families.

    Removes a leading "Ġ" (RoBERTa), "##" (BERT) or "▁" (XLNet) marker and a
    trailing "</w>" (XLM) marker; other text is returned unchanged.
    """
    for marker_pattern in ("^Ġ", "^##", "^▁", "</w>$"):
        text = re.sub(marker_pattern, "", text)
    return text
def _get_processed_token_text(tokenizer, token: str) -> str:
    """Tokenize `token`, strip subword markup from every piece, and return the
    lower-cased, whitespace-stripped concatenation."""
    subtoken_pieces = tokenizer.tokenize(token)
    cleaned_pieces = [remove_special_markup(piece) for piece in subtoken_pieces]
    return "".join(cleaned_pieces).lower().strip()
def _reconstruct_word_ids_from_subtokens(embedding, tokens: List[str], subtokens: List[str]):
    """Align subtokens to tokens: return, per subtoken, the token index (None for special tokens).

    Greedily concatenates de-markup'ed subtokens until they spell the current
    token's processed text, then advances to the next token.
    """
    word_iterator = iter(enumerate(_get_processed_token_text(embedding.tokenizer, token) for token in tokens))
    token_id, token_text = next(word_iterator)
    word_ids: List[Optional[int]] = []
    reconstructed_token = ""
    subtoken_count = 0
    processed_first_token = False

    special_tokens = []
    # check if special tokens exist to circumvent error message
    if embedding.tokenizer._bos_token:
        special_tokens.append(embedding.tokenizer.bos_token)
    if embedding.tokenizer._cls_token:
        special_tokens.append(embedding.tokenizer.cls_token)
    if embedding.tokenizer._sep_token:
        special_tokens.append(embedding.tokenizer.sep_token)

    # iterate over subtokens and reconstruct tokens
    for _subtoken_id, subtoken in enumerate(subtokens):
        # remove special markup
        subtoken = remove_special_markup(subtoken)

        # check if reconstructed token is special begin token ([CLS] or similar)
        if subtoken in special_tokens:
            word_ids.append(None)
            continue

        # advance to the next token once the previous one was fully consumed
        # (processed_first_token keeps the very first token from being skipped)
        if subtoken_count == 0 and processed_first_token:
            token_id, token_text = next(word_iterator)
        processed_first_token = True

        # some BERT tokenizers somehow omit words - in such cases skip to next token
        while subtoken_count == 0 and not token_text.startswith(subtoken.lower()):
            token_id, token_text = next(word_iterator)

        word_ids.append(token_id)
        subtoken_count += 1

        reconstructed_token = reconstructed_token + subtoken

        if reconstructed_token.lower() == token_text:
            # we cannot handle unk_tokens perfectly, so let's assume that one unk_token corresponds to one token.
            reconstructed_token = ""
            subtoken_count = 0

    # if tokens are unaccounted for
    while len(word_ids) < len(subtokens):
        word_ids.append(None)

    # check if all tokens were matched to subtokens
    if token_id + 1 != len(tokens) and not embedding.truncate:
        log.error(f"Reconstructed token: '{reconstructed_token}'")
        log.error(f"Tokenization MISMATCH in sentence '{' '.join(tokens)}'")
        log.error(f"Last matched: '{tokens[token_id]}'")
        log.error(f"Last sentence: '{tokens[-1]}'")
        log.error(f"subtokenized: '{subtokens}'")
    return word_ids
class TransformerBaseEmbeddings(Embeddings[Sentence]):
"""Base class for all TransformerEmbeddings.
This base class handles the tokenizer and the input preparation, however it won't implement the actual model.
This can be further extended to implement the model in either a pytorch, jit or onnx way of working.
"""
    def __init__(
        self,
        name: str,
        tokenizer: PreTrainedTokenizer,
        embedding_length: int,
        context_length: int,
        context_dropout: float,
        respect_document_boundaries: bool,
        stride: int,
        allow_long_sentences: bool,
        fine_tune: bool,
        truncate: bool,
        use_lang_emb: bool,
        is_document_embedding: bool = False,
        is_token_embedding: bool = False,
        force_device: Optional[torch.device] = None,
        force_max_length: bool = False,
        feature_extractor: Optional[FeatureExtractionMixin] = None,
        needs_manual_ocr: Optional[bool] = None,
        use_context_separator: bool = True,
    ) -> None:
        """Store the tokenizer and configuration shared by all transformer embedding backends.

        :param name: embedding identifier
        :param tokenizer: transformers tokenizer used to build model inputs
        :param embedding_length: length of one embedding vector
        :param context_length: number of additional context tokens (0 disables context;
            context expansion itself happens elsewhere — see usage in subclasses)
        :param context_dropout: dropout applied to the context (used elsewhere; TODO confirm)
        :param respect_document_boundaries: whether context respects document boundaries
        :param stride: tokenizer stride used when splitting long sentences into windows
        :param allow_long_sentences: whether long sentences produce overflowing windows
        :param fine_tune: whether the transformer weights are fine-tuned
        :param truncate: whether over-long inputs are truncated by the tokenizer
        :param use_lang_emb: whether XLM-style language embeddings are used
        :param is_document_embedding: emit one embedding per sentence
        :param is_token_embedding: emit one embedding per token
        :param force_device: if set, pin tensors to this device instead of flair.device
        :param force_max_length: always pad batches to the model maximum length
        :param feature_extractor: optional image feature extractor (LayoutLM-style models)
        :param needs_manual_ocr: overrides the automatic LayoutLM OCR detection when given
        :param use_context_separator: whether a separator token is used around context
        :raises ValueError: if neither token nor document embedding is enabled
        """
        self.name = name
        super().__init__()
        self.document_embedding = is_document_embedding
        self.token_embedding = is_token_embedding
        self.tokenizer: PreTrainedTokenizer = tokenizer
        self.embedding_length_internal = embedding_length
        self.context_length = context_length
        self.context_dropout = context_dropout
        self.respect_document_boundaries = respect_document_boundaries
        self.stride = stride
        self.allow_long_sentences = allow_long_sentences
        self.truncate = truncate
        self.use_lang_emb = use_lang_emb
        self.force_device = force_device
        self.fine_tune = fine_tune
        self.force_max_length = force_max_length
        self.feature_extractor = feature_extractor
        self.use_context_separator = use_context_separator

        # detect whether this tokenizer's __call__ accepts OCR bounding boxes
        tokenizer_params = list(inspect.signature(self.tokenizer.__call__).parameters.keys())
        self.tokenizer_needs_ocr_boxes = "boxes" in tokenizer_params

        # The layoutlm tokenizer doesn't handle ocr themselves
        self.needs_manual_ocr = isinstance(self.tokenizer, (LayoutLMTokenizer, LayoutLMTokenizerFast))
        if needs_manual_ocr is not None:
            self.needs_manual_ocr = needs_manual_ocr

        if (self.tokenizer_needs_ocr_boxes or self.needs_manual_ocr) and self.context_length > 0:
            warnings.warn(f"using '{name}' with additional context, might lead to bad results.", UserWarning)

        if not self.token_embedding and not self.document_embedding:
            raise ValueError("either 'is_token_embedding' or 'is_document_embedding' needs to be set.")
def to_args(self):
args = {
"is_token_embedding": self.token_embedding,
"is_document_embedding": self.document_embedding,
"allow_long_sentences": self.allow_long_sentences,
"tokenizer": self.tokenizer,
"context_length": self.context_length,
"context_dropout": self.context_dropout,
"respect_document_boundaries": self.respect_document_boundaries,
"truncate": self.truncate,
"stride": self.stride,
"embedding_length": self.embedding_length_internal,
"name": self.name,
"fine_tune": self.fine_tune,
"use_lang_emb": self.use_lang_emb,
"force_max_length": self.force_max_length,
"feature_extractor": self.feature_extractor,
"use_context_separator": self.use_context_separator,
}
if hasattr(self, "needs_manual_ocr"):
args["needs_manual_ocr"] = self.needs_manual_ocr
return args
def __setstate__(self, state):
embedding = self.from_params(state)
for key in embedding.__dict__:
self.__dict__[key] = embedding.__dict__[key]
@classmethod
def from_params(cls, params):
tokenizer = cls._tokenizer_from_bytes(params.pop("tokenizer_data"))
feature_extractor = cls._feature_extractor_from_bytes(params.pop("feature_extractor_data", None))
embedding = cls.create_from_state(tokenizer=tokenizer, feature_extractor=feature_extractor, **params)
return embedding
def to_params(self):
model_state = self.to_args()
del model_state["tokenizer"]
model_state["tokenizer_data"] = self.__tokenizer_bytes()
del model_state["feature_extractor"]
if self.feature_extractor:
model_state["feature_extractor_data"] = self.__feature_extractor_bytes()
return model_state
@classmethod
def _tokenizer_from_bytes(cls, zip_data: BytesIO) -> PreTrainedTokenizer:
zip_obj = zipfile.ZipFile(zip_data)
with tempfile.TemporaryDirectory() as temp_dir:
zip_obj.extractall(temp_dir)
return AutoTokenizer.from_pretrained(temp_dir, add_prefix_space=True)
@classmethod
def _feature_extractor_from_bytes(cls, zip_data: Optional[BytesIO]) -> Optional[FeatureExtractionMixin]:
if zip_data is None:
return None
zip_obj = zipfile.ZipFile(zip_data)
with tempfile.TemporaryDirectory() as temp_dir:
zip_obj.extractall(temp_dir)
return AutoFeatureExtractor.from_pretrained(temp_dir, apply_ocr=False)
def __tokenizer_bytes(self):
with tempfile.TemporaryDirectory() as temp_dir:
files = list(self.tokenizer.save_pretrained(temp_dir))
if self.tokenizer.is_fast and self.tokenizer.slow_tokenizer_class:
vocab_files = self.tokenizer.slow_tokenizer_class.vocab_files_names.values()
files = [f for f in files if all(v not in f for v in vocab_files)]
zip_data = BytesIO()
zip_obj = zipfile.ZipFile(zip_data, "w")
for f in files:
# transformers returns the "added_tokens.json" even if it doesn't create it
if os.path.exists(f):
zip_obj.write(f, os.path.relpath(f, temp_dir))
zip_data.seek(0)
return zip_data
def __feature_extractor_bytes(self):
with tempfile.TemporaryDirectory() as temp_dir:
files = list(self.feature_extractor.save_pretrained(temp_dir))
zip_data = BytesIO()
zip_obj = zipfile.ZipFile(zip_data, "w")
for f in files:
# transformers returns the "added_tokens.json" even if it doesn't create it
if os.path.exists(f):
zip_obj.write(f, os.path.relpath(f, temp_dir))
zip_data.seek(0)
return zip_data
    @classmethod
    def create_from_state(cls, **state):
        # construction hook used by from_params; subclasses can override it to
        # adjust the saved state before calling the constructor
        return cls(**state)
    @property
    def embedding_length(self) -> int:
        """Length of one embedding vector, as fixed at construction time."""
        return self.embedding_length_internal
@property
def embedding_type(self) -> str:
# in case of doubt: token embedding has higher priority than document embedding
return "word-level" if self.token_embedding else "sentence-level"
    @abstractmethod
    def _forward_tensors(self, tensors) -> Dict[str, torch.Tensor]:
        # abstract: concrete subclasses must override with their model-specific
        # forward pass; this default simply delegates to __call__
        return self(**tensors)
    def prepare_tensors(self, sentences: List[Sentence], device: Optional[torch.device] = None):
        """Gather tokens and build the tensor dictionary fed to the transformer model.

        :param sentences: batch of sentences to encode
        :param device: target device for the tensors (defaults to flair.device)
        :raises ValueError: if required 'bbox' / 'image' metadata is missing
        """
        if device is None:
            device = flair.device
        # __gather_flair_tokens is defined elsewhere in this class; it returns the
        # (possibly context-expanded) tokens plus per-sentence offsets and lengths
        flair_tokens, offsets, lengths = self.__gather_flair_tokens(sentences)

        # random check some tokens to save performance.
        # (only the corner tokens of the batch are inspected, not every token)
        if (self.needs_manual_ocr or self.tokenizer_needs_ocr_boxes) and not all(
            [
                flair_tokens[0][0].has_metadata("bbox"),
                flair_tokens[0][-1].has_metadata("bbox"),
                flair_tokens[-1][0].has_metadata("bbox"),
                flair_tokens[-1][-1].has_metadata("bbox"),
            ]
        ):
            raise ValueError(f"The embedding '{self.name}' requires the ocr 'bbox' set as metadata on all tokens.")

        # image-based models additionally need the raw image on every sentence
        if self.feature_extractor is not None and not all(
            [
                sentences[0].has_metadata("image"),
                sentences[-1].has_metadata("image"),
            ]
        ):
            raise ValueError(f"The embedding '{self.name}' requires the 'image' set as metadata for all sentences.")

        return self.__build_transformer_model_inputs(sentences, offsets, lengths, flair_tokens, device)
    def __build_transformer_model_inputs(
        self,
        sentences: List[Sentence],
        offsets: List[int],
        sentence_lengths: List[int],
        flair_tokens: List[List[Token]],
        device: torch.device,
    ):
        """Tokenize the batch and assemble every tensor the model forward needs.

        :param offsets: per sentence, index of the first *original* token inside
            its context-expanded token list
        :param sentence_lengths: per sentence, number of original tokens
        :param flair_tokens: per sentence, the context-expanded token list
        """
        tokenizer_kwargs: Dict[str, Any] = {}
        if self.tokenizer_needs_ocr_boxes:
            # layout tokenizers (e.g. LayoutLM family) tokenize (word, box) pairs
            tokenizer_kwargs["boxes"] = [[t.get_metadata("bbox") for t in tokens] for tokens in flair_tokens]
        else:
            tokenizer_kwargs["is_split_into_words"] = True

        batch_encoding = self.tokenizer(
            [[t.text for t in tokens] for tokens in flair_tokens],
            stride=self.stride,
            return_overflowing_tokens=self.allow_long_sentences,
            truncation=self.truncate,
            padding=PaddingStrategy.MAX_LENGTH if self.force_max_length else PaddingStrategy.LONGEST,
            return_tensors="pt",
            **tokenizer_kwargs,
        )

        input_ids = batch_encoding["input_ids"].to(device, non_blocking=True)
        model_kwargs = {"input_ids": input_ids}

        # Models such as FNet do not have an attention_mask
        if "attention_mask" in batch_encoding:
            model_kwargs["attention_mask"] = batch_encoding["attention_mask"].to(device, non_blocking=True)

        if "overflow_to_sample_mapping" in batch_encoding:
            # long sentences were split into overlapping parts; keep the
            # part->sentence mapping on both CPU (for list indexing) and device
            cpu_overflow_to_sample_mapping = batch_encoding["overflow_to_sample_mapping"]
            model_kwargs["overflow_to_sample_mapping"] = cpu_overflow_to_sample_mapping.to(device, non_blocking=True)
            unpacked_ids = combine_strided_tensors(
                input_ids,
                model_kwargs["overflow_to_sample_mapping"],
                self.stride // 2,
                self.tokenizer.model_max_length,
                self.tokenizer.pad_token_id,
            )
            # sub-token count per original sentence (non-pad positions)
            sub_token_lengths = (unpacked_ids != self.tokenizer.pad_token_id).sum(dim=1)
            padded_tokens = [flair_tokens[i] for i in cpu_overflow_to_sample_mapping]
        else:
            cpu_overflow_to_sample_mapping = None
            sub_token_lengths = (input_ids != self.tokenizer.pad_token_id).sum(dim=1)
            padded_tokens = flair_tokens

        if self.document_embedding and not (self.cls_pooling == "cls" and self.initial_cls_token):
            model_kwargs["sub_token_lengths"] = sub_token_lengths

        # set language IDs for XLM-style transformers
        if self.use_lang_emb and self.tokenizer.lang2id is not None:
            model_kwargs["langs"] = torch.zeros_like(input_ids, dtype=input_ids.dtype)
            lang2id = self.tokenizer.lang2id
            if not self.allow_long_sentences:
                for s_id, sentence in enumerate(sentences):
                    lang_id = lang2id.get(sentence.get_language_code(), 0)
                    model_kwargs["langs"][s_id] = lang_id
            else:
                # each sentence occupies several consecutive rows (its parts)
                sentence_part_lengths = torch.unique(
                    batch_encoding["overflow_to_sample_mapping"],
                    return_counts=True,
                    sorted=True,
                )[1].tolist()
                sentence_idx = 0
                for sentence, part_length in zip(sentences, sentence_part_lengths):
                    lang_id = lang2id.get(sentence.get_language_code(), 0)
                    model_kwargs["langs"][sentence_idx : sentence_idx + part_length] = lang_id
                    sentence_idx += part_length

        if "bbox" in batch_encoding:
            model_kwargs["bbox"] = batch_encoding["bbox"].to(device, non_blocking=True)

        if self.token_embedding or self.needs_manual_ocr:
            model_kwargs["token_lengths"] = torch.tensor(sentence_lengths, device=device)

            # word_ids is only supported for fast rust tokenizers. Some models like "xlm-mlm-ende-1024" do not have
            # a fast tokenizer implementation, hence we need to fall back to our own reconstruction of word_ids.
            if self.tokenizer.is_fast:
                word_ids_list = [batch_encoding.word_ids(i) for i in range(input_ids.size()[0])]
            else:
                word_ids_list = _legacy_reconstruct_word_ids(
                    self,
                    [[t.text for t in tokens] for tokens in flair_tokens],
                )

            if self.token_embedding:
                if self.allow_long_sentences:
                    # replicate offsets/lengths for every overflowing part
                    new_offsets = []
                    new_lengths = []
                    assert cpu_overflow_to_sample_mapping is not None
                    for sent_id in cpu_overflow_to_sample_mapping:
                        new_offsets.append(offsets[sent_id])
                        new_lengths.append(sentence_lengths[sent_id])
                    offsets = new_offsets
                    sentence_lengths = new_lengths

                # map each sub-token to the token index within the *original*
                # sentence; -100 marks special tokens and context tokens
                word_ids = torch.tensor(
                    [
                        [
                            -100 if (val is None or val < offset or val >= offset + length) else val - offset
                            for val in _word_ids
                        ]
                        for _word_ids, offset, length in zip(word_ids_list, offsets, sentence_lengths)
                    ],
                    device=device,
                )
                model_kwargs["word_ids"] = word_ids

            if self.needs_manual_ocr:
                # (0, 0, 0, 0) is the box used for positions without a source word
                bbox = [
                    [(0, 0, 0, 0) if val is None else tokens[val].get_metadata("bbox") for val in _word_ids]
                    for _word_ids, tokens in zip(word_ids_list, padded_tokens)
                ]
                model_kwargs["bbox"] = torch.tensor(bbox, device=device)

        if self.feature_extractor is not None:
            images = [sent.get_metadata("image") for sent in sentences]
            image_encodings = self.feature_extractor(images, return_tensors="pt")["pixel_values"]
            if cpu_overflow_to_sample_mapping is not None:
                # repeat each image for every overflowing part of its sentence
                batched_image_encodings = [image_encodings[i] for i in cpu_overflow_to_sample_mapping]
                image_encodings = torch.stack(batched_image_encodings)
            image_encodings = image_encodings.to(flair.device)
            # LayoutLMv2 names the input "image"; newer models use "pixel_values"
            if isinstance(self.feature_extractor, LayoutLMv2FeatureExtractor):
                model_kwargs["image"] = image_encodings
            else:
                model_kwargs["pixel_values"] = image_encodings

        return model_kwargs
    def __gather_flair_tokens(self, sentences: List[Sentence]) -> Tuple[List[List[Token]], List[int], List[int]]:
        """Collect context-expanded tokens plus offsets/lengths for a batch.

        Returns (tokens per sentence, offset of the original sentence within
        the expanded token list, original token count per sentence).
        """
        offsets = []
        lengths = []
        if self.context_length > 0:
            # set context if not set already
            # NOTE: this links the batch's sentences to each other in order,
            # mutating their _previous/_next pointers as a side effect.
            previous_sentence = None
            for sentence in sentences:
                if sentence.is_context_set():
                    continue
                sentence._previous_sentence = previous_sentence
                sentence._next_sentence = None
                if previous_sentence:
                    previous_sentence._next_sentence = sentence
                previous_sentence = sentence
        sentence_tokens = []
        for sentence in sentences:
            # flair specific pre-tokenization
            tokens, offset = self._expand_sentence_with_context(sentence)
            sentence_tokens.append(tokens)
            offsets.append(offset)
            lengths.append(len(sentence))
        return sentence_tokens, offsets, lengths
def _expand_sentence_with_context(self, sentence) -> Tuple[List[Token], int]:
# fields to store left and right context
left_context = []
right_context = []
# expand context only if context_length is set
expand_context = self.context_length > 0
if expand_context:
# if context_dropout is set, randomly deactivate left context during training
if not self.training or random.randint(1, 100) > (self.context_dropout * 100):
left_context = sentence.left_context(self.context_length, self.respect_document_boundaries)
# if context_dropout is set, randomly deactivate right context during training
if not self.training or random.randint(1, 100) > (self.context_dropout * 100):
right_context = sentence.right_context(self.context_length, self.respect_document_boundaries)
# if use_context_separator is set, add a [FLERT] token
if self.use_context_separator and self.context_length > 0:
left_context = [*left_context, Token(SENTENCE_BOUNDARY_TAG)]
right_context = [Token(SENTENCE_BOUNDARY_TAG), *right_context]
# return expanded sentence and context length information
expanded_sentence = left_context + sentence.tokens + right_context
context_length = len(left_context)
return expanded_sentence, context_length
def __extract_document_embeddings(self, sentence_hidden_states, sentences):
for document_emb, sentence in zip(sentence_hidden_states, sentences):
sentence.set_embedding(self.name, document_emb)
def __extract_token_embeddings(self, sentence_embeddings, sentences):
for token_embeddings, sentence in zip(sentence_embeddings, sentences):
for token_embedding, token in zip(token_embeddings, sentence):
token.set_embedding(self.name, token_embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
tensors = self.prepare_tensors(sentences, device=self.force_device)
gradient_context = torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()
with gradient_context:
embeddings = self._forward_tensors(tensors)
if self.document_embedding:
document_embedding = embeddings["document_embeddings"]
self.__extract_document_embeddings(document_embedding, sentences)
if self.token_embedding:
token_embedding = embeddings["token_embeddings"]
self.__extract_token_embeddings(token_embedding, sentences)
@register_embeddings
class TransformerOnnxEmbeddings(TransformerBaseEmbeddings):
    """Transformer embeddings backed by an exported ONNX model.

    Tokenization and tensor preparation happen in Python on CPU (inherited from
    :class:`TransformerBaseEmbeddings`); the forward pass is delegated to an
    ``onnxruntime.InferenceSession``.
    """

    def __init__(self, onnx_model: str, providers: Optional[List] = None, **kwargs) -> None:
        """Create an ONNX-backed embedding.

        :param onnx_model: path to the exported ``.onnx`` file
        :param providers: onnxruntime execution providers (defaults to an empty
            list). Previously a mutable default argument (``[]``); ``None``
            avoids sharing one list object across instances.
        :param kwargs: forwarded to :class:`TransformerBaseEmbeddings`
        """
        # onnx prepares numpy arrays, no matter if it runs on gpu or cpu, the input is on cpu first.
        super().__init__(**kwargs, force_device=torch.device("cpu"))
        self.onnx_model = onnx_model
        self.providers = [] if providers is None else providers
        self.create_session()
        self.eval()

    def to_params(self):
        """Serialize constructor arguments (the session itself is not picklable)."""
        params = super().to_params()
        params["providers"] = self.providers
        params["onnx_model"] = self.onnx_model
        return params

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "TransformerOnnxEmbeddings":
        """Inverse of `to_params`; restores tokenizer/feature extractor from bytes."""
        params["tokenizer"] = cls._tokenizer_from_bytes(params.pop("tokenizer_data"))
        params["feature_extractor"] = cls._feature_extractor_from_bytes(params.pop("feature_extractor_data", None))
        return cls(**params)

    def create_session(self):
        """Create the onnxruntime inference session for `self.onnx_model`.

        Sets ``self.session`` to ``None`` (with a warning) if the model file
        does not exist yet.
        """
        try:
            import onnxruntime
        except ImportError:
            # fixed: added missing space between the two concatenated message parts
            log.error(
                "You cannot use OnnxEmbeddings without ONNXruntime being installed, "
                "please run `pip install onnxruntime`"
            )
            raise
        if os.path.isfile(self.onnx_model):
            self.session = onnxruntime.InferenceSession(self.onnx_model, providers=self.providers)
        else:
            # fixed: `self.__class__.name` raised AttributeError (instances set
            # `self.name`, classes have no `name` attribute) — use the class name.
            log.warning(
                f"Could not find file '{self.onnx_model}' used in {type(self).__name__}({self.name}). "
                "The embedding won't work unless a valid path is set."
            )
            self.session = None

    def remove_session(self):
        """Drop the onnxruntime session (e.g. before optimizing or pickling)."""
        if self.session is not None:
            self.session._sess = None
            del self.session
        self.session = None

    def optimize_model(self, optimize_model_path, use_external_data_format: bool = False, **kwargs):
        """Wrapper for `onnxruntime.transformers.optimizer.optimize_model`."""
        from onnxruntime.transformers.optimizer import optimize_model

        self.remove_session()
        model = optimize_model(self.onnx_model, **kwargs)
        model.save_model_to_file(optimize_model_path, use_external_data_format=use_external_data_format)
        self.onnx_model = optimize_model_path
        self.create_session()

    def quantize_model(self, quantize_model_path, use_external_data_format: bool = False, **kwargs):
        """Wrapper for `onnxruntime.quantization.quantize_dynamic`."""
        from onnxruntime.quantization import quantize_dynamic

        self.remove_session()
        quantize_dynamic(
            self.onnx_model, quantize_model_path, use_external_data_format=use_external_data_format, **kwargs
        )
        self.onnx_model = quantize_model_path
        self.create_session()

    def _forward_tensors(self, tensors) -> Dict[str, torch.Tensor]:
        # run the onnx session on numpy inputs and convert outputs back to torch
        input_array = {k: v.numpy() for k, v in tensors.items()}
        embeddings = self.session.run([], input_array)

        result = {}
        # exported output order: document embeddings first, token embeddings last
        if self.document_embedding:
            result["document_embeddings"] = torch.tensor(embeddings[0], device=flair.device)
        if self.token_embedding:
            result["token_embeddings"] = torch.tensor(embeddings[-1], device=flair.device)
        return result

    @classmethod
    def collect_dynamic_axes(cls, embedding: "TransformerEmbeddings", tensors):
        """Mark which tensor dimensions may vary between batches for the export."""
        dynamic_axes = {}
        for k, v in tensors.items():
            if k in ["sub_token_lengths", "token_lengths"]:
                dynamic_axes[k] = {0: "sent-count"}
                continue
            if k == "word_ids":
                if embedding.tokenizer.is_fast:
                    dynamic_axes[k] = {0: "batch", 1: "sequ_length"}
                else:
                    dynamic_axes[k] = {0: "sent-count", 1: "max_token_count"}
                continue
            if k == "overflow_to_sample_mapping":
                # 1-dim tensor: the generic branch below assigns the same axes
                dynamic_axes[k] = {0: "batch"}
            if v.dim() == 1:
                dynamic_axes[k] = {0: "batch"}
            else:
                dynamic_axes[k] = {0: "batch", 1: "sequ_length"}
        if embedding.token_embedding:
            dynamic_axes["token_embeddings"] = {0: "sent-count", 1: "max_token_count", 2: "token_embedding_size"}
        if embedding.document_embedding:
            dynamic_axes["document_embeddings"] = {0: "sent-count", 1: "document_embedding_size"}
        return dynamic_axes

    @classmethod
    def export_from_embedding(
        cls,
        path: Union[str, Path],
        embedding: "TransformerEmbeddings",
        example_sentences: List[Sentence],
        opset_version: int = 14,
        providers: Optional[List] = None,
    ):
        """Trace `embedding` with `example_sentences` and export it to ONNX.

        :param path: output path for the .onnx file
        :param example_sentences: sentences used for tracing (2-4 varied ones recommended)
        :param opset_version: ONNX opset to target
        :param providers: execution providers; auto-selected from flair.device if None
        """
        path = str(path)
        example_tensors = embedding.prepare_tensors(example_sentences)
        dynamic_axes = cls.collect_dynamic_axes(embedding, example_tensors)
        output_names = []
        if embedding.document_embedding:
            output_names.append("document_embeddings")
        if embedding.token_embedding:
            output_names.append("token_embeddings")
        if providers is None:
            if flair.device.type == "cuda":
                providers = [
                    (
                        "CUDAExecutionProvider",
                        {
                            "device_id": 0,
                            "arena_extend_strategy": "kNextPowerOfTwo",
                            "gpu_mem_limit": 4 * 1024 * 1024 * 1024,
                            "cudnn_conv_algo_search": "EXHAUSTIVE",
                            "do_copy_in_default_stream": True,
                        },
                    ),
                    "CPUExecutionProvider",
                ]
            else:
                providers = ["CPUExecutionProvider"]
        # input names must follow the order of the embedding's forward signature
        desired_keys_order = [
            param for param in inspect.signature(embedding.forward).parameters if param in example_tensors
        ]
        torch.onnx.export(
            embedding,
            (example_tensors,),
            path,
            input_names=desired_keys_order,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            opset_version=opset_version,
        )
        return cls(onnx_model=path, providers=providers, **embedding.to_args())
@register_embeddings
class TransformerJitEmbeddings(TransformerBaseEmbeddings):
    """Transformer embeddings whose forward pass is a TorchScript module.

    Tokenization and tensor preparation are inherited from
    :class:`TransformerBaseEmbeddings`; the scripted module replaces the
    python forward pass.
    """

    def __init__(self, jit_model: Union[bytes, ScriptModule], param_names: List[str], **kwargs) -> None:
        # `jit_model` is either an already-loaded ScriptModule or its serialized
        # byte representation (the form stored by `to_params`).
        super().__init__(**kwargs)
        if isinstance(jit_model, bytes):
            buffer = BytesIO(jit_model)
            buffer.seek(0)
            self.jit_model: ScriptModule = torch.jit.load(buffer, map_location=flair.device)
        else:
            self.jit_model = jit_model
        # ordered names of the scripted forward's parameters
        self.param_names = param_names

        self.to(flair.device)
        self.eval()

    def to_params(self):
        # serialize the ScriptModule to bytes so the embedding can be pickled
        state = super().to_params()
        buffer = BytesIO()
        torch.jit.save(self.jit_model, buffer)
        state["jit_model"] = buffer.getvalue()
        state["param_names"] = self.param_names
        return state

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "Embeddings":
        # inverse of `to_params`; restores tokenizer/feature extractor from bytes
        params["tokenizer"] = cls._tokenizer_from_bytes(params.pop("tokenizer_data"))
        params["feature_extractor"] = cls._feature_extractor_from_bytes(params.pop("feature_extractor_data", None))
        return cls(**params)

    def _forward_tensors(self, tensors) -> Dict[str, torch.Tensor]:
        # pass tensors positionally, in the order the scripted module expects
        parameters = []
        for param in self.param_names:
            parameters.append(tensors[param])
        embeddings = self.jit_model(*parameters)
        # a tuple means (document, token) embeddings were traced together;
        # a single tensor is whichever kind this instance produces
        if isinstance(embeddings, tuple):
            return {"document_embeddings": embeddings[0], "token_embeddings": embeddings[1]}
        elif self.token_embedding:
            return {"token_embeddings": embeddings}
        elif self.document_embedding:
            return {"document_embeddings": embeddings}
        else:
            raise ValueError("either 'token_embedding' or 'document_embedding' needs to be set.")

    @classmethod
    def create_from_embedding(cls, module: ScriptModule, embedding: "TransformerEmbeddings", param_names: List[str]):
        # build a jit embedding from a traced module + the source embedding's args
        return cls(jit_model=module, param_names=param_names, **embedding.to_args())

    @classmethod
    def parameter_to_list(
        cls, embedding: "TransformerEmbeddings", wrapper: torch.nn.Module, sentences: List[Sentence]
    ) -> Tuple[List[str], List[torch.Tensor]]:
        # return the wrapper's parameter names and matching example tensors,
        # as needed for tracing the wrapper into a ScriptModule
        tensors = embedding.prepare_tensors(sentences)
        param_names = list(inspect.signature(wrapper.forward).parameters.keys())
        params = []
        for param in param_names:
            params.append(tensors[param])
        return param_names, params
@register_embeddings
class TransformerJitWordEmbeddings(TokenEmbeddings, TransformerJitEmbeddings):
    """Token-level marker subclass of :class:`TransformerJitEmbeddings`."""

    def __init__(
        self,
        **kwargs,
    ) -> None:
        # deliberately calls TransformerJitEmbeddings.__init__ directly
        # (not super()): all state is set up there; TokenEmbeddings only
        # contributes the embedding-type interface.
        TransformerJitEmbeddings.__init__(self, **kwargs)
@register_embeddings
class TransformerJitDocumentEmbeddings(DocumentEmbeddings, TransformerJitEmbeddings):
    """Document-level marker subclass of :class:`TransformerJitEmbeddings`."""

    def __init__(
        self,
        **kwargs,
    ) -> None:
        # deliberately calls TransformerJitEmbeddings.__init__ directly
        # (not super()): all state is set up there.
        TransformerJitEmbeddings.__init__(self, **kwargs)
@register_embeddings
class TransformerOnnxWordEmbeddings(TokenEmbeddings, TransformerOnnxEmbeddings):
    """Token-level marker subclass of :class:`TransformerOnnxEmbeddings`."""

    def __init__(
        self,
        **kwargs,
    ) -> None:
        # deliberately calls TransformerOnnxEmbeddings.__init__ directly
        # (not super()): all state is set up there.
        TransformerOnnxEmbeddings.__init__(self, **kwargs)
@register_embeddings
class TransformerOnnxDocumentEmbeddings(DocumentEmbeddings, TransformerOnnxEmbeddings):
    """Document-level marker subclass of :class:`TransformerOnnxEmbeddings`."""

    def __init__(
        self,
        **kwargs,
    ) -> None:
        # deliberately calls TransformerOnnxEmbeddings.__init__ directly
        # (not super()): all state is set up there.
        TransformerOnnxEmbeddings.__init__(self, **kwargs)
@register_embeddings
class TransformerEmbeddings(TransformerBaseEmbeddings):
    """Embeddings from a (optionally fine-tunable) Hugging Face transformer.

    Depending on `is_token_embedding` / `is_document_embedding` this class
    produces token-level embeddings, document-level embeddings, or both.
    """

    onnx_cls: Type[TransformerOnnxEmbeddings] = TransformerOnnxEmbeddings

    def __init__(
        self,
        model: str = "bert-base-uncased",
        fine_tune: bool = True,
        layers: str = "-1",
        layer_mean: bool = True,
        subtoken_pooling: str = "first",
        cls_pooling: str = "cls",
        is_token_embedding: bool = True,
        is_document_embedding: bool = True,
        allow_long_sentences: bool = False,
        use_context: Union[bool, int] = False,
        respect_document_boundaries: bool = True,
        context_dropout: float = 0.5,
        saved_config: Optional[PretrainedConfig] = None,
        tokenizer_data: Optional[BytesIO] = None,
        feature_extractor_data: Optional[BytesIO] = None,
        name: Optional[str] = None,
        force_max_length: bool = False,
        needs_manual_ocr: Optional[bool] = None,
        use_context_separator: bool = True,
        **kwargs,
    ) -> None:
        """Instantiate transformer embeddings.

        :param model: name of the huggingface model (or a local path)
        :param fine_tune: if True, gradients flow into the transformer weights
        :param layers: comma-separated layer indices to use, or "all"
        :param layer_mean: if True, average the selected layers instead of concatenating
        :param subtoken_pooling: "first", "last", "first_last" or "mean" pooling
            from sub-tokens to tokens
        :param cls_pooling: "cls", "mean" or "max" pooling for document embeddings
        :param allow_long_sentences: split over-long sentences into overlapping windows
        :param use_context: FLERT context size (True -> 64 tokens, int -> that many)
        :param context_dropout: probability of dropping each context side during training
        :param saved_config: restore the model from this config instead of downloading
        :param tokenizer_data: restore the tokenizer from this in-memory zip
        :param use_context_separator: add a special boundary token between context and sentence
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())
        del self.instance_parameters["saved_config"]
        del self.instance_parameters["tokenizer_data"]
        # temporary fix to disable tokenizer parallelism warning
        # (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
        os.environ["TOKENIZERS_PARALLELISM"] = "false"
        # do not print transformer warnings as these are confusing in this case
        from transformers import logging

        logging.set_verbosity_error()
        self.tokenizer: PreTrainedTokenizer
        self.feature_extractor: Optional[FeatureExtractionMixin]

        if tokenizer_data is None:
            # load tokenizer and transformer model
            self.tokenizer = AutoTokenizer.from_pretrained(model, add_prefix_space=True, **kwargs)
            try:
                self.feature_extractor = AutoFeatureExtractor.from_pretrained(model, apply_ocr=False)
            except OSError:
                self.feature_extractor = None
        else:
            # load tokenizer from inmemory zip-file
            self.tokenizer = self._tokenizer_from_bytes(tokenizer_data)
            if feature_extractor_data is not None:
                self.feature_extractor = self._feature_extractor_from_bytes(feature_extractor_data)
            else:
                self.feature_extractor = None

        def is_supported_t5_model(config: PretrainedConfig) -> bool:
            # T5-style models have no AutoModel mapping usable here and need the
            # dedicated encoder-only class
            t5_supported_model_types = ["t5", "mt5", "longt5"]
            return getattr(config, "model_type", "") in t5_supported_model_types

        if saved_config is None:
            config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
            if is_supported_t5_model(config):
                from transformers import T5EncoderModel

                transformer_model = T5EncoderModel.from_pretrained(model, config=config)
            else:
                transformer_model = AutoModel.from_pretrained(model, config=config)
        else:
            if is_supported_t5_model(saved_config):
                from transformers import T5EncoderModel

                transformer_model = T5EncoderModel(saved_config, **kwargs)
            else:
                transformer_model = AutoModel.from_config(saved_config, **kwargs)
        transformer_model = transformer_model.to(flair.device)

        self.truncate = True
        self.force_max_length = force_max_length

        # models without a real length limit report a huge model_max_length
        if self.tokenizer.model_max_length > LARGE_INTEGER:
            allow_long_sentences = False
            self.truncate = False

        self.stride = self.tokenizer.model_max_length // 2 if allow_long_sentences else 0
        self.allow_long_sentences = allow_long_sentences
        self.use_lang_emb = hasattr(transformer_model, "use_lang_emb") and transformer_model.use_lang_emb

        # model name
        if name is None:
            self.name = "transformer-" + transformer_model.name_or_path
        else:
            self.name = name
        self.base_model_name = transformer_model.name_or_path

        self.token_embedding = is_token_embedding
        self.document_embedding = is_document_embedding

        if self.document_embedding and cls_pooling not in ["cls", "max", "mean"]:
            raise ValueError(f"Document Pooling operation `{cls_pooling}` is not defined for TransformerEmbedding")

        if self.token_embedding and subtoken_pooling not in ["first", "last", "first_last", "mean"]:
            raise ValueError(f"Subtoken Pooling operation `{subtoken_pooling}` is not defined for TransformerEmbedding")

        if self.document_embedding and cls_pooling == "cls" and allow_long_sentences:
            # fixed: the message previously had an unbalanced quote ("'max ")
            log.warning(
                "Using long sentences for Document embeddings is only beneficial for cls_pooling types 'mean' and 'max'"
            )

        if isinstance(use_context, bool):
            self.context_length: int = 64 if use_context else 0
        else:
            self.context_length = use_context

        self.context_dropout = context_dropout
        self.respect_document_boundaries = respect_document_boundaries

        # embedding parameters
        if layers == "all":
            # send mini-token through to check how many layers the model has
            hidden_states = transformer_model(torch.tensor([1], device=flair.device).unsqueeze(0))[-1]
            self.layer_indexes = list(range(len(hidden_states)))
        else:
            self.layer_indexes = list(map(int, layers.split(",")))

        self.cls_pooling = cls_pooling
        self.subtoken_pooling = subtoken_pooling
        self.layer_mean = layer_mean
        self.fine_tune = fine_tune
        self.static_embeddings = not self.fine_tune

        # return length
        self.embedding_length_internal = self._calculate_embedding_length(transformer_model)
        if needs_manual_ocr is not None:
            self.needs_manual_ocr = needs_manual_ocr

        # If we use a context separator, add a new special token
        self.use_context_separator = use_context_separator
        if use_context_separator:
            self.tokenizer.add_special_tokens({"additional_special_tokens": [SENTENCE_BOUNDARY_TAG]})
            transformer_model.resize_token_embeddings(len(self.tokenizer))

        super().__init__(**self.to_args())

        # most models have an initial BOS token, except for XLNet, T5 and GPT2
        self.initial_cls_token: bool = self._has_initial_cls_token()

        self.model = transformer_model
        self.to(flair.device)
        # when initializing, embeddings are in eval mode by default
        self.eval()

    @property
    def embedding_length(self) -> int:
        # lazily computed for instances restored from legacy states
        if not hasattr(self, "embedding_length_internal"):
            self.embedding_length_internal = self._calculate_embedding_length(self.model)
        return self.embedding_length_internal

    def _has_initial_cls_token(self) -> bool:
        # most models have CLS token as last token (GPT-1, GPT-2, TransfoXL, XLNet, XLM), but BERT is initial
        if self.tokenizer_needs_ocr_boxes:
            # cannot run `.encode` if ocr boxes are required, assume True
            return True
        tokens = self.tokenizer.encode("a")
        return tokens[0] == self.tokenizer.cls_token_id

    def _calculate_embedding_length(self, model) -> int:
        """Compute the output vector size from layer count, mean and pooling."""
        length = len(self.layer_indexes) * model.config.hidden_size if not self.layer_mean else model.config.hidden_size

        # in case of doubt: token embedding has higher priority than document embedding
        if self.token_embedding and self.subtoken_pooling == "first_last":
            length *= 2
            if self.document_embedding:
                log.warning(
                    "Token embedding length and Document embedding length vary, due to `first_last` subtoken pooling, this might not be supported"
                )
        return length

    @property
    def embedding_type(self) -> str:
        # in case of doubt: token embedding has higher priority than document embedding
        return "word-level" if self.token_embedding else "sentence-level"

    def __setstate__(self, state):
        """Restore from current and several legacy serialization formats."""
        config_state_dict = state.pop("config_state_dict", None)
        model_state_dict = state.pop("model_state_dict", None)

        # legacy TransformerDocumentEmbedding
        state.pop("batch_size", None)
        state.pop("embedding_length_internal", None)
        # legacy TransformerTokenEmbedding
        state.pop("memory_effective_training", None)

        if "base_model_name" in state:
            state["model"] = state.pop("base_model_name")

        state["use_context"] = state.pop("context_length", False)

        if "layer_indexes" in state:
            layer_indexes = state.pop("layer_indexes")
            state["layers"] = ",".join(map(str, layer_indexes))

        if "use_context_separator" not in state:
            # legacy Flair <= 0.12
            state["use_context_separator"] = False

        if "use_scalar_mix" in state:
            # legacy Flair <= 0.7
            state["layer_mean"] = state.pop("use_scalar_mix")

        if "is_token_embedding" not in state:
            # legacy TransformerTokenEmbedding
            state["is_token_embedding"] = "pooling_operation" in state

        if "is_document_embedding" not in state:
            # Legacy TransformerDocumentEmbedding
            state["is_document_embedding"] = "pooling" in state

        if "pooling_operation" in state:
            # legacy TransformerTokenEmbedding
            state["subtoken_pooling"] = state.pop("pooling_operation")

        if "pooling" in state:
            # legacy TransformerDocumentEmbedding
            state["cls_pooling"] = state.pop("pooling")

        config = None

        if config_state_dict:
            # some models like the tars model somehow lost this information.
            if config_state_dict.get("_name_or_path") == "None":
                config_state_dict["_name_or_path"] = state.get("model", "None")

            model_type = config_state_dict.get("model_type", "bert")
            config_class = CONFIG_MAPPING[model_type]
            config = config_class.from_dict(config_state_dict)

        embedding = self.create_from_state(saved_config=config, **state)

        # copy values from new embedding
        for key in embedding.__dict__:
            self.__dict__[key] = embedding.__dict__[key]

        if model_state_dict:
            self.model.load_state_dict(model_state_dict)

    @classmethod
    def from_params(cls, params):
        """Recreate an instance from a `to_params` dict."""
        # these values are derived in the constructor and must not be passed in
        params.pop("truncate", None)
        params.pop("stride", None)
        params.pop("embedding_length", None)
        params.pop("use_lang_emb", None)
        params["use_context"] = params.pop("context_length", 0)
        config_state_dict = params.pop("config_state_dict", None)
        config = None

        if config_state_dict:
            model_type = config_state_dict.get("model_type", "bert")
            config_class = CONFIG_MAPPING[model_type]
            config = config_class.from_dict(config_state_dict)

        return cls.create_from_state(saved_config=config, **params)

    def to_params(self):
        """Serialize the constructor arguments (weights are stored separately)."""
        config_dict = self.model.config.to_dict()
        super_params = super().to_params()

        # those parameters are only from the super class and will be recreated in the constructor.
        del super_params["truncate"]
        del super_params["stride"]
        del super_params["embedding_length"]
        del super_params["use_lang_emb"]

        model_state = {
            **super_params,
            "model": self.base_model_name,
            "fine_tune": self.fine_tune,
            "layers": ",".join(map(str, self.layer_indexes)),
            "layer_mean": self.layer_mean,
            "subtoken_pooling": self.subtoken_pooling,
            "cls_pooling": self.cls_pooling,
            "config_state_dict": config_dict,
        }

        return model_state

    def _can_document_embedding_shortcut(self):
        # cls first pooling can be done without recreating sentence hidden states
        return (
            self.document_embedding
            and not self.token_embedding
            and self.cls_pooling == "cls"
            and self.initial_cls_token
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        sub_token_lengths: Optional[torch.LongTensor] = None,
        token_lengths: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        overflow_to_sample_mapping: Optional[torch.Tensor] = None,
        word_ids: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        bbox: Optional[torch.Tensor] = None,
        pixel_values: Optional[torch.Tensor] = None,
    ):
        """Run the transformer and pool hidden states into the requested embeddings.

        Returns a dict with "document_embeddings" and/or "token_embeddings".
        """
        model_kwargs = {}
        if langs is not None:
            model_kwargs["langs"] = langs
        if attention_mask is not None:
            model_kwargs["attention_mask"] = attention_mask
        if bbox is not None:
            model_kwargs["bbox"] = bbox
        if pixel_values is not None:
            model_kwargs["pixel_values"] = pixel_values
        hidden_states = self.model(input_ids, **model_kwargs)[-1]

        # make the tuple a tensor; makes working with it easier.
        hidden_states = torch.stack(hidden_states)

        # for multimodal models like layoutlmv3, we truncate the image embeddings as they are only used via attention
        hidden_states = truncate_hidden_states(hidden_states, input_ids)

        # only use layers that will be outputted
        hidden_states = hidden_states[self.layer_indexes, :, :]
        if self.layer_mean:
            hidden_states = hidden_states.mean(dim=0)
        else:
            # concatenate the selected layers along the embedding dimension
            hidden_states = torch.flatten(hidden_states.permute((0, 3, 1, 2)), 0, 1).permute((1, 2, 0))

        if self._can_document_embedding_shortcut():
            return {"document_embeddings": hidden_states[:, 0]}

        if self.allow_long_sentences:
            # re-join the overlapping windows into one row per sentence
            assert overflow_to_sample_mapping is not None
            sentence_hidden_states = combine_strided_tensors(
                hidden_states, overflow_to_sample_mapping, self.stride // 2, self.tokenizer.model_max_length, 0
            )
            if self.tokenizer.is_fast and self.token_embedding:
                word_ids = combine_strided_tensors(
                    word_ids, overflow_to_sample_mapping, self.stride // 2, self.tokenizer.model_max_length, -100
                )
        else:
            sentence_hidden_states = hidden_states

        result = {}

        if self.document_embedding:
            if self.cls_pooling == "cls" and self.initial_cls_token:
                document_embeddings = sentence_hidden_states[:, 0]
            else:
                assert sub_token_lengths is not None
                if self.cls_pooling == "cls":
                    # CLS token is the last non-pad sub-token
                    document_embeddings = sentence_hidden_states[
                        torch.arange(sentence_hidden_states.shape[0]), sub_token_lengths - 1
                    ]
                elif self.cls_pooling == "mean":
                    document_embeddings = document_mean_pooling(sentence_hidden_states, sub_token_lengths)
                elif self.cls_pooling == "max":
                    document_embeddings = document_max_pooling(sentence_hidden_states, sub_token_lengths)
                else:
                    raise ValueError(f"cls pooling method: `{self.cls_pooling}` is not implemented")
            result["document_embeddings"] = document_embeddings

        if self.token_embedding:
            assert word_ids is not None
            assert token_lengths is not None
            all_token_embeddings = torch.zeros(  # type: ignore[call-overload]
                word_ids.shape[0], token_lengths.max(), self.embedding_length_internal, device=flair.device
            )
            true_tensor = torch.ones_like(word_ids[:, :1], dtype=torch.bool)

            if self.subtoken_pooling == "first":
                # gain_mask marks positions where a new word starts
                gain_mask = word_ids[:, 1:] != word_ids[:, : word_ids.shape[1] - 1]
                first_mask = torch.cat([true_tensor, gain_mask], dim=1)
                all_token_embeddings = fill_masked_elements(
                    all_token_embeddings, sentence_hidden_states, first_mask, word_ids, token_lengths
                )
            elif self.subtoken_pooling == "last":
                gain_mask = word_ids[:, 1:] != word_ids[:, : word_ids.shape[1] - 1]
                last_mask = torch.cat([gain_mask, true_tensor], dim=1)
                all_token_embeddings = fill_masked_elements(
                    all_token_embeddings, sentence_hidden_states, last_mask, word_ids, token_lengths
                )
            elif self.subtoken_pooling == "first_last":
                # first sub-token fills the lower half, last sub-token the upper half
                gain_mask = word_ids[:, 1:] != word_ids[:, : word_ids.shape[1] - 1]
                first_mask = torch.cat([true_tensor, gain_mask], dim=1)
                last_mask = torch.cat([gain_mask, true_tensor], dim=1)
                all_token_embeddings[:, :, : sentence_hidden_states.shape[2]] = fill_masked_elements(
                    all_token_embeddings[:, :, : sentence_hidden_states.shape[2]],
                    sentence_hidden_states,
                    first_mask,
                    word_ids,
                    token_lengths,
                )
                all_token_embeddings[:, :, sentence_hidden_states.shape[2] :] = fill_masked_elements(
                    all_token_embeddings[:, :, sentence_hidden_states.shape[2] :],
                    sentence_hidden_states,
                    last_mask,
                    word_ids,
                    token_lengths,
                )
            elif self.subtoken_pooling == "mean":
                all_token_embeddings = fill_mean_token_embeddings(
                    all_token_embeddings, sentence_hidden_states, word_ids, token_lengths
                )
            else:
                raise ValueError(f"subtoken pooling method: `{self.subtoken_pooling}` is not implemented")
            result["token_embeddings"] = all_token_embeddings
        return result

    def _forward_tensors(self, tensors) -> Dict[str, torch.Tensor]:
        return self.forward(**tensors)

    def export_onnx(
        self, path: Union[str, Path], example_sentences: List[Sentence], **kwargs
    ) -> TransformerOnnxEmbeddings:
        """Export TransformerEmbeddings to the ONNX format.

        :param path: the path to store the exported model to
        :param example_sentences: a list of sentences that will be used for tracing. It is recommended to take 2-4
            sentences with some variation.
        :param kwargs: forwarded to `TransformerOnnxEmbeddings.export_from_embedding`
        """
        return self.onnx_cls.export_from_embedding(path, self, example_sentences, **kwargs)
| 58,455 | 41.606414 | 146 | py |
flair | flair-master/flair/embeddings/token.py | import hashlib
import logging
import os
import re
import tempfile
from collections import Counter
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import gensim
import numpy as np
import torch
from bpemb import BPEmb
from gensim.models import KeyedVectors
from gensim.models.fasttext import FastTextKeyedVectors, load_facebook_vectors
from torch import nn
import flair
from flair.data import Corpus, Dictionary, Sentence, _iter_dataset
from flair.embeddings.base import TokenEmbeddings, load_embeddings, register_embeddings
from flair.embeddings.transformer import (
TransformerEmbeddings,
TransformerOnnxWordEmbeddings,
)
from flair.file_utils import cached_path, extract_single_zip_file, instance_lru_cache
log = logging.getLogger("flair")
@register_embeddings
class TransformerWordEmbeddings(TokenEmbeddings, TransformerEmbeddings):
    """Word-level convenience wrapper around :class:`TransformerEmbeddings`."""

    # ONNX export counterpart used by `export_onnx`
    onnx_cls = TransformerOnnxWordEmbeddings

    def __init__(
        self,
        model: str = "bert-base-uncased",
        is_document_embedding: bool = False,
        allow_long_sentences: bool = True,
        **kwargs,
    ) -> None:
        """Bidirectional transformer embeddings of words from various transformer architectures.

        :param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
            options)
        :param is_document_embedding: if True, additionally produce a document-level embedding
        :param allow_long_sentences: if True, split over-long sentences into overlapping windows
        :param kwargs: forwarded to :class:`TransformerEmbeddings`; notable options:
            `layers` (string indicating which layers to take for embedding, -1 is topmost layer),
            `subtoken_pooling` (how to get from token piece embeddings to token embedding: 'first',
            'last', 'first_last' or 'mean'), `layer_mean` (if True, uses a scalar mix of layers),
            `fine_tune` (if True, allows transformers to be fine-tuned during training)
        """
        TransformerEmbeddings.__init__(
            self,
            model=model,
            is_token_embedding=True,
            is_document_embedding=is_document_embedding,
            allow_long_sentences=allow_long_sentences,
            **kwargs,
        )

    @classmethod
    def create_from_state(cls, **state):
        # this parameter is fixed for this subclass, drop it before re-init
        del state["is_token_embedding"]
        return cls(**state)
@register_embeddings
class StackedEmbeddings(TokenEmbeddings):
    """A stack of embeddings, used if you need to combine several different embedding types."""

    def __init__(self, embeddings: List[TokenEmbeddings], overwrite_names: bool = True) -> None:
        """The constructor takes a list of embeddings to be combined.

        :param embeddings: the embeddings to stack; their vectors are gathered side by side
        :param overwrite_names: if True, prefix each embedding's name with its stack index
        """
        super().__init__()

        self.embeddings = embeddings

        # IMPORTANT: add embeddings as torch modules so their parameters are registered
        for i, embedding in enumerate(embeddings):
            if overwrite_names:
                embedding.name = f"{i!s}-{embedding.name}"
            self.add_module(f"list_embedding_{i!s}", embedding)

        self.name: str = "Stack"
        self.__names = [name for embedding in self.embeddings for name in embedding.get_names()]
        self.static_embeddings: bool = True

        self.__embedding_type: str = embeddings[0].embedding_type

        # total length is the sum of all stacked embedding lengths
        self.__embedding_length: int = sum(embedding.embedding_length for embedding in embeddings)

        self.eval()

    def embed(self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True):
        """Embeds the given sentences with every embedding in the stack.

        :param sentences: a single sentence or a list of sentences
        :param static_embeddings: unused; kept for backwards compatibility of the signature
        """
        # if only one sentence is passed, convert to list of sentence
        if isinstance(sentences, Sentence):
            sentences = [sentences]

        for embedding in self.embeddings:
            embedding.embed(sentences)

    @property
    def embedding_type(self) -> str:
        return self.__embedding_type

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        for embedding in self.embeddings:
            embedding._add_embeddings_internal(sentences)

        return sentences

    def __str__(self) -> str:
        return f'StackedEmbeddings [{",".join([str(e) for e in self.embeddings])}]'

    def get_names(self) -> List[str]:
        """Returns a list of embedding names.

        In most cases, it is just a list with one item, namely the name of this embedding. But in some cases, the
        embedding is made up by different embeddings (StackedEmbedding).
        Then, the list contains the names of all embeddings in the stack.
        """
        # make compatible with serialized models that predate this attribute.
        # NOTE: "__names" is name-mangled inside the class, so the __dict__ key is the
        # mangled form; checking the bare "__names" key (as before) never matched and
        # recomputed the list on every call.
        if "_StackedEmbeddings__names" not in self.__dict__:
            self.__names = [name for embedding in self.embeddings for name in embedding.get_names()]

        return self.__names

    def get_named_embeddings_dict(self) -> Dict:
        named_embeddings_dict = {}
        for embedding in self.embeddings:
            named_embeddings_dict.update(embedding.get_named_embeddings_dict())

        return named_embeddings_dict

    @classmethod
    def from_params(cls, params):
        embeddings = [load_embeddings(p) for p in params["embeddings"]]
        return cls(embeddings=embeddings, overwrite_names=False)

    def to_params(self):
        return {"embeddings": [emb.save_embeddings(use_state_dict=False) for emb in self.embeddings]}
@register_embeddings
class WordEmbeddings(TokenEmbeddings):
    """Standard static word embeddings, such as GloVe or FastText."""

    def __init__(
        self,
        embeddings: Optional[str],
        field: Optional[str] = None,
        fine_tune: bool = False,
        force_cpu: bool = True,
        stable: bool = False,
        no_header: bool = False,
        vocab: Optional[Dict[str, int]] = None,
        embedding_length: Optional[int] = None,
        name: Optional[str] = None,
    ) -> None:
        """Initializes classic word embeddings.

        Constructor downloads required files if not there.

        :param embeddings: one of: 'glove', 'extvec', 'crawl' or two-letter language code or custom.
            If you want to use a custom embedding file, just pass the path to the embeddings as embeddings variable.
            May be None, in which case `vocab` and `embedding_length` must be given.
        :param field: if given, embed the value of the token label with this name instead of the token text
        :param fine_tune: if True, the embedding matrix is updated during training
        :param force_cpu: if True, keep the embedding matrix on CPU even when the model is on GPU
        :param stable: set stable=True to use the stable embeddings as described in https://arxiv.org/abs/2110.02861
        :param no_header: set True when loading a word2vec text file without a header line
        :param vocab: precomputed token-to-row map; only used (and required) when embeddings is None
        :param embedding_length: vector dimensionality; only used (and required) when embeddings is None
        :param name: optional name override for this embedding
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())

        if fine_tune and force_cpu and flair.device.type != "cpu":
            raise ValueError("Cannot train WordEmbeddings on cpu if the model is trained on gpu, set force_cpu=False")

        # resolve the identifier to a local file path, downloading to cache if needed
        embeddings_path = self.resolve_precomputed_path(embeddings)

        if name is None:
            name = str(embeddings_path)
        self.name = name
        self.embeddings = embeddings if embeddings is not None else name
        self.static_embeddings = not fine_tune
        self.fine_tune = fine_tune
        self.force_cpu = force_cpu
        self.field = field
        self.stable = stable
        super().__init__()

        if embeddings_path is not None:
            # load vectors with gensim: .bin/.txt are word2vec formats, anything else
            # is assumed to be a gensim-native KeyedVectors save
            if embeddings_path.suffix in [".bin", ".txt"]:
                precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
                    str(embeddings_path), binary=embeddings_path.suffix == ".bin", no_header=no_header
                )
            else:
                precomputed_word_embeddings = gensim.models.KeyedVectors.load(str(embeddings_path))

            self.__embedding_length: int = precomputed_word_embeddings.vector_size

            # append one all-zero row that serves as the <unk> vector (index len(vocab))
            vectors = np.row_stack(
                (
                    precomputed_word_embeddings.vectors,
                    np.zeros(self.__embedding_length, dtype="float"),
                )
            )

            try:
                # gensim version 4
                self.vocab = precomputed_word_embeddings.key_to_index
            except AttributeError:
                # gensim version 3
                self.vocab = {k: v.index for k, v in precomputed_word_embeddings.vocab.items()}
        else:
            # if no embedding is set, the vocab and embedding length is required;
            # the matrix is zero-initialized (weights arrive later via state dict)
            assert vocab is not None
            assert embedding_length is not None
            self.vocab = vocab
            self.__embedding_length = embedding_length
            vectors = np.zeros((len(self.vocab) + 1, self.__embedding_length), dtype="float")

        self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(vectors), freeze=not fine_tune)

        if stable:
            # layer norm over the embedding dimension, cf. https://arxiv.org/abs/2110.02861
            self.layer_norm: Optional[nn.LayerNorm] = nn.LayerNorm(
                self.__embedding_length, elementwise_affine=fine_tune
            )
        else:
            self.layer_norm = None

        self.device = None
        self.to(flair.device)

        self.eval()

    def resolve_precomputed_path(self, embeddings: Optional[str]) -> Optional[Path]:
        """Maps an embedding identifier (e.g. 'glove', 'de-crawl', a file path) to a local path.

        Known identifiers are downloaded to the flair cache on first use. Any other string
        must be an existing file path, otherwise a ValueError is raised.
        """
        if embeddings is None:
            return None
        hu_path: str = "https://flair.informatik.hu-berlin.de/resources/embeddings/token"
        cache_dir = Path("embeddings")

        # GLOVE embeddings
        if embeddings.lower() == "glove" or embeddings.lower() == "en-glove":
            cached_path(f"{hu_path}/glove.gensim.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/glove.gensim", cache_dir=cache_dir)

        # TURIAN embeddings
        elif embeddings.lower() == "turian" or embeddings.lower() == "en-turian":
            cached_path(f"{hu_path}/turian.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/turian", cache_dir=cache_dir)

        # KOMNINOS embeddings
        elif embeddings.lower() == "extvec" or embeddings.lower() == "en-extvec":
            cached_path(f"{hu_path}/extvec.gensim.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/extvec.gensim", cache_dir=cache_dir)

        # pubmed embeddings
        elif embeddings.lower() == "pubmed" or embeddings.lower() == "en-pubmed":
            cached_path(
                f"{hu_path}/pubmed_pmc_wiki_sg_1M.gensim.vectors.npy",
                cache_dir=cache_dir,
            )
            return cached_path(f"{hu_path}/pubmed_pmc_wiki_sg_1M.gensim", cache_dir=cache_dir)

        # FT-CRAWL embeddings
        elif embeddings.lower() == "crawl" or embeddings.lower() == "en-crawl":
            cached_path(f"{hu_path}/en-fasttext-crawl-300d-1M.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/en-fasttext-crawl-300d-1M", cache_dir=cache_dir)

        # FT-NEWS embeddings
        elif embeddings.lower() in ["news", "en-news", "en"]:
            cached_path(f"{hu_path}/en-fasttext-news-300d-1M.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/en-fasttext-news-300d-1M", cache_dir=cache_dir)

        # twitter embeddings
        elif embeddings.lower() in ["twitter", "en-twitter"]:
            cached_path(f"{hu_path}/twitter.gensim.vectors.npy", cache_dir=cache_dir)
            return cached_path(f"{hu_path}/twitter.gensim", cache_dir=cache_dir)

        # two-letter language code wiki embeddings
        elif len(embeddings.lower()) == 2:
            cached_path(
                f"{hu_path}/{embeddings}-wiki-fasttext-300d-1M.vectors.npy",
                cache_dir=cache_dir,
            )
            return cached_path(f"{hu_path}/{embeddings}-wiki-fasttext-300d-1M", cache_dir=cache_dir)

        # two-letter language code wiki embeddings (explicit '-wiki' suffix, e.g. 'de-wiki')
        elif len(embeddings.lower()) == 7 and embeddings.endswith("-wiki"):
            cached_path(
                f"{hu_path}/{embeddings[:2]}-wiki-fasttext-300d-1M.vectors.npy",
                cache_dir=cache_dir,
            )
            return cached_path(f"{hu_path}/{embeddings[:2]}-wiki-fasttext-300d-1M", cache_dir=cache_dir)

        # two-letter language code crawl embeddings (e.g. 'de-crawl')
        elif len(embeddings.lower()) == 8 and embeddings.endswith("-crawl"):
            cached_path(
                f"{hu_path}/{embeddings[:2]}-crawl-fasttext-300d-1M.vectors.npy",
                cache_dir=cache_dir,
            )
            return cached_path(
                f"{hu_path}/{embeddings[:2]}-crawl-fasttext-300d-1M",
                cache_dir=cache_dir,
            )

        elif not Path(embeddings).exists():
            raise ValueError(f'The given embeddings "{embeddings}" is not available or is not a valid path.')
        else:
            return Path(embeddings)

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    @instance_lru_cache(maxsize=100000, typed=False)
    def get_cached_token_index(self, word: str) -> int:
        """Returns the embedding-row index for a word, trying the raw form, then the
        lowercase form, then digit-normalized lowercase variants ('#' or '0' for each
        digit); unknown words map to the trailing <unk> row (index len(vocab))."""
        if word in self.vocab:
            return self.vocab[word]
        elif word.lower() in self.vocab:
            return self.vocab[word.lower()]
        elif re.sub(r"\d", "#", word.lower()) in self.vocab:
            return self.vocab[re.sub(r"\d", "#", word.lower())]
        elif re.sub(r"\d", "0", word.lower()) in self.vocab:
            return self.vocab[re.sub(r"\d", "0", word.lower())]
        else:
            return len(self.vocab)  # <unk> token

    def get_vec(self, word: str) -> torch.Tensor:
        """Returns the embedding of a single word as a tensor on flair.device."""
        # NOTE(review): this reads `self.vectors`, which is not assigned anywhere in the
        # current __init__ — presumably a leftover for older pickled models; verify before use
        word_embedding = self.vectors[self.get_cached_token_index(word)]

        word_embedding = torch.tensor(word_embedding.tolist(), device=flair.device, dtype=torch.float)
        return word_embedding

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        tokens = [token for sentence in sentences for token in sentence.tokens]

        word_indices: List[int] = []
        for token in tokens:
            # embed either the token text or, if configured, a token label value
            word = token.text if self.field is None else token.get_label(self.field).value
            word_indices.append(self.get_cached_token_index(word))

        # single batched lookup for all tokens of all sentences
        embeddings = self.embedding(torch.tensor(word_indices, dtype=torch.long, device=self.device))
        if self.layer_norm is not None:
            embeddings = self.layer_norm(embeddings)

        if self.force_cpu:
            # lookup ran on CPU; move the result to the model device
            embeddings = embeddings.to(flair.device)

        for emb, token in zip(embeddings, tokens):
            token.set_embedding(self.name, emb)

        return sentences

    def __str__(self) -> str:
        return self.name

    def extra_repr(self):
        return f"'{self.embeddings}'"

    def train(self, mode=True):
        # stay in eval mode unless fine-tuning is enabled
        super().train(self.fine_tune and mode)

    def to(self, device):
        # with force_cpu, silently pin the module to CPU regardless of the requested device
        if self.force_cpu:
            device = torch.device("cpu")
        self.device = device
        super().to(device)

    def _apply(self, fn):
        if fn.__name__ == "convert" and self.force_cpu:
            # this is required to force the module on the cpu,
            # if a parent module is put to gpu, the _apply is called to each sub_module
            # self.to(..) actually sets the device properly
            if not hasattr(self, "device"):
                self.to(flair.device)
            return
        super()._apply(fn)

    def __getattribute__(self, item):
        # this ignores the get_cached_vec method when loading older versions
        # it is needed for compatibility reasons
        if item == "get_cached_vec":
            return None
        return super().__getattribute__(item)

    def __setstate__(self, state: Dict[str, Any]):
        # restore from older serialized versions: drop removed members and fill in
        # defaults for attributes that were added in later releases
        state.pop("get_cached_vec", None)
        state.setdefault("embeddings", state["name"])
        state.setdefault("force_cpu", True)
        state.setdefault("fine_tune", False)
        state.setdefault("field", None)
        if "precomputed_word_embeddings" in state:
            # very old versions pickled the raw gensim KeyedVectors; rebuild the
            # nn.Embedding (plus trailing <unk> row) and the vocab from it
            precomputed_word_embeddings: KeyedVectors = state.pop("precomputed_word_embeddings")
            vectors = np.row_stack(
                (
                    precomputed_word_embeddings.vectors,
                    np.zeros(precomputed_word_embeddings.vector_size, dtype="float"),
                )
            )
            embedding = nn.Embedding.from_pretrained(torch.FloatTensor(vectors), freeze=not state["fine_tune"])
            try:
                # gensim version 4
                vocab = precomputed_word_embeddings.key_to_index
            except AttributeError:
                # gensim version 3
                vocab = {k: v.index for k, v in precomputed_word_embeddings.__dict__["vocab"].items()}
            state["embedding"] = embedding
            state["vocab"] = vocab
        if "stable" not in state:
            state["stable"] = False
            state["layer_norm"] = None
        super().__setstate__(state)

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "WordEmbeddings":
        # the embedding weights themselves travel via the state dict, not the params
        return cls(embeddings=None, **params)

    def to_params(self) -> Dict[str, Any]:
        return {
            "vocab": self.vocab,
            "stable": self.stable,
            "fine_tune": self.fine_tune,
            "force_cpu": self.force_cpu,
            "field": self.field,
            "name": self.name,
            "embedding_length": self.__embedding_length,
        }

    def state_dict(self, *args, destination=None, prefix="", keep_vars=False):
        # when loading the old versions from pickle, the embeddings might not be added as pytorch module.
        # we do this delayed, when the weights are collected (e.g. for saving), as doing this earlier might
        # lead to issues while loading (trying to load weights that weren't stored as python weights and therefore
        # not finding them)
        if list(self.modules()) == [self]:
            # reassignment triggers nn.Module.__setattr__, which registers the submodule
            self.embedding = self.embedding
        return super().state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars)
@register_embeddings
class CharacterEmbeddings(TokenEmbeddings):
    """Character embeddings of words, as proposed in Lample et al., 2016.

    Each word is encoded by running its characters through a bidirectional LSTM and
    taking the hidden state at the last character of each direction.
    """

    def __init__(
        self,
        path_to_char_dict: Optional[Union[str, Dictionary]] = None,
        char_embedding_dim: int = 25,
        hidden_size_char: int = 25,
    ) -> None:
        """Instantiates a bidirectional lstm layer to encode words by their character representation.

        Uses the default character dictionary if none provided.

        :param path_to_char_dict: a Dictionary, a path to load one from, or None for the
            default "common-chars" dictionary
        :param char_embedding_dim: dimensionality of each character embedding
        :param hidden_size_char: hidden size of the character LSTM (output embedding is
            twice this, since the LSTM is bidirectional)
        """
        super().__init__()
        self.name = "Char"
        self.static_embeddings = False
        self.instance_parameters = self.get_instance_parameters(locals=locals())

        # use list of common characters if none provided
        if path_to_char_dict is None:
            self.char_dictionary: Dictionary = Dictionary.load("common-chars")
        elif isinstance(path_to_char_dict, Dictionary):
            self.char_dictionary = path_to_char_dict
        else:
            self.char_dictionary = Dictionary.load_from_file(path_to_char_dict)

        self.char_embedding_dim: int = char_embedding_dim
        self.hidden_size_char: int = hidden_size_char
        self.char_embedding = torch.nn.Embedding(len(self.char_dictionary.item2idx), self.char_embedding_dim)
        self.char_rnn = torch.nn.LSTM(
            self.char_embedding_dim,
            self.hidden_size_char,
            num_layers=1,
            bidirectional=True,
        )

        self.__embedding_length = self.hidden_size_char * 2

        self.to(flair.device)
        self.eval()

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        """Computes a character-LSTM embedding for every token of every sentence."""
        for sentence in sentences:
            tokens_char_indices = []

            # translate words in sentence into ints using dictionary
            for token in sentence.tokens:
                char_indices = [self.char_dictionary.get_idx_for_item(char) for char in token.text]
                tokens_char_indices.append(char_indices)

            # sort words by length (required for packing/masking) while remembering the
            # permutation so results can be scattered back to the original token order.
            # FIX: the previous content-based matching (comparing char sequences) assigned
            # wrong embeddings when a sentence contained duplicate tokens; sorting indices
            # gives an unambiguous permutation.
            sort_order = sorted(
                range(len(tokens_char_indices)),
                key=lambda idx: len(tokens_char_indices[idx]),
                reverse=True,
            )
            tokens_sorted_by_length = [tokens_char_indices[idx] for idx in sort_order]
            d = dict(enumerate(sort_order))  # sorted position -> original position

            chars2_length = [len(c) for c in tokens_sorted_by_length]
            longest_token_in_sentence = max(chars2_length)
            tokens_mask = torch.zeros(
                (len(tokens_sorted_by_length), longest_token_in_sentence),
                dtype=torch.long,
                device=flair.device,
            )

            for i, c in enumerate(tokens_sorted_by_length):
                tokens_mask[i, : chars2_length[i]] = torch.tensor(c, dtype=torch.long, device=flair.device)

            # chars for rnn processing
            chars = tokens_mask

            character_embeddings = self.char_embedding(chars).transpose(0, 1)

            packed = torch.nn.utils.rnn.pack_padded_sequence(character_embeddings, chars2_length)  # type: ignore[arg-type]

            lstm_out, self.hidden = self.char_rnn(packed)

            outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
            outputs = outputs.transpose(0, 1)

            # take the hidden state at each token's last (non-padded) character
            chars_embeds_temp = torch.zeros(
                (outputs.size(0), outputs.size(2)),
                dtype=torch.float,
                device=flair.device,
            )
            for i, index in enumerate(output_lengths):
                chars_embeds_temp[i] = outputs[i, index - 1]

            # scatter embeddings back from sorted order to original token order
            character_embeddings = chars_embeds_temp.clone()
            for i in range(character_embeddings.size(0)):
                character_embeddings[d[i]] = chars_embeds_temp[i]

            for token_number, token in enumerate(sentence.tokens):
                token.set_embedding(self.name, character_embeddings[token_number])

    def __str__(self) -> str:
        return self.name

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "CharacterEmbeddings":
        return cls(**params)

    def to_params(self) -> Dict[str, Any]:
        return {
            "path_to_char_dict": self.char_dictionary,
            "char_embedding_dim": self.char_embedding_dim,
            "hidden_size_char": self.hidden_size_char,
        }
@register_embeddings
class FlairEmbeddings(TokenEmbeddings):
    """Contextual string embeddings of words, as proposed in Akbik et al., 2018."""

    def __init__(
        self,
        model,
        fine_tune: bool = False,
        chars_per_chunk: int = 512,
        with_whitespace: bool = True,
        tokenized_lm: bool = True,
        is_lower: bool = False,
        name: Optional[str] = None,
        has_decoder: bool = False,
    ) -> None:
        """Initializes contextual string embeddings using a character-level language model.

        :param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
          'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward',
          etc (see https://github.com/flairNLP/flair/blob/master/resources/docs/embeddings/FLAIR_EMBEDDINGS.md)
          depending on which character language model is desired. May also be a path or a LanguageModel instance.
        :param fine_tune: if set to True, the gradient will propagate into the language model. This dramatically slows
          down training and often leads to overfitting, so use with caution.
        :param chars_per_chunk: max number of chars per rnn pass to control speed/memory tradeoff. Higher means faster
          but requires more memory. Lower means slower but less memory.
        :param with_whitespace: If True, use hidden state after whitespace after word. If False, use hidden
          state at last character of word.
        :param tokenized_lm: Whether this lm is tokenized. Default is True, but for LMs trained over unprocessed text
          False might be better.
        :param is_lower: if True, sentences are lowercased before being fed to the language model
        :param name: optional name override for this embedding
        :param has_decoder: whether the stored language model should be loaded with its decoder head
        """
        super().__init__()
        self.instance_parameters = self.get_instance_parameters(locals=locals())

        cache_dir = Path("embeddings")

        # base URLs of the servers hosting pretrained character LMs
        hu_path: str = "https://flair.informatik.hu-berlin.de/resources/embeddings/flair"
        clef_hipe_path: str = "https://files.ifi.uzh.ch/cl/siclemat/impresso/clef-hipe-2020/flair"
        am_path: str = "http://ltdata1.informatik.uni-hamburg.de/amharic/models/flair/"

        self.is_lower: bool = is_lower

        self.PRETRAINED_MODEL_ARCHIVE_MAP = {
            # multilingual models
            "multi-forward": f"{hu_path}/lm-jw300-forward-v0.1.pt",
            "multi-backward": f"{hu_path}/lm-jw300-backward-v0.1.pt",
            "multi-v0-forward": f"{hu_path}/lm-multi-forward-v0.1.pt",
            "multi-v0-backward": f"{hu_path}/lm-multi-backward-v0.1.pt",
            "multi-forward-fast": f"{hu_path}/lm-multi-forward-fast-v0.1.pt",
            "multi-backward-fast": f"{hu_path}/lm-multi-backward-fast-v0.1.pt",
            # English models
            "en-forward": f"{hu_path}/news-forward-0.4.1.pt",
            "en-backward": f"{hu_path}/news-backward-0.4.1.pt",
            "en-forward-fast": f"{hu_path}/lm-news-english-forward-1024-v0.2rc.pt",
            "en-backward-fast": f"{hu_path}/lm-news-english-backward-1024-v0.2rc.pt",
            "news-forward": f"{hu_path}/news-forward-0.4.1.pt",
            "news-backward": f"{hu_path}/news-backward-0.4.1.pt",
            "news-forward-fast": f"{hu_path}/lm-news-english-forward-1024-v0.2rc.pt",
            "news-backward-fast": f"{hu_path}/lm-news-english-backward-1024-v0.2rc.pt",
            "mix-forward": f"{hu_path}/lm-mix-english-forward-v0.2rc.pt",
            "mix-backward": f"{hu_path}/lm-mix-english-backward-v0.2rc.pt",
            # Arabic
            "ar-forward": f"{hu_path}/lm-ar-opus-large-forward-v0.1.pt",
            "ar-backward": f"{hu_path}/lm-ar-opus-large-backward-v0.1.pt",
            # Bulgarian
            "bg-forward-fast": f"{hu_path}/lm-bg-small-forward-v0.1.pt",
            "bg-backward-fast": f"{hu_path}/lm-bg-small-backward-v0.1.pt",
            "bg-forward": f"{hu_path}/lm-bg-opus-large-forward-v0.1.pt",
            "bg-backward": f"{hu_path}/lm-bg-opus-large-backward-v0.1.pt",
            # Czech
            "cs-forward": f"{hu_path}/lm-cs-opus-large-forward-v0.1.pt",
            "cs-backward": f"{hu_path}/lm-cs-opus-large-backward-v0.1.pt",
            "cs-v0-forward": f"{hu_path}/lm-cs-large-forward-v0.1.pt",
            "cs-v0-backward": f"{hu_path}/lm-cs-large-backward-v0.1.pt",
            # Danish
            "da-forward": f"{hu_path}/lm-da-opus-large-forward-v0.1.pt",
            "da-backward": f"{hu_path}/lm-da-opus-large-backward-v0.1.pt",
            # German
            "de-forward": f"{hu_path}/lm-mix-german-forward-v0.2rc.pt",
            "de-backward": f"{hu_path}/lm-mix-german-backward-v0.2rc.pt",
            "de-historic-ha-forward": f"{hu_path}/lm-historic-hamburger-anzeiger-forward-v0.1.pt",
            "de-historic-ha-backward": f"{hu_path}/lm-historic-hamburger-anzeiger-backward-v0.1.pt",
            "de-historic-wz-forward": f"{hu_path}/lm-historic-wiener-zeitung-forward-v0.1.pt",
            "de-historic-wz-backward": f"{hu_path}/lm-historic-wiener-zeitung-backward-v0.1.pt",
            "de-historic-rw-forward": f"{hu_path}/redewiedergabe_lm_forward.pt",
            "de-historic-rw-backward": f"{hu_path}/redewiedergabe_lm_backward.pt",
            # Spanish
            "es-forward": f"{hu_path}/lm-es-forward.pt",
            "es-backward": f"{hu_path}/lm-es-backward.pt",
            "es-forward-fast": f"{hu_path}/lm-es-forward-fast.pt",
            "es-backward-fast": f"{hu_path}/lm-es-backward-fast.pt",
            # Basque
            "eu-forward": f"{hu_path}/lm-eu-opus-large-forward-v0.2.pt",
            "eu-backward": f"{hu_path}/lm-eu-opus-large-backward-v0.2.pt",
            "eu-v1-forward": f"{hu_path}/lm-eu-opus-large-forward-v0.1.pt",
            "eu-v1-backward": f"{hu_path}/lm-eu-opus-large-backward-v0.1.pt",
            "eu-v0-forward": f"{hu_path}/lm-eu-large-forward-v0.1.pt",
            "eu-v0-backward": f"{hu_path}/lm-eu-large-backward-v0.1.pt",
            # Persian
            "fa-forward": f"{hu_path}/lm-fa-opus-large-forward-v0.1.pt",
            "fa-backward": f"{hu_path}/lm-fa-opus-large-backward-v0.1.pt",
            # Finnish
            "fi-forward": f"{hu_path}/lm-fi-opus-large-forward-v0.1.pt",
            "fi-backward": f"{hu_path}/lm-fi-opus-large-backward-v0.1.pt",
            # French
            "fr-forward": f"{hu_path}/lm-fr-charlm-forward.pt",
            "fr-backward": f"{hu_path}/lm-fr-charlm-backward.pt",
            # Hebrew
            "he-forward": f"{hu_path}/lm-he-opus-large-forward-v0.1.pt",
            "he-backward": f"{hu_path}/lm-he-opus-large-backward-v0.1.pt",
            # Hindi
            "hi-forward": f"{hu_path}/lm-hi-opus-large-forward-v0.1.pt",
            "hi-backward": f"{hu_path}/lm-hi-opus-large-backward-v0.1.pt",
            # Croatian
            "hr-forward": f"{hu_path}/lm-hr-opus-large-forward-v0.1.pt",
            "hr-backward": f"{hu_path}/lm-hr-opus-large-backward-v0.1.pt",
            # Indonesian
            "id-forward": f"{hu_path}/lm-id-opus-large-forward-v0.1.pt",
            "id-backward": f"{hu_path}/lm-id-opus-large-backward-v0.1.pt",
            # Italian
            "it-forward": f"{hu_path}/lm-it-opus-large-forward-v0.1.pt",
            "it-backward": f"{hu_path}/lm-it-opus-large-backward-v0.1.pt",
            # Japanese
            "ja-forward": f"{hu_path}/japanese-forward.pt",
            "ja-backward": f"{hu_path}/japanese-backward.pt",
            # Malayalam
            "ml-forward": "https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/ml-forward.pt",
            "ml-backward": "https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/ml-backward.pt",
            # Dutch
            "nl-forward": f"{hu_path}/lm-nl-opus-large-forward-v0.1.pt",
            "nl-backward": f"{hu_path}/lm-nl-opus-large-backward-v0.1.pt",
            "nl-v0-forward": f"{hu_path}/lm-nl-large-forward-v0.1.pt",
            "nl-v0-backward": f"{hu_path}/lm-nl-large-backward-v0.1.pt",
            # Norwegian
            "no-forward": f"{hu_path}/lm-no-opus-large-forward-v0.1.pt",
            "no-backward": f"{hu_path}/lm-no-opus-large-backward-v0.1.pt",
            # Polish
            "pl-forward": f"{hu_path}/lm-polish-forward-v0.2.pt",
            "pl-backward": f"{hu_path}/lm-polish-backward-v0.2.pt",
            "pl-opus-forward": f"{hu_path}/lm-pl-opus-large-forward-v0.1.pt",
            "pl-opus-backward": f"{hu_path}/lm-pl-opus-large-backward-v0.1.pt",
            # Portuguese
            "pt-forward": f"{hu_path}/lm-pt-forward.pt",
            "pt-backward": f"{hu_path}/lm-pt-backward.pt",
            # Pubmed
            "pubmed-forward": f"{hu_path}/pubmed-forward.pt",
            "pubmed-backward": f"{hu_path}/pubmed-backward.pt",
            "pubmed-2015-forward": f"{hu_path}/pubmed-2015-fw-lm.pt",
            "pubmed-2015-backward": f"{hu_path}/pubmed-2015-bw-lm.pt",
            # Slovenian
            "sl-forward": f"{hu_path}/lm-sl-opus-large-forward-v0.1.pt",
            "sl-backward": f"{hu_path}/lm-sl-opus-large-backward-v0.1.pt",
            "sl-v0-forward": f"{hu_path}/lm-sl-large-forward-v0.1.pt",
            "sl-v0-backward": f"{hu_path}/lm-sl-large-backward-v0.1.pt",
            # Swedish
            "sv-forward": f"{hu_path}/lm-sv-opus-large-forward-v0.1.pt",
            "sv-backward": f"{hu_path}/lm-sv-opus-large-backward-v0.1.pt",
            "sv-v0-forward": f"{hu_path}/lm-sv-large-forward-v0.1.pt",
            "sv-v0-backward": f"{hu_path}/lm-sv-large-backward-v0.1.pt",
            # Tamil
            "ta-forward": f"{hu_path}/lm-ta-opus-large-forward-v0.1.pt",
            "ta-backward": f"{hu_path}/lm-ta-opus-large-backward-v0.1.pt",
            # Spanish clinical
            "es-clinical-forward": f"{hu_path}/es-clinical-forward.pt",
            "es-clinical-backward": f"{hu_path}/es-clinical-backward.pt",
            # CLEF HIPE Shared task
            "de-impresso-hipe-v1-forward": f"{clef_hipe_path}/de-hipe-flair-v1-forward/best-lm.pt",
            "de-impresso-hipe-v1-backward": f"{clef_hipe_path}/de-hipe-flair-v1-backward/best-lm.pt",
            "en-impresso-hipe-v1-forward": f"{clef_hipe_path}/en-flair-v1-forward/best-lm.pt",
            "en-impresso-hipe-v1-backward": f"{clef_hipe_path}/en-flair-v1-backward/best-lm.pt",
            "fr-impresso-hipe-v1-forward": f"{clef_hipe_path}/fr-hipe-flair-v1-forward/best-lm.pt",
            "fr-impresso-hipe-v1-backward": f"{clef_hipe_path}/fr-hipe-flair-v1-backward/best-lm.pt",
            # Amharic
            "am-forward": f"{am_path}/best-lm.pt",
            # Ukrainian
            "uk-forward": "https://huggingface.co/dchaplinsky/flair-uk-forward/resolve/main/best-lm.pt",
            "uk-backward": "https://huggingface.co/dchaplinsky/flair-uk-backward/resolve/main/best-lm.pt",
        }

        if isinstance(model, str):
            # load model if in pretrained model map
            if model.lower() in self.PRETRAINED_MODEL_ARCHIVE_MAP:
                base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[model.lower()]

                # Fix for CLEF HIPE models (avoid overwriting best-lm.pt in cache_dir)
                if "impresso-hipe" in model.lower():
                    cache_dir = cache_dir / model.lower()

                    # CLEF HIPE models are lowercased
                    self.is_lower = True

                model = cached_path(base_path, cache_dir=cache_dir)

            elif replace_with_language_code(model) in self.PRETRAINED_MODEL_ARCHIVE_MAP:
                base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[replace_with_language_code(model)]
                model = cached_path(base_path, cache_dir=cache_dir)

            elif not Path(model).exists():
                raise ValueError(f'The given model "{model}" is not available or is not a valid path.')

        from flair.models import LanguageModel

        if isinstance(model, LanguageModel):
            self.lm: LanguageModel = model
            self.name = f"Task-LSTM-{self.lm.hidden_size}-{self.lm.nlayers}-{self.lm.is_forward_lm}"
        else:
            self.lm = LanguageModel.load_language_model(model, has_decoder=has_decoder)
            self.name = str(model)

        if name is not None:
            self.name = name

        # embeddings are static if we don't do finetuning
        self.fine_tune = fine_tune
        self.static_embeddings = not fine_tune

        self.is_forward_lm: bool = self.lm.is_forward_lm
        self.with_whitespace: bool = with_whitespace
        self.tokenized_lm: bool = tokenized_lm
        self.chars_per_chunk: int = chars_per_chunk

        # embed a dummy sentence to determine embedding_length
        dummy_sentence: Sentence = Sentence("hello")
        embedded_dummy = self.embed(dummy_sentence)
        self.__embedding_length: int = len(embedded_dummy[0][0].get_embedding())

        # set to eval mode
        self.eval()

    def train(self, mode=True):
        # unless fine-tuning is set, do not set language model to train() in order to disallow language model dropout
        super().train(self.fine_tune and mode)

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        # gradients are enabled only if fine-tuning is enabled
        gradient_context = torch.enable_grad() if self.fine_tune else torch.no_grad()

        with gradient_context:
            # if this is not possible, use LM to generate embedding. First, get text sentences
            text_sentences = (
                [sentence.to_tokenized_string() for sentence in sentences]
                if self.tokenized_lm
                else [sentence.to_plain_string() for sentence in sentences]
            )

            if self.is_lower:
                text_sentences = [sentence.lower() for sentence in text_sentences]

            # older serialized LMs may lack a document_delimiter attribute
            start_marker = self.lm.document_delimiter if "document_delimiter" in self.lm.__dict__ else "\n"
            end_marker = " "

            # get hidden states from language model
            all_hidden_states_in_lm = self.lm.get_representation(
                text_sentences, start_marker, end_marker, self.chars_per_chunk
            )

            if not self.fine_tune:
                all_hidden_states_in_lm = all_hidden_states_in_lm.detach()

            # take first or last hidden states from language model as word representation
            for i, sentence in enumerate(sentences):
                sentence_text = sentence.to_tokenized_string() if self.tokenized_lm else sentence.to_plain_string()

                # character offsets into the LM output: forward counts from the start
                # marker onwards, backward counts down from the end of the sentence text
                offset_forward: int = len(start_marker)
                offset_backward: int = len(sentence_text) + len(start_marker)

                for token in sentence.tokens:
                    offset_forward += len(token.text)
                    if self.is_forward_lm:
                        offset_with_whitespace = offset_forward
                        offset_without_whitespace = offset_forward - 1
                    else:
                        offset_with_whitespace = offset_backward
                        offset_without_whitespace = offset_backward - 1

                    # offset mode that extracts at whitespace after last character
                    if self.with_whitespace:
                        embedding = all_hidden_states_in_lm[offset_with_whitespace, i, :]
                    # offset mode that extracts at last character
                    else:
                        embedding = all_hidden_states_in_lm[offset_without_whitespace, i, :]

                    if self.tokenized_lm or token.whitespace_after > 0:
                        offset_forward += 1
                        offset_backward -= 1

                    offset_backward -= len(token.text)

                    token.set_embedding(self.name, embedding)

            # free the LM output tensor eagerly (can be large)
            del all_hidden_states_in_lm

        return sentences

    def __str__(self) -> str:
        return self.name

    def to_params(self):
        # serialize the wrapped LanguageModel's constructor args rather than the module itself
        return {
            "fine_tune": self.fine_tune,
            "chars_per_chunk": self.chars_per_chunk,
            "is_lower": self.is_lower,
            "tokenized_lm": self.tokenized_lm,
            "model_params": {
                "dictionary": self.lm.dictionary,
                "is_forward_lm": self.lm.is_forward_lm,
                "hidden_size": self.lm.hidden_size,
                "nlayers": self.lm.nlayers,
                "embedding_size": self.lm.embedding_size,
                "nout": self.lm.nout,
                "document_delimiter": self.lm.document_delimiter,
                "dropout": self.lm.dropout,
                "has_decoder": self.lm.decoder is not None,
            },
            "name": self.name,
        }

    @classmethod
    def from_params(cls, params):
        model_params = params.pop("model_params")
        from flair.models import LanguageModel

        lm = LanguageModel(**model_params)
        return cls(lm, **params)

    def __setstate__(self, d: Dict[str, Any]):
        # make compatible with old models: fill in defaults for attributes added later
        d.setdefault("fine_tune", False)
        d.setdefault("chars_per_chunk", 512)
        d.setdefault("with_whitespace", True)
        d.setdefault("tokenized_lm", True)
        d.setdefault("is_lower", False)
        d.setdefault("field", None)
        super().__setstate__(d)
@register_embeddings
class PooledFlairEmbeddings(TokenEmbeddings):
def __init__(
self,
contextual_embeddings: Union[str, FlairEmbeddings],
pooling: str = "min",
only_capitalized: bool = False,
**kwargs,
) -> None:
super().__init__()
self.instance_parameters = self.get_instance_parameters(locals=locals())
# use the character language model embeddings as basis
if isinstance(contextual_embeddings, str):
self.context_embeddings: FlairEmbeddings = FlairEmbeddings(contextual_embeddings, **kwargs)
else:
self.context_embeddings = contextual_embeddings
# length is twice the original character LM embedding length
self.__embedding_length = self.context_embeddings.embedding_length * 2
self.name = self.context_embeddings.name + "-context"
# these fields are for the embedding memory
self.word_embeddings: Dict[str, torch.Tensor] = {}
self.word_count: Dict[str, int] = {}
# whether to add only capitalized words to memory (faster runtime and lower memory consumption)
self.only_capitalized = only_capitalized
# we re-compute embeddings dynamically at each epoch
self.static_embeddings = False
# set the memory method
self.pooling = pooling
def train(self, mode=True):
super().train(mode=mode)
if mode:
# memory is wiped each time we do a training run
log.info("train mode resetting embeddings")
self.word_embeddings = {}
self.word_count = {}
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.context_embeddings.embed(sentences)
# if we keep a pooling, it needs to be updated continuously
for sentence in sentences:
for token in sentence.tokens:
# update embedding
local_embedding = token._embeddings[self.context_embeddings.name].cpu()
# check token.text is empty or not
if token.text and (token.text[0].isupper() or not self.only_capitalized):
if token.text not in self.word_embeddings:
self.word_embeddings[token.text] = local_embedding
self.word_count[token.text] = 1
else:
# set aggregation operation
if self.pooling == "mean":
aggregated_embedding = torch.add(self.word_embeddings[token.text], local_embedding)
elif self.pooling == "fade":
aggregated_embedding = torch.add(self.word_embeddings[token.text], local_embedding)
aggregated_embedding /= 2
elif self.pooling == "max":
aggregated_embedding = torch.max(self.word_embeddings[token.text], local_embedding)
elif self.pooling == "min":
aggregated_embedding = torch.min(self.word_embeddings[token.text], local_embedding)
self.word_embeddings[token.text] = aggregated_embedding
self.word_count[token.text] += 1
# add embeddings after updating
for sentence in sentences:
for token in sentence.tokens:
if token.text in self.word_embeddings:
base = (
self.word_embeddings[token.text] / self.word_count[token.text]
if self.pooling == "mean"
else self.word_embeddings[token.text]
)
else:
base = token._embeddings[self.context_embeddings.name]
token.set_embedding(self.name, base)
return sentences
    @property
    def embedding_length(self) -> int:
        # fixed at construction (set before this point); presumably equals the
        # wrapped contextual embedding's length -- TODO confirm
        return self.__embedding_length
def get_names(self) -> List[str]:
return [self.name, self.context_embeddings.name]
    def __setstate__(self, d: Dict[str, Any]):
        """Restore pickled state, keeping the pooled word memory on CPU."""
        super().__setstate__(d)
        if flair.device.type != "cpu":
            # when running on an accelerator, the (potentially large) per-word
            # memory is kept on CPU to save device memory
            for key in self.word_embeddings:
                self.word_embeddings[key] = self.word_embeddings[key].cpu()
@classmethod
def from_params(cls, params):
return cls(contextual_embeddings=load_embeddings(params.pop("contextual_embeddings")), **params)
def to_params(self):
return {
"pooling": self.pooling,
"only_capitalized": self.only_capitalized,
"contextual_embeddings": self.context_embeddings.save_embeddings(use_state_dict=False),
}
@register_embeddings
class FastTextEmbeddings(TokenEmbeddings):
    """FastText Embeddings with oov functionality."""

    def __init__(
        self, embeddings: str, use_local: bool = True, field: Optional[str] = None, name: Optional[str] = None
    ) -> None:
        """Initializes fasttext word embeddings.

        Constructor downloads required embedding file and stores in cache if use_local is False.

        :param embeddings: path to your embeddings '.bin' file
        :param use_local: set this to False if you are using embeddings from a remote source
        :param field: if set, embed the value of this token label instead of the token text
        :param name: unused here; the name is derived from the embeddings path
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())

        cache_dir = Path("embeddings")

        if use_local:
            embeddings_path = Path(embeddings)
            if not embeddings_path.exists():
                raise ValueError(f'The given embeddings "{embeddings}" is not available or is not a valid path.')
        else:
            # remote source: download once and reuse the cached copy
            embeddings_path = cached_path(f"{embeddings}", cache_dir=cache_dir)

        self.embeddings = embeddings_path

        self.name: str = str(embeddings_path)

        # fasttext vectors never change after loading
        self.static_embeddings = True

        if embeddings_path.suffix == ".bin":
            # native facebook fasttext binary format
            self.precomputed_word_embeddings: FastTextKeyedVectors = load_facebook_vectors(str(embeddings_path))
        else:
            # gensim-saved FastTextKeyedVectors format
            self.precomputed_word_embeddings = FastTextKeyedVectors.load(str(embeddings_path))

        self.__embedding_length: int = self.precomputed_word_embeddings.vector_size

        self.field = field
        super().__init__()

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    @instance_lru_cache(maxsize=10000, typed=False)
    def get_cached_vec(self, word: str) -> torch.Tensor:
        # fasttext composes vectors from subwords, so OOV words still get one
        word_embedding = self.precomputed_word_embeddings.get_vector(word)
        word_embedding = torch.tensor(word_embedding.tolist(), device=flair.device, dtype=torch.float)
        return word_embedding

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Attach a fasttext vector to every token of every sentence."""
        for sentence in sentences:
            for token in sentence.tokens:
                # embed either the token text or the value of a chosen label field
                word = token.text if self.field is None else token.get_label(self.field).value
                word_embedding = self.get_cached_vec(word)
                token.set_embedding(self.name, word_embedding)

        return sentences

    def __str__(self) -> str:
        return self.name

    def extra_repr(self):
        return f"'{self.embeddings}'"

    @classmethod
    def from_params(cls, params):
        """Rebuild from serialized params by materializing the model binary in a temp file."""
        fasttext_binary = params.pop("fasttext_binary")
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            out_path = temp_path / "fasttext.model"
            out_path.write_bytes(fasttext_binary)
            return cls(**params, embeddings=str(out_path), use_local=True)

    def to_params(self):
        """Serialize params, inlining the model file as bytes so the save is self-contained."""
        # NOTE(review): gensim's save() may spill large arrays into sidecar .npy
        # files next to the main file; verify single-file saves for big models
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            out_path = temp_path / "fasttext.model"
            self.precomputed_word_embeddings.save(str(out_path))
            return {"name": self.name, "field": self.field, "fasttext_binary": out_path.read_bytes()}
@register_embeddings
class OneHotEmbeddings(TokenEmbeddings):
    """One-hot encoded embeddings."""

    def __init__(
        self,
        vocab_dictionary: Dictionary,
        field: str = "text",
        embedding_length: int = 300,
        stable: bool = False,
    ) -> None:
        """Initializes one-hot encoded word embeddings and a trainable embedding layer.

        :param vocab_dictionary: the vocabulary that will be encoded
        :param field: by default, the 'text' of tokens is embedded, but you can also embed tags such as 'pos'
        :param embedding_length: dimensionality of the trainable embedding layer
        :param stable: set stable=True to use the stable embeddings as described in https://arxiv.org/abs/2110.02861
        """
        super().__init__()
        self.name = f"one-hot-{field}"
        # the lookup table is trainable, so embeddings must be recomputed every pass
        self.static_embeddings = False
        self.field = field
        self.instance_parameters = self.get_instance_parameters(locals=locals())

        self.__embedding_length = embedding_length

        self.vocab_dictionary = vocab_dictionary

        log.info(self.vocab_dictionary.idx2item)
        log.info(f"vocabulary size of {len(self.vocab_dictionary)}")

        # model architecture: trainable lookup table indexed by vocabulary id
        self.embedding_layer = nn.Embedding(len(self.vocab_dictionary), self.__embedding_length)
        nn.init.xavier_uniform_(self.embedding_layer.weight)
        if stable:
            # layer norm over the embedding dim stabilizes training (see paper above)
            self.layer_norm: Optional[nn.LayerNorm] = nn.LayerNorm(embedding_length)
        else:
            self.layer_norm = None

        self.to(flair.device)
        self.eval()

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Look up the trainable vector for every token (by text or by label field)."""
        tokens = [t for sentence in sentences for t in sentence.tokens]

        if self.field == "text":
            one_hot_sentences = [self.vocab_dictionary.get_idx_for_item(t.text) for t in tokens]
        else:
            one_hot_sentences = [self.vocab_dictionary.get_idx_for_item(t.get_label(self.field).value) for t in tokens]

        one_hot_sentences_tensor = torch.tensor(one_hot_sentences, dtype=torch.long).to(flair.device)

        embedded = self.embedding_layer.forward(one_hot_sentences_tensor)
        if self.layer_norm is not None:
            embedded = self.layer_norm(embedded)

        for emb, token in zip(embedded, tokens):
            token.set_embedding(self.name, emb)

        return sentences

    def __str__(self) -> str:
        return self.name

    @classmethod
    def from_corpus(cls, corpus: Corpus, field: str = "text", min_freq: int = 3, **kwargs):
        """Build the vocabulary from the corpus' training split, keeping items with frequency >= min_freq."""
        vocab_dictionary = Dictionary()
        assert corpus.train is not None
        tokens = [s.tokens for s in _iter_dataset(corpus.train)]
        tokens = [token for sublist in tokens for token in sublist]

        if field == "text":
            most_common = Counter([t.text for t in tokens]).most_common()
        else:
            most_common = Counter([t.get_label(field).value for t in tokens]).most_common()

        tokens = []
        # most_common() is sorted by frequency, so we can stop at the first rare item
        for token, freq in most_common:
            if freq < min_freq:
                break
            tokens.append(token)

        for token in tokens:
            vocab_dictionary.add_item(token)
        return cls(vocab_dictionary, field=field, **kwargs)

    @classmethod
    def from_params(cls, params):
        return cls(**params)

    def to_params(self):
        return {
            "vocab_dictionary": self.vocab_dictionary,
            "field": self.field,
            "embedding_length": self.__embedding_length,
            "stable": self.layer_norm is not None,
        }
@register_embeddings
class HashEmbeddings(TokenEmbeddings):
    """Standard embeddings with Hashing Trick."""

    def __init__(self, num_embeddings: int = 1000, embedding_length: int = 300, hash_method="md5") -> None:
        """Initialize a trainable embedding table addressed by hashed token text.

        :param num_embeddings: number of rows in the embedding table (hash buckets)
        :param embedding_length: dimensionality of each embedding vector
        :param hash_method: hashlib algorithm name used to hash token texts
        """
        super().__init__()
        self.name = "hash"
        # the table is trainable, so embeddings must be recomputed every pass
        self.static_embeddings = False
        self.instance_parameters = self.get_instance_parameters(locals=locals())

        self.__num_embeddings = num_embeddings
        self.__embedding_length = embedding_length

        self.__hash_method = hash_method

        # model architecture
        self.embedding_layer = torch.nn.Embedding(self.__num_embeddings, self.__embedding_length)
        torch.nn.init.xavier_uniform_(self.embedding_layer.weight)

        self.to(flair.device)
        self.eval()

    @property
    def num_embeddings(self) -> int:
        return self.__num_embeddings

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        """Embed all tokens by hashing their text into a bucket of the embedding table."""

        def get_idx_for_item(text):
            # deterministic hash -> bucket index; collisions are accepted by design
            hash_function = hashlib.new(self.__hash_method)
            hash_function.update(bytes(str(text), "utf-8"))
            return int(hash_function.hexdigest(), 16) % self.__num_embeddings

        context_idxs = [get_idx_for_item(t.text) for sentence in sentences for t in sentence.tokens]

        hash_sentences = torch.tensor(context_idxs, dtype=torch.long).to(flair.device)

        embedded = self.embedding_layer.forward(hash_sentences)

        index = 0
        for sentence in sentences:
            for token in sentence:
                embedding = embedded[index]
                token.set_embedding(self.name, embedding)
                index += 1
        # NOTE(review): unlike sibling embedding classes this does not return
        # `sentences` -- presumably callers ignore the return value; confirm.

    def __str__(self) -> str:
        return self.name

    @classmethod
    def from_params(cls, params):
        return cls(**params)

    def to_params(self):
        return {
            "num_embeddings": self.num_embeddings,
            "embedding_length": self.embedding_length,
            "hash_method": self.__hash_method,
        }
@register_embeddings
class MuseCrosslingualEmbeddings(TokenEmbeddings):
    """Cross-lingual MUSE word embeddings, downloaded lazily per sentence language."""

    def __init__(
        self,
    ) -> None:
        self.name: str = "muse-crosslingual"
        # pretrained vectors never change after loading
        self.static_embeddings = True
        self.__embedding_length: int = 300
        # language code -> loaded gensim KeyedVectors, filled on demand
        self.language_embeddings: Dict[str, Any] = {}
        super().__init__()
        self.eval()

    @instance_lru_cache(maxsize=10000, typed=False)
    def get_cached_vec(self, language_code: str, word: str) -> torch.Tensor:
        """Look up a word vector, backing off over casing/digit variants; zeros if OOV."""
        model = self.language_embeddings[language_code]
        lowered = word.lower()
        # back-off order: exact, lowercase, digits as '#', digits as '0'
        candidates = (
            word,
            lowered,
            re.sub(r"\d", "#", lowered),
            re.sub(r"\d", "0", lowered),
        )
        for candidate in candidates:
            if candidate in model:
                vector = model[candidate]
                break
        else:
            vector = np.zeros(self.embedding_length, dtype="float")
        return torch.tensor(vector, device=flair.device, dtype=torch.float)

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Embed each sentence with the MUSE vectors of its detected language."""
        supported_codes = {
            "en",
            "de",
            "bg",
            "ca",
            "hr",
            "cs",
            "da",
            "nl",
            "et",
            "fi",
            "fr",
            "el",
            "he",
            "hu",
            "id",
            "it",
            "mk",
            "no",
            # "pl" is deliberately excluded
            "pt",
            "ro",
            "ru",
            "sk",
        }
        for sentence in sentences:
            language_code = sentence.get_language_code()
            if language_code not in supported_codes:
                # fall back to English vectors for unsupported languages
                language_code = "en"

            if language_code not in self.language_embeddings:
                log.info(f"Loading up MUSE embeddings for '{language_code}'!")

                # download if necessary; the gensim file references the .npy
                # array, so both must be fetched
                hu_path: str = "https://flair.informatik.hu-berlin.de/resources/embeddings/muse"
                cache_dir = Path("embeddings") / "MUSE"
                cached_path(
                    f"{hu_path}/muse.{language_code}.vec.gensim.vectors.npy",
                    cache_dir=cache_dir,
                )
                embeddings_file = cached_path(f"{hu_path}/muse.{language_code}.vec.gensim", cache_dir=cache_dir)

                # load the model
                self.language_embeddings[language_code] = gensim.models.KeyedVectors.load(str(embeddings_file))

            for token in sentence.tokens:
                token.set_embedding(self.name, self.get_cached_vec(language_code=language_code, word=token.text))

        return sentences

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def __str__(self) -> str:
        return self.name

    @classmethod
    def from_params(cls, params):
        return cls()

    def to_params(self):
        return {}
# TODO: keep for backwards compatibility, but remove in future
class BPEmbSerializable(BPEmb):
    """BPEmb subclass that can be pickled: the sentencepiece model travels as bytes."""

    def __getstate__(self):
        state = self.__dict__.copy()
        # save the sentence piece model as binary file (not as path which may change)
        with self.model_file.open(mode="rb") as fin:
            state["spm_model_binary"] = fin.read()
        # the loaded sentencepiece processor object itself is not picklable
        state["spm"] = None
        return state

    def __setstate__(self, state):
        from bpemb.util import sentencepiece_load

        model_file = self.model_tpl.format(lang=state["lang"], vs=state["vs"])
        self.__dict__ = state

        # write out the binary sentence piece model into the expected directory
        self.cache_dir: Path = flair.cache_root / "embeddings"

        if "spm_model_binary" in self.__dict__:
            # if the model was saved as binary and it is not found on disk, write to appropriate path
            if not os.path.exists(self.cache_dir / state["lang"]):
                os.makedirs(self.cache_dir / state["lang"])
            self.model_file = self.cache_dir / model_file
            with open(self.model_file, "wb") as out:
                out.write(self.__dict__["spm_model_binary"])
        else:
            # otherwise, use normal process and potentially trigger another download
            self.model_file = self._load_file(model_file)

        # once the model is there, load it with sentence piece
        state["spm"] = sentencepiece_load(self.model_file)
@register_embeddings
class BytePairEmbeddings(TokenEmbeddings):
    """Subword-level embeddings based on byte-pair encoding (BPEmb)."""

    def __init__(
        self,
        language: Optional[str] = None,
        dim: int = 50,
        syllables: int = 100000,
        cache_dir=None,
        model_file_path: Optional[Path] = None,
        embedding_file_path: Optional[Path] = None,
        name: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Initializes BP embeddings.

        Constructor downloads required files if not there.

        :param language: language code of a pretrained BPEmb model, or None to load custom files
        :param dim: embedding dimensionality (ignored when loading custom files)
        :param syllables: BPE vocabulary size
        :param cache_dir: where downloaded models are cached (defaults to flair's cache root)
        :param model_file_path: custom sentencepiece model, required when language is None
        :param embedding_file_path: custom embedding file, required when language is None
        :param name: optional name override
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())

        if not cache_dir:
            cache_dir = flair.cache_root / "embeddings"
        if language:
            self.name: str = f"bpe-{language}-{syllables}-{dim}"
        else:
            assert (
                model_file_path is not None and embedding_file_path is not None
            ), "Need to specify model_file_path and embedding_file_path if no language is given in BytePairEmbeddings(...)"
            # dimensionality is read from the custom files instead
            dim = None  # type: ignore[assignment]

        self.embedder = BPEmbSerializable(
            lang=language,
            vs=syllables,
            dim=dim,
            cache_dir=cache_dir,
            model_file=model_file_path,
            emb_file=embedding_file_path,
            **kwargs,
        )

        if not language:
            # derive a name from the actual vocabulary size and dimensionality
            self.name = f"bpe-custom-{self.embedder.vs}-{self.embedder.dim}"
        if name is not None:
            self.name = name
        self.static_embeddings = True

        # each word is represented by first + last subword vector, hence * 2
        self.__embedding_length: int = self.embedder.emb.vector_size * 2
        super().__init__()
        self.eval()

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
        """Embed each token as the concatenation of its first and last subword vectors."""
        for _i, sentence in enumerate(sentences):
            for token, _token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
                word = token.text

                if word.strip() == "":
                    # empty words get no embedding
                    token.set_embedding(self.name, torch.zeros(self.embedding_length, dtype=torch.float))
                else:
                    # all other words get embedded
                    embeddings = self.embedder.embed(word.lower())
                    embedding = np.concatenate((embeddings[0], embeddings[len(embeddings) - 1]))
                    token.set_embedding(self.name, torch.tensor(embedding, dtype=torch.float))

        return sentences

    def __str__(self) -> str:
        return self.name

    def extra_repr(self):
        return f"model={self.name}"

    @classmethod
    def from_params(cls, params):
        """Rebuild from serialized params by materializing model + vectors in temp files."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)
            model_file_path = temp_path / "model.spm"
            model_file_path.write_bytes(params["spm_model_binary"])
            embedding_file_path = temp_path / "word2vec.bin"
            embedding_file_path.write_bytes(params["word2vec_binary"])
            return cls(name=params["name"], model_file_path=model_file_path, embedding_file_path=embedding_file_path)

    def to_params(self):
        """Serialize params, inlining the sentencepiece model and word2vec vectors as bytes."""
        if not self.embedder.emb_file.exists():
            # ensure a word2vec binary exists on disk that we can inline below
            self.embedder.emb_file = self.embedder.emb_file.with_suffix(".bin")
            self.embedder.emb.save_word2vec_format(str(self.embedder.emb_file), binary=True)
        return {
            "name": self.name,
            "spm_model_binary": self.embedder.spm.serialized_model_proto(),
            "word2vec_binary": self.embedder.emb_file.read_bytes(),
        }
@register_embeddings
class NILCEmbeddings(WordEmbeddings):
    """Portuguese word embeddings trained by the NILC Lab (http://www.nilc.icmc.usp.br/embeddings)."""

    def __init__(self, embeddings: str, model: str = "skip", size: int = 100) -> None:
        """Initializes portuguese classic word embeddings trained by NILC Lab.

        See: http://www.nilc.icmc.usp.br/embeddings
        Constructor downloads required files if not there.

        :param embeddings: one of: 'fasttext', 'glove', 'wang2vec' or 'word2vec'
        :param model: one of: 'skip' or 'cbow'. This is not applicable to glove.
        :param size: one of: 50, 100, 300, 600 or 1000.
        """
        self.instance_parameters = self.get_instance_parameters(locals=locals())

        base_path = "http://143.107.183.175:22980/download.php?file=embeddings/"

        cache_dir = Path("embeddings") / ("nilc-" + embeddings.lower())

        # GLOVE embeddings
        if embeddings.lower() == "glove":
            cached_path(f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir)
            embeddings_path = f"{base_path}{embeddings}/{embeddings}_s{size}.zip"

        elif embeddings.lower() in ["fasttext", "wang2vec", "word2vec"]:
            # 'model' selects the training objective ('skip' or 'cbow')
            cached_path(f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir)
            embeddings_path = f"{base_path}{embeddings}/{model}_s{size}.zip"

        elif not Path(embeddings).exists():
            raise ValueError(f'The given embeddings "{embeddings}" is not available or is not a valid path.')
        else:
            # a local path to an embeddings file was passed
            embeddings_path = embeddings

        log.info("Reading embeddings from %s" % embeddings_path)
        super().__init__(
            embeddings=str(extract_single_zip_file(embeddings_path, cache_dir=cache_dir)), name="NILC-" + embeddings
        )

    @classmethod
    def from_params(cls, params: Dict[str, Any]) -> "WordEmbeddings":
        # no need to recreate as NILCEmbeddings: the saved state is a plain WordEmbeddings
        return WordEmbeddings(embeddings=None, **params)
def replace_with_language_code(string: str):
    """Replace spelled-out language prefixes in a model name with ISO 639-1 codes.

    For example 'german-forward' becomes 'de-forward'. Unknown prefixes are left
    untouched, so arbitrary model names pass through unchanged.

    :param string: model name possibly starting with a spelled-out language prefix
    :return: the model name with language prefixes replaced by two-letter codes
    """
    # spelled-out prefix -> ISO 639-1 code; both 'farsi-' and 'persian-' map to 'fa-'
    prefix_to_code = {
        "arabic-": "ar-",
        "basque-": "eu-",
        "bulgarian-": "bg-",
        "croatian-": "hr-",
        "czech-": "cs-",
        "danish-": "da-",
        "dutch-": "nl-",
        "farsi-": "fa-",
        "persian-": "fa-",
        "finnish-": "fi-",
        "french-": "fr-",
        "german-": "de-",
        "hebrew-": "he-",
        "hindi-": "hi-",
        "indonesian-": "id-",
        "italian-": "it-",
        "japanese-": "ja-",
        # bugfix: was mapped to "no" (missing hyphen), producing names like "noforward"
        "norwegian-": "no-",
        "polish-": "pl-",
        "portuguese-": "pt-",
        "slovenian-": "sl-",
        "spanish-": "es-",
        "swedish-": "sv-",
    }
    for prefix, code in prefix_to_code.items():
        string = string.replace(prefix, code)
    return string
| 64,771 | 40.734536 | 131 | py |
flair | flair-master/flair/datasets/text_image.py | import json
import logging
import os
import urllib
from pathlib import Path
from typing import List
import numpy as np
import torch.utils.data.dataloader
from torch.utils.data import Dataset
from tqdm import tqdm
from flair.data import Corpus, DataPair, FlairDataset, Image, Sentence
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class FeideggerCorpus(Corpus):
    """Corpus of FEIDEGGER fashion-image descriptions (Zalando Research).

    Downloads the release JSON and all referenced images into the cache, then
    splits the data into train (splits 0-7), dev (split 8) and test (split 9).
    """

    def __init__(self, **kwargs) -> None:
        """Create the corpus; kwargs are forwarded to FeideggerDataset (e.g. lowercase=True)."""
        dataset = "feidegger"

        # cache Feidegger config file
        json_link = "https://raw.githubusercontent.com/zalandoresearch/feidegger/master/data/FEIDEGGER_release_1.1.json"
        json_local_path = cached_path(json_link, Path("datasets") / dataset)

        # cache Feidegger images
        with json_local_path.open(encoding="utf-8") as fin:
            dataset_info = json.load(fin)
        images_cache_folder = os.path.join(os.path.dirname(json_local_path), "images")
        if not os.path.isdir(images_cache_folder):
            os.mkdir(images_cache_folder)
        for image_info in tqdm(dataset_info):
            name = os.path.basename(image_info["url"])
            filename = os.path.join(images_cache_folder, name)
            if not os.path.isfile(filename):
                # download each image exactly once
                urllib.request.urlretrieve(image_info["url"], filename)
            # replace image URL with local cached file
            image_info["url"] = filename

        feidegger_dataset: Dataset = FeideggerDataset(dataset_info, **kwargs)

        # splits 0-7 form the training set
        train_indices = list(np.where(np.in1d(feidegger_dataset.split, list(range(8))))[0])  # type: ignore[attr-defined]
        train = torch.utils.data.dataset.Subset(feidegger_dataset, train_indices)

        # split 8 is dev
        dev_indices = list(np.where(np.in1d(feidegger_dataset.split, [8]))[0])  # type: ignore[attr-defined]
        dev = torch.utils.data.dataset.Subset(feidegger_dataset, dev_indices)

        # split 9 is test
        test_indices = list(np.where(np.in1d(feidegger_dataset.split, [9]))[0])  # type: ignore[attr-defined]
        test = torch.utils.data.dataset.Subset(feidegger_dataset, test_indices)

        super().__init__(train, dev, test, name="feidegger")
class FeideggerDataset(FlairDataset):
    """In-memory dataset that pairs every FEIDEGGER caption with its product image."""

    def __init__(self, dataset_info, **kwargs) -> None:
        super().__init__()

        self.data_points: List[DataPair] = []
        self.split: List[int] = []

        # captions are optionally lowercased before tokenization
        lowercase = bool(kwargs.get("lowercase"))

        for image_info in dataset_info:
            # one Image object shared by all captions of this product
            image = Image(imageURL=image_info["url"])
            split_id = int(image_info["split"])
            for caption in image_info["descriptions"]:
                text = caption.lower() if lowercase else caption
                # append Sentence-Image data point, one per caption
                self.data_points.append(DataPair(Sentence(text, use_tokenizer=True), image))
                self.split.append(split_id)

    def __len__(self) -> int:
        return len(self.data_points)

    def __getitem__(self, index: int = 0) -> DataPair:
        return self.data_points[index]

    def is_in_memory(self) -> bool:
        return True
| 3,092 | 35.821429 | 121 | py |
flair | flair-master/flair/datasets/base.py | import logging
from abc import abstractmethod
from pathlib import Path
from typing import Generic, List, Optional, Union
import torch.utils.data.dataloader
from deprecated import deprecated
from flair.data import DT, FlairDataset, Sentence, Tokenizer
from flair.tokenization import SegtokTokenizer, SpaceTokenizer
log = logging.getLogger("flair")
class DataLoader(torch.utils.data.dataloader.DataLoader):
    """Torch DataLoader preconfigured for flair: single-process, list-collated batches."""

    def __init__(
        self,
        dataset,
        batch_size=1,
        shuffle=False,
        sampler=None,
        batch_sampler=None,
        drop_last=False,
        timeout=0,
        worker_init_fn=None,
    ) -> None:
        loader_kwargs = dict(
            batch_size=batch_size,
            shuffle=shuffle,
            sampler=sampler,
            batch_sampler=batch_sampler,
            num_workers=0,  # all preprocessing happens in the main process
            collate_fn=list,  # keep batches as plain lists of data points
            drop_last=drop_last,
            timeout=timeout,
            worker_init_fn=worker_init_fn,
        )
        super().__init__(dataset, **loader_kwargs)
class FlairDatapointDataset(FlairDataset, Generic[DT]):
    """A simple Dataset object to wrap a List of Datapoints, for example Sentences."""

    def __init__(self, datapoints: Union[DT, List[DT]]) -> None:
        """Instantiate FlairDatapointDataset.

        :param datapoints: DT or List of DT that make up FlairDatapointDataset
        """
        # wrap a single datapoint so the dataset is always list-backed
        self.datapoints = datapoints if isinstance(datapoints, list) else [datapoints]

    def is_in_memory(self) -> bool:
        return True

    def __len__(self) -> int:
        return len(self.datapoints)

    def __getitem__(self, index: int = 0) -> DT:
        return self.datapoints[index]
class SentenceDataset(FlairDatapointDataset):
    """Deprecated alias of FlairDatapointDataset (renamed in flair 0.11)."""

    @deprecated(version="0.11", reason="The 'SentenceDataset' class was renamed to 'FlairDatapointDataset'")
    def __init__(self, sentences: Union[Sentence, List[Sentence]]) -> None:
        super().__init__(sentences)
class StringDataset(FlairDataset):
    """A Dataset taking string as input and returning Sentence during iteration."""

    def __init__(
        self,
        texts: Union[str, List[str]],
        use_tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
    ) -> None:
        """Instantiate StringDataset.

        :param texts: a string or List of string that make up StringDataset
        :param use_tokenizer: Custom tokenizer to use (default is SpaceTokenizer,
        more advanced options are SegTokTokenizer to use segtok or SpacyTokenizer to use Spacy library models
        if available). Check the code of subclasses of Tokenizer to implement your own (if you need it).
        If instead of providing a function, this parameter is just set to True, SegTokTokenizer will be used.
        """
        # cast to list if necessary
        if isinstance(texts, str):
            texts = [texts]
        self.texts = texts
        self.use_tokenizer = use_tokenizer

    # bugfix: removed an erroneous @abstractmethod decorator from this concrete
    # override -- it would have made the class uninstantiable under ABCMeta and
    # was misleading otherwise
    def is_in_memory(self) -> bool:
        return True

    def __len__(self) -> int:
        return len(self.texts)

    def __getitem__(self, index: int = 0) -> Sentence:
        # sentences are created lazily: tokenization happens on access
        text = self.texts[index]
        return Sentence(text, use_tokenizer=self.use_tokenizer)
class MongoDataset(FlairDataset):
    """Dataset backed by a MongoDB collection; each document yields one Sentence."""

    def __init__(
        self,
        query: str,
        host: str,
        port: int,
        database: str,
        collection: str,
        text_field: str,
        categories_field: Optional[List[str]] = None,
        max_tokens_per_doc: int = -1,
        max_chars_per_doc: int = -1,
        tokenizer: Tokenizer = SegtokTokenizer(),
        in_memory: bool = True,
        tag_type: str = "class",
    ) -> None:
        """Reads Mongo collections.

        Each collection should contain one document/text per item.

        Each item should have the following format:
        {
        'Beskrivning': 'Abrahamsby. Gård i Gottröra sn, Långhundra hd, Stockholms län, nära Långsjön.',
        'Län':'Stockholms län',
        'Härad': 'Långhundra',
        'Församling': 'Gottröra',
        'Plats': 'Abrahamsby'
        }

        :param query: Query, e.g. {'Län': 'Stockholms län'}
        :param host: Host, e.g. 'localhost',
        :param port: Port, e.g. 27017
        :param database: Database, e.g. 'rosenberg',
        :param collection: Collection, e.g. 'book',
        :param text_field: Text field, e.g. 'Beskrivning',
        :param categories_field: List of category fields, e.g ['Län', 'Härad', 'Tingslag', 'Församling', 'Plats'],
        :param max_tokens_per_doc: If set, truncates each Sentence to a maximum number of Tokens
        :param max_chars_per_doc: If set, truncates each Sentence to a maximum number of chars
        :param tokenizer: Custom tokenizer to use (default SegtokTokenizer)
        :param in_memory: If True, keeps dataset as Sentences in memory, otherwise only keeps strings
        :param tag_type: label type the categories are added under
        :return: list of sentences
        """
        # first, check if pymongo is installed
        try:
            import pymongo
        except ModuleNotFoundError:
            log.warning("-" * 100)
            log.warning('ATTENTION! The library "pymongo" is not installed!')
            log.warning('To use MongoDataset, please first install with "pip install pymongo"')
            log.warning("-" * 100)
            # bugfix: re-raise instead of continuing into a confusing NameError below
            raise

        self.in_memory = in_memory
        self.tokenizer = tokenizer

        if self.in_memory:
            self.sentences = []
        else:
            self.indices = []

        self.total_sentence_count: int = 0
        self.max_chars_per_doc = max_chars_per_doc
        self.max_tokens_per_doc = max_tokens_per_doc

        self.__connection = pymongo.MongoClient(host, port)
        self.__cursor = self.__connection[database][collection]

        self.text = text_field
        self.categories = categories_field if categories_field is not None else []
        self.tag_type = tag_type

        start = 0

        if self.in_memory:
            for document in self.__cursor.find(filter=query, skip=start, limit=0):
                sentence = self._parse_document_to_sentence(
                    document[self.text],
                    [document[_] if _ in document else "" for _ in self.categories],
                    tokenizer,
                )
                if sentence is not None and len(sentence.tokens) > 0:
                    self.sentences.append(sentence)
                    self.total_sentence_count += 1
        else:
            # bugfix: honor the query filter here (previously find() / count_documents()
            # ignored it, and count_documents() without a filter raises TypeError)
            self.indices = self.__cursor.find(filter=query).distinct("_id")
            self.total_sentence_count = self.__cursor.count_documents(filter=query)

    def _parse_document_to_sentence(
        self,
        text: str,
        labels: List[str],
        tokenizer: Union[bool, Tokenizer],
    ):
        """Build a labeled, optionally truncated Sentence from a document's text; None if empty."""
        if self.max_chars_per_doc > 0:
            text = text[: self.max_chars_per_doc]

        if text and labels:
            sentence = Sentence(text, use_tokenizer=tokenizer)
            for label in labels:
                sentence.add_label(self.tag_type, label)

            if self.max_tokens_per_doc > 0:
                sentence.tokens = sentence.tokens[: min(len(sentence), self.max_tokens_per_doc)]

            return sentence
        return None

    def is_in_memory(self) -> bool:
        return self.in_memory

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        if self.in_memory:
            return self.sentences[index]
        else:
            # lazy mode: fetch and parse the document on every access
            document = self.__cursor.find_one({"_id": index})
            sentence = self._parse_document_to_sentence(
                document[self.text],
                [document[_] if _ in document else "" for _ in self.categories],
                self.tokenizer,
            )
            return sentence
def find_train_dev_test_files(data_folder, dev_file, test_file, train_file, autofind_splits=True):
    """Resolve the train/dev/test file paths inside a corpus folder.

    Explicitly passed file names are resolved relative to ``data_folder``. When
    ``train_file`` is None and ``autofind_splits`` is True, the folder is scanned
    for files whose names contain "train", "dev"/"testa" and "testb"; a second
    pass picks any file containing "test" if no test file was found.

    :param data_folder: folder holding the corpus files (str or Path)
    :param dev_file: dev split file name, or None to auto-detect
    :param test_file: test split file name, or None to auto-detect
    :param train_file: train split file name, or None to auto-detect
    :param autofind_splits: whether to scan the folder for missing splits
    :return: tuple (dev_file, test_file, train_file) as Paths (entries may be None)
    """
    # bugfix/idiom: was `type(data_folder) == str`; isinstance also accepts str subclasses
    if isinstance(data_folder, str):
        data_folder = Path(data_folder)

    if train_file is not None:
        train_file = data_folder / train_file
    if test_file is not None:
        test_file = data_folder / test_file
    if dev_file is not None:
        dev_file = data_folder / dev_file

    # compressed archives and editor swap files are never corpus splits
    suffixes_to_ignore = {".gz", ".swp"}

    # automatically identify train / test / dev files
    if train_file is None and autofind_splits:
        for file in data_folder.iterdir():
            file_name = file.name
            if not suffixes_to_ignore.isdisjoint(file.suffixes):
                continue
            # "54019" filters out an unrelated file shipped with one corpus -- TODO confirm
            if "train" in file_name and "54019" not in file_name:
                train_file = file
            if "dev" in file_name:
                dev_file = file
            if "testa" in file_name:
                dev_file = file
            if "testb" in file_name:
                test_file = file

    # if no test file is found, take any file with 'test' in name
    if test_file is None and autofind_splits:
        for file in data_folder.iterdir():
            file_name = file.name
            if not suffixes_to_ignore.isdisjoint(file.suffixes):
                continue
            if "test" in file_name:
                test_file = file

    # same logger instance as the module-level `log`
    logger = logging.getLogger("flair")
    logger.info(f"Reading data from {data_folder}")
    logger.info(f"Train: {train_file}")
    logger.info(f"Dev: {dev_file}")
    logger.info(f"Test: {test_file}")

    return dev_file, test_file, train_file
| 9,549 | 33.854015 | 128 | py |
flair | flair-master/flair/datasets/ocr.py | import json
from pathlib import Path
from typing import Dict, Optional, Union
import gdown.download_folder
import PIL
from torch.utils.data import Dataset
import flair
from flair.data import BoundingBox, Corpus, FlairDataset, Sentence, get_spans_from_bio
from flair.datasets.base import find_train_dev_test_files
class OcrJsonDataset(FlairDataset):
    """Dataset of OCR'd document images with token bounding boxes and BIO span labels."""

    def __init__(
        self,
        path_to_split_directory: Union[str, Path],
        label_type: str = "ner",
        in_memory: bool = True,
        encoding: str = "utf-8",
        load_images: bool = False,
        normalize_coords_to_thousands: bool = True,
        label_name_map: Optional[Dict[str, str]] = None,
    ) -> None:
        """Instantiates a Dataset from a OCR-Json format.

        The folder is structured with a "images" folder and a "tagged" folder.
        Those folders contain respectively .jpg and .json files with matching file name.
        The json contains 3 fields "words", "bbox", "labels" which are lists of equal length
        "words" is a list of strings, containing the ocr texts,
        "bbox" is a list of int-Tuples, containing left, top, right, bottom
        "labels" is a BIO-tagging of the sentences

        :param path_to_split_directory: base folder with the task data
        :param label_type: the label_type to add the ocr labels to
        :param encoding: the encoding to load the .json files with
        :param normalize_coords_to_thousands: if True, the coordinates will be ranged from 0 to 1000
        :param load_images: if True, the pillow images will be added as metadata
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param label_name_map: Optionally map tag names to different schema.
        :return: a Dataset with Sentences that contain OCR information
        """
        self.in_memory = in_memory
        path_to_split_directory = Path(path_to_split_directory)
        assert path_to_split_directory.exists()
        image_dir = path_to_split_directory / "images"
        tagged_dir = path_to_split_directory / "tagged"
        self.base_path = path_to_split_directory
        assert tagged_dir.exists()
        assert image_dir.exists()
        # only keep examples that have BOTH an image and a tagged json file
        self.file_names = sorted(
            {p.stem for p in image_dir.iterdir() if p.is_file()} & {p.stem for p in tagged_dir.iterdir() if p.is_file()}
        )
        self.total_sentence_count: int = len(self.file_names)
        self.load_images = load_images
        self.label_type = label_type
        self.encoding = encoding
        self.label_name_map = label_name_map
        self.normalize_coords_to_thousands = normalize_coords_to_thousands

        if in_memory:
            # eagerly parse all examples; otherwise they are re-parsed per access
            self.sentences = [self._load_example(file_name) for file_name in self.file_names]

    def _remap_label(self, tag):
        # remap regular tag names
        if self.label_name_map is not None:
            return self.label_name_map.get(tag, tag)  # for example, transforming 'PER' to 'person'
        return tag

    def _load_example(self, file_name: str) -> Sentence:
        """Load one (image, tagged-json) pair into a Sentence with bbox metadata and span labels."""
        data_path = self.base_path / "tagged" / f"{file_name}.json"
        with data_path.open("r", encoding=self.encoding) as f:
            data = json.load(f)
        sentence = Sentence(text=data["words"])
        img_path = self.base_path / "images" / f"{file_name}.jpg"

        # the image is opened in any case to read its dimensions
        with PIL.Image.open(img_path) as img:
            width, height = img.size
            if self.load_images:
                img.load()
                sentence.add_metadata("image", img.convert("RGB"))

        sentence.add_metadata("img_width", width)
        sentence.add_metadata("img_height", height)
        for token, (left, top, right, bottom) in zip(sentence, data["bbox"]):
            if self.normalize_coords_to_thousands:
                # rescale pixel coordinates to a resolution-independent 0-1000 range
                left = int(1000 * left / width)
                top = int(1000 * top / height)
                right = int(1000 * right / width)
                bottom = int(1000 * bottom / height)

            token.add_metadata("bbox", BoundingBox(left=left, top=top, right=right, bottom=bottom))

        # convert the BIO tag sequence into labeled spans
        for span_indices, score, label in get_spans_from_bio(data["labels"]):
            span = sentence[span_indices[0] : span_indices[-1] + 1]
            value = self._remap_label(label)
            if value != "O":
                span.add_label(self.label_type, value=value, score=score)
        return sentence

    def is_in_memory(self) -> bool:
        return self.in_memory

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        if self.in_memory:
            sentence = self.sentences[index]

        # else skip to position in file where sentence begins
        else:
            sentence = self._load_example(self.file_names[index])

            # set sentence context using partials TODO: pointer to dataset is really inefficient
            sentence._has_context = True
            sentence._position_in_dataset = (self, index)

        return sentence
class OcrCorpus(Corpus):
    def __init__(
        self,
        train_path: Optional[Path] = None,
        dev_path: Optional[Path] = None,
        test_path: Optional[Path] = None,
        encoding: str = "utf-8",
        label_type: str = "ner",
        in_memory: bool = True,
        load_images: bool = False,
        normalize_coords_to_thousands: bool = True,
        label_name_map: Optional[Dict[str, str]] = None,
        **corpusargs,
    ) -> None:
        """Instantiates a Corpus from a OCR-Json format.

        :param train_path: the folder for the training data
        :param dev_path: the folder for the dev data
        :param test_path: the folder for the test data
        :param encoding: the encoding to load the .json files with
        :param label_type: the label_type to add the ocr labels to
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param load_images: if True, the pillow images will be added as metadata
        :param normalize_coords_to_thousands: if True, the coordinates will be ranged from 0 to 1000
        :param label_name_map: Optionally map tag names to different schema.
        :return: a Corpus with Sentences that contain OCR information
        """

        def _build_split(split_path: Optional[Path]) -> Optional[Dataset]:
            # a missing path simply means the corpus has no such split
            if split_path is None:
                return None
            return OcrJsonDataset(
                split_path,
                label_type=label_type,
                encoding=encoding,
                in_memory=in_memory,
                load_images=load_images,
                normalize_coords_to_thousands=normalize_coords_to_thousands,
                label_name_map=label_name_map,
            )

        super().__init__(
            _build_split(train_path),
            _build_split(dev_path),
            _build_split(test_path),
            **corpusargs,
        )
class SROIE(OcrCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        encoding: str = "utf-8",
        label_type: str = "ner",
        in_memory: bool = True,
        load_images: bool = False,
        normalize_coords_to_thousands: bool = True,
        label_name_map: Optional[Dict[str, str]] = None,
        **corpusargs,
    ) -> None:
        """Instantiates the SROIE corpus with perfect ocr boxes.

        :param base_path: the path to store the dataset or load it from
        :param encoding: the encoding to load the .json files with
        :param label_type: the label_type to add the ocr labels to
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param load_images: if True, the pillow images will be added as metadata
        :param normalize_coords_to_thousands: if True, the coordinates will be ranged from 0 to 1000
        :param label_name_map: Optionally map tag names to different schema.
        :return: a Corpus with Sentences that contain OCR information
        """
        # cache the dataset under <cache>/datasets/sroie unless a base path is given
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download and extract the data on first use
        if not data_folder.exists():
            # the url is copied from https://huggingface.co/datasets/darentang/sroie/blob/main/sroie.py#L44
            url = "https://drive.google.com/uc?id=1ZyxAw1d-9UvhgNLGRvsJK4gBCMf0VpGD"
            zip_path = base_path / "sroie.zip"
            # gdown downloads from Google Drive, extracts the archive, then the zip is removed
            gdown.cached_download(url, str(zip_path), postprocess=gdown.extractall)
            zip_path.unlink()

        # auto-detect the train/dev/test folders inside the extracted data
        dev_path, test_path, train_path = find_train_dev_test_files(data_folder, None, None, None)
        super().__init__(
            train_path,
            dev_path,
            test_path,
            encoding=encoding,
            label_type=label_type,
            in_memory=in_memory,
            load_images=load_images,
            label_name_map=label_name_map,
            normalize_coords_to_thousands=normalize_coords_to_thousands,
            **corpusargs,
        )
| 10,117 | 40.130081 | 120 | py |
flair | flair-master/flair/datasets/sequence_labeling.py | import copy
import json
import logging
import os
import re
import shutil
from collections import defaultdict
from pathlib import Path
from typing import (
Any,
DefaultDict,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
Union,
cast,
)
from torch.utils.data import ConcatDataset, Dataset
import flair
from flair.data import (
Corpus,
FlairDataset,
MultiCorpus,
Relation,
Sentence,
Token,
get_spans_from_bio,
)
from flair.datasets.base import find_train_dev_test_files
from flair.file_utils import cached_path, unpack_file
log = logging.getLogger("flair")
class MultiFileJsonlCorpus(Corpus):
    """This class represents a generic Jsonl corpus with multiple train, dev, and test files."""

    def __init__(
        self,
        train_files=None,
        test_files=None,
        dev_files=None,
        encoding: str = "utf-8",
        text_column_name: str = "data",
        label_column_name: str = "label",
        label_type: str = "ner",
        **corpusargs,
    ) -> None:
        """Instantiates a MultiFileJsonlCorpus as, e.g., created with doccanos JSONL export.

        Note that at least one of train_files, test_files, and dev_files must contain one path.
        Otherwise, the initialization will fail.

        :param corpusargs: Additional arguments for Corpus initialization
        :param train_files: the name of the train files
        :param test_files: the name of the test files
        :param dev_files: the name of the dev files, if empty, dev data is sampled from train
        :param encoding: file encoding used for all splits
        :param text_column_name: Name of the text column inside the jsonl files.
        :param label_column_name: Name of the label column inside the jsonl files.
        :raises RuntimeError: If no paths are given
        """

        def _build_split(files) -> Optional[Dataset]:
            # a split is only created when at least one non-empty path is given
            if not (files and files[0]):
                return None
            return ConcatDataset(
                [
                    JsonlDataset(
                        split_file,
                        text_column_name=text_column_name,
                        label_column_name=label_column_name,
                        label_type=label_type,
                        # BUGFIX: encoding was previously only applied to the train
                        # datasets; dev/test silently fell back to the default.
                        encoding=encoding,
                    )
                    for split_file in files
                ]
            )

        super().__init__(_build_split(train_files), _build_split(dev_files), _build_split(test_files), **corpusargs)
class JsonlCorpus(MultiFileJsonlCorpus):
    def __init__(
        self,
        data_folder: Union[str, Path],
        train_file: Optional[Union[str, Path]] = None,
        test_file: Optional[Union[str, Path]] = None,
        dev_file: Optional[Union[str, Path]] = None,
        encoding: str = "utf-8",
        text_column_name: str = "data",
        label_column_name: str = "label",
        label_type: str = "ner",
        autofind_splits: bool = True,
        name: Optional[str] = None,
        **corpusargs,
    ) -> None:
        """Instantiates a JsonlCorpus with one file per Dataset (train, dev, and test).

        :param data_folder: Path to the folder containing the JSONL corpus
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file, if None, dev data is sampled from train
        :param text_column_name: Name of the text column inside the JSONL file.
        :param label_column_name: Name of the label column inside the JSONL file.
        :param autofind_splits: Whether train, test and dev file should be determined automatically
        :param name: name of the Corpus see flair.data.Corpus
        """
        # locate the split files on disk unless they were given explicitly
        dev_file, test_file, train_file = find_train_dev_test_files(
            data_folder, dev_file, test_file, train_file, autofind_splits
        )

        def _wrap(split_file):
            # the parent corpus expects a (possibly empty) list per split
            return [split_file] if split_file else []

        super().__init__(
            train_files=_wrap(train_file),
            test_files=_wrap(test_file),
            dev_files=_wrap(dev_file),
            text_column_name=text_column_name,
            label_column_name=label_column_name,
            label_type=label_type,
            name=name if data_folder is None else str(data_folder),
            encoding=encoding,
            **corpusargs,
        )
class JsonlDataset(FlairDataset):
    def __init__(
        self,
        path_to_jsonl_file: Union[str, Path],
        encoding: str = "utf-8",
        text_column_name: str = "data",
        label_column_name: str = "label",
        label_type: str = "ner",
    ) -> None:
        """Instantiates a JsonlDataset and converts all annotated char spans to token tags using the IOB scheme.

        The expected file format is:
        { "<text_column_name>": "<text>", "label_column_name": [[<start_char_index>, <end_char_index>, <label>],...] }

        :param path_to_jsonl_file: File to read
        :param encoding: file encoding used when reading the JSONL file
        :param text_column_name: Name of the text column
        :param label_column_name: Name of the label column
        :param label_type: label type (annotation layer) the span labels are added under
        """
        path_to_json_file = Path(path_to_jsonl_file)

        self.text_column_name = text_column_name
        self.label_column_name = label_column_name
        self.label_type = label_type
        self.path_to_json_file = path_to_json_file

        self.sentences: List[Sentence] = []
        with path_to_json_file.open(encoding=encoding) as jsonl_fp:
            # one JSON object per line; each becomes one Sentence
            for line in jsonl_fp:
                current_line = json.loads(line)
                raw_text = current_line[text_column_name]
                current_labels = current_line[label_column_name]
                current_sentence = Sentence(raw_text)

                self._add_labels_to_sentence(raw_text, current_sentence, current_labels)

                self.sentences.append(current_sentence)

    def _add_labels_to_sentence(self, raw_text: str, sentence: Sentence, labels: List[List[Any]]):
        """Add one span label per ``[start, end, label]`` triple to the sentence."""
        # Add tags for each annotated span
        for label in labels:
            self._add_label_to_sentence(raw_text, sentence, label[0], label[1], label[2])

    def _add_label_to_sentence(self, text: str, sentence: Sentence, start: int, end: int, label: str):
        """Adds a NE label to a given sentence.

        :param text: raw sentence (with all whitespaces etc.). Is used to determine the token indices.
        :param sentence: Tokenized flair Sentence.
        :param start: Start character index of the label.
        :param end: End character index of the label.
        :param label: Label to assign to the given range.
        :return: Nothing. Changes sentence as INOUT-param
        """
        annotated_part = text[start:end]

        # Remove leading and trailing whitespaces from annotated spans
        while re.search(r"^\s", annotated_part):
            start += 1
            annotated_part = text[start:end]

        while re.search(r"\s$", annotated_part):
            end -= 1
            annotated_part = text[start:end]

        # Search start and end token index for current span
        start_idx = -1
        end_idx = -1
        for token in sentence:
            if token.start_position <= start <= token.end_position and start_idx == -1:
                start_idx = token.idx - 1

            if token.start_position <= end <= token.end_position and end_idx == -1:
                end_idx = token.idx - 1

        # If end index is not found set to last token
        if end_idx == -1:
            end_idx = sentence[-1].idx - 1

        # Throw error if indices are not valid
        if start_idx == -1 or start_idx > end_idx:
            raise ValueError(
                f"Could not create token span from char span.\n\
                Sen: {sentence}\nStart: {start}, End: {end}, Label: {label}\n\
                Ann: {annotated_part}\nRaw: {text}\nCo: {start_idx}, {end_idx}"
            )

        # token indices are 0-based here; the slice end is inclusive for the span
        sentence[start_idx : end_idx + 1].add_label(self.label_type, label)

    def is_in_memory(self) -> bool:
        # Currently all Jsonl Datasets are stored in Memory
        return True

    def __len__(self) -> int:
        """Number of sentences in the Dataset."""
        return len(self.sentences)

    def __getitem__(self, index: int) -> Sentence:
        """Returns the sentence at a given index."""
        return self.sentences[index]
class MultiFileColumnCorpus(Corpus):
    def __init__(
        self,
        column_format: Dict[int, str],
        train_files=None,
        test_files=None,
        dev_files=None,
        column_delimiter: str = r"\s+",
        comment_symbol: Optional[str] = None,
        encoding: str = "utf-8",
        document_separator_token: Optional[str] = None,
        skip_first_line: bool = False,
        in_memory: bool = True,
        label_name_map: Optional[Dict[str, str]] = None,
        banned_sentences: Optional[List[str]] = None,
        default_whitespace_after: int = 1,
        **corpusargs,
    ) -> None:
        r"""Instantiate a Corpus from CoNLL column-formatted task data such as CoNLL03 or CoNLL2000.

        :param column_format: a map specifying the column format
        :param train_files: the name of the train files
        :param test_files: the name of the test files
        :param dev_files: the name of the dev files, if empty, dev data is sampled from train
        :param column_delimiter: default is to split on any separator, but you can overwrite for instance with "\t"
            to split only on tabs
        :param comment_symbol: if set, lines that begin with this symbol are treated as comments
        :param encoding: file encoding of the column files
        :param document_separator_token: If provided, sentences that function as document boundaries are so marked
        :param skip_first_line: set to True if your dataset has a header line
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param label_name_map: Optionally map tag names to different schema.
        :param banned_sentences: Optionally remove sentences from the corpus. Works only if `in_memory` is true
        :return: a Corpus with annotated train, dev and test data
        """

        def _build_split(files) -> Optional[Dataset]:
            # no (or only empty) file names given -> this split does not exist
            if not (files and files[0]):
                return None
            datasets = [
                ColumnDataset(
                    split_file,
                    column_format,
                    encoding=encoding,
                    comment_symbol=comment_symbol,
                    column_delimiter=column_delimiter,
                    banned_sentences=banned_sentences,
                    in_memory=in_memory,
                    document_separator_token=document_separator_token,
                    skip_first_line=skip_first_line,
                    label_name_map=label_name_map,
                    default_whitespace_after=default_whitespace_after,
                )
                for split_file in files
            ]
            return ConcatDataset(datasets)

        super().__init__(_build_split(train_files), _build_split(dev_files), _build_split(test_files), **corpusargs)
class ColumnCorpus(MultiFileColumnCorpus):
    def __init__(
        self,
        data_folder: Union[str, Path],
        column_format: Dict[int, str],
        train_file=None,
        test_file=None,
        dev_file=None,
        autofind_splits: bool = True,
        name: Optional[str] = None,
        comment_symbol="# ",
        **corpusargs,
    ) -> None:
        r"""Instantiate a Corpus from CoNLL column-formatted task data such as CoNLL03 or CoNLL2000.

        :param data_folder: base folder with the task data
        :param column_format: a map specifying the column format
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file, if None, dev data is sampled from train
        :param autofind_splits: Whether train, test and dev files should be determined automatically
        :param name: name of the Corpus, defaults to the data folder
        :param comment_symbol: lines that begin with this symbol are treated as comments
        :return: a Corpus with annotated train, dev and test data
        """
        # locate the split files on disk unless they were given explicitly
        dev_file, test_file, train_file = find_train_dev_test_files(
            data_folder, dev_file, test_file, train_file, autofind_splits
        )

        def _wrap(split_file):
            # the parent corpus expects a (possibly empty) list per split
            return [split_file] if split_file else []

        super().__init__(
            column_format,
            train_files=_wrap(train_file),
            test_files=_wrap(test_file),
            dev_files=_wrap(dev_file),
            name=name if data_folder is None else str(data_folder),
            comment_symbol=comment_symbol,
            **corpusargs,
        )
class ColumnDataset(FlairDataset):
    """Dataset that reads CoNLL-style column-formatted files into Sentence objects."""

    # special key for space after
    SPACE_AFTER_KEY = "space-after"
    # special key for feature columns
    FEATS = ["feats", "misc"]
    # special key for dependency head id
    HEAD = ["head", "head_id"]

    def __init__(
        self,
        path_to_column_file: Union[str, Path],
        column_name_map: Dict[int, str],
        column_delimiter: str = r"\s+",
        comment_symbol: Optional[str] = None,
        banned_sentences: Optional[List[str]] = None,
        in_memory: bool = True,
        document_separator_token: Optional[str] = None,
        encoding: str = "utf-8",
        skip_first_line: bool = False,
        label_name_map: Optional[Dict[str, str]] = None,
        default_whitespace_after: int = 1,
    ) -> None:
        r"""Instantiates a column dataset.

        :param path_to_column_file: path to the file with the column-formatted data
        :param column_name_map: a map specifying the column format
        :param column_delimiter: default is to split on any separatator, but you can overwrite for instance with "\t"
            to split only on tabs
        :param comment_symbol: if set, lines that begin with this symbol are treated as comments
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
        :param document_separator_token: If provided, sentences that function as document boundaries are so marked
        :param skip_first_line: set to True if your dataset has a header line
        :param label_name_map: Optionally map tag names to different schema.
        :param banned_sentences: Optionally remove sentences from the corpus. Works only if `in_memory` is true
        :return: a dataset with annotated data
        """
        path_to_column_file = Path(path_to_column_file)
        assert path_to_column_file.exists()
        self.path_to_column_file = path_to_column_file
        self.column_delimiter = re.compile(column_delimiter)
        self.comment_symbol = comment_symbol
        self.document_separator_token = document_separator_token
        self.label_name_map = label_name_map
        self.banned_sentences = banned_sentences
        self.default_whitespace_after = default_whitespace_after

        # store either Sentence objects in memory, or only file offsets
        self.in_memory = in_memory

        self.total_sentence_count: int = 0

        # most data sets have the token text in the first column, if not, pass 'text' as column
        self.text_column: int = 0
        self.head_id_column: Optional[int] = None
        for column in column_name_map:
            if column_name_map[column] == "text":
                self.text_column = column
            if column_name_map[column] in self.HEAD:
                self.head_id_column = column

        # determine encoding of text file
        self.encoding = encoding

        # identify which columns are spans and which are word-level
        self._identify_span_columns(column_name_map, skip_first_line)

        # now load all sentences
        with open(str(self.path_to_column_file), encoding=self.encoding) as file:
            # skip first line if to selected
            if skip_first_line:
                file.readline()

            # option 1: keep Sentence objects in memory
            if self.in_memory:
                self.sentences: List[Sentence] = []

                # pointer to previous
                previous_sentence = None
                while True:
                    # parse next sentence
                    next_sentence = self._read_next_sentence(file)

                    # quit if last sentence reached
                    if len(next_sentence) == 0:
                        break

                    sentence = self._convert_lines_to_sentence(
                        next_sentence,
                        word_level_tag_columns=self.word_level_tag_columns,
                        span_level_tag_columns=self.span_level_tag_columns,
                    )

                    if not sentence:
                        continue

                    # skip banned sentences
                    if self.banned_sentences is not None and any(
                        d in sentence.to_plain_string() for d in self.banned_sentences
                    ):
                        continue

                    # set previous and next sentence for context
                    sentence._previous_sentence = previous_sentence
                    sentence._next_sentence = None
                    if previous_sentence:
                        previous_sentence._next_sentence = sentence

                    # append parsed sentence to list in memory
                    self.sentences.append(sentence)
                    previous_sentence = sentence

                self.total_sentence_count = len(self.sentences)

            # option 2: keep source data in memory
            if not self.in_memory:
                self.sentences_raw: List[List[str]] = []

                while True:
                    # read lines for next sentence, but don't parse
                    sentence_raw = self._read_next_sentence(file)

                    # quit if last sentence reached
                    if len(sentence_raw) == 0:
                        break

                    # append raw lines for each sentence
                    self.sentences_raw.append(sentence_raw)

                self.total_sentence_count = len(self.sentences_raw)

    def _identify_span_columns(self, column_name_map, skip_first_line):
        """Probe the first sentences of the file to classify each column as span-level (BIOES) or word-level."""
        # we make a distinction between word-level tags and span-level tags
        self.span_level_tag_columns = {}
        self.word_level_tag_columns = {self.text_column: "text"}

        # read first sentence to determine which columns are span-labels
        with open(str(self.path_to_column_file), encoding=self.encoding) as file:
            # skip first line if to selected
            if skip_first_line:
                file.readline()

            # check the first 5 sentences
            probe = []
            for _i in range(5):
                next_sentence = self._read_next_sentence(file)
                if len(next_sentence) == 0:
                    break
                sentence = self._convert_lines_to_sentence(next_sentence, word_level_tag_columns=column_name_map)
                if sentence:
                    probe.append(sentence)
                else:
                    break

            # go through all annotations and identify word- and span-level annotations
            # - if a column has at least one BIES we know it's a Span label
            # - if a column has at least one tag that is not BIOES, we know it's a Token label
            # - problem cases are columns for which we see only O - in this case we default to Span
            for sentence in probe:
                for column in column_name_map:
                    # skip assigned columns
                    if (
                        column in self.word_level_tag_columns
                        or column in self.span_level_tag_columns
                        or column == self.head_id_column
                    ):
                        continue
                    layer = column_name_map[column]

                    # the space after key is always word-levels
                    if column_name_map[column] == self.SPACE_AFTER_KEY:
                        self.word_level_tag_columns[column] = layer
                        continue

                    if layer in self.FEATS:
                        self.word_level_tag_columns[column] = layer
                        continue

                    for token in sentence:
                        # if at least one token has a BIES, we know it's a span label
                        if token.get_label(layer).value[0:2] in ["B-", "I-", "E-", "S-"]:
                            self.span_level_tag_columns[column] = layer
                            break

                        # if at least one token has a label other than BIOES, we know it's a token label
                        elif token.get_label(layer, "O").value != "O":
                            self.word_level_tag_columns[column] = layer
                            break

        # all remaining columns that are not word-level are span-level
        for column in column_name_map:
            if column not in self.word_level_tag_columns:
                self.span_level_tag_columns[column] = column_name_map[column]

        for column in self.span_level_tag_columns:
            log.debug(f"Column {column} ({self.span_level_tag_columns[column]}) is a span-level column.")

        # for column in self.word_level_tag_columns:
        #     log.info(f"Column {column} ({self.word_level_tag_columns[column]}) is a word-level column.")

    def _read_next_sentence(self, file):
        """Read and return the raw lines of the next sentence; an empty list signals EOF."""
        lines = []
        line = file.readline()
        while line:
            if not line.isspace():
                lines.append(line)

            # if sentence ends, break
            if len(lines) > 0 and self.__line_completes_sentence(line):
                break
            line = file.readline()
        return lines

    def _convert_lines_to_sentence(
        self, lines, word_level_tag_columns: Dict[int, str], span_level_tag_columns: Optional[Dict[int, str]] = None
    ):
        """Parse the raw lines of one sentence into a Sentence object; returns None for empty sentences."""
        token: Optional[Token] = None
        tokens: List[Token] = []
        filtered_lines = []
        comments = []
        for line in lines:
            # parse comments if possible
            if self.comment_symbol is not None and line.startswith(self.comment_symbol):
                comments.append(line)
                continue

            filtered_lines.append(line)

            # otherwise, this line is a token. parse and add to sentence
            token = self._parse_token(line, word_level_tag_columns, token)
            tokens.append(token)

        sentence: Sentence = Sentence(text=tokens)

        # check if this sentence is a document boundary
        if sentence.to_original_text() == self.document_separator_token:
            sentence.is_document_boundary = True

        # add span labels
        if span_level_tag_columns:
            for span_column in span_level_tag_columns:
                try:
                    bioes_tags = [self.column_delimiter.split(line.rstrip())[span_column] for line in filtered_lines]
                    # discard tags from tokens that are not added to the sentence
                    bioes_tags = [tag for tag, token in zip(bioes_tags, tokens) if token._internal_index is not None]
                    predicted_spans = get_spans_from_bio(bioes_tags)
                    for span_indices, score, label in predicted_spans:
                        span = sentence[span_indices[0] : span_indices[-1] + 1]
                        value = self._remap_label(label)
                        if value != "O":
                            span.add_label(span_level_tag_columns[span_column], value=value, score=score)
                except Exception:
                    # deliberate best-effort: lines missing a value in this column are skipped silently
                    pass

        for comment in comments:
            # parse relations if they are set
            if comment.startswith("# relations = "):
                relations_string = comment.strip().split("# relations = ")[1]
                for relation in relations_string.split("|"):
                    indices = relation.split(";")
                    head_start = int(indices[0])
                    head_end = int(indices[1])
                    tail_start = int(indices[2])
                    tail_end = int(indices[3])
                    label = indices[4]
                    # head and tail span indices are 1-indexed and end index is inclusive
                    relation = Relation(
                        first=sentence[head_start - 1 : head_end], second=sentence[tail_start - 1 : tail_end]
                    )
                    remapped = self._remap_label(label)
                    if remapped != "O":
                        relation.add_label(typename="relation", value=remapped)

            # parse comments such as '# id cd27886d-6895-4d02-a8df-e5fa763fa88f domain=de-orcas'
            # to set the metadata "domain" to "de-orcas"
            for comment_row in comment.split("\t"):
                if "=" in comment_row:
                    key, value = comment_row.split("=", 1)
                    sentence.add_metadata(key, value)

        if len(sentence) > 0:
            return sentence
        return None

    def _parse_token(self, line: str, column_name_map: Dict[int, str], last_token: Optional[Token] = None) -> Token:
        """Parse one data line into a Token, attaching all word-level labels."""
        # get fields from line
        fields: List[str] = self.column_delimiter.split(line.rstrip())
        field_count = len(fields)

        # get head_id if exists (only in dependency parses)
        head_id = int(fields[self.head_id_column]) if self.head_id_column else None

        # the token's start position is derived from the previous token in the sentence
        if last_token is None:
            start = 0
        else:
            assert last_token.end_position is not None
            start = last_token.end_position + last_token.whitespace_after

        # initialize token
        token = Token(
            fields[self.text_column],
            head_id=head_id,
            whitespace_after=self.default_whitespace_after,
            start_position=start,
        )

        # go through all columns
        for column, column_type in column_name_map.items():
            if field_count <= column:
                continue
            if column == self.text_column:
                continue
            if column == self.head_id_column:
                continue
            if column_type == self.SPACE_AFTER_KEY:
                if fields[column] == "-":
                    token.whitespace_after = 0
                continue

            # 'feats' and 'misc' column should be split into different fields
            if column_type in self.FEATS:
                for feature in fields[column].split("|"):
                    # special handling for whitespace after
                    if feature == "SpaceAfter=No":
                        token.whitespace_after = 0
                        continue

                    if "=" in feature:
                        # add each other feature as label-value pair
                        label_name, original_label_value = feature.split("=", 1)
                        label_value = self._remap_label(original_label_value)
                        if label_value != "O":
                            token.add_label(label_name, label_value)
            else:
                # get the task name (e.g. 'ner')
                label_name = column_type
                # get the label value
                label_value = self._remap_label(fields[column])
                # add label
                if label_value != "O":
                    token.add_label(label_name, label_value)
        return token

    def _remap_label(self, tag):
        # remap regular tag names
        if self.label_name_map and tag in self.label_name_map:
            tag = self.label_name_map[tag]  # for example, transforming 'PER' to 'person'
        return tag

    def __line_completes_sentence(self, line: str) -> bool:
        """A blank line (or empty string) terminates the current sentence."""
        sentence_completed = line.isspace() or line == ""
        return sentence_completed

    def is_in_memory(self) -> bool:
        return self.in_memory

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        """Return the sentence at ``index``, parsing lazily when not kept in memory."""
        # if in memory, retrieve parsed sentence
        if self.in_memory:
            sentence = self.sentences[index]

        # else skip to position in file where sentence begins
        else:
            sentence = self._convert_lines_to_sentence(
                self.sentences_raw[index],
                word_level_tag_columns=self.word_level_tag_columns,
                span_level_tag_columns=self.span_level_tag_columns,
            )

        # set sentence context using partials TODO: pointer to dataset is really inefficient
        sentence._has_context = True
        sentence._position_in_dataset = (self, index)
        return sentence
class ONTONOTES(MultiFileColumnCorpus):
archive_url = "https://data.mendeley.com/public-files/datasets/zmycy7t9h9/files/b078e1c4-f7a4-4427-be7f-9389967831ef/file_downloaded"
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        version: str = "v4",
        language: str = "english",
        domain: Union[None, str, List[str], Dict[str, Union[None, str, List[str]]]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the OntoNotes corpus, downloading and preprocessing the data on first use.

        :param base_path: folder to cache the dataset in (defaults to the flair cache root)
        :param version: OntoNotes release, "v4" or "v12" ("v12" is English-only)
        :param language: "english", "chinese" or "arabic"
        :param domain: optional restriction of the data: a domain name, a list of
            domain names, or a dict mapping domain name to a source selection
        :param in_memory: If set to True, the dataset is kept in memory as Sentence objects
        """
        assert version in ["v4", "v12"]
        # "v12" of the data is only available for English
        if version == "v12":
            assert language == "english"
        else:
            assert language in ["english", "chinese", "arabic"]

        # the processed files are tab-separated with token, POS tag and NER tag columns
        column_format = {0: "text", 1: "pos", 2: "ner"}

        processed_data_path = self._ensure_data_processed(base_path, language, version)

        kw = {"version": version, "language": language, "domain": domain, "processed_data_path": processed_data_path}
        dev_files = list(self._get_processed_file_paths(split="development", **kw))
        train_files = list(self._get_processed_file_paths(split="train", **kw))
        test_files = list(self._get_processed_file_paths(split="test", **kw))

        super().__init__(
            dev_files=dev_files,
            train_files=train_files,
            test_files=test_files,
            name="/".join((self.__class__.__name__, language, version)),
            column_format=column_format,
            in_memory=in_memory,
            column_delimiter="\t",
            **corpusargs,
        )
@classmethod
def get_available_domains(
cls,
base_path: Optional[Union[str, Path]] = None,
version: str = "v4",
language: str = "english",
split: str = "train",
) -> List[str]:
processed_data_path = cls._ensure_data_processed(base_path=base_path, language=language, version=version)
processed_split_path = processed_data_path / "splits" / version / language / split
return [domain_path.name for domain_path in processed_split_path.iterdir()]
    @classmethod
    def _get_processed_file_paths(
        cls,
        processed_data_path: Path,
        split: str = "train",
        version: str = "v4",
        language: str = "english",
        domain: Optional[Union[str, List[str], Dict[str, Union[None, str, List[str]]]]] = None,
    ) -> Iterable[Path]:
        """Yield the processed data files for one split, optionally filtered by domain/source.

        ``domain`` may be None (all domains), a single domain name, a list of domain
        names, or a dict mapping domain name -> source selection (None for all sources
        of that domain, a single source name, or a list of source names).
        """
        processed_split_path = processed_data_path / "splits" / version / language / split
        if domain is None:
            # use all domains
            assert processed_split_path.exists(), f"Processed data not found (expected at: {processed_split_path})"
            yield from sorted(filter(os.path.isfile, processed_split_path.rglob("*")))
        elif isinstance(domain, str):
            domain_path = processed_split_path / domain
            assert domain_path.exists(), f"Processed data not found (expected at: {domain_path})"
            yield from sorted(filter(os.path.isfile, domain_path.rglob("*")))
        elif isinstance(domain, list):
            for d in domain:
                domain_path = processed_split_path / d
                assert domain_path.exists(), f"Processed data not found (expected at: {domain_path})"
                yield from sorted(filter(os.path.isfile, domain_path.rglob("*")))
        else:
            assert isinstance(domain, dict)
            for d, sources in domain.items():
                domain_path = processed_split_path / d
                assert domain_path.exists(), f"Processed data not found (expected at: {domain_path})"
                if sources is None:
                    # NOTE(review): unlike the branches above, this rglob result is not
                    # filtered with os.path.isfile, so directories may be yielded too —
                    # confirm whether this asymmetry is intended
                    yield from sorted(domain_path.rglob("*"))
                elif isinstance(sources, str):
                    source_path = domain_path / sources
                    assert source_path.exists(), f"Processed data not found (expected at: {source_path})"
                    yield source_path
                else:
                    assert isinstance(sources, list)
                    for s in sources:
                        source_path = domain_path / s
                        assert source_path.exists(), f"Processed data not found (expected at: {source_path})"
                        yield source_path
    @classmethod
    def _ensure_data_processed(cls, base_path, language: str, version: str):
        """Generate tab-separated split files for version/language from the raw download (once); return their root."""
        raw_data_path = cls._ensure_data_downloaded(base_path)
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = cls.__name__.lower()
        processed_data_path = base_path / dataset_name
        processed_split_path = processed_data_path / "splits" / version / language

        # splits are generated exactly once and then reused from disk
        if not processed_split_path.exists():
            log.info(f"OntoNotes splits for {version}/{language} have not been generated yet, generating it now.")
            for split in ["train", "development", "test"]:
                log.info(f"Generating {split} split for {version}/{language}")
                raw_split_path = raw_data_path / version / "data" / split / "data" / language / "annotations"

                # iter over all domains / sources and create target files
                for raw_domain_path in raw_split_path.iterdir():
                    for raw_source_path in raw_domain_path.iterdir():
                        conll_files = sorted(raw_source_path.rglob("*gold_conll"))

                        processed_source_path = (
                            processed_split_path / split / raw_domain_path.name / raw_source_path.name
                        )
                        processed_source_path.parent.mkdir(parents=True, exist_ok=True)

                        # one token per line: "text<TAB>pos<TAB>ner"; blank line ends a sentence
                        with open(processed_source_path, "w") as f:
                            for conll_file in conll_files:
                                for sent in cls.sentence_iterator(conll_file):
                                    if language == "arabic":
                                        # Arabic tokens contain '#'-separated extra material;
                                        # only the part before '#' is kept (presumably the
                                        # surface form — confirm against the raw data)
                                        trimmed_sentence = [_sent.split("#")[0] for _sent in sent["sentence"]]
                                        sent["sentence"] = trimmed_sentence
                                    for row in zip(sent["sentence"], sent["pos_tags"], sent["named_entities"]):
                                        f.write("\t".join(row) + "\n")
                                    f.write("\n")
        return processed_data_path
@classmethod
def _ensure_data_downloaded(cls, base_path: Optional[Union[str, Path]] = None) -> Path:
base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
data_folder = base_path / "conll-2012"
if not data_folder.exists():
unpack_file(cached_path(cls.archive_url, data_folder), data_folder.parent, "zip", False)
return data_folder
@classmethod
def _process_coref_span_annotations_for_word(
cls,
label: str,
word_index: int,
clusters: DefaultDict[int, List[Tuple[int, int]]],
coref_stacks: DefaultDict[int, List[int]],
) -> None:
"""For a given coref label, add it to a currently open span(s), complete a span(s) or ignore it, if it is outside of all spans.
This method mutates the clusters and coref_stacks dictionaries.
# Parameters
label : `str`
The coref label for this word.
word_index : `int`
The word index into the sentence.
clusters : `DefaultDict[int, List[Tuple[int, int]]]`
A dictionary mapping cluster ids to lists of inclusive spans into the
sentence.
coref_stacks : `DefaultDict[int, List[int]]`
Stacks for each cluster id to hold the start indices of active spans (spans
which we are inside of when processing a given word). Spans with the same id
can be nested, which is why we collect these opening spans on a stack, e.g:
[Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1
"""
if label != "-":
for segment in label.split("|"):
# The conll representation of coref spans allows spans to
# overlap. If spans end or begin at the same word, they are
# separated by a "|".
if segment[0] == "(":
# The span begins at this word.
if segment[-1] == ")":
# The span begins and ends at this word (single word span).
cluster_id = int(segment[1:-1])
clusters[cluster_id].append((word_index, word_index))
else:
# The span is starting, so we record the index of the word.
cluster_id = int(segment[1:])
coref_stacks[cluster_id].append(word_index)
else:
# The span for this id is ending, but didn't start at this word.
# Retrieve the start index from the document state and
# add the span to the clusters for this id.
cluster_id = int(segment[:-1])
start = coref_stacks[cluster_id].pop()
clusters[cluster_id].append((start, word_index))
@classmethod
def _process_span_annotations_for_word(
cls,
annotations: List[str],
span_labels: List[List[str]],
current_span_labels: List[Optional[str]],
) -> None:
"""Given a sequence of different label types for a single word and the current span label we are inside, compute the BIO tag for each label and append to a list.
# Parameters
annotations : `List[str]`
A list of labels to compute BIO tags for.
span_labels : `List[List[str]]`
A list of lists, one for each annotation, to incrementally collect
the BIO tags for a sequence.
current_span_labels : `List[Optional[str]]`
The currently open span per annotation type, or `None` if there is no open span.
"""
for annotation_index, annotation in enumerate(annotations):
# strip all bracketing information to
# get the actual propbank label.
label = annotation.strip("()*")
if "(" in annotation:
# Entering into a span for a particular semantic role label.
# We append the label and set the current span for this annotation.
bio_label = "B-" + label
span_labels[annotation_index].append(bio_label)
current_span_labels[annotation_index] = label
elif current_span_labels[annotation_index] is not None:
# If there's no '(' token, but the current_span_label is not None,
# then we are inside a span.
bio_label = "I-" + cast(str, current_span_labels[annotation_index])
span_labels[annotation_index].append(bio_label)
else:
# We're outside a span.
span_labels[annotation_index].append("O")
# Exiting a span, so we reset the current span label for this annotation.
if ")" in annotation:
current_span_labels[annotation_index] = None
    @classmethod
    def _conll_rows_to_sentence(cls, conll_rows: List[str]) -> Dict:
        """Parse the conll rows of one sentence into a dictionary of aligned annotation lists.

        Column layout read here (whitespace-separated): 0 = document id,
        1 = sentence id, 3 = word, 4 = POS tag, 5 = parse bits, 6 = predicate lemma,
        7 = framenet id, 8 = word sense, 9 = speaker, 10..-2 = span columns
        (NER first, then one column per SRL frame), -1 = coreference.

        :param conll_rows: the non-comment lines belonging to a single sentence
        :return: dict with keys "document_id", "sentence_id", "sentence", "pos_tags",
            "parse_tree", "predicate_lemmas", "predicate_framenet_ids", "word_senses",
            "speakers", "named_entities", "srl_frames" and "coref_span_tuples"
        """
        document_id: str
        sentence_id: int
        # The words in the sentence.
        sentence: List[str] = []
        # The pos tags of the words in the sentence.
        pos_tags: List[str] = []
        # the pieces of the parse tree.
        parse_pieces: List[Optional[str]] = []
        # The lemmatised form of the words in the sentence which
        # have SRL or word sense information.
        predicate_lemmas: List[Optional[str]] = []
        # The FrameNet ID of the predicate.
        predicate_framenet_ids: List[Optional[str]] = []
        # The sense of the word, if available.
        word_senses: List[Optional[float]] = []
        # The current speaker, if available.
        speakers: List[Optional[str]] = []
        # Words marked as verbal predicates, in sentence order.
        verbal_predicates: List[str] = []
        # One BIO tag sequence per span column (NER + SRL frames).
        span_labels: List[List[str]] = []
        # The currently open span label per span column.
        current_span_labels: List[Optional[str]] = []
        # Cluster id -> List of (start_index, end_index) spans.
        clusters: DefaultDict[int, List[Tuple[int, int]]] = defaultdict(list)
        # Cluster id -> List of start_indices which are open for this id.
        coref_stacks: DefaultDict[int, List[int]] = defaultdict(list)
        for index, row in enumerate(conll_rows):
            conll_components = row.split()
            document_id = conll_components[0]
            sentence_id = int(conll_components[1])
            word = conll_components[3]
            pos_tag = conll_components[4]
            parse_piece: Optional[str]
            # Replace brackets in text and pos tags
            # with a different token for parse trees.
            if pos_tag != "XX" and word != "XX":
                if word == "(":
                    parse_word = "-LRB-"
                elif word == ")":
                    parse_word = "-RRB-"
                else:
                    parse_word = word
                if pos_tag == "(":
                    pos_tag = "-LRB-"
                if pos_tag == ")":
                    pos_tag = "-RRB-"
                (left_brackets, right_hand_side) = conll_components[5].split("*")
                # only keep ')' if there are nested brackets with nothing in them.
                right_brackets = right_hand_side.count(")") * ")"
                parse_piece = f"{left_brackets} ({pos_tag} {parse_word}) {right_brackets}"
            else:
                # There are some bad annotations in the CONLL data.
                # They contain no information, so to make this explicit,
                # we just set the parse piece to be None which will result
                # in the overall parse tree being None.
                parse_piece = None
            lemmatised_word = conll_components[6]
            framenet_id = conll_components[7]
            word_sense = conll_components[8]
            speaker = conll_components[9]
            if not span_labels:
                # If this is the first word in the sentence, create
                # empty lists to collect the NER and SRL BIO labels.
                # We can't do this upfront, because we don't know how many
                # components we are collecting, as a sentence can have
                # variable numbers of SRL frames.
                span_labels = [[] for _ in conll_components[10:-1]]
                # Create variables representing the current label for each label
                # sequence we are collecting.
                current_span_labels = [None for _ in conll_components[10:-1]]
            cls._process_span_annotations_for_word(conll_components[10:-1], span_labels, current_span_labels)
            # If any annotation marks this word as a verb predicate,
            # we need to record its index. This also has the side effect
            # of ordering the verbal predicates by their location in the
            # sentence, automatically aligning them with the annotations.
            word_is_verbal_predicate = any("(V" in x for x in conll_components[11:-1])
            if word_is_verbal_predicate:
                verbal_predicates.append(word)
            cls._process_coref_span_annotations_for_word(conll_components[-1], index, clusters, coref_stacks)
            sentence.append(word)
            pos_tags.append(pos_tag)
            parse_pieces.append(parse_piece)
            # "-" marks a missing value in these columns
            predicate_lemmas.append(lemmatised_word if lemmatised_word != "-" else None)
            predicate_framenet_ids.append(framenet_id if framenet_id != "-" else None)
            word_senses.append(float(word_sense) if word_sense != "-" else None)
            speakers.append(speaker if speaker != "-" else None)
        # the first span column holds the NER tags; the rest are SRL frames
        named_entities = span_labels[0]
        srl_frames = [(predicate, labels) for predicate, labels in zip(verbal_predicates, span_labels[1:])]
        # this would not be reached if parse_pieces contained None, hence the cast
        parse_tree = "".join(cast(List[str], parse_pieces)) if all(parse_pieces) else None
        coref_span_tuples = {(cluster_id, span) for cluster_id, span_list in clusters.items() for span in span_list}
        return {
            "document_id": document_id,
            "sentence_id": sentence_id,
            "sentence": sentence,
            "pos_tags": pos_tags,
            "parse_tree": parse_tree,
            "predicate_lemmas": predicate_lemmas,
            "predicate_framenet_ids": predicate_framenet_ids,
            "word_senses": word_senses,
            "speakers": speakers,
            "named_entities": named_entities,
            "srl_frames": srl_frames,
            "coref_span_tuples": coref_span_tuples,
        }
@classmethod
def dataset_document_iterator(cls, file_path: Union[Path, str]) -> Iterator[List]:
"""An iterator over CONLL formatted files which yields documents, regardless of the number of document annotations in a particular file.
This is useful for conll data which has been preprocessed, such
as the preprocessing which takes place for the 2012 CONLL
Coreference Resolution task.
"""
with open(file_path, encoding="utf8") as open_file:
conll_rows = []
document: List = []
for line in open_file:
line = line.strip()
if line != "" and not line.startswith("#"):
# Non-empty line. Collect the annotation.
conll_rows.append(line)
else:
if conll_rows:
document.append(cls._conll_rows_to_sentence(conll_rows))
conll_rows = []
if line.startswith("#end document"):
yield document
document = []
if document:
# Collect any stragglers or files which might not
# have the '#end document' format for the end of the file.
yield document
@classmethod
def sentence_iterator(cls, file_path: Union[Path, str]) -> Iterator:
"""An iterator over the sentences in an individual CONLL formatted file."""
for document in cls.dataset_document_iterator(file_path):
for sentence in document:
yield sentence
class CONLL_03(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        column_format: Optional[Dict[int, str]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the CoNLL-03 corpus.

        This is only possible if you've manually downloaded it to your machine.
        Obtain the corpus from https://www.clips.uantwerpen.be/conll2003/ner/ and put the eng.testa, .testb, .train
        files in a folder called 'conll_03'. Then set the base_path parameter in the constructor to the path to the
        parent directory where the conll_03 folder resides.
        If using entity linking, the conll03 dataset is reduced by about 20 Documents, which are not part of the yago dataset.

        :param base_path: Path to the CoNLL-03 corpus (i.e. 'conll_03' folder) on your machine
        :param column_format: mapping of column index to column type; defaults to
            {0: "text", 1: "pos", 3: "ner"}
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        # fix: the default used to be a mutable dict default argument, which is
        # shared across all calls; a None sentinel avoids that while keeping the
        # same effective default for callers
        if column_format is None:
            column_format = {0: "text", 1: "pos", 3: "ner"}

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # check if data there; the corpus cannot be downloaded automatically
        if not data_folder.exists():
            log.warning("-" * 100)
            log.warning(f'WARNING: CoNLL-03 dataset not found at "{data_folder}".')
            log.warning(
                'Instructions for obtaining the data can be found here: https://www.clips.uantwerpen.be/conll2003/ner/"'
            )
            log.warning("-" * 100)

        super().__init__(
            data_folder,
            column_format=column_format,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            **corpusargs,
        )
class CONLL_03_GERMAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the German CoNLL-03 corpus.

        The data cannot be downloaded automatically: obtain it from
        https://www.clips.uantwerpen.be/conll2003/ner/ and place the files in a
        folder called 'conll_03_german'; base_path must point to that folder's
        parent directory.

        :param base_path: Path to the parent of the 'conll_03_german' folder on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        root = flair.cache_root / "datasets" if not base_path else Path(base_path)
        corpus_folder = root / self.__class__.__name__.lower()

        # five-column format: token, lemma, POS tag, chunk, NER tag
        column_map = {0: "text", 1: "lemma", 2: "pos", 3: "np", 4: "ner"}

        # the corpus must be obtained manually; point the user to the instructions
        if not corpus_folder.exists():
            log.warning("-" * 100)
            log.warning(f'WARNING: CoNLL-03 dataset not found at "{corpus_folder}".')
            log.warning(
                'Instructions for obtaining the data can be found here: https://www.clips.uantwerpen.be/conll2003/ner/"'
            )
            log.warning("-" * 100)

        super().__init__(
            corpus_folder,
            column_map,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            **corpusargs,
        )
class CONLL_03_DUTCH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Dutch CoNLL-03 corpus (CoNLL-02 data files).

        The dataset is downloaded automatically the first time this constructor runs.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        root = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = self.__class__.__name__.lower()
        data_folder = root / dataset_name

        # three-column format: token, POS tag, NER tag
        columns = {0: "text", 1: "pos", 2: "ner"}

        # fetch the raw CoNLL-02 files if not present locally
        conll_02_path = "https://www.clips.uantwerpen.be/conll2002/ner/data/"
        for raw_name in ("ned.testa", "ned.testb", "ned.train"):
            cached_path(f"{conll_02_path}{raw_name}", data_folder / "raw")

        # the original files need an extra empty line after each document separator
        if not (data_folder / "train.txt").is_file():
            self.__offset_docstarts(data_folder / "raw" / "ned.train", data_folder / "train.txt")
            self.__offset_docstarts(data_folder / "raw" / "ned.testa", data_folder / "dev.txt")
            self.__offset_docstarts(data_folder / "raw" / "ned.testb", data_folder / "test.txt")

        super().__init__(
            data_folder,
            columns,
            train_file="train.txt",
            dev_file="dev.txt",
            test_file="test.txt",
            encoding="latin-1",
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            **corpusargs,
        )

    @staticmethod
    def __offset_docstarts(file_in: Union[str, Path], file_out: Union[str, Path]):
        """Copy file_in to file_out, inserting an empty line after every -DOCSTART- line."""
        with open(file_in, encoding="latin-1") as source, open(file_out, "w", encoding="latin-1") as target:
            for line in source:
                target.write(line)
                if line.startswith("-DOCSTART-"):
                    target.write("\n")
class CONLL_03_SPANISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Spanish CoNLL-03 corpus (CoNLL-02 data files).

        The dataset is downloaded automatically the first time this constructor runs.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        root = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = self.__class__.__name__.lower()

        # two-column format: token and NER tag
        columns = {0: "text", 1: "ner"}

        # fetch the raw CoNLL-02 files if not present locally
        conll_02_path = "https://www.clips.uantwerpen.be/conll2002/ner/data/"
        for raw_name in ("esp.testa", "esp.testb", "esp.train"):
            cached_path(f"{conll_02_path}{raw_name}", Path("datasets") / dataset_name)

        super().__init__(
            root / dataset_name,
            columns,
            encoding="latin-1",
            in_memory=in_memory,
            **corpusargs,
        )
class CONLL_2000(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the CoNLL-2000 corpus for English chunking.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token, POS tag, chunk tag
        columns = {0: "text", 1: "pos", 2: "np"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # download and decompress data if necessary (the corpus ships gzip-compressed);
        # the previously duplicated per-file extraction code is collapsed into one loop
        conll_2000_path = "https://www.clips.uantwerpen.be/conll2000/chunking/"
        data_file = flair.cache_root / "datasets" / dataset_name / "train.txt"
        if not data_file.is_file():
            import gzip
            import shutil

            cache_folder = flair.cache_root / "datasets" / dataset_name
            for file_name in ["train.txt", "test.txt"]:
                cached_path(f"{conll_2000_path}{file_name}.gz", Path("datasets") / dataset_name)
                with gzip.open(cache_folder / f"{file_name}.gz", "rb") as f_in, open(
                    cache_folder / file_name,
                    "wb",
                ) as f_out:
                    shutil.copyfileobj(f_in, f_out)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class WNUT_17(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the WNUT-17 corpus; downloaded automatically on first use.

        :param base_path: dataset cache directory; defaults to the flair cache root.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        root = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = self.__class__.__name__.lower()
        data_folder = root / dataset_name

        # two-column format: token and NER tag
        columns = {0: "text", 1: "ner"}

        # fetch the three raw files if not present locally
        wnut_path = "https://noisy-text.github.io/2017/files/"
        for remote_name in ("wnut17train.conll", "emerging.dev.conll", "emerging.test.annotated"):
            cached_path(f"{wnut_path}{remote_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class FEWNERD(ColumnCorpus):
    def __init__(
        self,
        setting: str = "supervised",
        **corpusargs,
    ) -> None:
        """Initialize the FewNERD corpus in one of its three settings.

        The data is downloaded and converted to BIO column format on first use.

        :param setting: one of "supervised", "inter" or "intra"
        """
        assert setting in ["supervised", "inter", "intra"]

        base_path = flair.cache_root / "datasets"
        self.dataset_name = self.__class__.__name__.lower()
        self.data_folder = base_path / self.dataset_name / setting
        self.bio_format_data = base_path / self.dataset_name / setting / "bio_format"

        if not self.data_folder.exists():
            self._download(setting=setting)

        if not self.bio_format_data.exists():
            self._generate_splits(setting)

        super().__init__(
            self.bio_format_data,
            column_format={0: "text", 1: "ner"},
            **corpusargs,
        )

    def _download(self, setting):
        """Download and extract the FewNERD archive for the given setting."""
        _URLs = {
            "supervised": "https://cloud.tsinghua.edu.cn/f/09265750ae6340429827/?dl=1",
            "intra": "https://cloud.tsinghua.edu.cn/f/a0d3efdebddd4412b07c/?dl=1",
            "inter": "https://cloud.tsinghua.edu.cn/f/165693d5e68b43558f9b/?dl=1",
        }
        log.info(f"FewNERD ({setting}) dataset not found, downloading.")
        dl_path = _URLs[setting]
        dl_dir = cached_path(dl_path, Path("datasets") / self.dataset_name / setting)

        if setting not in os.listdir(self.data_folder):
            import zipfile

            from tqdm import tqdm

            log.info("FewNERD dataset has not been extracted yet, extracting it now. This might take a while.")
            with zipfile.ZipFile(dl_dir, "r") as zip_ref:
                for f in tqdm(zip_ref.namelist()):
                    if f.endswith("/"):
                        os.makedirs(self.data_folder / f)
                    else:
                        zip_ref.extract(f, path=self.data_folder)

    def _generate_splits(self, setting):
        """Convert the raw token/tag files of each split into BIO-tagged files."""
        log.info(
            f"FewNERD splits for {setting} have not been parsed into BIO format, parsing it now. This might take a while."
        )
        os.mkdir(self.bio_format_data)
        for split in os.listdir(self.data_folder / setting):
            with open(self.data_folder / setting / split) as source, open(self.bio_format_data / split, "w") as target:
                previous_tag = None
                for line in source:
                    if line == "" or line == "\n":
                        target.write("\n")
                        # fix: reset at sentence boundaries, so that an entity at the
                        # start of a new sentence gets a B- tag even when the previous
                        # sentence ended with the same tag
                        previous_tag = None
                    else:
                        token, tag = line.split("\t")
                        tag = tag.replace("\n", "")
                        if tag == "O":
                            target.write(token + "\t" + tag + "\n")
                        elif previous_tag != tag and tag != "O":
                            target.write(token + "\t" + "B-" + tag + "\n")
                        elif previous_tag == tag and tag != "O":
                            target.write(token + "\t" + "I-" + tag + "\n")
                        previous_tag = tag
class BIOSCOPE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the BIOSCOPE corpus; downloaded automatically on first use.

        :param base_path: dataset cache directory; defaults to the flair cache root.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        root = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = self.__class__.__name__.lower()
        data_folder = root / dataset_name

        # two-column format: token and tag
        columns = {0: "text", 1: "tag"}

        # fetch the single data file if not present locally
        bioscope_path = (
            "https://raw.githubusercontent.com/whoisjones/BioScopeSequenceLabelingData/master/sequence_labeled/"
        )
        cached_path(f"{bioscope_path}output.txt", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            train_file="output.txt",
            **corpusargs,
        )
class NER_ARABIC_ANER(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize a preprocessed version of the Arabic Named Entity Recognition Corpus (ANERCorp).

        The dataset is downloaded from http://curtis.ml.cmu.edu/w/courses/index.php/ANERcorp
        Column order is swapped.
        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        # default dataset folder is the cache root
        # (fix: a second dead "if not base_path" re-check was removed — base_path
        # is already a Path here and therefore always truthy)
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token and NER tag
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # download data if necessary
        anercorp_path = "https://megantosh.s3.eu-central-1.amazonaws.com/ANERcorp/"
        # cached_path(f"{anercorp_path}test.txt", Path("datasets") / dataset_name)
        cached_path(f"{anercorp_path}train.txt", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_ARABIC_AQMAR(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize a preprocessed and modified version of the American and Qatari Modeling of Arabic (AQMAR) dataset.

        The dataset is downloaded from http://www.cs.cmu.edu/~ark/AQMAR/

        - Modifications from original dataset: Miscellaneous tags (MIS0, MIS1, MIS2, MIS3) are merged to one tag "MISC" as these categories deviate across the original dataset
        - The 28 original Wikipedia articles are merged into a single file containing the articles in alphabetical order

        The first time you call this constructor it will automatically download the dataset.

        This dataset is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
        please cite: "Behrang Mohit, Nathan Schneider, Rishav Bhowmick, Kemal Oflazer, and Noah A. Smith (2012),
        Recall-Oriented Learning of Named Entities in Arabic Wikipedia. Proceedings of EACL."

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        # default dataset folder is the cache root
        # (fix: a second dead "if not base_path" re-check was removed — base_path
        # is already a Path here and therefore always truthy)
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token and NER tag
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # download data if necessary
        aqmar_path = "https://megantosh.s3.eu-central-1.amazonaws.com/AQMAR/"
        # cached_path(f"{anercorp_path}test.txt", Path("datasets") / dataset_name)
        cached_path(f"{aqmar_path}train.txt", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_BASQUE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Basque NER corpus (EIEC); downloaded automatically on first use.

        :param base_path: dataset cache directory; defaults to the flair cache root.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        root = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = self.__class__.__name__.lower()
        data_folder = root / dataset_name

        # two-column format: token and NER tag
        columns = {0: "text", 1: "ner"}

        # download and unpack the tgz archive if the train file is not present locally
        ner_basque_path = "http://ixa2.si.ehu.eus/eiec/"
        data_path = flair.cache_root / "datasets" / dataset_name
        if not (data_path / "named_ent_eu.train").is_file():
            cached_path(f"{ner_basque_path}/eiec_v1.0.tgz", Path("datasets") / dataset_name)
            import shutil
            import tarfile

            with tarfile.open(data_path / "eiec_v1.0.tgz", "r:gz") as archive:
                # pull the two corpus files out of the archive's subfolder
                for member in ("eiec_v1.0/named_ent_eu.train", "eiec_v1.0/named_ent_eu.test"):
                    archive.extract(member, data_path)
                    shutil.move(f"{data_path}/{member}", data_path)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class NER_CHINESE_WEIBO(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Weibo NER corpus; downloaded automatically on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        root = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = self.__class__.__name__.lower()
        data_folder = root / dataset_name

        # two-column format: token and NER tag
        columns = {0: "text", 1: "ner"}

        # fetch all three splits if not present locally
        weiboNER_conll_path = "https://raw.githubusercontent.com/87302380/WEIBO_NER/main/data/"
        for split_suffix in ("train", "test", "dev"):
            cached_path(
                f"{weiboNER_conll_path}weiboNER_2nd_conll_format.{split_suffix}",
                Path("datasets") / dataset_name,
            )

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            train_file="weiboNER_2nd_conll_format.train",
            test_file="weiboNER_2nd_conll_format.test",
            dev_file="weiboNER_2nd_conll_format.dev",
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_DANISH_DANE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the DaNE corpus (Danish NER, distributed as CoNLL-U files).

        The dataset is downloaded automatically the first time this constructor runs.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # CoNLL-U layout: token in column 1, POS tag in column 3, NER tag in column 9
        columns = {1: "text", 3: "pos", 9: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        # download data if necessary
        data_path = flair.cache_root / "datasets" / dataset_name
        train_data_file = data_path / "ddt.train.conllu"
        if not train_data_file.is_file():
            temp_file = cached_path(
                "https://danlp.alexandra.dk/304bd159d5de/datasets/ddt.zip",
                Path("datasets") / dataset_name,
            )
            from zipfile import ZipFile

            with ZipFile(temp_file, "r") as zip_file:
                zip_file.extractall(path=data_path)

            # Remove CoNLL-U meta information in the last column
            for part in ["train", "dev", "test"]:
                lines = []
                data_file = f"ddt.{part}.conllu"
                with open(data_path / data_file) as file:
                    for line in file:
                        if line.startswith("#") or line == "\n":
                            # keep comments and sentence separators unchanged
                            # (fix: these lines used to be appended twice — once raw
                            # and once through the replace() call below)
                            lines.append(line)
                        else:
                            lines.append(line.replace("name=", "").replace("|SpaceAfter=No", ""))

                with open(data_path / data_file, "w") as file:
                    file.writelines(lines)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            comment_symbol="#",
            **corpusargs,
        )
class NER_ENGLISH_MOVIE_SIMPLE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the eng corpus of the MIT Movie Corpus; downloaded automatically on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # note the reversed column order: the tag comes first in these files
        columns = {0: "ner", 1: "text"}

        dataset_name = self.__class__.__name__.lower()

        root = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = root / dataset_name

        # fetch both splits if not present locally
        mit_movie_path = "https://groups.csail.mit.edu/sls/downloads/movie/"
        train_file = "engtrain.bio"
        test_file = "engtest.bio"
        for bio_file in (train_file, test_file):
            cached_path(f"{mit_movie_path}{bio_file}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_MOVIE_COMPLEX(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the trivia10k13 corpus of the MIT Movie Corpus; downloaded automatically on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # note the reversed column order: the tag comes first in these files
        columns = {0: "ner", 1: "text"}

        dataset_name = self.__class__.__name__.lower()

        root = flair.cache_root / "datasets" if not base_path else Path(base_path)
        data_folder = root / dataset_name

        # fetch both splits if not present locally
        mit_movie_path = "https://groups.csail.mit.edu/sls/downloads/movie/"
        train_file = "trivia10k13train.bio"
        test_file = "trivia10k13test.bio"
        for bio_file in (train_file, test_file):
            cached_path(f"{mit_movie_path}{bio_file}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            train_file=train_file,
            test_file=test_file,
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_SEC_FILLINGS(ColumnCorpus):
    """Corpus of SEC filings annotated with English NER tags.

    See the paper "Domain Adaption of Named Entity Recognition to Support Credit Risk
    Assessment" by Alvarado et al, 2015: https://aclanthology.org/U15-1010/

    :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
    :param in_memory: If True, keeps dataset in memory giving speedups in training.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        # column format: token, POS tag, and (column 3) NER tag
        columns = {0: "text", 1: "pos", 3: "ner"}

        # resolve the dataset cache folder (default: flair cache root)
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = base_path / dataset_name

        # fetch train (FIN5) and test (FIN3) splits if not cached yet
        SEC_FILLINGS_Path = "https://raw.githubusercontent.com/juand-r/entity-recognition-datasets/master/data/SEC-filings/CONLL-format/data/"
        for remote_file in ("test/FIN3.txt", "train/FIN5.txt"):
            cached_path(f"{SEC_FILLINGS_Path}{remote_file}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            train_file="FIN5.txt",
            test_file="FIN3.txt",
            skip_first_line=True,
            **corpusargs,
        )
class NER_ENGLISH_RESTAURANT(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the MIT Restaurant corpus.

        The corpus will be downloaded from https://groups.csail.mit.edu/sls/downloads/restaurant/.
        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # resolve the dataset cache folder (default: flair cache root)
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = base_path / dataset_name

        # fetch both splits if they are not cached yet
        mit_restaurants_path = "https://megantosh.s3.eu-central-1.amazonaws.com/MITRestoCorpus/"
        for remote_file in ("test.txt", "train.txt"):
            cached_path(f"{mit_restaurants_path}{remote_file}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="latin-1",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_STACKOVERFLOW(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the STACKOVERFLOW_NER corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # The splits use CoNLL format; each line reads
        #   <word>+"\t"+<NE>"\t"+<word>+"\t"<markdown>
        # with an empty line marking the end of a sentence. <NE> is the human
        # annotation and <markdown> the code tag provided by the post author.
        columns = {0: "word", 1: "ner", 3: "markdown"}

        # collapse the fine-grained annotation scheme into a coarser label set
        entity_mapping = {
            "Library_Function": "Function",
            "Function_Name": "Function",
            "Class_Name": "Class",
            "Library_Class": "Class",
            "Organization": "Website",
            "Library_Variable": "Variable",
            "Variable_Name": "Variable",
            "Error_Name": "O",
            "Keyboard_IP": "O",
            "Value": "O",
            "Output_Block": "O",
        }

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        STACKOVERFLOW_NER_path = "https://raw.githubusercontent.com/jeniyat/StackOverflowNER/master/resources/annotated_ner_data/StackOverflow/"

        # sentences that are annotation artifacts rather than real data
        banned_sentences = [
            "code omitted for annotation",
            "omitted for annotation",
            "CODE_BLOCK :",
            "OP_BLOCK :",
            "Question_URL :",
            "Question_ID :",
        ]

        # download each split (if needed) and log its question/answer counts
        for file in ["train", "test", "dev"]:
            cached_path(f"{STACKOVERFLOW_NER_path}{file}.txt", Path("datasets") / dataset_name)
            questions = 0
            answers = 0
            with (data_folder / (file + ".txt")).open(encoding="utf-8") as fin:
                for line in fin:
                    if line.startswith("Question_ID"):
                        questions += 1
                    if line.startswith("Answer_to_Question_ID"):
                        answers += 1
            log.info(f"File {file} has {questions} questions and {answers} answers.")

        super().__init__(
            data_folder,
            columns,
            train_file="train.txt",
            test_file="test.txt",
            dev_file="dev.txt",
            encoding="utf-8",
            banned_sentences=banned_sentences,
            in_memory=in_memory,
            label_name_map=entity_mapping,
            **corpusargs,
        )
class NER_ENGLISH_TWITTER(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the twitter_ner corpus.

        The corpus will be downloaded from
        https://raw.githubusercontent.com/aritter/twitter_nlp/master/data/annotated/ner.txt.
        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # resolve the dataset cache folder (fix: removed a second dead
        # `if not base_path` check — base_path is always truthy after this line)
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        twitter_ner_path = "https://raw.githubusercontent.com/aritter/twitter_nlp/master/data/annotated/"
        cached_path(f"{twitter_ner_path}ner.txt", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="latin-1",
            train_file="ner.txt",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_PERSON(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the PERSON_NER corpus for person names.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # resolve the dataset cache folder (fix: removed a second dead
        # `if not base_path` check — base_path is always truthy after this line)
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download the four raw source files if not present locally
        conll_path = "https://raw.githubusercontent.com/das-sudeshna/genid/master/"
        cached_path(f"{conll_path}conll-g.conll", data_folder / "raw")
        cached_path(f"{conll_path}ieer-g.conll", data_folder / "raw")
        cached_path(f"{conll_path}textbook-g.conll", data_folder / "raw")
        cached_path(f"{conll_path}wiki-g.conll", data_folder / "raw")

        # merge all raw files into a single training file
        self.__concatAllFiles(data_folder)

        # fix: forward **corpusargs like every sibling corpus class does
        super().__init__(data_folder, columns, in_memory=in_memory, train_file="bigFile.conll", **corpusargs)

    @staticmethod
    def __concatAllFiles(data_folder):
        """Concatenate every file in data_folder/raw into data_folder/bigFile.conll."""
        arr = os.listdir(data_folder / "raw")

        with open(data_folder / "bigFile.conll", "w") as outfile:
            for fname in arr:
                with open(data_folder / "raw" / fname) as infile:
                    outfile.write(infile.read())
class NER_ENGLISH_WEBPAGES(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the WEBPAGES_NER corpus.

        The corpus was introduced in the paper "Design Challenges and Misconceptions in
        Named Entity Recognition" by Ratinov and Roth (2009): https://aclanthology.org/W09-1119/.
        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: NER tag in column 0, token in column 5
        columns = {0: "ner", 5: "text"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # NOTE(review): dead code — base_path was already set unconditionally above,
        # so this branch can never fire
        if not base_path:
            base_path = Path(flair.cache_root) / "datasets"
        data_folder = base_path / dataset_name
        import tarfile

        # only download/merge once; the merged file marks the work as done
        if not os.path.isfile(data_folder / "webpages_ner.txt"):
            # # download zip
            tar_file = "https://cogcomp.seas.upenn.edu/Data/NERWebpagesColumns.tgz"
            webpages_ner_path = cached_path(tar_file, Path("datasets") / dataset_name)
            tf = tarfile.open(webpages_ner_path)
            tf.extractall(data_folder)
            tf.close()
            outputfile = os.path.abspath(data_folder)

            # merge the files in one as the zip is containing multiples files
            # NOTE(review): `outputfile / data_folder` is str / Path, resolved via
            # PurePath.__rtruediv__; because data_folder is an absolute path, the
            # join effectively yields data_folder itself — confirm before refactoring
            with open(outputfile / data_folder / "webpages_ner.txt", "w+") as outfile:
                # os.walk with an unconditional break processes only the first
                # (top-level) directory entry; files[1] is its subdirectory list
                for files in os.walk(outputfile):
                    f = files[1]
                    # presumably the last subdirectory holds the .gold files — this
                    # relies on os.walk ordering; TODO confirm
                    ff = os.listdir(outputfile / data_folder / f[-1])
                    for _i, file in enumerate(ff):
                        if file.endswith(".gold"):
                            # errors="replace" tolerates mis-encoded bytes in the
                            # scraped webpage data
                            with open(
                                outputfile / data_folder / f[-1] / file,
                                "r+",
                                errors="replace",
                            ) as infile:
                                content = infile.read()
                                outfile.write(content)
                    break

        super().__init__(
            data_folder,
            columns,
            train_file="webpages_ner.txt",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_ENGLISH_WNUT_2020(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the WNUT_2020_NER corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        github_url = "https://github.com/jeniyat/WNUT_2020_NER/archive/master.zip"

        # build each split file from the repository snapshot if it is missing
        for sample in ["train", "test", "dev"]:
            sample_file = data_folder / (sample + ".txt")
            if sample_file.is_file():
                continue

            # fetch and unpack the repo (unzipped folder name: WNUT_2020_NER-master)
            zip_path = cached_path(f"{github_url}", Path("datasets") / dataset_name)
            unpack_file(zip_path, data_folder, "zip", False)

            # the test split lives in a differently named subfolder than train/dev
            if sample == "test":
                file_path = data_folder / Path("WNUT_2020_NER-master/data/" + sample + "_data_2020/Conll_Format/")
            else:
                file_path = data_folder / Path("WNUT_2020_NER-master/data/" + sample + "_data/Conll_Format/")

            # concatenate all per-document CoNLL files into one split file
            with open(data_folder / (sample + ".txt"), "w") as outfile:
                for fname in os.listdir(file_path):
                    with open(file_path / fname) as infile:
                        outfile.write(infile.read())

            # clean up the unpacked repository when done
            shutil.rmtree(str(data_folder / "WNUT_2020_NER-master"))

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_ENGLISH_WIKIGOLD(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the wikigold corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # resolve the dataset cache folder (default: flair cache root)
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = base_path / dataset_name

        # download the single corpus file if not cached yet
        wikigold_ner_path = "https://raw.githubusercontent.com/juand-r/entity-recognition-datasets/master/data/wikigold/CONLL-format/data/"
        cached_path(f"{wikigold_ner_path}wikigold.conll.txt", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            in_memory=in_memory,
            train_file="wikigold.conll.txt",
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_FINNISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Finnish FiNER (digitoday) NER corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # resolve the dataset cache folder (fix: removed a second dead
        # `if not base_path` check — base_path is always truthy after this line)
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download train/dev (2014) and test (2015) splits if necessary
        ner_finnish_path = "https://raw.githubusercontent.com/mpsilfve/finer-data/master/data/digitoday."
        cached_path(f"{ner_finnish_path}2014.train.csv", Path("datasets") / dataset_name)
        cached_path(f"{ner_finnish_path}2014.dev.csv", Path("datasets") / dataset_name)
        cached_path(f"{ner_finnish_path}2015.test.csv", Path("datasets") / dataset_name)

        # the test file contains token-only lines without a tag column; drop them
        self._remove_lines_without_annotations(data_file=Path(data_folder / "digitoday.2015.test.csv"))

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            skip_first_line=True,
            **corpusargs,
        )

    def _remove_lines_without_annotations(self, data_file: Union[str, Path]):
        """Rewrite data_file in place, keeping only lines with more than one column."""
        with open(data_file) as f:
            lines = f.readlines()
        with open(data_file, "w") as f:
            for line in lines:
                if len(line.split()) != 1:
                    f.write(line)
class NER_GERMAN_BIOFID(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the German BIOfid corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # four-column format: token, lemma, POS tag, NER tag
        columns = {0: "text", 1: "lemma", 2: "pos", 3: "ner"}

        # resolve the dataset cache folder (default: flair cache root)
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = base_path / dataset_name

        # fetch all three splits if they are not cached yet
        biofid_path = "https://raw.githubusercontent.com/texttechnologylab/BIOfid/master/BIOfid-Dataset-NER/"
        for split_file in ("train.conll", "dev.conll", "test.conll"):
            cached_path(f"{biofid_path}{split_file}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class NER_GERMAN_EUROPARL(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the EUROPARL_NER_GERMAN corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token, lemma, POS tag, NP chunk, NER tag
        columns = {0: "text", 1: "lemma", 2: "pos", 3: "np", 4: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        europarl_ner_german_path = "https://nlpado.de/~sebastian/software/ner/"
        cached_path(
            f"{europarl_ner_german_path}ep-96-04-15.conll",
            Path("datasets") / dataset_name,
        )
        cached_path(
            f"{europarl_ner_german_path}ep-96-04-16.conll",
            Path("datasets") / dataset_name,
        )

        # the raw files carry bare chunk tags (e.g. "PER"); rewrite them as IOB
        self._add_IOB_tags(
            data_file=Path(data_folder / "ep-96-04-15.conll"),
            encoding="latin-1",
            ner_column=4,
        )
        self._add_IOB_tags(
            data_file=Path(data_folder / "ep-96-04-16.conll"),
            encoding="latin-1",
            ner_column=4,
        )

        super().__init__(
            data_folder,
            columns,
            encoding="latin-1",
            in_memory=in_memory,
            train_file="ep-96-04-16.conll",
            test_file="ep-96-04-15.conll",
            **corpusargs,
        )

    def _add_IOB_tags(self, data_file: Union[str, Path], encoding: str = "utf8", ner_column: int = 1):
        """Function that adds IOB tags if only chunk names are provided.

        e.g. words are tagged PER instead of B-PER or I-PER. Replaces '0' with 'O' as the no-chunk tag since ColumnCorpus expects
        the letter 'O'. Additionally it removes lines with no tags in the data file and can also
        be used if the data is only partially IOB tagged.

        Parameters
        ----------
        data_file : Union[str, Path]
            Path to the data file.
        encoding : str, optional
            Encoding used in open function. The default is "utf8".
        ner_column : int, optional
            Specifies the ner-tagged column. The default is 1 (the second column).
        """

        def add_I_prefix(current_line: List[str], ner: int, tag: str):
            # write one token line, replacing the bare tag with "I-<tag>";
            # writes to `f`, the output file bound in the enclosing with-block
            for i in range(0, len(current_line)):
                if i == 0:
                    # fix: previously read the outer variable `line_list` instead of
                    # the `current_line` parameter (same object in practice, but a
                    # latent bug if the helper were ever called differently)
                    f.write(current_line[i])
                elif i == ner:
                    f.write(" I-" + tag)
                else:
                    f.write(" " + current_line[i])
            f.write("\n")

        # read everything first, then rewrite the same file in place
        with open(file=data_file, encoding=encoding) as f:
            lines = f.readlines()
        with open(file=data_file, mode="w", encoding=encoding) as f:
            pred = "O"  # remembers ner tag of predecessing line
            for line in lines:
                line_list = line.split()
                if len(line_list) > 2:  # word with tags
                    ner_tag = line_list[ner_column]
                    if ner_tag in ["0", "O"]:  # no chunk
                        for i in range(0, len(line_list)):
                            if i == 0:
                                f.write(line_list[i])
                            elif i == ner_column:
                                f.write(" O")
                            else:
                                f.write(" " + line_list[i])
                        f.write("\n")
                        pred = "O"
                    elif "-" not in ner_tag:  # no IOB tags
                        if pred == "O":  # found a new chunk
                            add_I_prefix(line_list, ner_column, ner_tag)
                            pred = ner_tag
                        else:  # found further part of chunk or new chunk directly after old chunk
                            add_I_prefix(line_list, ner_column, ner_tag)
                            pred = ner_tag
                    else:  # line already has IOB tag (tag contains '-')
                        f.write(line)
                        pred = ner_tag.split("-")[1]
                elif len(line_list) == 0:  # empty line
                    f.write("\n")
                    pred = "O"
class NER_GERMAN_LEGAL(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the LER_GERMAN (Legal Entity Recognition) corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
        """
        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # resolve the dataset cache folder (default: flair cache root)
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = base_path / dataset_name

        # fetch the single corpus file if it is not cached yet
        ler_path = "https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/"
        cached_path(f"{ler_path}ler.conll", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            train_file="ler.conll",
            **corpusargs,
        )
class NER_GERMAN_GERMEVAL(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the GermEval NER corpus for German.

        This is only possible if you've manually downloaded it to your machine.
        Obtain the corpus from https://sites.google.com/site/germeval2014ner/data and put it into some folder.
        Then point the base_path parameter in the constructor to this folder

        :param base_path: Path to the GermEval corpus on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        # column format: token in column 1, NER tag in column 2
        columns = {1: "text", 2: "ner"}

        # resolve the dataset cache folder (default: flair cache root)
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = base_path / dataset_name

        # download the three splits from Google Drive if not present locally
        if not data_folder.exists():
            os.makedirs(data_folder)

            import gdown

            drive_files = [
                ("1Jjhbal535VVz2ap4v4r_rN1UEHTdLK5P", "train.tsv"),
                ("1u9mb7kNJHWQCWyweMDRMuTFoOHOfeBTH", "test.tsv"),
                ("1ZfRcQThdtAR5PPRjIDtrVP7BtXSCUBbm", "dev.tsv"),
            ]
            for file_id, file_name in drive_files:
                gdown.download(
                    url="https://drive.google.com/uc?id={}".format(file_id),
                    output=str(data_folder / file_name),
                )

        super().__init__(
            data_folder,
            columns,
            comment_symbol="#",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_GERMAN_POLITICS(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        column_delimiter: str = r"\s+",
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize corpus with Named Entity Model for German Politics (NEMGP).

        Data from https://www.thomas-zastrow.de/nlp/.
        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param column_delimiter: Regex used to split columns in the converted files.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download and parse data if necessary; the converted raw file marks the
        # download+conversion step as done
        german_politics_path = "https://www.thomas-zastrow.de/nlp/nemgp_trainingdata_01.txt.zip"
        corpus_file_name = "nemgp_trainingdata_01.txt"
        parsed_dataset = data_folder / "raw" / corpus_file_name

        if not parsed_dataset.exists():
            german_politics_zip = cached_path(f"{german_politics_path}", Path("datasets") / dataset_name / "raw")
            unpack_file(german_politics_zip, data_folder / "raw", "zip", False)
            # rewrite the inline-annotated text into two-column IOB format in place
            self._convert_to_column_corpus(parsed_dataset)

        # create train/test/dev splits from the converted file if not present
        train_dataset = data_folder / "train.txt"
        if not train_dataset.exists():
            self._create_datasets(parsed_dataset, data_folder)

        super().__init__(
            data_folder,
            columns,
            column_delimiter=column_delimiter,
            train_file="train.txt",
            dev_file="dev.txt",
            test_file="test.txt",
            encoding="utf-8",
            in_memory=in_memory,
            **corpusargs,
        )

    def _convert_to_column_corpus(self, data_file: Union[str, Path]):
        """Rewrite data_file in place from inline <START:tag>...<END> markup to token-per-line IOB format."""
        with open(data_file, encoding="utf-8") as f:
            lines = f.readlines()
        with open(data_file, "w", encoding="utf-8") as f:
            # state machine: tag_bool = currently inside an entity span;
            # new_sentence = next entity token gets a "B-" prefix
            tag_bool = False
            new_sentence = True
            for line in lines:
                # collapse runs of whitespace, then split into tokens
                line_splits = re.sub(r"\s{2,}", " ", line).strip().split(" ")
                for substr in line_splits:
                    if substr == ".":
                        # sentence boundary: emit blank line
                        f.write("\n")
                        new_sentence = True
                    elif "<START:" in substr:
                        tag_bool = True
                        # NOTE(review): str.strip strips a *character set*, not a
                        # prefix — works here only because tag names don't start
                        # with characters from "<START:"; confirm before changing
                        tag = substr.strip("<START:").strip(">")
                        if "loc" in tag:
                            tag_IOB = "-LOC"
                        elif "per" in tag:
                            tag_IOB = "-PER"
                        elif "org" in tag:
                            tag_IOB = "-ORG"
                        elif "misc" in tag:
                            tag_IOB = "-MISC"
                        # NOTE(review): if none of the four branches match,
                        # tag_IOB keeps its previous value (or is unbound on the
                        # first entity) — potential UnboundLocalError; the corpus
                        # presumably only uses these four tags — TODO confirm
                    elif "<END>" in substr:
                        tag_bool = False
                        new_sentence = True
                    else:
                        if tag_bool:
                            # first token of a span gets "B", later ones "I"
                            if new_sentence is True:
                                start = "B"
                                new_sentence = False
                            else:
                                start = "I"
                            f.write(substr.strip(" ") + " " + start + tag_IOB + "\n")
                        else:
                            f.write(substr.strip(" ") + " " + "O" + "\n")

    def _create_datasets(self, data_file: Union[str, Path], data_folder: Path):
        """Split the converted corpus file line-wise into train (80%), test (10%) and dev (10%) files."""
        with open(data_file) as file:
            # count lines, then rewind to stream them again below
            num_lines = len(file.readlines())
            file.seek(0)

            train_len = round(num_lines * 0.8)
            test_len = round(num_lines * 0.1)

            # NOTE(review): the split is by raw line count, so a sentence can be
            # cut across two files at the boundaries
            with (data_folder / "train.txt").open("w", encoding="utf-8") as train, (data_folder / "test.txt").open(
                "w", encoding="utf-8"
            ) as test, (data_folder / "dev.txt").open("w", encoding="utf-8") as dev:
                k = 0
                for line in file.readlines():
                    k += 1
                    if k <= train_len:
                        train.write(line)
                    elif k > train_len and k <= (train_len + test_len):
                        test.write(line)
                    elif k > (train_len + test_len) and k <= num_lines:
                        dev.write(line)
class NER_HUNGARIAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the NER Business corpus for Hungarian.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # resolve the dataset cache folder (default: flair cache root)
        dataset_name = self.__class__.__name__.lower()
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = base_path / dataset_name

        # download and unpack the zip only when the extracted corpus is missing
        if not os.path.isfile(data_folder / "hun_ner_corpus.txt"):
            hun_ner_path = "https://rgai.sed.hu/sites/rgai.sed.hu/files/business_NER.zip"
            path_to_zipped_corpus = cached_path(hun_ner_path, Path("datasets") / dataset_name)
            unpack_file(path_to_zipped_corpus, data_folder, mode="zip", keep=True)

        super().__init__(
            data_folder,
            columns,
            train_file="hun_ner_corpus.txt",
            column_delimiter="\t",
            encoding="latin-1",
            in_memory=in_memory,
            label_name_map={"0": "O"},
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            **corpusargs,
        )
class NER_ICELANDIC(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the ICELANDIC_NER corpus.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # only download/merge once; the merged file marks the work as done
        if not os.path.isfile(data_folder / "icelandic_ner.txt"):
            # download zip
            icelandic_ner = "https://repository.clarin.is/repository/xmlui/handle/20.500.12537/42/allzip"
            icelandic_ner_path = cached_path(icelandic_ner, Path("datasets") / dataset_name)

            # unpacking the zip
            unpack_file(icelandic_ner_path, data_folder, mode="zip", keep=True)
            outputfile = os.path.abspath(data_folder)

            # merge the files in one as the zip is containing multiples files
            # NOTE(review): `outputfile / data_folder` is str / Path, resolved via
            # PurePath.__rtruediv__; because data_folder is an absolute path, the
            # join effectively yields data_folder itself — confirm before refactoring
            with open(outputfile / data_folder / "icelandic_ner.txt", "wb") as outfile:
                for files in os.walk(outputfile / data_folder):
                    # files[2] is the filename list of the walked directory;
                    # NOTE(review): opening via data_folder/f[i] only works for
                    # files in the top-level folder, and "icelandic_ner.txt"
                    # itself ends with ".txt" — presumably empty while being
                    # written, so the self-copy is a no-op; TODO confirm
                    f = files[2]
                    for i in range(len(f)):
                        if f[i].endswith(".txt"):
                            with open(outputfile / data_folder / f[i], "rb") as infile:
                                contents = infile.read()
                                outfile.write(contents)

        super().__init__(
            data_folder,
            columns,
            train_file="icelandic_ner.txt",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_JAPANESE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Hironsan/IOB2 corpus for Japanese.

        The first time you call this constructor it will automatically download the dataset.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data from github if necessary (hironsan.txt, ja.wikipedia.conll)
        IOB2_path = "https://raw.githubusercontent.com/Hironsan/IOB2Corpus/master/"

        # download files if not present locally
        cached_path(f"{IOB2_path}hironsan.txt", data_folder / "raw")
        cached_path(f"{IOB2_path}ja.wikipedia.conll", data_folder / "raw")

        # we need to modify the original files by adding new lines after after the end of each sentence
        train_data_file = data_folder / "train.txt"
        if not train_data_file.is_file():
            self.__prepare_jap_wikinews_corpus(data_folder / "raw" / "hironsan.txt", data_folder / "train.txt")
            self.__prepare_jap_wikipedia_corpus(data_folder / "raw" / "ja.wikipedia.conll", data_folder / "train.txt")

        super().__init__(
            data_folder,
            columns,
            train_file="train.txt",
            in_memory=in_memory,
            default_whitespace_after=0,
            **corpusargs,
        )

    @staticmethod
    def __prepare_jap_wikipedia_corpus(file_in: Union[str, Path], file_out: Union[str, Path]):
        """Append the wikipedia file to file_out, inserting a blank line after each sentence-final "。" token.

        Fix: the files contain Japanese text, so they must be read/written as UTF-8
        explicitly — relying on the platform default encoding crashes with
        UnicodeDecodeError on non-UTF-8 locales (e.g. Windows cp1252).
        """
        with open(file_in, encoding="utf-8") as f:
            lines = f.readlines()
        with open(file_out, "a", encoding="utf-8") as f:
            for line in lines:
                if line[0] == "。":
                    # sentence-final period: keep the token line, then add a blank line
                    f.write(line)
                    f.write("\n")
                elif line[0] == "\n":
                    # drop pre-existing blank lines
                    continue
                else:
                    f.write(line)

    @staticmethod
    def __prepare_jap_wikinews_corpus(file_in: Union[str, Path], file_out: Union[str, Path]):
        """Append the wikinews file to file_out, keeping only the first (token) and last (NER tag) columns.

        See the encoding note on __prepare_jap_wikipedia_corpus — explicit UTF-8 is required.
        """
        with open(file_in, encoding="utf-8") as f:
            lines = f.readlines()
        with open(file_out, "a", encoding="utf-8") as f:
            for line in lines:
                sp_line = line.split("\t")
                if sp_line[0] == "\n":
                    f.write("\n")
                else:
                    f.write(sp_line[0] + "\t" + sp_line[-1])
class NER_MASAKHANE(MultiCorpus):
    def __init__(
        self,
        languages: Union[str, List[str]] = "luo",
        version: str = "v2",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Masakhane corpus available on https://github.com/masakhane-io/masakhane-ner/tree/main/data.

        It consists of ten African languages. Pass a language code or a list of language codes to initialize the corpus
        with the languages you require. If you pass "all", all languages will be initialized.

        :param languages: Language name(s) or three-letter code(s), or "all" for every language of the chosen version.
        :param version: Specifies version of the dataset. Currently, only "v1" and "v2" are supported, using "v2" as default.
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :raises ValueError: If an unsupported version or language is passed.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # a single language may be passed as a plain string
        if isinstance(languages, str):
            languages = [languages]

        # column format
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        supported_versions = ["v1", "v2"]

        if version not in supported_versions:
            log.error(f"The specified version '{version}' is not in the list of supported version!")
            log.error(f"Supported versions are '{supported_versions}'!")
            raise ValueError(f"Unsupported version '{version}'")

        data_folder = base_path / dataset_name / version

        # mapping from full language name to the three-letter code used in the repository
        languages_to_code = {
            "v1": {
                "amharic": "amh",
                "hausa": "hau",
                "igbo": "ibo",
                "kinyarwanda": "kin",
                "luganda": "lug",
                "luo": "luo",
                "naija": "pcm",
                "swahili": "swa",
                "yoruba": "yor",
                "wolof": "wol",
            },
            "v2": {
                "bambara": "bam",
                "ghomala": "bbj",
                "ewe": "ewe",
                "fon": "fon",
                "hausa": "hau",
                "igbo": "ibo",
                "kinyarwanda": "kin",
                "luganda": "lug",
                "mossi": "mos",
                "naija": "pcm",
                "chichewa": "nya",
                "chishona": "sna",
                "kiswahili": "swa",
                "setswana": "tsn",
                "akan_twi": "twi",
                "wolof": "wol",
                "isixhosa": "xho",
                "yoruba": "yor",
                "isizulu": "zul",
            },
        }
        language_to_code = languages_to_code[version]

        data_paths = {
            "v1": "https://raw.githubusercontent.com/masakhane-io/masakhane-ner/main/data",
            "v2": "https://raw.githubusercontent.com/masakhane-io/masakhane-ner/main/MasakhaNER2.0/data",
        }

        # use all languages if explicitly set to "all"
        if languages == ["all"]:
            languages = list(language_to_code.values())

        corpora: List[Corpus] = []
        for language in languages:
            # full language names are translated to their codes; codes pass through unchanged
            if language in language_to_code:
                language = language_to_code[language]

            if language not in language_to_code.values():
                log.error(f"Language '{language}' is not in list of supported languages!")
                log.error(f"Supported are '{language_to_code.values()}'!")
                log.error("Instantiate this Corpus for instance like so 'corpus = NER_MASAKHANE(languages='luo')'")
                raise ValueError(f"Unsupported language '{language}'")

            language_folder = data_folder / language

            # download data if necessary
            data_path = f"{data_paths[version]}/{language}/"
            cached_path(f"{data_path}dev.txt", language_folder)
            cached_path(f"{data_path}test.txt", language_folder)
            cached_path(f"{data_path}train.txt", language_folder)

            # initialize columncorpus and add it to list
            log.info(f"Reading data for language {language}@{version}")
            corp = ColumnCorpus(
                data_folder=language_folder,
                column_format=columns,
                encoding="utf-8",
                in_memory=in_memory,
                name=language,
                **corpusargs,
            )
            corpora.append(corp)

        super().__init__(
            corpora,
            name="masakhane-" + "-".join(languages),
        )
class NER_MULTI_CONER(MultiFileColumnCorpus):
    def __init__(
        self,
        task: str = "multi",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Download and Initialize the MultiCoNer corpus.

        :param task: either 'multi', 'code-switch', or the language code for one of the mono tasks.
        :param base_path: Path to the CoNLL-03 corpus (i.e. 'conll_03' folder) on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        if base_path:
            base_path = Path(base_path)
        else:
            base_path = flair.cache_root / "datasets"

        # sub-directory of the dataset archive that holds each task's splits
        folders = {
            "bn": "BN-Bangla",
            "de": "DE-German",
            "en": "EN-English",
            "es": "ES-Espanish",
            "fa": "FA-Farsi",
            "hi": "HI-Hindi",
            "ko": "KO-Korean",
            "nl": "NL-Dutch",
            "ru": "RU-Russian",
            "tr": "TR-Turkish",
            "zh": "ZH-Chinese",
            "mix": "MIX_Code_mixed",
            "multi": "MULTI_Multilingual",
        }

        task = task.lower()
        possible_tasks = list(folders.keys())
        if task not in possible_tasks:
            raise ValueError(f"task has to be one of {possible_tasks}, but is '{task}'")

        dataset_name = self.__class__.__name__.lower()
        # fetch (and cache) the dataset archive from the flair S3 bucket
        data_folder = cached_path("s3://multiconer", base_path / dataset_name) / "multiconer2022"

        task_folder = data_folder / folders[task]
        super().__init__(
            train_files=[task_folder / f"{task}_train.conll"],
            dev_files=[task_folder / f"{task}_dev.conll"],
            test_files=[task_folder / f"{task}_test.conll"],
            column_format={0: "text", 3: "ner"},
            comment_symbol="# id ",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_MULTI_CONER_V2(MultiFileColumnCorpus):
    def __init__(
        self,
        task: str = "multi",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        use_dev_as_test: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the MultiCoNer V2 corpus for the Semeval2023 workshop.

        This is only possible if you've applied and downloaded it to your machine.
        Apply for the corpus from here https://multiconer.github.io/dataset and unpack the .zip file's content into
        a folder called 'ner_multi_coner_v2'. Then set the base_path parameter in the constructor to the path to the
        parent directory where the ner_multi_coner_v2 folder resides. You can also create the multiconer in
        the {FLAIR_CACHE_ROOT}/datasets folder to leave the path empty.

        :param task: either 'multi', 'code-switch', or the language code for one of the mono tasks.
        :param base_path: Path to the ner_multi_coner_v2 corpus (i.e. 'ner_multi_coner_v2' folder) on your machine
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param use_dev_as_test: If True, it uses the dev set as test set and samples random training data for a dev split.
        """
        if base_path:
            base_path = Path(base_path)
        else:
            base_path = flair.cache_root / "datasets"

        # sub-directory of the dataset that holds each mono-lingual task
        folders = {
            "bn": "BN-Bangla",
            "de": "DE-German",
            "en": "EN-English",
            "es": "ES-Espanish",
            "fa": "FA-Farsi",
            "fr": "FR-French",
            "hi": "HI-Hindi",
            "it": "IT-Italian",
            "pt": "PT-Portuguese",
            "sv": "SV-Swedish",
            "uk": "UK-Ukrainian",
            "zh": "ZH-Chinese",
        }

        task = task.lower()
        possible_tasks = [*list(folders.keys()), "multi"]
        if task not in possible_tasks:
            raise ValueError(f"task has to be one of {possible_tasks}, but is '{task}'")

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name / "train_dev"
        if not data_folder.exists():
            log.warning("-" * 100)
            log.warning(f'WARNING: MultiCoNerV2 dataset not found at "{data_folder}".')
            log.warning('Instructions for obtaining the data can be found here: https://multiconer.github.io/dataset"')
            log.warning("-" * 100)

        # "multi" loads every language at once; otherwise only the requested one
        if task == "multi":
            train_split = list(data_folder.glob("*-train.conll"))
            dev_split = list(data_folder.glob("*-dev.conll"))
        else:
            train_split = [data_folder / f"{task}-train.conll"]
            dev_split = [data_folder / f"{task}-dev.conll"]

        test_split: List[Path] = []
        if use_dev_as_test:
            # repurpose the dev split as test data; a fresh dev split will then
            # be sampled from the training data downstream
            test_split, dev_split = dev_split, []

        super().__init__(
            train_files=train_split,
            dev_files=dev_split,
            test_files=test_split,
            column_format={0: "text", 3: "ner"},
            comment_symbol="# id ",
            in_memory=in_memory,
            **corpusargs,
        )
class NER_MULTI_WIKIANN(MultiCorpus):
    def __init__(
        self,
        languages: Union[str, List[str]] = "en",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the WkiAnn corpus for cross-lingual NER consisting of datasets from 282 languages that exist in Wikipedia.

        See https://elisa-ie.github.io/wikiann/ for details and for the languages and their
        respective abbreveations, i.e. "en" for english. (license: https://opendatacommons.org/licenses/by/)

        Parameters
        ----------
        languages : Union[str, List[str]]
            Should be an abbreviation of a language ("en", "de",..) or a list of abbreviations.
            The datasets of all passed languages will be saved in one MultiCorpus.
            (Note that, even though listed on https://elisa-ie.github.io/wikiann/ some datasets are empty.
            This includes "aa", "cho", "ho", "hz", "ii", "jam", "kj", "kr", "mus", "olo" and "tcy".)
        base_path : Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        in_memory : bool, optional
            Specify that the dataset should be loaded in memory, which speeds up the training process but takes increases the RAM usage significantly.
        """
        # a single language code may be passed as a plain string
        if isinstance(languages, str):
            languages = [languages]

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # For each language the file is downloaded if not existent, converted to the
        # simple two-column format, wrapped in a ColumnCorpus and collected; the list
        # of corpora is then handed to the MultiCorpus constructor.
        corpora: List[Corpus] = []

        google_drive_path = "https://drive.google.com/uc?id="
        # download data if necessary
        for language in languages:
            language_folder = data_folder / language
            file_name = "wikiann-" + language + ".bio"

            # if language not downloaded yet, download it
            if not language_folder.exists():
                # imports are idempotent, so it is safe to repeat them per language
                import tarfile

                import gdown

                # create folder
                os.makedirs(language_folder)

                # get google drive id from list
                google_id = self._google_drive_id_from_language_name(language)
                url = google_drive_path + google_id

                # download from google drive
                gdown.download(url, str(language_folder / language) + ".tar.gz")

                # unzip
                log.info("Extracting data...")
                # context manager guarantees the archive handle is closed even if extraction fails
                with tarfile.open(str(language_folder / language) + ".tar.gz", "r:gz") as tar:
                    tar.extract(file_name, str(language_folder))
                log.info("...done.")

                # transform data into required format
                # the processed dataset has the additional ending "_new"
                log.info("Processing dataset...")
                self._silver_standard_to_simple_ner_annotation(str(language_folder / file_name))
                # remove the unprocessed dataset
                os.remove(str(language_folder / file_name))
                log.info("...done.")

            # initialize columncorpus and add it to list
            log.info(f"Reading data for language {language}")
            corp = ColumnCorpus(
                data_folder=language_folder,
                column_format=columns,
                train_file=file_name + "_new",
                in_memory=in_memory,
                **corpusargs,
            )
            corpora.append(corp)
            log.info("...done.")

        super().__init__(
            corpora,
            name="wikiann",
        )

    def _silver_standard_to_simple_ner_annotation(self, data_file: Union[str, Path]):
        """Rewrite a silver-standard wikiann file into two-column (token, NER tag) format.

        The converted output is written next to the input with the suffix "_new".
        """
        with open(data_file, encoding="utf-8") as f_read, open(
            str(data_file) + "_new", "w+", encoding="utf-8"
        ) as f_write:
            # iterating the file object reads lazily, replacing the manual readline() loop
            for line in f_read:
                if line == "\n":
                    f_write.write(line)
                else:
                    fields = line.split()
                    # keep only the first (token) and last (NER tag) column
                    f_write.write(fields[0] + " " + fields[-1] + "\n")

    def _google_drive_id_from_language_name(self, language):
        """Return the Google Drive file id hosting the dataset of the given language code.

        Raises KeyError for unknown language codes. Entries marked "# empty" are
        listed on the wikiann page but contain no data.
        """
        languages_ids = {
            "aa": "1tDDlydKq7KQQ3_23Ysbtke4HJOe4snIk",  # empty
            "ab": "1hB8REj2XA_0DjI9hdQvNvSDpuBIb8qRf",
            "ace": "1WENJS2ppHcZqaBEXRZyk2zY-PqXkTkgG",
            "ady": "1n6On8WWDHxEoybj7F9K15d_fkGPy6KgO",
            "af": "1CPB-0BD2tg3zIT60D3hmJT0i5O_SKja0",
            "ak": "1l2vlGHnQwvm9XhW5S-403fetwUXhBlZm",
            "als": "196xyYjhbie7sYLHLZHWkkurOwQLi8wK-",
            "am": "1ug1IEoExKD3xWpvfZprAPSQi82YF9Cet",
            "an": "1DNLgPOAOsGZBYd6rC5ddhzvc9_DtWnk2",
            "ang": "1W_0ti7Tl8AkqM91lRCMPWEuUnPOAZroV",
            "ar": "1tyvd32udEQG_cNeVpaD5I2fxvCc6XKIS",
            "arc": "1hSOByStqPmP3b9HfQ39EclUZGo8IKCMb",
            "arz": "1CKW5ZhxTpIHmc8Jt5JLz_5O6Cr8Icsan",
            "as": "12opBoIweBLM8XciMHT4B6-MAaKdYdvpE",
            "ast": "1rp64PxGZBDfcw-tpFBjLg_ddLDElG1II",
            "av": "1hncGUrkG1vwAAQgLtwOf41BWkHkEvdss",
            "ay": "1VmIsWpMTz442b4Mx798ZOgtB9vquKQtf",
            "az": "1FXDXsvBSdqc7GGIDZv0hqBOaaw12Ip2-",
            "azb": "1amVqOuHLEkhjn8rkGUl-mXdZlaACWyNT",
            "ba": "1aLx1d8GagI11VZVYOGQy0BEePeqoT0x3",
            "bar": "1JZ8-k8ZmnpWYI_Yl_cBBgjVdxoM9Daci",
            "bat-smg": "1trxKXDFSeKsygTMKi-ZqXSJs7F90k5a8",
            "bcl": "1Hs0k7KVZ2DPsqroZ4cUKcwZG4HdPV794",
            "be-x-old": "1gaK-spj1m6eGYQ-SsngLxxLUvP1VRk08",
            "be": "1_ttfOSy9BzCRkIT_p3mImT82XRPpEiuH",
            "bg": "1Iug6gYKemb0OrLTUrKDc_c66YGypTfCF",
            "bh": "12OcSFLu940A8tVQLxI8pnxKBpTeZHmrh",
            "bi": "1rftVziS_pqARx4mvLJC0sKLY-OL5ZIjE",
            "bjn": "1n17mkRjPUAOWQk5LQs2C3Tz3ShxK0enZ",
            "bm": "1284dwO_sfdsWE7FR06HhfBRUb8ePesKR",
            "bn": "1K2DM1mT4hkr6NlAIBTj95BeVXcgvpgDm",
            "bo": "1SzGHDVK-OguKdjZ4DXWiOJVrie1iHeWm",
            "bpy": "1m-e5EoruJufvwBEgJLmJtx6jzx64pYN2",
            "br": "1xdaBoJ1DnwI0iEq7gQN1dWcABAs_bM9H",
            "bs": "167dsB01trMYFQl8FshtIdfhjw7IfVKbk",
            "bug": "1yCnevM9_KJzFk27Vxsva_20OacLo4Uam",
            "bxr": "1DlByAX3zB-9UyEAVD4wtX-R7mXC-8xum",
            "ca": "1LuUgbd9sGa-5Ahcsy31EK89a3WOowftY",
            "cbk-zam": "1kgF8xoD-kIOWZET_9kp_4yNX6AAXn6PI",
            "cdo": "14x1y6611G-UAEGq92QEHRpreVkYnoUCw",
            "ce": "1QUUCVKA-fkiCHd3KT3zUWefaWnxzlZLu",
            "ceb": "1DJZE9RfaMoPNXHI73KBXAm4YSe-_YCUk",
            "ch": "1YzAfhmatkmTpkZbAcD6X83epCgzD5S2_",
            "cho": "1ciY0vF3c5a2mTOo_k32A2wMs0klK98Kb",  # empty
            "chr": "1EHaxz1UZHn7v2bbRzCLAhPsNtRzrG3Ae",
            "chy": "1nNWwMAJr1KNdz3bHf6uIn-thZCknlTeB",
            "ckb": "1llpaftcUSiXCZQZMdAqaJSrhwMdcf9IV",
            "co": "1ZP-8oWgMYfW7a6w6ygEFkKDGbN39QnDn",
            "cr": "1ST0xRicLAG4JdCZwGdaY-0pEXooQh7e6",
            "crh": "1Jmpq2XVYUR_XaXU5XNhtOMnz-qkpsgpE",
            "cs": "1Vydyze-jBkK_S1uV5ewV_Y6dbwhXr7lk",
            "csb": "1naUyF74lZPnnopXdOqf5Xor2kT4WoHfS",
            "cu": "1EN5dVTU6jc7YOYPCHq8EYUF31HlMUKs7",
            "cv": "1gEUAlqYSSDI4TrWCqP1LUq2n0X1XEjN3",
            "cy": "1q5g6NJE5GXf65Vc_P4BnUMHQ49Prz-J1",
            "da": "11onAGOLkkqrIwM784siWlg-cewa5WKm8",
            "de": "1f9nWvNkCCy6XWhd9uf4Dq-2--GzSaYAb",
            "diq": "1IkpJaVbEOuOs9qay_KG9rkxRghWZhWPm",
            "dsb": "1hlExWaMth-2eVIQ3i3siJSG-MN_7Z6MY",
            "dv": "1WpCrslO4I7TMb2uaKVQw4U2U8qMs5szi",
            "dz": "10WX52ePq2KfyGliwPvY_54hIjpzW6klV",
            "ee": "1tYEt3oN2KPzBSWrk9jpCqnW3J1KXdhjz",
            "el": "1cxq4NUYmHwWsEn5waYXfFSanlINXWLfM",
            "eml": "17FgGhPZqZNtzbxpTJOf-6nxEuI5oU4Vd",
            "en": "1mqxeCPjxqmO7e8utj1MQv1CICLFVvKa-",
            "eo": "1YeknLymGcqj44ug2yd4P7xQVpSK27HkK",
            "es": "1Dnx3MVR9r5cuoOgeew2gT8bDvWpOKxkU",
            "et": "1Qhb3kYlQnLefWmNimdN_Vykm4mWzbcWy",
            "eu": "1f613wH88UeITYyBSEMZByK-nRNMwLHTs",
            "ext": "1D0nLOZ3aolCM8TShIRyCgF3-_MhWXccN",
            "fa": "1QOG15HU8VfZvJUNKos024xI-OGm0zhEX",
            "ff": "1h5pVjxDYcq70bSus30oqi9KzDmezVNry",
            "fi": "1y3Kf6qYsSvL8_nSEwE1Y6Bf6ninaPvqa",
            "fiu-vro": "1oKUiqG19WgPd3CCl4FGudk5ATmtNfToR",
            "fj": "10xDMuqtoTJlJFp5ghbhKfNWRpLDK3W4d",
            "fo": "1RhjYqgtri1276Be1N9RrNitdBNkpzh0J",
            "fr": "1sK_T_-wzVPJYrnziNqWTriU52rEsXGjn",
            "frp": "1NUm8B2zClBcEa8dHLBb-ZgzEr8phcQyZ",
            "frr": "1FjNqbIUlOW1deJdB8WCuWjaZfUzKqujV",
            "fur": "1oqHZMK7WAV8oHoZLjGR0PfmO38wmR6XY",
            "fy": "1DvnU6iaTJc9bWedmDklHyx8nzKD1s3Ge",
            "ga": "1Ql6rh7absdYQ8l-3hj_MVKcEC3tHKeFB",
            "gag": "1zli-hOl2abuQ2wsDJU45qbb0xuvYwA3a",
            "gan": "1u2dOwy58y-GaS-tCPJS_i9VRDQIPXwCr",
            "gd": "1umsUpngJiwkLdGQbRqYpkgxZju9dWlRz",
            "gl": "141K2IbLjJfXwFTIf-kthmmG0YWdi8liE",
            "glk": "1ZDaxQ6ilXaoivo4_KllagabbvfOuiZ0c",
            "gn": "1hM4MuCaVnZqnL-w-0N-WcWag22ikVLtZ",
            "gom": "1BNOSw75tzPC0wEgLOCKbwu9wg9gcLOzs",
            "got": "1YSHYBtXc1WvUvMIHPz6HHgJvaXKulJUj",
            "gu": "1VdK-B2drqFwKg8KD23c3dKXY-cZgCMgd",
            "gv": "1XZFohYNbKszEFR-V-yDXxx40V41PV9Zm",
            "ha": "18ZG4tUU0owRtQA8Ey3Dl72ALjryEJWMC",
            "hak": "1QQe3WgrCWbvnVH42QXD7KX4kihHURB0Z",
            "haw": "1FLqlK-wpz4jy768XbQAtxd9PhC-9ciP7",
            "he": "18K-Erc2VOgtIdskaQq4D5A3XkVstDmfX",
            "hi": "1lBRapb5tjBqT176gD36K5yb_qsaFeu-k",
            "hif": "153MQ9Ga4NQ-CkK8UiJM3DjKOk09fhCOV",
            "ho": "1c1AoS7yq15iVkTEE-0f3x25NT4F202B8",  # empty
            "hr": "1wS-UtB3sGHuXJQQGR0F5lDegogsgoyif",
            "hsb": "1_3mMLzAE5OmXn2z64rW3OwWbo85Mirbd",
            "ht": "1BwCaF0nfdgkM7Yt7A7d7KyVk0BcuwPGk",
            "hu": "10AkDmTxUWNbOXuYLYZ-ZPbLAdGAGZZ8J",
            "hy": "1Mi2k2alJJquT1ybd3GC3QYDstSagaWdo",
            "hz": "1c1m_-Q92v0Di7Nez6VuaccrN19i8icKV",  # empty
            "ia": "1jPyqTmDuVhEhj89N606Cja5heJEbcMoM",
            "id": "1JWIvIh8fQoMQqk1rPvUThaskxnTs8tsf",
            "ie": "1TaKRlTtB8-Wqu4sfvx6JQKIugAlg0pV-",
            "ig": "15NFAf2Qx6BXSjv_Oun9_3QRBWNn49g86",
            "ii": "1qldGJkMOMKwY13DpcgbxQCbff0K982f9",  # empty
            "ik": "1VoSTou2ZlwVhply26ujowDz6gjwtxmny",
            "ilo": "1-xMuIT6GaM_YeHqgm1OamGkxYfBREiv3",
            "io": "19Zla0wsAcrZm2c0Pw5ghpp4rHjYs26Pp",
            "is": "11i-NCyqS6HbldIbYulsCgQGZFXR8hwoB",
            "it": "1HmjlOaQunHqL2Te7pIkuBWrnjlmdfYo_",
            "iu": "18jKm1S7Ls3l0_pHqQH8MycG3LhoC2pdX",
            "ja": "10dz8UxyK4RIacXE2HcGdrharmp5rwc3r",
            "jam": "1v99CXf9RnbF6aJo669YeTR6mQRTOLZ74",  # empty
            "jbo": "1_LmH9hc6FDGE3F7pyGB1fUEbSwuTYQdD",
            "jv": "1qiSu1uECCLl4IBZS27FBdJIBivkJ7GwE",
            "ka": "172UFuFRBX2V1aWeXlPSpu9TjS-3cxNaD",
            "kaa": "1kh6hMPUdqO-FIxRY6qaIBZothBURXxbY",
            "kab": "1oKjbZI6ZrrALCqnPCYgIjKNrKDA7ehcs",
            "kbd": "1jNbfrboPOwJmlXQBIv053d7n5WXpMRv7",
            "kg": "1iiu5z-sdJ2JLC4Ja9IgDxpRZklIb6nDx",
            "ki": "1GUtt0QI84c5McyLGGxoi5uwjHOq1d6G8",
            "kj": "1nSxXUSGDlXVCIPGlVpcakRc537MwuKZR",  # empty
            "kk": "1ryC3UN0myckc1awrWhhb6RIi17C0LCuS",
            "kl": "1gXtGtX9gcTXms1IExICnqZUHefrlcIFf",
            "km": "1DS5ATxvxyfn1iWvq2G6qmjZv9pv0T6hD",
            "kn": "1ZGLYMxbb5-29MNmuUfg2xFhYUbkJFMJJ",
            "ko": "12r8tIkTnwKhLJxy71qpIcoLrT6NNhQYm",
            "koi": "1EdG_wZ_Qk124EPAZw-w6rdEhYLsgcvIj",
            "kr": "19VNQtnBA-YL_avWuVeHQHxJZ9MZ04WPF",  # empty
            "krc": "1nReV4Mb7Wdj96czpO5regFbdBPu0zZ_y",
            "ks": "1kzh0Pgrv27WRMstR9MpU8mu7p60TcT-X",
            "ksh": "1iHJvrl2HeRaCumlrx3N7CPrHQ2KuLUkt",
            "ku": "1YqJog7Bkk0fHBCSTxJ9heeE-bfbkbkye",
            "kv": "1s91HI4eq8lQYlZwfrJAgaGlCyAtIhvIJ",
            "kw": "16TaIX2nRfqDp8n7zudd4bqf5abN49dvW",
            "ky": "17HPUKFdKWhUjuR1NOp5f3PQYfMlMCxCT",
            "la": "1NiQuBaUIFEERvVXo6CQLwosPraGyiRYw",
            "lad": "1PEmXCWLCqnjLBomMAYHeObM1AmVHtD08",
            "lb": "1nE4g10xoTU23idmDtOQ0w2QCuizZ6QH_",
            "lbe": "1KOm-AdRcCHfSc1-uYBxBA4GjxXjnIlE-",
            "lez": "1cJAXshrLlF1TZlPHJTpDwEvurIOsz4yR",
            "lg": "1Ur0y7iiEpWBgHECrIrT1OyIC8um_y4th",
            "li": "1TikIqfqcZlSDWhOae1JnjJiDko4nj4Dj",
            "lij": "1ro5ItUcF49iP3JdV82lhCQ07MtZn_VjW",
            "lmo": "1W4rhBy2Pi5SuYWyWbNotOVkVY3kYWS_O",
            "ln": "1bLSV6bWx0CgFm7ByKppZLpYCFL8EIAoD",
            "lo": "1C6SSLeKF3QirjZbAZAcpVX_AXYg_TJG3",
            "lrc": "1GUcS28MlJe_OjeQfS2AJ8uczpD8ut60e",
            "lt": "1gAG6TcMTmC128wWK0rCXRlCTsJY9wFQY",
            "ltg": "12ziP8t_fAAS9JqOCEC0kuJObEyuoiOjD",
            "lv": "1MPuAM04u-AtfybXdpHwCqUpFWbe-zD0_",
            "mai": "1d_nUewBkka2QGEmxCc9v3dTfvo7lPATH",
            "map-bms": "1wrNIE-mqp2xb3lrNdwADe6pb7f35NP6V",
            "mdf": "1BmMGUJy7afuKfhfTBMiKxM3D7FY-JrQ2",
            "mg": "105WaMhcWa-46tCztoj8npUyg0aH18nFL",
            "mh": "1Ej7n6yA1cF1cpD5XneftHtL33iHJwntT",
            "mhr": "1CCPIUaFkEYXiHO0HF8_w07UzVyWchrjS",
            "mi": "1F6au9xQjnF-aNBupGJ1PwaMMM6T_PgdQ",
            "min": "1tVK5SHiCy_DaZSDm3nZBgT5bgWThbJt_",
            "mk": "18NpudytGhSWq_LbmycTDw10cSftlSBGS",
            "ml": "1V73UE-EvcE-vV3V1RTvU4sak6QFcP91y",
            "mn": "14jRXicA87oXZOZllWqUjKBMetNpQEUUp",
            "mo": "1YsLGNMsJ7VsekhdcITQeolzOSK4NzE6U",
            "mr": "1vOr1AIHbgkhTO9Ol9Jx5Wh98Qdyh1QKI",
            "mrj": "1dW-YmEW8a9D5KyXz8ojSdIXWGekNzGzN",
            "ms": "1bs-_5WNRiZBjO-DtcNtkcIle-98homf_",
            "mt": "1L7aU3iGjm6SmPIU74k990qRgHFV9hrL0",
            "mus": "1_b7DcRqiKJFEFwp87cUecqf8A5BDbTIJ",  # empty
            "mwl": "1MfP0jba2jQfGVeJOLq26MjI6fYY7xTPu",
            "my": "16wsIGBhNVd2lC2p6n1X8rdMbiaemeiUM",
            "myv": "1KEqHmfx2pfU-a1tdI_7ZxMQAk5NJzJjB",
            "mzn": "1CflvmYEXZnWwpsBmIs2OvG-zDDvLEMDJ",
            "na": "1r0AVjee5wNnrcgJxQmVGPVKg5YWz1irz",
            "nah": "1fx6eu91NegyueZ1i0XaB07CKjUwjHN7H",
            "nap": "1bhT4sXCJvaTchCIV9mwLBtf3a7OprbVB",
            "nds-nl": "1UIFi8eOCuFYJXSAXZ9pCWwkQMlHaY4ye",
            "nds": "1FLgZIXUWa_vekDt4ndY0B5XL7FNLiulr",
            "ne": "1gEoCjSJmzjIH4kdHsbDZzD6ID4_78ekS",
            "new": "1_-p45Ny4w9UvGuhD8uRNSPPeaARYvESH",
            "ng": "11yxPdkmpmnijQUcnFHZ3xcOmLTYJmN_R",
            "nl": "1dqYXg3ilzVOSQ_tz_dF47elSIvSIhgqd",
            "nn": "1pDrtRhQ001z2WUNMWCZQU3RV_M0BqOmv",
            "no": "1zuT8MI96Ivpiu9mEVFNjwbiM8gJlSzY2",
            "nov": "1l38388Rln0NXsSARMZHmTmyfo5C0wYTd",
            "nrm": "10vxPq1Nci7Wpq4XOvx3dtqODskzjdxJQ",
            "nso": "1iaIV8qlT0RDnbeQlnxJ3RehsG3gU5ePK",
            "nv": "1oN31jT0w3wP9aGwAPz91pSdUytnd9B0g",
            "ny": "1eEKH_rUPC560bfEg11kp3kbe8qWm35IG",
            "oc": "1C01cW8G_j8US-DTrsmeal_ENHTtNWn-H",
            "olo": "1vbDwKZKqFq84dusr1SvDx5JbBcPanx9L",  # empty
            "om": "1q3h22VMbWg2kgVFm-OArR-E4y1yBQ1JX",
            "or": "1k8LwCE8nC7lq6neXDaS3zRn0KOrd9RnS",
            "os": "1u81KAB34aEQfet00dLMRIBJsfRwbDTij",
            "pa": "1JDEHL1VcLHBamgTPBom_Ryi8hk6PBpsu",
            "pag": "1k905VUWnRgY8kFb2P2431Kr4dZuolYGF",
            "pam": "1ssugGyJb8ipispC60B3I6kzMsri1WcvC",
            "pap": "1Za0wfwatxYoD7jGclmTtRoBP0uV_qImQ",
            "pcd": "1csJlKgtG04pdIYCUWhsCCZARKIGlEYPx",
            "pdc": "1Xnms4RXZKZ1BBQmQJEPokmkiweTpouUw",
            "pfl": "1tPQfHX7E0uKMdDSlwNw5aGmaS5bUK0rn",
            "pi": "16b-KxNxzbEuyoNSlI3bfe2YXmdSEsPFu",
            "pih": "1vwyihTnS8_PE5BNK7cTISmIBqGWvsVnF",
            "pl": "1fijjS0LbfpKcoPB5V8c8fH08T8AkXRp9",
            "pms": "12ySc7X9ajWWqMlBjyrPiEdc-qVBuIkbA",
            "pnb": "1RB3-wjluhTKbdTGCsk3nag1bM3m4wENb",
            "pnt": "1ZCUzms6fY4on_fW8uVgO7cEs9KHydHY_",
            "ps": "1WKl9Av6Sqz6aHKyUM5kIh90mzFzyVWH9",
            "pt": "13BX-_4_hcTUp59HDyczFDI32qUB94vUY",
            "qu": "1CB_C4ygtRoegkqgcqfXNHr8oQd-UcvDE",
            "rm": "1YRSGgWoxEqSojHXuBHJnY8vAHr1VgLu-",
            "rmy": "1uFcCyvOWBJWKFQxbkYSp373xUXVl4IgF",
            "rn": "1ekyyb2MvupYGY_E8_BhKvV664sLvW4aE",
            "ro": "1YfeNTSoxU-zJMnyQotLk5X8B_6nHryBu",
            "roa-rup": "150s4H4TdQ5nNYVC6j0E416TUAjBE85yy",
            "roa-tara": "1H6emfQsD_a5yohK4RMPQ-GrnHXqqVgr3",
            "ru": "11gP2s-SYcfS3j9MjPp5C3_nFeQB-8x86",
            "rue": "1OuSglZAndja1J5D5IUmdbt_niTTyEgYK",
            "rw": "1NuhHfi0-B-Xlr_BApijnxCw0WMEltttP",
            "sa": "1P2S3gL_zvKgXLKJJxg-Fb4z8XdlVpQik",
            "sah": "1qz0MpKckzUref2FX_FYiNzI2p4BDc5oR",
            "sc": "1oAYj_Fty4FUwjAOBEBaiZt_cY8dtpDfA",
            "scn": "1sDN9zHkXWYoHYx-DUu-GPvsUgB_IRa8S",
            "sco": "1i8W7KQPj6YZQLop89vZBSybJNgNsvXWR",
            "sd": "1vaNqfv3S8Gl5pQmig3vwWQ3cqRTsXmMR",
            "se": "1RT9xhn0Vl90zjWYDTw5V1L_u1Oh16tpP",
            "sg": "1iIh2oXD2Szz_AygUvTt3_ZK8a3RYEGZ_",
            "sh": "1qPwLiAm6t4__G-zVEOrBgYx6VRmgDgiS",
            "si": "1G5ryceID0TP6SAO42e-HAbIlCvYmnUN7",
            "simple": "1FVV49o_RlK6M5Iw_7zeJOEDQoTa5zSbq",
            "sk": "11mkYvbmAWKTInj6t4Ma8BUPxoR5o6irL",
            "sl": "1fsIZS5LgMzMzZ6T7ogStyj-ILEZIBRvO",
            "sm": "1yefECpKX_Y4R7G2tggIxvc_BvJfOAz-t",
            "sn": "1fYeCjMPvRAv94kvZjiKI-ktIDLkbv0Ve",
            "so": "1Uc-eSZnJb36SgeTvRU3GirXZOlGD_NB6",
            "sq": "11u-53n71O_yjpwRiCQSwgL7N2w72ZptX",
            "sr": "1PGLGlQi8Q0Eac6dib-uuCJAAHK6SF5Pz",
            "srn": "1JKiL3TSXqK1-KhPfAwMK0uqw90WEzg7M",
            "ss": "1e0quNEsA1dn57-IbincF4D82dRWgzQlp",
            "st": "1ny-FBzpBqIDgv6jMcsoFev3Ih65FNZFO",
            "stq": "15Fx32ROy2IM6lSqAPUykkr3CITR6Xd7v",
            "su": "1C0FJum7bYZpnyptBvfAgwJb0TX2hggtO",
            "sv": "1YyqzOSXzK5yrAou9zeTDWH_7s569mDcz",
            "sw": "1_bNTj6T8eXlNAIuHaveleWlHB_22alJs",
            "szl": "1_dXEip1snK4CPVGqH8x7lF5O-6FdCNFW",
            "ta": "1ZFTONsxGtSnC9QB6RpWSvgD_MbZwIhHH",
            "tcy": "15R6u7KQs1vmDSm_aSDrQMJ3Q6q3Be0r7",  # empty
            "te": "11Sx-pBAPeZOXGyv48UNSVMD0AH7uf4YN",
            "tet": "11mr2MYLcv9pz7mHhGGNi5iNCOVErYeOt",
            "tg": "16ttF7HWqM9Cnj4qmgf3ZfNniiOJfZ52w",
            "th": "14xhIt-xr5n9nMuvcwayCGM1-zBCFZquW",
            "ti": "123q5e9MStMShp8eESGtHdSBGLDrCKfJU",
            "tk": "1X-JNInt34BNGhg8A8Peyjw2WjsALdXsD",
            "tl": "1WkQHbWd9cqtTnSHAv0DpUThaBnzeSPTJ",
            "tn": "1fHfQHetZn8-fLuRZEu-cvs-kQYwPvjyL",
            "to": "1cHOLaczYJ8h-OqQgxeoH9vMG3izg6muT",
            "tpi": "1YsRjxVu6NYOrXRb8oqMO9FPaicelFEcu",
            "tr": "1J1Zy02IxvtCK0d1Ba2h_Ulit1mVb9UIX",
            "ts": "1pIcfAt3KmtmDkyhOl-SMSeoM8aP8bOpl",
            "tt": "1vsfzCjj-_bMOn5jBai41TF5GjKJM_Ius",
            "tum": "1NWcg65daI2Bt0awyEgU6apUDbBmiqCus",
            "tw": "1WCYKZIqS7AagS76QFSfbteiOgFNBvNne",
            "ty": "1DIqaP1l-N9VXTNokrlr6EuPMGE765o4h",
            "tyv": "1F3qa05OYLBcjT1lXMurAJFDXP_EesCvM",
            "udm": "1T0YMTAPLOk768sstnewy5Jxgx2RPu3Rb",
            "ug": "1fjezvqlysyZhiQMZdazqLGgk72PqtXAw",
            "uk": "1UMJCHtzxkfLDBJE7NtfN5FeMrnnUVwoh",
            "ur": "1WNaD2TuHvdsF-z0k_emQYchwoQQDFmRk",
            "uz": "11wrG2FSTpRJc2jb5MhgvxjkVDYhT8M-l",
            "ve": "1PucJ7pJ4CXGEXZ5p_WleZDs2usNz74to",
            "vec": "1cAVjm_y3ehNteDQIYz9yyoq1EKkqOXZ0",
            "vep": "1K_eqV7O6C7KPJWZtmIuzFMKAagj-0O85",
            "vi": "1yQ6nhm1BmG9lD4_NaG1hE5VV6biEaV5f",
            "vls": "1bpQQW6pKHruKJJaKtuggH5rReMXyeVXp",
            "vo": "1D80QRdTpe7H4mHFKpfugscsjX71kiMJN",
            "wa": "1m4B81QYbf74htpInDU5p7d0n0ot8WLPZ",
            "war": "1EC3jsHtu22tHBv6jX_I4rupC5RwV3OYd",
            "wo": "1vChyqNNLu5xYHdyHpACwwpw4l3ptiKlo",
            "wuu": "1_EIn02xCUBcwLOwYnA-lScjS2Lh2ECw6",
            "xal": "19bKXsL1D2UesbB50JPyc9TpG1lNc2POt",
            "xh": "1pPVcxBG3xsCzEnUzlohc_p89gQ9dSJB3",
            "xmf": "1SM9llku6I_ZuZz05mOBuL2lx-KQXvehr",
            "yi": "1WNWr1oV-Nl7c1Jv8x_MiAj2vxRtyQawu",
            "yo": "1yNVOwMOWeglbOcRoZzgd4uwlN5JMynnY",
            "za": "1i7pg162cD_iU9h8dgtI2An8QCcbzUAjB",
            "zea": "1EWSkiSkPBfbyjWjZK0VuKdpqFnFOpXXQ",
            "zh-classical": "1uUKZamNp08KA7s7794sKPOqPALvo_btl",
            "zh-min-nan": "1oSgz3YBXLGUgI7kl-uMOC_ww6L0FNFmp",
            "zh-yue": "1zhwlUeeiyOAU1QqwqZ8n91yXIRPFA7UE",
            "zh": "1LZ96GUhkVHQU-aj2C3WOrtffOp0U3Z7f",
            "zu": "1FyXl_UK1737XB3drqQFhGXiJrJckiB1W",
        }
        return languages_ids[language]
class NER_MULTI_XTREME(MultiCorpus):
    def __init__(
        self,
        languages: Union[str, List[str]] = "en",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        **corpusargs,
    ) -> None:
        """Xtreme corpus for cross-lingual NER consisting of datasets of a total of 40 languages.

        The data comes from the google research work XTREME https://github.com/google-research/xtreme.
        The data is derived from the wikiann dataset https://elisa-ie.github.io/wikiann/ (license: https://opendatacommons.org/licenses/by/)

        Parameters
        ----------
        languages : Union[str, List[str]], optional
            Specify the languages you want to load. Provide an empty list or string to select all languages.
        base_path : Union[str, Path], optional
            Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this to point to a different folder but typically this should not be necessary.
        in_memory : bool, optional
            Specify that the dataset should be loaded in memory, which speeds up the training process but takes increases the RAM usage significantly.
        """
        # if no languages are given as argument all languages used in XTREME will be loaded
        if not languages:
            languages = [
                "af",
                "ar",
                "bg",
                "bn",
                "de",
                "el",
                "en",
                "es",
                "et",
                "eu",
                "fa",
                "fi",
                "fr",
                "he",
                "hi",
                "hu",
                "id",
                "it",
                "ja",
                "jv",
                "ka",
                "kk",
                "ko",
                "ml",
                "mr",
                "ms",
                "my",
                "nl",
                "pt",
                "ru",
                "sw",
                "ta",
                "te",
                "th",
                "tl",
                "tr",
                "ur",
                "vi",
                "yo",
                "zh",
            ]

        # a single language code may be passed as a plain string
        if isinstance(languages, str):
            languages = [languages]

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # For each language the data is downloaded (if necessary), converted to the
        # simple two-column format and wrapped in a ColumnCorpus; the list of corpora
        # then becomes one MultiCorpus.
        corpora: List[Corpus] = []
        hu_path = "https://nlp.informatik.hu-berlin.de/resources/datasets/panx_dataset"

        # download data if necessary
        for language in languages:
            language_folder = data_folder / language

            # if language not downloaded yet, download it
            if not language_folder.exists():
                import tarfile

                file_name = language + ".tar.gz"

                # create folder
                os.makedirs(language_folder)

                # download from HU Server
                temp_file = cached_path(
                    hu_path + "/" + file_name,
                    Path("datasets") / dataset_name / language,
                )

                # unzip
                log.info("Extracting data...")
                # context manager guarantees the archive handle is closed even if extraction fails
                with tarfile.open(str(temp_file), "r:gz") as tar:
                    for part in ["train", "test", "dev"]:
                        tar.extract(part, str(language_folder))
                log.info("...done.")

                # transform data into required format
                log.info("Processing dataset...")
                for part in ["train", "test", "dev"]:
                    self._xtreme_to_simple_ner_annotation(str(language_folder / part))
                log.info("...done.")

            # initialize columncorpus and add it to list
            log.info(f"Reading data for language {language}")
            corp = ColumnCorpus(
                data_folder=language_folder,
                column_format=columns,
                in_memory=in_memory,
                **corpusargs,
            )
            corpora.append(corp)

        super().__init__(
            corpora,
            name="xtreme",
        )

    def _xtreme_to_simple_ner_annotation(self, data_file: Union[str, Path]):
        """Strip the language prefix ("<lang>:token") from the token column, rewriting *data_file* in place."""
        with open(data_file, encoding="utf-8") as f:
            lines = f.readlines()
        with open(data_file, "w", encoding="utf-8") as f:
            for line in lines:
                if line == "\n":
                    f.write(line)
                else:
                    fields = line.split()
                    # split only on the first ":" so tokens that themselves contain a colon survive
                    f.write(fields[0].split(":", 1)[1] + " " + fields[1] + "\n")
class NER_MULTI_WIKINER(MultiCorpus):
    def __init__(
        self,
        languages: Union[str, List[str]] = "en",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the WikiNER corpora for one or more languages as a single MultiCorpus.

        :param languages: Language code ("en", "de", ...) or list of codes whose WikiNER
            dataset(s) should be loaded.
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
            to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # a single language code may be passed as a plain string
        if isinstance(languages, str):
            languages = [languages]

        # column format
        columns = {0: "text", 1: "pos", 2: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        corpora: List[Corpus] = []
        for language in languages:
            language_folder = data_folder / language

            # download data if necessary
            self._download_wikiner(language, str(language_folder))

            # initialize columncorpus and add it to list
            log.info(f"Read data for language {language}")
            corp = ColumnCorpus(
                data_folder=language_folder,
                column_format=columns,
                in_memory=in_memory,
                **corpusargs,
            )
            corpora.append(corp)

        super().__init__(
            corpora,
            name="wikiner",
        )

    def _download_wikiner(self, language_code: str, dataset_name: str):
        """Download and unpack the WikiNER archive for one language.

        The pipe-separated "token|pos|ner" format is rewritten as tab-separated
        CoNLL-style columns into the .train file (skipped if already present).
        """
        wikiner_path = "https://raw.githubusercontent.com/dice-group/FOX/master/input/Wikiner/"
        lc = language_code

        data_file = flair.cache_root / "datasets" / dataset_name / f"aij-wikiner-{lc}-wp3.train"
        if not data_file.is_file():
            cached_path(
                f"{wikiner_path}aij-wikiner-{lc}-wp3.bz2",
                Path("datasets") / dataset_name,
            )
            import bz2

            # unpack and write out in CoNLL column-like format
            bz_file = bz2.BZ2File(
                flair.cache_root / "datasets" / dataset_name / f"aij-wikiner-{lc}-wp3.bz2",
                "rb",
            )
            with bz_file as f, open(
                flair.cache_root / "datasets" / dataset_name / f"aij-wikiner-{lc}-wp3.train",
                "w",
                encoding="utf-8",
            ) as out:
                for lineb in f:
                    line = lineb.decode("utf-8")
                    words = line.split(" ")
                    for word in words:
                        # each space-separated word is "token|pos|ner"
                        out.write("\t".join(word.split("|")) + "\n")
class NER_SWEDISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the NER_SWEDISH corpus for Swedish.
        The first time you call this constructor it will automatically download the dataset.
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        # column format
        columns = {0: "text", 1: "ner"}
        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        # download data if necessary
        ner_spraakbanken_path = "https://raw.githubusercontent.com/klintan/swedish-ner-corpus/master/"
        cached_path(f"{ner_spraakbanken_path}test_corpus.txt", Path("datasets") / dataset_name)
        cached_path(f"{ner_spraakbanken_path}train_corpus.txt", Path("datasets") / dataset_name)
        # data is not in IOB2 format. Thus we transform it to IOB2
        # (the rewrite is idempotent: already-IOB2 lines pass through unchanged)
        self._add_IOB2_tags(data_file=Path(data_folder / "test_corpus.txt"))
        self._add_IOB2_tags(data_file=Path(data_folder / "train_corpus.txt"))
        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
    def _add_IOB2_tags(self, data_file: Union[str, Path], encoding: str = "utf8"):
        """Function that adds IOB2 tags if only chunk names are provided.
        e.g. words are tagged PER instead of B-PER or I-PER. Replaces '0' with 'O' as the no-chunk tag since ColumnCorpus expects
        the letter 'O'. Additionally it removes lines with no tags in the data file and can also
        be used if the data is only partially IOB tagged.
        Parameters
        ----------
        data_file : Union[str, Path]
            Path to the data file.
        encoding : str, optional
            Encoding used in open function. The default is "utf8".
        """
        # read the whole file first, then rewrite it in place
        with open(file=data_file, encoding=encoding) as f:
            lines = f.readlines()
        with open(file=data_file, mode="w", encoding=encoding) as f:
            pred = "O"  # remembers tag of predecessing line
            for line in lines:
                line_list = line.split()
                if len(line_list) == 2:  # word with tag
                    word = line_list[0]
                    tag = line_list[1]
                    if tag in ["0", "O"]:  # no chunk
                        f.write(word + " O\n")
                        pred = "O"
                    elif "-" not in tag:  # no IOB tags
                        if pred == "O":  # found a new chunk
                            f.write(word + " B-" + tag + "\n")
                            pred = tag
                        else:  # found further part of chunk or new chunk directly after old chunk
                            if pred == tag:
                                # previous token carried the same chunk type -> continuation
                                f.write(word + " I-" + tag + "\n")
                            else:
                                # different chunk type -> a new chunk starts right after the old one
                                f.write(word + " B-" + tag + "\n")
                            pred = tag
                    else:  # line already has IOB tag (tag contains '-')
                        f.write(line)
                        pred = tag.split("-")[1]
                elif len(line_list) == 0:  # empty line
                    f.write("\n")
                    pred = "O"
                # note: lines with exactly one column are dropped silently; this implements
                # the documented "removes lines with no tags" behavior
class NER_TURKU(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Finnish TurkuNER corpus.

        The corpus is downloaded automatically the first time this constructor is called.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # fetch all three splits from the TurkuNLP repository if not cached yet
        conll_path = "https://raw.githubusercontent.com/TurkuNLP/turku-ner-corpus/master/data/conll"
        split_files = {"dev": "dev.tsv", "test": "test.tsv", "train": "train.tsv"}
        for file_name in split_files.values():
            cached_path(f"{conll_path}/{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            dev_file=split_files["dev"],
            test_file=split_files["test"],
            train_file=split_files["train"],
            column_delimiter="\t",
            encoding="latin-1",
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            **corpusargs,
        )
class NER_UKRAINIAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Ukrainian NER corpus from the lang-uk project.

        The corpus is downloaded automatically the first time this constructor is called.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # only test and train splits are published for this fixed split
        conll_path = "https://raw.githubusercontent.com/lang-uk/flair-ner/master/fixed-split"
        test_file = "test.iob"
        train_file = "train.iob"
        for file_name in (test_file, train_file):
            cached_path(f"{conll_path}/{file_name}", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            test_file=test_file,
            train_file=train_file,
            column_delimiter=" ",
            encoding="utf-8",
            in_memory=in_memory,
            **corpusargs,
        )
class KEYPHRASE_SEMEVAL2017(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the SemEval-2017 keyphrase extraction corpus.

        Data is fetched automatically on first use and read as a column corpus
        with the token in column 0 and the keyword tag in column 1.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        columns = {0: "text", 1: "keyword"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download all three splits if not cached yet
        semeval2017_path = "https://raw.githubusercontent.com/midas-research/keyphrase-extraction-as-sequence-labeling-data/master/SemEval-2017"
        for split in ("train", "test", "dev"):
            cached_path(f"{semeval2017_path}/{split}.txt", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class KEYPHRASE_INSPEC(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the Inspec keyphrase extraction corpus.

        Data is fetched automatically on first use; the upstream "valid" split is
        renamed to "dev" to follow the train/test/dev naming convention.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        columns = {0: "text", 1: "keyword"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        inspec_path = "https://raw.githubusercontent.com/midas-research/keyphrase-extraction-as-sequence-labeling-data/master/Inspec"
        cached_path(f"{inspec_path}/train.txt", Path("datasets") / dataset_name)
        cached_path(f"{inspec_path}/test.txt", Path("datasets") / dataset_name)
        if "dev.txt" not in os.listdir(data_folder):
            # upstream ships the development split as "valid.txt"
            cached_path(f"{inspec_path}/valid.txt", Path("datasets") / dataset_name)
            os.rename(data_folder / "valid.txt", data_folder / "dev.txt")

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class KEYPHRASE_SEMEVAL2010(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the SemEval-2010 keyphrase extraction corpus.

        Data is fetched automatically on first use and read as a column corpus
        with the token in column 0 and the keyword tag in column 1. Only train
        and test splits are published upstream.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        columns = {0: "text", 1: "keyword"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        semeval2010_path = "https://raw.githubusercontent.com/midas-research/keyphrase-extraction-as-sequence-labeling-data/master/processed_semeval-2010"
        for split in ("train", "test"):
            cached_path(f"{semeval2010_path}/{split}.txt", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            **corpusargs,
        )
class UP_CHINESE(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Chinese dataset from the Universal Propositions Bank.

        The data is fetched from https://github.com/System-T/UniversalPropositions on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame annotation in column 9
        columns = {1: "text", 9: "frame"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download the three CoNLL-U splits if not cached yet
        up_zh_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Chinese/"
        for split in ("train", "dev", "test"):
            cached_path(f"{up_zh_path}zh-up-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file="zh-up-train.conllu",
            test_file="zh-up-test.conllu",
            dev_file="zh-up-dev.conllu",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            comment_symbol="#",
            **corpusargs,
        )
class UP_ENGLISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the English dataset from the Universal Propositions Bank.

        The data is fetched from https://github.com/System-T/UniversalPropositions on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1; the EWT export keeps the frame in column 10
        columns = {1: "text", 10: "frame"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download the three CoNLL-U splits if not cached yet
        up_en_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_English-EWT/"
        for split in ("train", "dev", "test"):
            cached_path(f"{up_en_path}en_ewt-up-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file="en_ewt-up-train.conllu",
            test_file="en_ewt-up-test.conllu",
            dev_file="en_ewt-up-dev.conllu",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            comment_symbol="#",
            label_name_map={"_": "O"},
            **corpusargs,
        )
class UP_FRENCH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the French dataset from the Universal Propositions Bank.

        The data is fetched from https://github.com/System-T/UniversalPropositions on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame annotation in column 9
        columns = {1: "text", 9: "frame"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download the three CoNLL-U splits if not cached yet
        up_fr_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_French/"
        for split in ("train", "dev", "test"):
            cached_path(f"{up_fr_path}fr-up-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file="fr-up-train.conllu",
            test_file="fr-up-test.conllu",
            dev_file="fr-up-dev.conllu",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            comment_symbol="#",
            **corpusargs,
        )
class UP_FINNISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Finnish dataset from the Universal Propositions Bank.

        The data is fetched from https://github.com/System-T/UniversalPropositions on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame annotation in column 9
        columns = {1: "text", 9: "frame"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download the three CoNLL-U splits if not cached yet
        up_fi_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Finnish/"
        for split in ("train", "dev", "test"):
            cached_path(f"{up_fi_path}fi-up-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file="fi-up-train.conllu",
            test_file="fi-up-test.conllu",
            dev_file="fi-up-dev.conllu",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            comment_symbol="#",
            **corpusargs,
        )
class UP_GERMAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the German dataset from the Universal Propositions Bank.

        The data is fetched from https://github.com/System-T/UniversalPropositions on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame annotation in column 9
        columns = {1: "text", 9: "frame"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download the three CoNLL-U splits if not cached yet
        up_de_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_German/"
        for split in ("train", "dev", "test"):
            cached_path(f"{up_de_path}de-up-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file="de-up-train.conllu",
            test_file="de-up-test.conllu",
            dev_file="de-up-dev.conllu",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            comment_symbol="#",
            **corpusargs,
        )
class UP_ITALIAN(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Italian dataset from the Universal Propositions Bank.

        The data is fetched from https://github.com/System-T/UniversalPropositions on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame annotation in column 9
        columns = {1: "text", 9: "frame"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download the three CoNLL-U splits if not cached yet
        up_it_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Italian/"
        for split in ("train", "dev", "test"):
            cached_path(f"{up_it_path}it-up-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file="it-up-train.conllu",
            test_file="it-up-test.conllu",
            dev_file="it-up-dev.conllu",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            comment_symbol="#",
            **corpusargs,
        )
class UP_SPANISH(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Spanish dataset from the Universal Propositions Bank.

        The data is fetched from https://github.com/System-T/UniversalPropositions on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame annotation in column 9
        columns = {1: "text", 9: "frame"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download the three CoNLL-U splits if not cached yet
        up_es_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Spanish/"
        for split in ("train", "dev", "test"):
            cached_path(f"{up_es_path}es-up-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file="es-up-train.conllu",
            test_file="es-up-test.conllu",
            dev_file="es-up-dev.conllu",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            comment_symbol="#",
            **corpusargs,
        )
class UP_SPANISH_ANCORA(ColumnCorpus):
    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        document_as_sequence: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the Spanish AnCora dataset from the Universal Propositions Bank.

        The data is fetched from https://github.com/System-T/UniversalPropositions on first use.

        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
        :param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
        """
        base_path = Path(base_path) if base_path else flair.cache_root / "datasets"

        # token text lives in column 1, the frame annotation in column 9
        columns = {1: "text", 9: "frame"}

        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download the three CoNLL-U splits if not cached yet
        up_es_path = "https://raw.githubusercontent.com/System-T/UniversalPropositions/master/UP_Spanish-AnCora/"
        for split in ("train", "dev", "test"):
            cached_path(f"{up_es_path}es_ancora-up-{split}.conllu", Path("datasets") / dataset_name)

        super().__init__(
            data_folder,
            columns,
            encoding="utf-8",
            train_file="es_ancora-up-train.conllu",
            test_file="es_ancora-up-test.conllu",
            dev_file="es_ancora-up-dev.conllu",
            in_memory=in_memory,
            document_separator_token=None if not document_as_sequence else "-DOCSTART-",
            comment_symbol="#",
            **corpusargs,
        )
class NER_HIPE_2022(ColumnCorpus):
    @staticmethod
    def _prepare_corpus(
        file_in: Path, file_out: Path, eos_marker: str, document_separator: str, add_document_separator: bool
    ):
        """Convert an original HIPE-2022 TSV file into the column format expected by flair.

        Sentence boundaries are introduced after each line annotated with ``eos_marker``,
        empty tokens are dropped, and (optionally) a ``-DOCSTART-`` line is inserted
        whenever ``document_separator`` marks the start of a new document.

        :param file_in: Path to the original HIPE-2022 TSV file.
        :param file_out: Path of the preprocessed output file.
        :param eos_marker: Annotation substring that marks the last token of a sentence.
        :param document_separator: Comment prefix that marks the start of a new document.
        :param add_document_separator: If True, writes a ``-DOCSTART-`` line per document.
        """
        with open(file_in, encoding="utf-8") as f_p:
            lines = f_p.readlines()

        with open(file_out, "w", encoding="utf-8") as f_out:
            # Add missing newline after header
            # (lines[0] still ends with "\n", so this leaves a blank line after the header)
            f_out.write(lines[0] + "\n")

            for line in lines[1:]:
                if line.startswith(" \t"):
                    # Workaround for empty tokens
                    continue

                line = line.strip()

                # Add "real" document marker
                if add_document_separator and line.startswith(document_separator):
                    f_out.write("-DOCSTART- O\n\n")

                f_out.write(line + "\n")

                if eos_marker in line:
                    f_out.write("\n")

    def __init__(
        self,
        dataset_name: str,
        language: str,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        version: str = "v2.1",
        branch_name: str = "main",
        dev_split_name="dev",
        add_document_separator=False,
        sample_missing_splits=False,
        preproc_fn=None,
        **corpusargs,
    ) -> None:
        """Initialize the CLEF-HIPE 2022 NER dataset.

        The first time you call this constructor it will automatically
        download the specified dataset (by given a language).

        :dataset_name: Supported datasets are: ajmc, hipe2020, letemps, newseye, sonar and topres19th.
        :language: Language for a supported dataset.
        :base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :in_memory: If True, keeps dataset in memory giving speedups in training.
        :version: Version of CLEF-HIPE dataset. Currently only v1.0 is supported and available.
        :branch_name: Defines git branch name of HIPE data repository (main by default).
        :dev_split_name: Defines default name of development split (dev by default). Only the NewsEye dataset has
        currently two development splits: dev and dev2.
        :add_document_separator: If True, a special document seperator will be introduced. This is highly
        recommended when using our FLERT approach.
        :sample_missing_splits: If True, data is automatically sampled when certain data splits are None.
        :preproc_fn: Function that is used for dataset preprocessing. If None, default preprocessing will be performed.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # Dataset split mapping: version -> dataset -> language -> available splits
        hipe_available_splits = {
            "v1.0": {
                "ajmc": {"de": ["sample"], "en": ["sample"]},
                "hipe2020": {"de": ["train", "dev"], "en": ["dev"], "fr": ["train", "dev"]},
                "letemps": {"fr": ["train", "dev"]},
                "newseye": {
                    "de": ["train", "dev", "dev2"],
                    "fi": ["train", "dev", "dev2"],
                    "fr": ["train", "dev", "dev2"],
                    "sv": ["train", "dev", "dev2"],
                },
                "sonar": {"de": ["dev"]},
                "topres19th": {"en": ["train", "dev"]},
            }
        }

        # v2.0 only adds new language and splits for AJMC dataset
        hipe_available_splits["v2.0"] = copy.deepcopy(hipe_available_splits["v1.0"])
        hipe_available_splits["v2.0"]["ajmc"] = {"de": ["train", "dev"], "en": ["train", "dev"], "fr": ["train", "dev"]}

        hipe_available_splits["v2.1"] = copy.deepcopy(hipe_available_splits["v2.0"])
        for dataset_name_values in hipe_available_splits["v2.1"].values():
            for splits in dataset_name_values.values():
                splits.append("test")  # test datasets are only available for >= v2.1

        eos_marker = "EndOfSentence"
        document_separator = "# hipe2022:document_id"

        # Special document marker for sample splits in AJMC dataset
        if dataset_name == "ajmc":
            document_separator = "# hipe2022:original_source"

        columns = {0: "text", 1: "ner"}

        dataset_base = self.__class__.__name__.lower()
        data_folder = base_path / dataset_base / version / dataset_name / language

        data_url = (
            f"https://github.com/hipe-eval/HIPE-2022-data/raw/{branch_name}/data/{version}/{dataset_name}/{language}"
        )

        # download the original TSV files for every available split
        dataset_splits = hipe_available_splits[version][dataset_name][language]
        for split in dataset_splits:
            cached_path(
                f"{data_url}/HIPE-2022-{version}-{dataset_name}-{split}-{language}.tsv", data_folder / "original"
            )

        train_file = "train.txt" if "train" in dataset_splits else None
        dev_file = f"{dev_split_name}.txt" if "sample" not in dataset_splits else "sample.txt"
        test_file = "test.txt" if "test" in dataset_splits else None

        new_data_folder = data_folder

        if add_document_separator:
            new_data_folder = new_data_folder / "with_doc_seperator"
            new_data_folder.mkdir(parents=True, exist_ok=True)

        self.preproc_fn = preproc_fn if preproc_fn else self._prepare_corpus

        if not all(  # Only reprocess if some files are not there yet
            split_path.exists()
            for split_path in [new_data_folder / f"{split_file}.txt" for split_file in dataset_splits]
        ):
            for split in dataset_splits:
                original_filename = f"HIPE-2022-{version}-{dataset_name}-{split}-{language}.tsv"
                self.preproc_fn(
                    data_folder / "original" / original_filename,
                    new_data_folder / f"{split}.txt",
                    eos_marker,
                    document_separator,
                    add_document_separator,
                )

        super().__init__(
            new_data_folder,
            columns,
            train_file=train_file,
            dev_file=dev_file,
            test_file=test_file,
            in_memory=in_memory,
            document_separator_token="-DOCSTART-",
            skip_first_line=True,
            column_delimiter="\t",
            comment_symbol="# ",
            sample_missing_splits=sample_missing_splits,
            **corpusargs,
        )
class NER_ICDAR_EUROPEANA(ColumnCorpus):
    def __init__(
        self,
        language: str,
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = True,
        **corpusargs,
    ) -> None:
        """Initialize the ICDAR Europeana NER dataset.

        The dataset is based on the French and Dutch Europeana NER corpora
        from the Europeana Newspapers NER dataset (https://lab.kb.nl/dataset/europeana-newspapers-ner), with additional
        preprocessing steps being performed (sentence splitting, punctuation normalizing, training/development/test splits).
        The resulting dataset is released in the "Data Centric Domain Adaptation for Historical Text with OCR Errors" ICDAR paper
        by Luisa März, Stefan Schweter, Nina Poerner, Benjamin Roth and Hinrich Schütze.

        :param language: Language for a supported dataset. Supported languages are "fr" (French) and "nl" (Dutch).
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
        :raises ValueError: If an unsupported language is requested.
        """
        supported_languages = ["fr", "nl"]

        if language not in supported_languages:
            log.error(f"Language '{language}' is not in list of supported languages!")
            log.error(f"Supported are '{supported_languages}'!")
            # ValueError subclasses Exception, so existing `except Exception` callers still work
            raise ValueError(f"Language '{language}' is not supported. Supported are {supported_languages}.")

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token text in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name / language

        # download data if necessary
        github_path = "https://raw.githubusercontent.com/stefan-it/historic-domain-adaptation-icdar/main/data"

        for split in ["train", "dev", "test"]:
            cached_path(f"{github_path}/{language}/{split}.txt", data_folder)

        super().__init__(
            data_folder,
            columns,
            in_memory=in_memory,
            train_file="train.txt",
            dev_file="dev.txt",
            test_file="test.txt",
            comment_symbol="# ",
            column_delimiter="\t",
            **corpusargs,
        )
class NER_NERMUD(MultiCorpus):
    def __init__(
        self,
        domains: Union[str, List[str]] = "all",
        base_path: Optional[Union[str, Path]] = None,
        in_memory: bool = False,
        **corpusargs,
    ) -> None:
        """Initialize the NERMuD 2023 dataset.

        NERMuD is a task presented at EVALITA 2023 consisting in the extraction and classification
        of named-entities in a document, such as persons, organizations, and locations. NERMuD 2023 includes two different sub-tasks:

        - Domain-agnostic classification (DAC). Participants will be asked to select and classify entities among three categories
          (person, organization, location) in different types of texts (news, fiction, political speeches) using one single general model.

        - Domain-specific classification (DSC). Participants will be asked to deploy a different model for each of the above types,
          trying to increase the accuracy for each considered type.

        :param domains: Domains to be used. Supported are "WN" (Wikinews), "FIC" (fiction), "ADG" (De Gasperi subset) and "all".
        :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
        to point to a different folder but typically this should not be necessary.
        :param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
        :raises ValueError: If an unsupported domain is requested.
        """
        supported_domains = ["WN", "FIC", "ADG"]

        # normalize `domains` to a list of domain codes ("all" selects every supported domain)
        if isinstance(domains, str):
            domains = supported_domains if domains == "all" else [domains]

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # column format: token text in column 0, NER tag in column 1
        columns = {0: "text", 1: "ner"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        data_folder = base_path / dataset_name

        corpora: List[Corpus] = []

        github_path = "https://raw.githubusercontent.com/dhfbk/KIND/main/evalita-2023"

        for domain in domains:
            if domain not in supported_domains:
                log.error(f"Domain '{domain}' is not in list of supported domains!")
                log.error(f"Supported are '{supported_domains}'!")
                # ValueError subclasses Exception, so existing `except Exception` callers still work
                raise ValueError(f"Domain '{domain}' is not supported. Supported are {supported_domains}.")

            domain_folder = data_folder / domain.lower()

            for split in ["train", "dev"]:
                cached_path(f"{github_path}/{domain}_{split}.tsv", domain_folder)

            corpus = ColumnCorpus(
                data_folder=domain_folder,
                train_file=f"{domain}_train.tsv",
                dev_file=f"{domain}_dev.tsv",
                test_file=None,
                column_format=columns,
                in_memory=in_memory,
                sample_missing_splits=False,  # No test data is available, so do not shrink dev data for shared task preparation!
                **corpusargs,
            )
            corpora.append(corpus)

        super().__init__(
            corpora,
            sample_missing_splits=False,
            name="nermud",
        )
| 197,364 | 40.160584 | 192 | py |
flair | flair-master/flair/datasets/document_classification.py | import csv
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Union
import flair
from flair.data import (
Corpus,
DataPair,
FlairDataset,
Sentence,
Tokenizer,
_iter_dataset,
)
from flair.datasets.base import find_train_dev_test_files
from flair.file_utils import cached_path, unpack_file, unzip_file
from flair.tokenization import SegtokTokenizer, SpaceTokenizer
log = logging.getLogger("flair")
class ClassificationCorpus(Corpus):
    """A classification corpus read from FastText-formatted text files."""

    def __init__(
        self,
        data_folder: Union[str, Path],
        label_type: str = "class",
        train_file=None,
        test_file=None,
        dev_file=None,
        truncate_to_max_tokens: int = -1,
        truncate_to_max_chars: int = -1,
        filter_if_longer_than: int = -1,
        tokenizer: Union[bool, Tokenizer] = SegtokTokenizer(),
        memory_mode: str = "partial",
        label_name_map: Optional[Dict[str, str]] = None,
        skip_labels: Optional[List[str]] = None,
        allow_examples_without_labels=False,
        sample_missing_splits: bool = True,
        encoding: str = "utf-8",
    ) -> None:
        """Instantiates a Corpus from text classification-formatted task data.

        :param data_folder: base folder with the task data
        :param label_type: name of the label
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file; if None, dev data is sampled from train
        :param truncate_to_max_tokens: If set, truncates each Sentence to a maximum number of tokens
        :param truncate_to_max_chars: If set, truncates each Sentence to a maximum number of chars
        :param filter_if_longer_than: If set, filters documents that are longer than the specified number of tokens
        :param tokenizer: Tokenizer for dataset, default is SegtokTokenizer
        :param memory_mode: Set to what degree to keep corpus in memory ('full', 'partial' or 'disk'). Use 'full'
        if full corpus and all embeddings fit into memory for speedups during training. Otherwise use 'partial' and if
        even this is too much for your memory, use 'disk'.
        :param label_name_map: Optionally map label names to different schema.
        :param allow_examples_without_labels: set to True to allow Sentences without label in the corpus.
        :param encoding: Default is 'utf-8' but some datasets are in 'latin-1'
        :return: a Corpus with annotated train, dev and test data
        """
        # locate any split files that were not passed in explicitly
        dev_file, test_file, train_file = find_train_dev_test_files(data_folder, dev_file, test_file, train_file)

        def make_split(split_file) -> FlairDataset:
            # every split is constructed with the exact same configuration
            return ClassificationDataset(
                split_file,
                label_type=label_type,
                tokenizer=tokenizer,
                truncate_to_max_tokens=truncate_to_max_tokens,
                truncate_to_max_chars=truncate_to_max_chars,
                filter_if_longer_than=filter_if_longer_than,
                memory_mode=memory_mode,
                label_name_map=label_name_map,
                skip_labels=skip_labels,
                allow_examples_without_labels=allow_examples_without_labels,
                encoding=encoding,
            )

        # train is always built; test/dev only if a corresponding file was found
        train: FlairDataset = make_split(train_file)
        test = make_split(test_file) if test_file is not None else None
        dev = make_split(dev_file) if dev_file is not None else None

        super().__init__(train, dev, test, name=str(data_folder), sample_missing_splits=sample_missing_splits)

        log.info(f"Initialized corpus {self.name} (label type name is '{label_type}')")
class ClassificationDataset(FlairDataset):
    """Dataset for classification instantiated from a single FastText-formatted file."""

    def __init__(
        self,
        path_to_file: Union[str, Path],
        label_type: str,
        truncate_to_max_tokens=-1,
        truncate_to_max_chars=-1,
        filter_if_longer_than: int = -1,
        tokenizer: Union[bool, Tokenizer] = SegtokTokenizer(),
        memory_mode: str = "partial",
        label_name_map: Optional[Dict[str, str]] = None,
        skip_labels: Optional[List[str]] = None,
        allow_examples_without_labels=False,
        encoding: str = "utf-8",
    ) -> None:
        """Reads a data file for text classification.

        The file should contain one document/text per line.
        The line should have the following format:
        __label__<class_name> <text>
        If you have a multi class task, you can have as many labels as you want at the beginning of the line, e.g.,
        __label__<class_name_1> __label__<class_name_2> <text>

        :param path_to_file: the path to the data file
        :param label_type: name of the label
        :param truncate_to_max_tokens: If set, truncates each Sentence to a maximum number of tokens
        :param truncate_to_max_chars: If set, truncates each Sentence to a maximum number of chars
        :param filter_if_longer_than: If set, filters documents that are longer than the specified number of tokens
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to what degree to keep corpus in memory ('full', 'partial' or 'disk'). Use 'full'
        if full corpus and all embeddings fit into memory for speedups during training. Otherwise use 'partial' and if
        even this is too much for your memory, use 'disk'.
        :param label_name_map: Optionally map label names to different schema.
        :param allow_examples_without_labels: set to True to allow Sentences without label in the Dataset.
        :param encoding: Default is 'utf-8' but some datasets are in 'latin-1'
        :return: list of sentences
        """
        path_to_file = Path(path_to_file)
        assert path_to_file.exists()

        self.label_prefix = "__label__"
        self.label_type = label_type
        self.memory_mode = memory_mode
        self.tokenizer = tokenizer

        # per-mode storage: parsed Sentences, raw lines, or byte offsets into the file
        if self.memory_mode == "full":
            self.sentences = []
        if self.memory_mode == "partial":
            self.lines = []
        if self.memory_mode == "disk":
            self.indices = []

        self.total_sentence_count: int = 0
        self.truncate_to_max_chars = truncate_to_max_chars
        self.truncate_to_max_tokens = truncate_to_max_tokens
        self.filter_if_longer_than = filter_if_longer_than
        self.label_name_map = label_name_map
        self.allow_examples_without_labels = allow_examples_without_labels

        self.path_to_file = path_to_file

        with open(str(path_to_file), encoding=encoding) as f:
            line = f.readline()
            position = 0  # byte offset of the line currently held in 'line'
            while line:
                # skip lines without any label (unless allowed) or without any text
                if ("__label__" not in line and not allow_examples_without_labels) or (
                    " " not in line and "\t" not in line
                ):
                    position = f.tell()
                    line = f.readline()
                    continue

                # skip documents longer than the configured token filter
                if 0 < self.filter_if_longer_than < len(line.split(" ")):
                    position = f.tell()
                    line = f.readline()
                    continue

                # if data point contains black-listed label, do not use
                if skip_labels:
                    skip = False
                    for skip_label in skip_labels:
                        if "__label__" + skip_label in line:
                            skip = True
                    if skip:
                        # fix: advance the file offset here as well — previously 'position'
                        # was left pointing at the skipped line, so in 'disk' mode the next
                        # accepted example was indexed at the blacklisted line's offset
                        position = f.tell()
                        line = f.readline()
                        continue

                if self.memory_mode == "full":
                    sentence = self._parse_line_to_sentence(line, self.label_prefix, tokenizer)
                    if sentence is not None and len(sentence.tokens) > 0:
                        self.sentences.append(sentence)
                        self.total_sentence_count += 1

                if self.memory_mode in ("partial", "disk"):
                    # first check if valid sentence: strip leading __label__ tokens
                    words = line.split()
                    l_len = 0
                    label = False
                    for i in range(len(words)):
                        if words[i].startswith(self.label_prefix):
                            l_len += len(words[i]) + 1
                            label = True
                        else:
                            break
                    text = line[l_len:].strip()

                    # if so, remember the raw line or its byte offset
                    if text and (label or allow_examples_without_labels):
                        if self.memory_mode == "partial":
                            self.lines.append(line)
                            self.total_sentence_count += 1
                        if self.memory_mode == "disk":
                            self.indices.append(position)
                            self.total_sentence_count += 1

                position = f.tell()
                line = f.readline()

    def _parse_line_to_sentence(self, line: str, label_prefix: str, tokenizer: Union[bool, Tokenizer]):
        """Parses one FastText-formatted line into a labeled Sentence (or None if invalid)."""
        words = line.split()

        # collect all leading __label__ tokens and track their combined length
        labels = []
        l_len = 0

        for i in range(len(words)):
            if words[i].startswith(label_prefix):
                l_len += len(words[i]) + 1
                label = words[i].replace(label_prefix, "")
                if self.label_name_map and label in self.label_name_map:
                    label = self.label_name_map[label]  # remap label to target schema
                labels.append(label)
            else:
                break

        text = line[l_len:].strip()

        if self.truncate_to_max_chars > 0:
            text = text[: self.truncate_to_max_chars]

        if text and (labels or self.allow_examples_without_labels):
            sentence = Sentence(text, use_tokenizer=tokenizer)

            for label in labels:
                sentence.add_label(self.label_type, label)

            if sentence is not None and 0 < self.truncate_to_max_tokens < len(sentence):
                sentence.tokens = sentence.tokens[: self.truncate_to_max_tokens]

            return sentence
        return None

    def is_in_memory(self) -> bool:
        """Returns True only in 'full' mode; 'partial' and 'disk' re-parse on access."""
        if self.memory_mode == "disk":
            return False
        if self.memory_mode == "partial":
            return False
        return True

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        """Returns the Sentence at the given index, parsing lazily unless in 'full' mode."""
        if self.memory_mode == "full":
            return self.sentences[index]

        if self.memory_mode == "partial":
            sentence = self._parse_line_to_sentence(self.lines[index], self.label_prefix, self.tokenizer)
            return sentence

        if self.memory_mode == "disk":
            # seek to the stored byte offset and parse the line on demand
            with open(str(self.path_to_file), encoding="utf-8") as file:
                file.seek(self.indices[index])
                line = file.readline()
            sentence = self._parse_line_to_sentence(line, self.label_prefix, self.tokenizer)
            return sentence
        raise AssertionError
class CSVClassificationCorpus(Corpus):
    """Classification corpus instantiated from CSV data files."""

    def __init__(
        self,
        data_folder: Union[str, Path],
        column_name_map: Dict[int, str],
        label_type: str,
        name: str = "csv_corpus",
        train_file=None,
        test_file=None,
        dev_file=None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        tokenizer: Tokenizer = SegtokTokenizer(),
        in_memory: bool = False,
        skip_header: bool = False,
        encoding: str = "utf-8",
        no_class_label=None,
        **fmtparams,
    ) -> None:
        """Instantiates a Corpus for text classification from CSV column formatted data.

        :param data_folder: base folder with the task data
        :param column_name_map: a column name map that indicates which column is text and which the label(s)
        :param label_type: name of the label
        :param train_file: the name of the train file
        :param test_file: the name of the test file
        :param dev_file: the name of the dev file; if None, dev data is sampled from train
        :param max_tokens_per_doc: If set, truncates each Sentence to a maximum number of Tokens
        :param max_chars_per_doc: If set, truncates each Sentence to a maximum number of chars
        :param tokenizer: Tokenizer for dataset, default is SegtokTokenizer
        :param in_memory: If True, keeps dataset as Sentences in memory, otherwise only keeps strings
        :param skip_header: If True, skips first line because it is header
        :param encoding: Default is 'utf-8' but some datasets are in 'latin-1'
        :param fmtparams: additional parameters for the CSV file reader
        :return: a Corpus with annotated train, dev and test data
        """
        # locate any split files that were not passed in explicitly
        dev_file, test_file, train_file = find_train_dev_test_files(data_folder, dev_file, test_file, train_file)

        def build_split(split_file) -> "CSVClassificationDataset":
            # every split is read with the exact same configuration
            return CSVClassificationDataset(
                split_file,
                column_name_map,
                label_type=label_type,
                tokenizer=tokenizer,
                max_tokens_per_doc=max_tokens_per_doc,
                max_chars_per_doc=max_chars_per_doc,
                in_memory=in_memory,
                skip_header=skip_header,
                encoding=encoding,
                no_class_label=no_class_label,
                **fmtparams,
            )

        # train is always built; test/dev only when a corresponding file exists
        train: FlairDataset = build_split(train_file)
        test = build_split(test_file) if test_file is not None else None
        dev = build_split(dev_file) if dev_file is not None else None

        super().__init__(train, dev, test, name=name)
class CSVClassificationDataset(FlairDataset):
    """Dataset for text classification from CSV column formatted data."""

    def __init__(
        self,
        path_to_file: Union[str, Path],
        column_name_map: Dict[int, str],
        label_type: str,
        max_tokens_per_doc: int = -1,
        max_chars_per_doc: int = -1,
        tokenizer: Tokenizer = SegtokTokenizer(),
        in_memory: bool = True,
        skip_header: bool = False,
        encoding: str = "utf-8",
        no_class_label=None,
        **fmtparams,
    ) -> None:
        """Instantiates a Dataset for text classification from CSV column formatted data.

        :param path_to_file: path to the file with the CSV data
        :param column_name_map: a column name map that indicates which column is text and which the label(s)
        :param label_type: name of the label
        :param max_tokens_per_doc: If set, truncates each Sentence to a maximum number of Tokens
        :param max_chars_per_doc: If set, truncates each Sentence to a maximum number of chars
        :param tokenizer: Tokenizer for dataset, default is SegtokTokenizer
        :param in_memory: If True, keeps dataset as Sentences in memory, otherwise only keeps strings
        :param skip_header: If True, skips first line because it is header
        :param encoding: Most datasets are 'utf-8' but some are 'latin-1'
        :param fmtparams: additional parameters for the CSV file reader
        :return: a Corpus with annotated train, dev and test data
        """
        path_to_file = Path(path_to_file)
        assert path_to_file.exists()

        # variables
        self.path_to_file = path_to_file
        self.in_memory = in_memory
        self.tokenizer = tokenizer
        self.column_name_map = column_name_map
        self.max_tokens_per_doc = max_tokens_per_doc
        self.max_chars_per_doc = max_chars_per_doc
        self.no_class_label = no_class_label

        self.label_type = label_type

        # different handling of in_memory data than streaming data
        if self.in_memory:
            self.sentences = []
        else:
            self.raw_data = []

        self.total_sentence_count: int = 0

        # most data sets have the token text in the first column, if not, pass 'text' as column
        self.text_columns: List[int] = []
        self.pair_columns: List[int] = []
        for column in column_name_map:
            if column_name_map[column] == "text":
                self.text_columns.append(column)
            if column_name_map[column] == "pair":
                self.pair_columns.append(column)

        with open(self.path_to_file, encoding=encoding) as csv_file:
            csv_reader = csv.reader(csv_file, **fmtparams)

            if skip_header:
                next(csv_reader, None)  # skip the headers

            for row in csv_reader:
                # test if format is OK: every text column must exist in the row
                wrong_format = False
                for text_column in self.text_columns:
                    if text_column >= len(row):
                        wrong_format = True

                if wrong_format:
                    continue

                # test if at least one label given
                has_label = False
                for column in self.column_name_map:
                    if self.column_name_map[column].startswith("label") and row[column]:
                        has_label = True
                        break

                if not has_label:
                    continue

                if self.in_memory:
                    sentence = self._make_labeled_data_point(row)
                    self.sentences.append(sentence)
                else:
                    self.raw_data.append(row)

                self.total_sentence_count += 1

    def _make_labeled_data_point(self, row):
        """Converts one CSV row into a labeled Sentence (or DataPair if 'pair' columns exist)."""
        # make sentence from text (and filter for length)
        text = " ".join([row[text_column] for text_column in self.text_columns])

        if self.max_chars_per_doc > 0:
            text = text[: self.max_chars_per_doc]

        sentence = Sentence(text, use_tokenizer=self.tokenizer)

        if 0 < self.max_tokens_per_doc < len(sentence):
            sentence.tokens = sentence.tokens[: self.max_tokens_per_doc]

        # if a pair column is defined, make a sentence pair object
        if len(self.pair_columns) > 0:
            text = " ".join([row[pair_column] for pair_column in self.pair_columns])

            if self.max_chars_per_doc > 0:
                text = text[: self.max_chars_per_doc]

            pair = Sentence(text, use_tokenizer=self.tokenizer)

            # fix: truncate based on the pair's own length (previously this
            # checked len(sentence), so over-long pairs were never truncated)
            if 0 < self.max_tokens_per_doc < len(pair):
                pair.tokens = pair.tokens[: self.max_tokens_per_doc]

            data_point = DataPair(first=sentence, second=pair)
        else:
            data_point = sentence

        for column in self.column_name_map:
            column_value = row[column]
            if (
                self.column_name_map[column].startswith("label")
                and column_value
                and column_value != self.no_class_label
            ):
                data_point.add_label(self.label_type, column_value)

        return data_point

    def is_in_memory(self) -> bool:
        return self.in_memory

    def __len__(self) -> int:
        return self.total_sentence_count

    def __getitem__(self, index: int = 0) -> Sentence:
        """Returns the data point at the given index, parsing lazily unless in memory."""
        if self.in_memory:
            return self.sentences[index]
        else:
            row = self.raw_data[index]
            sentence = self._make_labeled_data_point(row)
            return sentence
class AMAZON_REVIEWS(ClassificationCorpus):
    """A very large corpus of Amazon reviews with positivity ratings.

    Corpus is downloaded from and documented at
    https://nijianmo.github.io/amazon/index.html.

    We download the 5-core subset which is still tens of millions of
    reviews.
    """

    # category files of the 5-core subset that are downloaded and concatenated.
    # (fix: 'Arts_Crafts_and_Sewing' was previously downloaded and appended twice,
    # which duplicated all of its reviews in the generated train split)
    _PART_NAMES = (
        "AMAZON_FASHION",
        "All_Beauty",
        "Appliances",
        "Arts_Crafts_and_Sewing",
        "Automotive",
        "Books",
        "CDs_and_Vinyl",
        "Cell_Phones_and_Accessories",
        "Clothing_Shoes_and_Jewelry",
        "Digital_Music",
        "Electronics",
        "Gift_Cards",
        "Grocery_and_Gourmet_Food",
        "Home_and_Kitchen",
        "Industrial_and_Scientific",
        "Kindle_Store",
        "Luxury_Beauty",
        "Magazine_Subscriptions",
        "Movies_and_TV",
        "Musical_Instruments",
        "Office_Products",
        "Patio_Lawn_and_Garden",
        "Pet_Supplies",
        "Prime_Pantry",
        "Software",
        "Sports_and_Outdoors",
        "Tools_and_Home_Improvement",
        "Toys_and_Games",
        "Video_Games",
    )

    # noinspection PyDefaultArgument
    def __init__(
        self,
        split_max: int = 30000,
        label_name_map: Dict[str, str] = {
            "1.0": "NEGATIVE",
            "2.0": "NEGATIVE",
            "3.0": "NEGATIVE",
            "4.0": "POSITIVE",
            "5.0": "POSITIVE",
        },
        skip_labels=["3.0", "4.0"],
        fraction_of_5_star_reviews: int = 10,
        tokenizer: Tokenizer = SegtokTokenizer(),
        memory_mode="partial",
        **corpusargs,
    ) -> None:
        """Constructs corpus object.

        Split_max indicates how many data points from each of the category splits are used, so
        set this higher or lower to increase/decrease corpus size.

        :param label_name_map: Map label names to different schema. By default, the 5-star rating is mapped onto 2
        classes (POSITIVE, NEGATIVE)
        :param split_max: how many data points from each of the category splits are used; set this
        higher or lower to increase/decrease corpus size.
        :param skip_labels: raw rating labels to exclude from the corpus entirely.
        :param fraction_of_5_star_reviews: keep only every n-th 5-star review to counter their over-representation.
        :param memory_mode: Set to what degree to keep corpus in memory ('full', 'partial' or 'disk'). Use 'full'
        if full corpus and all embeddings fit into memory for speedups during training. Otherwise use 'partial' and if
        even this is too much for your memory, use 'disk'.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param corpusargs: Arguments for ClassificationCorpus
        """
        # dataset name includes the split size so different configurations get separate caches
        dataset_name = self.__class__.__name__.lower() + "_" + str(split_max) + "_" + str(fraction_of_5_star_reviews)

        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name

        # download data if necessary
        if not (data_folder / "train.txt").is_file():
            # download and append each category split to train.txt
            for part_name in self._PART_NAMES:
                self.download_and_prepare_amazon_product_file(
                    data_folder, f"{part_name}_5.json.gz", split_max, fraction_of_5_star_reviews
                )

        super().__init__(
            data_folder,
            label_type="sentiment",
            label_name_map=label_name_map,
            skip_labels=skip_labels,
            tokenizer=tokenizer,
            memory_mode=memory_mode,
            **corpusargs,
        )

    def download_and_prepare_amazon_product_file(
        self, data_folder, part_name, max_data_points=None, fraction_of_5_star_reviews=None
    ):
        """Downloads one gzipped category file and appends its reviews to train.txt.

        :param data_folder: folder in which train.txt is created/extended
        :param part_name: file name of the category split (e.g. 'Books_5.json.gz')
        :param max_data_points: stop after writing this many reviews from this split
        :param fraction_of_5_star_reviews: keep only every n-th 5-star review (downsampling)
        """
        amazon__path = "http://deepyeti.ucsd.edu/jianmo/amazon/categoryFilesSmall"
        cached_path(f"{amazon__path}/{part_name}", Path("datasets") / "Amazon_Product_Reviews")

        import gzip

        # create dataset directory if necessary
        if not os.path.exists(data_folder):
            os.makedirs(data_folder)

        # fix: write with an explicit encoding — review text is arbitrary Unicode and the
        # locale-dependent default could raise UnicodeEncodeError on some systems
        with open(data_folder / "train.txt", "a", encoding="utf-8") as train_file:
            write_count = 0
            review_5_count = 0

            # stream the gzipped JSON-lines file; one review per line
            with gzip.open(flair.cache_root / "datasets" / "Amazon_Product_Reviews" / part_name, "rb") as f_in:
                for line in f_in:
                    parsed_json = json.loads(line)
                    if "reviewText" not in parsed_json:
                        continue
                    if parsed_json["reviewText"].strip() == "":
                        continue
                    text = parsed_json["reviewText"].replace("\n", "")

                    # downsample 5-star reviews: keep only every n-th one
                    if fraction_of_5_star_reviews and str(parsed_json["overall"]) == "5.0":
                        review_5_count += 1
                        if review_5_count != fraction_of_5_star_reviews:
                            continue
                        else:
                            review_5_count = 0

                    train_file.write(f"__label__{parsed_json['overall']} {text}\n")
                    write_count += 1
                    if max_data_points and write_count >= max_data_points:
                        break
class IMDB(ClassificationCorpus):
    """Corpus of IMDB movie reviews labeled by sentiment (POSITIVE, NEGATIVE).

    Downloaded from and documented at http://ai.stanford.edu/~amaas/data/sentiment/.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        rebalance_corpus: bool = True,
        tokenizer: Tokenizer = SegtokTokenizer(),
        memory_mode="partial",
        **corpusargs,
    ) -> None:
        """Initialize the IMDB movie review sentiment corpus.

        :param base_path: Provide this only if you store the IMDB corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param rebalance_corpus: Default splits for this corpus have a strange 50/50 train/test split that are impractical.
        With rebalance_corpus=True (default setting), corpus is rebalanced to a 80/10/10 train/dev/test split. If you
        want to use original splits, set this to False.
        :param memory_mode: Set to 'partial' because this is a huge corpus, but you can also set to 'full' for faster
        processing or 'none' for less memory.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower() + "_v4"

        # download data if necessary
        imdb_acl_path = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
        if rebalance_corpus:
            dataset_name = dataset_name + "-rebalanced"
        data_folder = base_path / dataset_name
        data_path = flair.cache_root / "datasets" / dataset_name
        train_data_file = data_path / "train.txt"
        test_data_file = data_path / "test.txt"

        if not train_data_file.is_file() or (not rebalance_corpus and not test_data_file.is_file()):
            # remove any half-written split files from a previous failed run
            for file_path in [train_data_file, test_data_file]:
                if file_path.is_file():
                    os.remove(file_path)

            cached_path(imdb_acl_path, Path("datasets") / dataset_name)
            import tarfile

            with tarfile.open(flair.cache_root / "datasets" / dataset_name / "aclImdb_v1.tar.gz", "r:gz") as f_in:
                datasets = ["train", "test"]
                labels = ["pos", "neg"]

                for label in labels:
                    for dataset in datasets:
                        # extract only the members belonging to this split/label combination
                        f_in.extractall(
                            data_path, members=[m for m in f_in.getmembers() if f"{dataset}/{label}" in m.name]
                        )

                        # in rebalanced mode, everything goes into train.txt
                        data_file = train_data_file
                        if not rebalance_corpus and dataset == "test":
                            data_file = test_data_file

                        # fix: open with an explicit encoding — the reviews are UTF-8 and the
                        # locale-dependent default could raise UnicodeEncodeError on some systems
                        with open(data_file, "at", encoding="utf-8") as f_p:
                            current_path = data_path / "aclImdb" / dataset / label
                            for file_name in current_path.iterdir():
                                if file_name.is_file() and file_name.name.endswith(".txt"):
                                    sentiment_label = "POSITIVE" if label == "pos" else "NEGATIVE"
                                    # fix: read via read_text so the file handle is closed
                                    # (previously .open(...).read() leaked the handle)
                                    f_p.write(
                                        f"__label__{sentiment_label} "
                                        + file_name.read_text(encoding="utf-8")
                                        + "\n"
                                    )

        super().__init__(
            data_folder, label_type="sentiment", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class NEWSGROUPS(ClassificationCorpus):
    """20 newsgroups corpus, classifying news items into one of 20 categories.

    Downloaded from http://qwone.com/~jason/20Newsgroups

    Each data point is a full news article so documents may be very
    long.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Tokenizer = SegtokTokenizer(),
        memory_mode: str = "partial",
        **corpusargs,
    ) -> None:
        """Instantiates the 20 newsgroups corpus.

        :param base_path: Provide this only if you store the corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to 'partial' because this is a big corpus, but you can also set to 'full' for faster
        processing or 'none' for less memory.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # cache locations for this dataset
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        data_path = flair.cache_root / "datasets" / dataset_name

        archive_url = "http://qwone.com/~jason/20Newsgroups/20news-bydate.tar.gz"

        # only download and convert if the train split file does not exist yet
        if not (data_path / "20news-bydate-train.txt").is_file():
            cached_path(archive_url, Path("datasets") / dataset_name / "original")

            import tarfile

            with tarfile.open(
                flair.cache_root / "datasets" / dataset_name / "original" / "20news-bydate.tar.gz", "r:gz"
            ) as archive:
                splits = ["20news-bydate-test", "20news-bydate-train"]
                newsgroups = [
                    "alt.atheism",
                    "comp.graphics",
                    "comp.os.ms-windows.misc",
                    "comp.sys.ibm.pc.hardware",
                    "comp.sys.mac.hardware",
                    "comp.windows.x",
                    "misc.forsale",
                    "rec.autos",
                    "rec.motorcycles",
                    "rec.sport.baseball",
                    "rec.sport.hockey",
                    "sci.crypt",
                    "sci.electronics",
                    "sci.med",
                    "sci.space",
                    "soc.religion.christian",
                    "talk.politics.guns",
                    "talk.politics.mideast",
                    "talk.politics.misc",
                    "talk.religion.misc",
                ]

                for newsgroup in newsgroups:
                    for split in splits:
                        # extract only the archive members of this split/newsgroup pair
                        wanted_members = [m for m in archive.getmembers() if f"{split}/{newsgroup}" in m.name]
                        archive.extractall(data_path / "original", members=wanted_members)

                        # append each extracted article as one FastText-formatted line
                        with open(f"{data_path}/{split}.txt", "at", encoding="utf-8") as out_file:
                            extracted_dir = data_path / "original" / split / newsgroup
                            for article_path in extracted_dir.iterdir():
                                if article_path.is_file():
                                    article_text = (
                                        article_path.open("rt", encoding="latin1").read().replace("\n", " <n> ")
                                    )
                                    out_file.write(f"__label__{newsgroup} " + article_text + "\n")

        super().__init__(data_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class STACKOVERFLOW(ClassificationCorpus):
    """Stackoverflow corpus classifying questions into one of 20 labels.

    The data will be downloaded from "https://github.com/jacoxu/StackOverflow",

    Each data point is a question.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Tokenizer = SegtokTokenizer(),
        memory_mode: str = "partial",
        **corpusargs,
    ) -> None:
        """Instantiates the Stackoverflow corpus.

        :param base_path: Provide this only if you store the corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to 'partial' because this is a big corpus, but you can also set to 'full' for faster
        processing or 'none' for less memory.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # cache locations for this dataset
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name
        data_path = flair.cache_root / "datasets" / dataset_name

        titles_url = "https://raw.githubusercontent.com/jacoxu/StackOverflow/master/rawText/title_StackOverflow.txt"
        labels_url = "https://raw.githubusercontent.com/jacoxu/StackOverflow/master/rawText/label_StackOverflow.txt"

        # only download and convert if the raw title file does not exist yet
        if not (data_path / "title_StackOverflow.txt").is_file():
            cached_path(titles_url, Path("datasets") / dataset_name / "original")
            cached_path(labels_url, Path("datasets") / dataset_name / "original")

            tag_names = [
                "wordpress",
                "oracle",
                "svn",
                "apache",
                "excel",
                "matlab",
                "visual-studio",
                "cocoa",
                "osx",
                "bash",
                "spring",
                "hibernate",
                "scala",
                "sharepoint",
                "ajax",
                "qt",
                "drupal",
                "linq",
                "haskell",
                "magento",
            ]

            # translate the numeric label file (1-based indices) into tag names
            question_tags = []
            with open(data_path / "original" / "label_StackOverflow.txt", encoding="latin1") as labels_fp:
                for raw_line in labels_fp:
                    question_tags.append(tag_names[int(raw_line.rstrip()) - 1])

            # combine question titles with their tags into a FastText-formatted train file
            with (data_path / "original" / "title_StackOverflow.txt").open(encoding="latin1") as titles_fp, (
                data_folder / "train.txt"
            ).open("w", encoding="utf-8") as out_fp:
                for row_index, raw_line in enumerate(titles_fp):
                    out_fp.write(f"__label__{question_tags[row_index]} {raw_line.rstrip()}\n")

        super().__init__(data_folder, label_type="class", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class SENTIMENT_140(ClassificationCorpus):
    """Twitter sentiment corpus.

    See http://help.sentiment140.com/for-students
    Two sentiments in train data (POSITIVE, NEGATIVE) and three
    sentiments in test data (POSITIVE, NEGATIVE, NEUTRAL).
    """

    def __init__(
        self, label_name_map=None, tokenizer: Tokenizer = SegtokTokenizer(), memory_mode: str = "partial", **corpusargs
    ) -> None:
        """Instantiates twitter sentiment corpus.

        :param label_name_map: By default, the numeric values are mapped to ('NEGATIVE', 'POSITIVE' and 'NEUTRAL')
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to 'partial' because this is a big corpus, but you can also set to 'full' for faster
        processing or 'none' for less memory.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        # by default, map point score to POSITIVE / NEGATIVE values
        if label_name_map is None:
            label_name_map = {"0": "NEGATIVE", "2": "NEUTRAL", "4": "POSITIVE"}

        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name

        # download and convert only if the final train file is not yet present
        # (previously this ran unconditionally on every instantiation)
        if not (data_folder / "train.txt").is_file():
            # download sentiment140 archive if necessary and unzip
            sentiment_url = "https://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip"
            cached_path(sentiment_url, Path("datasets") / dataset_name / "raw")
            senteval_folder = flair.cache_root / "datasets" / dataset_name / "raw"
            unzip_file(senteval_folder / "trainingandtestdata.zip", senteval_folder)

            # create dataset directory if necessary
            os.makedirs(data_folder, exist_ok=True)

            # create train.txt file from CSV; write as utf-8 explicitly
            # (consistent with test.txt below, instead of the platform default)
            with (data_folder / "train.txt").open("w", encoding="utf-8") as train_file, (
                senteval_folder / "training.1600000.processed.noemoticon.csv"
            ).open(encoding="latin-1") as csv_train:
                for row in csv.reader(csv_train):
                    # column 0 is the polarity score, column 5 the tweet text
                    train_file.write(f"__label__{row[0]} {row[5]}\n")

            # create test.txt file from CSV
            with (data_folder / "test.txt").open("w", encoding="utf-8") as test_file, (
                senteval_folder / "testdata.manual.2009.06.14.csv"
            ).open(encoding="latin-1") as csv_test:
                for row in csv.reader(csv_test):
                    test_file.write(f"__label__{row[0]} {row[5]}\n")

        super().__init__(
            data_folder,
            label_type="sentiment",
            tokenizer=tokenizer,
            memory_mode=memory_mode,
            label_name_map=label_name_map,
            **corpusargs,
        )
class SENTEVAL_CR(ClassificationCorpus):
    """The customer reviews dataset of SentEval, classified into NEGATIVE or POSITIVE sentiment.

    see https://github.com/facebookresearch/SentEval
    """

    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval customer reviews dataset.

        :param corpusargs: Other args for ClassificationCorpus.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer())
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        """
        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name

        # download data if necessary
        if not (data_folder / "train.txt").is_file():
            # download senteval datasets if necessary and unzip
            senteval_path = "https://dl.fbaipublicfiles.com/senteval/senteval_data/datasmall_NB_ACL12.zip"
            cached_path(senteval_path, Path("datasets") / "senteval")
            senteval_folder = flair.cache_root / "datasets" / "senteval"
            unzip_file(senteval_folder / "datasmall_NB_ACL12.zip", senteval_folder)

            # create dataset directory if necessary
            os.makedirs(data_folder, exist_ok=True)

            # create train.txt file by iterating over pos and neg file.
            # write with explicit utf-8 encoding (the original used append mode and the
            # platform default encoding, which can fail for latin-1 characters on Windows)
            with open(data_folder / "train.txt", "w", encoding="utf-8") as train_file:
                with open(senteval_folder / "data" / "customerr" / "custrev.pos", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__POSITIVE {line}")
                with open(senteval_folder / "data" / "customerr" / "custrev.neg", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__NEGATIVE {line}")

        super().__init__(
            data_folder, label_type="sentiment", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class SENTEVAL_MR(ClassificationCorpus):
    """The movie reviews dataset of SentEval, classified into NEGATIVE or POSITIVE sentiment.

    see https://github.com/facebookresearch/SentEval
    """

    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval movie reviews dataset.

        :param corpusargs: Other args for ClassificationCorpus.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        """
        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name

        # download data if necessary
        if not (data_folder / "train.txt").is_file():
            # download senteval datasets if necessary and unzip
            senteval_path = "https://dl.fbaipublicfiles.com/senteval/senteval_data/datasmall_NB_ACL12.zip"
            cached_path(senteval_path, Path("datasets") / "senteval")
            senteval_folder = flair.cache_root / "datasets" / "senteval"
            unzip_file(senteval_folder / "datasmall_NB_ACL12.zip", senteval_folder)

            # create dataset directory if necessary
            os.makedirs(data_folder, exist_ok=True)

            # create train.txt file by iterating over pos and neg file.
            # write with explicit utf-8 encoding (the original used append mode and the
            # platform default encoding, which can fail for latin-1 characters on Windows)
            with open(data_folder / "train.txt", "w", encoding="utf-8") as train_file:
                with open(senteval_folder / "data" / "rt10662" / "rt-polarity.pos", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__POSITIVE {line}")
                with open(senteval_folder / "data" / "rt10662" / "rt-polarity.neg", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__NEGATIVE {line}")

        super().__init__(
            data_folder, label_type="sentiment", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class SENTEVAL_SUBJ(ClassificationCorpus):
    """The subjectivity dataset of SentEval, classified into SUBJECTIVE or OBJECTIVE sentiment.

    see https://github.com/facebookresearch/SentEval
    """

    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval subjectivity dataset.

        :param corpusargs: Other args for ClassificationCorpus.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        """
        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name

        # download data if necessary
        if not (data_folder / "train.txt").is_file():
            # download senteval datasets if necessary and unzip
            senteval_path = "https://dl.fbaipublicfiles.com/senteval/senteval_data/datasmall_NB_ACL12.zip"
            cached_path(senteval_path, Path("datasets") / "senteval")
            senteval_folder = flair.cache_root / "datasets" / "senteval"
            unzip_file(senteval_folder / "datasmall_NB_ACL12.zip", senteval_folder)

            # create dataset directory if necessary
            os.makedirs(data_folder, exist_ok=True)

            # create train.txt file by iterating over subjective and objective file.
            # write with explicit utf-8 encoding (the original used append mode and the
            # platform default encoding, which can fail for latin-1 characters on Windows)
            with open(data_folder / "train.txt", "w", encoding="utf-8") as train_file:
                with open(senteval_folder / "data" / "subj" / "subj.subjective", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__SUBJECTIVE {line}")
                with open(senteval_folder / "data" / "subj" / "subj.objective", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__OBJECTIVE {line}")

        super().__init__(
            data_folder, label_type="objectivity", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class SENTEVAL_MPQA(ClassificationCorpus):
    """The opinion-polarity dataset of SentEval, classified into NEGATIVE or POSITIVE polarity.

    see https://github.com/facebookresearch/SentEval
    """

    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval opinion polarity dataset.

        :param corpusargs: Other args for ClassificationCorpus.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        """
        # this dataset name
        dataset_name = self.__class__.__name__.lower()

        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name

        # download data if necessary
        if not (data_folder / "train.txt").is_file():
            # download senteval datasets if necessary and unzip
            senteval_path = "https://dl.fbaipublicfiles.com/senteval/senteval_data/datasmall_NB_ACL12.zip"
            cached_path(senteval_path, Path("datasets") / "senteval")
            senteval_folder = flair.cache_root / "datasets" / "senteval"
            unzip_file(senteval_folder / "datasmall_NB_ACL12.zip", senteval_folder)

            # create dataset directory if necessary
            os.makedirs(data_folder, exist_ok=True)

            # create train.txt file by iterating over pos and neg file.
            # write with explicit utf-8 encoding (the original used append mode and the
            # platform default encoding, which can fail for latin-1 characters on Windows)
            with open(data_folder / "train.txt", "w", encoding="utf-8") as train_file:
                with open(senteval_folder / "data" / "mpqa" / "mpqa.pos", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__POSITIVE {line}")
                with open(senteval_folder / "data" / "mpqa" / "mpqa.neg", encoding="latin1") as file:
                    for line in file:
                        train_file.write(f"__label__NEGATIVE {line}")

        super().__init__(
            data_folder, label_type="sentiment", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class SENTEVAL_SST_BINARY(ClassificationCorpus):
    """The Stanford sentiment treebank dataset of SentEval, classified into NEGATIVE or POSITIVE sentiment.

    see https://github.com/facebookresearch/SentEval
    """

    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval Stanford sentiment treebank dataset.

        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        # this dataset name ("_v2" suffix presumably versions the cached folder — kept as-is)
        dataset_name = self.__class__.__name__.lower() + "_v2"

        # default dataset folder is the cache root
        data_folder = flair.cache_root / "datasets" / dataset_name

        # download data if necessary
        if not (data_folder / "train.txt").is_file():
            # download raw splits if necessary
            for raw_name in ["sentiment-train", "sentiment-test", "sentiment-dev"]:
                cached_path(
                    f"https://raw.githubusercontent.com/PrincetonML/SIF/master/data/{raw_name}",
                    Path("datasets") / dataset_name / "raw",
                )

            original_filenames = ["sentiment-train", "sentiment-dev", "sentiment-test"]
            new_filenames = ["train.txt", "dev.txt", "test.txt"]

            # create train, dev and test files in fasttext format.
            # write with explicit utf-8 encoding (the original used append mode and
            # the platform default encoding)
            for new_filename, original_filename in zip(new_filenames, original_filenames):
                with open(data_folder / new_filename, "w", encoding="utf-8") as out_file, open(
                    data_folder / "raw" / original_filename, encoding="utf-8"
                ) as in_file:
                    for line in in_file:
                        # each raw line is "<sentence>\t<0-or-1 label>"
                        fields = line.split("\t")
                        label = "POSITIVE" if fields[1].rstrip() == "1" else "NEGATIVE"
                        out_file.write(f"__label__{label} {fields[0]}\n")

        super().__init__(data_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class SENTEVAL_SST_GRANULAR(ClassificationCorpus):
    """The Stanford sentiment treebank dataset of SentEval, classified into 5 sentiment classes.

    see https://github.com/facebookresearch/SentEval
    """

    def __init__(
        self,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode: str = "full",
        **corpusargs,
    ) -> None:
        """Instantiates SentEval Stanford sentiment treebank dataset.

        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        corpus_name = self.__class__.__name__.lower()

        # corpus lives under the cache root by default
        data_folder = flair.cache_root / "datasets" / corpus_name

        # only download and convert when the converted train split is missing
        if not (data_folder / "train.txt").is_file():
            for raw_split in ["stsa.fine.train", "stsa.fine.test", "stsa.fine.dev"]:
                cached_path(
                    f"https://raw.githubusercontent.com/AcademiaSinicaNLPLab/sentiment_dataset/master/data/{raw_split}",
                    Path("datasets") / corpus_name / "raw",
                )

            # convert to FastText format: every raw line starts with the class
            # digit, then a space, then the sentence text
            for split in ["train", "dev", "test"]:
                with (data_folder / f"{split}.txt").open("w", encoding="utf-8") as target, (
                    data_folder / "raw" / f"stsa.fine.{split}"
                ).open(encoding="latin1") as source:
                    for raw_line in source:
                        target.write(f"__label__{raw_line[0]} {raw_line[2:]}")

        super().__init__(data_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class GLUE_COLA(ClassificationCorpus):
    """Corpus of Linguistic Acceptability from GLUE benchmark.

    see https://gluebenchmark.com/tasks
    The task is to predict whether an English sentence is grammatically
    correct. Additionally to the Corpus we have eval_dataset containing
    the unlabeled test data for Glue evaluation.
    """

    def __init__(
        self,
        label_type="acceptability",
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Tokenizer = SegtokTokenizer(),
        **corpusargs,
    ) -> None:
        """Instantiates CoLA dataset.

        :param label_type: Name of the label to predict (default "acceptability").
        :param base_path: Provide this only if you store the COLA corpus in a specific folder.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = "glue"
        data_folder = base_path / dataset_name

        # download data if necessary
        cola_path = "https://dl.fbaipublicfiles.com/glue/data/CoLA.zip"
        data_file = data_folder / "CoLA/train.txt"

        # if data is not downloaded yet, download and convert it
        if not data_file.is_file():
            # get the zip file and unpack it
            zipped_data_path = cached_path(cola_path, Path("datasets") / dataset_name)
            unpack_file(zipped_data_path, data_folder, mode="zip", keep=False)

            # move original .tsv files to another folder
            Path(data_folder / "CoLA/train.tsv").rename(data_folder / "CoLA/original/train.tsv")
            Path(data_folder / "CoLA/dev.tsv").rename(data_folder / "CoLA/original/dev.tsv")
            Path(data_folder / "CoLA/test.tsv").rename(data_folder / "CoLA/original/test.tsv")

            label_map = {0: "not_grammatical", 1: "grammatical"}

            # create train and dev splits in fasttext format.
            # write with explicit utf-8 encoding (the original used append mode
            # and the platform default encoding)
            for split in ["train", "dev"]:
                with open(data_folder / "CoLA" / (split + ".txt"), "w", encoding="utf-8") as out_file, open(
                    data_folder / "CoLA" / "original" / (split + ".tsv"), encoding="utf-8"
                ) as in_file:
                    for line in in_file:
                        fields = line.rstrip().split("\t")
                        label = int(fields[1])
                        sentence = fields[3]
                        out_file.write(f"__label__{label_map[label]} {sentence}\n")

            # create eval_dataset file with no labels
            with open(data_folder / "CoLA" / "eval_dataset.txt", "w", encoding="utf-8") as out_file, open(
                data_folder / "CoLA" / "original" / "test.tsv", encoding="utf-8"
            ) as in_file:
                for line in in_file:
                    fields = line.rstrip().split("\t")
                    sentence = fields[1]
                    out_file.write(f"{sentence}\n")

        super().__init__(data_folder / "CoLA", label_type=label_type, tokenizer=tokenizer, **corpusargs)

        self.eval_dataset = ClassificationDataset(
            data_folder / "CoLA/eval_dataset.txt",
            label_type=label_type,
            allow_examples_without_labels=True,
            tokenizer=tokenizer,
            memory_mode="full",
        )

    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Create eval prediction file.

        This function creates a tsv file with predictions of the eval_dataset (after calling
        classifier.predict(corpus.eval_dataset, label_name='acceptability')). The resulting file
        is called CoLA.tsv and is in the format required for submission to the Glue Benchmark.
        """
        folder_path = Path(folder_path)
        folder_path = folder_path / "CoLA.tsv"
        # loop-invariant mapping hoisted out of the loop (was rebuilt per datapoint)
        reverse_label_map = {"grammatical": 1, "not_grammatical": 0}
        with open(folder_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                predicted_label = reverse_label_map[datapoint.get_labels("acceptability")[0].value]
                tsv_file.write(str(index) + "\t" + str(predicted_label) + "\n")
class GLUE_SST2(CSVClassificationCorpus):
    """GLUE SST-2 sentiment corpus plus an unlabeled eval split for benchmark submission."""

    # numeric CSV labels -> human-readable label names
    label_map = {0: "negative", 1: "positive"}

    def __init__(
        self,
        label_type: str = "sentiment",
        base_path: Optional[Union[str, Path]] = None,
        max_tokens_per_doc=-1,
        max_chars_per_doc=-1,
        tokenizer: Tokenizer = SegtokTokenizer(),
        in_memory: bool = False,
        encoding: str = "utf-8",
        **datasetargs,
    ) -> None:
        """Instantiates the SST-2 corpus; downloads the GLUE archive if it is not cached yet."""
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        dataset_name = "SST-2"
        data_folder = base_path / dataset_name
        train_file = data_folder / "train.tsv"

        sst2_url = "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip"
        if not train_file.is_file():
            # fetch the zip archive and unpack it in the datasets directory
            # (the archive contains a directory named SST-2)
            archive_path = cached_path(sst2_url, data_folder)
            unpack_file(archive_path, data_folder.parent, "zip", False)

        # dataset options shared between the train/dev corpus and the eval split
        shared_kwargs = {
            "delimiter": "\t",
            "max_tokens_per_doc": max_tokens_per_doc,
            "max_chars_per_doc": max_chars_per_doc,
            "tokenizer": tokenizer,
            "in_memory": in_memory,
            "encoding": encoding,
            "skip_header": True,
            **datasetargs,
        }

        super().__init__(
            name=dataset_name,
            data_folder=data_folder,
            label_type=label_type,
            column_name_map={0: "text", 1: "label"},
            train_file=train_file,
            dev_file=data_folder / "dev.tsv",
            **shared_kwargs,
        )

        eval_file = data_folder / "test.tsv"
        log.info("Evaluation (no labels): %s", eval_file)
        self.eval_dataset = CSVClassificationDataset(
            eval_file,
            label_type="sentence_index",
            column_name_map={
                0: "label_index",
                1: "text",
            },
            **shared_kwargs,
        )

    def tsv_from_eval_dataset(self, folder_path: Union[str, Path]):
        """Create eval prediction file."""
        output_path = Path(folder_path) / "SST-2.tsv"
        # invert label_map once: label name -> numerical label
        reverse_label_map = {name: numerical for numerical, name in self.label_map.items()}
        with open(output_path, mode="w") as tsv_file:
            tsv_file.write("index\tprediction\n")
            for index, datapoint in enumerate(_iter_dataset(self.eval_dataset)):
                predicted_label = reverse_label_map[datapoint.get_labels(self.eval_dataset.label_type)[0].value]
                tsv_file.write(f"{index}\t{predicted_label}\n")
class GO_EMOTIONS(ClassificationCorpus):
    """GoEmotions dataset containing 58k Reddit comments labeled with 27 emotion categories.

    see https://github.com/google-research/google-research/tree/master/goemotions
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SegtokTokenizer(),
        memory_mode: str = "partial",
        **corpusargs,
    ) -> None:
        """Initializes the GoEmotions corpus.

        Parameters
        ----------
        base_path: Union[str, Path]
            Provide this only if you want to store the corpus in a specific folder, otherwise use default.
        tokenizer: Union[bool, Tokenizer]
            Specify which tokenizer to use, the default is SegtokTokenizer().
        memory_mode: str
            Set to what degree to keep corpus in memory ('full', 'partial' or 'disk'). Use 'full'
            if full corpus and all embeddings fits into memory for speedups during training. Otherwise use 'partial' and if
            even this is too much for your memory, use 'disk'.
        """
        # numeric raw labels -> emotion names
        label_name_map = {
            "0": "ADMIRATION",
            "1": "AMUSEMENT",
            "2": "ANGER",
            "3": "ANNOYANCE",
            "4": "APPROVAL",
            "5": "CARING",
            "6": "CONFUSION",
            "7": "CURIOSITY",
            "8": "DESIRE",
            "9": "DISAPPOINTMENT",
            "10": "DISAPPROVAL",
            "11": "DISGUST",
            "12": "EMBARRASSMENT",
            "13": "EXCITEMENT",
            "14": "FEAR",
            "15": "GRATITUDE",
            "16": "GRIEF",
            "17": "JOY",
            "18": "LOVE",
            "19": "NERVOUSNESS",
            "20": "OPTIMISM",
            "21": "PRIDE",
            "22": "REALIZATION",
            "23": "RELIEF",
            "24": "REMORSE",
            "25": "SADNESS",
            "26": "SURPRISE",
            "27": "NEUTRAL",
        }

        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        corpus_name = self.__class__.__name__.lower()
        data_folder = base_path / corpus_name

        # download and convert only when the converted train split is missing
        if not (data_folder / "train.txt").is_file():
            goemotions_url = "https://raw.githubusercontent.com/google-research/google-research/master/goemotions/data/"
            for name in ["train.tsv", "test.tsv", "dev.tsv"]:
                cached_path(goemotions_url + name, Path("datasets") / corpus_name / "raw")

            if not os.path.exists(data_folder):
                os.makedirs(data_folder)

            raw_folder = flair.cache_root / "datasets" / corpus_name / "raw"

            # convert each raw tsv split into FastText format; one comment may
            # carry several comma-separated labels
            for name in ["train", "test", "dev"]:
                with (data_folder / (name + ".txt")).open("w", encoding="utf-8") as txt_file, (
                    raw_folder / (name + ".tsv")
                ).open(encoding="utf-8") as tsv_file:
                    for line in tsv_file:
                        row = line.split("\t")
                        text = row[0]
                        label_prefix = "".join(f"__label__{label} " for label in row[1].split(","))
                        txt_file.write(f"{label_prefix}{text}\n")

        super().__init__(
            data_folder,
            label_type="emotion",
            tokenizer=tokenizer,
            memory_mode=memory_mode,
            label_name_map=label_name_map,
            **corpusargs,
        )
class TREC_50(ClassificationCorpus):
    """The TREC Question Classification Corpus, classifying questions into 50 fine-grained answer types."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode="full",
        **corpusargs,
    ) -> None:
        """Instantiates TREC Question Classification Corpus with 50 fine-grained classes.

        :param base_path: Provide this only if you store the TREC corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        trec_path = "https://cogcomp.seas.upenn.edu/Data/QA/QC/"
        original_filenames = ["train_5500.label", "TREC_10.label"]
        new_filenames = ["train.txt", "test.txt"]
        for original_filename in original_filenames:
            cached_path(f"{trec_path}{original_filename}", Path("datasets") / dataset_name / "original")

        data_file = data_folder / new_filenames[0]
        if not data_file.is_file():
            for original_filename, new_filename in zip(original_filenames, new_filenames):
                with (data_folder / "original" / original_filename).open(encoding="latin1") as open_fp, (
                    data_folder / new_filename
                ).open("w", encoding="utf-8") as write_fp:
                    for line in open_fp:
                        fields = line.rstrip().split()
                        old_label = fields[0]
                        question = " ".join(fields[1:])
                        # TREC-50 keeps the full fine-grained label:
                        # NUM:dist -> __label__NUM:dist
                        write_fp.write(f"__label__{old_label} {question}\n")

        super().__init__(data_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class TREC_6(ClassificationCorpus):
    """The TREC Question Classification Corpus, classifying questions into 6 coarse-grained answer types."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode="full",
        **corpusargs,
    ) -> None:
        """Instantiates TREC Question Classification Corpus with 6 classes.

        :param base_path: Provide this only if you store the TREC corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        corpus_name = self.__class__.__name__.lower()
        data_folder = base_path / corpus_name

        # fetch the raw label files if they are not cached yet
        trec_path = "https://cogcomp.seas.upenn.edu/Data/QA/QC/"
        raw_files = ["train_5500.label", "TREC_10.label"]
        converted_files = ["train.txt", "test.txt"]
        for raw_file in raw_files:
            cached_path(f"{trec_path}{raw_file}", Path("datasets") / corpus_name / "original")

        # convert to FastText format unless already done
        if not (data_folder / converted_files[0]).is_file():
            for raw_file, converted_file in zip(raw_files, converted_files):
                with (data_folder / "original" / raw_file).open(encoding="latin1") as source, (
                    data_folder / converted_file
                ).open("w", encoding="utf-8") as target:
                    for raw_line in source:
                        tokens = raw_line.rstrip().split()
                        # keep only the coarse part of the label:
                        # NUM:dist -> __label__NUM
                        coarse_label = tokens[0].split(":")[0]
                        question = " ".join(tokens[1:])
                        target.write(f"__label__{coarse_label} {question}\n")

        super().__init__(
            data_folder, label_type="question_class", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class YAHOO_ANSWERS(ClassificationCorpus):
    """The YAHOO Question Classification Corpus, classifying questions into 10 coarse-grained answer types."""

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        memory_mode="partial",
        **corpusargs,
    ) -> None:
        """Instantiates YAHOO Question Classification Corpus with 10 classes.

        :param base_path: Provide this only if you store the YAHOO corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'partial' by default since this is a rather big corpus. Can also be 'full' or 'none'.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)

        # this dataset name
        dataset_name = self.__class__.__name__.lower()
        data_folder = base_path / dataset_name

        # download data if necessary
        url = "https://s3.amazonaws.com/fast-ai-nlp/yahoo_answers_csv.tgz"
        label_map = {
            "1": "Society_&_Culture",
            "2": "Science_&_Mathematics",
            "3": "Health",
            "4": "Education_&_Reference",
            "5": "Computers_&_Internet",
            "6": "Sports",
            "7": "Business_&_Finance",
            "8": "Entertainment_&_Music",
            "9": "Family_&_Relationships",
            "10": "Politics_&_Government",
        }
        original = flair.cache_root / "datasets" / dataset_name / "original"

        if not (data_folder / "train.txt").is_file():
            cached_path(url, original)
            import tarfile

            # only extract the train/test CSV members we need; use a context
            # manager so the archive handle is closed (the original leaked it)
            with tarfile.open(original / "yahoo_answers_csv.tgz", "r:gz") as tar:
                members = [
                    member for member in tar.getmembers() if "test.csv" in member.name or "train.csv" in member.name
                ]
                tar.extractall(original, members=members)

            # convert each CSV split to FastText format
            for name in ["train", "test"]:
                with (original / "yahoo_answers_csv" / (name + ".csv")).open(encoding="utf-8") as file, (
                    data_folder / (name + ".txt")
                ).open("w", encoding="utf-8") as writer:
                    for row in csv.reader(file):
                        # column 0 is the numeric class, column 1 the question title
                        writer.write("__label__" + label_map[row[0]] + " " + row[1] + "\n")

        super().__init__(
            data_folder, label_type="question_type", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
class GERMEVAL_2018_OFFENSIVE_LANGUAGE(ClassificationCorpus):
    """GermEval 2018 corpus for identification of offensive language.

    Classifying German tweets into 2 coarse-grained categories OFFENSIVE
    and OTHER or 4 fine-grained categories ABUSE, INSULT, PROFANITY and
    OTHER.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        tokenizer: Union[bool, Tokenizer] = SegtokTokenizer(),
        memory_mode: str = "full",
        fine_grained_classes: bool = False,
        **corpusargs,
    ) -> None:
        """Instantiates GermEval 2018 Offensive Language Classification Corpus.

        :param base_path: Provide this only if you store the Offensive Language corpus in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SegtokTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param fine_grained_classes: Set to True to load the dataset with 4 fine-grained classes
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        corpus_name = self.__class__.__name__.lower()
        data_folder = base_path / corpus_name

        # fetch the raw tweet files if they are not cached yet
        download_root = "https://raw.githubusercontent.com/uds-lsv/GermEval-2018-Data/master/"
        raw_files = ["germeval2018.training.txt", "germeval2018.test.txt"]
        converted_files = ["train.txt", "test.txt"]
        for raw_file in raw_files:
            cached_path(f"{download_root}{raw_file}", Path("datasets") / corpus_name / "original")

        # coarse- and fine-grained variants live in separate sub-directories
        task_setting = "fine_grained" if fine_grained_classes else "coarse_grained"
        task_folder = data_folder / task_setting
        if not os.path.exists(task_folder):
            os.makedirs(task_folder)

        # convert to FastText format unless already done for this task setting
        if not (task_folder / converted_files[0]).is_file():
            for raw_file, converted_file in zip(raw_files, converted_files):
                with (data_folder / "original" / raw_file).open(encoding="utf-8") as source, (
                    data_folder / task_setting / converted_file
                ).open("w", encoding="utf-8") as target:
                    for raw_line in source:
                        columns = raw_line.rstrip().split("\t")
                        tweet = columns[0]
                        # column 1 holds the coarse label, column 2 the fine-grained one
                        source_label = columns[2] if task_setting == "fine_grained" else columns[1]
                        target.write(f"__label__{source_label} {tweet}\n")

        super().__init__(data_folder=task_folder, tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs)
class COMMUNICATIVE_FUNCTIONS(ClassificationCorpus):
    """The Communicative Functions Classification Corpus.

    Classifying sentences from scientific papers into 39 communicative functions.
    """

    def __init__(
        self,
        base_path: Optional[Union[str, Path]] = None,
        memory_mode: str = "full",
        tokenizer: Union[bool, Tokenizer] = SpaceTokenizer(),
        **corpusargs,
    ) -> None:
        """Instantiates Communicative Functions Classification Corpus with 39 classes.

        :param base_path: Provide this only if you store the Communicative Functions date in a specific folder, otherwise use default.
        :param tokenizer: Custom tokenizer to use (default is SpaceTokenizer)
        :param memory_mode: Set to 'full' by default since this is a small corpus. Can also be 'partial' or 'none'.
        :param corpusargs: Other args for ClassificationCorpus.
        """
        base_path = flair.cache_root / "datasets" if not base_path else Path(base_path)
        corpus_name = self.__class__.__name__.lower()
        data_folder = base_path / corpus_name

        section_files = ["background.tsv", "discussion.tsv", "introduction.tsv", "method.tsv", "result.tsv"]

        # fetch the per-section raw files if they are not cached yet
        download_root = "https://raw.githubusercontent.com/Alab-NII/FECFevalDataset/master/sentences/"
        for section_file in section_files:
            cached_path(f"{download_root}{section_file}", Path("datasets") / corpus_name / "original")

        # only convert once: skip if the merged train file already exists
        if not (data_folder / "train.txt").is_file():
            with open(data_folder / "train.txt", "a+", encoding="utf-8") as write_fp:
                # the first four sections convert directly: label is column 0
                # (spaces normalized to underscores), sentence is column 2
                for section_file in section_files[:4]:
                    with open(data_folder / "original" / section_file, encoding="utf-8") as read_fp:
                        for raw_line in read_fp:
                            columns = raw_line.split("\t")
                            write_fp.write("__label__" + columns[0].replace(" ", "_") + " " + columns[2] + "\n")
                # the result section repeats some labels with an "(again)" suffix
                # which must be stripped before normalizing
                with open(data_folder / "original" / "result.tsv", encoding="utf-8") as read_fp:
                    for raw_line in read_fp:
                        columns = raw_line.split("\t")
                        if columns[0].split(" ")[-1] == "(again)":
                            # drop the trailing " (again)" (8 characters)
                            write_fp.write("__label__" + columns[0][:-8].replace(" ", "_") + " " + columns[2] + "\n")
                        else:
                            write_fp.write("__label__" + columns[0].replace(" ", "_") + " " + columns[2] + "\n")

        super().__init__(
            data_folder, label_type="communicative_function", tokenizer=tokenizer, memory_mode=memory_mode, **corpusargs
        )
def _download_wassa_if_not_there(emotion, data_folder, dataset_name):
    """Fetch the WASSA-2017 splits for one emotion and convert them to FastText format.

    For each of the train/dev/test splits, the raw ratings file is downloaded
    (unless the converted file already exists), rewritten as
    ``__label__<intensity> <text>`` lines, and the raw download is removed.
    """
    split_urls = {
        "train": f"http://saifmohammad.com/WebDocs/EmoInt%20Train%20Data/{emotion}-ratings-0to1.train.txt",
        "dev": f"http://saifmohammad.com/WebDocs/EmoInt%20Dev%20Data%20With%20Gold/{emotion}-ratings-0to1.dev.gold.txt",
        "test": f"http://saifmohammad.com/WebDocs/EmoInt%20Test%20Gold%20Data/{emotion}-ratings-0to1.test.gold.txt",
    }
    for split, url in split_urls.items():
        data_file = data_folder / f"{emotion}-{split}.txt"
        if data_file.is_file():
            # already downloaded and converted on a previous run
            continue

        raw_path = cached_path(url, Path("datasets") / dataset_name)
        with open(raw_path, encoding="UTF-8") as raw, open(data_file, "w", encoding="UTF-8") as out:
            next(raw)  # skip the header row of the raw ratings file
            for line in raw:
                fields = line.split("\t")
                # column 3 holds the intensity score, column 1 the tweet text
                out.write(f"__label__{fields[3].rstrip()} {fields[1]}\n")

        # the converted file replaces the raw download
        os.remove(raw_path)
class WASSA_ANGER(ClassificationCorpus):
    """WASSA-2017 shared-task corpus for anger emotion intensity.

    See https://saifmohammad.com/WebPages/EmotionIntensity-SharedTask.html.
    """

    def __init__(
        self, base_path: Optional[Union[str, Path]] = None, tokenizer: Tokenizer = SegtokTokenizer(), **corpusargs
    ) -> None:
        """Downloads (if needed) and loads the WASSA-2017 anger corpus.

        :param base_path: Optional custom storage folder; defaults to the flair cache.
        :param tokenizer: Tokenizer applied to each sentence (default is SegtokTokenizer).
        :param corpusargs: Additional keyword arguments forwarded to ClassificationCorpus.
        """
        dataset_name = self.__class__.__name__.lower()
        root = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = root / dataset_name

        # fetch and convert the train/dev/test splits on first use
        _download_wassa_if_not_there("anger", data_folder, dataset_name)

        super().__init__(data_folder, tokenizer=tokenizer, **corpusargs)
class WASSA_FEAR(ClassificationCorpus):
    """WASSA-2017 shared-task corpus for fear emotion intensity.

    See https://saifmohammad.com/WebPages/EmotionIntensity-SharedTask.html.
    """

    def __init__(
        self, base_path: Optional[Union[str, Path]] = None, tokenizer: Tokenizer = SegtokTokenizer(), **corpusargs
    ) -> None:
        """Downloads (if needed) and loads the WASSA-2017 fear corpus.

        :param base_path: Optional custom storage folder; defaults to the flair cache.
        :param tokenizer: Tokenizer applied to each sentence (default is SegtokTokenizer).
        :param corpusargs: Additional keyword arguments forwarded to ClassificationCorpus.
        """
        dataset_name = self.__class__.__name__.lower()
        root = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = root / dataset_name

        # fetch and convert the train/dev/test splits on first use
        _download_wassa_if_not_there("fear", data_folder, dataset_name)

        super().__init__(data_folder, tokenizer=tokenizer, **corpusargs)
class WASSA_JOY(ClassificationCorpus):
    """WASSA-2017 shared-task corpus for joy emotion intensity.

    See https://saifmohammad.com/WebPages/EmotionIntensity-SharedTask.html.
    """

    def __init__(
        self, base_path: Optional[Union[str, Path]] = None, tokenizer: Tokenizer = SegtokTokenizer(), **corpusargs
    ) -> None:
        """Downloads (if needed) and loads the WASSA-2017 joy corpus.

        :param base_path: Optional custom storage folder; defaults to the flair cache.
        :param tokenizer: Tokenizer applied to each sentence (default is SegtokTokenizer).
        :param corpusargs: Additional keyword arguments forwarded to ClassificationCorpus.
        """
        dataset_name = self.__class__.__name__.lower()
        root = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = root / dataset_name

        # fetch and convert the train/dev/test splits on first use
        _download_wassa_if_not_there("joy", data_folder, dataset_name)

        super().__init__(data_folder, tokenizer=tokenizer, **corpusargs)
class WASSA_SADNESS(ClassificationCorpus):
    """WASSA-2017 shared-task corpus for sadness emotion intensity.

    See https://saifmohammad.com/WebPages/EmotionIntensity-SharedTask.html.
    """

    def __init__(
        self, base_path: Optional[Union[str, Path]] = None, tokenizer: Tokenizer = SegtokTokenizer(), **corpusargs
    ) -> None:
        """Downloads (if needed) and loads the WASSA-2017 sadness corpus.

        :param base_path: Optional custom storage folder; defaults to the flair cache.
        :param tokenizer: Tokenizer applied to each sentence (default is SegtokTokenizer).
        :param corpusargs: Additional keyword arguments forwarded to ClassificationCorpus.
        """
        dataset_name = self.__class__.__name__.lower()
        root = Path(base_path) if base_path else flair.cache_root / "datasets"
        data_folder = root / dataset_name

        # fetch and convert the train/dev/test splits on first use
        _download_wassa_if_not_there("sadness", data_folder, dataset_name)

        super().__init__(data_folder, tokenizer=tokenizer, **corpusargs)
| 88,472 | 41.049905 | 134 | py |
flair | flair-master/flair/trainers/language_model_trainer.py | import datetime
import logging
import math
import random
import time
from pathlib import Path
from typing import Iterable, Optional, Type, Union
import torch
from torch import cuda
from torch.optim import AdamW, Optimizer
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.sgd import SGD
from torch.utils.data import DataLoader, Dataset
from flair.optim import SGDW, ReduceLRWDOnPlateau
try:
from apex import amp
except ImportError:
amp = None
import flair
from flair.data import Dictionary
from flair.models import LanguageModel
from flair.training_utils import add_file_handler
log = logging.getLogger("flair")
class TextDataset(Dataset):
    """Torch dataset over one text file or a directory of text-file shards.

    Each item is the id tensor of one entire file: the file is read, split into
    documents by ``document_delimiter``, optionally case-flipped per document,
    split into characters (or whitespace tokens), mapped through ``dictionary``,
    and reversed for backward language models.
    """

    def __init__(
        self,
        path: Union[str, Path],
        dictionary: Dictionary,
        expand_vocab: bool = False,
        forward: bool = True,
        split_on_char: bool = True,
        random_case_flip: bool = True,
        document_delimiter: str = "\n",
        shuffle: bool = True,
    ) -> None:
        """Create the dataset.

        :param path: a single text file or a directory of shard files
        :param dictionary: maps characters/tokens to ids (grown in place if ``expand_vocab``)
        :param expand_vocab: if True, add every symbol encountered to ``dictionary``
        :param forward: if False, each item's id sequence is reversed (backward LM)
        :param split_on_char: split into characters if True, else whitespace tokens
        :param random_case_flip: randomly lower/upper-case ~2% of documents
        :param document_delimiter: delimiter between documents within a file
        :param shuffle: shuffle document order within a file on read
        """
        path = Path(path)
        assert path.exists()

        self.path = path
        self.dictionary = dictionary
        self.split_on_char = split_on_char
        self.forward = forward
        self.random_case_flip = random_case_flip
        self.expand_vocab = expand_vocab
        self.document_delimiter = document_delimiter
        self.shuffle = shuffle

        if path.is_dir():
            self.files = sorted([f for f in path.iterdir() if f.exists()])
        else:
            self.files = [path]

    def __len__(self) -> int:
        # one item per shard file
        return len(self.files)

    def __getitem__(self, index=0) -> torch.Tensor:
        """Tokenizes a text file on character basis."""
        # fix: use isinstance instead of the non-idiomatic `type(...) is str`
        if isinstance(self.files[index], str):
            self.files[index] = Path(self.files[index])
        assert self.files[index].exists()

        with self.files[index].open("r", encoding="utf-8") as fin:
            # re-attach the delimiter so document boundaries survive tokenization
            text_lines: Iterable[str] = (
                doc + self.document_delimiter for doc in fin.read().split(self.document_delimiter) if doc
            )
            if self.random_case_flip:
                text_lines = map(self.random_casechange, text_lines)
            lines = [list(line) if self.split_on_char else line.split() for line in text_lines]

        log.info(f"read text file with {len(lines)} lines")

        if self.shuffle:
            random.shuffle(lines)
            log.info("shuffled")

        # optionally grow the dictionary with every symbol seen in this file
        if self.expand_vocab:
            for chars in lines:
                for char in chars:
                    self.dictionary.add_item(char)

        ids = torch.tensor(
            [self.dictionary.get_idx_for_item(char) for chars in lines for char in chars],
            dtype=torch.long,
        )
        if not self.forward:
            # backward language model: reverse the whole id sequence
            ids = ids.flip(0)
        return ids

    @staticmethod
    def random_casechange(line: str) -> str:
        """Lower-case or upper-case a document with 1% probability each."""
        no = random.randint(0, 99)
        if no == 0:
            line = line.lower()
        elif no == 1:
            line = line.upper()
        return line
class TextCorpus:
    """Holds the train/valid/test splits of a language-model corpus.

    The train split may be a directory of shard files and stays lazy (a
    TextDataset); valid and test are single files that are materialized
    eagerly into one id tensor each.
    """

    def __init__(
        self,
        path: Union[Path, str],
        dictionary: Dictionary,
        forward: bool = True,
        character_level: bool = True,
        random_case_flip: bool = True,
        document_delimiter: str = "\n",
    ) -> None:
        self.dictionary: Dictionary = dictionary
        self.forward = forward
        self.split_on_char = character_level
        self.random_case_flip = random_case_flip
        self.document_delimiter: str = document_delimiter

        corpus_root = Path(path)

        # lazy dataset over (possibly many) training shard files
        self.train = TextDataset(
            corpus_root / "train",
            dictionary,
            expand_vocab=False,
            forward=self.forward,
            split_on_char=self.split_on_char,
            random_case_flip=self.random_case_flip,
            document_delimiter=self.document_delimiter,
            shuffle=True,
        )

        # valid and test are single files; indexing [0] materializes the one tensor
        self.valid = TextDataset(
            corpus_root / "valid.txt",
            dictionary,
            expand_vocab=False,
            forward=self.forward,
            split_on_char=self.split_on_char,
            random_case_flip=self.random_case_flip,
            document_delimiter=document_delimiter,
            shuffle=False,
        )[0]

        self.test = TextDataset(
            corpus_root / "test.txt",
            dictionary,
            expand_vocab=False,
            forward=self.forward,
            split_on_char=self.split_on_char,
            random_case_flip=self.random_case_flip,
            document_delimiter=document_delimiter,
            shuffle=False,
        )[0]
class LanguageModelTrainer:
    """Trains a flair LanguageModel on a TextCorpus, split by split.

    The train set may consist of many file shards ("splits"); after each split
    the model is evaluated on the validation set, the best model so far is
    kept, and training can be checkpointed/resumed. After training (or an
    interrupt), perplexity is reported on the test set.
    """

    def __init__(
        self,
        model: LanguageModel,
        corpus: TextCorpus,
        optimizer: Type[Optimizer] = SGD,
        test_mode: bool = False,
        epoch: int = 0,
        split: int = 0,
        loss: float = 10000,
        optimizer_state: Optional[dict] = None,
    ) -> None:
        """Create a trainer.

        :param model: the language model to train
        :param corpus: corpus providing train/valid/test splits
        :param optimizer: optimizer class (instantiated inside :meth:`train`)
        :param test_mode: stored flag; not read by the training loop in this file
        :param epoch: epoch to resume from (checkpoint restore)
        :param split: split index to resume from (checkpoint restore)
        :param loss: best validation loss seen so far (checkpoint restore)
        :param optimizer_state: optimizer state dict to restore, if any
        """
        self.model: LanguageModel = model
        self.optimizer: Type[Optimizer] = optimizer
        self.corpus: TextCorpus = corpus
        self.test_mode: bool = test_mode

        self.loss_function = torch.nn.CrossEntropyLoss()
        # number of batches between progress log lines
        self.log_interval = 100
        self.epoch = epoch
        self.split = split
        self.loss = loss
        self.optimizer_state = optimizer_state

    def train(
        self,
        base_path: Union[Path, str],
        sequence_length: int,
        learning_rate: float = 20,
        mini_batch_size: int = 100,
        anneal_factor: float = 0.25,
        patience: int = 10,
        clip: float = 0.25,
        max_epochs: int = 1000,
        checkpoint: bool = False,
        grow_to_sequence_length: int = 0,
        num_workers: int = 2,
        use_amp: bool = False,
        amp_opt_level: str = "O1",
        **kwargs,
    ):
        """Run the training loop.

        :param base_path: output directory for logs, checkpoints and the best model
        :param sequence_length: BPTT window length (may grow, see grow_to_sequence_length)
        :param learning_rate: initial learning rate passed to the optimizer
        :param mini_batch_size: number of parallel sequences per batch
        :param anneal_factor: LR reduction factor for the plateau scheduler
        :param patience: scheduler patience (splits without improvement)
        :param clip: max gradient norm for clipping
        :param max_epochs: maximum passes over all training splits
        :param checkpoint: if True, write checkpoint.pt after every split
        :param grow_to_sequence_length: grow sequence_length by 1 per split up to this value
        :param num_workers: DataLoader worker processes
        :param use_amp: use NVIDIA apex mixed precision (requires apex installed)
        :param amp_opt_level: apex optimization level
        :param kwargs: extra keyword arguments forwarded to the optimizer constructor
        """
        if use_amp and amp is None:
            raise RuntimeError(
                "Failed to import apex. Please install apex from "
                "https://www.github.com/nvidia/apex "
                "to enable mixed-precision training."
            )

        # cast string to Path
        base_path = Path(base_path)

        number_of_splits: int = len(self.corpus.train)

        val_data = self._batchify(self.corpus.valid, mini_batch_size)

        # error message if the validation dataset is too small
        if val_data.size(0) == 1:
            raise RuntimeError(
                f"ERROR: Your validation dataset is too small. For your "
                f"mini_batch_size, the data needs to "
                f"consist of at least {mini_batch_size * 2} characters!"
            )

        base_path.mkdir(parents=True, exist_ok=True)
        loss_txt = base_path / "loss.txt"
        savefile = base_path / "best-lm.pt"

        try:
            log_handler = add_file_handler(log, base_path / "training.log")

            best_val_loss = self.loss
            kwargs["lr"] = learning_rate
            optimizer = self.optimizer(self.model.parameters(), **kwargs)
            # restore optimizer state when resuming from a checkpoint
            if self.optimizer_state is not None:
                optimizer.load_state_dict(self.optimizer_state)

            # weight-decay-aware scheduler for AdamW/SGDW, plain plateau scheduler otherwise
            if isinstance(optimizer, (AdamW, SGDW)):
                scheduler: ReduceLROnPlateau = ReduceLRWDOnPlateau(
                    optimizer, verbose=True, factor=anneal_factor, patience=patience
                )
            else:
                scheduler = ReduceLROnPlateau(optimizer, verbose=True, factor=anneal_factor, patience=patience)

            if use_amp:
                self.model, optimizer = amp.initialize(self.model, optimizer, opt_level=amp_opt_level)

            # first pass iterates the shards in order; later epochs shuffle them
            training_generator = DataLoader(self.corpus.train, shuffle=False, num_workers=num_workers)

            for epoch in range(self.epoch, max_epochs):
                epoch_start_time = time.time()
                # Shuffle training files randomly after serially iterating
                # through corpus one
                if epoch > 0:
                    training_generator = DataLoader(self.corpus.train, shuffle=True, num_workers=num_workers)
                    self.model.save_checkpoint(
                        base_path / f"epoch_{epoch}.pt",
                        optimizer,
                        epoch,
                        0,
                        best_val_loss,
                    )

                # iterate through training data, starting at
                # self.split (for checkpointing)
                for curr_split, train_slice in enumerate(training_generator, self.split):
                    # optionally grow the BPTT window by one per split
                    if sequence_length < grow_to_sequence_length:
                        sequence_length += 1
                    log.info(f"Sequence length is {sequence_length}")

                    split_start_time = time.time()
                    # off by one for printing
                    curr_split += 1

                    train_data = self._batchify(train_slice.flatten(), mini_batch_size)

                    log.info("Split %d" % curr_split + f"\t - ({datetime.datetime.now():%H:%M:%S})")

                    # read back the (possibly annealed) learning rate for logging
                    for group in optimizer.param_groups:
                        learning_rate = group["lr"]

                    # go into train mode
                    self.model.train()

                    # reset variables
                    hidden = self.model.init_hidden(mini_batch_size)

                    # not really sure what this does
                    ntokens = len(self.corpus.dictionary)

                    total_loss = torch.zeros(1, device=flair.device)
                    start_time = time.time()

                    for batch, i in enumerate(range(0, train_data.size(0) - 1, sequence_length)):
                        data, targets = self._get_batch(train_data, i, sequence_length)

                        # hard abort if a GPU is available but data stayed on CPU
                        if not data.is_cuda and cuda.is_available():
                            log.info("Batch %d is not on CUDA, training will be very slow" % (batch))
                            raise Exception("data isnt on cuda")

                        self.model.zero_grad()
                        optimizer.zero_grad()

                        # do the forward pass in the model
                        output, rnn_output, hidden = self.model.forward(data, hidden)

                        # try to predict the targets
                        loss = self.loss_function(output.view(-1, ntokens), targets)

                        # Backward
                        if use_amp:
                            with amp.scale_loss(loss, optimizer) as scaled_loss:
                                scaled_loss.backward()
                        else:
                            loss.backward()

                        # `clip_grad_norm` helps prevent the exploding gradient
                        # problem in RNNs / LSTMs.
                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip)

                        optimizer.step()

                        total_loss += loss.data

                        # We detach the hidden state from how it was
                        # previously produced.
                        # If we didn't, the model would try backpropagating
                        # all the way to start of the dataset.
                        hidden = self._repackage_hidden(hidden)

                        # explicitly remove loss to clear up memory
                        del loss, output, rnn_output

                        if batch % self.log_interval == 0 and batch > 0:
                            cur_loss = total_loss.item() / self.log_interval
                            elapsed = time.time() - start_time
                            log.info(
                                f"| split {curr_split:3d}/{number_of_splits:3d} | {batch:5d}/{len(train_data) // sequence_length:5d} batches "
                                f"| ms/batch {elapsed * 1000 / self.log_interval:5.2f} | loss {cur_loss:5.4f} | ppl {math.exp(cur_loss):5.4f}"
                            )
                            total_loss = torch.zeros(1, device=flair.device)
                            start_time = time.time()

                    ##########################################################
                    # validate after every split
                    ##########################################################
                    self.model.eval()

                    val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)

                    # Save the model if the validation loss is the best we've
                    # seen so far.
                    if val_loss < best_val_loss:
                        self.model.save(savefile)
                        best_val_loss = val_loss
                        log.info("best split so far")

                    scheduler.step(val_loss)

                    log.info(f"best loss so far {best_val_loss:5.8f}")

                    # sample some text from the model as a qualitative check
                    log.info(self.model.generate_text())

                    if checkpoint:
                        self.model.save_checkpoint(
                            base_path / "checkpoint.pt",
                            optimizer,
                            epoch,
                            curr_split,
                            best_val_loss,
                        )

                    ##########################################################
                    # print info
                    ##########################################################
                    log.info("-" * 89)

                    summary = (
                        f"| end of split {curr_split:3d} /{number_of_splits:3d} | epoch {epoch + 1:3d} | time: "
                        f"{(time.time() - split_start_time):5.2f}s | valid loss {val_loss:5.4f} | valid ppl "
                        f"{math.exp(val_loss):5.4f} | learning rate {learning_rate:3.4f}"
                    )

                    with open(loss_txt, "a") as myfile:
                        myfile.write("%s\n" % summary)

                    log.info(summary)
                    log.info("-" * 89)
                    log.info("%d seconds for train split %d" % (time.time() - split_start_time, curr_split))

                log.info("Epoch time: %.2f" % (time.time() - epoch_start_time))

        except KeyboardInterrupt:
            log.info("-" * 89)
            log.info("Exiting from training early")

        finally:
            if log_handler is not None:
                log_handler.close()
                log.removeHandler(log_handler)

        ###############################################################################
        # final testing
        ###############################################################################
        test_data = self._batchify(self.corpus.test, mini_batch_size)
        test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)

        summary = f"TEST: valid loss {test_loss:5.4f} | valid ppl {math.exp(test_loss):8.4f}"
        with open(loss_txt, "a") as myfile:
            myfile.write("%s\n" % summary)

        log.info(summary)
        log.info("-" * 89)

    def evaluate(self, data_source, eval_batch_size, sequence_length):
        """Return the average per-token cross-entropy loss over ``data_source``."""
        # Turn on evaluation mode which disables dropout.
        self.model.eval()
        with torch.no_grad():
            total_loss = 0
            ntokens = len(self.corpus.dictionary)

            hidden = self.model.init_hidden(eval_batch_size)

            for i in range(0, data_source.size(0) - 1, sequence_length):
                data, targets = self._get_batch(data_source, i, sequence_length)
                prediction, rnn_output, hidden = self.model.forward(data, hidden)
                output_flat = prediction.view(-1, ntokens)
                # weight each window's loss by its length for a proper average
                total_loss += len(data) * self.loss_function(output_flat, targets).data
                hidden = self._repackage_hidden(hidden)
            return total_loss.item() / len(data_source)

    @staticmethod
    def _batchify(data, batch_size):
        """Reshape a flat id sequence into (num_rows, batch_size) columns."""
        # Work out how cleanly we can divide the dataset into bsz parts.
        nbatch = data.size(0) // batch_size
        # Trim off any extra elements that wouldn't cleanly fit (remainders).
        data = data.narrow(0, 0, nbatch * batch_size)
        # Evenly divide the data across the bsz batches.
        data = data.view(batch_size, -1).t().contiguous()
        return data

    @staticmethod
    def _get_batch(source, i, sequence_length):
        """Slice input rows [i, i+seq_len) and next-step targets, moved to flair.device."""
        seq_len = min(sequence_length, len(source) - 1 - i)

        data = source[i : i + seq_len]
        # targets are the inputs shifted by one position, flattened
        target = source[i + 1 : i + 1 + seq_len].view(-1)

        data = data.to(flair.device)
        target = target.to(flair.device)

        return data, target

    @staticmethod
    def _repackage_hidden(h):
        """Wraps hidden states in new tensors, to detach them from their history."""
        return tuple(v.detach() for v in h)

    @staticmethod
    def load_checkpoint(
        checkpoint_file: Union[str, Path],
        corpus: TextCorpus,
        optimizer: Type[Optimizer] = SGD,
    ):
        """Restore a trainer (model plus epoch/split/loss/optimizer state) from a checkpoint file."""
        if type(checkpoint_file) is str:
            checkpoint_file = Path(checkpoint_file)

        checkpoint = LanguageModel.load_checkpoint(checkpoint_file)
        return LanguageModelTrainer(
            checkpoint["model"],
            corpus,
            optimizer,
            epoch=checkpoint["epoch"],
            split=checkpoint["split"],
            loss=checkpoint["loss"],
            optimizer_state=checkpoint["optimizer_state_dict"],
        )
| 17,266 | 35.660297 | 142 | py |
flair | flair-master/flair/trainers/trainer.py | import inspect
import logging
import os
import random
import time
import warnings
from inspect import signature
from pathlib import Path
from typing import List, Optional, Tuple, Type, Union
import torch
from torch.optim.sgd import SGD
from torch.utils.data.dataset import ConcatDataset
import flair
import flair.nn
from flair.data import Corpus, Dictionary, _len_dataset
from flair.datasets import DataLoader
from flair.trainers.plugins import (
AnnealingPlugin,
CheckpointPlugin,
LinearSchedulerPlugin,
LogFilePlugin,
LossFilePlugin,
MetricName,
MetricRecord,
Pluggable,
TrainerPlugin,
TrainingInterrupt,
WeightExtractorPlugin,
)
from flair.training_utils import identify_dynamic_embeddings, log_line, store_embeddings
log = logging.getLogger("flair")
class ModelTrainer(Pluggable):
valid_events = {
"after_setup",
"before_training_epoch",
"before_training_batch",
"before_training_optimizer_step",
"after_training_batch",
"after_training_epoch",
"after_evaluation",
"after_training_loop",
"training_interrupt",
"_training_finally",
"_training_exception",
"after_training",
"metric_recorded",
}
def __init__(self, model: flair.nn.Model, corpus: Corpus) -> None:
"""Initialize a model trainer.
:param model: The model that you want to train. The model should inherit from flair.nn.Model # noqa: E501
:param corpus: The dataset used to train the model, should be of type Corpus
"""
super().__init__()
self.model: flair.nn.Model = model
self.corpus: Corpus = corpus
self.reset_training_attributes()
self.return_values: dict = {}
def reset_training_attributes(self):
if hasattr(self, "optimizer") and self.optimizer is not None:
self.optimizer.zero_grad(set_to_none=True)
del self.optimizer
self.optimizer = None
self.mini_batch_size = None
self.return_values: dict = {}
@staticmethod
def check_for_and_delete_previous_best_models(base_path):
all_best_model_names = [filename for filename in os.listdir(base_path) if filename.startswith("best-model")]
if len(all_best_model_names) != 0:
warnings.warn(
"There should be no best model saved at epoch 1 except there "
"is a model from previous trainings"
" in your training folder. All previous best models will be deleted."
)
for single_model in all_best_model_names:
previous_best_path = os.path.join(base_path, single_model)
if os.path.exists(previous_best_path):
os.remove(previous_best_path)
@staticmethod
def get_batch_steps(batch, mini_batch_chunk_size):
# if necessary, make batch_steps
if mini_batch_chunk_size is not None and len(batch) > mini_batch_chunk_size:
# break up the batch into slices of size
# mini_batch_chunk_size
return [batch[i : i + mini_batch_chunk_size] for i in range(0, len(batch), mini_batch_chunk_size)]
else:
return [batch]
def _get_train_data(self, train_with_dev, train_with_test):
# if training also uses dev/train data, include in training set
train_data = self.corpus.train
if train_with_dev or train_with_test:
parts = [self.corpus.train]
if train_with_dev and self.corpus.dev:
parts.append(self.corpus.dev)
if train_with_test and self.corpus.test:
parts.append(self.corpus.test)
train_data = ConcatDataset(parts)
return train_data
    def _backward(self, loss):
        """Calls backward on the loss.

        This allows plugins to overwrite the backward call.

        :param loss: scalar loss tensor to backpropagate
        """
        loss.backward()
    def train(
        self,
        base_path: Union[Path, str],
        anneal_factor: float = 0.5,
        patience: int = 3,
        min_learning_rate: Union[float, List[float]] = 0.0001,
        initial_extra_patience: int = 0,
        anneal_with_restarts: bool = False,
        learning_rate: float = 0.1,
        decoder_learning_rate: Optional[float] = None,
        mini_batch_size: int = 32,
        eval_batch_size: int = 64,
        mini_batch_chunk_size: Optional[int] = None,
        max_epochs: int = 100,
        optimizer: Type[torch.optim.Optimizer] = torch.optim.SGD,
        train_with_dev: bool = False,
        train_with_test: bool = False,
        # evaluation and monitoring
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        monitor_test: bool = False,
        monitor_train_sample: Union[float, int] = 0.0,
        use_final_model_for_eval: bool = False,
        gold_label_dictionary_for_eval: Optional[Dictionary] = None,
        exclude_labels: List[str] = [],
        # sampling and shuffling
        sampler=None,
        shuffle: bool = True,
        shuffle_first_epoch: bool = True,
        # evaluation and monitoring
        embeddings_storage_mode: str = "cpu",
        epoch: int = 0,
        # when and what to save
        save_final_model: bool = True,
        save_optimizer_state: bool = False,
        save_model_each_k_epochs: int = 0,
        # logging parameters
        create_file_logs: bool = True,
        create_loss_file: bool = True,
        write_weights: bool = False,
        # plugins
        plugins: Optional[List[TrainerPlugin]] = None,
        attach_default_scheduler: bool = True,
        **kwargs,
    ):
        """Train with the classic SGD-plus-annealing recipe.

        Thin wrapper around :meth:`train_custom`: unless
        ``attach_default_scheduler`` is False, an :class:`AnnealingPlugin`
        (learning-rate reduction on plateau) is attached, and every remaining
        argument is forwarded unchanged.
        """
        if plugins is None:
            plugins = []

        if attach_default_scheduler:
            # activate annealing plugin
            plugins.append(
                AnnealingPlugin(
                    base_path=base_path,
                    anneal_factor=anneal_factor,
                    patience=patience,
                    min_learning_rate=min_learning_rate,
                    initial_extra_patience=initial_extra_patience,
                    anneal_with_restarts=anneal_with_restarts,
                )
            )

        # call self.train_custom with all parameters (minus the ones specific to the AnnealingPlugin)
        # NOTE: locals() is captured here, before any further local variable is
        # introduced, so it holds exactly the parameters of this call
        local_variables = locals()
        for var in [
            "self",
            "anneal_factor",
            "patience",
            "min_learning_rate",
            "initial_extra_patience",
            "anneal_with_restarts",
            "attach_default_scheduler",
            "kwargs",
        ]:
            local_variables.pop(var)
        return self.train_custom(**local_variables, **kwargs)
    def fine_tune(
        self,
        base_path: Union[Path, str],
        # training parameters
        warmup_fraction: float = 0.1,
        learning_rate: float = 5e-5,
        decoder_learning_rate: Optional[float] = None,
        mini_batch_size: int = 4,
        eval_batch_size: int = 16,
        mini_batch_chunk_size: Optional[int] = None,
        max_epochs: int = 10,
        optimizer: Type[torch.optim.Optimizer] = torch.optim.AdamW,
        train_with_dev: bool = False,
        train_with_test: bool = False,
        # evaluation and monitoring
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        monitor_test: bool = False,
        monitor_train_sample: Union[float, int] = 0.0,
        use_final_model_for_eval: bool = True,
        gold_label_dictionary_for_eval: Optional[Dictionary] = None,
        exclude_labels: List[str] = [],
        # sampling and shuffling
        sampler=None,
        shuffle: bool = True,
        shuffle_first_epoch: bool = True,
        # evaluation and monitoring
        embeddings_storage_mode: str = "none",
        epoch: int = 0,
        # when and what to save
        save_final_model: bool = True,
        save_optimizer_state: bool = False,
        save_model_each_k_epochs: int = 0,
        # logging parameters
        create_file_logs: bool = True,
        create_loss_file: bool = True,
        write_weights: bool = False,
        # plugins
        plugins: Optional[List[TrainerPlugin]] = None,
        attach_default_scheduler: bool = True,
        **kwargs,
    ):
        """Fine-tune with the transformer recipe (AdamW, small LR, linear warmup/decay).

        Thin wrapper around :meth:`train_custom`: unless
        ``attach_default_scheduler`` is False, a :class:`LinearSchedulerPlugin`
        with the given ``warmup_fraction`` is attached; all other arguments are
        forwarded unchanged.
        """
        # annealing logic
        if plugins is None:
            plugins = []

        if attach_default_scheduler:
            plugins.append(LinearSchedulerPlugin(warmup_fraction=warmup_fraction))

        return self.train_custom(
            base_path=base_path,
            # training parameters
            learning_rate=learning_rate,
            decoder_learning_rate=decoder_learning_rate,
            mini_batch_size=mini_batch_size,
            eval_batch_size=eval_batch_size,
            mini_batch_chunk_size=mini_batch_chunk_size,
            max_epochs=max_epochs,
            optimizer=optimizer,
            train_with_dev=train_with_dev,
            train_with_test=train_with_test,
            # evaluation and monitoring
            main_evaluation_metric=main_evaluation_metric,
            monitor_test=monitor_test,
            monitor_train_sample=monitor_train_sample,
            use_final_model_for_eval=use_final_model_for_eval,
            gold_label_dictionary_for_eval=gold_label_dictionary_for_eval,
            exclude_labels=exclude_labels,
            # sampling and shuffling
            sampler=sampler,
            shuffle=shuffle,
            shuffle_first_epoch=shuffle_first_epoch,
            # evaluation and monitoring
            embeddings_storage_mode=embeddings_storage_mode,
            epoch=epoch,
            # when and what to save
            save_final_model=save_final_model,
            save_optimizer_state=save_optimizer_state,
            save_model_each_k_epochs=save_model_each_k_epochs,
            # logging parameters
            create_file_logs=create_file_logs,
            create_loss_file=create_loss_file,
            write_weights=write_weights,
            # plugins
            plugins=plugins,
            **kwargs,
        )
def train_custom(
self,
base_path: Union[Path, str],
# training parameters
learning_rate: float = 0.1,
decoder_learning_rate: Optional[float] = None,
mini_batch_size: int = 32,
eval_batch_size: int = 64,
mini_batch_chunk_size: Optional[int] = None,
max_epochs: int = 100,
optimizer: Type[torch.optim.Optimizer] = SGD,
train_with_dev: bool = False,
train_with_test: bool = False,
# evaluation and monitoring
main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
monitor_test: bool = False,
monitor_train_sample: Union[float, int] = 0.0,
use_final_model_for_eval: bool = False,
gold_label_dictionary_for_eval: Optional[Dictionary] = None,
exclude_labels: List[str] = [],
# sampling and shuffling
sampler=None,
shuffle: bool = True,
shuffle_first_epoch: bool = True,
# evaluation and monitoring
embeddings_storage_mode: str = "cpu",
epoch: int = 0,
# when and what to save
save_final_model: bool = True,
save_optimizer_state: bool = False,
save_model_each_k_epochs: int = 0,
# logging parameters
create_file_logs: bool = True,
create_loss_file: bool = True,
write_weights: bool = False,
# plugins
plugins: List[TrainerPlugin] = [],
**kwargs,
) -> dict:
"""Trains any class that implements the flair.nn.Model interface.
Parameters
----------
base_path: Main path to which all output during training is logged and models are saved
learning_rate (float): The learning rate of the optimizer
decoder_learning_rate (Optional[float]): Optional, if set, the decoder is trained with a separate learning rate
mini_batch_size (int): Size of mini-batches during training
eval_batch_size (int): Size of mini-batches during evaluation
mini_batch_chunk_size (int): If mini-batches are larger than this number, they get broken down into chunks of
this size for processing purposes
max_epochs (int): Maximum number of epochs to train. Terminates training if this number is surpassed.
optimizer: The optimizer to use (typically SGD or Adam)
train_with_dev (bool): If True, the data from dev split is added to the training data
train_with_test (bool): If True, the data from test split is added to the training data
main_evaluation_metric: The metric to optimize (often micro-average or macro-average F1-score, or accuracy)
monitor_test (bool): If True, test data is evaluated at end of each epoch
monitor_train_sample: Set this to evaluate on a sample of the train data at the end of each epoch.
If you set an int, it will sample this many sentences to evaluate on. If you set a float, it will sample
a percentage of data points from train.
use_final_model_for_eval (bool): If True, the final model is used for the final evaluation. If False, the
model from the best epoch as determined by main_evaluation_metric is used for the final evaluation.
gold_label_dictionary_for_eval: Set to force evaluation to use a particular label dictionary
exclude_labels: Optionally define a list of labels to exclude from the evaluation
sampler: You can pass a data sampler here for special sampling of data.
shuffle: If True, data is shuffled during training
shuffle_first_epoch: If True, data is shuffled during the first epoch of training
embeddings_storage_mode: One of 'none' (all embeddings are deleted and freshly recomputed),
'cpu' (embeddings stored on CPU) or 'gpu' (embeddings stored on GPU)
epoch: The starting epoch (normally 0 but could be higher if you continue training model)
save_final_model: If True, the final model is saved at the end of training.
save_optimizer_state (bool): If True, the optimizer state is saved alongside the model
save_model_each_k_epochs: Each k epochs, a model state will be written out. If set to '5', a model will
be saved each 5 epochs. Default is 0 which means no model saving.
create_file_logs (bool): If True, logging output is written to a file
create_loss_file (bool): If True, a loss file logging output is created
write_weights (bool): If True, write weights to weights.txt on each batch logging event.
plugins: Any additional plugins you want to pass to the trainer
**kwargs: Additional arguments, for instance for the optimizer
Returns:
-------
dict: A dictionary with at least the key "test_score" containing the final evaluation score. Some plugins
add additional information to this dictionary, such as the :class:`MetricHistoryPlugin`
"""
# Create output folder
base_path = Path(base_path)
base_path.mkdir(exist_ok=True, parents=True)
# === START BLOCK: ACTIVATE PLUGINS === #
# We first activate all optional plugins. These take care of optional functionality such as various
# logging techniques and checkpointing
for plugin in plugins:
plugin.attach_to(self)
# log file plugin
if create_file_logs:
LogFilePlugin(base_path=base_path).attach_to(self)
# loss file plugin
if create_loss_file:
LossFilePlugin(base_path=base_path, epoch=epoch).attach_to(self)
# plugin for writing weights
if write_weights:
WeightExtractorPlugin(base_path=base_path).attach_to(self)
# plugin for checkpointing
if save_model_each_k_epochs > 0:
CheckpointPlugin(
save_model_each_k_epochs=save_model_each_k_epochs,
save_optimizer_state=save_optimizer_state,
base_path=base_path,
).attach_to(self)
# === END BLOCK: ACTIVATE PLUGINS === #
# derive parameters the function was called with (or defaults)
local_variables = locals()
training_parameters = {
parameter: local_variables[parameter] for parameter in signature(self.train_custom).parameters
}
training_parameters.update(kwargs)
# initialize model card with these parameters
self.model.model_card = self._initialize_model_card(**training_parameters)
# Prepare training data and get dataset size
train_data = self._get_train_data(train_with_dev=train_with_dev, train_with_test=train_with_test)
dataset_size = _len_dataset(train_data)
parameters = {"dataset_size": dataset_size, **training_parameters}
# determine what splits (train, dev, test) to evaluate
evaluation_splits = {}
if not train_with_dev and self.corpus.dev:
evaluation_splits["dev"] = self.corpus.dev
if self.corpus.test and monitor_test:
evaluation_splits["test"] = self.corpus.test
if monitor_train_sample > 0.0:
evaluation_splits["train_sample"] = self._sample_train_split(monitor_train_sample)
# determine how to determine best model and whether to save it
determine_best_epoch_using_dev_score = not train_with_dev and self.corpus.dev
best_epoch_score = 0 if determine_best_epoch_using_dev_score else float("inf")
save_best_model = not train_with_dev and not use_final_model_for_eval
# instantiate the optimizer
kwargs["lr"] = learning_rate
if decoder_learning_rate:
params = [
{
"params": [param for name, param in self.model.named_parameters() if "embeddings" not in name],
"lr": decoder_learning_rate,
},
{
"params": [param for name, param in self.model.named_parameters() if "embeddings" in name],
"lr": learning_rate,
},
]
self.optimizer = optimizer(params=params, **kwargs)
log.info(
f"Modifying learning rate to {decoder_learning_rate} for the following "
f"parameters: {[name for name, param in self.model.named_parameters() if 'embeddings' not in name]}"
)
else:
self.optimizer = optimizer(params=self.model.parameters(), **kwargs)
# initialize sampler if provided
if sampler is not None:
# init with default values if only class is provided
if inspect.isclass(sampler):
sampler = sampler()
# set dataset to sample from
sampler.set_dataset(train_data)
shuffle = False
# this field stores the names of all dynamic embeddings in the model (determined after first forward pass)
dynamic_embeddings = None
# Sanity checks
assert len(train_data) > 0
if epoch >= max_epochs:
log.warning(f"Starting at epoch {epoch + 1}/{max_epochs}. No training will be done.")
if epoch == 0:
self.check_for_and_delete_previous_best_models(base_path)
# -- AmpPlugin -> wraps with AMP
# -- AnnealingPlugin -> initialize schedulers (requires instantiated optimizer)
self.dispatch("after_setup", **parameters)
final_eval_info = (
"model after last epoch (final-model.pt)"
if use_final_model_for_eval
else "model from best epoch (best-model.pt)"
)
log_line(log)
log.info(f'Model: "{self.model}"')
log_line(log)
log.info(f"{self.corpus}")
log_line(log)
log.info(f"Train: {len(train_data)} sentences")
log.info(f" (train_with_dev={train_with_dev}, train_with_test={train_with_test})")
log_line(log)
log.info("Training Params:")
log.info(
f' - learning_rate: "{learning_rate}" '
f'{"(decoder: " + str(decoder_learning_rate) + ")" if decoder_learning_rate else ""}'
)
log.info(f' - mini_batch_size: "{mini_batch_size}"')
log.info(f' - max_epochs: "{max_epochs}"')
log.info(f' - shuffle: "{shuffle}"')
log_line(log)
log.info("Plugins:")
for plugin in plugins:
log.info(" - " + str(plugin))
log_line(log)
log.info(f"Final evaluation on {final_eval_info}")
log.info(f' - metric: "{main_evaluation_metric}"')
log_line(log)
log.info("Computation:")
log.info(f" - compute on device: {flair.device}")
log.info(f" - embedding storage: {embeddings_storage_mode}")
log_line(log)
log.info(f'Model training base path: "{base_path}"')
log_line(log)
# At any point you can hit Ctrl + C to break out of training early.
try:
total_train_samples = 0
for epoch in range(epoch + 1, max_epochs + 1):
log_line(log)
# - SchedulerPlugin -> load state for anneal_with_restarts, batch_growth_annealing, logic for early stopping
# - LossFilePlugin -> get the current epoch for loss file logging
self.dispatch("before_training_epoch", epoch=epoch)
self.model.model_card["training_parameters"]["epoch"] = epoch # type: ignore[index]
lr_info, momentum_info = self._get_current_lr_and_momentum(epoch)
# if shuffle_first_epoch==False, the first epoch is not shuffled
shuffle_data_this_epoch = shuffle
if not shuffle_first_epoch and epoch == 1:
shuffle_data_this_epoch = False
batch_loader = DataLoader(
train_data,
batch_size=mini_batch_size,
shuffle=shuffle_data_this_epoch,
sampler=sampler,
)
self.model.train()
epoch_train_loss: float = 0.0
epoch_train_samples: int = 0
epoch_start_time = time.time()
# log infos on training progress every `log_modulo` batches
log_modulo = max(1, int(len(batch_loader) / 10))
# process mini-batches
for batch_no, batch in enumerate(batch_loader):
# zero the gradients on the model and optimizer
self.model.zero_grad()
self.optimizer.zero_grad()
batch_train_loss = 0.0
batch_train_samples = 0
batch_kw = {
"batch_no": batch_no,
"batch": batch,
"total_number_of_batches": len(batch_loader),
"epoch": epoch,
}
self.dispatch("before_training_batch", **batch_kw)
batch_steps = self.get_batch_steps(batch, mini_batch_chunk_size=mini_batch_chunk_size)
# forward and backward for batch
for batch_step in batch_steps:
# forward pass
loss, datapoint_count = self.model.forward_loss(batch_step)
batch_train_samples += datapoint_count
batch_train_loss += loss.item()
self._backward(loss)
# identify dynamic embeddings (always deleted) on first sentence
if dynamic_embeddings is None:
dynamic_embeddings = identify_dynamic_embeddings(batch)
# depending on memory mode, embeddings are moved to CPU, GPU or deleted
store_embeddings(batch_step, embeddings_storage_mode, dynamic_embeddings)
self.dispatch("before_training_optimizer_step", **batch_kw)
# do the optimizer step
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)
self.optimizer.step()
if batch_train_samples > 0:
train_loss = batch_train_loss / batch_train_samples
self._record(MetricRecord.scalar(("train", "batch_loss"), train_loss, total_train_samples))
epoch_train_loss += batch_train_loss
epoch_train_samples += batch_train_samples
if (batch_no + 1) % log_modulo == 0:
intermittent_loss = (
epoch_train_loss / epoch_train_samples
if epoch_train_samples > 0
else epoch_train_samples / (batch_no + 1)
)
current_time = time.time()
lr_info, momentum_info = self._get_current_lr_and_momentum(epoch)
log.info(
f"epoch {epoch}"
f" - iter {batch_no + 1}/{len(batch_loader)}"
f" - loss {intermittent_loss:.8f}"
f" - time (sec): {(current_time - epoch_start_time):.2f}"
f" - samples/sec: {epoch_train_samples / (current_time - epoch_start_time):.2f}"
f"{lr_info}{momentum_info}"
)
# - SchedulerPlugin -> do the scheduler step if one-cycle or linear decay
# - WeightExtractorPlugin -> extracts weights
self.dispatch("after_training_batch", **batch_kw)
train_loss = epoch_train_loss / epoch_train_samples
self._record(MetricRecord.scalar(("train", "loss"), train_loss, epoch))
total_train_samples += epoch_train_samples
log_line(log)
log.info(f"EPOCH {epoch} done: loss {train_loss:.4f}{lr_info}")
# - CheckpointPlugin -> executes save_model_each_k_epochs
# - SchedulerPlugin -> log bad epochs
self.dispatch("after_training_epoch", epoch=epoch)
self.model.eval()
# Determine if this is the best model or if we need to anneal
current_epoch_has_best_model_so_far = False
validation_scores: tuple
for evaluation_split, evaluation_split_data in evaluation_splits.items():
eval_result = self.model.evaluate(
evaluation_split_data,
out_path=base_path / f"{evaluation_split}.tsv",
mini_batch_size=eval_batch_size,
exclude_labels=exclude_labels,
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary=gold_label_dictionary_for_eval,
embedding_storage_mode=embeddings_storage_mode,
gold_label_type=self.model.label_type,
gold_label_dictionary_for_eval=gold_label_dictionary_for_eval,
)
# log results
log.info(
f"{evaluation_split.upper()} : loss {eval_result.loss}"
f" - {main_evaluation_metric[1]}"
f" ({main_evaluation_metric[0]})"
f" {round(eval_result.main_score, 4)}"
)
# depending on memory mode, embeddings are moved to CPU, GPU or deleted
store_embeddings(evaluation_split_data, embeddings_storage_mode)
self._publish_eval_result(eval_result, evaluation_split, global_step=epoch)
# use DEV split to determine if this is the best model so far
if determine_best_epoch_using_dev_score and evaluation_split == "dev":
validation_scores = eval_result.main_score, eval_result.loss
if eval_result.main_score > best_epoch_score:
current_epoch_has_best_model_so_far = True
best_epoch_score = eval_result.main_score
# if not using DEV score, determine best model using train loss
if not determine_best_epoch_using_dev_score:
validation_scores = (train_loss,)
if epoch_train_loss < best_epoch_score:
current_epoch_has_best_model_so_far = True
best_epoch_score = train_loss
# - LossFilePlugin -> somehow prints all relevant metrics
# - AnnealPlugin -> scheduler step
self.dispatch(
"after_evaluation",
epoch=epoch,
current_model_is_best=current_epoch_has_best_model_so_far,
validation_scores=validation_scores,
)
if save_best_model and current_epoch_has_best_model_so_far:
log.info("saving best model")
self.model.save(base_path / "best-model.pt", checkpoint=save_optimizer_state)
# - SWAPlugin -> restores SGD weights from SWA
self.dispatch("after_training_loop")
# if we do not use dev data for model selection, save final model
if save_final_model:
self.model.save(base_path / "final-model.pt", checkpoint=save_optimizer_state)
except KeyboardInterrupt:
log_line(log)
log.info("Exiting from training early.")
self.dispatch("training_interrupt") # TODO: no plugin calls this event
log.info("Saving model ...")
self.model.save(base_path / "final-model.pt", checkpoint=save_optimizer_state)
log.info("Done.")
except TrainingInterrupt as exc:
log_line(log)
log.info(str(exc))
log_line(log)
self.dispatch("training_interrupt") # TODO: no plugin calls this event
log.info("Saving model ...")
self.model.save(base_path / "final-model.pt", checkpoint=save_optimizer_state)
log.info("Done.")
except Exception:
self.dispatch("_training_exception")
raise
finally:
# TensorboardLogger -> closes writer
self.dispatch("_training_finally")
# test best model if test data is present
if self.corpus.test and not train_with_test:
log_line(log)
self.model.eval()
if (base_path / "best-model.pt").exists():
log.info("Loading model from best epoch ...")
self.model.load_state_dict(self.model.load(base_path / "best-model.pt").state_dict())
else:
log.info("Testing using last state of model ...")
test_results = self.model.evaluate(
self.corpus.test,
gold_label_type=self.model.label_type,
mini_batch_size=eval_batch_size,
out_path=base_path / "test.tsv",
embedding_storage_mode="none",
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary=gold_label_dictionary_for_eval,
exclude_labels=exclude_labels,
return_loss=False,
)
log.info(test_results.detailed_results)
log_line(log)
# get and return the final test score of best model
self.return_values["test_score"] = test_results.main_score
else:
self.return_values["test_score"] = 0
log.info("Test data not provided setting final score to 0")
# MetricHistoryPlugin -> stores the loss history in return_values
self.dispatch("after_training")
# Store return values, as they will be erased by reset_training_attributes
return_values = self.return_values
self.reset_training_attributes()
return return_values
def _get_current_lr_and_momentum(self, epoch):
current_learning_rate = [group["lr"] for group in self.optimizer.param_groups]
momentum = [group["momentum"] if "momentum" in group else 0 for group in self.optimizer.param_groups]
lr_info = " - lr: " + ",".join([f"{m:.6f}" for m in current_learning_rate])
momentum_info = " - momentum: " + ",".join([f"{m:.6f}" for m in momentum])
self._record(MetricRecord.scalar_list("learning_rate", current_learning_rate, epoch))
self._record(MetricRecord.scalar_list(("optimizer", "momentum"), momentum, epoch))
return lr_info, momentum_info
def _sample_train_split(self, monitor_train_sample):
train_part_size = 0
if isinstance(monitor_train_sample, float):
train_part_size = int(_len_dataset(self.corpus.train) * monitor_train_sample)
if isinstance(monitor_train_sample, int):
train_part_size = monitor_train_sample
assert train_part_size > 0
# get a random sample of training sentences
train_part_indices = list(range(_len_dataset(self.corpus.train)))
random.shuffle(train_part_indices)
train_part_indices = train_part_indices[:train_part_size]
train_part = torch.utils.data.dataset.Subset(self.corpus.train, train_part_indices)
return train_part
def _flat_dict_items(self, d, composite_key=()):
for key, value in d.items():
key = (*composite_key, key) if isinstance(key, str) else composite_key + tuple(key)
if isinstance(value, dict):
yield from self._flat_dict_items(value, composite_key=key)
else:
yield key, value
def _publish_eval_result(self, result, prefix=(), **kw):
for key, value in self._flat_dict_items(result.scores, composite_key=MetricName(prefix)):
try:
self._record(MetricRecord.scalar(name=key, value=float(value), **kw))
except TypeError:
if isinstance(value, list):
self._record(MetricRecord.scalar_list(name=key, value=value, **kw))
elif isinstance(value, torch.Tensor):
self._record(MetricRecord.histogram(name=key, value=value, **kw))
else:
value = str(value)
self._record(MetricRecord.string(name=key, value=value, **kw))
self._record(MetricRecord.string(name=MetricName(prefix) + "score", value=result.main_score, **kw))
self._record(
MetricRecord.string(name=MetricName(prefix) + "detailed_result", value=result.detailed_results, **kw)
)
def _initialize_model_card(self, **training_parameters):
"""Initializes model card with library versions and parameters.
:param training_parameters:
:return:
"""
# create a model card for this model with Flair and PyTorch version
model_card = {
"flair_version": flair.__version__,
"pytorch_version": torch.__version__,
}
# record Transformers version if library is loaded
try:
import transformers
model_card["transformers_version"] = transformers.__version__
except ImportError:
pass
# remember all parameters used in train() call
model_card["training_parameters"] = {
k: str(v) if isinstance(v, Path) else v for k, v in training_parameters.items()
}
plugins = [plugin.__class__ for plugin in model_card["training_parameters"]["plugins"]]
model_card["training_parameters"]["plugins"] = plugins
return model_card
    def _record(self, metric):
        """Forward a metric record to all plugins listening for the "metric_recorded" event."""
        self.dispatch("metric_recorded", metric)
| 36,077 | 41.245902 | 124 | py |
flair | flair-master/flair/trainers/plugins/loggers/tensorboard.py | import logging
import os
from flair.trainers.plugins.base import TrainerPlugin
from flair.training_utils import log_line
log = logging.getLogger("flair")
class TensorboardLogger(TrainerPlugin):
    """Plugin that takes care of tensorboard logging."""

    def __init__(self, log_dir=None, comment="", tracked_metrics=()) -> None:
        """Initializes the TensorboardLogger.

        :param log_dir: Directory into which tensorboard log files will be written # noqa: E501
        :param comment: Comment appended to the default tensorboard run name
        :param tracked_metrics: List of tuples that specify which metrics (in addition to the main_score) shall be plotted in tensorboard, could be [("macro avg", 'f1-score'), ("macro avg", 'precision')] for example # noqa: E501
        """
        super().__init__()
        self.comment = comment
        self.tracked_metrics = tracked_metrics
        # bugfix: always define these attributes up front -- previously self.writer was
        # only assigned inside the try block, so a failed tensorboard import caused an
        # AttributeError (instead of a clean assertion failure) in the hooks below
        self.writer = None
        self._warned = False
        try:
            from torch.utils.tensorboard import SummaryWriter

            if log_dir is not None and not os.path.exists(log_dir):
                os.mkdir(log_dir)
            self.writer = SummaryWriter(log_dir=log_dir, comment=self.comment)
            log.info(f"tensorboard logging path is {log_dir}")

        except ImportError:
            log_line(log)
            log.warning("ATTENTION! PyTorch >= 1.1.0 and pillow are required for TensorBoard support!")
            log_line(log)

    @TrainerPlugin.hook
    def metric_recorded(self, record):
        """Write scalar metric records to tensorboard; other record types are skipped with a one-time warning."""
        assert self.writer is not None

        # TODO: check if metric is in tracked metrics
        if record.is_scalar:
            self.writer.add_scalar(str(record.name), record.value, record.global_step, walltime=record.walltime)
        else:
            if not self._warned:
                log.warning("Logging anything other than scalars to TensorBoard is currently not supported.")
                self._warned = True

    @TrainerPlugin.hook
    def _training_finally(self, **kw):
        """Closes the writer.

        :param kw:
        :return:
        """
        assert self.writer is not None
        self.writer.close()
| 2,065 | 33.433333 | 229 | py |
flair | flair-master/tests/test_lemmatizer.py | import torch
import flair
from flair.data import Sentence
from flair.models import Lemmatizer
def test_words_to_char_indices():
    """Check that Lemmatizer.words_to_char_indices produces the expected padded
    character-index matrices for different start/end-symbol and padding options.
    """
    sentence = Sentence("Hello look what a beautiful day!")
    lemmatizer = Lemmatizer()  # lemmatizer uses standard char dictionary
    # shorthand for the special indices used throughout the expected tensors
    d = lemmatizer.dummy_index
    e = lemmatizer.end_index
    s = lemmatizer.start_index
    string_list = sentence.to_tokenized_string().split()
    # With end symbol, without start symbol, padding in front
    # (one row per token; rows are right-aligned and terminated with the end index)
    target = torch.tensor(
        [
            [d, d, d, d, 55, 5, 15, 15, 12, e],
            [d, d, d, d, d, 15, 12, 12, 28, e],
            [d, d, d, d, d, 23, 13, 9, 8, e],
            [d, d, d, d, d, d, d, d, 9, e],
            [24, 5, 9, 16, 8, 7, 22, 16, 15, e],
            [d, d, d, d, d, d, 14, 9, 27, e],
            [d, d, d, d, d, d, d, d, 76, e],
        ],
        dtype=torch.long,
    ).to(flair.device)
    out = lemmatizer.words_to_char_indices(string_list, end_symbol=True, start_symbol=False, padding_in_front=True)
    assert torch.equal(target, out)
    # Without end symbol, with start symbol, padding in back
    # (rows start with the start index and are left-aligned)
    target = torch.tensor(
        [
            [s, 55, 5, 15, 15, 12, d, d, d, d],
            [s, 15, 12, 12, 28, d, d, d, d, d],
            [s, 23, 13, 9, 8, d, d, d, d, d],
            [s, 9, d, d, d, d, d, d, d, d],
            [s, 24, 5, 9, 16, 8, 7, 22, 16, 15],
            [s, 14, 9, 27, d, d, d, d, d, d],
            [s, 76, d, d, d, d, d, d, d, d],
        ],
        dtype=torch.long,
    ).to(flair.device)
    out = lemmatizer.words_to_char_indices(string_list, end_symbol=False, start_symbol=True, padding_in_front=False)
    assert torch.equal(target, out)
    # Without end symbol, without start symbol, padding in front:
    # only the output shape is checked (longest token has 9 characters)
    assert lemmatizer.words_to_char_indices(
        string_list, end_symbol=False, start_symbol=False, padding_in_front=True
    ).size() == (7, 9)
| 1,907 | 33.690909 | 116 | py |
flair | flair-master/tests/embedding_test_utils.py | from typing import Any, Dict, List, Optional, Type
import pytest
import torch
from flair.data import Sentence
from flair.embeddings import Embeddings
from flair.embeddings.base import load_embeddings
class BaseEmbeddingsTest:
    """Shared test harness for flair embedding classes.

    Subclasses configure the class attributes below (the embedding class under
    test, its constructor arguments, and invalid inputs) and inherit all of the
    generic test methods defined here.
    """

    # the embedding class under test
    embedding_cls: Type[Embeddings[Sentence]]
    # whether the embedding produces per-token vectors
    is_token_embedding: bool
    # whether the embedding produces a per-sentence vector
    is_document_embedding: bool
    # constructor arguments used when a test does not supply its own
    default_args: Dict[str, Any]
    # alternative valid / invalid constructor argument sets for parametrized tests
    valid_args: List[Dict[str, Any]] = []
    invalid_args: List[Dict[str, Any]] = []
    # model names that the constructor must reject with a ValueError
    invalid_names: List[str] = []
    # name of the constructor argument holding the model name (if any)
    name_field: Optional[str] = None

    # texts with unusual unicode content used to stress-test embedding robustness
    # (note: attribute name "weired" is kept as-is; subclasses may reference it)
    weired_texts: List[str] = [
        "Hybrid mesons , qq ̄ states with an admixture",
        "typical proportionalities of \u223C 1nmV \u2212 1 [ 3,4 ] .",
        "🤟 🤟 🤟 hüllo",
        "🤟hallo 🤟 🤟 🤟 🤟",
        "🤟",
        "\uF8F9",
    ]

    def create_embedding_from_name(self, name: str):
        """Overwrite this method if it is more complex to load an embedding by name."""
        assert self.name_field is not None
        kwargs = dict(self.default_args)
        kwargs.pop(self.name_field)
        return self.embedding_cls(name, **kwargs)  # type: ignore[call-arg]

    def create_embedding_with_args(self, args: Dict[str, Any]):
        """Instantiate the embedding with default_args overridden by ``args``."""
        kwargs = dict(self.default_args)
        for k, v in args.items():
            kwargs[k] = v
        return self.embedding_cls(**kwargs)

    @pytest.mark.parametrize("text", weired_texts)
    def test_embedding_works_with_weird_text(self, text):
        """Embedding unusual unicode text must still yield full-length vectors."""
        embeddings = self.create_embedding_with_args(self.default_args)
        embedding_names = embeddings.get_names()
        sentence = Sentence(text)
        embeddings.embed(sentence)
        if self.is_token_embedding:
            for token in sentence:
                assert len(token.get_embedding(embedding_names)) == embeddings.embedding_length
        if self.is_document_embedding:
            assert len(sentence.get_embedding(embedding_names)) == embeddings.embedding_length

    @pytest.mark.parametrize("args", valid_args)
    def test_embedding_also_sets_trailing_whitespaces(self, args):
        """Tokens that are pure whitespace must also receive embeddings."""
        if not self.is_token_embedding:
            pytest.skip("The test is only valid for token embeddings")
        embeddings = self.create_embedding_with_args(args)
        sentence: Sentence = Sentence(["hello", " ", "hm", " "])
        embeddings.embed(sentence)
        names = embeddings.get_names()
        for token in sentence:
            assert len(token.get_embedding(names)) == embeddings.embedding_length

    @pytest.mark.parametrize("args", valid_args)
    def test_generic_sentence(self, args):
        """A plain sentence must be embedded with the declared embedding length."""
        embeddings = self.create_embedding_with_args(args)
        sentence: Sentence = Sentence("I love Berlin")
        embeddings.embed(sentence)
        names = embeddings.get_names()
        if self.is_token_embedding:
            for token in sentence:
                assert len(token.get_embedding(names)) == embeddings.embedding_length
        if self.is_document_embedding:
            assert len(sentence.get_embedding(names)) == embeddings.embedding_length

    @pytest.mark.parametrize("name", invalid_names)
    def test_load_non_existing_embedding(self, name):
        """Loading an unknown model name must raise a ValueError."""
        with pytest.raises(ValueError):
            self.create_embedding_from_name(name)

    def test_keep_batch_order(self):
        """Embeddings must not depend on the position of a sentence within a batch."""
        embeddings = self.create_embedding_with_args(self.default_args)
        embedding_names = embeddings.get_names()
        sentences_1 = [Sentence("First sentence"), Sentence("This is second sentence")]
        sentences_2 = [Sentence("This is second sentence"), Sentence("First sentence")]
        embeddings.embed(sentences_1)
        embeddings.embed(sentences_2)
        assert sentences_1[0].to_original_text() == "First sentence"
        assert sentences_1[1].to_original_text() == "This is second sentence"
        # identical sentences must yield identical vectors regardless of batch order
        if self.is_document_embedding:
            assert (
                torch.norm(
                    sentences_1[0].get_embedding(embedding_names) - sentences_2[1].get_embedding(embedding_names)
                )
                == 0.0
            )
            assert (
                torch.norm(
                    sentences_1[1].get_embedding(embedding_names) - sentences_2[0].get_embedding(embedding_names)
                )
                == 0.0
            )
        if self.is_token_embedding:
            for i in range(len(sentences_1[0])):
                assert (
                    torch.norm(
                        sentences_1[0][i].get_embedding(embedding_names)
                        - sentences_2[1][i].get_embedding(embedding_names)
                    )
                    == 0.0
                )
            for i in range(len(sentences_1[1])):
                assert (
                    torch.norm(
                        sentences_1[1][i].get_embedding(embedding_names)
                        - sentences_2[0][i].get_embedding(embedding_names)
                    )
                    == 0.0
                )
        del embeddings

    @pytest.mark.parametrize("args", valid_args)
    def test_embeddings_stay_the_same_after_saving_and_loading(self, args):
        """A save/load round trip must reproduce names, lengths and vectors exactly."""
        embeddings = self.create_embedding_with_args(args)

        sentence_old: Sentence = Sentence("I love Berlin")
        embeddings.embed(sentence_old)
        names_old = embeddings.get_names()
        embedding_length_old = embeddings.embedding_length

        save_data = embeddings.save_embeddings(use_state_dict=True)
        del embeddings
        new_embeddings = load_embeddings(save_data)

        sentence_new: Sentence = Sentence("I love Berlin")
        new_embeddings.embed(sentence_new)
        names_new = new_embeddings.get_names()
        embedding_length_new = new_embeddings.embedding_length

        assert names_old == names_new
        assert embedding_length_old == embedding_length_new
        if self.is_token_embedding:
            for token_old, token_new in zip(sentence_old, sentence_new):
                assert (token_old.get_embedding(names_old) == token_new.get_embedding(names_new)).all()
        if self.is_document_embedding:
            assert (sentence_old.get_embedding(names_old) == sentence_new.get_embedding(names_new)).all()

    def test_default_embeddings_stay_the_same_after_saving_and_loading(self):
        """Same round-trip check as above, but with the default constructor args."""
        embeddings = self.create_embedding_with_args(self.default_args)

        sentence_old: Sentence = Sentence("I love Berlin")
        embeddings.embed(sentence_old)
        names_old = embeddings.get_names()
        embedding_length_old = embeddings.embedding_length

        save_data = embeddings.save_embeddings(use_state_dict=True)
        new_embeddings = load_embeddings(save_data)

        sentence_new: Sentence = Sentence("I love Berlin")
        new_embeddings.embed(sentence_new)
        names_new = new_embeddings.get_names()
        embedding_length_new = new_embeddings.embedding_length

        # reloaded embeddings must come back in eval mode
        assert not new_embeddings.training
        assert names_old == names_new
        assert embedding_length_old == embedding_length_new
        if self.is_token_embedding:
            for token_old, token_new in zip(sentence_old, sentence_new):
                assert (token_old.get_embedding(names_old) == token_new.get_embedding(names_new)).all()
        if self.is_document_embedding:
            assert (sentence_old.get_embedding(names_old) == sentence_new.get_embedding(names_new)).all()

    def test_embeddings_load_in_eval_mode(self):
        """Freshly constructed embeddings must not be in training mode."""
        embeddings = self.create_embedding_with_args(self.default_args)
        assert not embeddings.training
| 7,511 | 39.387097 | 113 | py |
flair | flair-master/tests/test_trainer.py | import pytest
from torch.optim import Adam
import flair
from flair.data import Sentence
from flair.datasets import ClassificationCorpus
from flair.embeddings import DocumentPoolEmbeddings, FlairEmbeddings, WordEmbeddings
from flair.models import SequenceTagger, TextClassifier
from flair.trainers import ModelTrainer
turian_embeddings = WordEmbeddings("turian")
@pytest.mark.integration()
def test_text_classifier_multi(results_base_path, tasks_base_path):
    """Train a small text classifier for one epoch and verify the training log."""
    flair.set_seed(123)

    char_lm = FlairEmbeddings("news-forward-fast")

    corpus = ClassificationCorpus(
        tasks_base_path / "trivial" / "trivial_text_classification_single",
        label_type="city",
    )
    label_dict = corpus.make_label_dictionary(label_type="city")

    classifier = TextClassifier(
        embeddings=DocumentPoolEmbeddings([char_lm], fine_tune_mode="linear"),
        label_dictionary=label_dict,
        label_type="city",
    )

    ModelTrainer(classifier, corpus).train(results_base_path, mini_batch_size=2, max_epochs=1, shuffle=True)
    del classifier

    log_path = results_base_path / "training.log"
    assert log_path.exists()
    logged_lines = log_path.read_text(encoding="utf-8").split("\n")

    # each of these fragments must appear somewhere in the training log
    for fragment in [
        "compute on device: ",
        "Corpus: ",
        "- learning_rate: ",
        "patience",
        "embedding storage:",
        "epoch 1 - iter",
        "EPOCH 1 done: loss",
        "Results:",
    ]:
        assert any(fragment in logged_line for logged_line in logged_lines), fragment
@pytest.mark.integration()
def test_train_load_use_tagger_large(results_base_path, tasks_base_path):
    """Train a POS tagger on a downsampled UD_ENGLISH corpus, then reload and predict."""
    corpus = flair.datasets.UD_ENGLISH().downsample(0.01)
    pos_dictionary = corpus.make_label_dictionary("pos")

    tagger = SequenceTagger(
        hidden_size=64,
        embeddings=turian_embeddings,
        tag_dictionary=pos_dictionary,
        tag_type="pos",
        use_crf=False,
    )

    # initialize trainer and run a short training
    ModelTrainer(tagger, corpus).train(
        results_base_path,
        learning_rate=0.1,
        mini_batch_size=32,
        max_epochs=2,
        shuffle=False,
    )
    del tagger, pos_dictionary, corpus

    # reload the final model and make sure prediction works, including empty input
    loaded = SequenceTagger.load(results_base_path / "final-model.pt")
    filled_sentence = Sentence("I love Berlin")
    empty_sentence = Sentence(" ")

    loaded.predict(filled_sentence)
    loaded.predict([filled_sentence, empty_sentence])
    loaded.predict([empty_sentence])
    del loaded
@pytest.mark.integration()
def test_train_load_use_tagger_adam(results_base_path, tasks_base_path):
    """Train a NER tagger with the Adam optimizer, then reload and predict."""
    corpus = flair.datasets.ColumnCorpus(data_folder=tasks_base_path / "fashion", column_format={0: "text", 3: "ner"})
    ner_dictionary = corpus.make_label_dictionary("ner", add_unk=False)

    tagger = SequenceTagger(
        hidden_size=64,
        embeddings=turian_embeddings,
        tag_dictionary=ner_dictionary,
        tag_type="ner",
        use_crf=False,
    )

    # initialize trainer and run a short training with Adam
    ModelTrainer(tagger, corpus).train(
        results_base_path,
        learning_rate=0.1,
        mini_batch_size=2,
        max_epochs=2,
        shuffle=False,
        optimizer=Adam,
    )
    del tagger, ner_dictionary, corpus

    # reload the final model and make sure prediction works, including empty input
    loaded = SequenceTagger.load(results_base_path / "final-model.pt")
    filled_sentence = Sentence("I love Berlin")
    empty_sentence = Sentence(" ")

    loaded.predict(filled_sentence)
    loaded.predict([filled_sentence, empty_sentence])
    loaded.predict([empty_sentence])
    del loaded
def test_missing_validation_split(results_base_path, tasks_base_path):
    """Training must work on a corpus where no dev split was sampled.

    NOTE(review): unlike the other training tests in this file, this one is not
    marked ``@pytest.mark.integration()`` — confirm whether it is intended to
    run in the fast unit suite.
    """
    # single training file; sample_missing_splits=False leaves dev/test empty
    corpus = flair.datasets.ColumnCorpus(
        data_folder=tasks_base_path / "fewshot_conll",
        train_file="1shot.txt",
        sample_missing_splits=False,
        column_format={0: "text", 1: "ner"},
    )
    tag_dictionary = corpus.make_label_dictionary("ner", add_unk=True)
    tagger: SequenceTagger = SequenceTagger(
        hidden_size=64,
        embeddings=turian_embeddings,
        tag_dictionary=tag_dictionary,
        tag_type="ner",
        use_crf=False,
    )
    # initialize trainer
    trainer: ModelTrainer = ModelTrainer(tagger, corpus)
    trainer.train(
        results_base_path,
        learning_rate=0.1,
        mini_batch_size=2,
        max_epochs=2,
        shuffle=False,
        optimizer=Adam,
    )
    del trainer, tagger, tag_dictionary, corpus
    # reload the saved model and check prediction on regular and empty sentences
    loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / "final-model.pt")
    sentence = Sentence("I love Berlin")
    sentence_empty = Sentence("       ")
    loaded_model.predict(sentence)
    loaded_model.predict([sentence, sentence_empty])
    loaded_model.predict([sentence_empty])
    del loaded_model
# [dataset boundary row — previous file stats: 4,999 chars / 28.585799 avg line / 118 max line / py]
# repo: flair — file: flair-master/tests/conftest.py
from pathlib import Path
import pytest
import torch
import flair
@pytest.fixture(scope="module")
def resources_path():
    """Absolute path to the test suite's ``resources`` directory."""
    tests_dir = Path(__file__).parent
    return tests_dir / "resources"
@pytest.fixture(scope="module")
def tasks_base_path(resources_path):
    """Path to the bundled task corpora under ``resources/tasks``."""
    tasks_dir = resources_path / "tasks"
    return tasks_dir
@pytest.fixture()
def results_base_path(resources_path):
    """Yield a scratch ``results`` directory and recursively delete it afterwards.

    Replaces a hand-rolled reverse-``rglob`` delete loop with the stdlib
    equivalent, ``shutil.rmtree``.
    """
    import shutil  # local import: only needed for teardown

    path = resources_path / "results"
    try:
        yield path
    finally:
        # same effect as deleting files, then directories, then the root itself
        if path.is_dir():
            shutil.rmtree(path)
@pytest.fixture(autouse=True)
def set_cpu(force_cpu):
    # Pin all tests to CPU when --force-cpu was given, even if a GPU is available.
    if force_cpu:
        flair.device = torch.device("cpu")
def pytest_addoption(parser):
    """Register the custom command-line options used by this test suite."""
    option_specs = (
        ("--runintegration", "run integration tests"),
        ("--force-cpu", "use cpu for tests even when gpu is available"),
    )
    for flag, help_text in option_specs:
        parser.addoption(flag, action="store_true", default=False, help=help_text)
def pytest_collection_modifyitems(config, items):
    """Skip tests marked ``integration`` unless --runintegration was passed."""
    if config.getoption("--runintegration"):
        return
    skip_marker = pytest.mark.skip(reason="need --runintegration option to run")
    for item in items:
        if "integration" in item.keywords:
            item.add_marker(skip_marker)
def pytest_generate_tests(metafunc):
    """Parametrize the ``force_cpu`` fixture from the --force-cpu option."""
    if "force_cpu" not in metafunc.fixturenames:
        return
    option_value = metafunc.config.getoption("--force-cpu")
    if option_value is not None:
        metafunc.parametrize("force_cpu", [option_value])
# [dataset boundary row — previous file stats: 1,620 chars / 23.19403 avg line / 89 max line / py]
# repo: flair — file: flair-master/tests/models/test_relation_classifier.py
from operator import itemgetter
from typing import Dict, List, Optional, Set, Tuple
import pytest
from torch.utils.data import Dataset
from flair.data import Relation, Sentence
from flair.datasets import ColumnCorpus, DataLoader
from flair.embeddings import TransformerDocumentEmbeddings
from flair.models import RelationClassifier
from flair.models.relation_classifier_model import (
EncodedSentence,
EncodingStrategy,
EntityMarker,
EntityMarkerPunct,
EntityMask,
TypedEntityMarker,
TypedEntityMarkerPunct,
TypedEntityMask,
)
from tests.model_test_utils import BaseModelTest
# For each EncodingStrategy, the expected (head, tail) surface forms that the
# strategy should produce for the seven relation candidates in the test corpus.
encoding_strategies: Dict[EncodingStrategy, List[Tuple[str, str]]] = {
    EntityMask(): [("[HEAD]", "[TAIL]") for _ in range(7)],
    TypedEntityMask(): [
        ("[HEAD-ORG]", "[TAIL-PER]"),
        ("[HEAD-ORG]", "[TAIL-PER]"),
        ("[HEAD-ORG]", "[TAIL-PER]"),
        ("[HEAD-LOC]", "[TAIL-PER]"),
        ("[HEAD-LOC]", "[TAIL-PER]"),
        ("[HEAD-LOC]", "[TAIL-PER]"),
        ("[HEAD-ORG]", "[TAIL-PER]"),
    ],
    EntityMarker(): [
        ("[HEAD] Google [/HEAD]", "[TAIL] Larry Page [/TAIL]"),
        ("[HEAD] Google [/HEAD]", "[TAIL] Sergey Brin [/TAIL]"),
        ("[HEAD] Microsoft [/HEAD]", "[TAIL] Bill Gates [/TAIL]"),
        ("[HEAD] Berlin [/HEAD]", "[TAIL] Konrad Zuse [/TAIL]"),
        ("[HEAD] Berlin [/HEAD]", "[TAIL] Joseph Weizenbaum [/TAIL]"),
        ("[HEAD] Germany [/HEAD]", "[TAIL] Joseph Weizenbaum [/TAIL]"),
        ("[HEAD] MIT [/HEAD]", "[TAIL] Joseph Weizenbaum [/TAIL]"),
    ],
    TypedEntityMarker(): [
        ("[HEAD-ORG] Google [/HEAD-ORG]", "[TAIL-PER] Larry Page [/TAIL-PER]"),
        ("[HEAD-ORG] Google [/HEAD-ORG]", "[TAIL-PER] Sergey Brin [/TAIL-PER]"),
        ("[HEAD-ORG] Microsoft [/HEAD-ORG]", "[TAIL-PER] Bill Gates [/TAIL-PER]"),
        ("[HEAD-LOC] Berlin [/HEAD-LOC]", "[TAIL-PER] Konrad Zuse [/TAIL-PER]"),
        ("[HEAD-LOC] Berlin [/HEAD-LOC]", "[TAIL-PER] Joseph Weizenbaum [/TAIL-PER]"),
        ("[HEAD-LOC] Germany [/HEAD-LOC]", "[TAIL-PER] Joseph Weizenbaum [/TAIL-PER]"),
        ("[HEAD-ORG] MIT [/HEAD-ORG]", "[TAIL-PER] Joseph Weizenbaum [/TAIL-PER]"),
    ],
    EntityMarkerPunct(): [
        ("@ Google @", "# Larry Page #"),
        ("@ Google @", "# Sergey Brin #"),
        ("@ Microsoft @", "# Bill Gates #"),
        ("@ Berlin @", "# Konrad Zuse #"),
        ("@ Berlin @", "# Joseph Weizenbaum #"),
        ("@ Germany @", "# Joseph Weizenbaum #"),
        ("@ MIT @", "# Joseph Weizenbaum #"),
    ],
    TypedEntityMarkerPunct(): [
        ("@ * ORG * Google @", "# ^ PER ^ Larry Page #"),
        ("@ * ORG * Google @", "# ^ PER ^ Sergey Brin #"),
        ("@ * ORG * Microsoft @", "# ^ PER ^ Bill Gates #"),
        ("@ * LOC * Berlin @", "# ^ PER ^ Konrad Zuse #"),
        ("@ * LOC * Berlin @", "# ^ PER ^ Joseph Weizenbaum #"),
        ("@ * LOC * Germany @", "# ^ PER ^ Joseph Weizenbaum #"),
        ("@ * ORG * MIT @", "# ^ PER ^ Joseph Weizenbaum #"),
    ],
}
class TestRelationClassifier(BaseModelTest):
    """Model tests for :class:`RelationClassifier`, including checks that corpus
    transformation produces correctly encoded sentences for every supported
    :class:`EncodingStrategy` (see module-level ``encoding_strategies``)."""

    model_cls = RelationClassifier
    train_label_type = "relation"
    multiclass_prediction_labels = ["apple", "tv"]
    model_args = {
        "entity_label_types": "ner",
        "entity_pair_labels": {  # Define valid entity pair combinations, used as relation candidates
            ("ORG", "PER"),  # founded_by
            ("LOC", "PER"),  # place_of_birth
        },
        "allow_unk_tag": False,
    }
    training_args = {"max_epochs": 2, "learning_rate": 4e-4, "mini_batch_size": 4}
    finetune_instead_of_train = True

    @pytest.fixture()
    def corpus(self, tasks_base_path):
        # The same CoNLL-U Plus file serves as train/dev/test on purpose:
        # test_transform_corpus checks all three splits against one ground truth.
        return ColumnCorpus(
            data_folder=tasks_base_path / "conllu",
            train_file="train.conllup",
            dev_file="train.conllup",
            test_file="train.conllup",
            column_format={1: "text", 2: "pos", 3: "ner"},
        )

    @pytest.fixture()
    def embeddings(self):
        return TransformerDocumentEmbeddings(model="distilbert-base-uncased", layers="-1", fine_tune=True)

    def transform_corpus(self, model, corpus):
        # RelationClassifier requires an encoded corpus rather than the raw one.
        return model.transform_corpus(corpus)

    @pytest.fixture()
    def example_sentence(self):
        # Sentence with gold NER spans but no relation annotation.
        sentence = Sentence(["Microsoft", "was", "found", "by", "Bill", "Gates"])
        sentence[:1].add_label(typename="ner", value="ORG", score=1.0)
        sentence[4:].add_label(typename="ner", value="PER", score=1.0)
        return sentence

    @pytest.fixture()
    def train_test_sentence(self):
        """Sentence with one ORG span and two PER spans → two relation candidates."""
        sentence: Sentence = Sentence(
            [
                "Intel",
                "was",
                "founded",
                "on",
                "July",
                "18",
                ",",
                "1968",
                ",",
                "by",
                "semiconductor",
                "pioneers",
                "Gordon",
                "Moore",
                "and",
                "Robert",
                "Noyce",
                ".",
            ]
        )
        sentence[:1].add_label(typename="ner", value="ORG", score=1.0)  # Intel -> ORG
        sentence[12:14].add_label(typename="ner", value="PER", score=1.0)  # Gordon Moore -> PER
        sentence[15:17].add_label(typename="ner", value="PER", score=1.0)  # Robert Noyce -> PER
        return sentence

    def assert_training_example(self, predicted_training_example):
        """Both ORG→PER candidates must be predicted as ``founded_by``."""
        relations: List[Relation] = predicted_training_example.get_relations("relation")
        assert len(relations) == 2
        # Intel ----founded_by---> Gordon Moore
        assert [label.value for label in relations[0].labels] == ["founded_by"]
        assert (
            relations[0].unlabeled_identifier
            == Relation(
                first=predicted_training_example[:1], second=predicted_training_example[12:14]
            ).unlabeled_identifier
        )
        # Intel ----founded_by---> Robert Noyce
        assert [label.value for label in relations[1].labels] == ["founded_by"]
        assert (
            relations[1].unlabeled_identifier
            == Relation(
                first=predicted_training_example[:1], second=predicted_training_example[15:17]
            ).unlabeled_identifier
        )

    @staticmethod
    def check_transformation_correctness(
        split: Optional[Dataset],
        ground_truth: Set[Tuple[str, Tuple[str, ...]]],
    ) -> None:
        """Assert that *split* consists of EncodedSentences matching *ground_truth* exactly."""
        # Ground truth is a set of tuples of (<Sentence Text>, <Relation Label Values>)
        assert split is not None
        data_loader = DataLoader(split, batch_size=1)
        assert all(isinstance(sentence, EncodedSentence) for sentence in map(itemgetter(0), data_loader))
        assert {
            (sentence.to_tokenized_string(), tuple(label.value for label in sentence.get_labels("relation")))
            for sentence in map(itemgetter(0), data_loader)
        } == ground_truth

    @pytest.mark.parametrize(
        "cross_augmentation", [True, False], ids=["with_cross_augmentation", "without_cross_augmentation"]
    )
    @pytest.mark.parametrize(
        ("encoding_strategy", "encoded_entity_pairs"),
        encoding_strategies.items(),
        ids=[type(encoding_strategy).__name__ for encoding_strategy in encoding_strategies],
    )
    def test_transform_corpus(
        self,
        corpus: ColumnCorpus,
        embeddings: TransformerDocumentEmbeddings,
        cross_augmentation: bool,
        encoding_strategy: EncodingStrategy,
        encoded_entity_pairs: List[Tuple[str, str]],
    ) -> None:
        """Transform the corpus and compare every split against the expected encodings."""
        label_dictionary = corpus.make_label_dictionary("relation")
        model: RelationClassifier = self.build_model(
            embeddings, label_dictionary, cross_augmentation=cross_augmentation, encoding_strategy=encoding_strategy
        )
        transformed_corpus = model.transform_corpus(corpus)
        # Check sentence masking and relation label annotation on
        # training, validation and test dataset (in this test the splits are the same)
        ground_truth: Set[Tuple[str, Tuple[str, ...]]] = {
            # Entity pair permutations of: "Larry Page and Sergey Brin founded Google ."
            (f"{encoded_entity_pairs[0][1]} and Sergey Brin founded {encoded_entity_pairs[0][0]} .", ("founded_by",)),
            (f"Larry Page and {encoded_entity_pairs[1][1]} founded {encoded_entity_pairs[1][0]} .", ("founded_by",)),
            # Entity pair permutations of: "Microsoft was founded by Bill Gates ."
            (f"{encoded_entity_pairs[2][0]} was founded by {encoded_entity_pairs[2][1]} .", ("founded_by",)),
            # Entity pair permutations of: "Konrad Zuse was born in Berlin on 22 June 1910 ."
            (
                f"{encoded_entity_pairs[3][1]} was born in {encoded_entity_pairs[3][0]} on 22 June 1910 .",
                ("place_of_birth",),
            ),
            # Entity pair permutations of: "Joseph Weizenbaum , a professor at MIT , was born in Berlin , Germany."
            (
                f"{encoded_entity_pairs[4][1]} , a professor at MIT , "
                f"was born in {encoded_entity_pairs[4][0]} , Germany .",
                ("place_of_birth",),
            ),
            (
                f"{encoded_entity_pairs[5][1]} , a professor at MIT , "
                f"was born in Berlin , {encoded_entity_pairs[5][0]} .",
                ("place_of_birth",),
            ),
        }
        if cross_augmentation:
            # This sentence is only included if we transform the corpus with cross augmentation
            ground_truth.add(
                (
                    f"{encoded_entity_pairs[6][1]} , a professor at {encoded_entity_pairs[6][0]} , "
                    f"was born in Berlin , Germany .",
                    ("O",),
                )
            )
        for split in (transformed_corpus.train, transformed_corpus.dev, transformed_corpus.test):
            self.check_transformation_correctness(split, ground_truth)
# [dataset boundary row — previous file stats: 9,948 chars / 40.627615 avg line / 118 max line / py]
# repo: flair — file: flair-master/tests/embeddings/test_transformer_word_embeddings.py
import importlib.util
import warnings
import pytest
import torch
from PIL import Image
from transformers.utils import is_detectron2_available
from flair.data import BoundingBox, Dictionary, Sentence
from flair.embeddings import TransformerJitWordEmbeddings, TransformerWordEmbeddings
from flair.models import SequenceTagger
from tests.embedding_test_utils import BaseEmbeddingsTest
class TestTransformerWordEmbeddings(BaseEmbeddingsTest):
    """Embedding-level tests for :class:`TransformerWordEmbeddings`: TorchScript
    export, FLERT context expansion, LayoutLM(v2/v3) OCR inputs, forced max
    length, tokenizer round-trips and ONNX export."""

    embedding_cls = TransformerWordEmbeddings
    is_token_embedding = True
    is_document_embedding = False
    default_args = {"model": "distilbert-base-uncased", "allow_long_sentences": False}
    valid_args = [
        {"layers": "-1,-2,-3,-4", "layer_mean": False},
        {"layers": "all", "layer_mean": True},
        {"layers": "all", "layer_mean": False},
        {"layers": "all", "layer_mean": True, "subtoken_pooling": "mean"},
    ]
    name_field = "embeddings"
    invalid_names = ["other", "not/existing/path/to/embeddings"]

    @pytest.mark.integration()
    def test_transformer_jit_embeddings(self, results_base_path):
        """JIT-traced embeddings must match the eager ones and survive save/load."""
        base_embeddings = TransformerWordEmbeddings(
            "distilbert-base-uncased", layers="-1,-2,-3,-4", layer_mean=False, allow_long_sentences=True
        )
        sentence: Sentence = Sentence("I love Berlin, but Vienna is where my hearth is.")

        class JitWrapper(torch.nn.Module):
            # Thin wrapper exposing the embedding forward with tensor-only
            # arguments so that torch.jit.trace can handle it.
            def __init__(self, embedding: TransformerWordEmbeddings) -> None:
                super().__init__()
                self.embedding = embedding

            def forward(
                self,
                input_ids: torch.Tensor,
                token_lengths: torch.LongTensor,
                attention_mask: torch.Tensor,
                overflow_to_sample_mapping: torch.Tensor,
                word_ids: torch.Tensor,
            ):
                return self.embedding.forward(
                    input_ids=input_ids,
                    token_lengths=token_lengths,
                    attention_mask=attention_mask,
                    overflow_to_sample_mapping=overflow_to_sample_mapping,
                    word_ids=word_ids,
                )["token_embeddings"]

        base_embeddings.embed(sentence)
        base_token_embedding = sentence[5].get_embedding().clone()
        sentence.clear_embeddings()
        tensors = base_embeddings.prepare_tensors([sentence])
        # ensure that the prepared tensors is what we expect
        assert sorted(tensors.keys()) == [
            "attention_mask",
            "input_ids",
            "overflow_to_sample_mapping",
            "token_lengths",
            "word_ids",
        ]
        wrapper = JitWrapper(base_embeddings)
        parameter_names, parameter_list = TransformerJitWordEmbeddings.parameter_to_list(
            base_embeddings, wrapper, [sentence]
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")  # tracing emits benign TracerWarnings
            script_module = torch.jit.trace(wrapper, parameter_list)
        jit_embeddings = TransformerJitWordEmbeddings.create_from_embedding(
            script_module, base_embeddings, parameter_names
        )
        jit_embeddings.embed(sentence)
        jit_token_embedding = sentence[5].get_embedding().clone()
        assert torch.isclose(base_token_embedding, jit_token_embedding).all()
        sentence.clear_embeddings()
        # use a SequenceTagger to save and reload the embedding in the manner it is supposed to work
        example_tagger = SequenceTagger(embeddings=jit_embeddings, tag_dictionary=Dictionary(), tag_type="none")
        results_base_path.mkdir(exist_ok=True, parents=True)
        example_tagger.save(results_base_path / "tagger.pt")
        del example_tagger
        new_example_tagger = SequenceTagger.load(results_base_path / "tagger.pt")
        loaded_jit_embedding = new_example_tagger.embeddings
        loaded_jit_embedding.embed(sentence)
        loaded_jit_token_embedding = sentence[5].get_embedding().clone()
        sentence.clear_embeddings()
        assert torch.isclose(jit_token_embedding, loaded_jit_token_embedding).all()

    def test_transformers_context_expansion(self, results_base_path):
        """FLERT context expansion must respect neighbours and document boundaries."""
        emb = TransformerWordEmbeddings(
            "distilbert-base-uncased", use_context=True, use_context_separator=True, respect_document_boundaries=True
        )
        # previous and next sentence as context
        sentence_previous = Sentence("How is it?")
        sentence_next = Sentence("Then again, maybe not...")
        # test expansion for sentence without context
        sentence = Sentence("This is great!")
        expanded, _ = emb._expand_sentence_with_context(sentence=sentence)
        assert " ".join([token.text for token in expanded]) == "[FLERT] This is great ! [FLERT]"
        # test expansion for with previous and next as context
        sentence = Sentence("This is great.")
        sentence._previous_sentence = sentence_previous
        sentence._next_sentence = sentence_next
        expanded, _ = emb._expand_sentence_with_context(sentence=sentence)
        assert (
            " ".join([token.text for token in expanded])
            == "How is it ? [FLERT] This is great . [FLERT] Then again , maybe not ..."
        )
        # test expansion if first sentence is document boundary
        sentence = Sentence("This is great?")
        sentence_previous.is_document_boundary = True
        sentence._previous_sentence = sentence_previous
        sentence._next_sentence = sentence_next
        expanded, _ = emb._expand_sentence_with_context(sentence=sentence)
        assert (
            " ".join([token.text for token in expanded]) == "[FLERT] This is great ? [FLERT] Then again , maybe not ..."
        )
        # test expansion if we don't use context
        emb.context_length = 0
        sentence = Sentence("I am here.")
        sentence._previous_sentence = sentence_previous
        sentence._next_sentence = sentence_next
        expanded, _ = emb._expand_sentence_with_context(sentence=sentence)
        assert " ".join([token.text for token in expanded]) == "I am here ."

    @pytest.mark.integration()
    def test_layoutlm_embeddings(self):
        """LayoutLM accepts per-token bounding boxes (BoundingBox or plain tuple)."""
        sentence = Sentence(["I", "love", "Berlin"])
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        emb = TransformerWordEmbeddings("microsoft/layoutlm-base-uncased", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        emb.embed(sentence)

    @pytest.mark.integration()
    @pytest.mark.skipif(
        condition=not is_detectron2_available(), reason="layoutlmV2 requires detectron2 to be installed manually."
    )
    def test_layoutlmv2_embeddings(self, tasks_base_path):
        """LayoutLMv2 additionally needs the page image as sentence metadata."""
        with Image.open(tasks_base_path / "example_images" / "i_love_berlin.png") as img:
            img.load()
        img = img.convert("RGB")
        sentence = Sentence(["I", "love", "Berlin"])
        sentence.add_metadata("image", img)
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        emb = TransformerWordEmbeddings("microsoft/layoutlmv2-base-uncased", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        emb.embed(sentence)

    @pytest.mark.integration()
    def test_layoutlmv3_embeddings(self, tasks_base_path):
        """LayoutLMv3 embeds a short sentence with image + bounding boxes."""
        with Image.open(tasks_base_path / "example_images" / "i_love_berlin.png") as img:
            img.load()
        img = img.convert("RGB")
        sentence = Sentence(["I", "love", "Berlin"])
        sentence.add_metadata("image", img)
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        emb = TransformerWordEmbeddings("microsoft/layoutlmv3-base", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        emb.embed(sentence)

    @pytest.mark.integration()
    def test_layoutlmv3_embeddings_with_long_context(self, tasks_base_path):
        """A 1536-token sentence exercises the long-sentence splitting path."""
        with Image.open(tasks_base_path / "example_images" / "i_love_berlin.png") as img:
            img.load()
        img = img.convert("RGB")
        sentence = Sentence(["I", "love", "Berlin"] * 512)
        sentence.add_metadata("image", img)
        for i in range(512):
            sentence[i * 3].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
            sentence[i * 3 + 1].add_metadata("bbox", (12, 0, 22, 10))
            # NOTE(review): (0, 12, 0, 10) differs from the (0, 12, 10, 22) box
            # used in the other layoutlm tests — confirm this is intentional.
            sentence[i * 3 + 2].add_metadata("bbox", (0, 12, 0, 10))
        emb = TransformerWordEmbeddings("microsoft/layoutlmv3-base", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        emb.embed(sentence)

    @pytest.mark.integration()
    def test_ocr_embeddings_fails_when_no_bbox(self):
        """Embedding a layoutlm model without bbox metadata must raise ValueError."""
        sentence = Sentence(["I", "love", "Berlin"])
        emb = TransformerWordEmbeddings("microsoft/layoutlm-base-uncased", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        with pytest.raises(ValueError):
            emb.embed(sentence)

    @pytest.mark.integration()
    def test_layoutlm_embeddings_with_context_warns_user(self):
        """Requesting FLERT context with a layoutlm model must emit exactly one warning."""
        sentence = Sentence(["I", "love", "Berlin"])
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        with pytest.warns(UserWarning) as record:
            TransformerWordEmbeddings("microsoft/layoutlm-base-uncased", layers="-1,-2,-3,-4", use_context=True)
        assert len(record) == 1
        assert "microsoft/layoutlm" in record[0].message.args[0]

    @pytest.mark.integration()
    def test_layoutlmv3_without_image_embeddings_fails(self):
        """LayoutLMv3 requires an image; embedding without one must raise ValueError."""
        sentence = Sentence(["I", "love", "Berlin"])
        sentence[0].add_metadata("bbox", BoundingBox(0, 0, 10, 10))
        sentence[1].add_metadata("bbox", (12, 0, 22, 10))
        sentence[2].add_metadata("bbox", (0, 12, 10, 22))
        emb = TransformerWordEmbeddings("microsoft/layoutlmv3-base", layers="-1,-2,-3,-4", layer_mean=True)
        emb.eval()
        with pytest.raises(ValueError):
            emb.embed(sentence)

    @pytest.mark.skipif(importlib.util.find_spec("sacremoses") is None, reason="XLM-Embeddings require 'sacremoses'")
    def test_transformer_word_embeddings_forward_language_ids(self):
        """XLM embeddings: German/English token similarities match reference values."""
        cos = torch.nn.CosineSimilarity(dim=0, eps=1e-10)
        sent_en = Sentence(["This", "is", "a", "sentence"], language_code="en")
        sent_de = Sentence(["Das", "ist", "ein", "Satz"], language_code="de")
        embeddings = TransformerWordEmbeddings("xlm-mlm-ende-1024", layers="all", allow_long_sentences=False)
        embeddings.embed([sent_de, sent_en])
        expected_similarities = [
            0.7102344036102295,
            0.7598986625671387,
            0.7437312602996826,
            0.5584433674812317,
        ]
        for token_de, token_en, exp_sim in zip(sent_de, sent_en, expected_similarities):
            sim = cos(token_de.embedding, token_en.embedding).item()
            assert abs(exp_sim - sim) < 1e-5

    def test_transformer_force_max_length(self):
        """force_max_length pads to 512 but must not change the embedding values."""
        sentence: Sentence = Sentence("I love Berlin, but Vienna is where my hearth is.")
        short_embeddings = TransformerWordEmbeddings("distilbert-base-uncased", layers="-1,-2,-3,-4", layer_mean=False)
        long_embeddings = TransformerWordEmbeddings(
            "distilbert-base-uncased", layers="-1,-2,-3,-4", layer_mean=False, force_max_length=True
        )
        short_tensors = short_embeddings.prepare_tensors([sentence])
        long_tensors = long_embeddings.prepare_tensors([sentence])
        for tensor in short_tensors.values():
            if tensor.dim() > 1:  # all tensors that have a sequence length need to be shorter
                assert tensor.shape[1] < 512
        for tensor in long_tensors.values():
            if tensor.dim() > 1:  # all tensors that have a sequence length need to be exactly max length
                assert tensor.shape[1] == 512
        short_embeddings.embed(sentence)
        short_embedding_0 = sentence[0].get_embedding()
        sentence.clear_embeddings()
        long_embeddings.embed(sentence)
        long_embedding_0 = sentence[0].get_embedding()
        # apparently the precision is not that high on cuda, hence the absolute tolerance needs to be higher.
        assert torch.isclose(short_embedding_0, long_embedding_0, atol=1e-4).all()

    def test_transformers_keep_tokenizer_when_saving(self, results_base_path):
        """Saving and re-saving a tagger must not lose the transformer tokenizer."""
        embeddings = TransformerWordEmbeddings("distilbert-base-uncased")
        results_base_path.mkdir(exist_ok=True, parents=True)
        initial_tagger_path = results_base_path / "initial_tokenizer.pk"
        reloaded_tagger_path = results_base_path / "reloaded_tokenizer.pk"
        initial_tagger = SequenceTagger(embeddings, Dictionary(), "ner")
        initial_tagger.save(initial_tagger_path)
        reloaded_tagger = SequenceTagger.load(initial_tagger_path)
        reloaded_tagger.save(reloaded_tagger_path)

    def test_transformers_keep_tokenizer_bloom_when_saving(self, results_base_path):
        """Same tokenizer round-trip check for a bloom-style (byte-level) tokenizer."""
        embeddings = TransformerWordEmbeddings("Muennighoff/bloom-tiny-random")
        results_base_path.mkdir(exist_ok=True, parents=True)
        initial_tagger_path = results_base_path / "initial_tokenizer.pk"
        reloaded_tagger_path = results_base_path / "reloaded_tokenizer.pk"
        initial_tagger = SequenceTagger(embeddings, Dictionary(), "ner")
        initial_tagger.save(initial_tagger_path)
        reloaded_tagger = SequenceTagger.load(initial_tagger_path)
        reloaded_tagger.save(reloaded_tagger_path)

    def test_transformer_subword_token_mapping(self):
        """Regression check: subword-to-token mapping works for this Spanish model."""
        sentence = Sentence("El pasto es verde.")
        embeddings = TransformerWordEmbeddings("PlanTL-GOB-ES/roberta-base-biomedical-es", layers="-1")
        embeddings.embed(sentence)

    @pytest.mark.skipif(importlib.util.find_spec("onnxruntime") is None, reason="Onnx export require 'onnxruntime'")
    def test_onnx_export_works(self, results_base_path):
        """ONNX-exported embeddings must match the eager embeddings token by token."""
        texts = [
            "I live in Berlin",
            "I live in Vienna",
            "Berlin to Germany is like Vienna to Austria",
        ]
        normal_sentences = [Sentence(text) for text in texts]
        onnx_sentences = [Sentence(text) for text in texts]
        embeddings = TransformerWordEmbeddings("distilbert-base-uncased")
        results_base_path.mkdir(exist_ok=True, parents=True)
        onnx_embeddings = embeddings.export_onnx(results_base_path / "onnx-export.onnx", normal_sentences)
        embeddings.embed(normal_sentences)
        onnx_embeddings.embed(onnx_sentences)
        for sent_a, sent_b in zip(normal_sentences, onnx_sentences):
            for token_a, token_b in zip(sent_a, sent_b):
                assert torch.isclose(token_a.get_embedding(), token_b.get_embedding(), atol=1e-6).all()
# [dataset boundary row — previous file stats: 15,110 chars / 45.352761 avg line / 120 max line / py]
# repo: LSTM_Covariance — file: LSTM_Covariance-main/shallow_water/prediction_plotting.py
# %%
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
import matplotlib.pyplot as plt
#
# The training set is stored as 20 shards sharing one filename pattern.
# Order matters: shards are concatenated along axis 0 in exactly the sequence
# below, matching the original shard-by-shard concatenation. Loading all shards
# and concatenating once replaces the original 19 repeated np.concatenate calls
# (which recopied the growing array every time) and the manual `del` bookkeeping.
_SHARD_IDS = [
    1114, 948, 947, 940, 941, 1111, 1112, 946, 945, 944,
    943, 942, 1116, 1115, 1113, 939, 938, 937, 1117, 1118,
]
train_data = np.concatenate(
    [
        np.load(f"data2/trainset_withx_repeat_shwater3_uniform0011_test6_{shard_id}.npy").astype(np.float32)
        for shard_id in _SHARD_IDS
    ],
    axis=0,
)
# train_data=np.load('data2/trainset_withx_repeat_shwater3_uniform0011_test6_1179.npy').astype(np.float32) no
# train_data=np.load('data2/trainset_withx_repeat_shwater3_uniform0011_test6_944.npy').astype(np.float32)
# %%
print("train_data shape: ", train_data.shape)
# ###############################################################################
# Each sample is a flattened (200 x 1000) observation field followed by 101 targets.
obs = train_data.reshape((train_data.shape[0], 200 * 1000 + 101))
del train_data  # free the un-reshaped copy

# %%
# Targets: last 101 columns. The first 100 are rescaled by 1000 and the final
# one divided by 8 (normalisation matching the trained network below).
# NOTE: `y` starts as a view into `obs`, so these assignments also modify obs.
y = obs[:, -101:]
y[:, -101:-1] = y[:, -101:-1] * 1000
y[:, -1] = y[:, -1] / 8
y = y.reshape((obs.shape[0], 101))

# Inputs: first 200*1000 columns viewed as (samples, 200, 1000), then transposed
# to (samples, 1000, 200). A single vectorised transpose replaces the original
# per-sample Python loop `np.array([X[i].transpose() for i in range(...)])`;
# ascontiguousarray reproduces the fresh C-ordered copy that np.array built.
X = obs[:, : 200 * 1000].reshape((obs.shape[0], 200, 1000))
# X = std_scaler.transform(X)
X = np.ascontiguousarray(X.transpose(0, 2, 1))

# input_data=X[:,:1000,:]
input_data = X[:, :200, :]
output_data = y
##########################################################################
# Train/test split parameters.
# NOTE(review): `threshold` is computed but no longer used — the commented-out
# split below was replaced by evaluating the model on the full dataset.
train_part = 0.97
threshold = int(train_part*obs.shape[0])
##########################################################################
# train_input = input_data[:threshold,:]
# train_output = output_data[:threshold,:]
# test_input = input_data [threshold:,:]
# true_test_output = output_data[threshold:,:]
# X1 = train_input
# Y1 = train_output
# The whole dataset is used for prediction/plotting:
X2 = input_data
true_test_output=output_data
# train_input = input_data[:threshold,:]
# train_output = output_data[:threshold,:]
# test_input=input_data
# true_test_output = output_data
# X1 = train_input
# Y1 = train_output
# X2 = test_input
############################################################################
#def my_loss_fn(y_true, y_pred):
#
# return K.mean(K.abs(y_true - y_pred) * weight)
# %%
from tensorflow.keras.models import load_model
# model1=load_model('data2/sequentiallstm200_b128_h200_norm_out_gen22.h5')
model1=load_model('data2/sequentiallstm2222200_b128_h200_norm_out_gen22.h5')
# Calculate predictions
# PredValSet2 = model1.predict(X2.reshape(X2.shape[0],1000,200))
PredValSet2 = model1.predict(X2.reshape(X2.shape[0],200,200))
print("PredValSet2 shape: ",PredValSet2.shape)
print("true_test_output shape: ",true_test_output.shape)
# fig, ((ax1,ax2),(ax3,ax4),(ax5,ax6))=plt.subplots(nrows=3, ncols=2)
# %%
# sample1=np.random.randint(0,100,1)
sample1=np.array([0])
print(sample1[0])
plt.plot(PredValSet2[:,sample1[0]],true_test_output[:,sample1[0]],'o', color='b',markersize=5)
plt.plot(true_test_output[:,sample1[0]],true_test_output[:,sample1[0]],'-', color='r',linewidth=5)
plt.xlabel('prediction',fontsize=22)
plt.ylabel('true value',fontsize=22)
# plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
# plt.legend()
plt.show()
# %%
sample1=np.array([100])
print(sample1[0])
plt.plot(PredValSet2[:,sample1[0]],true_test_output[:,sample1[0]],'o', color='b',markersize=5)
plt.plot(true_test_output[:,sample1[0]],true_test_output[:,sample1[0]],'-', color='r',linewidth=5)
plt.xlabel('prediction',fontsize=22)
plt.ylabel('true value',fontsize=22)
# plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.legend()
plt.show()
# %%
# plt.plot(PredValSet2[:,sample1[0]],true_test_output[:,sample1[0]],'o', color='b',markersize=5)
# plt.legend()
# plt.show()
# ax1.plot(PredValSet2[:,sample1[0]],true_test_output[:,sample1[0]],'o', color='b',markersize=5)
# ax1.plot(true_test_output[:,sample1[0]],true_test_output[:,sample1[0]],'-', color='r',linewidth=5)
# ax1.legend()
# # sample2=np.random.randint(0,100,1)
# sample2=np.array([82])
# while sample2[0]==sample1[0]:
# sample2=np.random.randint(0,100,1)
# print(sample2)
# # plt.plot(PredValSet2[:,sample2[0]],true_test_output[:,sample2[0]],'o', color='b',markersize=5)
# # plt.legend()
# # plt.show()
# # plt.xlim(-1, 1)
# # plt.ylim(-1, 1)
# # plt.plot(PredValSet1[:,1],true_test_output[:,1],'o', color='blue',markersize=5,label='lstm')
# ax2.plot(PredValSet2[:,sample2[0]],true_test_output[:,sample2[0]],'o', color='b',markersize=5)
# ax2.plot(true_test_output[:,sample2[0]],true_test_output[:,sample2[0]],'-', color='r',linewidth=5)
# # plt.plot(PredValSet3[:,1],true_test_output[:,1],'o', color='green',markersize=5)
# # plt.plot(PredValSet4[:,1],true_test_output[:,1],'o', color='c',markersize=5)
# # plt.plot(PredValSet5[:,1],true_test_output[:,1],'o', color='m',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# ax2.legend()
# # sample3=np.random.randint(0,100,1)
# sample3=np.array([84])
# while sample3[0]==sample2[0] or sample3[0]==sample1[0]:
# sample3=np.random.randint(0,100,1)
# print(sample3)
# # # plt.xlim(-1, 1)
# # # plt.ylim(-1, 1)
# # # plt.plot(PredValSet1[:,2],true_test_output[:,2],'o', color='blue',markersize=5,label='lstm')
# # plt.plot(PredValSet2[:,sample3[0]],true_test_output[:,sample3[0]],'o', color='b',markersize=5)
# # # plt.plot(true_test_output[:,2],true_test_output[:,2],'o', color='r',markersize=5)
# # # plt.plot(PredValSet3[:,2],true_test_output[:,2],'o', color='green',markersize=5)
# # # plt.plot(PredValSet4[:,2],true_test_output[:,2],'o', color='c',markersize=5)
# # # plt.plot(PredValSet5[:,2],true_test_output[:,2],'o', color='m',markersize=5)
# # #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# # plt.legend()
# # plt.show()
# # plt.plot(PredValSet1[:,2],true_test_output[:,2],'o', color='blue',markersize=5,label='lstm')
# ax3.plot(PredValSet2[:,sample3[0]],true_test_output[:,sample3[0]],'o', color='b',markersize=5)
# ax3.plot(true_test_output[:,sample3[0]],true_test_output[:,sample3[0]],'-', color='r',linewidth=5)
# # plt.plot(PredValSet3[:,2],true_test_output[:,2],'o', color='green',markersize=5)
# # plt.plot(PredValSet4[:,2],true_test_output[:,2],'o', color='c',markersize=5)
# # plt.plot(PredValSet5[:,2],true_test_output[:,2],'o', color='m',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# ax3.legend()
# # sample4=np.random.randint(0,100,1)
# sample4=np.array([86])
# while sample4[0]==sample2[0] or sample4[0]==sample1[0] or sample4[0]==sample3[0]:
# sample4=np.random.randint(0,100,1)
# print(sample4)
# # # plt.xlim(-10, 10)
# # # plt.ylim(-10, 10)
# # # plt.plot(PredValSet1[:,3],true_test_output[:,3],'o',color='blue',markersize=5,label='lstm')
# # plt.plot(PredValSet2[:,sample4[0]],true_test_output[:,sample4[0]],'o', color='b',markersize=5)
# # # plt.plot(true_test_output[:,3],true_test_output[:,3],'o', color='r',markersize=5)
# # # plt.plot(PredValSet3[:,3],true_test_output[:,3],'o', color='green',markersize=5)
# # # plt.plot(PredValSet4[:,3],true_test_output[:,3],'o', color='c',markersize=5)
# # # plt.plot(PredValSet5[:,3],true_test_output[:,3],'o', color='m',markersize=5)
# # #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# # plt.legend()
# # plt.show()
# # plt.xlim(-10, 10)
# # plt.ylim(-10, 10)
# # plt.plot(PredValSet1[:,3],true_test_output[:,3],'o',color='blue',markersize=5,label='lstm')
# ax4.plot(PredValSet2[:,sample4[0]],true_test_output[:,sample4[0]],'o', color='b',markersize=5)
# ax4.plot(true_test_output[:,sample4[0]],true_test_output[:,sample4[0]],'-', color='r',linewidth=5)
# # plt.plot(PredValSet3[:,3],true_test_output[:,3],'o', color='green',markersize=5)
# # plt.plot(PredValSet4[:,3],true_test_output[:,3],'o', color='c',markersize=5)
# # plt.plot(PredValSet5[:,3],true_test_output[:,3],'o', color='m',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# ax4.legend()
# # plt.show()
# # sample5=np.random.randint(0,100,1)
# sample5=np.array([88])
# while sample5[0]==sample2[0] or sample5[0]==sample1[0] or sample5[0]==sample3[0] or sample5[0]==sample4[0]:
# sample5=np.random.randint(0,100,1)
# print(sample5)
# ax5.plot(PredValSet2[:,sample5[0]],true_test_output[:,sample5[0]],'o', color='b',markersize=5)
# ax5.plot(true_test_output[:,sample5[0]],true_test_output[:,sample5[0]],'-', color='r',linewidth=5)
# ax5.legend()
# sample6=np.array([99])
# print(sample6)
# ax6.plot(PredValSet2[:,sample6[0]],true_test_output[:,sample6[0]],'o', color='b',markersize=5)
# ax6.plot(true_test_output[:,sample6[0]],true_test_output[:,sample6[0]],'-', color='r',linewidth=5)
# # plt.plot(PredValSet3[:,3],true_test_output[:,3],'o', color='green',markersize=5)
# # plt.plot(PredValSet4[:,3],true_test_output[:,3],'o', color='c',markersize=5)
# # plt.plot(PredValSet5[:,3],true_test_output[:,3],'o', color='m',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# ax6.legend()
# fig.supxlabel('Prediction')
# fig.supylabel('true value')
# plt.tight_layout()
# plt.show()
# # plt.plot(true_test_output[:,0],color='b',label='r0')
# # plt.plot(true_test_output[:,1],color='r',label='r1')
# # plt.plot(true_test_output[:,2],color='y',label='r2')
# # plt.legend()
# # plt.show()
# ##########################################################################################"
# # predint = model.predict(train_input[:3000])
# # trueint = train_output[:3000]
# # plt.plot(predint[:,3],trueint[:,3],'o', color='blue',markersize=5)
# # #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# # plt.show()
| 12,529 | 31.973684 | 109 | py |
LSTM_Covariance | LSTM_Covariance-main/shallow_water/shallowwater_lstm1000_model.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.metrics import r2_score
import tensorflow as tf
import tensorflow.keras.backend as K
# check scikit-learn version
# check scikit-learn version
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
# import keras
from tensorflow.keras.models import Sequential,load_model
from tensorflow.keras.layers import LSTM,Dropout
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam
import os
import json
import pickle
# gpus = tf.config.experimental.list_physical_devices('GPU')
# if gpus:
# try:
# # Currently, memory growth needs to be the same across GPUs
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
# logical_gpus = tf.config.experimental.list_logical_devices('GPU')
# print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
# except RuntimeError as e:
# # Memory growth must be set before GPUs have been initialized
# print(e)
# Enable on-demand GPU memory growth so TensorFlow does not grab all GPU
# memory at startup.  set_memory_growth() expects a SINGLE PhysicalDevice;
# the original passed the list slice physical_devices[0:3], which always
# raised and was silently swallowed by a bare except, so growth was never
# actually enabled.  Iterate over the devices instead.
physical_devices = tf.config.list_physical_devices('GPU')
for _gpu in physical_devices:
    try:
        tf.config.experimental.set_memory_growth(_gpu, True)
    except (ValueError, RuntimeError):
        # Invalid device or cannot modify virtual devices once initialized.
        pass
#=======================================================================
# Generator
class DataGenerator(tf.keras.utils.Sequence):
    """Streams shallow-water training batches for Keras from per-ID .npy files.

    Each ID maps to one file
    ``uniform020/trainset_withx_repeat_shwater3_uniform0011_test6_<ID>.npy``
    whose rows hold 200*1000 flattened observation values followed by 101
    target values.

    Parameters
    ----------
    list_IDs : sequence of int
        All file IDs available to this generator.
    batch_size : int
        Number of FILES (not rows) per batch.
    dim, n_channels : kept for interface compatibility; not used directly.
    shuffle : bool
        Whether to reshuffle the ID order after every epoch.
    """

    def __init__(self, list_IDs, batch_size=1, dim=(1000, 200), n_channels=1, shuffle=True):
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # Number of full batches per epoch.
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        # Map the batch index to a slice of (possibly shuffled) ID positions.
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        return self.__data_generation(list_IDs_temp)

    def on_epoch_end(self):
        """Updates indexes after each epoch."""
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        """Load and preprocess every file of the batch.

        Bug fix: the original `return` sat outside the loop, so a batch with
        more than one ID silently returned only the LAST file's data.  All
        files are now concatenated.  (With the default batch_size=1 the
        result is unchanged.)
        """
        xs, ys = [], []
        for ID in list_IDs_temp:
            obs = np.load('uniform020/trainset_withx_repeat_shwater3_uniform0011_test6_' + str(ID) + '.npy')
            obs_size = obs.shape[0]
            # Targets: last 101 columns.  Scale the first 100 by 1000 and
            # divide the last one by 8 (mirrors the inverse scaling applied
            # by the evaluation scripts).
            y = obs[:, -101:]
            y[:, -101:-1] = y[:, -101:-1] * 1000
            y[:, -1] = y[:, -1] / 8
            # Inputs: reshape the flat 200*1000 block and swap to
            # (rows, 1000 time steps, 200 features).
            X = obs[:, :200 * 1000].reshape((obs_size, 200, 1000))
            X = X.transpose(0, 2, 1)
            xs.append(X)
            ys.append(y)
        return np.concatenate(xs, axis=0), np.concatenate(ys, axis=0)
# Progress banner written to the training log.
print("31")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
# #====================== Read file list_IDs =================================================
# IDs 0..210: one ID per training .npy file consumed by DataGenerator.
partition=np.array([i for i in range(211)])
# #====================== Parameters =================================================
# Keyword arguments forwarded to DataGenerator; batch_size counts FILES, not rows.
Params={'dim':(1000,200),
        'batch_size':1,
        'n_channels':1,
        'shuffle':True}
# train_data1000 = np.array(pd.read_csv('0001D07/trainset_withx_repeat_shwater3_0001D07_total_3.csv',delimiter=",",
# header=None,
# index_col=False))
# obs = train_data1000.reshape((train_data1000.shape[0],200*1000+101))
# X=np.empty((train_data1000.shape[0],200*1000))
# y=np.empty((train_data1000.shape[0],101))
# y=obs[:,-101:]
# X=obs[:,:200*1000].reshape((train_data1000.shape[0],200,1000))
# X=np.array([X[i].transpose() for i in range(X.shape[0])])
# input_data=X[:,:,:200]
# output_data=y
# #====================== Generators =================================================
# Hold out the last 3% of file IDs for validation.
train_part = 0.97
threshold = int(train_part*len(partition))
# input_generator=DataGenerator(partition,**Params)
training_generator=DataGenerator(partition[:threshold],**Params)
validation_generator=DataGenerator(partition[threshold:],**Params)
#============================= Model Design ==========================================
# Make sure the checkpoint/output directory exists.
if not os.path.isdir('data2'):
    os.makedirs('data2')
# try:
# Single LSTM over 1000 time steps of 200 features, then a dense head
# regressing the 101 target values.
hidden_size=200
model = Sequential()
model.add(LSTM(hidden_size,input_shape=(1000,200)))
model.add(Dense(101))
# model=load_model('data2/sequentiallstm1000_b128_h200_norm_out_new_model.h5',compile = False)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mae'])
print(model.summary())
# Stop when val_loss has not improved for 100 epochs; checkpoint the best
# model seen so far to data2/.
es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=100)
# modelcheckpoint
mc=ModelCheckpoint('data2/sequentiallstm1000_b128_h200_norm_out_new_model.h5',monitor='val_loss',mode='min',save_best_only=True,verbose=1)
# history=model.fit(input_data, output_data, validation_split=train_part, epochs=100, batch_size=128, verbose=1,callbacks=[es,mc])
history=model.fit(x=training_generator, validation_data=validation_generator, epochs=1000, validation_batch_size=5,verbose=1, callbacks=[es,mc])
# history=model.fit(x=training_generator, validation_data=validation_generator, epochs=1000, use_multiprocessing=True, workers=6,verbose=1,callbacks=[es,mc])
# model.save('save_data/sequentiallstm2')
# Final weights; the checkpoint above keeps the best epoch separately.
model.save('data2/sequentiallstm1000_b128_h200_norm_out_new_model_f.h5')
# https://stackoverflow.com/a/44674337/10349608
# Persist the loss curves for later plotting.
with open('data2/sequentiallstm1000_b128_h200_norm_out_new_model_history.pickle', 'wb') as file_his:
    pickle.dump(history.history, file_his)
| 6,880 | 30.135747 | 157 | py |
LSTM_Covariance | LSTM_Covariance-main/shallow_water/shallowwater_lstm200_model.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.metrics import r2_score
import tensorflow as tf
import tensorflow.keras.backend as K
# check scikit-learn version
# check scikit-learn version
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
# import keras
from tensorflow.keras.models import Sequential,load_model
from tensorflow.keras.layers import LSTM,Dropout
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam
import os
import json
import pickle
# gpus = tf.config.experimental.list_physical_devices('GPU')
# if gpus:
# try:
# # Currently, memory growth needs to be the same across GPUs
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
# logical_gpus = tf.config.experimental.list_logical_devices('GPU')
# print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
# except RuntimeError as e:
# # Memory growth must be set before GPUs have been initialized
# print(e)
# Enable on-demand GPU memory growth so TensorFlow does not grab all GPU
# memory at startup.  set_memory_growth() expects a SINGLE PhysicalDevice;
# the original passed the list slice physical_devices[0:3], which always
# raised and was silently swallowed by a bare except, so growth was never
# actually enabled.  Iterate over the devices instead.
physical_devices = tf.config.list_physical_devices('GPU')
for _gpu in physical_devices:
    try:
        tf.config.experimental.set_memory_growth(_gpu, True)
    except (ValueError, RuntimeError):
        # Invalid device or cannot modify virtual devices once initialized.
        pass
#=======================================================================
# Generator
class DataGenerator(tf.keras.utils.Sequence):
    """Streams shallow-water training batches for Keras from per-ID .npy files.

    Each ID maps to one file
    ``uniform020/trainset_withx_repeat_shwater3_uniform0011_test6_<ID>.npy``
    whose rows hold 200*1000 flattened observation values followed by 101
    target values.  Unlike the 1000-step variant, only the first 200 of the
    1000 columns per feature are kept, yielding (200, 200) inputs.

    Parameters
    ----------
    list_IDs : sequence of int
        All file IDs available to this generator.
    batch_size : int
        Number of FILES (not rows) per batch.
    dim, n_channels : kept for interface compatibility; not used directly.
    shuffle : bool
        Whether to reshuffle the ID order after every epoch.
    """

    def __init__(self, list_IDs, batch_size=1, dim=(1000, 200), n_channels=1, shuffle=True):
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # Number of full batches per epoch.
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        # Map the batch index to a slice of (possibly shuffled) ID positions.
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        return self.__data_generation(list_IDs_temp)

    def on_epoch_end(self):
        """Updates indexes after each epoch."""
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        """Load and preprocess every file of the batch.

        Bug fix: the original `return` sat outside the loop, so a batch with
        more than one ID silently returned only the LAST file's data.  All
        files are now concatenated.  (With the default batch_size=1 the
        result is unchanged.)
        """
        xs, ys = [], []
        for ID in list_IDs_temp:
            obs = np.load('uniform020/trainset_withx_repeat_shwater3_uniform0011_test6_' + str(ID) + '.npy')
            obs_size = obs.shape[0]
            # Targets: last 101 columns.  Scale the first 100 by 1000 and
            # divide the last one by 8 (mirrors the inverse scaling applied
            # by the evaluation scripts).
            y = obs[:, -101:]
            y[:, -101:-1] = y[:, -101:-1] * 1000
            y[:, -1] = y[:, -1] / 8
            # Inputs: reshape the flat 200*1000 block, keep only the first
            # 200 of the 1000 columns, then swap to (rows, 200, 200).
            X = obs[:, :200 * 1000].reshape((obs_size, 200, 1000))[:, :, :200]
            X = X.transpose(0, 2, 1)
            xs.append(X)
            ys.append(y)
        return np.concatenate(xs, axis=0), np.concatenate(ys, axis=0)
# Progress banner written to the training log.
print("31")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
print("============================================================")
# #====================== Read file list_IDs =================================================
# IDs 0..210: one ID per training .npy file consumed by DataGenerator.
partition=np.array([i for i in range(211)])
# #====================== Parameters =================================================
# Keyword arguments forwarded to DataGenerator; batch_size counts FILES, not rows.
Params={'dim':(1000,200),
        'batch_size':1,
        'n_channels':1,
        'shuffle':True}
# train_data1000 = np.array(pd.read_csv('0001D07/trainset_withx_repeat_shwater3_0001D07_total_3.csv',delimiter=",",
# header=None,
# index_col=False))
# obs = train_data1000.reshape((train_data1000.shape[0],200*1000+101))
# X=np.empty((train_data1000.shape[0],200*1000))
# y=np.empty((train_data1000.shape[0],101))
# y=obs[:,-101:]
# X=obs[:,:200*1000].reshape((train_data1000.shape[0],200,1000))
# X=np.array([X[i].transpose() for i in range(X.shape[0])])
# input_data=X[:,:,:200]
# output_data=y
# #====================== Generators =================================================
# Hold out the last 3% of file IDs for validation.
train_part = 0.97
threshold = int(train_part*len(partition))
# input_generator=DataGenerator(partition,**Params)
training_generator=DataGenerator(partition[:threshold],**Params)
validation_generator=DataGenerator(partition[threshold:],**Params)
#============================= Model Design ==========================================
# Make sure the checkpoint/output directory exists.
if not os.path.isdir('data2'):
    os.makedirs('data2')
# try:
# Single LSTM over 200 time steps of 200 features (the generator truncates
# each feature to its first 200 columns), then a dense head regressing the
# 101 target values.
hidden_size=200
model = Sequential()
model.add(LSTM(hidden_size,input_shape=(200,200)))
model.add(Dense(101))
# model=load_model('data2/sequentiallstm2222200_b128_h200_norm_out_gen22.h5',compile = False)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mae'])
print(model.summary())
# Stop when val_loss has not improved for 100 epochs; checkpoint the best
# model seen so far to data2/.
es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=100)
# modelcheckpoint
mc=ModelCheckpoint('data2/sequentiallstm2222200_b128_h200_norm_out_gen22.h5',monitor='val_loss',mode='min',save_best_only=True,verbose=1)
# history=model.fit(input_data, output_data, validation_split=train_part, epochs=100, batch_size=128, verbose=1,callbacks=[es,mc])
history=model.fit(x=training_generator, validation_data=validation_generator, epochs=1000, validation_batch_size=5,verbose=1, callbacks=[es,mc])
# history=model.fit(x=training_generator, validation_data=validation_generator, epochs=1000, use_multiprocessing=True, workers=6,verbose=1,callbacks=[es,mc])
# model.save('save_data/sequentiallstm2')
# Final weights; the checkpoint above keeps the best epoch separately.
model.save('data2/sequentiallstm2222200_b128_h200_norm_out_gen22_f.h5')
# https://stackoverflow.com/a/44674337/10349608
# Persist the loss curves for later plotting.
with open('data2/sequentiallstm2222200_b128_h200_norm_out_gen22_history.pickle', 'wb') as file_his:
    pickle.dump(history.history, file_his)
| 6,889 | 29.622222 | 157 | py |
LSTM_Covariance | LSTM_Covariance-main/lorenz/lstmR_d05R_plotting.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import matplotlib.pyplot as plt
# check scikit-learn version
# check scikit-learn version
import pandas as pd
# def data_set_order(file):
# train_data = np.array(pd.read_csv(file))
# r0=train_data[:,:1001]
# r1=train_data[:,1001:2002]
# r2=train_data[:,2002:3003]
# r3=train_data[:,3003:]/10
# train_data=np.insert(r0,[i+1 for i in range(r0.shape[1])],r1,axis=1)
# train_data=np.insert(train_data,[(i+1)*2 for i in range(int(train_data.shape[1]/2))],r2,axis=1)
# train_data=np.concatenate((train_data,r3),axis=1)
# return train_data
def data_set_order(file, seg_len=1001):
    """Load a Lorenz training CSV and interleave its three series column-wise.

    Each row of the CSV holds three series of ``seg_len`` columns each
    (r0, r1, r2) followed by the target columns (r3).  The last two rows of
    the file are dropped and the final target column is rescaled by 1/100
    (matching the original script's behaviour).

    Parameters
    ----------
    file : str
        Path to the CSV file; its first row is consumed as a header by
        ``pd.read_csv``.
    seg_len : int, optional
        Number of columns per series (default 1001, matching the original
        hard-coded layout).

    Returns
    -------
    np.ndarray
        Array whose columns are r0[0], r1[0], r2[0], r0[1], r1[1], r2[1],
        ... followed by the r3 target columns.
    """
    raw = np.array(pd.read_csv(file))[:-2, :]
    r0 = raw[:, :seg_len]
    r1 = raw[:, seg_len:2 * seg_len]
    r2 = raw[:, 2 * seg_len:3 * seg_len]
    r3 = raw[:, 3 * seg_len:]
    r3[:, -1] = r3[:, -1] / 100  # rescale the last target column
    # Interleave the three series in one vectorised pass; the original used
    # two np.insert calls, which is quadratic in the number of columns.
    interleaved = np.stack((r0, r1, r2), axis=2).reshape(raw.shape[0], -1)
    return np.concatenate((interleaved, r3), axis=1)
# def data_set_order(file):
# train_data = np.array(pd.read_csv(file))[:-2,:]
# r0=train_data[:,:1001][:,:201]
# r1=train_data[:,1001:2002][:,:201]
# r2=train_data[:,2002:3003][:,:201]
# r3=train_data[:,3003:]
# r3[:,-1]=r3[:,-1]/100
# train_data=np.insert(r0,[i+1 for i in range(r0.shape[1])],r1,axis=1)
# train_data=np.insert(train_data,[(i+1)*2 for i in range(int(train_data.shape[1]/2))],r2,axis=1)
# train_data=np.concatenate((train_data,r3),axis=1)
# return train_data
# Load the interleaved Lorenz series and keep only the first 10000 rows.
train_data = data_set_order('lorenz_cov_train_v2/trainset_withx_steps1000_11.csv')[:10000,:]
print("train_data shape: ",train_data.shape)
# ###############################################################################
# LSTM1000
# Inputs: the 3*1001 interleaved series columns; outputs: the 4 target columns.
input_data = train_data[:,0:1001*3]
output_data = train_data[:,1001*3:]
# LSTM200
# input_data = train_data[:,0:603]
# output_data = train_data[:,603:]
########################################################################
train_part = 0.97
# threshold = int(train_part*train_data.shape[0])
# NOTE(review): threshold equals the full 10000-row slice above, so ALL rows
# are used for the plots below and train_part is effectively unused here.
threshold=10000
##########################################################################
test_input = input_data[:threshold,:]
true_test_output = output_data[:threshold,:]
# test_input = input_data [threshold:,:]
# true_test_output = output_data[threshold:,:]
# X1 = train_input
# Y1 = train_output
X2 = test_input
print("X2 shape: ",X2.shape[0])
#Y2 = ValidationSet_Y
############################################################################
# Summarise each 3x3 covariance matrix R[i] into the 4 quantities compared
# against the targets: the three off-diagonal entries (0,1), (0,2), (1,2)
# and the mean diagonal trace/3, one row per sample.
# Vectorised replacement for the original per-row np.concatenate loop,
# which grew the array quadratically (and printed every index); the dead
# np.zeros initialisation it immediately overwrote is dropped too.
R=np.load('label_data/di05_original_version_R_all_10000.npy')
PredValSet2 = np.column_stack((
    R[:, 0, 1],
    R[:, 0, 2],
    R[:, 1, 2],
    np.trace(R, axis1=1, axis2=2) / 3,
))
#def my_loss_fn(y_true, y_pred):
#
# return K.mean(K.abs(y_true - y_pred) * weight)
# from tensorflow.keras.models import load_model
# model1=load_model('data2/sequentiallstm1000_ing_f.h5')
# model1=load_model('data2/sequentiallstm200_ing_f.h5')
# Calculate predictions
# PredValSet2 = model1.predict(X2.reshape(X2.shape[0],1001,3))
# PredValSet2 = model1.predict(X2.reshape(X2.shape[0],201,3))
# PredTestSet = model.predict(X1)
# PredValSet = model.predict(X2)
# Save predictions
#np.savetxt("numerique/trainresults_raindebit.csv", PredTestSet, delimiter=",")
#np.savetxt("numerique/valresults_raindebit.csv", PredValSet, delimiter=",")
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('Model loss')
# plt.ylabel('Loss')
# plt.xlabel('Epoch')
# plt.legend(['Train', 'Test'], loc='upper left')
# #plt.savefig('figure_dp/loss_trace.eps', format='eps',bbox_inches='tight')
# plt.show()
#plt.plot(true_test_output[:,1],'r',label = "true")
#plt.plot(PredValSet[:,1],label = "model")
#plt.title("1st coeff linear")
#plt.legend()
#plt.show()
#deep_error = []
#
#for i in range(150):
#
# deep_error.append(np.linalg.norm(PredValSet[:,i]-true_test_output[:,i]))
#
#print('deep_error',deep_error)
# %%
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,0],true_test_output[:,0],'o', color='blue',markersize=5,label='lstm')
# %%
# Scatter of the r0 summary scaled by the mean-diagonal r3 against the true
# r0 target, with a red y=x reference line.
plt.plot(PredValSet2[:,0]/PredValSet2[:,3],true_test_output[:,0],'o', color='b',markersize=5)
plt.plot(true_test_output[:,0],true_test_output[:,0],'-', color='r',linewidth=5)
plt.xlabel('prediction',fontsize=22)
plt.ylabel('true value',fontsize=22)
plt.xticks([-2.00,-1.00,0.00,1.00,2.00])
plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
# NOTE(review): no artist carries a label, so plt.legend() only emits a
# "no handles" warning.
plt.legend()
plt.show()
# %%
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,0],true_test_output[:,0],'o', color='blue',markersize=5,label='lstm')
# plt.plot(PredValSet2[:,0],true_test_output[:,0],'o', color='b',markersize=5)
# plt.plot(true_test_output[:,0],true_test_output[:,0],'o', color='r',markersize=5)
# plt.plot(PredValSet3[:,0],true_test_output[:,0],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,0],true_test_output[:,0],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,0],true_test_output[:,0],'o', color='m',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.legend()
# plt.show()
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,1],true_test_output[:,1],'o', color='blue',markersize=5,label='lstm')
# %%
# Same scatter as above for the second off-diagonal entry r1 (scaled by r3),
# with a red y=x reference line.
plt.plot(PredValSet2[:,1]/PredValSet2[:,3],true_test_output[:,1],'o', color='b',markersize=5)
plt.plot(true_test_output[:,1],true_test_output[:,1],'-', color='r',linewidth=5)
# plt.xlabel('prediction',fontsize=22)
# plt.ylabel('true value',fontsize=22)
# plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.xticks(fontsize=22)
# plt.yticks(fontsize=22)
# plt.plot(PredValSet3[:,1],true_test_output[:,1],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,1],true_test_output[:,1],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,1],true_test_output[:,1],'o', color='m',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# NOTE(review): no artist carries a label, so plt.legend() only warns.
plt.legend()
plt.show()
# %%
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,1],true_test_output[:,1],'o', color='blue',markersize=5,label='lstm')
# plt.plot(PredValSet2[:,1],true_test_output[:,1],'o', color='b',markersize=5)
# plt.plot(true_test_output[:,1],true_test_output[:,1],'o', color='r',markersize=5)
# plt.plot(PredValSet3[:,1],true_test_output[:,1],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,1],true_test_output[:,1],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,1],true_test_output[:,1],'o', color='m',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.legend()
# plt.show()
# plt.xlim(-1, 1)
# plt.ylim(-1, 1)
# plt.plot(PredValSet1[:,2],true_test_output[:,2],'o', color='blue',markersize=5,label='lstm')
# plt.plot(PredValSet2[:,2],true_test_output[:,2],'o', color='b',markersize=5)
# plt.plot(true_test_output[:,2],true_test_output[:,2],'o', color='r',markersize=5)
# plt.plot(PredValSet3[:,2],true_test_output[:,2],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,2],true_test_output[:,2],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,2],true_test_output[:,2],'o', color='m',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.legend()
# plt.show()
# plt.plot(PredValSet1[:,2],true_test_output[:,2],'o', color='blue',markersize=5,label='lstm')
# %%
# Same scatter for the third off-diagonal entry r2 (scaled by r3), with a
# red y=x reference line.
plt.plot(PredValSet2[:,2]/PredValSet2[:,3],true_test_output[:,2],'o', color='b',markersize=5)
plt.plot(true_test_output[:,2],true_test_output[:,2],'-', color='r',linewidth=5)
plt.xlabel('prediction',fontsize=22)
plt.ylabel('true value',fontsize=22)
plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
plt.yticks([-1.00,-0.50,0.00,0.50,1.00])
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
# NOTE(review): no artist carries a label, so plt.legend() only warns.
plt.legend()
plt.show()
# %%
# plt.xlim(-10, 10)
# plt.ylim(-10, 10)
# plt.plot(PredValSet1[:,3],true_test_output[:,3],'o',color='blue',markersize=5,label='lstm')
# plt.plot(PredValSet2[:,3],true_test_output[:,3],'o', color='b',markersize=5)
# plt.plot(true_test_output[:,3],true_test_output[:,3],'o', color='r',markersize=5)
# plt.plot(PredValSet3[:,3],true_test_output[:,3],'o', color='green',markersize=5)
# plt.plot(PredValSet4[:,3],true_test_output[:,3],'o', color='c',markersize=5)
# plt.plot(PredValSet5[:,3],true_test_output[:,3],'o', color='m',markersize=5)
# plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.legend()
# plt.show()
# plt.xlim(-10, 10)
# plt.ylim(-10, 10)
# plt.plot(PredValSet1[:,3],true_test_output[:,3],'o',color='blue',markersize=5,label='lstm')
# %%
# Scatter for the mean-diagonal term r3; the target is multiplied by 100 to
# undo the /100 rescaling applied in data_set_order.
plt.plot(PredValSet2[:,3],true_test_output[:,3]*100,'o', color='b',markersize=5)
plt.plot(true_test_output[:,3]*100,true_test_output[:,3]*100,'-', color='r',linewidth=5)
# plt.xlabel('prediction',fontsize=22)
# plt.ylabel('true value',fontsize=22)
# # plt.xticks([-1.00,-0.50,0.00,0.50,1.00])
# plt.xticks(fontsize=22)
# plt.yticks(fontsize=22)
# NOTE(review): no artist carries a label, so plt.legend() only warns.
plt.legend()
plt.show()
# %%
# plt.plot(true_test_output[:,0],color='b',label='r0')
# plt.plot(true_test_output[:,1],color='r',label='r1')
# plt.plot(true_test_output[:,2],color='y',label='r2')
# plt.legend()
# plt.show()
##########################################################################################"
# predint = model.predict(train_input[:3000])
# trueint = train_output[:3000]
# plt.plot(predint[:,3],trueint[:,3],'o', color='blue',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.show()
| 9,710 | 29.731013 | 101 | py |
LSTM_Covariance | LSTM_Covariance-main/lorenz/lorenz_lstm1000.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import r2_score
import tensorflow as tf
import keras.backend as K
import sys
# check scikit-learn version
# check scikit-learn version
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
def data_set_order(file, seg_len=1001):
    """Load a Lorenz training CSV and interleave its three series column-wise.

    Identical to the helper in the plotting script: each row holds three
    series of ``seg_len`` columns (r0, r1, r2) followed by the target
    columns (r3).  The last two rows are dropped and the final target
    column is rescaled by 1/100 (matching the original behaviour).

    Parameters
    ----------
    file : str
        Path to the CSV file; its first row is consumed as a header by
        ``pd.read_csv``.
    seg_len : int, optional
        Number of columns per series (default 1001, matching the original
        hard-coded layout).

    Returns
    -------
    np.ndarray
        Array whose columns are r0[0], r1[0], r2[0], r0[1], r1[1], r2[1],
        ... followed by the r3 target columns.
    """
    raw = np.array(pd.read_csv(file))[:-2, :]
    r0 = raw[:, :seg_len]
    r1 = raw[:, seg_len:2 * seg_len]
    r2 = raw[:, 2 * seg_len:3 * seg_len]
    r3 = raw[:, 3 * seg_len:]
    r3[:, -1] = r3[:, -1] / 100  # rescale the last target column
    # Interleave the three series in one vectorised pass; the original used
    # two np.insert calls, which is quadratic in the number of columns.
    interleaved = np.stack((r0, r1, r2), axis=2).reshape(raw.shape[0], -1)
    return np.concatenate((interleaved, r3), axis=1)
#input
# Load the full interleaved Lorenz training set and report its size.
train_data = data_set_order('lorenz_cov_train_v2/trainset_withx_steps1000_11.csv')
print("train_data shape: ",train_data.shape)
# NOTE(review): the 90/10 figures printed here do not match the
# train_part = 0.97 split actually used further down the file.
print(f"training dataset size: {train_data.shape[0]*0.9}")
print(f"validation dataset size: {train_data.shape[0]*0.1}")
# NOTE(review): sys.exit() terminates the script here, so the shuffling,
# model definition and training below never run.  This looks like a
# leftover from inspecting the dataset size -- remove it to actually train.
sys.exit()
# train_data1 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis3.csv')
# print("train_data1 shape: ",train_data1.shape)
# train_data2 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis4.csv')
# print("train_data2 shape: ",train_data2.shape)
# train_data3 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis5.csv')
# print("train_data3 shape: ",train_data3.shape)
# train_data4 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis6.csv')
# print("train_data4 shape: ",train_data4.shape)
# train_data5 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis7.csv')
# print("train_data5 shape: ",train_data5.shape)
# train_data6 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis8.csv')
# print("train_data6 shape: ",train_data6.shape)
#size: num_steps*3,r1,r2,r3,v
#########################################################################################
#train_data = np.array(pd.read_csv('data_1000steps/trainset_withx_1000steps.csv'))
#
#
#train_data1 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis1.csv'))
#
#train_data2 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis2.csv'))
#
#train_data3 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis3.csv'))
# train_data = np.concatenate((train_data6,train_data5),axis = 0)
# train_data = np.concatenate((train_data,train_data4),axis = 0)
# train_data = np.concatenate((train_data,train_data3),axis = 0)
# train_data = np.concatenate((train_data,train_data2),axis = 0)
# train_data = np.concatenate((train_data,train_data1),axis = 0)
# train_data = np.concatenate((train_data,train_data0),axis = 0)
# train_data=train_data[:120000,:]
#weightstrain_data[:,604:]
# Shuffle samples in place, then split columns into inputs (3 x 1001
# interleaved observations) and outputs (r1, r2, r3, v/100).
np.random.shuffle(train_data )
input_data = train_data[:,0:3003]
output_data = train_data[:,3003:]
########################################################################
# Hold out the final 3% of the (shuffled) samples as a test set.
train_part = 0.97
threshold = int(train_part*train_data.shape[0])
##########################################################################
train_input = input_data[:threshold]
print("input_data shape: ",input_data.shape)
train_output = output_data[:threshold]
print("output_data shape: ",output_data.shape)
test_input = input_data [threshold:]
true_test_output = output_data[threshold:]
# Short aliases used by the prediction/plotting code further below.
X1 = train_input
Y1 = train_output
X2 = test_input
#Y2 = ValidationSet_Y
############################################################################
#def my_loss_fn(y_true, y_pred):
#
# return K.mean(K.abs(y_true - y_pred) * weight)
# ========================================================================================
from keras.layers import LSTM,Dropout
from keras.layers import TimeDistributed
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
# save data
import os
import json
import pickle
if not os.path.isdir('save_data_v2'):
    os.makedirs('save_data_v2')
hidden_size=200
input_sample=input_data.shape[0] #for one sample
output_sample=output_data.shape[0]
# Reshape flat rows into (samples, time_steps, coords) sequences for the LSTM;
# 1001 is the number of time steps per trajectory from data generation.
input_data=input_data.reshape(input_sample,1001,3)
output_data=output_data.reshape(output_sample,4)
use_dropout=True  # NOTE(review): defined but never used below
model = Sequential()
model.add(LSTM(hidden_size,input_shape=(1001,3)))
model.add(Dense(4))
# opt = Adam(lr=0.0001)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mae'])
print(model.summary())
# Stop once validation loss has not improved for 50 epochs.
es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=50)
# modelcheckpoint: keep only the weights with the best validation loss so far.
mc=ModelCheckpoint('save_data_v2/sequentiallstm1000_ing.h5',monitor='val_loss',mode='min',save_best_only=True,verbose=1)
history=model.fit(input_data, output_data, validation_split=0.1, epochs=100, batch_size=128, verbose=1,callbacks=[es,mc])
# model.save('save_data/sequentiallstm2')
model.save('save_data_v2/sequentiallstm1000_ing_f.h5')
# Persist the training history for later plotting/analysis.
# https://stackoverflow.com/a/44674337/10349608
with open('save_data_v2/sequentiallstm1000_ing_history.pickle', 'wb') as file_his:
    pickle.dump(history.history, file_his)
# Calculate predictions
PredTestSet = model.predict(X1.reshape(X1.shape[0],1001,3))
PredValSet = model.predict(X2.reshape(X2.shape[0],1001,3))
# Learning curves (training vs. validation loss per epoch).
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
#plt.savefig('figure_dp/loss_trace.eps', format='eps',bbox_inches='tight')
plt.show()
# Predicted vs. true scatter for target index 2 (r3) and index 3 (v/100).
plt.plot(PredValSet[:,2],true_test_output[:,2],'o', color='blue',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
plt.show()
plt.plot(PredValSet[:,3],true_test_output[:,3],'o',color='blue',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
plt.show()
# predint = model.predict(train_input[:3000])
# trueint = train_output[:3000]
# plt.plot(predint[:,3],trueint[:,3],'o', color='blue',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.show()
| 6,328 | 25.817797 | 121 | py |
LSTM_Covariance | LSTM_Covariance-main/lorenz/simulated_data_generation.py | # -*- coding: utf-8 -*-
# generate the trainning set for keras regression
import numpy as np
from scipy.optimize import fmin
from scipy.optimize import fmin_l_bfgs_b
#from scipy.optimize import fmin_ncg
from scipy.linalg import sqrtm
import math
from constructB import *
from lorentz_attractor import *
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import time
import random
import lorentz_attractor
import sklearn
from sklearn import datasets
import os
if not os.path.isdir('lorenz_cov_train_v2'):
os.makedirs('lorenz_cov_train_v2')
#######################################################################
def correlation_from_covariance(covariance):
    """Convert a covariance matrix into its correlation matrix.

    Each entry C[i, j] is divided by std_i * std_j (the square roots of the
    diagonal). Entries that are exactly zero in the input stay zero, which
    also suppresses NaNs arising from zero-variance rows/columns.
    """
    std = np.sqrt(np.diagonal(covariance))
    # Broadcasted outer product std_i * std_j.
    correlation = covariance / (std[:, None] * std[None, :])
    correlation[covariance == 0] = 0
    return correlation
######################################################################
#define matrix R by extra-diagonal elements
def R_covariance_dim3(r1, r2, r3):
    """Build a symmetric 3x3 correlation matrix from its off-diagonal entries.

    The diagonal is fixed to 1; r1, r2, r3 fill positions (0,1), (0,2) and
    (1,2) respectively (mirrored below the diagonal).
    """
    upper = np.array([
        [0.0, r1, r2],
        [0.0, 0.0, r3],
        [0.0, 0.0, 0.0],
    ])
    return upper + upper.T + np.eye(3)
################################################################################
#######################################################################
def cov_to_cor(M):
    """Convert a covariance matrix ``M`` to its correlation matrix.

    Computes D^{-1/2} M D^{-1/2} where D = diag(M). The original code built
    D^{-1/2} as ``np.linalg.inv(sqrtm(np.diag(np.diag(M))))`` -- a general
    matrix square root followed by a general inverse -- even though both
    operations on a positive diagonal matrix reduce to taking 1/sqrt of the
    diagonal entries. Computing that directly gives the identical result,
    avoids the scipy dependency here, and cannot return a spurious complex
    matrix for valid (positive-variance) covariance input.
    """
    d_inv_sqrt = np.diag(1.0 / np.sqrt(np.diag(M)))
    return d_inv_sqrt @ M @ d_inv_sqrt
def lorenz_1step(x, y, z, s=10, r=28, b=2.667,dt = 0.001):
    """Advance the Lorenz system one explicit (forward) Euler step of size dt.

    NOTE(review): the parameters s, r, b are accepted here but never forwarded
    to ``lorenz`` -- the derivative is evaluated with whatever defaults
    ``lorentz_attractor.lorenz`` uses. Confirm whether they should be passed
    through.
    """
    x_dot, y_dot, z_dot = lorenz(x, y, z)
    # Forward-Euler update: state + derivative * step size.
    x_next = x + (x_dot * dt)
    y_next = y + (y_dot * dt)
    z_next = z + (z_dot * dt)
    return x_next, y_next, z_next
def VAR_3D(xb, Y, H, B, R):
    """Single linear analysis (BLUE / Kalman-style) update step.

    Computes the gain K = B H^T (H B H^T + R)^{-1}, the analysis state
    xa = xb + K (Y - H xb), and the analysis-error covariance in Joseph form
    A = (I - K H) B (I - K H)^T + K R K^T, which stays symmetric positive
    semi-definite even for a suboptimal gain (this is the form the original
    code already used, not the short Kalman expression (I - K H) B).

    Bug fix vs. the original: the observation vector ``Y`` is no longer
    reshaped in place (``Y.shape = ...`` mutated the caller's array); a local
    column view is used instead. ``I - K H`` is also computed once instead of
    twice. Returned values are unchanged.

    :param xb: background state, shape (n,) or (n, 1); not modified.
    :param Y: observation vector, shape (m,) or (m, 1); not modified.
    :param H: observation operator, shape (m, n).
    :param B: background-error covariance, shape (n, n).
    :param R: observation-error covariance, shape (m, m).
    :return: tuple (xa, A) with xa of shape (n, 1) and A of shape (n, n).
    """
    xb_col = np.asarray(xb).reshape(-1, 1)
    y_col = np.asarray(Y).reshape(-1, 1)  # read-only column view; caller's Y untouched
    dim_x = xb_col.size
    # Kalman gain.
    K = B @ H.T @ np.linalg.inv(H @ B @ H.T + R)
    # Joseph-form analysis covariance; hoist the repeated (I - K H) factor.
    ikh = np.eye(dim_x) - K @ H
    A = ikh @ B @ ikh.T + K @ R @ K.T
    # Analysis state: background corrected by the gain-weighted innovation.
    xa = xb_col + K @ (y_col - H @ xb_col)
    return xa, A
###################################################################################
###################################################################################
#parameters
num_steps = 1000
# Observation operator: maps the 3 Lorenz state variables to 3 observed quantities.
H = np.array([[1,1,0],[2,0,1],[0,0,3]])
# Initial observation-error and background-error covariances (R is later
# overwritten per sample inside the generation loop below).
R = 0.001*np.array([[1,0.4,0.1],[0.4,1,0.4],[0.1,0.4,1]])
B =0.01*np.array([[1,0.2,0.],[0.2,1,0.2],[0.,0.2,1]])
#Q = 0.0001*np.eye(3)
###################################################################################
#save the trainning set for different R
# Row 0 is a zero placeholder; it is stripped again after the generation loop.
trainning_set = np.zeros((1,num_steps*3+3+4))
###################################################################################
#############################################################################
# true states vector, shape (3, num_steps+1)
xs,ys,zs = lorenz_attractor(s=10, r=28, b=2.667, dt = 0.001, num_steps=1000)
x_true = np.zeros((3,num_steps+1))
x_true[0,:] = np.copy(xs)
x_true[1,:] = np.copy(ys)
x_true[2,:] = np.copy(zs)
###############################################################################
# Generate 2000 * 10 noisy Lorenz trajectories. For each one, draw a random
# SPD observation-error correlation (scaled by a random variance v) and store
# the flattened noisy observations together with (r1, r2, r3, v) as one row.
for ii in range(2000):
    if ii%100 ==0:
        print(ii)
    # construct observations
    #=========================================================================
    #generate x with noise
    for repetation in range(10):
        # Perturb the initial condition so each repetition yields a distinct
        # trajectory of the chaotic system.
        xs,ys,zs = lorenz_attractor(s=10, r=28, b=2.667, dt = 0.001, num_steps = 1000,x0 = 0.+np.random.normal(0, 0.05),
                                    y0=1.+np.random.normal(0, 0.05),z0=1.05+np.random.normal(0, 0.05))
        x_true = np.zeros((3,num_steps+1))
        x_true[0,:] = np.copy(xs)
        x_true[1,:] = np.copy(ys)
        x_true[2,:] = np.copy(zs)
        #=========================================================================
        y_true = np.zeros((3,num_steps+1))
        y_obs = np.zeros((3,num_steps+1))
        # Random observation-error model: the correlation of a random SPD
        # matrix, scaled by a uniform variance v in [0, 100).
        v = np.random.uniform(0,100.)
        R = correlation_from_covariance(sklearn.datasets.make_spd_matrix(3)) #SPD covariance
        r1 = R[0,1]
        r2 = R[0,2]
        r3 = R[1,2]
        R = v*R
        for i in range(num_steps+1):
            # NOTE(review): these two prints fire for every single time step
            # (~20M lines over the full run) -- likely debug leftovers.
            print("sample time: ",ii)
            print("iteration time: ",i)
            x = x_true[:,i]
            x.shape = (x.size,1)
            # Map the true state through the observation operator H ...
            y = np.dot(H,x)
            y.shape = (y.size,)
            y_true[:,i] = y
            # ... then add correlated Gaussian observation noise drawn from R.
            y_noise = np.random.multivariate_normal(np.zeros(3),R)
            y_noise.shape = (y_noise.size,)
            y_noise += y
            y_obs[:,i] = y_noise
        # Regression targets for the network: the three correlations and v.
        parameters = np.array([r1,r2,r3,v]) #output for deep learning regression
        #train_row = np.concatenate((y_obs.ravel(),x_true.ravel())) #input for deep learning
        train_row = y_obs.ravel()
        train_row = np.concatenate((train_row.ravel(),parameters))
        train_row.shape = (1,train_row.size)
        trainning_set = np.concatenate((trainning_set,train_row), axis=0)
        # if repetation+ii*10==5000:
        #     np.savetxt(f"lorenz_cov_train_v2/trainset_withx_steps1000_test_{10000+repetation+ii*10}.csv", trainning_set, delimiter=",")
# Drop the zero placeholder row that seeded the array.
trainning_set = trainning_set[1:,:]
#####################################################################################""
np.savetxt("lorenz_cov_train_v2/trainset_withx_steps1000_test8.csv", trainning_set, delimiter=",") | 5,902 | 34.993902 | 192 | py |
LSTM_Covariance | LSTM_Covariance-main/lorenz/lorenz_lstm200.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 13:39:58 2021
@author: siboc
"""
import numpy as np
import scipy
import math
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import r2_score
import tensorflow as tf
import keras.backend as K
# check scikit-learn version
# check scikit-learn version
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
def data_set_order(file, n_steps=1001, keep_steps=201):
    """Load a raw Lorenz training CSV and reorder it into time-major layout.

    The raw file stores the three observation coordinates as three contiguous
    ``n_steps``-column blocks (x, y, z over time) followed by the four
    regression targets (r1, r2, r3, v). This keeps only the first
    ``keep_steps`` steps of each coordinate, interleaves them so each kept
    time step's (x, y, z) triple is contiguous -- the layout expected when
    rows are later reshaped to (samples, keep_steps, 3) -- and rescales the
    last target (v) down by a factor of 100.

    Replaces the original pair of ``np.insert`` calls (each of which copies
    the whole matrix and builds an O(n) Python index list) with a single
    reshape/transpose; the returned values are identical.

    :param file: path to the CSV file (first row is treated as a header by
        pandas; the last two data rows are dropped, as before).
    :param n_steps: time steps stored per coordinate block (default 1001).
    :param keep_steps: leading time steps to keep (default 201, matching the
        original hard-coded truncation).
    :return: array of shape (n_samples, keep_steps * 3 + 4).
    """
    raw = np.asarray(pd.read_csv(file))[:-2, :]
    # (n_samples, coord, time), truncated to the first keep_steps steps.
    coords = raw[:, :3 * n_steps].reshape(-1, 3, n_steps)[:, :, :keep_steps]
    targets = raw[:, 3 * n_steps:].copy()
    targets[:, -1] /= 100  # scale the variance target v down by 100
    # Transpose to (n_samples, time, coord) so each step is (x_t, y_t, z_t),
    # then flatten back to one row per sample.
    interleaved = coords.transpose(0, 2, 1).reshape(raw.shape[0], -1)
    return np.concatenate((interleaved, targets), axis=1)
#input
# Load the training matrix: per row, the first 201 interleaved (x, y, z)
# observation steps followed by the four targets (r1, r2, r3, v/100).
train_data = data_set_order('lorenz_cov_train_v2/trainset_withx_steps1000_1.csv')
print("train_data shape: ",train_data.shape)
# train_data1 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis3.csv')
# print("train_data1 shape: ",train_data1.shape)
# train_data2 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis4.csv')
# print("train_data2 shape: ",train_data2.shape)
# train_data3 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis5.csv')
# print("train_data3 shape: ",train_data3.shape)
# train_data4 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis6.csv')
# print("train_data4 shape: ",train_data4.shape)
# train_data5 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis7.csv')
# print("train_data5 shape: ",train_data5.shape)
# train_data6 = data_set_order('lorenz_cov_train/trainset_withx_repeat10bis8.csv')
# print("train_data6 shape: ",train_data6.shape)
#size: num_steps*3,r1,r2,r3,v
#########################################################################################
#train_data = np.array(pd.read_csv('data_1000steps/trainset_withx_1000steps.csv'))
#
#
#train_data1 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis1.csv'))
#
#train_data2 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis2.csv'))
#
#train_data3 = np.array(pd.read_csv('data_1000steps/trainset_withx_1000stepsbis3.csv'))
# train_data = np.concatenate((train_data6,train_data5),axis = 0)
# train_data = np.concatenate((train_data,train_data4),axis = 0)
# train_data = np.concatenate((train_data,train_data3),axis = 0)
# train_data = np.concatenate((train_data,train_data2),axis = 0)
# train_data = np.concatenate((train_data,train_data1),axis = 0)
# train_data = np.concatenate((train_data,train_data0),axis = 0)
# train_data=train_data[:120000,:]
#weightstrain_data[:,604:]
# Shuffle samples in place, then split columns into inputs (3 x 201
# interleaved observations = 603 columns) and outputs (r1, r2, r3, v/100).
np.random.shuffle(train_data )
input_data = train_data[:,0:603]
output_data = train_data[:,603:]
########################################################################
# Hold out the final 3% of the (shuffled) samples as a test set.
train_part = 0.97
threshold = int(train_part*train_data.shape[0])
##########################################################################
train_input = input_data[:threshold]
print("input_data shape: ",input_data.shape)
train_output = output_data[:threshold]
print("output_data shape: ",output_data.shape)
test_input = input_data [threshold:]
true_test_output = output_data[threshold:]
# Short aliases used by the prediction/plotting code further below.
X1 = train_input
Y1 = train_output
X2 = test_input
#Y2 = ValidationSet_Y
############################################################################
#def my_loss_fn(y_true, y_pred):
#
# return K.mean(K.abs(y_true - y_pred) * weight)
# ========================================================================================
from keras.layers import LSTM,Dropout
from keras.layers import TimeDistributed
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
# save data
import os
import json
import pickle
if not os.path.isdir('save_data_v2'):
    os.makedirs('save_data_v2')
hidden_size=200
input_sample=input_data.shape[0] #for one sample
output_sample=output_data.shape[0]
# Reshape flat rows into (samples, time_steps, coords) sequences for the LSTM;
# 201 is the number of kept time steps per trajectory (see data_set_order).
input_data=input_data.reshape(input_sample,201,3)
output_data=output_data.reshape(output_sample,4)
use_dropout=True  # NOTE(review): defined but never used below
model = Sequential()
model.add(LSTM(hidden_size,input_shape=(201,3)))
model.add(Dense(4))
# opt = Adam(lr=0.0001)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mae'])
print(model.summary())
# Stop once validation loss has not improved for 50 epochs.
es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=50)
# modelcheckpoint: keep only the weights with the best validation loss so far.
mc=ModelCheckpoint('save_data_v2/sequentiallstm200_ing.h5',monitor='val_loss',mode='min',save_best_only=True,verbose=1)
history=model.fit(input_data, output_data, validation_split=0.1, epochs=100, batch_size=128, verbose=1,callbacks=[es,mc])
# model.save('save_data/sequentiallstm2')
model.save('save_data_v2/sequentiallstm200_ing_f.h5')
# Persist the training history for later plotting/analysis.
# https://stackoverflow.com/a/44674337/10349608
with open('save_data_v2/sequentiallstm200_ing_history.pickle', 'wb') as file_his:
    pickle.dump(history.history, file_his)
# Calculate predictions
PredTestSet = model.predict(X1.reshape(X1.shape[0],201,3))
PredValSet = model.predict(X2.reshape(X2.shape[0],201,3))
# Learning curves (training vs. validation loss per epoch).
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
#plt.savefig('figure_dp/loss_trace.eps', format='eps',bbox_inches='tight')
plt.show()
# Predicted vs. true scatter for target index 2 (r3) and index 3 (v/100).
plt.plot(PredValSet[:,2],true_test_output[:,2],'o', color='blue',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
plt.show()
plt.plot(PredValSet[:,3],true_test_output[:,3],'o',color='blue',markersize=5)
#plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
plt.show()
# predint = model.predict(train_input[:3000])
# trueint = train_output[:3000]
# plt.plot(predint[:,3],trueint[:,3],'o', color='blue',markersize=5)
# #plt.plot(list(range(0,1,0.1)),list(range(0,1,0.1)),'k')
# plt.show()
| 6,201 | 25.618026 | 121 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/setup.py | import pathlib
from setuptools import setup
# The directory containing this file
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file, reused as the PyPI long description below
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
    name="explainable_ai_image_measures",
    version="1.0.1",
    description="Compute IAUC, DAUC, IROF scores to measure quality of image attributions",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/Meier-Johannes/ExplainableAIImageMeasures",
    author="Johannes Meier",
    author_email="johannes-michael.meier@student.uni-tuebingen.de",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
    ],
    packages=["explainable_ai_image_measures"],
    include_package_data=True,
    install_requires=["numpy", "torch", "scikit-image", "scikit-learn"],
)
| 989 | 32 | 91 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/explainable_ai_image_measures/scoring_metric.py | import numpy as np
import torch
from sklearn.metrics import auc
import torch.nn.functional as F
from explainable_ai_image_measures.irof import IrofDataset
from explainable_ai_image_measures.pixel_relevancy import PixelRelevancyDataset
class Measures:
    """Scores image attributions with the IAUC, DAUC and IROF metrics.

    All three metrics repeatedly perturb the input image (inserting or
    removing pixels/superpixels in attribution order), re-run the model on
    the perturbed images, and integrate the resulting probability curve with
    sklearn's ``auc``.
    """

    def __init__(self,
                 model,
                 batch_size=64,
                 irof_segments=40,
                 irof_sigma=5,
                 pixel_package_size=1,
                 normalize=True,
                 clip01=False,
                 baseline_color=None):
        """
        Parametrize the future measurements
        model: PyTorch model
        batch_size: During each iteration batch_size number of images will be sent through the network simultaneously
        irof_segments: Maximum number of slic segments, that we want to use for measuring. Only relevant if you compute
                       IROF later
        irof_sigma: Parameter used in the slic algorithm
        pixel_package_size: E.g. for imagenet you may have 224*224=50,176 pixels. Therefore sending 50,176 pixels
                            through the network may lead to too much overhead. Instead you can also remove / add blocks
                            of pixels to speed up the computation. Only relevant for IAUC, DAUC
        normalize: With activated normalization the new probabilities are divided by the probabilities of the old image.
                   This allows the comparison of attributions independent of how sure the network is for the original
                   image. Activating normalization is highly encouraged if comparing attributions across several
                   images.
        clip01: Clips the computed probabilities between [0, 1]. This is only relevant for normalize=True.
                In some cases the probabilities after e.g. removing parts of the original image may be higher than
                before. E.g. for IROF this could theoretically lead to negative scores. If you want to prohibit this,
                activate clip01. Note that the clipping clips each individual score. Indirectly you also ensure that
                the final score is within [0,1]
        baseline_color: For IROF and DAUC we iteratively remove parts of the image and replace it by the baseline
                        color as specified here. For IAUC we start with an image consisting only of the baseline_color.
                        By default the mean color is used.
        """
        self.model = model
        self.batch_size = batch_size
        self.irof_segments = irof_segments
        self.irof_sigma = irof_sigma
        self.pixel_package_size = pixel_package_size
        self.normalize = normalize
        self.clip01 = clip01
        self.baseline_color = baseline_color

    def _calc_probs(self, image_batch, label):
        """Return the softmax probability of ``label`` for every image in the batch."""
        probs = F.softmax(self.model(image_batch), dim=1)
        return probs[:, label]

    def _calc_single_score(self, scoring_dataset, label):
        """Run the model over all perturbed images and integrate the probability curve.

        The datasets yield the unmodified original image as the very last
        element, so ``probs[-1]`` is the reference probability used for
        normalization; it is then dropped from the curve.
        """
        probs = []
        with torch.no_grad():
            for j, img_batch in enumerate(scoring_dataset):
                probs += [self._calc_probs(img_batch, label)]
        probs = torch.cat(probs).flatten()
        if self.normalize:
            # Divide by the probability of the unperturbed image (last entry).
            probs = probs[:-1] / probs[-1]
        else:
            probs = probs[:-1]
        if self.clip01:
            probs = torch.clamp(probs, 0, 1)
        # Metric-specific post-processing (e.g. IROF flips the curve to 1 - p).
        probs = scoring_dataset.postprocess_scores(probs)
        x = np.arange(0, len(probs))
        y = probs.detach().cpu().numpy()
        # Divide the area under the curve by its length so the score is
        # comparable across different numbers of perturbation steps.
        score = auc(x, y) / len(probs)
        return score, probs.detach()

    def _assert_check(self, image, attribution):
        """Sanity-check shapes: image is (C, W, H), attribution matches (W, H),
        and a custom baseline color has one entry per color channel."""
        assert(len(image.shape) == 3)
        assert(image.shape[1:] == attribution.shape)
        if self.baseline_color is not None:
            assert(len(self.baseline_color.shape) == 1)
            assert(len(self.baseline_color) == image.shape[0])

    def compute_IAUC(self, image, attribution, label):
        """
        Computes IAUC for a single image and attribution
        image: Torch.FloatTensor(color_channel, width, height)
        attribution: Torch.FloatTensor(width, height)
        label: Label of the attribution
        :return: tuple (score, per-step probability curve)
        """
        self._assert_check(image, attribution)
        with torch.no_grad():
            # insert=True: pixels are progressively inserted into a baseline image.
            dataset = PixelRelevancyDataset(
                image,
                attribution,
                True,
                self.batch_size,
                self.pixel_package_size,
                image.device,
                self.baseline_color
            )
            return self._calc_single_score(dataset, label)

    def compute_DAUC(self, image, attribution, label):
        """
        Computes DAUC for a single image and attribution
        image: Torch.FloatTensor(color_channel, width, height)
        attribution: Torch.FloatTensor(width, height)
        label: Label of the attribution
        :return: tuple (score, per-step probability curve)
        """
        self._assert_check(image, attribution)
        with torch.no_grad():
            # insert=False: pixels are progressively removed from the original image.
            dataset = PixelRelevancyDataset(
                image,
                attribution,
                False,
                self.batch_size,
                self.pixel_package_size,
                image.device,
                self.baseline_color
            )
            return self._calc_single_score(dataset, label)

    def compute_IROF(self, image, attribution, label):
        """
        Computes IROF for a single image and attribution
        image: Torch.FloatTensor(color_channel, width, height)
        attribution: Torch.FloatTensor(width, height)
        label: Label of the attribution
        :return: tuple (score, per-step probability curve)
        """
        self._assert_check(image, attribution)
        with torch.no_grad():
            dataset = IrofDataset(
                image,
                attribution,
                self.batch_size,
                self.irof_segments,
                self.irof_sigma,
                image.device,
                self.baseline_color
            )
            return self._calc_single_score(dataset, label)

    def compute_batch(self, images, attributions, labels, IROF=True, IAUC=True, DAUC=True):
        """
        Computes the batch for many images and allows multiple attributions per image.
        image: Torch.FloatTensor(nr_images, color_channel, width, height)
        attribution: (nr_images, nr_attributions_per_image, width, height)
        labels: Tuple / Array / Tensor of Int
        IROF: Defines, whether IROF is computed
        IAUC: Defines, whether IAUC is computed
        DAUC: Defines, whether DAUC is computed
        :return: dict mapping metric name -> (scores, probs) where scores has
            shape (nr_images, nr_attributions_per_image) and probs is a list of
            per-image stacked probability curves; None if no metric selected.
        """
        assert(len(images) == len(attributions))
        assert(len(images) == len(labels))
        # Collect the requested metric callables by name.
        functions = dict()
        if IROF:
            functions["IROF"] = self.compute_IROF
        if IAUC:
            functions["IAUC"] = self.compute_IAUC
        if DAUC:
            functions["DAUC"] = self.compute_DAUC
        if len(functions) == 0:
            return None
        result = dict()
        for method in functions:
            scores = torch.zeros(attributions.shape[0:2])
            probs = []
            for img_id in range(len(images)):
                probs.append([])
                for attr_id in range(len(attributions[img_id])):
                    score, prob = functions[method](
                        images[img_id],
                        attributions[img_id, attr_id],
                        labels[img_id]
                    )
                    scores[img_id, attr_id] = score
                    probs[-1].append(prob)
                # One (nr_attributions, curve_len) tensor per image.
                probs[-1] = torch.stack(probs[-1])
            result[method] = (scores, probs)
        return result
| 7,637 | 37.771574 | 120 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/explainable_ai_image_measures/pixel_manipulation.py | import torch
from torch.utils.data import Dataset
import abc
class PixelManipulationBase(Dataset):
    """
    Base Dataset yielding batches of progressively manipulated images.

    Pixels are either inserted into a baseline-colored image (insert=True) or
    replaced by the baseline color in the original image (insert=False), in an
    order defined by the subclass.
    Requires that self._pixel_batches is defined in the constructor
    (subclasses do this in generate_pixel_batches()).
    """

    def __init__(self, image, attribution, insert, batch_size, device, baseline_color):
        self._image = image
        self._batch_size = batch_size
        self._insert = insert
        self._attribution = attribution
        self._device = device
        self._baseline_color = baseline_color
        self._pixel_batches = None  # Expected to be set by child class
        self._temp_image = None  # Expected to be set by child class
        if self._baseline_color is None:
            # Default baseline: per-channel mean color of the image.
            self._baseline_color = torch.mean(image.reshape(self.color_channels, self.width, self.height), dim=(1, 2))
        self._temp_baseline = None
        self._temp_image = None

    @abc.abstractmethod
    def generate_pixel_batches(self):
        # Subclass fills self._pixel_batches with the manipulation order.
        return

    def generate_initial_image(self):
        """Set the flattened starting image for the iteration."""
        if self._insert:
            # Create a new image of original width & height with every pixel set to the baseline color
            # (pixels of the real image are inserted into it step by step).
            self._temp_image = (
                self._baseline_color.view(self.color_channels, 1)
                .repeat(1, self.width * self.height)
            )
        else:
            # Deletion mode starts from the original image.
            self._temp_image = self._image
        self._temp_image = self._temp_image.flatten()

    def generate_temp_baseline(self):
        # Keep baseline image multiply times to avoid repeat the generation every iteration
        self._temp_baseline = (
            self._baseline_color.view(self.color_channels, 1).repeat(1, self.nr_pixels).flatten()
        )

    def __len__(self):
        # Number of batches of perturbed images.
        return len(self._pixel_batches)

    @property
    def color_channels(self):
        return self._image.shape[0]

    @property
    def width(self):
        return self._image.shape[1]

    @property
    def height(self):
        return self._image.shape[2]

    def _get_batch_size(self, index):
        # Size of batch `index` (the last batch may be smaller).
        return len(self._pixel_batches[index])

    def _index_shift(self, matrix, add_per_row):
        # Adds (i-1) * add_per_row to every cell in row i. Nothing for row 1
        # NOTE(review): this helper is not called anywhere in this file.
        factor = add_per_row * torch.diag(torch.arange(0, len(matrix), device=self._device)).float()
        new_matrix = factor @ torch.ones_like(matrix, device=self._device).float() + matrix
        return new_matrix.long()

    @abc.abstractmethod
    def _gen_indices(self, index):
        # Subclass returns (template_indices, batch_indices) for batch `index`.
        return

    @property
    def nr_pixels(self):
        # Pixels per color plane (width * height).
        return self.width * self.height

    def _color_channel_shift(self, indices):
        # For every color channel shift by nr_pixels: expands flat per-plane
        # pixel indices into indices for all channels of a flattened (C, W, H)
        # image, interleaved per pixel.
        return torch.stack(
            [indices + i*self.nr_pixels for i in range(self.color_channels)]
        ).to(self._device).T.flatten()

    def _batch_shift(self, indices, pixel_per_image):
        """Shift flat per-image indices into positions within the flattened batch.

        Image i of the batch carries the *cumulative* number of manipulated
        pixels (each successive image keeps all previous pixels plus its own,
        as built by the subclasses' _gen_indices), hence the cumsum below.
        """
        # Depending on pixel_per_image create an array of the following form:
        # [0 0 1 1 1 2 2 2 2 2 2]
        # (image id repeated once per manipulated pixel of that image).
        nr_pixels_cum = torch.cumsum(pixel_per_image, 0)
        image_indices = [torch.Tensor(nr_pixels_cum[i]*[i]) for i in range(len(nr_pixels_cum))]
        image_indices = torch.cat(image_indices).to(self._device).long()
        # Multiply it with the total number of data points per image
        image_indices_shift = image_indices * self.color_channels * self.nr_pixels
        # Expand the shift for each color channel
        nr_man_pixels = int(len(indices) / self.color_channels)
        image_indices_shift = image_indices_shift.reshape(-1, 1).expand(nr_man_pixels, self.color_channels)
        image_indices_shift = image_indices_shift.flatten()
        # Shift the original indices
        batch_indices = indices + image_indices_shift
        return batch_indices

    @abc.abstractmethod
    def _get_fake_image_size(self):
        # Number of placeholder pixels appended for the original-image slot.
        return

    @abc.abstractmethod
    def postprocess_scores(self, y):
        # Metric-specific transform of the probability curve (identity here).
        return y

    def __getitem__(self, index):
        """
        Returns batch_size of images, where the most important pixels have been removed / added
        Important: Call the method with consecutive index values!
        (each batch continues from the last image produced by the previous one)
        """
        batch_size = self._get_batch_size(index)
        # Start with the image of the last run
        # Create batch of image [batch_size, color_channels x width x height]
        image_batch = self._temp_image.view(1, -1).repeat(batch_size, 1).flatten()
        # Get the indices that need to be modified from the image of the last run
        # template_indices = not unique, batch_indices = unique
        template_indices, batch_indices = self._gen_indices(index)
        if index == self.__len__() - 1:
            # Only in the last run: Ensure that there is no problem for the original image
            # Therefore remove the fake indices as added in the constructor
            template_indices = template_indices[
                : -self._get_fake_image_size() * self.color_channels * batch_size
            ]
            batch_indices = batch_indices[
                : -self._get_fake_image_size() * self.color_channels * batch_size
            ]
        # Modify the pixels: copy from the real image (insert) or overwrite
        # with the baseline color (delete).
        if self._insert:
            image_batch[batch_indices] = self._image.flatten()[template_indices]
        else:
            image_batch[batch_indices] = self._temp_baseline[template_indices]
        # Reshape the image to proper sizes as required by the network
        image_batch = image_batch.reshape(
            -1, self.color_channels, self.width, self.height
        )
        if index == self.__len__() - 1:
            # Only in the last run: Add the original image
            image_batch[batch_size - 1] = self._image
        else:
            # Save last image for the next run
            self._temp_image = image_batch[-1]  # TODO
        return image_batch
| 5,848 | 35.55625 | 118 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/explainable_ai_image_measures/irof.py | import torch
import numpy as np
from skimage.segmentation import slic
from explainable_ai_image_measures.pixel_manipulation import PixelManipulationBase
class IrofDataset(PixelManipulationBase):
    """Superpixel-removal dataset backing the IROF metric.

    Segments the image with slic, ranks segments by mean attribution, and
    yields batches of images where the highest-ranked segments are
    progressively replaced by the baseline color (insert=False).
    """

    def __init__(
        self, image, attribution, batch_size, irof_segments, irof_sigma, device, baseline_color
    ):
        PixelManipulationBase.__init__(
            self, image, attribution, False, batch_size, device, baseline_color
        )
        self._irof_segments = irof_segments
        # NOTE(review): _irof_sigma is stored but never passed to slic below;
        # the third positional argument of skimage's slic is compactness, so
        # irof_segments is currently also being used as the compactness value.
        # Confirm whether sigma/compactness were meant to be forwarded.
        self._irof_sigma = irof_sigma
        self.generate_pixel_batches()
        self.generate_initial_image()
        self.generate_temp_baseline()

    def generate_pixel_batches(self):
        # Apply Slic algorithm to get superpixel areas
        img_np = self._image.detach().cpu().numpy().transpose(1, 2, 0)
        segments = slic(img_np, self._irof_segments, self._irof_segments).reshape(-1)
        nr_segments = np.max(segments) + 1
        segments = torch.LongTensor(segments).to(device=self._device)
        # Attribution score of each segment = Mean attribution
        attr = self._attribution.reshape(-1)
        seg_mean = [
            torch.mean(attr[segments == seg]).item() for seg in range(nr_segments)
        ]
        seg_mean = torch.FloatTensor(seg_mean).to(device=self._device)
        # Sort segments descending by mean attribution
        seg_rank = torch.argsort(-seg_mean)
        # Create lists of "shape" [batch_size, segment_size]
        # containing the indices of the segments
        self._pixel_batches = [[]]
        for seg in seg_rank:
            indices = torch.nonzero(segments == seg).flatten()
            IrofDataset._add_to_hierarhical_list(
                self._pixel_batches, self._batch_size, indices
            )
        # Add placeholder for original image
        IrofDataset._add_to_hierarhical_list(
            self._pixel_batches, self._batch_size, torch.Tensor([0]).to(device=self._device)
        )

    @staticmethod
    def _add_to_hierarhical_list(list_element, target_size, item):
        # Append `item` to the last sub-list, opening a new sub-list once the
        # current one has reached `target_size` entries.
        if len(list_element[-1]) == target_size:
            list_element.append([])
        list_element[-1].append(item)

    def _gen_indices(self, index):
        """Build flat template/batch index pairs for batch `index`.

        Image i of the batch removes all segments of images 0..i-1 plus its
        own segment, so the kept index count per image is cumulative.
        """
        batch_size = self._get_batch_size(index)
        # Get all pixels
        all_pixels = torch.cat(self._pixel_batches[index]).to(self._device).long()
        # Create a matrix of indices of size [batch_size, all_pixels]
        template_indices = all_pixels.view(1, -1).repeat(batch_size, 1)
        # For each package only keep the previous pixels and package size additional pixels
        pixel_per_image = torch.LongTensor(
            [len(package) for package in self._pixel_batches[index]]
        ).to(self._device)
        cumsum = torch.cumsum(pixel_per_image, dim=0)
        keep_index_template = torch.cat(
            [torch.arange(0, s.item()) for s in cumsum]
        ).to(self._device)
        template_indices = template_indices.reshape(-1)[keep_index_template]
        template_indices = self._color_channel_shift(template_indices)
        batch_indices = self._batch_shift(template_indices, pixel_per_image)
        return template_indices, batch_indices

    def _get_fake_image_size(self):
        # One placeholder "pixel" was appended for the original-image slot.
        return 1

    def postprocess_scores(self, y):
        # IROF integrates 1 - p: the faster removal degrades the prediction,
        # the larger the resulting area.
        return 1-y
| 3,308 | 35.766667 | 95 | py |
ExplainableAIImageMeasures | ExplainableAIImageMeasures-main/explainable_ai_image_measures/pixel_relevancy.py | import torch
from explainable_ai_image_measures.pixel_manipulation import PixelManipulationBase
class PixelRelevancyDataset(PixelManipulationBase):
    """Pixel-wise perturbation dataset backing the IAUC and DAUC metrics.

    Pixels are ranked by attribution and then inserted (insert=True, IAUC)
    into a baseline image or removed (insert=False, DAUC) from the original,
    in packages of `package_size` pixels per step.
    """

    def __init__(self, image, attribution, insert, batch_size, package_size, device, baseline_color):
        PixelManipulationBase.__init__(
            self, image, attribution, insert, batch_size, device, baseline_color
        )
        self._package_size = package_size
        self._device = device
        self.generate_pixel_batches()
        self.generate_initial_image()
        self.generate_temp_baseline()

    def generate_pixel_batches(self):
        # For simplicity: Ensure that all packages have the same size.
        # (pixels beyond the largest multiple of package_size are dropped)
        max_nr_pixels = (
            self.width * self.height - self.width * self.height % self._package_size
        )
        # Sort pixels in descending order by attribution score
        pixel_relevancy_desc = torch.argsort(-self._attribution.flatten())[
            :max_nr_pixels
        ]
        # Add placeholder for original image
        placeholder = torch.LongTensor(self._package_size * [0]).to(device=self._device)
        pixel_relevancy_desc = torch.cat((pixel_relevancy_desc, placeholder))
        # Form groups of size package_size
        pixel_relevancy_groups = pixel_relevancy_desc.reshape(-1, self._package_size)
        # Forms batches of groups: (batch_size x package_size)
        self._pixel_batches = torch.split(
            pixel_relevancy_groups, self._batch_size, dim=0
        )

    def _gen_indices(self, index):
        """Build flat template/batch index pairs for batch `index`.

        Image i of the batch manipulates the packages of images 0..i-1 plus
        its own package, so the kept index count per image grows linearly.
        """
        batch_size = self._get_batch_size(index)
        # Create a matrix of indices of size [batch_size, package_size * batch_size]
        template_indices = self._pixel_batches[index].view(1, -1).repeat(batch_size, 1)
        # For each package only keep the previous pixels and package_size additional pixels
        keep_index_template = torch.cat(
            [torch.arange(0, self._package_size * i) for i in range(1, batch_size + 1)]
        )
        template_indices = template_indices.reshape(-1)[keep_index_template]
        template_indices = self._color_channel_shift(template_indices)
        # Shift each batch item by all the pixels of previous images
        pixel_per_image = torch.Tensor(batch_size * [self._package_size]).to(self._device).long()
        batch_indices = self._batch_shift(template_indices, pixel_per_image)
        return template_indices, batch_indices

    def _get_fake_image_size(self):
        # One whole package of placeholder pixels was appended for the
        # original-image slot.
        return self._package_size

    def postprocess_scores(self, y):
        # IAUC/DAUC integrate the raw probability curve unchanged.
        return y
| 2,552 | 41.55 | 101 | py |
DACBench | DACBench-main/examples/ray_ppo.py | import ray
from ray.tune.registry import register_env
from ray.rllib.agents import ppo
from dacbench import benchmarks
from dacbench.wrappers import ObservationWrapper
import argparse
def make_benchmark(config):
    """Construct a DACBench environment from a ray ``env_config`` dict.

    Expects ``config["benchmark"]`` (a class name in ``dacbench.benchmarks``)
    and ``config["seed"]``.
    """
    benchmark_name = config["benchmark"]
    benchmark = getattr(benchmarks, benchmark_name)()
    env = benchmark.get_benchmark(seed=config["seed"])
    # These benchmarks get their observations wrapped for RLlib consumption
    # (presumably flattening dict observations — confirm against the wrapper).
    if benchmark_name in ("SGDBenchmark", "CMAESBenchmark"):
        env = ObservationWrapper(env)
    return env
# Command line interface: which benchmarks/seeds to run and where to save.
parser = argparse.ArgumentParser(description="Run ray PPO for DACBench")
parser.add_argument("--outdir", type=str, default="output", help="Output directory")
parser.add_argument(
    "--benchmarks", nargs="+", type=str, default=["LubyBenchmark"], help="Benchmarks to run PPO for"
)
parser.add_argument(
    "--timesteps", type=int, default=10000, help="Number of timesteps to run"
)
parser.add_argument(
    "--save_interval", type=int, default=100, help="Checkpoint interval"
)
parser.add_argument(
    "--seeds",
    nargs="+",
    type=int,
    default=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
    help="Seeds for evaluation",
)
parser.add_argument("--torch", action="store_true")
parser.add_argument("--fd_port", type=int, default=55555)
args = parser.parse_args()

# Train one PPO agent per (benchmark, seed) pair. Each run gets its own
# short-lived ray session (init/shutdown inside the loop).
for b in args.benchmarks:
    for s in args.seeds:
        config = {"seed": s, "benchmark": b}
        if b == "FastDownwardBenchmark":
            # FastDownward talks to its planner process over a TCP port.
            config["port"] = args.fd_port
        register_env(f"{b}", lambda conf: make_benchmark(conf))
        ray.init()
        trainer = ppo.PPOTrainer(
            config={
                "num_gpus": 0,
                "env": f"{b}",
                "env_config": config,
                "framework": "tf" if not args.torch else "torch",
            }
        )
        for i in range(args.timesteps):
            trainer.train()
            if i % args.save_interval == 0:
                # BUGFIX: was `args.outdir + f"./{b}_{s}"`, which produced a
                # malformed path such as "output./LubyBenchmark_0".
                trainer.save(f"{args.outdir}/{b}_{s}")
        ray.shutdown()
| 1,902 | 30.716667 | 100 | py |
DACBench | DACBench-main/examples/coax_ppo_cmaes.py | import jax
import jax.numpy as jnp
import coax
import haiku as hk
from numpy import prod
import optax
from dacbench.benchmarks import CMAESBenchmark
from dacbench.wrappers import ObservationWrapper
# the name of this script (used for monitor/tensorboard directories)
name = 'ppo'

# the CMA-ES DAC benchmark environment (comment previously said "Pendulum",
# a leftover from the coax example this script is based on)
bench = CMAESBenchmark()
env = bench.get_environment()
env = ObservationWrapper(env)
# TrainMonitor logs episode statistics and metrics to tensorboard
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func_pi(S, is_training):
    """Gaussian policy network: map a batch of states to ``{'mu', 'logvar'}``.

    A shared two-layer trunk feeds two separate heads for the mean and the
    log-variance of the action distribution.
    """
    n_out = prod(env.action_space.shape)
    trunk = hk.Sequential((
        hk.Linear(8), jax.nn.relu,
        hk.Linear(8), jax.nn.relu,
    ))
    mean_head = hk.Sequential((
        trunk,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(n_out, w_init=jnp.zeros),
        hk.Reshape(env.action_space.shape),
    ))
    logvar_head = hk.Sequential((
        trunk,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(n_out, w_init=jnp.zeros),
        hk.Reshape(env.action_space.shape),
    ))
    return {'mu': mean_head(S), 'logvar': logvar_head(S)}
def func_v(S, is_training):
    """State-value network: map a batch of states to scalar values V(s)."""
    layers = (
        hk.Linear(8), jax.nn.relu,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(1, w_init=jnp.zeros), jnp.ravel
    )
    return hk.Sequential(layers)(S)
# define function approximators (policy and state-value function)
pi = coax.Policy(func_pi, env)
v = coax.V(func_v, env)

# target network: behavior policy, softly synced towards pi after updates
pi_targ = pi.copy()

# experience tracer: 5-step returns with discount 0.9
tracer = coax.reward_tracing.NStep(n=5, gamma=0.9)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=512)

# policy regularizer (avoid premature exploitation)
policy_reg = coax.regularizers.EntropyRegularizer(pi, beta=0.01)

# updaters: TD(0) critic update and clipped-PPO actor update
simpletd = coax.td_learning.SimpleTD(v, optimizer=optax.adam(1e-3))
ppo_clip = coax.policy_objectives.PPOClip(pi, regularizer=policy_reg, optimizer=optax.adam(1e-4))
# train: 10 episodes of environment interaction with on-policy PPO updates
for _ in range(10):
    done, truncated = False, False
    s, info = env.reset()
    while not (done or truncated):
        # act with the (lagging) target policy and keep the log-probability
        a, logp = pi_targ(s, return_logp=True)
        s_next, r, done, truncated, info = env.step(a)

        # trace rewards into n-step transitions and store them
        tracer.add(s, a, r, done or truncated, logp)
        while tracer:
            buffer.add(tracer.pop())

        # learn: once the buffer is full, do 4 passes of minibatch updates
        if len(buffer) >= buffer.capacity:
            for _ in range(int(4 * buffer.capacity / 32)):  # 4 passes per round
                transition_batch = buffer.sample(batch_size=32)
                metrics_v, td_error = simpletd.update(transition_batch, return_td_error=True)
                metrics_pi = ppo_clip.update(transition_batch, td_error)
                env.record_metrics(metrics_v)
                env.record_metrics(metrics_pi)
            buffer.clear()
            # softly move the behavior policy towards the updated policy
            pi_targ.soft_update(pi, tau=0.1)

        if done or truncated:
            break
        s = s_next
DACBench | DACBench-main/dacbench/benchmarks/sgd_benchmark.py | import csv
import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from gymnasium import spaces
from torch.nn import NLLLoss
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import SGDEnv
from dacbench.envs.sgd import Reward
# Default hyperparameter configuration space: a single "learning_rate"
# hyperparameter. NOTE(review): declared as an integer in [0, 10] while the
# action space below is a continuous Box([0], [10]) — confirm intended.
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
LR = CSH.UniformIntegerHyperparameter(name="learning_rate", lower=0, upper=10)
DEFAULT_CFG_SPACE.add_hyperparameter(LR)
def __default_loss_function(**kwargs):
    """Build the default per-sample loss: ``NLLLoss`` with ``reduction="none"``.

    Any keyword arguments are forwarded to the ``NLLLoss`` constructor.
    """
    loss_fn = NLLLoss(reduction="none", **kwargs)
    return loss_fn
# Human-readable benchmark metadata; stored in the config as "benchmark_info".
INFO = {
    "identifier": "LR",
    "name": "Learning Rate Adaption for Neural Networks",
    "reward": "Negative Log Differential Validation Loss",
    "state_description": [
        "Predictive Change Variance (Discounted Average)",
        "Predictive Change Variance (Uncertainty)",
        "Loss Variance (Discounted Average)",
        "Loss Variance (Uncertainty)",
        "Current Learning Rate",
        "Training Loss",
        "Validation Loss",
        "Step",
        "Alignment",
        "Crashed",
    ],
}
# Default configuration for the SGD benchmark. Keys mirror the attributes
# read by SGDEnv.__init__ (spaces, optimizer settings, data loading, crash
# handling, instance set location and the observation features to expose).
SGD_DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "action_space_class": "Box",
        "action_space_args": [np.array([0]), np.array([10])],
        "observation_space_class": "Dict",
        "observation_space_type": None,
        "observation_space_args": [
            {
                "predictiveChangeVarDiscountedAverage": spaces.Box(
                    low=-np.inf, high=np.inf, shape=(1,)
                ),
                "predictiveChangeVarUncertainty": spaces.Box(
                    low=0, high=np.inf, shape=(1,)
                ),
                "lossVarDiscountedAverage": spaces.Box(
                    low=-np.inf, high=np.inf, shape=(1,)
                ),
                "lossVarUncertainty": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "currentLR": spaces.Box(low=0, high=1, shape=(1,)),
                "trainingLoss": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "validationLoss": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "step": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "alignment": spaces.Box(low=0, high=1, shape=(1,)),
                "crashed": spaces.Discrete(2),
            }
        ],
        "reward_type": Reward.LogDiffTraining,
        "cutoff": 1e3,
        "lr": 1e-3,
        "discount_factor": 0.9,
        "optimizer": "rmsprop",
        # Training and validation losses default to the same factory.
        "loss_function": __default_loss_function,
        "loss_function_kwargs": {},
        "val_loss_function": __default_loss_function,
        "val_loss_function_kwargs": {},
        "training_batch_size": 64,
        "validation_batch_size": 64,
        "train_validation_ratio": 0.8,
        "dataloader_shuffle": True,
        "no_cuda": False,
        # Optimizer moment coefficients and numerical stabilizer.
        "beta1": 0.9,
        "beta2": 0.9,
        "epsilon": 1.0e-06,
        "clip_grad": (-1.0, 1.0),
        "seed": 0,
        # Flags reproducing the "learned optimizers" paper setup.
        "cd_paper_reconstruction": False,
        "cd_bias_correction": True,
        "terminate_on_crash": False,
        "crash_penalty": 0.0,
        "instance_set_path": "../instance_sets/sgd/sgd_train_100instances.csv",
        "benchmark_info": INFO,
        "features": [
            "predictiveChangeVarDiscountedAverage",
            "predictiveChangeVarUncertainty",
            "lossVarDiscountedAverage",
            "lossVarUncertainty",
            "currentLR",
            "trainingLoss",
            "validationLoss",
            "step",
            "alignment",
            "crashed",
        ],
    }
)

# Set reward range based on the chosen reward type
SGD_DEFAULTS.reward_range = SGD_DEFAULTS["reward_type"].func.frange
class SGDBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for SGD.

    Fills any keys missing from a user-supplied config with SGD_DEFAULTS
    and knows how to read instance sets (network architecture + dataset +
    seed combinations) from CSV files.
    """

    def __init__(self, config_path=None, config=None):
        """
        Initialize SGD Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        config : objdict
            Pre-built configuration (optional); missing keys are filled
            from SGD_DEFAULTS.
        """
        super(SGDBenchmark, self).__init__(config_path, config)
        if not self.config:
            # NOTE: .copy() is shallow — nested defaults are shared objects.
            self.config = objdict(SGD_DEFAULTS.copy())

        for key in SGD_DEFAULTS:
            if key not in self.config:
                self.config[key] = SGD_DEFAULTS[key]

    def get_environment(self):
        """
        Return SGDEnv env with current configuration

        Returns
        -------
        SGDEnv
            SGD environment
        """
        if "instance_set" not in self.config.keys():
            self.read_instance_set()

        # Read test set if path is specified
        if (
            "test_set" not in self.config.keys()
            and "test_set_path" in self.config.keys()
        ):
            self.read_instance_set(test=True)

        env = SGDEnv(self.config)
        # Apply any wrappers registered via the AbstractBenchmark interface.
        for func in self.wrap_funcs:
            env = func(env)

        return env

    def read_instance_set(self, test=False):
        """
        Read path of instances from config into list

        Parameters
        -------
        test : bool
            If True, read from config.test_set_path into "test_set";
            otherwise from config.instance_set_path into "instance_set".
        """
        if test:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.test_set_path
            )
            keyword = "test_set"
        else:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.instance_set_path
            )
            keyword = "instance_set"

        self.config[keyword] = {}
        with open(path, "r") as fh:
            reader = csv.DictReader(fh, delimiter=";")
            for row in reader:
                # "dataset_1000" style entries encode an explicit subset size.
                if "_" in row["dataset"]:
                    dataset_info = row["dataset"].split("_")
                    dataset_name = dataset_info[0]
                    dataset_size = int(dataset_info[1])
                else:
                    dataset_name = row["dataset"]
                    dataset_size = None
                # Instance layout consumed by SGDEnv.reset():
                # [dataset, seed, architecture string, cutoff steps, size].
                instance = [
                    dataset_name,
                    int(row["seed"]),
                    row["architecture"],
                    int(row["steps"]),
                    dataset_size,
                ]
                self.config[keyword][int(row["ID"])] = instance

    def get_benchmark(self, instance_set_path=None, seed=0):
        """
        Get benchmark from the LTO paper

        Parameters
        -------
        instance_set_path : str
            Optional alternative instance set (defaults to the trained set)
        seed : int
            Environment seed

        Returns
        -------
        env : SGDEnv
            SGD environment
        """
        # NOTE: this resets the config to the paper defaults, discarding any
        # configuration previously applied to this benchmark object.
        self.config = objdict(SGD_DEFAULTS.copy())
        if instance_set_path is not None:
            self.config["instance_set_path"] = instance_set_path
        self.config.seed = seed
        self.read_instance_set()
        return SGDEnv(self.config)
| 6,789 | 30.004566 | 81 | py |
DACBench | DACBench-main/dacbench/envs/sgd.py | import json
import math
import numbers
import random
import warnings
from enum import IntEnum, auto
from functools import reduce
import numpy as np
import torch
from backpack import backpack, extend
from backpack.extensions import BatchGrad
from torchvision import datasets, transforms
from dacbench import AbstractEnv
warnings.filterwarnings("ignore")
def reward_range(frange):
    """Decorator factory: tag a reward function with its value range.

    The given ``frange`` (e.g. ``[low, high]``) is attached to the decorated
    function as its ``frange`` attribute; the environment reads it to set
    its reward range.
    """

    def decorate(func):
        func.frange = frange
        return func

    return decorate
class Reward(IntEnum):
    """Available reward-signal variants for the SGD environment.

    Using a member as a decorator registers the function computing that
    reward; each member can be bound exactly once.
    """

    TrainingLoss = auto()
    ValidationLoss = auto()
    LogTrainingLoss = auto()
    LogValidationLoss = auto()
    DiffTraining = auto()
    DiffValidation = auto()
    LogDiffTraining = auto()
    LogDiffValidation = auto()
    FullTraining = auto()

    def __call__(self, f):
        """Register ``f`` as this member's reward function (at most once)."""
        already_bound = hasattr(self, "func")
        if already_bound:
            raise ValueError("Can not assign the same reward to a different function!")
        self.func = f
        return f
class SGDEnv(AbstractEnv):
"""
Environment to control the learning rate of adam
"""
def __init__(self, config):
"""
Initialize SGD Env
Parameters
-------
config : objdict
Environment configuration
"""
super(SGDEnv, self).__init__(config)
self.batch_size = config.training_batch_size
self.validation_batch_size = config.validation_batch_size
self.no_cuda = config.no_cuda
self.current_batch_size = config.training_batch_size
self.on_features = config.features
self.cd_paper_reconstruction = config.cd_paper_reconstruction
self.cd_bias_correction = config.cd_bias_correction
self.crashed = False
self.terminate_on_crash = config.terminate_on_crash
self.crash_penalty = config.crash_penalty
if isinstance(config.reward_type, Reward):
self.reward_type = config.reward_type
elif isinstance(config.reward_type, str):
try:
self.reward_type = getattr(Reward, config.reward_type)
except AttributeError:
raise ValueError(f"{config.reward_type} is not a valid reward type!")
else:
raise ValueError(f"Type {type(config.reward_type)} is not valid!")
self.use_cuda = not self.no_cuda and torch.cuda.is_available()
self.device = torch.device("cuda" if self.use_cuda else "cpu")
self.training_validation_ratio = config.train_validation_ratio
self.dataloader_shuffle = config.dataloader_shuffle
# self.test_dataset = None
self.train_dataset = None
self.validation_dataset = None
self.train_loader = None
# self.test_loader = None
self.validation_loader = None
self.train_loader_it = None
self.validation_loader_it = None
self.train_batch_index = 0
self.epoch_index = 0
self.current_training_loss = None
self.loss_batch = None
self.prev_training_loss = None
self._current_validation_loss = torch.zeros(
1, device=self.device, requires_grad=False
)
self._current_validation_loss.calculated = False
self.prev_validation_loss = torch.zeros(
1, device=self.device, requires_grad=False
)
self.model = None
self.val_model = None
# TODO:
"""
TODO: Samuel Mueller (PhD student in our group) also uses backpack and has ran into a similar memory leak.
He solved it calling this custom made RECURSIVE memory_cleanup function:
# from backpack import memory_cleanup
# def recursive_backpack_memory_cleanup(module: torch.nn.Module):
# memory_cleanup(module)
# for m in module.modules():
# memory_cleanup(m)
(calling this after computing the training loss/gradients and after validation loss should suffice)
"""
self.parameter_count = 0
self.layer_sizes = []
self.loss_function = config.loss_function(**config.loss_function_kwargs)
self.loss_function = extend(self.loss_function)
self.val_loss_function = config.loss_function(**config.val_loss_function_kwargs)
self.initial_lr = config.lr * torch.ones(
1, device=self.device, requires_grad=False
)
self.current_lr = config.lr * torch.ones(
1, device=self.device, requires_grad=False
)
self.optimizer_name = config.optimizer
self.beta1 = config.beta1
self.beta2 = config.beta2
self.epsilon = config.epsilon
# RMSprop parameters
self.beta2 = config.beta2
self.m = 0
self.v = 0
# Momentum parameters
self.sgd_momentum_v = 0
self.sgd_rho = 0.9
self.clip_grad = config.clip_grad
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.prev_direction = None
self.current_direction = None
self.predictiveChangeVarDiscountedAverage = torch.zeros(
1, device=self.device, requires_grad=False
)
self.predictiveChangeVarUncertainty = torch.zeros(
1, device=self.device, requires_grad=False
)
self.lossVarDiscountedAverage = torch.zeros(
1, device=self.device, requires_grad=False
)
self.lossVarUncertainty = torch.zeros(
1, device=self.device, requires_grad=False
)
self.discount_factor = config.discount_factor
self.firstOrderMomentum = torch.zeros(
1, device=self.device, requires_grad=False
)
self.secondOrderMomentum = torch.zeros(
1, device=self.device, requires_grad=False
)
if self.optimizer_name == "adam":
self.get_optimizer_direction = self.get_adam_direction
elif self.optimizer_name == "rmsprop":
self.get_optimizer_direction = self.get_rmsprop_direction
elif self.optimizer_name == "momentum":
self.get_optimizer_direction = self.get_momentum_direction
else:
raise NotImplementedError
if "reward_function" in config.keys():
self._get_reward = config["reward_function"]
else:
self._get_reward = self.reward_type.func
if "state_method" in config.keys():
self.get_state = config["state_method"]
else:
self.get_state = self.get_default_state
self.reward_range = self.reward_type.func.frange
def get_reward(self):
return self._get_reward(self)
    @reward_range([-(10**9), 0])
    @Reward.TrainingLoss
    def get_training_reward(self):
        """Negative loss of the current training batch."""
        return -self.current_training_loss.item()
    @reward_range([-(10**9), 0])
    @Reward.ValidationLoss
    def get_validation_reward(self):
        """Negative validation loss (lazily computed/cached)."""
        return -self.current_validation_loss.item()
    @reward_range([-(10**9), (10**9)])
    @Reward.LogTrainingLoss
    def get_log_training_reward(self):
        """Negative log of the current training-batch loss."""
        return -torch.log(self.current_training_loss).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.LogValidationLoss
    def get_log_validation_reward(self):
        """Negative log of the current validation loss."""
        return -torch.log(self.current_validation_loss).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.LogDiffTraining
    def get_log_diff_training_reward(self):
        """Negative log-difference of training loss (improvement is positive)."""
        return -(
            torch.log(self.current_training_loss) - torch.log(self.prev_training_loss)
        ).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.LogDiffValidation
    def get_log_diff_validation_reward(self):
        """Negative log-difference of validation loss (improvement is positive)."""
        return -(
            torch.log(self.current_validation_loss)
            - torch.log(self.prev_validation_loss)
        ).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.DiffTraining
    def get_diff_training_reward(self):
        # NOTE(review): unlike the log-diff variants this returns
        # current - previous without negation — confirm the intended sign.
        return (self.current_training_loss - self.prev_training_loss).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.DiffValidation
    def get_diff_validation_reward(self):
        # NOTE(review): returns current - previous without negation, unlike
        # the log-diff variants — confirm the intended sign.
        return (self.current_validation_loss - self.prev_validation_loss).item()
    @reward_range([-(10**9), 0])
    @Reward.FullTraining
    def get_full_training_reward(self):
        """Negative mean loss over the entire training loader (expensive)."""
        return -self._get_full_training_loss(loader=self.train_loader).item()
    def get_full_training_loss(self):
        """Mean loss over the full training loader (positive value)."""
        return -self.get_full_training_reward()
    @property
    def crash(self):
        """Step tuple returned when the run crashes (NaN action/state/reward).

        NOTE: reading this property has side effects — it marks the env as
        crashed and decides termination based on ``terminate_on_crash``.
        """
        self.crashed = True
        truncated = False
        terminated = False
        if self.c_step >= self.n_steps:
            # Out of budget anyway: report truncation, not termination.
            truncated = True
        else:
            terminated = self.terminate_on_crash
        return self.get_state(self), self.crash_penalty, terminated, truncated, {}
    def seed(self, seed=None, seed_action_space=False):
        """
        Set rng seed

        Parameters
        ----------
        seed:
            seed for rng
        seed_action_space: bool, default False
            if to seed the action space as well

        Returns
        -------
        list
            the seed actually used, wrapped in a list
        """
        (seed,) = super().seed(seed, seed_action_space)
        if seed is not None:
            # Seed torch (CPU and all CUDA devices) and force deterministic
            # cuDNN kernels for reproducible training runs.
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)
            torch.backends.cudnn.benchmark = False
            torch.backends.cudnn.deterministic = True
        return [seed]
    def step(self, action):
        """
        Execute environment step

        Applies the given learning rate to a manual optimizer update of the
        model weights, then trains on the next batch. Any NaN encountered in
        the action, update direction, reward or state aborts via ``crash``.

        Parameters
        ----------
        action : list
            action to execute (the new learning rate)

        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, info
        """
        truncated = super(SGDEnv, self).step_()

        self.step_count += 1
        index = 0

        # Unwrap tensor/array-like actions down to a plain number.
        if not isinstance(action, int) and not isinstance(action, float):
            action = action.item()
        if not isinstance(action, numbers.Number):
            action = action[0]

        if np.isnan(action):
            return self.crash

        new_lr = torch.Tensor([action]).to(self.device)
        self.current_lr = new_lr

        direction = self.get_optimizer_direction()
        if np.isnan(direction).any():
            return self.crash

        self.current_direction = direction

        # Manual parameter update: w <- w - lr * direction, applied per layer
        # by slicing the flat direction vector.
        delta_w = torch.mul(new_lr, direction)

        for i, p in enumerate(self.model.parameters()):
            layer_size = self.layer_sizes[i]
            p.data = p.data - delta_w[index : index + layer_size].reshape(
                shape=p.data.shape
            )
            index += layer_size

        self.model.zero_grad()

        # Remember previous losses for the diff-based rewards.
        self.prev_training_loss = self.current_training_loss
        if self._current_validation_loss.calculated:
            self.prev_validation_loss = self.current_validation_loss

        self.train_network()
        reward = self.get_reward()

        if np.isnan(reward):
            return self.crash

        state = self.get_state(self)
        for value in state.values():
            if np.isnan(value):
                return self.crash
        return state, reward, False, truncated, {}
def _architecture_constructor(self, arch_str):
layer_specs = []
layer_strs = arch_str.split("-")
for layer_str in layer_strs:
idx = layer_str.find("(")
if idx == -1:
nn_module_name = layer_str
vargs = []
else:
nn_module_name = layer_str[:idx]
vargs_json_str = '{"tmp": [' + layer_str[idx + 1 : -1] + "]}"
vargs = json.loads(vargs_json_str)["tmp"]
layer_specs.append((getattr(torch.nn, nn_module_name), vargs))
def model_constructor():
layers = [cls(*vargs) for cls, vargs in layer_specs]
return torch.nn.Sequential(*layers)
return model_constructor
    def reset(self, seed=None, options={}):
        """
        Reset environment

        Builds the model for the current instance, loads/splits the dataset,
        reinitializes all optimizer and feature statistics and runs a first
        training batch.

        NOTE(review): ``options={}`` is a mutable default argument; it is
        never mutated here, but consider ``options=None``.

        Returns
        -------
        np.array
            Environment state
        """
        super(SGDEnv, self).reset_(seed)

        # Instance layout: [dataset, seed, architecture, cutoff steps, size].
        dataset = self.instance[0]
        instance_seed = self.instance[1]
        construct_model = self._architecture_constructor(self.instance[2])
        self.n_steps = self.instance[3]
        dataset_size = self.instance[4]

        self.crashed = False

        self.seed(instance_seed)

        # Two copies of the model: one being trained, one for evaluation.
        self.model = construct_model().to(self.device)
        self.val_model = construct_model().to(self.device)

        def init_weights(m):
            # NOTE(review): torch.nn.init.xavier_normal is deprecated in
            # favor of xavier_normal_ — confirm torch version compatibility.
            if type(m) == torch.nn.Linear or type(m) == torch.nn.Conv2d:
                torch.nn.init.xavier_normal(m.weight)
                m.bias.data.fill_(0.0)

        if self.cd_paper_reconstruction:
            self.model.apply(init_weights)

        train_dataloader_args = {
            "batch_size": self.batch_size,
            "drop_last": True,
            "shuffle": self.dataloader_shuffle,
        }
        validation_dataloader_args = {
            "batch_size": self.validation_batch_size,
            "drop_last": True,
            "shuffle": False,
        }  # SA: shuffling empty data loader causes exception
        if self.use_cuda:
            param = {"num_workers": 1, "pin_memory": True}
            train_dataloader_args.update(param)
            validation_dataloader_args.update(param)

        if dataset == "MNIST":
            transform = transforms.Compose(
                [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
            )

            train_dataset = datasets.MNIST(
                "../data", train=True, download=True, transform=transform
            )
            # self.test_dataset = datasets.MNIST('../data', train=False, transform=transform)
        elif dataset == "CIFAR":
            transform = transforms.Compose(
                [
                    transforms.ToTensor(),
                    transforms.Normalize(
                        (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
                    ),
                ]
            )

            train_dataset = datasets.CIFAR10(
                "../data", train=True, download=True, transform=transform
            )
            # self.test_dataset = datasets.MNIST('../data', train=False, transform=transform)
        else:
            raise NotImplementedError

        # Optionally restrict the dataset to the instance-specified size.
        if dataset_size is not None:
            train_dataset = torch.utils.data.Subset(
                train_dataset, range(0, dataset_size)
            )

        # Split the training data into train/validation partitions.
        training_dataset_limit = math.floor(
            len(train_dataset) * self.training_validation_ratio
        )
        validation_dataset_limit = len(train_dataset)
        self.train_dataset = torch.utils.data.Subset(
            train_dataset, range(0, training_dataset_limit - 1)
        )
        self.validation_dataset = torch.utils.data.Subset(
            train_dataset, range(training_dataset_limit, validation_dataset_limit)
        )

        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset, **train_dataloader_args
        )
        # self.test_loader = torch.utils.data.DataLoader(self.test_dataset, **train_dataloader_args)
        self.validation_loader = torch.utils.data.DataLoader(
            self.validation_dataset, **validation_dataloader_args
        )

        self.train_batch_index = 0
        self.epoch_index = 0
        self.train_loader_it = iter(self.train_loader)
        self.validation_loader_it = iter(self.validation_loader)

        # Record per-layer parameter counts for the flat update vector.
        self.parameter_count = 0
        self.layer_sizes = []
        for p in self.model.parameters():
            layer_size = reduce(lambda x, y: x * y, p.shape)
            self.layer_sizes.append(layer_size)
            self.parameter_count += layer_size

        # Extend the model for backpack per-sample gradient computation.
        self.model = extend(self.model)

        self.model.zero_grad()
        self.model.train()
        self.val_model.eval()

        self.current_training_loss = None
        self.loss_batch = None

        # Momentum parameters
        self.m = 0
        self.v = 0
        self.sgd_momentum_v = 0
        self.t = 0
        self.step_count = torch.zeros(1, device=self.device, requires_grad=False)

        self.current_lr = self.initial_lr
        self.prev_direction = torch.zeros(
            (self.parameter_count,), device=self.device, requires_grad=False
        )
        self.current_direction = torch.zeros(
            (self.parameter_count,), device=self.device, requires_grad=False
        )

        # Reset all discounted feature statistics.
        self.predictiveChangeVarDiscountedAverage = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.predictiveChangeVarUncertainty = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.lossVarDiscountedAverage = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.lossVarUncertainty = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.firstOrderMomentum = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.secondOrderMomentum = torch.zeros(
            1, device=self.device, requires_grad=False
        )

        self._current_validation_loss = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self._current_validation_loss.calculated = False
        self.prev_validation_loss = torch.zeros(
            1, device=self.device, requires_grad=False
        )

        # Run a first batch so losses/gradients exist for the initial state.
        self.train_network()
        return self.get_state(self), {}
    def set_writer(self, writer):
        # Attach an external writer (e.g. tensorboard SummaryWriter) for logging.
        self.writer = writer
    def close(self):
        """
        No additional cleanup necessary

        Returns
        -------
        bool
            Cleanup flag (always True)
        """
        return True
    def render(self, mode: str = "human"):
        """
        Render env in human mode

        Rendering is a no-op for this environment; any mode other than
        "human" raises NotImplementedError.

        Parameters
        ----------
        mode : str
            Execution mode
        """
        if mode != "human":
            raise NotImplementedError

        pass
    def get_default_state(self, _):
        """
        Gather state description

        Computes gradients, optimizer moments and the feature statistics
        enabled via ``config.features``, then assembles them into a dict.
        The unused parameter exists because state methods are called with
        the env as argument.

        Returns
        -------
        dict
            Environment state
        """
        # Fresh (clipped) gradients and updated moment estimates for this step.
        self.gradients = self._get_gradients()
        self.gradients = self.gradients.clip(*self.clip_grad)

        (
            self.firstOrderMomentum,
            self.secondOrderMomentum,
            self.sgdMomentum,
        ) = self._get_momentum(self.gradients)

        # Only compute features that are actually requested.
        if (
            "predictiveChangeVarDiscountedAverage" in self.on_features
            or "predictiveChangeVarUncertainty" in self.on_features
        ):
            (
                predictiveChangeVarDiscountedAverage,
                predictiveChangeVarUncertainty,
            ) = self._get_predictive_change_features(self.current_lr)

        if (
            "lossVarDiscountedAverage" in self.on_features
            or "lossVarUncertainty" in self.on_features
        ):
            lossVarDiscountedAverage, lossVarUncertainty = self._get_loss_features()

        if "alignment" in self.on_features:
            alignment = self._get_alignment()

        state = {}

        if "predictiveChangeVarDiscountedAverage" in self.on_features:
            state[
                "predictiveChangeVarDiscountedAverage"
            ] = predictiveChangeVarDiscountedAverage.item()
        if "predictiveChangeVarUncertainty" in self.on_features:
            state[
                "predictiveChangeVarUncertainty"
            ] = predictiveChangeVarUncertainty.item()
        if "lossVarDiscountedAverage" in self.on_features:
            state["lossVarDiscountedAverage"] = lossVarDiscountedAverage.item()
        if "lossVarUncertainty" in self.on_features:
            state["lossVarUncertainty"] = lossVarUncertainty.item()
        if "currentLR" in self.on_features:
            state["currentLR"] = self.current_lr.item()
        if "trainingLoss" in self.on_features:
            # After a crash the losses may be NaN; report 0 instead.
            if self.crashed:
                state["trainingLoss"] = 0.0
            else:
                state["trainingLoss"] = self.current_training_loss.item()
        if "validationLoss" in self.on_features:
            if self.crashed:
                state["validationLoss"] = 0.0
            else:
                state["validationLoss"] = self.current_validation_loss.item()
        if "step" in self.on_features:
            state["step"] = self.step_count.item()
        if "alignment" in self.on_features:
            state["alignment"] = alignment.item()
        if "crashed" in self.on_features:
            state["crashed"] = self.crashed

        return state
    def _train_batch_(self):
        """Run forward/backward on the next training batch.

        Uses backpack's BatchGrad extension so per-sample gradients are
        available in ``param.grad_batch`` afterwards. Raises StopIteration
        when the training iterator is exhausted (handled by train_network).
        """
        (data, target) = next(self.train_loader_it)
        data, target = data.to(self.device), target.to(self.device)
        self.current_batch_size = data.size()[0]
        output = self.model(data)
        loss = self.loss_function(output, target)

        with backpack(BatchGrad()):
            loss.mean().backward()

        loss_value = loss.mean()

        self.loss_batch = loss
        self.current_training_loss = torch.unsqueeze(loss_value.detach(), dim=0)
        self.train_batch_index += 1
        # New model state invalidates the cached validation loss.
        self._current_validation_loss.calculated = False
    def train_network(self):
        """Perform one training step, restarting the loader at epoch end."""
        try:
            self._train_batch_()
        except StopIteration:
            # Epoch exhausted: rewind the loader and retry once.
            self.train_batch_index = 0
            self.epoch_index += 1
            self.train_loader_it = iter(self.train_loader)
            self._train_batch_()
    def _get_full_training_loss(self, loader):
        """Mean loss of the current model over an entire data loader.

        Weights are copied into ``val_model`` (kept in eval mode) so the
        training model's autograd state is untouched.
        """
        for target_param, param in zip(
            self.val_model.parameters(), self.model.parameters()
        ):
            target_param.data.copy_(param.data)

        loss = torch.zeros(1, device=self.device, requires_grad=False)
        with torch.no_grad():
            for data, target in loader:
                data, target = data.to(self.device), target.to(self.device)
                output = self.val_model(data)
                # NOTE(review): the second .detach() is redundant; also the
                # sum is divided by the full dataset size although the loader
                # drops the last incomplete batch — confirm intended.
                loss += self.val_loss_function(output, target).sum().detach().detach()

        loss /= len(loader.dataset)
        return loss
    @property
    def current_validation_loss(self):
        """Validation loss of the current model, lazily computed and cached.

        The cache is invalidated whenever a new training batch runs.
        """
        if not self._current_validation_loss.calculated:
            self._current_validation_loss = self._get_validation_loss()
            self._current_validation_loss.calculated = True

        return self._current_validation_loss
    def _get_validation_loss_(self):
        """Loss of ``val_model`` on the next validation batch (no grad).

        Raises StopIteration when the validation iterator is exhausted.
        """
        with torch.no_grad():
            (data, target) = next(self.validation_loader_it)
            data, target = data.to(self.device), target.to(self.device)
            output = self.val_model(data)
            validation_loss = self.val_loss_function(output, target).mean()
            validation_loss = torch.unsqueeze(validation_loss.detach(), dim=0)

        return validation_loss
    def _get_validation_loss(self):
        """Sync weights into ``val_model`` and evaluate one validation batch."""
        for target_param, param in zip(
            self.val_model.parameters(), self.model.parameters()
        ):
            target_param.data.copy_(param.data)

        try:
            validation_loss = self._get_validation_loss_()
        except StopIteration:
            # Validation data exhausted: rewind the loader and retry once.
            self.validation_loader_it = iter(self.validation_loader)
            validation_loss = self._get_validation_loss_()

        return validation_loss
def _get_gradients(self):
gradients = []
for p in self.model.parameters():
if p.grad is None:
continue
gradients.append(p.grad.flatten())
gradients = torch.cat(gradients, dim=0)
return gradients
    def _get_momentum(self, gradients):
        """Update and return the optimizer moment estimates.

        Returns the bias-corrected first and second moments (Adam-style EMAs)
        and the (uncorrected) SGD momentum buffer.
        """
        self.t += 1
        # Exponential moving averages of the gradient and squared gradient.
        self.m = self.beta1 * self.m + (1 - self.beta1) * gradients
        self.v = self.beta2 * self.v + (1 - self.beta2) * torch.square(gradients)
        # Standard Adam bias correction for the EMA warm-up.
        bias_corrected_m = self.m / (1 - self.beta1**self.t)
        bias_corrected_v = self.v / (1 - self.beta2**self.t)
        self.sgd_momentum_v = self.sgd_rho * self.sgd_momentum_v + gradients
        return bias_corrected_m, bias_corrected_v, self.sgd_momentum_v
    def get_adam_direction(self):
        # Adam update direction: m_hat / (sqrt(v_hat) + eps).
        return self.firstOrderMomentum / (
            torch.sqrt(self.secondOrderMomentum) + self.epsilon
        )
    def get_rmsprop_direction(self):
        # RMSprop update direction: raw gradient / (sqrt(v_hat) + eps).
        return self.gradients / (torch.sqrt(self.secondOrderMomentum) + self.epsilon)
    def get_momentum_direction(self):
        # SGD-with-momentum direction: the accumulated velocity buffer.
        return self.sgd_momentum_v
    def _get_loss_features(self):
        """Discounted average and uncertainty of the log loss variance.

        Returns a pair of zero tensors when the environment has crashed.
        """
        if self.crashed:
            return torch.tensor(0.0), torch.tensor(0.0)
        # Optional Adam-style bias correction for the EMA warm-up phase.
        bias_correction = (
            (1 - self.discount_factor ** (self.c_step + 1))
            if self.cd_bias_correction
            else 1
        )
        with torch.no_grad():
            # Variance of the per-sample losses of the last training batch.
            loss_var = torch.log(torch.var(self.loss_batch))
            self.lossVarDiscountedAverage = (
                self.discount_factor * self.lossVarDiscountedAverage
                + (1 - self.discount_factor) * loss_var
            )
            self.lossVarUncertainty = (
                self.discount_factor * self.lossVarUncertainty
                + (1 - self.discount_factor)
                * (loss_var - self.lossVarDiscountedAverage / bias_correction) ** 2
            )

        return (
            self.lossVarDiscountedAverage / bias_correction,
            self.lossVarUncertainty / bias_correction,
        )
    def _get_predictive_change_features(self, lr):
        """Discounted average/uncertainty of the predicted loss change variance.

        Uses backpack's per-sample gradients (``param.grad_batch``) to
        estimate how much the proposed update (``lr`` times the optimizer
        direction) would change each sample's loss. Returns zero tensors
        when the environment has crashed.
        """
        if self.crashed:
            return torch.tensor(0.0), torch.tensor(0.0)
        # Optional Adam-style bias correction for the EMA warm-up phase.
        bias_correction = (
            (1 - self.discount_factor ** (self.c_step + 1))
            if self.cd_bias_correction
            else 1
        )
        batch_gradients = []
        for i, (name, param) in enumerate(self.model.named_parameters()):
            grad_batch = param.grad_batch.reshape(
                self.current_batch_size, self.layer_sizes[i]
            )
            batch_gradients.append(grad_batch)

        batch_gradients = torch.cat(batch_gradients, dim=1)

        update_value = torch.mul(lr, self.get_optimizer_direction())

        # First-order estimate of the per-sample loss change: -g_i . delta_w.
        predictive_change = torch.log(
            torch.var(-1 * torch.matmul(batch_gradients, update_value))
        )

        self.predictiveChangeVarDiscountedAverage = (
            self.discount_factor * self.predictiveChangeVarDiscountedAverage
            + (1 - self.discount_factor) * predictive_change
        )
        self.predictiveChangeVarUncertainty = (
            self.discount_factor * self.predictiveChangeVarUncertainty
            + (1 - self.discount_factor)
            * (
                predictive_change
                - self.predictiveChangeVarDiscountedAverage / bias_correction
            )
            ** 2
        )

        return (
            self.predictiveChangeVarDiscountedAverage / bias_correction,
            self.predictiveChangeVarUncertainty / bias_correction,
        )
    def _get_alignment(self):
        """Mean sign agreement between the previous and current update direction.

        Also advances ``prev_direction`` to the current one as a side effect.
        Returns a zero tensor when the environment has crashed.
        """
        if self.crashed:
            return torch.tensor(0.0)
        alignment = torch.mean(
            torch.sign(torch.mul(self.prev_direction, self.current_direction))
        )
        alignment = torch.unsqueeze(alignment, dim=0)
        self.prev_direction = self.current_direction
        return alignment
def generate_instance_file(self, file_name, mode="test", n=100):
    """Write benchmark instance CSVs with (ID, dataset, architecture, seed, steps) rows.

    Args:
        file_name: Output path prefix (without extension). In ``"test"`` mode
            one file per dataset is written (``<prefix>_mnist.csv`` and
            ``<prefix>_cifar.csv``); in any other mode a single
            ``<prefix>.csv`` with randomly sampled training instances.
        mode: ``"test"`` uses the fixed evaluation architectures; anything
            else samples random conv widths, dataset sizes and step budgets.
        n: Number of instance rows per file.

    Every field is written with a trailing ';' separator, matching the
    downstream instance reader.
    """
    header = ["ID", "dataset", "architecture", "seed", "steps"]
    # (dataset name, architecture template, dataset size, sample dimension,
    #  number of max-pool layers, hidden conv layers, conv channel widths)
    architectures = [
        (
            "MNIST",
            "Conv2d(1, {0}, 3, 1, 1)-MaxPool2d(2, 2)-Conv2d({0}, {1}, 3, 1, 1)-MaxPool2d(2, 2)-Conv2d({1}, {2}, 3, 1, 1)-ReLU-Flatten-Linear({3}, 10)-LogSoftmax(1)",
            60000,
            28,
            2,
            3,
            [20, 50, 500],
        ),
        (
            "CIFAR",
            "Conv2d(3, {0}, 3, 1, 1)-MaxPool2d(2, 2)-ReLU-Conv2d({0}, {1}, 3, 1, 1)-ReLU-MaxPool2d(2, 2)-Conv2d({1}, {2}, 3, 1, 1)-ReLU-MaxPool2d(2, 2)-Conv2d({2}, {3}, 3, 1, 1)-ReLU-Flatten-Linear({4}, 10)-LogSoftmax(1)",
            60000,
            32,
            3,
            4,
            [32, 32, 64, 64],
        ),
    ]

    def _linear_in_features(last_channels, sample_size, pool_layer_count):
        # Flattened input of the final Linear layer: last conv's channel count
        # times the spatially downsampled image area (halved once per pooling).
        return int(round(last_channels * (sample_size / 2 ** pool_layer_count) ** 2))

    def _write_rows(f, rows):
        # Each field carries a trailing ';', including the last one.
        for row in rows:
            f.write("".join(str(field) + ";" for field in row) + "\n")

    if mode == "test":
        # BUG FIX: randrange() requires integer bounds; `stop=1e9` is a float
        # and raises TypeError on Python >= 3.12 (deprecated since 3.10).
        seed_list = [random.randrange(0, int(1e9)) for _ in range(n)]
        steps = int(1e8)  # effectively "run until the environment stops it"
        for dataset, template, _full_size, sample_size, pool_layers, hidden_layers, conv in architectures:
            fname = file_name + "_" + dataset.lower() + ".csv"
            linear_layer_size = _linear_in_features(conv[-1], sample_size, pool_layers)
            # The template has `hidden_layers` channel placeholders plus the
            # final Linear in-features placeholder.
            architecture = template.format(*(conv[:hidden_layers] + [linear_layer_size]))
            with open(fname, "w", encoding="UTF8") as f:
                _write_rows(f, [header])
                _write_rows(
                    f,
                    [
                        [row_id, dataset, architecture, seed_list[row_id], steps]
                        for row_id in range(n)
                    ],
                )
    else:
        dataset_index = 0  # training instances are sampled from MNIST only
        dataset_size_start, dataset_size_stop = 0.1, 0.5
        steps_start, steps_stop = 300, 1000
        conv1_start, conv1_stop = 2, 10
        conv2_start, conv2_stop = 5, 25
        conv3_start, conv3_stop = 50, 250
        dataset_list = [dataset_index] * n
        dataset_size_list = [
            random.uniform(dataset_size_start, dataset_size_stop) for _ in range(n)
        ]
        # BUG FIX: integer stop bound (see note above).
        seed_list = [random.randrange(0, int(1e9)) for _ in range(n)]
        steps_list = [random.randrange(steps_start, steps_stop) for _ in range(n)]
        conv1_list = [random.randrange(conv1_start, conv1_stop) for _ in range(n)]
        conv2_list = [random.randrange(conv2_start, conv2_stop) for _ in range(n)]
        conv3_list = [random.randrange(conv3_start, conv3_stop) for _ in range(n)]
        fname = file_name + ".csv"
        with open(fname, "w", encoding="UTF8") as f:
            _write_rows(f, [header])
            rows = []
            for row_id in range(n):
                name, template, full_size, sample_size, pool_layers = architectures[
                    dataset_list[row_id]
                ][:5]
                linear_layer_size = _linear_in_features(
                    conv3_list[row_id], sample_size, pool_layers
                )
                # Dataset name carries the sampled subset size, e.g. "MNIST_12345".
                dataset = name + "_" + str(int(dataset_size_list[row_id] * full_size))
                architecture = template.format(
                    conv1_list[row_id],
                    conv2_list[row_id],
                    conv3_list[row_id],
                    linear_layer_size,
                )
                rows.append(
                    [row_id, dataset, architecture, seed_list[row_id], steps_list[row_id]]
                )
            _write_rows(f, rows)
| 32,877 | 32.721026 | 226 | py |
cvnn | cvnn-master/debug/having_same_result_two_runs.py | import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import os
tfds.disable_progress_bar()
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in [0, 1]; labels pass through."""
    scaled = tf.cast(image, tf.float32) / 255.
    return scaled, label
def get_dataset():
    """Build the batched MNIST train/test input pipelines.

    Dataset-level shuffling is deliberately not applied (beyond file
    shuffling at load time) so two consecutive runs see batches in the
    same order.
    """
    (ds_train, ds_test), ds_info = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=True,
        as_supervised=True,
        with_info=True,
    )
    train_pipeline = (
        ds_train
        .map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        .cache()
        .batch(128)
        .prefetch(tf.data.experimental.AUTOTUNE)
    )
    test_pipeline = (
        ds_test
        .map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        .batch(128)
        .cache()
        .prefetch(tf.data.experimental.AUTOTUNE)
    )
    return train_pipeline, test_pipeline
def keras_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    """Train the reference dense MNIST classifier and return its Keras History.

    Model from https://www.tensorflow.org/datasets/keras_example. The global
    seed is pinned and ``shuffle=False`` so that two calls with identical
    initializers produce identical training histories.
    """
    tf.random.set_seed(24)
    classifier = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
        tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1),
        tf.keras.layers.Dense(10, activation='softmax', kernel_initializer=init2),
    ])
    classifier.compile(
        optimizer=tf.keras.optimizers.Adam(0.001),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
    )
    return classifier.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        shuffle=False,
        verbose=verbose,
    )
def test_mnist():
    """Check that two identical training runs produce bit-identical histories.

    Forces CPU execution and shares one pre-drawn set of initial weights
    between both runs via constant initializers, then asserts the Keras
    histories match exactly.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # hide CUDA: CPU is deterministic here
    print('GPU found' if tf.test.gpu_device_name() else "No GPU found")
    # Draw the initial weights once; both runs start from the same values.
    glorot = tf.keras.initializers.GlorotUniform()
    hidden_init = tf.constant_initializer(glorot((784, 128)).numpy())
    output_init = tf.constant_initializer(glorot((128, 10)).numpy())
    ds_train, ds_test = get_dataset()
    first = keras_fit(ds_train, ds_test, init1=hidden_init, init2=output_init)
    second = keras_fit(ds_train, ds_test, init1=hidden_init, init2=output_init)
    assert first.history == second.history, f"\n{first.history}\n!=\n{second.history}"
if __name__ == "__main__":
test_mnist() | 2,405 | 31.08 | 95 | py |
cvnn | cvnn-master/debug/ComplexDense_example.py | import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import datasets
from layers.__init__ import ComplexDense, ComplexFlatten
from pdb import set_trace
# Load MNIST and cast the real-valued images to complex64 so the complex
# layers receive their native dtype (imaginary part is implicitly zero).
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images, test_images = tf.cast(train_images, tf.complex64) / 255.0, tf.cast(test_images, tf.complex64) / 255.0
# Minimal complex-valued MLP mirroring the classic Keras MNIST example.
model = Sequential([
    ComplexFlatten(input_shape=(28, 28, 1)),
    ComplexDense(128, activation='relu', input_shape=(28, 28, 1)),
    ComplexDense(10, activation='softmax')
])
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer=Adam(0.001),
    metrics=['accuracy'],
)
# Sanity check only: print the dtype a forward pass produces.
print(model.predict(train_images[:10]).dtype)
# model.fit(
#     train_images, train_labels,
#     epochs=6,
#     validation_data=(test_images, test_labels),
# )
# ) | 946 | 31.655172 | 115 | py |
cvnn | cvnn-master/debug/conv_memory_script.py | import sys
import tensorflow as tf
from tensorflow.keras import datasets
from time import perf_counter
import numpy as np
from pdb import set_trace
import sys
# Script switches: each enables one independent experiment below.
ENABLE_MEMORY_GROWTH = True  # https://stackoverflow.com/questions/36927607/how-can-i-solve-ran-out-of-gpu-memory-in-tensorflow
DEBUG_CONV = False
TEST_KERAS_CONV2D = False
TEST_CONV_SPEED = False
if ENABLE_MEMORY_GROWTH:
    # Ask TF to grow GPU memory on demand instead of grabbing it all upfront.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)
if TEST_KERAS_CONV2D:
    # Baseline: time a single built-in Keras Conv2D forward pass on one
    # CIFAR batch, then exit (timings recorded in the comments below).
    (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
    train_images, test_images = train_images / 255.0, test_images / 255.0  # Normalize pixel values to be between 0 and 1
    start_time = perf_counter()
    conv2d = tf.keras.layers.Conv2D(1, 3, input_shape=(32, 32, 3))
    k_out = conv2d(train_images[:32].astype(np.float32))
    end_time = perf_counter()
    # Without memory growth: 12.695380546000003; 3.7; 11.6; 13.1
    # With Memory Growth: 1.4; 1.2; 1.15; 4.132;
    # Failed to initialize GPU device #0: unknown error
    print("Computing time was {} seconds".format(end_time - start_time))
    sys.exit()
class Dense:
    """Minimal fully connected layer: ``activation(inputs @ w + b)``."""

    def __init__(self, output_size, input_size, activation=tf.keras.activations.relu):
        glorot = tf.keras.initializers.GlorotUniform()
        self.w = tf.Variable(glorot(shape=(input_size, output_size)))
        self.b = tf.Variable(tf.keras.initializers.Zeros()(shape=output_size))
        self.activation = activation
        # Rebinds __call__ on the *class* to this instance's call, mirroring
        # the pattern used by the other layers in this script.
        self.__class__.__call__ = self.call

    def call(self, inputs):
        pre_activation = tf.matmul(inputs, self.w) + self.b
        return self.activation(pre_activation)

    def trainable_variables(self):
        return [self.w, self.b]
class Flatten:
    """Reshape ``(batch, *dims)`` inputs into ``(batch, prod(dims))``."""

    def __init__(self, input_size):
        self.input_size = input_size
        self.output_size = np.prod(self.input_size)
        # Same class-level __call__ binding pattern as the other layers here.
        self.__class__.__call__ = self.call

    def call(self, inputs):
        batch = inputs.shape[0]
        return tf.reshape(inputs, (batch, self.output_size))

    def trainable_variables(self):
        return []
class ConvND:
    """Hand-rolled N-dimensional convolution layer (debug/benchmark code).

    Computes a valid/strided convolution element by element in Python, using
    a mask trick to "assign" into a tensor (tf tensors are immutable), which
    makes it extremely slow — it exists to reproduce a memory issue, not for
    real training. Channels-last layout: input_size = spatial dims + channel.
    """

    def __init__(self, kernels, input_size, kernel_shape=(3, 3), padding=0, stride=1, activation=tf.keras.activations.linear):
        # `kernels` is the number of output filters.
        self.filters = kernels
        self.input_size = input_size
        self.activation = activation
        self._calculate_shapes(kernel_shape, padding, stride)
        # NOTE(review): stored on the *class*, so all instances share the
        # most recently constructed layer's output size — confirm intended.
        self.__class__.last_layer_output_size = self.output_size
        self._init_kernel()
        self.__class__.__call__ = self.call

    def _init_kernel(self):
        # One Glorot-initialized kernel per output filter, with an input
        # channel axis appended, plus a single bias per filter.
        self.kernels = []
        this_shape = self.kernel_shape + (self.input_size[-1],)
        for _ in range(self.filters):
            self.kernels.append(tf.Variable(tf.keras.initializers.GlorotUniform()(shape=this_shape)))
        self.bias = tf.Variable(tf.keras.initializers.Zeros()(shape=self.filters))

    def _calculate_shapes(self, kernel_shape, padding, stride):
        # Normalize kernel/padding/stride to per-spatial-dim tuples and
        # derive the output shape. Exits the process on malformed input.
        if isinstance(kernel_shape, int):
            self.kernel_shape = (kernel_shape,) * (len(self.input_size) - 1)  # -1 because the last is the channel
        elif isinstance(kernel_shape, (tuple, list)):
            self.kernel_shape = tuple(kernel_shape)
        else:
            print(
                "Kernel shape: " + str(kernel_shape) + " format not supported. It must be an int or a tuple")
            sys.exit(-1)
        # Padding
        if isinstance(padding, int):
            self.padding_shape = (padding,) * (len(self.input_size) - 1)  # -1 because the last is the channel
            # I call super first in the case input_shape is none
        elif isinstance(padding, (tuple, list)):
            self.padding_shape = tuple(padding)
        else:
            print("Padding: " + str(padding) + " format not supported. It must be an int or a tuple")
            sys.exit(-1)
        # Stride
        if isinstance(stride, int):
            self.stride_shape = (stride,) * (len(self.input_size) - 1)
            # I call super first in the case input_shape is none
        elif isinstance(stride, (tuple, list)):
            self.stride_shape = tuple(stride)
        else:
            print("stride: " + str(stride) + " format not supported. It must be an int or a tuple")
            sys.exit(-1)
        out_list = []
        for i in range(len(self.input_size) - 1):  # -1 because the number of input channels is irrelevant
            # 2.4 on https://arxiv.org/abs/1603.07285
            out_list.append(int(np.floor(
                (self.input_size[i] + 2 * self.padding_shape[i] - self.kernel_shape[i]) / self.stride_shape[i]
            ) + 1))
        out_list.append(self.filters)  # New channels are actually the filters
        self.output_size = tuple(out_list)
        return self.output_size

    def trainable_variables(self):
        return self.kernels + [self.bias]

    # @tf.function
    def call(self, inputs):
        """Forward pass: per-image, per-filter, per-output-element convolution."""
        inputs = self.apply_padding(inputs)  # Add zeros if needed
        output_np = np.zeros(  # I use np because tf does not support the assigment
            (inputs.shape[0],) +  # Per each image
            self.output_size, dtype=np.float32
        )
        img_index = 0
        progbar = tf.keras.utils.Progbar(inputs.shape[0])
        for image in inputs:
            for filter_index in range(self.filters):
                for i in range(int(np.prod(self.output_size[:-1]))):  # for each element in the output
                    # Map the flat index to spatial coordinates, slice the
                    # receptive field, and compute one output scalar.
                    index = np.unravel_index(i, self.output_size[:-1])
                    start_index = tuple([a * b for a, b in zip(index, self.stride_shape)])
                    end_index = tuple([a+b for a, b in zip(start_index, self.kernel_shape)])
                    sector_slice = tuple(
                        [slice(start_index[ind], end_index[ind]) for ind in range(len(start_index))]
                    )
                    sector = image[sector_slice]
                    new_value = tf.reduce_sum(sector * self.kernels[filter_index]) + self.bias[filter_index]
                    indices = (img_index,) + index + (filter_index,)
                    # "Assign" new_value at `indices`: build a mask that is 0
                    # only there, then blend. Allocates a full-size Variable
                    # per output element — the source of the memory blow-up
                    # this script investigates.
                    mask = tf.Variable(tf.fill(output_np.shape, 1))
                    mask = mask[indices].assign(0)
                    mask = tf.cast(mask, dtype=np.float32)
                    output_np = output_np * mask + (1 - mask) * new_value
                    # import pdb; pdb.set_trace()
            img_index += 1
            progbar.update(img_index)
        output = self.activation(output_np)
        return output

    def apply_padding(self, inputs):
        # Zero-pad spatial dims only; batch and channel axes untouched.
        pad = [[0, 0]]  # No padding to the images itself
        for p in self.padding_shape:
            pad.append([p, p])
        pad.append([0, 0])  # No padding to the channel
        return tf.pad(inputs, tf.constant(pad), "CONSTANT", 0)
# Test conv works: https://www.analyticsvidhya.com/blog/2018/12/guide-convolutional-neural-network-cnn/
# set_trace()
# Prepare to test conv layers
"""
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 425.31 Driver Version: 425.31 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 GeForce GT 735M WDDM | 00000000:01:00.0 N/A | N/A |
| N/A 58C P0 N/A / N/A | 37MiB / 1024MiB | N/A Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| 0 Not Supported |
+-----------------------------------------------------------------------------+
"""
if DEBUG_CONV:
    # Sanity-check ConvND against the worked vertical-edge-detection example
    # from https://www.analyticsvidhya.com/blog/2018/12/guide-convolutional-neural-network-cnn/
    img1 = np.array([
        [3, 0, 1, 2, 7, 4],
        [1, 5, 8, 9, 3, 1],
        [2, 7, 2, 5, 1, 3],
        [0, 1, 3, 1, 7, 8],
        [4, 2, 1, 6, 2, 8],
        [2, 4, 5, 2, 3, 9]
    ]).astype(np.float32)
    img2 = np.array([
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0]
    ]).astype(np.float32)
    # Add batch and channel axes: (1, H, W, 1).
    img1 = np.reshape(img1, (1, 6, 6, 1))
    img2 = np.reshape(img2, (1, 6, 6, 1))
    conv = ConvND(1, kernel_shape=(3, 3), input_size=(6, 6, 1), padding=0)
    # Replace the random kernel with a fixed vertical-edge detector.
    conv.kernels[0] = np.reshape(np.array([
        [1, 0, -1],
        [1, 0, -1],
        [1, 0, -1]
    ]), (3, 3, 1))
    out1 = conv(img1)
    out2 = conv(img2)
    print(out1[0,...,0])
    print(out2[0,...,0])
# conv tested
"""
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 425.31 Driver Version: 425.31 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 GeForce GT 735M WDDM | 00000000:01:00.0 N/A | N/A |
| N/A 58C P0 N/A / N/A | 110MiB / 1024MiB | N/A Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| 0 Not Supported |
+-----------------------------------------------------------------------------+
"""
# Model class to train network
class Model:
    """Minimal sequential model with a hand-written SGD training loop.

    `shape` is the ordered list of layer objects (each exposing ``call`` and
    ``trainable_variables``) defined above in this script.
    """

    def __init__(self, shape):
        self.shape = shape
        # Same class-level __call__ binding pattern as the layer classes.
        self.__class__.__call__ = self.call

    def call(self, x):
        # Forward pass: feed the input through every layer in order.
        for i in range(len(self.shape)):  # Apply all the layers
            x = self.shape[i].call(x)
        return x

    def fit(self, x, y, epochs=10, batch_size=32, learning_rate=0.01):
        """Plain mini-batch gradient descent over (x, y) with a progress bar."""
        train_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size=batch_size)
        num_tr_iter = int(x.shape[0] / batch_size)
        for epoch in range(epochs):
            iteration = 0
            tf.print("\nEpoch {0}/{1}".format(epoch+1, epochs))
            progbar = tf.keras.utils.Progbar(num_tr_iter)
            for x_batch, y_batch in train_dataset.prefetch(tf.data.experimental.AUTOTUNE).cache():
                progbar.update(iteration)
                iteration += 1
                self._train_step(x_batch, y_batch, learning_rate)

    def _apply_loss(self, y_true, y_pred):
        # Mean categorical cross-entropy over the batch.
        return tf.reduce_mean(input_tensor=tf.keras.losses.categorical_crossentropy(y_true, y_pred))

    @tf.function  # This makes all faster but harder to debug (set_trace is broken and print doesn't work)
    def _train_step(self, x_train_batch, y_train_batch, learning_rate):
        """One forward/backward pass plus a manual SGD parameter update."""
        with tf.GradientTape() as tape:
            with tf.name_scope("Forward_Phase") as scope:
                tf.print("Forward mode")
                x_called = self.call(x_train_batch)  # Forward mode computation
            # Loss function computation
            with tf.name_scope("Loss") as scope:
                tf.print("Compute loss")
                current_loss = self._apply_loss(y_train_batch, x_called)  # Compute loss
        # Calculating gradient
        with tf.name_scope("Gradient") as scope:
            tf.print("Get trainable variables")
            variables = []
            for lay in self.shape:
                variables.extend(lay.trainable_variables())  # TODO: Debug this for all layers.
            tf.print("Compute gradients")
            gradients = tape.gradient(current_loss, variables)  # Compute gradients
            assert all(g is not None for g in gradients)
        # Backpropagation
        with tf.name_scope("Optimizer") as scope:
            tf.print("Assign values")
            for i, val in enumerate(variables):
                val.assign(val - learning_rate * gradients[i])
# Prepare Dataset
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0  # Normalize pixel values to be between 0 and 1
print(train_images.shape)
if TEST_CONV_SPEED:
    # Time one ConvND forward pass on a single 32-image CIFAR batch,
    # for comparison with the Keras Conv2D timing above.
    start_time = perf_counter()
    conv_layer = ConvND(1, kernel_shape=(3, 3), input_size=(32, 32, 3))
    out = conv_layer(train_images[:32].astype(np.float32))  # 152x2 secs, 475.65
    end_time = perf_counter()
    print("Computing time was {} seconds".format(end_time - start_time))
"""
I sometimes have:
Failed to initialize GPU device #0: unknown error
with @tf.function decorator I have the error:
Failed to initialize GPU device #0: unknown error
2020-06-23 19:11:09.754024: F .\tensorflow/core/kernels/random_op_gpu.h:227] Non-OK-status: GpuLaunchKernel(FillPhiloxRandomKernelLaunch<Distribution>, num_blocks, block_size, 0, d.stream(), gen, data, size, dist) status: Internal: invalid configuration argument
or
Traceback (most recent call last):
TypeError: in converted code:
TypeError: tf__call() takes 2 positional arguments but 3 were given
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 425.31 Driver Version: 425.31 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 GeForce GT 735M WDDM | 00000000:01:00.0 N/A | N/A |
| N/A 65C P0 N/A / N/A | 112MiB / 1024MiB | N/A Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| 0 Not Supported |
+-----------------------------------------------------------------------------+
"""
# Define layers
# Small CNN: one hand-rolled conv, flatten, two dense layers (10-way softmax).
model_layers = [
    ConvND(1, kernel_shape=(3, 3), input_size=(32, 32, 3)),
    Flatten((30, 30, 1)),
    Dense(64, activation=tf.keras.activations.relu, input_size=900),
    Dense(10, input_size=64, activation=tf.keras.activations.softmax)
]
model = Model(model_layers)
# set_trace()
# Train Model
# Only a 1000-image subset: ConvND is far too slow for the full dataset.
model.fit(train_images[:1000].astype(np.float32), train_labels[:1000].astype(np.float32), epochs=5, batch_size=32)
"""
Epoch 1/5
0/31 [..............................] - ETA: 0sForward mode
1/32 [..............................] - ETA: 10:372020-06-23 19:38:17.893582: W tensorflow/core/common_runtime/bfc_allocator.cc:419] Allocator (GPU_0_bfc) ran out of memory trying to allocate 112.5KiB (rounded to 115200). Current allocation summary follows.
2020-06-23 19:38:17.930516: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (256): Total Chunks: 3940, Chunks in use: 3940. 985.0KiB allocated for chunks. 985.0KiB in use in bin. 215.6KiB client-requested in use in bin.
2020-06-23 19:38:17.977848: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (512): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.017997: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (1024): Total Chunks: 1, Chunks in use: 1. 1.3KiB allocated for chunks. 1.3KiB in use in bin. 1.0KiB client-requested in use in bin.
2020-06-23 19:38:18.058818: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (2048): Total Chunks: 1, Chunks in use: 1. 2.5KiB allocated for chunks. 2.5KiB in use in bin. 2.5KiB client-requested in use in bin.
2020-06-23 19:38:18.100510: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (4096): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.141216: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (8192): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.176846: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (16384): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.207463: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (32768): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.242445: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (65536): Total Chunks: 4908, Chunks in use: 4908. 539.22MiB allocated for chunks. 539.22MiB in use in bin. 539.21MiB client-requested in use in bin.
2020-06-23 19:38:18.280173: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (131072): Total Chunks: 8, Chunks in use: 8. 1.40MiB allocated for chunks. 1.40MiB in use in bin. 1012.5KiB client-requested in use in bin.
2020-06-23 19:38:18.320035: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (262144): Total Chunks: 1, Chunks in use: 1. 450.3KiB allocated for chunks. 450.3KiB in use in bin. 384.0KiB client-requested in use in bin.
2020-06-23 19:38:18.376468: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (524288): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.413848: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (1048576): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.450537: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (2097152): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.486643: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (4194304): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.523049: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (8388608): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.554221: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (16777216): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.590211: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (33554432): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.625964: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (67108864): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.664359: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (134217728): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.701712: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (268435456): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.743990: I tensorflow/core/common_runtime/bfc_allocator.cc:885] Bin for 112.5KiB was 64.0KiB, Chunk State:
2020-06-23 19:38:18.760060: I tensorflow/core/common_runtime/bfc_allocator.cc:898] Next region of size 1048576
2020-06-23 19:38:18.778819: I tensorflow/core/common_runtime/bfc_allocator.cc:905] InUse at 0000000600F80000 next 1 of size 1280
2020-06-23 19:38:18.797981: I tensorflow/core/common_runtime/bfc_allocator.cc:905] InUse at 0000000600F80500 next 2 of size 256
.... A very long repetition of this message
2020-06-23 19:42:42.059088: I tensorflow/core/common_runtime/bfc_allocator.cc:905] InUse at 0000000622D51200 next 8858 of size 256
2020-06-23 19:42:42.077451: I tensorflow/core/common_runtime/bfc_allocator.cc:905] InUse at 0000000622D51300 next 18446744073709551615 of size 214528
2020-06-23 19:42:42.097273: I tensorflow/core/common_runtime/bfc_allocator.cc:914] Summary of in-use Chunks by size:
2020-06-23 19:42:42.115466: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 3940 Chunks of size 256 totalling 985.0KiB
2020-06-23 19:42:42.132905: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 1280 totalling 1.3KiB
2020-06-23 19:42:42.149939: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 2560 totalling 2.5KiB
2020-06-23 19:42:42.167957: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 4905 Chunks of size 115200 totalling 538.88MiB
2020-06-23 19:42:42.185877: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 116224 totalling 113.5KiB
2020-06-23 19:42:42.203308: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 117504 totalling 114.8KiB
2020-06-23 19:42:42.222310: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 119296 totalling 116.5KiB
2020-06-23 19:42:42.244583: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 138752 totalling 135.5KiB
2020-06-23 19:42:42.268096: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 155392 totalling 151.8KiB
2020-06-23 19:42:42.288503: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 158720 totalling 155.0KiB
2020-06-23 19:42:42.307893: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 173312 totalling 169.3KiB
2020-06-23 19:42:42.333169: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 195072 totalling 190.5KiB
2020-06-23 19:42:42.355288: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 202240 totalling 197.5KiB
2020-06-23 19:42:42.376818: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 214528 totalling 209.5KiB
2020-06-23 19:42:42.400296: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 230400 totalling 225.0KiB
2020-06-23 19:42:42.431019: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 461056 totalling 450.3KiB
2020-06-23 19:42:42.449943: I tensorflow/core/common_runtime/bfc_allocator.cc:921] Sum Total of in-use chunks: 542.02MiB
2020-06-23 19:42:42.473861: I tensorflow/core/common_runtime/bfc_allocator.cc:923] total_region_allocated_bytes_: 568350976 memory_limit_: 568351129 available bytes: 153 curr_region_allocation_bytes_: 1073741824
2020-06-23 19:42:42.505584: I tensorflow/core/common_runtime/bfc_allocator.cc:929] Stats:
Limit: 568351129
InUse: 568350976
MaxInUse: 568350976
NumAllocs: 12827
MaxAllocSize: 461056
2020-06-23 19:42:42.543755: W tensorflow/core/common_runtime/bfc_allocator.cc:424]
2020-06-23 19:42:42.572182: W tensorflow/core/framework/op_kernel.cc:1622] OP_REQUIRES failed at cast_op.cc:109 : Resource exhausted: OOM when allocating tensor with shape[32,30,30,1] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
2020-06-23 19:42:42.620366: W tensorflow/core/kernels/data/cache_dataset_ops.cc:820] The calling iterator did not fully read the dataset being cached. In order to avoid unexpected truncation of the dataset, the partially cached contents of the dataset will be discarded. This can happen if you have an input pipeline similar to dataset.cache().take(k).repeat(). You should use dataset.take(k).cache().repeat() instead.
tensorflow.python.framework.errors_impl.ResourceExhaustedError: OOM when allocating tensor with shape[32,30,30,1] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:Cast] name: Forward_Phase/Cast/
""" | 25,639 | 61.689487 | 418 | py |
cvnn | cvnn-master/debug/mwe_testing_learning_algo.py | import tensorflow as tf
import numpy as np
from pdb import set_trace
BATCH_SIZE = 10
def get_dataset():
    """Return ((train_images, train_labels), (test_images, test_labels)) for Fashion-MNIST."""
    train_split, test_split = tf.keras.datasets.fashion_mnist.load_data()
    return train_split, test_split
def get_model(init1='glorot_uniform', init2='glorot_uniform'):
    """Build and compile the small dense classifier with a fixed seed.

    The seed is pinned before layer construction so repeated calls with the
    same initializers yield identical starting weights.
    """
    tf.random.set_seed(1)
    architecture = [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1),
        tf.keras.layers.Dense(10, kernel_initializer=init2),
    ]
    classifier = tf.keras.Sequential(architecture)
    classifier.compile(
        optimizer='sgd',
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
        metrics=['accuracy'],
    )
    return classifier
def train(model, x_fit, y_fit):
    """Run one manual forward/backward pass, then one fit() epoch, dumping artifacts.

    Saves initial weights, per-sample loss, gradients, and post-fit weights
    to .npy files in the working directory so the caller can verify the
    learning-rule arithmetic offline.

    NOTE(review): the manual loss uses categorical_crossentropy with its
    default from_logits=False while the model's last Dense has no softmax —
    confirm this mismatch is intentional for the MWE.
    """
    np.save("initial_weights.npy", np.array(model.get_weights()))
    with tf.GradientTape() as g:
        y_pred = model(x_fit)
        loss = tf.keras.losses.categorical_crossentropy(y_pred=y_pred, y_true=y_fit)
    np.save("loss.npy", np.array(loss))
    gradients = g.gradient(loss, model.trainable_weights)
    np.save("gradients.npy", np.array(gradients))
    # One epoch over the same mini-batch used for the manual pass above.
    model.fit(x_fit, y_fit, epochs=1, batch_size=BATCH_SIZE)
    np.save("final_weights.npy", np.array(model.get_weights()))
if __name__ == "__main__":
    (train_images, train_labels), (test_images, test_labels) = get_dataset()
    model = get_model()
    # One-hot encode the first BATCH_SIZE labels for categorical cross-entropy.
    y_fit = np.zeros((BATCH_SIZE, 10))
    for i, val in enumerate(train_labels[:BATCH_SIZE]):
        y_fit[i][val] = 1.
    train(model, train_images[:BATCH_SIZE], y_fit)
    # Reload the artifacts dumped by train() ...
    results = {
        "loss": np.load("loss.npy", allow_pickle=True),
        "init_weights": np.load("initial_weights.npy", allow_pickle=True),
        "gradients": np.load("gradients.npy", allow_pickle=True),
        "final_weights": np.load("final_weights.npy", allow_pickle=True)
    }
    # ... and check the SGD rule: gradient == (w_before - w_after) * N / lr
    # (lr=0.01, summed over the batch).
    for i_w, f_w, gr in zip(results["init_weights"], results["final_weights"], results["gradients"]):
        gr = gr.numpy()
        print(np.allclose(gr, (i_w - f_w) * BATCH_SIZE / 0.01))
| 2,133 | 37.8 | 101 | py |
cvnn | cvnn-master/debug/monte_carlo_tests.py | from cvnn.montecarlo import MonteCarlo
import tensorflow as tf
import layers.__init__ as layers
import numpy as np
# Smoke test: run a 2-iteration Monte Carlo comparison between a cvnn model
# (real dtype) and an equivalent plain-Keras model on Fashion-MNIST.
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# cvnn model built with real (float32) dtype so it mirrors the Keras baseline.
own_model = tf.keras.Sequential([
    layers.ComplexFlatten(input_shape=(28, 28)),
    layers.ComplexDense(128, activation='cart_relu', dtype=np.float32),
    layers.ComplexDense(10, dtype=np.float32)
], name="own_model")
own_model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
# Plain Keras baseline with the same topology.
keras_model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10)
], name="keras_model")
keras_model.compile(optimizer='adam',
                    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                    metrics=['accuracy'])
# Train both models repeatedly and run the bundled analysis/plots.
monte_carlo = MonteCarlo()
monte_carlo.add_model(own_model)
monte_carlo.add_model(keras_model)
monte_carlo.run(x=train_images, y=train_labels, validation_data=(test_images, test_labels), iterations=2)
monte_carlo.monte_carlo_analyzer.do_all()
| 1,245 | 34.6 | 105 | py |
cvnn | cvnn-master/examples/u_net_example.py | import tensorflow as tf
from cvnn import layers
from pdb import set_trace
import tensorflow_datasets as tfds
# https://medium.com/analytics-vidhya/training-u-net-from-scratch-using-tensorflow2-0-fad541e2eaf1
BATCH_SIZE = 64
BUFFER_SIZE = 1000
INPUT_SIZE = (572, 572)
MASK_SIZE = (388, 388)
def _downsample_tf(inputs, units):
    """One U-Net encoder stage: two 3x3 ReLU convs, then a 2x2 max-pool."""
    conv_a = tf.keras.layers.Conv2D(units, activation='relu', kernel_size=3)(inputs)
    conv_b = tf.keras.layers.Conv2D(units, activation='relu', kernel_size=3)(conv_a)
    pooled = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(conv_b)
    return conv_a, conv_b, pooled
def _downsample_cvnn(inputs, units, dtype=tf.float32):
    """cvnn twin of _downsample_tf: two 3x3 cart_relu convs plus 2x2 max-pool."""
    conv_a = layers.ComplexConv2D(units, activation='cart_relu', kernel_size=3, dtype=dtype)(inputs)
    conv_b = layers.ComplexConv2D(units, activation='cart_relu', kernel_size=3, dtype=dtype)(conv_a)
    pooled = layers.ComplexMaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', dtype=dtype)(conv_b)
    return conv_a, conv_b, pooled
def _upsample_tf(in1, in2, units, crop):
    """One U-Net decoder stage: 2x transpose-conv, crop-and-concat skip, two 3x3 convs."""
    upsampled = tf.keras.layers.Conv2DTranspose(units, kernel_size=2, strides=(2, 2), activation='relu')(in1)
    skip = tf.keras.layers.Cropping2D(cropping=(crop, crop))(in2)
    merged = tf.keras.layers.concatenate([upsampled, skip], axis=-1)
    first_conv = tf.keras.layers.Conv2D(units, activation='relu', kernel_size=3)(merged)
    second_conv = tf.keras.layers.Conv2D(units, activation='relu', kernel_size=3)(first_conv)
    return first_conv, second_conv
def _upsample_cvnn(in1, in2, units, crop, dtype=tf.float32):
    """cvnn twin of _upsample_tf: transpose-conv upsample, cropped skip concat, two convs."""
    upsampled = layers.ComplexConv2DTranspose(units, kernel_size=2, strides=(2, 2), activation='relu', dtype=dtype)(in1)
    skip = tf.keras.layers.Cropping2D(cropping=(crop, crop))(in2)
    merged = tf.keras.layers.concatenate([upsampled, skip], axis=-1)
    first_conv = layers.ComplexConv2D(units, activation='relu', kernel_size=3, dtype=dtype)(merged)
    second_conv = layers.ComplexConv2D(units, activation='relu', kernel_size=3, dtype=dtype)(first_conv)
    return first_conv, second_conv
def normalize(input_image, input_mask):
    """Scale uint8 pixels into [0, 1] floats; the mask passes through unchanged."""
    input_image = tf.divide(tf.cast(input_image, tf.float32), 255.0)
    return input_image, input_mask
def load_image(datapoint):
    """Pad-resize one tfds sample to the fixed U-Net input/mask sizes, then normalize."""
    image = tf.image.resize_with_pad(datapoint['image'], INPUT_SIZE[0], INPUT_SIZE[1])
    mask = tf.image.resize_with_pad(datapoint['segmentation_mask'], MASK_SIZE[0], MASK_SIZE[1])
    return normalize(image, mask)
def get_dataset():
    """Load a 1% slice of Oxford-IIIT Pet and build batched train/test pipelines."""
    (train_images, test_images), info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True,
                                                  split=['train[:1%]', 'test[:1%]'])
    train_length = info.splits['train'].num_examples
    steps_per_epoch = train_length // BATCH_SIZE  # NOTE(review): computed but unused here
    train_images = train_images.map(load_image)
    test_images = test_images.map(load_image)
    train_batches = train_images.batch(BATCH_SIZE).prefetch(buffer_size=tf.data.AUTOTUNE)
    test_batches = test_images.batch(BATCH_SIZE)
    # set_trace()
    return train_batches, test_batches
def get_cvnn_model(dtype=tf.float32):
    """Build the U-Net from cvnn layers; dtype selects real vs complex arithmetic."""
    tf.random.set_seed(1)  # same seed as get_tf_model so both start identically
    inputs = layers.complex_input(shape=INPUT_SIZE + (3,), dtype=dtype)
    # inputs = tf.keras.layers.InputLayer(input_shape=INPUT_SIZE + (3,), dtype=dtype)
    # inputs = tf.keras.layers.Input(shape=INPUT_SIZE + (3,))
    # Encoder; only the second conv of each stage (c1, c4, c7, c10) feeds a skip.
    c0, c1, c2 = _downsample_cvnn(inputs, 64, dtype)
    c3, c4, c5 = _downsample_cvnn(c2, 128, dtype)
    c6, c7, c8 = _downsample_cvnn(c5, 256, dtype)
    c9, c10, c11 = _downsample_cvnn(c8, 512, dtype)
    # Bottleneck.
    c12 = layers.ComplexConv2D(1024, activation='relu', kernel_size=3, dtype=dtype)(c11)
    c13 = layers.ComplexConv2D(1024, activation='relu', kernel_size=3, padding='valid', dtype=dtype)(c12)
    # Decoder; crop sizes (4, 16, 40, 88) align each skip to the upsampled map.
    c14, c15 = _upsample_cvnn(c13, c10, 512, 4, dtype)
    c16, c17 = _upsample_cvnn(c15, c7, 256, 16, dtype)
    c18, c19 = _upsample_cvnn(c17, c4, 128, 40, dtype)
    c20, c21 = _upsample_cvnn(c19, c1, 64, 88, dtype)
    outputs = layers.ComplexConv2D(4, kernel_size=1, dtype=dtype)(c21)
    model = tf.keras.Model(inputs=inputs, outputs=outputs, name="u-net-cvnn")
    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer="adam", metrics=["accuracy"])
    return model
def get_tf_model():
    """Build the reference all-Keras U-Net with the same topology as get_cvnn_model."""
    tf.random.set_seed(1)  # same seed as get_cvnn_model so both start identically
    inputs = tf.keras.layers.Input(shape=INPUT_SIZE + (3,))
    # Encoder; only the second conv of each stage (c1, c4, c7, c10) feeds a skip.
    c0, c1, c2 = _downsample_tf(inputs, 64)
    c3, c4, c5 = _downsample_tf(c2, 128)
    c6, c7, c8 = _downsample_tf(c5, 256)
    c9, c10, c11 = _downsample_tf(c8, 512)
    # Bottleneck.
    c12 = tf.keras.layers.Conv2D(1024, activation='relu', kernel_size=3)(c11)
    c13 = tf.keras.layers.Conv2D(1024, activation='relu', kernel_size=3, padding='valid')(c12)
    # Decoder; crop sizes (4, 16, 40, 88) align each skip to the upsampled map.
    c14, c15 = _upsample_tf(c13, c10, 512, 4)
    c16, c17 = _upsample_tf(c15, c7, 256, 16)
    c18, c19 = _upsample_tf(c17, c4, 128, 40)
    c20, c21 = _upsample_tf(c19, c1, 64, 88)
    outputs = tf.keras.layers.Conv2D(4, kernel_size=1)(c21)
    model = tf.keras.Model(inputs=inputs, outputs=outputs, name="u-net-tf")
    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  optimizer="adam", metrics=["accuracy"])
    return model
def test_model(model, train_batches, test_batches):
    """Fit `model` for two epochs; return (history, logs holding the initial weights)."""
    weigths = model.get_weights()  # snapshot before any training step
    # with tf.GradientTape() as tape:
    #     # for elem, label in iter(ds_train):
    #     loss = model.compiled_loss(y_true=tf.convert_to_tensor(test_labels), y_pred=model(test_images))
    #     gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    logs = {
        'weights': weigths,
        # 'loss': loss,
        # 'gradients': gradients
    }
    history = model.fit(train_batches, epochs=2, validation_data=test_batches)
    return history, logs
def test_unet():
    """Train the cvnn and the plain-Keras U-Nets identically; require equal histories."""
    batches_train, batches_test = get_dataset()
    cvnn_history, _ = test_model(get_cvnn_model(), batches_train, batches_test)
    keras_history, _ = test_model(get_tf_model(), batches_train, batches_test)
    assert keras_history.history == cvnn_history.history, \
        f"\n{keras_history.history}\n !=\n{cvnn_history.history}"
if __name__ == '__main__':
    from importlib import reload
    import os
    import tensorflow
    reload(tensorflow)
    # NOTE(review): hiding GPUs after tensorflow was already imported may not
    # take effect even with the reload — confirm CPU execution is actually forced.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    test_unet()
| 6,265 | 38.1625 | 114 | py |
cvnn | cvnn-master/examples/fashion_mnist_example.py | # TensorFlow and tf.keras
import tensorflow as tf
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
from cvnn import layers
print(tf.__version__)
def get_fashion_mnist_dataset():
    """Return ((train_images, train_labels), (test_images, test_labels)) for Fashion-MNIST."""
    return tf.keras.datasets.fashion_mnist.load_data()
def keras_fit(train_images, train_labels, test_images, test_labels,
              init1='glorot_uniform', init2='glorot_uniform', epochs=10):
    """Train the plain-Keras baseline classifier; return its fit history."""
    tf.random.set_seed(1)  # mirror own_fit so both runs are comparable
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1),
        tf.keras.layers.Dense(10, kernel_initializer=init2)
    ])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    # shuffle=False keeps batch order deterministic for the history comparison.
    history = model.fit(train_images, train_labels, epochs=epochs, shuffle=False)
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print('\nTest accuracy:', test_acc)
    return history
def own_fit(train_images, train_labels, test_images, test_labels,
            init1='glorot_uniform', init2='glorot_uniform', epochs=10):
    """Train the cvnn model (real float32 dtype, mirroring keras_fit); return its history."""
    tf.random.set_seed(1)  # mirror keras_fit so both runs are comparable
    model = tf.keras.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28)),
        layers.ComplexDense(128, activation='cart_relu', dtype=np.float32, kernel_initializer=init1),
        layers.ComplexDense(10, dtype=np.float32, kernel_initializer=init2)
    ])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    # shuffle=False keeps batch order deterministic for the history comparison.
    history = model.fit(train_images, train_labels, epochs=epochs, shuffle=False)
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print('\nTest accuracy:', test_acc)
    return history
def test_fashion_mnist():
    """Assert the cvnn real-dtype model reproduces the Keras training history exactly."""
    visible_devices = tf.config.get_visible_devices()
    for device in visible_devices:
        assert device.device_type != 'GPU', "Using GPU not good for debugging"
    seed = 117
    epochs = 3
    # Sample one weight set and freeze it into constant initializers so both
    # models start from byte-identical parameters.
    init = tf.keras.initializers.GlorotUniform(seed=seed)
    init1 = tf.constant_initializer(init((784, 128)).numpy())
    init2 = tf.constant_initializer(init((128, 10)).numpy())
    (train_images, train_labels), (test_images, test_labels) = get_fashion_mnist_dataset()
    keras = keras_fit(train_images, train_labels, test_images, test_labels, init1=init1, init2=init2, epochs=epochs)
    # keras1 = keras_fit(train_images, train_labels, test_images, test_labels, init1=init1, init2=init2, epochs=epochs)
    own = own_fit(train_images, train_labels, test_images, test_labels, init1=init1, init2=init2, epochs=epochs)
    assert keras.history == own.history, f"{keras.history } != {own.history }"
if __name__ == "__main__":
    # https://www.tensorflow.org/tutorials/keras/classification
    from importlib import reload
    import os
    import tensorflow
    reload(tensorflow)
    # NOTE(review): hiding GPUs after tensorflow was already imported may not
    # take effect even with the reload — confirm CPU execution is actually forced.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    test_fashion_mnist()
| 3,211 | 41.826667 | 119 | py |
cvnn | cvnn-master/examples/cifar410_example.py | import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import cvnn.layers as complex_layers
import numpy as np
from pdb import set_trace
# CIFAR-10 is loaded once at import time; every fit function below shares it.
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images.astype(dtype=np.float32) / 255.0, test_images.astype(dtype=np.float32) / 255.0
def keras_fit(epochs=10, use_bias=True):
    """Train the real-valued Keras CNN baseline on CIFAR-10.

    Returns (history, logs); logs captures the initial weights, the
    pre-training loss and gradients on the test set, and the final weights,
    so own_complex_fit's training can be checked against this run.
    """
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)  # seed shared with the cvnn builders
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3), kernel_initializer=init,
                            use_bias=use_bias))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer=init, use_bias=use_bias))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer=init, use_bias=use_bias))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu', kernel_initializer=init, use_bias=use_bias))
    model.add(layers.Dense(10, kernel_initializer=init, use_bias=use_bias))
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    weigths = model.get_weights()  # snapshot before any training step
    with tf.GradientTape() as tape:
        # for elem, label in iter(ds_train):
        loss = model.compiled_loss(y_true=tf.convert_to_tensor(test_labels), y_pred=model(test_images))
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    logs = {
        'weights_at_init': weigths,
        'loss': loss,
        'gradients': gradients,
        'weights_at_end': model.get_weights()
    }
    return history, logs
def own_fit(epochs=10):
    """Train the cvnn CNN with real (float32) dtype, mirroring keras_fit's topology."""
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)  # seed shared with keras_fit
    model = models.Sequential()
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu', input_shape=(32, 32, 3),
                                           dtype=np.float32, kernel_initializer=init))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2), dtype=np.float32))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', dtype=np.float32,
                                           kernel_initializer=init))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2), dtype=np.float32))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', dtype=np.float32,
                                           kernel_initializer=init))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu', dtype=np.float32, kernel_initializer=init))
    model.add(complex_layers.ComplexDense(10, dtype=np.float32, kernel_initializer=init))
    # model.summary()
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    return history
def own_complex_fit(epochs=10):
    """Train a truly complex cvnn CNN whose real parts match keras_fit's init.

    init_technique='zero_imag' with the shared GlorotUniform seed presumably
    initializes imaginary parts to zero (test_cifar10 asserts exactly that),
    so the complex network starts numerically identical to the real baseline.
    Returns (history, logs) with initial/final weights, loss and gradients.
    """
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)  # seed shared with keras_fit
    model = models.Sequential()
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu', input_shape=(32, 32, 3),
                                           kernel_initializer=init, use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', kernel_initializer=init,
                                           use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', kernel_initializer=init,
                                           use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu', kernel_initializer=init,
                                          use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexDense(10, activation='cast_to_real', kernel_initializer=init,
                                          use_bias=False, init_technique='zero_imag'))
    # model.summary()
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    weigths = model.get_weights()  # snapshot before any training step
    with tf.GradientTape() as tape:
        loss = model.compiled_loss(y_true=tf.convert_to_tensor(test_labels), y_pred=model(test_images))
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    logs = {
        'weights_at_init': weigths,
        'loss': loss,
        'gradients': gradients,
        'weights_at_end': model.get_weights()
    }
    return history, logs
def test_cifar10():
    """Check the complex network with zero imaginary init matches the real baseline."""
    epochs = 3
    assert not tf.test.gpu_device_name(), "Using GPU not good for debugging"
    keras, keras_logs = keras_fit(epochs=epochs, use_bias=False)
    # keras1 = keras_fit(epochs=epochs)
    own, own_logs = own_complex_fit(epochs=epochs)
    # cvnn weights alternate real/imag tensors: [::2] are real, [1::2] imaginary.
    assert np.all([np.all(k_w == o_w) for k_w, o_w in zip(keras_logs['weights_at_init'],
                                                          own_logs['weights_at_init'][::2])])  # real part equal
    assert np.all([np.all(o_w == 0) for o_w in own_logs['weights_at_init'][1::2]])  # imag part at zero
    assert np.all([np.all(o_w == 0) for o_w in own_logs['weights_at_end'][1::2]])
    assert own_logs['loss'] == keras_logs['loss']  # same loss
    assert np.all([np.allclose(k, o) for k, o in zip(keras_logs['gradients'], own_logs['gradients'][::2])])
    # assert keras.history == own.history, f"\n{keras.history}\n !=\n{own.history}"
    # Second pass: real-dtype cvnn model with bias must match Keras exactly.
    keras, _ = keras_fit(epochs=epochs)
    # keras1 = keras_fit(epochs=epochs)
    own = own_fit(epochs=epochs)
    assert keras.history == own.history, f"\n{keras.history}\n !=\n{own.history}"
    # for k, k2, o in zip(keras.history.values(), keras1.history.values(), own.history.values()):
    #     if np.all(np.array(k) == np.array(k2)):
    #         assert np.all(np.array(k) == np.array(o)), f"\n{keras.history}\n !=\n{own.history}"
if __name__ == "__main__":
    from importlib import reload
    import os
    import tensorflow
    reload(tensorflow)
    # NOTE(review): hiding GPUs after tensorflow was already imported may not
    # take effect even with the reload — confirm CPU execution is actually forced.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    test_cifar10()
| 7,167 | 52.492537 | 119 | py |
cvnn | cvnn-master/examples/mnist_dataset_example.py | import tensorflow as tf
import tensorflow_datasets as tfds
from cvnn import layers
import numpy as np
import timeit
import datetime
from pdb import set_trace
# plotly is optional; PLOTLY records whether it is importable.
try:
    import plotly.graph_objects as go
    import plotly
    PLOTLY = True
except ModuleNotFoundError:
    PLOTLY = False
# tf.enable_v2_behavior()
# tfds.disable_progress_bar()
# Figure options for plotly rendering (unused in this chunk — presumably
# consumed by plotting code elsewhere in the file).
PLOTLY_CONFIG = {
    'scrollZoom': True,
    'editable': True
}
def cast_to_complex(image, label):
    """Promote the image tensor to complex64; the label passes through unchanged."""
    complex_image = tf.cast(image, tf.complex64)
    return complex_image, label
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in [0, 1]."""
    scaled = tf.cast(image, tf.float32) / 255.
    return scaled, label
def get_dataset():
    """Build the standard batched MNIST train/test pipelines (shuffling disabled)."""
    (ds_train, ds_test), ds_info = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=False,
        as_supervised=True,
        with_info=True,
    )
    ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_train = ds_train.cache()
    # ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
    ds_train = ds_train.batch(128)
    ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.batch(128)
    ds_test = ds_test.cache()
    ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
    return ds_train, ds_test
def keras_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform', train_bias=True):
    """Train the plain-Keras MNIST MLP; return (history, seconds, logs).

    logs carries the initial weights plus the loss/gradients of one test
    batch before training, for comparison against the cvnn variants.
    """
    tf.random.set_seed(24)
    # https://www.tensorflow.org/datasets/keras_example
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28, 1), dtype=np.float32),
        tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1, dtype=np.float32, use_bias=train_bias),
        tf.keras.layers.Dense(10, activation='softmax', kernel_initializer=init2, dtype=np.float32, use_bias=train_bias)
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    weigths = model.get_weights()  # snapshot before any training step
    with tf.GradientTape() as tape:
        # for elem, label in iter(ds_train):
        elem, label = next(iter(ds_test))
        loss = model.compiled_loss(y_true=label, y_pred=model(elem))  # calculate loss
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    logs = {
        'weights': weigths,
        'loss': loss,
        'gradients': gradients
    }
    start = timeit.default_timer()
    history = model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False
    )
    stop = timeit.default_timer()
    return history, stop - start, logs
def own_complex_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    """Train the complex cvnn MLP (imag parts init to zero); return (history, seconds, logs).

    init_technique='zero_imag' presumably zero-initializes imaginary weights
    (test_mnist asserts that), so the net starts equal to the real baseline.
    """
    tf.random.set_seed(24)
    model = tf.keras.models.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.complex64),
        layers.ComplexDense(128, activation='cart_relu', dtype=np.complex64, kernel_initializer=init1,
                            use_bias=False, init_technique='zero_imag'),
        layers.ComplexDense(10, activation='cast_to_real', dtype=np.complex64, kernel_initializer=init2,
                            use_bias=False, init_technique='zero_imag'),
        tf.keras.layers.Activation('softmax')
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    # ds_train = ds_train.map(cast_to_complex)
    # ds_test = ds_test.map(cast_to_complex)
    weigths = model.get_weights()  # snapshot before any training step
    with tf.GradientTape() as tape:
        # for elem, label in iter(ds_train):
        elem, label = next(iter(ds_test))
        loss = model.compiled_loss(y_true=label, y_pred=model(elem))  # calculate loss
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    logs = {
        'weights': weigths,
        'loss': loss,
        'gradients': gradients
    }
    start = timeit.default_timer()
    history = model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False
    )
    stop = timeit.default_timer()
    return history, stop - start, logs
def own_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    """Train the cvnn MLP with real (float32) dtype; return (history, seconds, logs)."""
    tf.random.set_seed(24)
    model = tf.keras.models.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32),
        layers.ComplexDense(128, activation='cart_relu', dtype=np.float32, kernel_initializer=init1),
        layers.ComplexDense(10, activation='softmax_real_with_abs', dtype=np.float32, kernel_initializer=init2)
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    weigths = model.get_weights()  # snapshot before any training step
    with tf.GradientTape() as tape:
        # for elem, label in iter(ds_train):
        elem, label = next(iter(ds_test))
        loss = model.compiled_loss(y_true=label, y_pred=model(elem))  # calculate loss
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    logs = {
        'weights': weigths,
        'loss': loss,
        'gradients': gradients
    }
    start = timeit.default_timer()
    history = model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False
    )
    stop = timeit.default_timer()
    return history, stop - start, logs
def test_mnist():
    """Check cvnn MNIST models against the Keras baseline.

    Phase 1 (bias-free): the complex model with zero imaginary init must
    start from the same real weights, keep imag parts at zero behavior-wise,
    and produce the same initial loss and (dense-layer) gradients.
    Phase 2 (with bias): the real-dtype cvnn model must reproduce the Keras
    weights, loss and full training history exactly.
    """
    visible_devices = tf.config.get_visible_devices()
    for device in visible_devices:
        assert device.device_type != 'GPU', "Using GPU not good for debugging"
    ds_train, ds_test = get_dataset()
    # Don't use bias because the complex model gets a complex bias with imag not zero.
    keras_hist, keras_time, keras_logs = keras_fit(ds_train, ds_test, train_bias=False)
    keras_weigths = keras_logs['weights']
    own_cvnn_hist, own_cvnn_time, own_cvnn_logs = own_complex_fit(ds_train, ds_test)
    own_cvnn_weigths = own_cvnn_logs['weights']
    # cvnn weights alternate real/imag tensors: [::2] real, [1::2] imaginary.
    assert np.all([np.all(k_w == o_w) for k_w, o_w in zip(keras_weigths, own_cvnn_weigths[::2])])
    assert np.all([np.all(o_w == 0) for o_w in own_cvnn_weigths[1::2]])
    assert own_cvnn_logs['loss'] == keras_logs['loss']
    assert np.allclose(own_cvnn_logs['gradients'][2], keras_logs['gradients'][1])
    # for k, o in zip(keras_hist.history.values(), own_cvnn_hist.history.values()):
    #     assert np.allclose(k, o), f"\n{keras_hist.history}\n !=\n{own_cvnn_hist.history}"
    # DO AGAIN TO USE BIAS
    keras_hist, keras_time, keras_logs = keras_fit(ds_train, ds_test)
    keras_weigths = keras_logs['weights']
    own_hist, own_time, own_logs = own_fit(ds_train, ds_test)
    own_weigths = own_logs['weights']
    # BUG FIX: a bare `assert [comprehension]` is always true (non-empty list);
    # wrap in np.all so the element-wise comparison actually takes effect.
    assert np.all([np.all(k_w == o_w) for k_w, o_w in zip(keras_weigths, own_weigths)])
    assert keras_hist.history == own_hist.history, f"\n{keras_hist.history}\n !=\n{own_hist.history}"
    assert own_logs['loss'] == keras_logs['loss']
    # for k, k2, o in zip(keras_hist.history.values(), keras2_hist.history.values(), own_hist.history.values()):
    #     if np.all(np.array(k) == np.array(k2)):
    #         assert np.all(np.array(k) == np.array(o)), f"\n{keras_hist.history}\n !=\n{own_hist.history}"
if __name__ == "__main__":
    from importlib import reload
    import os
    import tensorflow
    reload(tensorflow)
    # NOTE(review): hiding GPUs after tensorflow was already imported may not
    # take effect even with the reload — confirm CPU execution is actually forced.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    test_mnist()
    # test_mnist_montecarlo()
    # ds_train, ds_test = get_dataset()
    # keras_fit(ds_train, ds_test, train_bias=False)
    # own_fit(ds_train, ds_test)
| 7,789 | 36.63285 | 118 | py |
cvnn | cvnn-master/tests/test_dropout.py | import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from pdb import set_trace
import cvnn.layers as complex_layers
from cvnn.montecarlo import run_montecarlo
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in [0, 1]."""
    return tf.math.divide(tf.cast(image, tf.float32), 255.), label
def get_dataset():
    """Build batched MNIST train/test pipelines with shuffling disabled for determinism."""
    (ds_train, ds_test), ds_info = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=False,
        as_supervised=True,
        with_info=True,
    )
    ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_train = ds_train.cache()
    # ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
    ds_train = ds_train.batch(128)
    ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.batch(128)
    ds_test = ds_test.cache()
    ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
    return ds_train, ds_test
@tf.autograph.experimental.do_not_convert
def simple_random_example():
    """Sanity-check ComplexDropout against a hard-coded seeded mask.

    Verifies: inference (training=False) is identity; the seeded training
    mask matches the expected values (kept entries scaled by 1/(1-rate));
    and Keras' real Dropout drops exactly the real part of the same elements.
    """
    tf.random.set_seed(0)
    layer = complex_layers.ComplexDropout(.2, input_shape=(2,), seed=0)
    data = np.arange(10).reshape(5, 2).astype(np.float32)
    data = tf.complex(data, data)
    outputs = layer(data, training=True)
    expected_out = np.array([[0. + 0.j, 0. + 0.j],
                             [0. + 0.j, 3.75 + 3.75j],
                             [5. + 5.j, 6.25 + 6.25j],
                             [7.5 + 7.5j, 8.75 + 8.75j],
                             [10. + 10.j, 11.25 + 11.25j]])
    assert np.all(data == layer(data, training=False))
    assert np.all(outputs == expected_out)
    # Same seed => the real Keras Dropout draws the same mask.
    tf.random.set_seed(0)
    layer = tf.keras.layers.Dropout(.2, input_shape=(2,), seed=0)
    real_outputs = layer(tf.math.real(data), training=True)
    assert np.all(real_outputs == tf.math.real(outputs))
def get_real_mnist_model():
    """Build the real-valued reference MLP with a plain Keras Dropout layer.

    Returns (model, intermediate_model); the latter exposes the post-dropout
    activations so they can be compared against the cvnn model's.
    NOTE(review): the 'cart_relu' / 'softmax_real_with_abs' activation strings
    and the 'ComplexGlorotUniform' initializer on tf.keras layers presumably
    resolve via custom objects registered by cvnn at import time — confirm.
    """
    in1 = tf.keras.layers.Input(shape=(28, 28, 1))
    flat = tf.keras.layers.Flatten(input_shape=(28, 28, 1))(in1)
    dense = tf.keras.layers.Dense(128, activation='cart_relu')(flat)
    # drop = complex_layers.ComplexDropout(rate=0.5)(dense)
    drop = tf.keras.layers.Dropout(0.5)(dense)
    out = tf.keras.layers.Dense(10, activation='softmax_real_with_abs', kernel_initializer="ComplexGlorotUniform")(drop)
    real_model = tf.keras.Model(in1, out, name="tf_rvnn")
    real_model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    real_intermediate_model = tf.keras.Model(in1, drop)
    return real_model, real_intermediate_model
def get_complex_mnist_model():
    """Build the cvnn MLP (real float32 dtype) with ComplexDropout.

    Returns (model, intermediate_model); the latter exposes the post-dropout
    activations so they can be compared against the Keras model's.
    """
    inputs = complex_layers.complex_input(shape=(28, 28, 1), dtype=np.float32)
    flat = complex_layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32)(inputs)
    dense = complex_layers.ComplexDense(128, activation='cart_relu', dtype=np.float32)(flat)
    drop = complex_layers.ComplexDropout(rate=0.5)(dense)
    out = complex_layers.ComplexDense(10, activation='softmax_real_with_abs', dtype=np.float32)(drop)
    complex_model = tf.keras.Model(inputs, out, name="rvnn")
    complex_model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    complex_intermediate_model = tf.keras.Model(inputs, drop)
    return complex_model, complex_intermediate_model
def dropout():
    """Exhaustively compare ComplexDropout with Keras Dropout on MNIST.

    Checks, with matched seeds: identical activations with training=False
    and training=True (both at the dropout layer and at the model output),
    identical evaluate() metrics, and identical loss/gradients. The RNG seed
    is re-set before each paired call so both models draw the same masks.
    """
    ds_train, ds_test = get_dataset()
    train_images, train_labels = convert_to_numpy(ds_train)
    test_images, test_labels = convert_to_numpy(ds_test)
    img, label = next(iter(ds_test))
    tf.random.set_seed(0)
    complex_model, complex_intermediate_model = get_complex_mnist_model()
    tf.random.set_seed(0)
    real_model, real_intermediate_model = get_real_mnist_model()
    c_before_train_eval = complex_intermediate_model(img, training=False)
    r_before_train_eval = real_intermediate_model(img, training=False)
    assert np.all(r_before_train_eval == c_before_train_eval), f"Results are not equal after drop with training=False"
    assert np.all(real_model.layers[2].get_weights()[0] == complex_model.layers[2].get_weights()[
        0]), f"Output layer weights are not equal before any call"
    assert np.all(real_model.layers[-1].get_weights()[0] == complex_model.layers[-1].get_weights()[
        0]), f"Output layer weights are not equal before any call"
    c_before_train_eval = complex_model(img, training=False)
    r_before_train_eval = real_model(img, training=False)
    assert np.all(r_before_train_eval == c_before_train_eval), f"Results are not equal with training=False"
    tf.random.set_seed(0)
    c_before_train_eval = complex_intermediate_model(img, training=True)
    tf.random.set_seed(0)
    r_before_train_eval = real_intermediate_model(img, training=True)
    assert np.all(r_before_train_eval == c_before_train_eval), f"Results are not equal after drop with training=True"
    tf.random.set_seed(0)
    c_before_train_eval = complex_model(img, training=True)
    tf.random.set_seed(0)
    r_before_train_eval = real_model(img, training=True)
    assert np.all(r_before_train_eval == c_before_train_eval), f"Results are not equal with training=True"
    tf.random.set_seed(0)
    complex_eval = complex_model.evaluate(ds_test, verbose=False)
    tf.random.set_seed(0)
    real_eval = real_model.evaluate(ds_test, verbose=False)
    assert np.all(real_eval == complex_eval), f"\n{real_eval}\n !=\n{complex_eval}"
    elem, label = convert_to_numpy(ds_test)
    label = tf.convert_to_tensor(label)
    # elem, label = next(iter(ds_test))
    # set_trace()
    tf.random.set_seed(0)
    with tf.GradientTape() as tape:
        r_loss = real_model.compiled_loss(y_true=label, y_pred=real_model(elem, training=True))  # calculate loss
    real_gradients = tape.gradient(r_loss, real_model.trainable_weights)  # back-propagation
    tf.random.set_seed(0)
    with tf.GradientTape() as tape:
        c_loss = complex_model.compiled_loss(y_true=label, y_pred=complex_model(elem, training=True))  # calculate loss
    complex_gradients = tape.gradient(c_loss, complex_model.trainable_weights)  # back-propagation
    assert r_loss == c_loss, f"\nReal loss:\t\t {r_loss};\nComplex loss:\t {c_loss}"
    # Next assertions showed a rounding error with my library.
    assert np.all([np.allclose(c_g, r_g) for c_g, r_g in zip(complex_gradients, real_gradients)])
def convert_to_numpy(ds):
    """Materialize a batched tf.data dataset into two stacked numpy arrays.

    Returns (images, labels) concatenated over all batches, or (None, None)
    when the dataset yields no batches.
    """
    image_batches = []
    label_batches = []
    for images, labels in tfds.as_numpy(ds):
        image_batches.append(images)
        label_batches.append(labels)
    if not image_batches:
        return None, None
    # Concatenate once at the end instead of re-allocating the accumulated
    # array on every batch (the previous loop of np.concatenate was O(n^2)).
    return np.concatenate(image_batches, axis=0), np.concatenate(label_batches, axis=0)
def mnist(tf_data: bool = True):
    """Train the paired real/complex MNIST models and require identical histories.

    tf_data selects whether fit() consumes the tf.data pipeline directly or
    the equivalent in-memory numpy arrays.
    """
    ds_train, ds_test = get_dataset()
    train_images, train_labels = convert_to_numpy(ds_train)
    test_images, test_labels = convert_to_numpy(ds_test)
    # Re-seed before each build so both models start from the same weights.
    tf.random.set_seed(0)
    complex_model, _ = get_complex_mnist_model()
    tf.random.set_seed(0)
    real_model, _ = get_real_mnist_model()
    if tf_data:
        r_history = real_model.fit(ds_train, epochs=6, validation_data=ds_test,
                                   verbose=False, shuffle=False)
        c_history = complex_model.fit(ds_train, epochs=6, validation_data=ds_test,
                                      verbose=False, shuffle=False)
    else:
        r_history = real_model.fit(train_images, train_labels, epochs=6, validation_data=(test_images, test_labels),
                                   verbose=False, shuffle=False)
        c_history = complex_model.fit(train_images, train_labels, epochs=6, validation_data=(test_images, test_labels),
                                      verbose=False, shuffle=False)
    assert r_history.history == c_history.history, f"{r_history.history} != {c_history.history}"
def get_fashion_mnist_dataset():
    """Fetch Fashion-MNIST as ((train_images, train_labels), (test_images, test_labels))."""
    train_split, test_split = tf.keras.datasets.fashion_mnist.load_data()
    return train_split, test_split
def fashion_mnist():
    """Train the paired models on Fashion-MNIST and require identical histories.

    NOTE(review): shuffle=True here (unlike mnist()); the assert presumably
    relies on both fits consuming the seeded global RNG identically — confirm
    this is not flaky.
    """
    (train_images, train_labels), (test_images, test_labels) = get_fashion_mnist_dataset()
    tf.random.set_seed(0)
    complex_model, _ = get_complex_mnist_model()
    tf.random.set_seed(0)
    real_model, _ = get_real_mnist_model()
    c_history = complex_model.fit(train_images, train_labels, epochs=10, shuffle=True, verbose=False,
                                  validation_data=(test_images, test_labels))
    r_history = real_model.fit(train_images, train_labels, epochs=10, shuffle=True, verbose=False,
                               validation_data=(test_images, test_labels))
    assert r_history.history == c_history.history, f"{r_history.history} != {c_history.history}"
def montecarlo():
    """Run a 30-iteration Monte Carlo comparison of the two dropout models."""
    ds_train, ds_test = get_dataset()
    complex_model, _ = get_complex_mnist_model()
    real_model, _ = get_real_mnist_model()
    run_montecarlo(models=[complex_model, real_model], dataset=ds_train, iterations=30,
                   epochs=20, validation_data=ds_test, do_all=True, validation_split=0.0, preprocess_data=False)
def test_dropout():
    """Entry point: force CPU, then run every dropout comparison in sequence."""
    from importlib import reload
    import os
    import tensorflow
    # reload(tensorflow)
    # NOTE(review): set after tensorflow import; may not actually hide GPUs — confirm.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    dropout()
    mnist(True)
    mnist(False)
    fashion_mnist()
    simple_random_example()
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_dropout()
| 9,706 | 42.142222 | 120 | py |
cvnn | cvnn-master/tests/test_doc_cvnn_example.py | import numpy as np
import cvnn.layers as complex_layers
import tensorflow as tf
from pdb import set_trace
def get_dataset():
    """Load CIFAR-10 and rescale pixels to [0, 1] as complex64 (zero imaginary part)."""
    (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
    train_images = np.divide(train_images.astype(dtype=np.complex64), 255.0)
    test_images = np.divide(test_images.astype(dtype=np.complex64), 255.0)
    return (train_images, train_labels), (test_images, test_labels)
def test_cifar():
    """Smoke test: train a small complex CNN on CIFAR-10 for one epoch."""
    (train_images, train_labels), (test_images, test_labels) = get_dataset()
    model = tf.keras.models.Sequential([
        complex_layers.ComplexInput(input_shape=(32, 32, 3)),  # always use ComplexInput first
        complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'),
        complex_layers.ComplexAvgPooling2D((2, 2)),
        complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu'),
        complex_layers.ComplexFlatten(),
        complex_layers.ComplexDense(64, activation='cart_relu'),
        # Final activation maps the complex output to a real magnitude per class.
        complex_layers.ComplexDense(10, activation='convert_to_real_with_abs'),
    ])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.fit(train_images, train_labels, epochs=1, validation_data=(test_images, test_labels))
    model.evaluate(test_images, test_labels, verbose=2)
def test_regression():
    """A complex dense head with a linear output must keep complex64 dtype."""
    batch_shape = (4, 28, 28, 3)
    x = tf.cast(tf.random.normal(batch_shape), tf.complex64)
    model = tf.keras.models.Sequential([
        complex_layers.ComplexInput(input_shape=batch_shape[1:]),
        complex_layers.ComplexFlatten(),
        complex_layers.ComplexDense(units=64, activation='cart_relu'),
        complex_layers.ComplexDense(units=10, activation='linear'),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    assert model(x).dtype == np.complex64
def test_functional_api():
    """Build a small U-Net-like complex model through the functional API."""
    inputs = complex_layers.complex_input(shape=(128, 128, 3))
    down1 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(inputs)
    down2 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(down1)
    pooled = complex_layers.ComplexMaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(down2)
    up = complex_layers.ComplexConv2DTranspose(5, kernel_size=2, strides=(2, 2), activation='cart_relu')(pooled)
    skip = tf.keras.layers.concatenate([up, down2], axis=-1)  # skip connection
    head = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(skip)
    out = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(head)
    tf.keras.Model(inputs, out)
# Allow running this test module directly, outside of pytest.
if __name__ == '__main__':
    test_functional_api()
    test_regression()
    test_cifar()
| 2,912 | 44.515625 | 109 | py |
cvnn | cvnn-master/tests/test_functional_api.py | from cvnn.layers import ComplexUnPooling2D, complex_input, ComplexMaxPooling2DWithArgmax, \
ComplexUpSampling2D, ComplexMaxPooling2D
import tensorflow as tf
import numpy as np
from pdb import set_trace
def get_img():
    """Return a fixed 2x3x3x1 complex test image batch (channels-last)."""
    real = np.array([[[0, 1, 2],
                      [0, 2, 2],
                      [0, 5, 7]],
                     [[0, 7, 5],
                      [3, 7, 9],
                      [4, 5, 3]]], dtype=np.float32)
    imag = np.array([[[0, 4, 5],
                      [3, 7, 9],
                      [4, 5, 3]],
                     [[0, 4, 5],
                      [3, 2, 2],
                      [4, 8, 9]]], dtype=np.float32)
    return np.reshape(real + 1j * imag, (2, 3, 3, 1))
def unpooling_example():
    """Max-pool with argmax, then un-pool back to the original input shape."""
    x = get_img()
    inputs = complex_input(shape=x.shape[1:])
    pooled, argmax = ComplexMaxPooling2DWithArgmax(strides=1, data_format="channels_last",
                                                   name="argmax")(inputs)
    # The un-pooling layer needs the pre-pooling shape and the argmax indices.
    unpooled = ComplexUnPooling2D(x.shape[1:])([pooled, argmax])
    model = tf.keras.Model(inputs=inputs, outputs=unpooled, name="pooling_model")
    model(x)  # forward pass as a smoke test
    return model
def upsampling_example():
    """Max-pool then upsample the result back up by a factor of (2, 2)."""
    x = get_img()
    inputs = complex_input(shape=x.shape[1:])
    pooled = ComplexMaxPooling2D(data_format="channels_last")(inputs)
    upsampled = ComplexUpSampling2D(size=(2, 2))(pooled)
    model = tf.keras.Model(inputs=inputs, outputs=upsampled, name="pooling_model")
    model(x)  # forward pass as a smoke test
    return model
def test_functional_api():
    """Both functional-API example models must build and run without errors."""
    for build in (upsampling_example, unpooling_example):
        build()
# Allow running this test module directly, outside of pytest.
if __name__ == "__main__":
    test_functional_api()
cvnn | cvnn-master/tests/test_custom_layers.py | import numpy as np
from cvnn.layers import ComplexDense, ComplexFlatten, ComplexInput, ComplexConv2D, ComplexMaxPooling2D, \
ComplexAvgPooling2D, ComplexConv2DTranspose, ComplexUnPooling2D, ComplexMaxPooling2DWithArgmax, \
ComplexUpSampling2D, ComplexBatchNormalization, ComplexAvgPooling1D, ComplexPolarAvgPooling2D
import cvnn.layers as complex_layers
from tensorflow.keras.models import Sequential
import tensorflow as tf
import tensorflow_datasets as tfds
from pdb import set_trace
"""
This module tests:
Correct result of Complex AVG and MAX pooling layers.
Init ComplexConv2D layer and verifies output dtype and shape.
Trains using:
ComplexDense
ComplexFlatten
ComplexInput
ComplexDropout
"""
@tf.autograph.experimental.do_not_convert
def dense_example():
    """ComplexFlatten + ComplexDense keep complex dtype and expected shapes,
    both called directly and stacked inside a Sequential model."""
    real_part = np.array([[[0, 1, 2],
                           [0, 2, 2],
                           [0, 5, 7]],
                          [[0, 4, 5],
                           [3, 7, 9],
                           [4, 5, 3]]], dtype=np.float32)
    imag_part = np.array([[[0, 4, 5],
                           [3, 7, 9],
                           [4, 5, 3]],
                          [[0, 4, 5],
                           [3, 7, 9],
                           [4, 5, 3]]], dtype=np.float32)
    img = (real_part + 1j * imag_part).astype(np.complex64)
    res = ComplexDense(units=10)(ComplexFlatten()(img))
    assert res.shape == [2, 10]
    assert res.dtype == tf.complex64
    model = tf.keras.models.Sequential([
        ComplexInput(input_shape=(3, 3)),
        ComplexFlatten(),
        ComplexDense(32, activation='cart_relu'),
        ComplexDense(32),
    ])
    assert model.output_shape == (None, 32)
    model(img)
@tf.autograph.experimental.do_not_convert
def serial_layers():
    """Complex layers must compose inside a plain keras Sequential model."""
    model = Sequential()
    model.add(ComplexDense(32, activation='relu', input_shape=(32, 32, 3)))
    model.add(ComplexDense(32))
    print(model.output_shape)
    real_part = np.array([[[0, 1, 2],
                           [0, 2, 2],
                           [0, 5, 7]],
                          [[0, 4, 5],
                           [3, 7, 9],
                           [4, 5, 3]]], dtype=np.float32)
    imag_part = np.array([[[0, 4, 5],
                           [3, 7, 9],
                           [4, 5, 3]],
                          [[0, 4, 5],
                           [3, 7, 9],
                           [4, 5, 3]]], dtype=np.float32)
    img = real_part + 1j * imag_part
    model = Sequential([ComplexFlatten(input_shape=img.shape[1:]),
                        ComplexDense(units=10)])
    model(img)
@tf.autograph.experimental.do_not_convert
def shape_ad_dtype_of_conv2d():
    """ComplexConv2D output must keep complex64 dtype and the expected shape."""
    batch_shape = (4, 28, 28, 3)
    x = tf.cast(tf.random.normal(batch_shape), tf.complex64)
    conv = ComplexConv2D(2, 3, activation='cart_relu', padding="same",
                         input_shape=batch_shape[1:], dtype=x.dtype)
    y = conv(x)
    assert y.shape == (4, 28, 28, 2)  # "same" padding keeps spatial dims
    assert y.dtype == tf.complex64
@tf.autograph.experimental.do_not_convert
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in [0, 1]; label is untouched."""
    scaled = tf.cast(image, tf.float32) / 255.
    return scaled, label
def get_dataset():
    """Load MNIST via tfds with a normalize/cache/batch/prefetch pipeline."""
    (ds_train, ds_test), _ = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=False,
        as_supervised=True,
        with_info=True,
    )
    autotune = tf.data.experimental.AUTOTUNE
    ds_train = (ds_train.map(normalize_img, num_parallel_calls=autotune)
                .cache()
                .batch(128)
                .prefetch(autotune))
    # NOTE: the test pipeline caches AFTER batching (order kept from original).
    ds_test = (ds_test.map(normalize_img, num_parallel_calls=autotune)
               .batch(128)
               .cache()
               .prefetch(autotune))
    return ds_train, ds_test
def get_img():
    """Return a fixed 2x3x3x1 complex test image batch (channels-last)."""
    real = np.array([[[0, 1, 2],
                      [0, 2, 2],
                      [0, 5, 7]],
                     [[0, 7, 5],
                      [3, 7, 9],
                      [4, 5, 3]]], dtype=np.float32)
    imag = np.array([[[0, 4, 5],
                      [3, 7, 9],
                      [4, 5, 3]],
                     [[0, 4, 5],
                      [3, 2, 2],
                      [4, 8, 9]]], dtype=np.float32)
    return np.reshape(real + 1j * imag, (2, 3, 3, 1))
@tf.autograph.experimental.do_not_convert
def complex_avg_pool_1d():
    """ComplexAvgPooling1D must match keras AveragePooling1D on real input for
    several stride/padding combinations, and produce the expected element-wise
    complex averages on a hand-made complex input."""
    x = tf.constant([1., 2., 3., 4., 5.])
    x = tf.reshape(x, [1, 5, 1])  # (batch, steps, channels)
    avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2, strides=1, padding='valid')
    tf_res = avg_pool_1d(x)
    own_res = ComplexAvgPooling1D(pool_size=2, strides=1, padding='valid')(x)
    assert np.all(tf_res.numpy() == own_res.numpy())
    avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2, strides=2, padding='valid')
    tf_res = avg_pool_1d(x)
    own_res = ComplexAvgPooling1D(pool_size=2, strides=2, padding='valid')(x)
    assert np.all(tf_res.numpy() == own_res.numpy())
    avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2, strides=1, padding='same')
    tf_res = avg_pool_1d(x)
    own_res = ComplexAvgPooling1D(pool_size=2, strides=1, padding='same')(x)
    assert np.all(tf_res.numpy() == own_res.numpy())
    # Complex case: real and imaginary parts are averaged independently.
    img_r = np.array([[
        [0, 1, 2, 0, 2, 2, 0, 5, 7]
    ], [
        [0, 4, 5, 3, 7, 9, 4, 5, 3]
    ]]).astype(np.float32)
    img_i = np.array([[
        [0, 4, 5, 3, 7, 9, 4, 5, 3]
    ], [
        [0, 4, 5, 3, 2, 2, 4, 8, 9]
    ]]).astype(np.float32)
    img = img_r + 1j * img_i
    img = np.reshape(img, (2, 9, 1))
    avg_pool = ComplexAvgPooling1D()
    res = avg_pool(img.astype(np.complex64))
    # Expected values correspond to pool_size=2, stride 2 (9 steps -> 4 windows,
    # last step dropped) -- presumably the layer defaults; confirmed by shape.
    expected = tf.expand_dims(tf.convert_to_tensor([[0.5 + 2.j, 1. + 4.j, 2. + 8.j, 2.5 + 4.5j],
                                                    [2. + 2.j, 4. + 4.j, 8. + 2.j, 4.5 + 6.j]], dtype=tf.complex64),
                              axis=-1)
    assert np.all(res.numpy() == expected.numpy())
@tf.autograph.experimental.do_not_convert
def complex_max_pool_2d(test_unpool=True):
    """Max pooling on complex images: the argmax variant and the plain variant
    must agree with each other and with hand-computed expectations; optionally
    also checks un-pooling, and finally compares against keras MaxPooling2D on
    real input.

    :param test_unpool: when True, also verify ComplexUnPooling2D restores the
        pooled maxima to their original positions (zeros elsewhere).
    """
    img = get_img()
    max_pool = ComplexMaxPooling2DWithArgmax(strides=1, data_format="channels_last")
    max_pool_2 = ComplexMaxPooling2D(strides=1, data_format="channels_last")
    res, argmax = max_pool(img.astype(np.complex64))
    res2 = max_pool_2(img.astype(np.complex64))
    expected_res = np.array([
        [[
            [2. + 7.j],
            [2. + 9.j]],
            [[2. + 7.j],
             [2. + 9.j]]],
        [[
            [7. + 4.j],
            [9. + 2.j]],
            [
                [5. + 8.j],
                [3. + 9.j]]]
    ])
    assert np.all(res.numpy() == res2.numpy())
    assert (res.numpy() == expected_res.astype(np.complex64)).all()
    if test_unpool:
        max_unpooling = ComplexUnPooling2D(img.shape[1:])
        unpooled = max_unpooling([res, argmax])
        # Each max value goes back to its argmax position; note positions
        # selected twice accumulate (e.g. 4+14j = 2*(2+7j)).
        expected_unpooled = np.array([[[0. + 0.j, 0. + 0.j, 0. + 0.j],
                                       [0. + 0.j, 4. + 14.j, 4. + 18.j],
                                       [0. + 0.j, 0. + 0.j, 0. + 0.j]],
                                      [[0. + 0.j, 7. + 4.j, 0. + 0.j],
                                       [0. + 0.j, 0. + 0.j, 9. + 2.j],
                                       [0. + 0.j, 5. + 8.j, 3. + 9.j]]]).reshape(2, 3, 3, 1)
        assert np.all(unpooled.numpy() == expected_unpooled)
    x = tf.constant([[1., 2., 3.],
                     [4., 5., 6.],
                     [7., 8., 9.]])
    x = tf.reshape(x, [1, 3, 3, 1])
    max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid')
    # NOTE(review): this local variable shadows the enclosing function's name.
    complex_max_pool_2d = ComplexMaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid')
    assert np.all(max_pool_2d(x) == complex_max_pool_2d(x))
def new_max_unpooling_2d_test():
    """Un-pooling must also work when the input has more than one channel."""
    img = get_img()
    plane = img.reshape((2, 3, 3))
    two_channel = tf.stack((plane, plane), axis=-1)  # duplicate as 2 channels
    pooled, argmax = ComplexMaxPooling2DWithArgmax(strides=1, data_format="channels_last")(
        tf.cast(two_channel, dtype=np.complex64))
    ComplexUnPooling2D(two_channel.shape[1:])([pooled, argmax])
@tf.autograph.experimental.do_not_convert
def complex_avg_pool():
    """2D average pooling on a complex image matches the hand-computed result."""
    img = get_img().astype(np.complex64)
    res = ComplexAvgPooling2D(strides=1)(img)
    expected = np.array([[[[0.75 + 3.5j], [1.75 + 6.25j]], [[1.75 + 4.75j], [4. + 6.j]]],
                         [[[4.25 + 2.25j], [7 + 3.25j]], [[4.75 + 4.25j], [6. + 5.25j]]]]
                        ).astype(np.complex64)
    assert np.all(res.numpy() == expected)
@tf.autograph.experimental.do_not_convert
def complex_polar_avg_pool():
    """Polar average pooling of unit-magnitude inputs must keep magnitude 1
    (a Cartesian average of opposing phases would shrink or cancel it)."""
    avg_pool = ComplexPolarAvgPooling2D(strides=1)
    # All entries have |z| == 1 but different phases.
    img_r = np.array([
        [0, 1, -1],
        [0, 1, 0],
        [0, 1, 0]
    ]).astype(np.float32)
    img_i = np.array([
        [1, 0, 0],
        [1, 0, -1],
        [1, 0, -1]
    ]).astype(np.float32)
    img = img_r + 1j * img_i
    img = np.reshape(img, (1, 3, 3, 1))
    res = avg_pool(img.astype(np.complex64))
    assert np.allclose(tf.math.abs(res).numpy(), 1.)
    # 1, i, -1, -i: a Cartesian mean would be exactly 0.
    img_r = np.array([
        [1, 0],
        [-1, 0]
    ]).astype(np.float32)
    img_i = np.array([
        [0, 1],
        [0, -1]
    ]).astype(np.float32)
    img = img_r + 1j * img_i
    img = np.reshape(img, (1, 2, 2, 1))
    res = avg_pool(img.astype(np.complex64))
    assert np.allclose(tf.math.abs(res).numpy(), 1.)
    # 1, i, -1, i: phases 0, pi/2, pi, pi/2 -> mean phase pi/2.
    img_r = np.array([
        [1, 0],
        [-1, 0]
    ]).astype(np.float32)
    img_i = np.array([
        [0, 1],
        [0, 1]
    ]).astype(np.float32)
    img = img_r + 1j * img_i
    img = np.reshape(img, (1, 2, 2, 1))
    res = avg_pool(img.astype(np.complex64))
    assert np.allclose(tf.math.abs(res).numpy(), 1.)
    assert np.allclose(tf.math.angle(res).numpy(), 1.57079632679)  # pi/2
@tf.autograph.experimental.do_not_convert
def complex_conv_2d_transpose():
    """Transposed convolution with fixed kernels must reproduce hand-computed
    full-correlation outputs (real dtype), and keep complex64 dtype on
    complex input."""
    value = [[1, 2, 1], [2, 1, 2], [1, 1, 2]]
    init = tf.constant_initializer(value)
    transpose_2 = ComplexConv2DTranspose(1, kernel_size=3, kernel_initializer=init, dtype=np.float32)
    # NOTE(review): `input` shadows the builtin of the same name.
    input = np.array([[55, 52], [57, 50]]).astype(np.float32).reshape((1, 2, 2, 1))
    expected = np.array([
        [55., 162., 159., 52.],
        [167., 323., 319., 154.],
        [169., 264., 326., 204.],
        [57., 107., 164., 100.]
    ], dtype=np.float32)
    assert np.allclose(transpose_2(input).numpy().reshape((4, 4)), expected)  # TODO: Check why the difference
    value = [[1, 2], [2, 1]]
    init = tf.constant_initializer(value)
    transpose_3 = ComplexConv2DTranspose(1, kernel_size=2, kernel_initializer=init, dtype=np.float32)
    expected = np.array([
        [55., 162., 104],
        [167., 323., 152],
        [114., 157, 50]
    ], dtype=np.float32)
    assert np.allclose(transpose_3(input).numpy().reshape((3, 3)), expected)
    # Complex input with zero imaginary part must still come out complex64.
    complex_transpose = ComplexConv2DTranspose(1, kernel_size=2, dtype=np.complex64)
    complex_input = (input + 1j * np.zeros(input.shape)).astype(np.complex64)
    assert complex_transpose(complex_input).dtype == tf.complex64
@tf.autograph.experimental.do_not_convert
def upsampling_near_neighbour():
    """Nearest-neighbour upsampling equals repeating entries along the spatial
    axes; the real-equivalent layer must also match keras UpSampling2D."""
    input_shape = (2, 2, 1, 3)
    x = np.arange(np.prod(input_shape)).reshape(input_shape).astype(np.float32)
    z = tf.complex(real=x, imag=x)
    zn = z.numpy()
    # Nearest-neighbour upsampling of (batch, H, W, C) is np.repeat on axes 1/2.
    y = ComplexUpSampling2D(size=(2, 3))(z)
    assert np.all(y.numpy() == np.repeat(np.repeat(zn, 2, axis=1), 3, axis=2))
    y = ComplexUpSampling2D(size=(1, 3))(z)
    assert np.all(y.numpy() == np.repeat(zn, 3, axis=2))
    upsample = ComplexUpSampling2D(size=(1, 2))
    upsample(z)  # run once so the layer is built
    y_tf = tf.keras.layers.UpSampling2D(size=(1, 2))(x)
    my_y = upsample.get_real_equivalent()(x)
    assert np.all(my_y == y_tf)
    # channels_first must also match keras.
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    my_y = ComplexUpSampling2D(size=2, data_format='channels_first')(x)
    y_tf = tf.keras.layers.UpSampling2D(size=(2, 2), data_format='channels_first')(x)
    assert np.all(my_y == y_tf)
@tf.autograph.experimental.do_not_convert
def upsampling_bilinear_corners_aligned():
    """Bilinear upsampling with align_corners=True, checked against PyTorch
    reference values (keras UpSampling2D has no align_corners option)."""
    # Pytorch examples
    # https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    z = tf.complex(real=x, imag=x)
    expected = np.array([[[[1.0000, 1.3333, 1.6667, 2.0000],
                           [1.6667, 2.0000, 2.3333, 2.6667],
                           [2.3333, 2.6667, 3.0000, 3.3333],
                           [3.0000, 3.3333, 3.6667, 4.0000]]]])
    upsample = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first', align_corners=True)
    y_complex = upsample(z)
    assert np.allclose(expected, tf.math.real(y_complex).numpy(), 0.0001)
    x = tf.convert_to_tensor([[[[1., 2., 0.],
                                [3., 4., 0.],
                                [0., 0., 0.]]]])
    expected = np.array([[[[1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000],
                           [1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000],
                           [2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000],
                           [2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000],
                           [1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000],
                           [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
    upsample = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first', align_corners=True)
    y = upsample(x)
    assert np.allclose(expected, tf.math.real(y).numpy(), 0.00001)
    # Reference worked example for bilinear interpolation:
    # https://blogs.sas.com/content/iml/2020/05/18/what-is-bilinear-interpolation.html#:~:text=Bilinear%20interpolation%20is%20a%20weighted,the%20point%20and%20the%20corners.&text=The%20only%20important%20formula%20is,x%20%5B0%2C1%5D.
    x = tf.convert_to_tensor([[[[0., 4.], [2., 1.]]]])
    z = tf.complex(real=x, imag=x)
    upsample = ComplexUpSampling2D(size=3, interpolation='bilinear', data_format='channels_first', align_corners=True)
    y_complex = upsample(z)
    # Real and imaginary parts are interpolated identically (input has equal parts).
    expected = np.array([[[[0. + 0.j, 0.8 + 0.8j,
                            1.6 + 1.6j, 2.4 + 2.4j,
                            3.2 + 3.2j, 4. + 4.j],
                           [0.4 + 0.4j, 1. + 1.j,
                            1.6 + 1.6j, 2.2 + 2.2j,
                            2.8 + 2.8j, 3.4 + 3.4j],
                           [0.8 + 0.8j, 1.2 + 1.2j,
                            1.6 + 1.6j, 2. + 2.j,
                            2.4 + 2.4j, 2.8 + 2.8j],
                           [1.2 + 1.2j, 1.4 + 1.4j,
                            1.6 + 1.6j, 1.8 + 1.8j,
                            2. + 2.j, 2.2 + 2.2j],
                           [1.6 + 1.6j, 1.6 + 1.6j,
                            1.6 + 1.6j, 1.6 + 1.6j,
                            1.6 + 1.6j, 1.6 + 1.6j],
                           [2. + 2.j, 1.8 + 1.8j,
                            1.6 + 1.6j, 1.4 + 1.4j,
                            1.2 + 1.2j, 1. + 1.j]]]])
    assert np.allclose(expected, y_complex.numpy(), 0.000001)
@tf.autograph.experimental.do_not_convert
def upsampling_bilinear_corner_not_aligned():
    """Bilinear upsampling with align_corners=False (the default) must match
    tf.keras.layers.UpSampling2D for several sizes and both data formats."""
    # Pytorch
    # https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    z = tf.complex(real=x, imag=x)
    y_tf = tf.keras.layers.UpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(z)
    # set_trace()
    assert np.all(y_tf == tf.math.real(y_own).numpy())
    x = tf.convert_to_tensor([[[[1., 2., 0.],
                                [3., 4., 0.],
                                [0., 0., 0.]]]])
    z = tf.complex(real=x, imag=x)
    y_tf = tf.keras.layers.UpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(z)
    assert np.all(y_tf == tf.math.real(y_own).numpy())
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    z = tf.complex(real=x, imag=x)
    y_tf = tf.keras.layers.UpSampling2D(size=3, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=3, interpolation='bilinear', data_format='channels_first')(z)
    assert np.allclose(y_tf, tf.math.real(y_own).numpy())
    y_tf = tf.keras.layers.UpSampling2D(size=6, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=6, interpolation='bilinear', data_format='channels_first')(z)
    assert np.allclose(y_tf, tf.math.real(y_own).numpy())
    y_tf = tf.keras.layers.UpSampling2D(size=8, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=8, interpolation='bilinear', data_format='channels_first')(z)
    assert np.all(y_tf == tf.math.real(y_own).numpy())
    # to test bicubic= https://discuss.pytorch.org/t/what-we-should-use-align-corners-false/22663/17
    # https://www.tensorflow.org/api_docs/python/tf/keras/layers/UpSampling2D
    # channels_last (default data_format) with a real numpy input.
    input_shape = (2, 2, 1, 3)
    x = np.arange(np.prod(input_shape)).reshape(input_shape)
    y_tf = tf.keras.layers.UpSampling2D(size=(1, 2), interpolation='bilinear')(x)
    y_own = ComplexUpSampling2D(size=(1, 2), interpolation='bilinear')(x)
    assert np.all(y_tf == y_own)
@tf.autograph.experimental.do_not_convert
def upsampling():
    """Umbrella test: bilinear sanity check plus the upsampling sub-tests."""
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    z = tf.complex(real=x, imag=x)
    y_tf = tf.keras.layers.UpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(x)
    y_cvnn = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(z)
    assert np.all(y_tf == tf.math.real(y_cvnn).numpy())
    upsampling_near_neighbour()
    # NOTE(review): stale name below -- the aligned-corners test is defined as
    # upsampling_bilinear_corners_aligned() (no `test_` prefix) and is never
    # invoked here. Confirm whether it was disabled on purpose before
    # re-enabling it.
    # test_upsampling_bilinear_corners_aligned()
    upsampling_bilinear_corner_not_aligned()
def check_proximity(x1, x2, name: str):
    """Return True when x1 and x2 are element-wise close (max abs diff < 0.1).

    Prints a notice when the inputs differ but are still within tolerance.

    Bug fix: a NaN difference now counts as a failure. Previously both
    ``0 < diff`` and ``diff >= th`` were False for NaN, so NaN inputs
    silently passed the check.

    :param x1: first array-like (anything ``np.abs``/``np.max`` accept).
    :param x2: second array-like, broadcast-compatible with ``x1``.
    :param name: label used in the printed notice.
    :return: True when close enough, False otherwise.
    """
    th = 0.1
    diff = np.max(np.abs(x1 - x2))
    if np.isnan(diff):  # NaN comparisons are always False -- reject explicitly
        return False
    if 0 < diff < th:
        print(f"{name} are equal with an error of {diff}")
    if diff >= th:
        return False
    return True
def batch_norm():
    """ComplexBatchNormalization on real-valued input must track keras
    BatchNormalization: outputs, moving statistics, gamma and beta, both in
    inference mode and after a training step; the two covariance methods
    (default and cov_method=2) must also agree with each other."""
    # z = tf.transpose(tf.convert_to_tensor([[[-1, 1] * 10] * 20] * 2))
    # c_bn = ComplexBatchNormalization(dtype=np.float32)
    # c_out = c_bn(z, training=True)
    # # set_trace()
    # assert check_proximity(c_out, z, "Normalized input")
    z = np.random.rand(3, 43, 12, 10)  # + np.random.rand(3, 43, 12, 75)*1j
    # z = np.random.rand(100, 10)
    bn = tf.keras.layers.BatchNormalization(epsilon=0)
    c_bn = ComplexBatchNormalization(dtype=np.float32)  # If I use the complex64 then the init is different
    c_bn_2 = ComplexBatchNormalization(dtype=np.float32, cov_method=2)
    input = tf.convert_to_tensor(z.astype(np.float32), dtype=np.float32)
    out = bn(input, training=False)
    c_out = c_bn(input, training=False)
    assert check_proximity(out, c_out, "Results before training")
    assert check_proximity(bn.moving_mean, c_bn.moving_mean, "Moving mean before training")
    # The real variance is compared against component [..., 0, 0] of the
    # complex covariance matrix.
    assert check_proximity(bn.moving_variance, c_bn.moving_var[..., 0, 0], "Moving variance before training")
    assert check_proximity(bn.gamma, c_bn.gamma, "Gamma before training")
    assert check_proximity(bn.beta, c_bn.beta, "Beta before training")
    out = bn(input, training=True)
    c_out = c_bn(input, training=True)
    assert check_proximity(out, c_out, "Results after training")
    assert check_proximity(bn.moving_mean, c_bn.moving_mean, "Moving mean after training")
    assert check_proximity(bn.moving_variance, c_bn.moving_var[..., 0, 0], "Moving variance after training")
    assert check_proximity(bn.gamma, c_bn.gamma, "Gamma after training")
    assert check_proximity(bn.beta, c_bn.beta, "Beta after training")
    c_out_2 = c_bn_2(input, training=True)
    assert check_proximity(c_out, c_out_2, "Method comparison results after training")
    assert check_proximity(c_bn_2.moving_mean, c_bn.moving_mean, "Method comparison Moving mean after training")
    assert check_proximity(c_bn_2.moving_var, c_bn.moving_var, "Method comparison Moving variance after training")
    assert check_proximity(c_bn_2.gamma, c_bn.gamma, "Method comparison Gamma after training")
    assert check_proximity(c_bn_2.beta, c_bn.beta, "Method comparison Beta after training")
def pooling_layers():
    """Run every pooling-layer unit test, in the original order."""
    for tst in (complex_polar_avg_pool, complex_max_pool_2d,
                complex_avg_pool_1d, complex_avg_pool):
        tst()
@tf.autograph.experimental.do_not_convert
def test_layers():
    """Pytest entry point: run every layer test in this module."""
    for tst in (pooling_layers, new_max_unpooling_2d_test, batch_norm,
                upsampling, complex_conv_2d_transpose,
                shape_ad_dtype_of_conv2d, dense_example):
        tst()
# Allow running this test module directly, outside of pytest.
if __name__ == "__main__":
    test_layers()
| 22,749 | 40.288566 | 234 | py |
cvnn | cvnn-master/tests/test_output_dtype.py | import tensorflow as tf
import numpy as np
import cvnn.layers as complex_layers
from pdb import set_trace
def all_layers_model():
    """
    Creates a model using all possible layers to assert no layer changes the dtype to real.
    """
    batch_shape = (4, 28, 28, 3)
    x = tf.cast(tf.random.normal(batch_shape), tf.complex64)
    model = tf.keras.models.Sequential([
        complex_layers.ComplexInput(input_shape=batch_shape[1:]),  # Always use ComplexInput at the start
        complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'),
        complex_layers.ComplexAvgPooling2D((2, 2)),
        complex_layers.ComplexConv2D(64, (3, 3), activation='cart_sigmoid'),
        complex_layers.ComplexDropout(0.5),
        complex_layers.ComplexMaxPooling2D((2, 2)),
        complex_layers.ComplexConv2DTranspose(32, (2, 2)),
        complex_layers.ComplexFlatten(),
        complex_layers.ComplexDense(64, activation='cart_tanh'),
    ])
    model.compile(loss=tf.keras.losses.MeanAbsoluteError(), optimizer='adam', metrics=['accuracy'])
    assert model(x).dtype == np.complex64
    return model
def test_output_dtype():
    """Pytest entry point: the all-layers model builds and keeps a complex output."""
    all_layers_model()
# Allow running this test module directly, outside of pytest.
if __name__ == "__main__":
    test_output_dtype()
| 1,280 | 34.583333 | 111 | py |
cvnn | cvnn-master/tests/test_losses.py | from cvnn.losses import ComplexAverageCrossEntropy, ComplexWeightedAverageCrossEntropy, \
ComplexAverageCrossEntropyIgnoreUnlabeled
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from cvnn.layers import ComplexDense, complex_input
from pdb import set_trace
def to_categorical_unlabeled(sparse, classes=2):
    """One-hot encode an integer label map where 0 means 'unlabeled'.

    Label v > 0 maps to a one-hot vector with a 1 at index v - 1; label 0
    maps to an all-zeros vector. Vectorized (no Python loops) and
    generalized to input of any rank -- the original hand-rolled loops only
    supported rank-3 input.

    :param sparse: array-like of non-negative integer labels.
    :param classes: number of classes (size of the trailing one-hot axis).
    :return: float array of shape ``sparse.shape + (classes,)``.
    """
    sparse = np.asarray(sparse)
    cat = np.zeros(sparse.shape + (classes,))
    labeled = np.nonzero(sparse)  # positions of labeled (non-zero) entries
    cat[labeled + (sparse[labeled] - 1,)] = 1
    return cat
def averaging_method():
    """CategoricalCrossentropy averages per pixel, not per image."""
    y_true = to_categorical_unlabeled(np.array([
        [[1, 1], [1, 1]],
        [[0, 0], [0, 2]]
    ]))
    y_pred = to_categorical_unlabeled(np.array([
        [[1, 1], [1, 2]],
        [[1, 1], [1, 1]]
    ]))
    class_loss = CategoricalCrossentropy()(y_pred=y_pred, y_true=y_true)
    per_pixel = tf.keras.metrics.categorical_crossentropy(y_pred=y_pred, y_true=y_true).numpy()
    # The class result equals the global per-pixel mean...
    assert np.allclose(np.mean(per_pixel), class_loss)
    # ...which is also the mean of the per-image means.
    assert np.mean(np.mean(per_pixel, axis=(1, 2))) == np.mean(per_pixel)
def no_label_test():
    """Unlabeled pixels (class 0) must not contribute to the loss."""
    y_pred = to_categorical_unlabeled(np.array([[[1, 2], [1, 2]]]))
    # Same (all correct) predictions: once with most pixels unlabeled...
    y_true_partial = to_categorical_unlabeled(np.array([[[0, 0], [0, 2]]]))
    # ...and once fully labeled.
    y_true_full = to_categorical_unlabeled(np.array([[[1, 2], [1, 2]]]))
    result_partial = ComplexAverageCrossEntropyIgnoreUnlabeled()(y_pred=tf.complex(y_pred, y_pred),
                                                                y_true=y_true_partial)
    result_full = ComplexAverageCrossEntropyIgnoreUnlabeled()(y_pred=tf.complex(y_pred, y_pred),
                                                              y_true=y_true_full)
    assert result_partial == result_full
def ace():
    """ComplexAverageCrossEntropy matches keras CategoricalCrossentropy for a
    real input and for a complex input with equal real/imaginary parts."""
    y_pred = np.random.rand(3, 43, 12, 10)
    y_true = np.random.rand(3, 43, 12, 10)
    tf_result = CategoricalCrossentropy()(y_pred=y_pred, y_true=y_true)
    own_result = ComplexAverageCrossEntropy()(y_pred=tf.complex(y_pred, y_pred), y_true=y_true)
    own_real_result = ComplexAverageCrossEntropy()(y_pred=tf.convert_to_tensor(y_pred, dtype=np.float64),
                                                   y_true=y_true)
    assert tf_result == own_real_result, f"ComplexCrossentropy {own_real_result} != CategoricalCrossentropy {tf_result}"
    assert tf_result == own_result, f"ComplexCrossentropy {own_result} != CategoricalCrossentropy {tf_result}"
def weighted_loss():
    """A class weight > 1 on the misclassified class must increase the loss."""
    # 9 correct majority samples, one class-1 sample always predicted wrong.
    y_true = np.array([[1., 0.]] * 9 + [[0., 1.]])
    y_pred = np.array([[1., 0.]] * 10)
    ace = ComplexAverageCrossEntropy()(y_pred=tf.complex(y_pred, y_pred), y_true=y_true)
    wace = ComplexWeightedAverageCrossEntropy(weights=[1., 9.])(y_pred=tf.complex(y_pred, y_pred), y_true=y_true)
    assert ace.numpy() < wace.numpy(), f"ACE {ace.numpy()} > WACE {wace.numpy()}"
def test_losses():
    """Pytest entry point: run the loss tests in this module."""
    no_label_test()
    averaging_method()
    # NOTE(review): weighted_loss() is defined above but left disabled here;
    # confirm whether it should be re-enabled.
    # weighted_loss()
    ace()
# Allow running this test module directly, outside of pytest.
if __name__ == "__main__":
    test_losses()
| 3,533 | 31.722222 | 120 | py |
cvnn | cvnn-master/tests/test_metrics.py | import numpy as np
from tensorflow.keras.metrics import CategoricalAccuracy
import tensorflow as tf
from pdb import set_trace
from cvnn.metrics import ComplexAverageAccuracy, ComplexCategoricalAccuracy
def test_with_tf():
    """ComplexCategoricalAccuracy must reproduce tf.keras CategoricalAccuracy
    when unlabeled (all-zero) rows are NOT ignored."""
    # Random multi-dimensional case.
    classes = 3
    y_true = tf.cast(tf.random.uniform(shape=(34, 54, 12), maxval=classes), dtype=tf.int32)
    y_pred = tf.nn.softmax(tf.cast(tf.random.uniform(shape=(34, 54, 12, classes), maxval=1), dtype=tf.float64))
    y_true_one_hot = tf.one_hot(y_true, depth=classes)
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true_one_hot, y_pred)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true_one_hot, y_pred)
    assert own_metric.result().numpy() == tf_metric.result().numpy()
    # tf does NOT ignore all-zero (unlabeled) rows -- neither do we here.
    y_true = np.array([[1., 0., 0., 0.]] * 4 + [[0., 0., 0., 0.]]
                      + [[0., 0., 1., 0.]] * 2 + [[0., 0., 0., 0.]]
                      + [[0., 0., 1., 0.]])
    y_pred = np.array([[1., 0., 0., 0.], [.8, 0., 0.2, 0.]]
                      + [[1., 0., 0., 0.]] * 3
                      + [[0., 0., 0.1, .9]] + [[0., 0., 0., 1.]] * 3)
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true, y_pred)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true, y_pred, ignore_unlabeled=False)  # behave like tf
    assert own_metric.result().numpy() == tf_metric.result().numpy()
    # Heavily unbalanced binary case.
    y_true = np.array([[1., 0.]] * 9 + [[0., 1.]])
    y_pred = np.array([[1., 0.]] * 10)
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true, y_pred)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true, y_pred, ignore_unlabeled=False)  # behave like tf
    assert own_metric.result().numpy() == tf_metric.result().numpy()
def test_metric():
    """Categorical vs average accuracy on small hand-made examples."""
    y_true = [[0, 0, 0],
              [0, 0, 1],
              [0, 1, 0], [0, 1, 0],
              [1, 0, 0]]
    y_pred = [[0.1, 0.9, 0.8],
              [0.1, 0.9, 0.8],
              [0.05, 0.95, 0], [0.95, 0.05, 0],
              [0, 1, 0]]
    cat_acc = ComplexCategoricalAccuracy()
    cat_acc.update_state(y_true, y_pred)
    assert cat_acc.result().numpy() == 0.25
    avg_acc = ComplexAverageAccuracy()
    avg_acc.update_state(y_true, y_pred)
    assert avg_acc.result().numpy() == np.cast['float32'](1/6)  # (0 + 0.5 + 0) / 3
    # Unbalanced binary case: 9 correct majority samples, 1 wrong minority one.
    y_true = np.array([[1., 0.]] * 9 + [[0., 1.]])
    y_pred = np.array([[1., 0.]] * 10)
    cat_acc = ComplexCategoricalAccuracy()
    cat_acc.update_state(y_true, y_pred)
    assert cat_acc.result().numpy() == np.cast['float32'](.9)
    avg_acc = ComplexAverageAccuracy()
    avg_acc.update_state(y_true, y_pred)
    assert avg_acc.result().numpy() == np.cast['float32'](0.5)  # (1 + 0) / 2
def test_null_label():
    """Accuracy metrics in the presence of all-zero (unlabeled) rows."""
    # Class 1 fully correct, class 3 always predicted as class 4 -> AA = 1/2.
    y_true = np.array([[1., 0., 0., 0.]] * 5 + [[0., 0., 1., 0.]] * 4)
    y_pred = np.array([[1., 0., 0., 0.]] * 5 + [[0., 0., 0., 1.]] * 4)
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](0.5)
    # Unlabeled rows must be ignored: 9 of the 10 labeled samples are correct.
    y_true = np.array([[1., 0.]] * 9 + [[0., 0.]] * 4 + [[0., 1.]])
    y_pred = np.array([[1., 0.]] * 9 + [[0., 1.]] * 4 + [[1., 0.]])
    m = ComplexCategoricalAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](.9)
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](0.5)
    # Four classes: 1, 2 and 4 fully correct, 3 fully wrong -> AA = 3/4.
    y_true = np.array([[1., 0., 0., 0.]] * 5 + [[0., 0., 1., 0.]] * 3
                      + [[0., 0., 0., 1.]] + [[0., 1., 0., 0.]])
    y_pred = np.array([[1., 0., 0., 0.]] * 5 + [[0., 0., 0., 1.]] * 4
                      + [[0., 1., 0., 0.]])
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](0.75)
# Allow running this test module directly, outside of pytest.
if __name__ == "__main__":
    test_null_label()
    test_with_tf()
    test_metric()
| 6,381 | 27.364444 | 111 | py |
cvnn | cvnn-master/tests/test_capacity_real_equivalent.py | import numpy as np
import cvnn.layers as layers
from time import sleep
from cvnn.layers import ComplexDense
from cvnn.real_equiv_tools import get_real_equivalent_multiplier
from tensorflow.keras.models import Sequential
from tensorflow.keras.losses import categorical_crossentropy
def shape_tst(input_size, output_size, shape_raw, classifier=True, equiv_technique='alternate_tp',
              expected_result=None):
    """Builds a complex MLP and checks the real-equivalent width multipliers.

    :param input_size: number of (complex) input features
    :param output_size: number of output units of the last (softmax) layer
    :param shape_raw: list of hidden-layer sizes (may be empty)
    :param classifier: forwarded to get_real_equivalent_multiplier
    :param equiv_technique: equivalence technique ('ratio_tp', 'alternate_tp', 'np', 'none', ...)
    :param expected_result: if given, assert the multipliers equal this value;
        otherwise just print the computed multipliers
    :raises AssertionError: when expected_result is given and does not match
    """
    model_layers = [
        layers.ComplexInput(input_shape=input_size, dtype=np.complex64)
    ]
    if len(shape_raw) == 0:
        # No hidden layers: a single softmax output layer.
        model_layers.append(
            ComplexDense(units=output_size, activation='softmax_real_with_abs', dtype=np.complex64)
        )
    else:  # len(shape_raw) > 0:
        for hidden_units in shape_raw:
            model_layers.append(ComplexDense(units=hidden_units, activation='cart_relu'))  # Add dropout!
        model_layers.append(ComplexDense(units=output_size, activation='softmax_real_with_abs'))
    complex_network = Sequential(model_layers, name="complex_network")
    complex_network.compile(optimizer='sgd', loss=categorical_crossentropy, metrics=['accuracy'])
    result = get_real_equivalent_multiplier(complex_network.layers, classifier=classifier,
                                            equiv_technique=equiv_technique)
    if expected_result is not None:
        # Bug fix: this used to `raise` a plain f-string, which itself raises
        # TypeError in Python 3 (exceptions must derive from BaseException).
        assert np.all(expected_result == result), \
            f"Expecting result {expected_result} but got {result}."
    else:
        print(result)
def test_shape():
    """Exercises get_real_equivalent_multiplier over every equivalence technique.

    Each entry below is (positional args, keyword args) for shape_tst; a two
    second pause separates consecutive runs.
    """
    cases = [
        # 'ratio_tp': the bigger the middle layers, the closer the ratio to sqrt(2).
        ((4, 2, [1, 30, 500, 400, 60, 50, 3]), {'classifier': True, 'equiv_technique': 'ratio_tp'}),
        # Regression keeps a multiplier of 1 on the first layer.
        ((4, 2, [64]), {'classifier': False, 'equiv_technique': 'ratio_tp', 'expected_result': [1., 2]}),
        # Classification: 2*(in+out)/(2*in+out) = 1.2 here.
        ((4, 2, [64]), {'classifier': True, 'equiv_technique': 'ratio_tp', 'expected_result': [1.2, 1]}),
        # 'alternate_tp' (the default technique).
        ((100, 2, []), {'expected_result': [1]}),
        ((100, 2, [64]), {'expected_result': [204 / 202, 1]}),
        ((100, 2, [100, 64]), {'expected_result': [1, 2, 1]}),
        ((100, 2, [100, 30, 64]), {'expected_result': [1, 328 / 228, 2, 1]}),
        ((100, 2, [100, 30, 40, 50]), {'expected_result': [1, 2, 1, 2, 1]}),
        ((100, 2, [100, 30, 40, 60, 30]), {'expected_result': [1, 2, 180 / 120, 1, 2, 1]}),
        ((100, 2, [100, 30, 40, 60, 50, 30]), {'expected_result': [1, 2, 1, 2, 1, 2, 1]}),
        ((100, 2, [100, 30, 40, 60, 50, 30, 60]), {'expected_result': [1, 2, 1, 180 / 140, 2, 1, 2, 1]}),
        # 'np': not capacity equivalent, every hidden layer doubled.
        ((100, 2, []), {'equiv_technique': 'np', 'expected_result': [1]}),
        ((100, 2, [64]), {'equiv_technique': 'np', 'expected_result': [2, 1]}),
        ((100, 2, [100, 64]), {'equiv_technique': 'np', 'expected_result': [2, 2, 1]}),
        ((100, 2, [100, 30, 64]), {'equiv_technique': 'np', 'expected_result': [2, 2, 2, 1]}),
        ((100, 2, [100, 30, 40, 50]), {'equiv_technique': 'np', 'expected_result': [2, 2, 2, 2, 1]}),
        ((100, 2, [100, 30, 40, 60, 50, 30]),
         {'equiv_technique': 'np', 'expected_result': [2, 2, 2, 2, 2, 2, 1]}),
        ((100, 2, [100, 30, 40, 60, 50, 30]),
         {'classifier': False, 'equiv_technique': 'np', 'expected_result': [2, 2, 2, 2, 2, 2, 2]}),
        # 'none': not capacity equivalent, all multipliers stay at 1.
        ((100, 2, []), {'equiv_technique': 'none', 'expected_result': [1]}),
        ((100, 2, [64]), {'equiv_technique': 'none', 'expected_result': [1, 1]}),
        ((100, 2, [100, 64]), {'equiv_technique': 'none', 'expected_result': [1, 1, 1]}),
        ((100, 2, [100, 30, 64]), {'equiv_technique': 'none', 'expected_result': [1, 1, 1, 1]}),
        ((100, 2, [100, 30, 40, 50]), {'equiv_technique': 'none', 'expected_result': [1, 1, 1, 1, 1]}),
        ((100, 2, [100, 30, 40, 60, 50, 30]),
         {'equiv_technique': 'none', 'expected_result': [1, 1, 1, 1, 1, 1, 1]}),
        ((100, 2, [100, 30, 40, 60, 50, 30]),
         {'classifier': False, 'equiv_technique': 'none', 'expected_result': [1, 1, 1, 1, 1, 1, 1]}),
    ]
    for index, (args, kwargs) in enumerate(cases):
        if index:  # pause between consecutive runs
            sleep(2)
        shape_tst(*args, **kwargs)
# Run the shape tests directly as a script.
if __name__ == '__main__':
    test_shape()
| 5,079 | 43.955752 | 121 | py |
cvnn | cvnn-master/tests/test_several_datasets.py | import tensorflow as tf
import numpy as np
import os
import tensorflow_datasets as tfds
from tensorflow.keras import datasets, models
from cvnn.initializers import ComplexGlorotUniform
from cvnn.layers import ComplexDense, ComplexFlatten, ComplexInput
import cvnn.layers as complex_layers
from cvnn import layers
from pdb import set_trace
from cvnn.montecarlo import run_gaussian_dataset_montecarlo
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in [0, 1]; label passes through."""
    scaled = tf.cast(image, tf.float32) / 255.
    return scaled, label
def mnist_example():
    """Trains a small real-valued network (built with complex layers) on MNIST for 2 epochs."""
    (ds_train, ds_test), ds_info = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=True,
        as_supervised=True,
        with_info=True,
    )
    # Build the input pipelines (same op order as written out step by step).
    ds_train = (ds_train
                .map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
                .cache()
                .shuffle(ds_info.splits['train'].num_examples)
                .batch(128)
                .prefetch(tf.data.experimental.AUTOTUNE))
    ds_test = (ds_test
               .map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
               .batch(128)
               .cache()
               .prefetch(tf.data.experimental.AUTOTUNE))
    model = tf.keras.models.Sequential([
        ComplexFlatten(input_shape=(28, 28, 1)),
        ComplexDense(128, activation='relu', dtype=tf.float32),
        ComplexDense(10, dtype=tf.float32),
    ])
    model.compile(
        optimizer=tf.keras.optimizers.Adam(0.001),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
    )
    model.fit(ds_train, epochs=2, validation_data=ds_test)
def fashion_mnist_example():
    """Trains a small complex model on Fashion-MNIST (data cast to complex64)."""
    cast_dtype = np.complex64
    (train_images, train_labels), (test_images, test_labels) = \
        tf.keras.datasets.fashion_mnist.load_data()
    train_images, test_images, train_labels, test_labels = (
        arr.astype(cast_dtype) for arr in (train_images, test_images, train_labels, test_labels))
    model = tf.keras.Sequential([
        ComplexInput(input_shape=(28, 28)),
        ComplexFlatten(),
        ComplexDense(128, activation='cart_relu', kernel_initializer=ComplexGlorotUniform(seed=0)),
        ComplexDense(10, activation='convert_to_real_with_abs', kernel_initializer=ComplexGlorotUniform(seed=0)),
    ])
    model.summary()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.fit(train_images, train_labels, epochs=2)
def cifar10_test():
    """Checks the Sequential and Functional CIFAR10 models train identically.

    Both models are built from the same layers with the same random seed, so
    their 2-epoch training histories must match exactly.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force CPU so both runs are comparable
    dtype_1 = 'complex64'
    (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
    # Scale pixel values into [0, 1] before casting everything to complex.
    train_images, test_images = train_images / 255.0, test_images / 255.0
    train_images = train_images.astype(dtype_1)
    test_images = test_images.astype(dtype_1)
    train_labels = train_labels.astype(dtype_1)
    test_labels = test_labels.astype(dtype_1)
    histories = []
    for build_and_fit in (cifar10_test_model_1, cifar10_test_model_2):
        tf.random.set_seed(1)  # identical initial weights for both models
        histories.append(build_and_fit(train_images, train_labels, test_images, test_labels, dtype_1))
    first, second = histories
    assert first.history == second.history, f"\n{first.history}\n !=\n{second.history}"
def cifar10_test_model_1(train_images, train_labels, test_images, test_labels, dtype_1='complex64'):
    """Sequential-API complex CNN; returns the Keras History of a 2-epoch fit."""
    model = models.Sequential([
        layers.ComplexInput(input_shape=(32, 32, 3), dtype=dtype_1),  # Never forget this!!!
        layers.ComplexConv2D(32, (3, 3), activation='cart_relu'),
        layers.ComplexMaxPooling2D((2, 2)),
        layers.ComplexConv2D(64, (3, 3), activation='cart_relu'),
        layers.ComplexAvgPooling2D((2, 2)),
        layers.ComplexConv2D(64, (3, 3), activation='cart_relu'),
        layers.ComplexFlatten(),
        layers.ComplexDense(64, activation='cart_relu'),
        layers.ComplexDense(10, activation='convert_to_real_with_abs'),
    ])
    model.summary()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model.fit(train_images, train_labels, epochs=2, validation_data=(test_images, test_labels), shuffle=False)
def cifar10_test_model_2(train_images, train_labels, test_images, test_labels, dtype_1='complex64'):
    """Functional-API twin of cifar10_test_model_1; returns the fit History."""
    inputs = layers.complex_input(shape=(32, 32, 3), dtype=dtype_1)
    hidden = layers.ComplexConv2D(32, (3, 3), activation='cart_relu')(inputs)
    hidden = layers.ComplexMaxPooling2D((2, 2))(hidden)
    hidden = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(hidden)
    hidden = layers.ComplexAvgPooling2D((2, 2))(hidden)
    hidden = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(hidden)
    hidden = layers.ComplexFlatten()(hidden)
    hidden = layers.ComplexDense(64, activation='cart_relu')(hidden)
    outputs = layers.ComplexDense(10, activation='convert_to_real_with_abs')(hidden)
    model = models.Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.summary()
    return model.fit(train_images, train_labels, epochs=2, validation_data=(test_images, test_labels), shuffle=False)
def random_dataset():
    """End-to-end smoke test: trains a complex CNN on random complex images, 5 classes."""
    x_train = np.complex64(tf.complex(tf.random.uniform([640, 65, 82, 1]), tf.random.uniform([640, 65, 82, 1])))
    x_test = np.complex64(tf.complex(tf.random.uniform([200, 65, 82, 1]), tf.random.uniform([200, 65, 82, 1])))
    y_train = np.uint8(np.random.randint(5, size=(640, 1)))
    y_test = np.uint8(np.random.randint(5, size=(200, 1)))
    model = tf.keras.models.Sequential([
        complex_layers.ComplexInput(input_shape=(65, 82, 1)),  # Always use ComplexInput at the start
        complex_layers.ComplexConv2D(8, (5, 5), activation='cart_relu'),
        complex_layers.ComplexMaxPooling2D((2, 2)),
        complex_layers.ComplexConv2D(16, (5, 5), activation='cart_relu'),
        complex_layers.ComplexFlatten(),
        complex_layers.ComplexDense(256, activation='cart_relu'),
        complex_layers.ComplexDropout(0.1),
        complex_layers.ComplexDense(64, activation='cart_relu'),
        complex_layers.ComplexDropout(0.1),
        # An activation that casts to real must be used at the last layer:
        # the loss function cannot minimize a complex number.
        complex_layers.ComplexDense(5, activation='convert_to_real_with_abs'),
    ])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'],
                  )
    model.summary()
    # Train and evaluate.
    model.fit(x_train, y_train, epochs=2, validation_data=(x_test, y_test))
    model.evaluate(x_test, y_test, verbose=2)
def test_datasets():
    """Trains small models on several datasets as an end-to-end smoke test."""
    run_gaussian_dataset_montecarlo(epochs=2, iterations=1)
    for example in (random_dataset, fashion_mnist_example, mnist_example):
        example()
    # cifar10_test()  # disabled: long download / training time
# Run the dataset smoke tests directly as a script.
if __name__ == '__main__':
    test_datasets()
| 7,601 | 41.233333 | 117 | py |
cvnn | cvnn-master/tests/test_activation_functions.py | import tensorflow as tf
from cvnn import layers, activations
if __name__ == '__main__':
    # Smoke test: every registered activation must be accepted by ComplexDense.
    for act_name in activations.act_dispatcher:
        print(act_name)
        model = tf.keras.Sequential([
            layers.ComplexInput(4),
            layers.ComplexDense(1, activation=act_name),
            layers.ComplexDense(1, activation='linear'),
        ])
cvnn | cvnn-master/cvnn/initializers.py | from abc import abstractmethod
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.keras.initializers import Initializer
import sys
from pdb import set_trace
# Typing
from typing import Optional
INIT_TECHNIQUES = {'zero_imag', 'mirror'}
def _compute_fans(shape):
"""
Taken from https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/ops/init_ops_v2.py#L994
Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1.
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
class _RandomGenerator(object):
    """
    Random generator that selects appropriate random ops.
    https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/ops/init_ops_v2.py#L1041
    """

    def __init__(self, seed=None):
        super(_RandomGenerator, self).__init__()
        # Stateless random ops need a 2-int seed; keep None when unseeded.
        self.seed = None if seed is None else [seed, 0]

    def random_normal(self, shape, mean=0.0, stddev=1, dtype=tf.dtypes.float32):
        """A deterministic random normal if seed is passed."""
        op = stateless_random_ops.stateless_random_normal if self.seed else random_ops.random_normal
        return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)

    def random_uniform(self, shape, minval, maxval, dtype):
        """A deterministic random uniform if seed is passed."""
        op = stateless_random_ops.stateless_random_uniform if self.seed else random_ops.random_uniform
        return op(shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)

    def truncated_normal(self, shape, mean, stddev, dtype):
        """A deterministic truncated normal if seed is passed."""
        op = stateless_random_ops.stateless_truncated_normal if self.seed else random_ops.truncated_normal
        return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
class ComplexInitializer(Initializer):
    """Base class for the complex-aware initializers of this module.

    Subclasses implement `_compute_limit` (the real-case limit or stddev); for
    a complex dtype the limit is divided by sqrt(2) so the variance is split
    between the real and imaginary components, and values are drawn with the
    *real* dtype of the request.
    """

    def __init__(self, distribution: str = "uniform", seed: Optional[int] = None):
        normalized = distribution.lower()
        if normalized not in {"uniform", "normal"}:
            raise ValueError("Invalid `distribution` argument:", distribution)
        self.distribution = normalized
        self._random_generator = _RandomGenerator(seed)

    def _call_random_generator(self, shape, arg, dtype):
        if self.distribution == "uniform":
            return self._random_generator.random_uniform(shape=shape, minval=-arg, maxval=arg, dtype=dtype)
        if self.distribution == "normal":
            # Same magic constant tf uses to correct the truncated-normal stddev.
            return self._random_generator.truncated_normal(shape=shape, mean=0.0,
                                                           stddev=arg / .87962566103423978, dtype=dtype)

    @abstractmethod
    def _compute_limit(self, fan_in, fan_out):
        pass

    def __call__(self, shape, dtype=tf.dtypes.complex64, **kwargs):
        fan_in, fan_out = _compute_fans(shape)
        limit = self._compute_limit(fan_in, fan_out)
        dtype = tf.dtypes.as_dtype(dtype)
        if dtype.is_complex:
            limit = limit / np.sqrt(2)  # split the variance over Re and Im
        return self._call_random_generator(shape=shape, arg=limit, dtype=dtype.real_dtype)

    def get_config(self):  # To support serialization
        return {"seed": self._random_generator.seed}
class ComplexGlorotUniform(ComplexInitializer):
    """Glorot (Xavier) uniform initializer with complex support.

    Reference: http://proceedings.mlr.press/v9/glorot10a.html

    Draws samples from a uniform distribution:
    - Real dtype: `x ~ U[-limit, limit]` with `limit = sqrt(6 / (fan_in + fan_out))`
    - Complex dtype: each of `Re{z}`, `Im{z} ~ U[-limit, limit]` with
      `limit = sqrt(3 / (fan_in + fan_out))`
    where `fan_in` / `fan_out` are the number of input / output units.

    Standalone usage:
        initializer = cvnn.initializers.ComplexGlorotUniform()
        values = initializer(shape=(2, 2))
    Usage in a cvnn layer:
        layer = cvnn.layers.ComplexDense(units=10, kernel_initializer=initializer)
    """
    __name__ = "Complex Glorot Uniform"

    def __init__(self, seed: Optional[int] = None):
        super(ComplexGlorotUniform, self).__init__(distribution="uniform", seed=seed)

    def _compute_limit(self, fan_in, fan_out):
        # Real-case limit; __call__ rescales by 1/sqrt(2) for complex dtypes.
        return tf.math.sqrt(6. / (fan_in + fan_out))
class ComplexGlorotNormal(ComplexInitializer):
    """Glorot (Xavier) normal initializer with complex support.

    Reference: http://proceedings.mlr.press/v9/glorot10a.html
    (the reference covers the uniform case; the analysis is adapted to a
    normal distribution here)

    Draws from a truncated normal centered on 0 with:
    - Real dtype: `stddev = sqrt(2 / (fan_in + fan_out))`
    - Complex dtype: stddev of each component = `1 / sqrt(fan_in + fan_out)`
    where `fan_in` / `fan_out` are the number of input / output units.

    Standalone usage:
        initializer = cvnn.initializers.ComplexGlorotNormal()
        values = initializer(shape=(2, 2))
    Usage in a cvnn layer:
        layer = cvnn.layers.ComplexDense(units=10, kernel_initializer=initializer)
    """
    __name__ = "Complex Glorot Normal"

    def __init__(self, seed: Optional[int] = None):
        super(ComplexGlorotNormal, self).__init__(distribution="normal", seed=seed)

    def _compute_limit(self, fan_in, fan_out):
        # Real-case stddev; __call__ rescales by 1/sqrt(2) for complex dtypes.
        return tf.math.sqrt(2. / (fan_in + fan_out))
class ComplexHeUniform(ComplexInitializer):
    """
    The He Uniform initializer.
    Reference: https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html
    Draws samples from a uniform distribution:
    - Real case: `x ~ U[-limit, limit]` where `limit = sqrt(6 / fan_in)`
    - Complex case: `z / Re{z} = Im{z} ~ U[-limit, limit]` where `limit = sqrt(3 / fan_in)`
    where `fan_in` is the number of input units in the weight tensor.
    ```
    # Standalone usage:
    import cvnn
    initializer = cvnn.initializers.ComplexHeUniform()
    values = initializer(shape=(2, 2))  # Returns a He Uniform tensor of shape (2, 2)
    ```
    ```
    # Usage in a cvnn layer:
    import cvnn
    initializer = cvnn.initializers.ComplexHeUniform()
    layer = cvnn.layers.ComplexDense(units=10, kernel_initializer=initializer)
    ```
    """
    __name__ = "Complex He Uniform"

    def __init__(self, seed: Optional[int] = None):
        super(ComplexHeUniform, self).__init__(distribution="uniform", seed=seed)

    def _compute_limit(self, fan_in, fan_out):
        # Real-case limit; ComplexInitializer.__call__ rescales for complex dtypes.
        return tf.math.sqrt(6. / fan_in)
class ComplexHeNormal(ComplexInitializer):
    """He normal initializer with complex support.

    Reference: https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html

    Draws from a truncated normal centered on 0 with:
    - Real dtype: `stddev = sqrt(2 / fan_in)`
    - Complex dtype: stddev of each component = `1 / sqrt(fan_in)`
    where `fan_in` is the number of input units in the weight tensor.

    Standalone usage:
        initializer = cvnn.initializers.ComplexHeNormal()
        values = initializer(shape=(2, 2))
    Usage in a cvnn layer:
        layer = cvnn.layers.ComplexDense(units=10, kernel_initializer=initializer)
    """
    __name__ = "Complex He Normal"

    def __init__(self, seed: Optional[int] = None):
        super(ComplexHeNormal, self).__init__(distribution="normal", seed=seed)

    def _compute_limit(self, fan_in, fan_out):
        # Real-case stddev; __call__ rescales by 1/sqrt(2) for complex dtypes.
        return tf.math.sqrt(2. / fan_in)
class Zeros:
    """
    Initializer that creates tensors with all elements set to zero.
    Note: for a complex `dtype` request the zeros are emitted with the matching
    *real* dtype — presumably the layer builds the complex weight from two real
    components; verify against the layer implementations.
    ```
    # Usage in a cvnn layer:
    import cvnn
    initializer = cvnn.initializers.Zeros()
    layer = cvnn.layers.ComplexDense(units=10, bias_initializer=initializer)
    ```
    """
    __name__ = "Zeros"

    def __call__(self, shape, dtype=tf.dtypes.complex64):
        real_dtype = tf.dtypes.as_dtype(dtype).real_dtype
        return tf.zeros(shape, dtype=real_dtype)
class Ones:
    """Initializer that creates all-ones tensors, emitted with the real dtype of the request."""
    __name__ = "Ones"

    def __call__(self, shape, dtype=tf.dtypes.complex64):
        real_dtype = tf.dtypes.as_dtype(dtype).real_dtype
        return tf.ones(shape, dtype=real_dtype)
# Maps initializer class names to the classes themselves — presumably used to
# resolve initializers from string identifiers elsewhere; verify against callers.
init_dispatcher = {
    "ComplexGlorotUniform": ComplexGlorotUniform,
    "ComplexGlorotNormal": ComplexGlorotNormal,
    "ComplexHeUniform": ComplexHeUniform,
    "ComplexHeNormal": ComplexHeNormal
}
if __name__ == '__main__':
    # Nothing yet: drop into the debugger when executed directly.
    set_trace()
# Module metadata.
__author__ = 'J. Agustin BARRACHINA'
__version__ = '0.0.13'
__maintainer__ = 'J. Agustin BARRACHINA'
__email__ = 'joseagustin.barra@gmail.com; jose-agustin.barrachina@centralesupelec.fr'
| 10,508 | 35.237931 | 143 | py |
cvnn | cvnn-master/cvnn/tb.py | from tensorflow.keras.callbacks import TensorBoard
from tensorflow import GradientTape
import tensorflow as tf
# This extends TensorBoard to save gradients as histogram
# ExtendedTensorBoard can then be used in replace of tf.keras.callbacks.TensorBoard.
class ExtendedTensorBoard(TensorBoard):
    """TensorBoard callback that additionally logs gradient histograms.

    Drop-in replacement for tf.keras.callbacks.TensorBoard: whenever histograms
    are logged (every `histogram_freq` epochs) it also back-propagates on one
    batch of data and writes one histogram per trainable weight's gradient.
    """

    def __init__(self, *args, data=None, **kwargs):
        """
        :param data: optional dataset of (features, labels) used to compute the
            gradients. If None, falls back to a global `dataset` variable
            (the previous behaviour — which raised NameError when no such
            global existed).
        All other arguments are forwarded to tf.keras.callbacks.TensorBoard.
        """
        super(ExtendedTensorBoard, self).__init__(*args, **kwargs)
        self.data = data

    def _log_gradients(self, epoch):
        writer = self._writers['train']  # NOTE: relies on a private Keras attribute
        # Bug fix: the data source was a bare `dataset` free variable; prefer
        # the explicitly supplied dataset and keep the global as a fallback.
        source = self.data if self.data is not None else dataset  # noqa: F821
        with writer.as_default(), GradientTape() as g:
            # Use a single batch to calculate the gradients.
            features, y_true = list(source.batch(100).take(1))[0]
            y_pred = self.model(features)  # forward-propagation
            loss = self.model.compiled_loss(y_true=y_true, y_pred=y_pred)  # calculate loss
            gradients = g.gradient(loss, self.model.trainable_weights)  # back-propagation

            # In eager mode, grads do not have names, so get them from model.trainable_weights.
            for weights, grads in zip(self.model.trainable_weights, gradients):
                tf.summary.histogram(
                    weights.name.replace(':', '_') + '_grads', data=grads, step=epoch)
        writer.flush()

    def on_epoch_end(self, epoch, logs=None):
        """Runs the standard TensorBoard logging, then adds gradient histograms."""
        # We still need the original on_epoch_end, hence the super call.
        super(ExtendedTensorBoard, self).on_epoch_end(epoch, logs=logs)
        if self.histogram_freq and epoch % self.histogram_freq == 0:
            self._log_gradients(epoch)
| 1,546 | 44.5 | 99 | py |
cvnn | cvnn-master/cvnn/losses.py | import tensorflow as tf
from tensorflow.keras import backend
from tensorflow.keras.losses import Loss, categorical_crossentropy
class ComplexAverageCrossEntropy(Loss):
    """Categorical cross-entropy averaged over the real and imaginary parts of the prediction."""

    def call(self, y_true, y_pred):
        real_part_loss = categorical_crossentropy(y_true, tf.math.real(y_pred))
        if not y_pred.dtype.is_complex:
            # Real predictions: real and imaginary losses coincide.
            return real_part_loss
        imag_part_loss = categorical_crossentropy(y_true, tf.math.imag(y_pred))
        return (real_part_loss + imag_part_loss) / 2.
class ComplexAverageCrossEntropyIgnoreUnlabeled(ComplexAverageCrossEntropy):
    """Same as ComplexAverageCrossEntropy, but drops samples whose one-hot label is all zeros."""

    def call(self, y_true, y_pred):
        labeled = tf.reduce_any(tf.cast(y_true, tf.bool), axis=-1)  # False on all-zero rows
        return super(ComplexAverageCrossEntropyIgnoreUnlabeled, self).call(
            tf.boolean_mask(y_true, labeled), tf.boolean_mask(y_pred, labeled))
class ComplexMeanSquareError(Loss):
    """Mean square error on the complex plane: mean(|y_true - y_pred|^2) over the last axis."""

    def call(self, y_true, y_pred):
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex and not y_true.dtype.is_complex:
            # Complex prediction vs real target: duplicate the target on both components.
            y_true = tf.complex(y_true, y_true)
        y_true = tf.cast(y_true, y_pred.dtype)
        squared_error = tf.math.square(tf.math.abs(y_true - y_pred))
        return tf.cast(backend.mean(squared_error, axis=-1), dtype=y_pred.dtype.real_dtype)
class ComplexWeightedAverageCrossEntropy(ComplexAverageCrossEntropy):
    """ComplexAverageCrossEntropy with per-class weights applied per sample."""

    def __init__(self, weights, **kwargs):
        self.class_weights = weights
        super(ComplexWeightedAverageCrossEntropy, self).__init__(**kwargs)

    def call(self, y_true, y_pred):
        # Per-sample weight = class weight selected by the one-hot label.
        # https://stackoverflow.com/questions/44560549/unbalanced-data-and-weighted-cross-entropy
        sample_weights = tf.reduce_sum(self.class_weights * y_true, axis=-1)
        base_loss = super(ComplexWeightedAverageCrossEntropy, self).call(y_true, y_pred)
        return base_loss * tf.cast(sample_weights, dtype=base_loss.dtype)
class ComplexWeightedAverageCrossEntropyIgnoreUnlabeled(ComplexAverageCrossEntropy):
    """Weighted complex cross-entropy that ignores unlabeled (all-zero one-hot) samples.

    Bug fix: the stored `class_weights` were never applied, making this loss
    identical to ComplexAverageCrossEntropyIgnoreUnlabeled. The weights are
    now multiplied in after masking, matching ComplexWeightedAverageCrossEntropy.
    """

    def __init__(self, weights, **kwargs):
        self.class_weights = weights
        super(ComplexWeightedAverageCrossEntropyIgnoreUnlabeled, self).__init__(**kwargs)

    def call(self, y_true, y_pred):
        # Drop samples whose one-hot label is all zeros.
        mask = tf.reduce_any(tf.cast(y_true, tf.bool), axis=-1)
        y_true = tf.boolean_mask(y_true, mask)
        y_pred = tf.boolean_mask(y_pred, mask)
        unweighted_losses = super(ComplexWeightedAverageCrossEntropyIgnoreUnlabeled, self).call(y_true, y_pred)
        # Per-sample weight = class weight selected by the (remaining) one-hot label.
        weights = tf.reduce_sum(self.class_weights * y_true, axis=-1)
        return unweighted_losses * tf.cast(weights, dtype=unweighted_losses.dtype)
# Quick self-check: ComplexMeanSquareError must match a NumPy reference
# implementation on random complex predictions with real (binary) targets.
if __name__ == "__main__":
    import numpy as np
    y_true = np.random.randint(0, 2, size=(2, 3)).astype("float32")
    y_pred = tf.complex(np.random.random(size=(2, 3)).astype("float32"),
                        np.random.random(size=(2, 3)).astype("float32"))
    loss = ComplexMeanSquareError().call(y_true, y_pred)
    expected_loss = np.mean(np.square(np.abs(tf.complex(y_true, y_true) - y_pred)), axis=-1)
    # import pdb; pdb.set_trace()
    assert np.all(loss == expected_loss)
| 3,061 | 40.378378 | 100 | py |
cvnn | cvnn-master/cvnn/utils.py | import numpy as np
from datetime import datetime
from pathlib import Path
from pdb import set_trace
import sys
from tensorflow.python.keras import Model
import tensorflow as tf # TODO: Imported only for dtype
import os
from os.path import join
from scipy.io import loadmat
# To test logger:
import cvnn
import logging
from typing import Type
logger = logging.getLogger(cvnn.__name__)
# Factor by which the last axis grows for each complex->real cast mode
# (2 when two components are concatenated, 1 when a single one is kept);
# see transform_to_real / transform_to_real_map_function below.
REAL_CAST_MODES = {
    'real_imag': 2,
    'amplitude_phase': 2,
    'amplitude_only': 1,
    'real_only': 1
}
def reset_weights(model: Model):
    """
    Re-initializes every weight of `model` in place using each layer's stored initializers.
    Recurses into nested sub-models.
    https://github.com/keras-team/keras/issues/341#issuecomment-539198392
    :param model: a (possibly nested) Keras model instance
    """
    # Note: the annotation used to be `Type[Model]` (a class object), but an
    # *instance* is expected here (`model.layers` is accessed).
    for layer in model.layers:
        if isinstance(layer, tf.keras.Model):  # a model used as a layer
            reset_weights(layer)  # apply the function recursively
            continue
        # RNN layers keep their initializers on the inner cell.
        init_container = layer.cell if hasattr(layer, 'cell') else layer
        for key, initializer in init_container.__dict__.items():
            if "initializer" not in key:  # only *_initializer attributes matter
                continue
            # Find the corresponding variable (kernel, bias, recurrent_kernel, ...).
            if key == 'recurrent_initializer':  # special case check
                var = getattr(init_container, 'recurrent_kernel')
            else:
                var = getattr(init_container, key.replace("_initializer", ""))
            var.assign(initializer(var.shape, var.dtype))
def load_matlab_matrices(fname="data_cnn1dT.mat", path="/media/barrachina/data/gilles_data/"):
    """
    Opens a Matlab matrix file (.mat) as a dict of numpy arrays.
    :param fname: file name to be opened
    :param path: path to the file
    :return: dict mapping Matlab variable names to numpy arrays
    """
    return loadmat(join(path, fname))
def create_folder(root_path, now=None):
    """
    Creates a date-stamped folder chain under root_path.
    :param root_path: root path where to create the folder chain
    :param now: datetime used for the folder names; defaults to the current time
    :return: the created path as a pathlib.Path (portable across OS)
    """
    stamp = now if now is not None else datetime.today()
    # Folders are saved inside each project, not inside the library folder.
    path = Path(root_path + stamp.strftime("%Y/%m%B/%d%A/run-%Hh%Mm%S/"))
    os.makedirs(path, exist_ok=True)  # exist_ok: no failure when runs race in parallel
    return path
def cast_to_path(path):
    """Coerces a str into a pathlib.Path; exits with an error for any other type."""
    if isinstance(path, Path):
        return path
    if isinstance(path, str):
        return Path(path)
    logger.error("Path datatype not recognized")
    sys.exit(-1)
def get_func_name(fun):
    """
    Returns the name of a function given either the function itself or its name as a string.
    :param fun: function or function name
    :return: function name
    """
    if isinstance(fun, str):
        return fun
    if callable(fun):
        return fun.__name__
    logger.error("Function not recognizable", stack_info=True)
    sys.exit(-1)
def transform_to_real_map_function(image, label, mode: str = "real_imag"):
    """tf.data map function: casts a complex image to a real representation, keeps the label.

    :param image: complex-valued tensor
    :param label: passed through unchanged
    :param mode: one of REAL_CAST_MODES ('real_imag', 'amplitude_phase',
        'amplitude_only', 'real_only')
    :return: (real-valued image, label)
    """
    if mode not in REAL_CAST_MODES:
        raise KeyError(f"Unknown real cast mode {mode}")
    if mode == 'real_imag':
        # Stack real and imaginary parts along the channel axis.
        real_image = tf.concat([tf.math.real(image), tf.math.imag(image)], axis=-1)
    elif mode == 'amplitude_phase':
        real_image = tf.concat([tf.math.abs(image), tf.math.angle(image)], axis=-1)
    elif mode == 'amplitude_only':
        real_image = tf.math.abs(image)
    elif mode == 'real_only':
        real_image = tf.math.real(image)
    else:
        raise KeyError(f"Real cast mode {mode} not implemented")
    return real_image, label
def transform_to_real(x_complex, mode: str = "real_imag"):
    """
    Transforms a complex input matrix into a real-valued matrix.
    :param x_complex: Complex-valued matrix of size mxn
    :param mode: Mode on how to transform to real. One of the following:
        - real_imag: concatenate real and imaginary parts (doubles the last axis)
        - amplitude_phase: concatenate amplitude and phase (doubles the last axis)
        - amplitude_only: absolute value of x_complex (shape unchanged)
        - real_only: real part of x_complex (shape unchanged)
    :return: real-valued matrix cast of x_complex
    """
    if not tf.dtypes.as_dtype(x_complex.dtype).is_complex:
        # Input was not complex, nothing to do
        return x_complex
    if mode not in REAL_CAST_MODES:
        raise KeyError(f"Unknown real cast mode {mode}")
    # Bug fix: this function previously read an undefined name `image` and
    # returned the undefined `x_real`, raising NameError for any complex input.
    if mode == 'real_imag':
        ret_value = np.concatenate([np.real(x_complex), np.imag(x_complex)], axis=-1)
    elif mode == 'amplitude_phase':
        ret_value = np.concatenate([np.abs(x_complex), np.angle(x_complex)], axis=-1)
    elif mode == 'amplitude_only':
        ret_value = np.abs(x_complex)
    elif mode == 'real_only':
        ret_value = np.real(x_complex)
    else:
        raise KeyError(f"Real cast mode {mode} not implemented")
    return ret_value
def cart2polar(z):
    """
    :param z: complex input
    :return: tuple (magnitude, phase) of the input
    """
    rho, phi = np.abs(z), np.angle(z)
    return rho, phi
def polar2cart(rho, angle):
    """
    :param rho: absolute value
    :param angle: phase
    :return: the complex number rho * exp(1j * angle)
    """
    phase_factor = np.exp(1j * angle)
    return rho * phase_factor
def randomize(x, y):
    """
    Randomizes the order of data samples and their corresponding labels.
    :param x: data (numpy array or tf.data.Dataset)
    :param y: data labels
    :return: Tuple of (shuffled_x, shuffled_y) keeping samples and labels aligned
    """
    if isinstance(x, tf.data.Dataset):
        # tf datasets shuffle themselves; labels already travel inside the dataset.
        return x.shuffle(1000), y
    order = np.random.permutation(y.shape[0])
    return x[order, :], y[order]
def normalize(x):
    """Rescales x linearly into [0, 1] (checked: also works for complex values)."""
    span = np.abs(np.amax(x) - np.amin(x))
    return (x - np.amin(x)) / span
def standarize(x):
    """Centers x on zero mean and scales it to unit standard deviation."""
    centered = x - np.mean(x)
    return centered / np.std(x)
def tensorflow_argmax_np_equivalent(x, num_classes):
    """One-hot encodes the per-row argmax of x into a (rows, num_classes) matrix."""
    winners = np.argmax(x, 1)
    res = np.zeros((winners.shape[0], num_classes))
    # Fancy indexing: set one 1 per row at the argmax column.
    res[np.arange(winners.shape[0]), winners] = 1
    return res
def compute_accuracy(x, y):
    """Fraction of rows of x that exactly match the corresponding rows of y."""
    row_matches = np.equal(x, y).all(axis=1)
    return np.average(row_matches)
def median_error(q_75: float, q_25: float, n: int) -> float:
    """Standard error of the median estimated from the inter-quartile range,
    using the approximation 1.57 * IQR / sqrt(n).
    :param q_75: 75th percentile (must satisfy q_75 >= q_25 >= 0)
    :param q_25: 25th percentile
    :param n: sample size
    :return: estimated standard error of the median (a float; the previous
        `-> int` annotation was wrong)
    """
    assert q_75 >= q_25 >= 0.0, f"q_75 {q_75} < q_25 {q_25}"
    return 1.57*(q_75-q_25)/np.sqrt(n)
if __name__ == "__main__":
    # Minimal sanity check that the module-level logger is wired up.
    logger.warning("Testing logger")
# Module metadata.
__author__ = 'J. Agustin BARRACHINA'
__version__ = '0.0.28'
__maintainer__ = 'J. Agustin BARRACHINA'
__email__ = 'joseagustin.barra@gmail.com; jose-agustin.barrachina@centralesupelec.fr'
| 7,066 | 31.869767 | 120 | py |
cvnn | cvnn-master/cvnn/metrics.py | import tensorflow as tf
from tensorflow.keras.metrics import Accuracy, CategoricalAccuracy, Precision, Recall, Mean
from tensorflow_addons.metrics import F1Score, CohenKappa
from tensorflow.python.keras import backend
class ComplexAccuracy(Accuracy):
    """Accuracy metric that accepts complex-valued predictions.

    Complex ``y_pred`` is reduced to a real tensor by averaging its real and
    imaginary parts before delegating to ``tf.keras.metrics.Accuracy``.
    """

    def __init__(self, name='complex_accuracy', dtype=tf.complex64, **kwargs):
        super(ComplexAccuracy, self).__init__(name=name, dtype=dtype, **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        # ignore_unlabeled gives zero weight to samples whose y_true row is
        # all zeros (i.e. unlabeled one-hot rows).
        if ignore_unlabeled:    # WARNING, this will overwrite sample_weight!
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            # Collapse complex predictions to real: (Re + Im) / 2.
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # NOTE(review): at this point y_pred is already real, so
            # imag(y_pred) == 0 and this assert only passes when y_pred is all
            # zeros. It likely intended to validate something else (y_true?) —
            # confirm intent before relying on it.
            assert tf.math.reduce_all(tf.math.real(y_pred) == tf.math.imag(y_pred)), "y_pred must be real valued"
        super(ComplexAccuracy, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexCategoricalAccuracy(CategoricalAccuracy):
    """Categorical accuracy accepting complex-valued predictions and labels.

    Complex predictions are reduced to real values by averaging their real and
    imaginary parts; complex labels must hold the same value in both parts.
    """

    def __init__(self, name='complex_categorical_accuracy', **kwargs):
        super(ComplexCategoricalAccuracy, self).__init__(name=name, **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Weight out samples whose one-hot label is all zeros (unlabeled).
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Bug fix: assert and cast must act on y_true, not the already-real y_pred
            # (see ComplexAverageAccuracy for the intended pattern).
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), "y_true must be real valued"
            y_true = tf.math.real(y_true)
        super(ComplexCategoricalAccuracy, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexPrecision(Precision):
    """Precision metric accepting complex-valued predictions and labels.

    Complex predictions are reduced to real values by averaging their real and
    imaginary parts; complex labels must hold the same value in both parts.
    """

    def __init__(self, name='complex_precision', **kwargs):
        super(ComplexPrecision, self).__init__(name=name, **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Weight out samples whose one-hot label is all zeros (unlabeled).
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Bug fix: assert and cast must act on y_true, not the already-real y_pred
            # (see ComplexAverageAccuracy for the intended pattern).
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), "y_true must be real valued"
            y_true = tf.math.real(y_true)
        super(ComplexPrecision, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexRecall(Recall):
    """Recall metric accepting complex-valued predictions and labels.

    Complex predictions are reduced to real values by averaging their real and
    imaginary parts; complex labels must hold the same value in both parts.
    """

    def __init__(self, name='complex_recall', **kwargs):
        super(ComplexRecall, self).__init__(name=name, **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Weight out samples whose one-hot label is all zeros (unlabeled).
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Bug fix: assert and cast must act on y_true, not the already-real y_pred
            # (see ComplexAverageAccuracy for the intended pattern).
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), "y_true must be real valued"
            y_true = tf.math.real(y_true)
        super(ComplexRecall, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexCohenKappa(CohenKappa):
    """Cohen's kappa accepting complex-valued predictions and labels.

    Complex predictions are reduced to real values by averaging their real and
    imaginary parts; complex labels must hold the same value in both parts.
    """

    def __init__(self, name='complex_cohen_kappa', **kwargs):
        super(ComplexCohenKappa, self).__init__(name=name, **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Weight out samples whose one-hot label is all zeros (unlabeled).
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Bug fix: assert and cast must act on y_true, not the already-real y_pred
            # (see ComplexAverageAccuracy for the intended pattern).
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), "y_true must be real valued"
            y_true = tf.math.real(y_true)
        super(ComplexCohenKappa, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexF1Score(F1Score):
    """F1 score accepting complex-valued predictions and labels.

    Complex predictions are reduced to real values by averaging their real and
    imaginary parts; complex labels must hold the same value in both parts.
    """

    def __init__(self, name='complex_f1_score', **kwargs):
        super(ComplexF1Score, self).__init__(name=name, **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Weight out samples whose one-hot label is all zeros (unlabeled).
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Bug fix: assert and cast must act on y_true, not the already-real y_pred
            # (see ComplexAverageAccuracy for the intended pattern).
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), "y_true must be real valued"
            y_true = tf.math.real(y_true)
        super(ComplexF1Score, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
def _accuracy(y_true, y_pred):
    # Per-sample accuracy along the last axis: fraction of positions where y_true
    # and y_pred match. divide_no_nan returns 0 (instead of NaN) for empty inputs.
    y_true.shape.assert_is_compatible_with(y_pred.shape)
    if y_true.dtype != y_pred.dtype:
        y_pred = tf.cast(y_pred, y_true.dtype)
    reduced_sum = tf.reduce_sum(tf.cast(tf.math.equal(y_true, y_pred), backend.floatx()), axis=-1)
    return tf.math.divide_no_nan(reduced_sum, tf.cast(tf.shape(y_pred)[-1], reduced_sum.dtype))
def custom_average_accuracy(y_true, y_pred):
    """Average (per-class) accuracy: the accuracy of each class present in y_true
    is computed separately and the results are averaged, so frequent classes do
    not dominate. All-zero one-hot labels are treated as unlabeled and removed."""
    # Mask to remove the labels (y_true) that are zero: ex. [0, 0, 0]
    remove_zeros_mask = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
    y_true = tf.boolean_mask(y_true, remove_zeros_mask)
    y_pred = tf.boolean_mask(y_pred, remove_zeros_mask)
    num_cls = y_true.shape[-1]  # get total amount of classes
    y_pred = tf.math.argmax(y_pred, axis=-1)  # one hot encoded to sparse
    y_true = tf.math.argmax(y_true, axis=-1)  # ex. [0, 0, 1] -> [2]
    # TensorArray so the per-class loop stays graph-compatible (dynamic number of
    # classes actually present in y_true).
    accuracies = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    for i in range(0, num_cls):
        cls_mask = y_true == i
        cls_y_true = tf.boolean_mask(y_true, cls_mask)
        if tf.not_equal(tf.size(cls_y_true), 0):
            # Only classes that appear in y_true contribute to the average.
            new_acc = _accuracy(y_true=cls_y_true, y_pred=tf.boolean_mask(y_pred, cls_mask))
            accuracies = accuracies.write(accuracies.size(), new_acc)
    # import pdb; pdb.set_trace()
    accuracies = accuracies.stack()
    # return tf.cast(len(accuracies), dtype=accuracies.dtype)
    return tf.math.reduce_sum(accuracies) / tf.cast(len(accuracies), dtype=accuracies.dtype)
class ComplexAverageAccuracy(Mean):
    """Mean wrapper around :func:`custom_average_accuracy` supporting complex inputs."""

    def __init__(self, name='complex_average_accuracy', dtype=None):
        self._fn = custom_average_accuracy
        super(ComplexAverageAccuracy, self).__init__(name, dtype=dtype)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # WARNING: sample_weights will not be used
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:  # make y_pred real valued
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:  # make y_true real valued
            # Bug fix: the assertion message referred to y_pred although the check is on y_true.
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), "y_true must be real valued"
            y_true = tf.math.real(y_true)
        matches = self._fn(y_true, y_pred)
        return super(ComplexAverageAccuracy, self).update_state(matches)
if __name__ == '__main__':
    # Quick manual check: the first two predictions match and the last two do not,
    # so the printed accuracy should be 0.5.
    m = ComplexAccuracy()
    m.update_state([[1+1j], [2+1j], [3+1j], [4+1j]], [[1+1j], [2+1j], [3+5j], [4+5j]])
    print(m.result().numpy())
| 8,397 | 50.521472 | 120 | py |
cvnn | cvnn-master/cvnn/activations.py | import tensorflow as tf
from tensorflow.keras.layers import Activation
from typing import Union, Callable, Optional
from tensorflow import Tensor
from numpy import pi
"""
This module contains many complex-valued activation functions to be used by CVNN class.
"""
# logger = logging.getLogger(cvnn.__name__)
# Type accepted wherever an activation can be specified: either the name of a
# function registered in `act_dispatcher` or the callable itself.
t_activation = Union[str, Callable]  # TODO: define better
# Regression
def linear(z: Tensor) -> Tensor:
    """
    Identity activation: no function is applied, the input is returned as-is.

    :param z: Input tensor variable
    :return: z unchanged
    """
    return z
def modrelu(z: Tensor, b: float = 1., c: float = 1e-3) -> Tensor:
    """
    mod ReLU presented in "Unitary Evolution Recurrent Neural Networks"
        from M. Arjovsky et al. (2016)
        URL: https://arxiv.org/abs/1511.06464
    A variation of the ReLU named modReLU. It is a pointwise nonlinearity,
    modReLU(z) : C -> C, which affects only the absolute
    value of a complex number, defined:
        modReLU(z) = ReLU(|z|+b)*z/|z|
    :param z: input tensor
    :param b: bias added to the modulus before the ReLU (default 1.)
    :param c: small constant added to the denominator to avoid dividing by a zero modulus (default 1e-3)
    TODO: See how to check the non zero abs.
    """
    # The ReLU acts on the (biased) modulus only; multiplying by z/(|z|+c) restores the phase.
    abs_z = tf.math.abs(z)
    return tf.cast(tf.keras.activations.relu(abs_z + b), dtype=z.dtype) * z / tf.cast(abs_z + c, dtype=z.dtype)
def zrelu(z: Tensor, epsilon=1e-7) -> Tensor:
    """
    zReLU presented in "On Complex Valued Convolutional Neural Networks"
        from Nitzan Guberman (2016).
    This methods let's the output as the input if both real and imaginary parts are positive.
    https://stackoverflow.com/questions/49412717/advanced-custom-activation-function-in-keras-tensorflow
    :param z: input tensor
    :param epsilon: small constant preventing 0/0 when one of the relu'd parts is zero
    """
    # relu(imag) * relu(real) is non-zero only when BOTH parts are positive; dividing
    # by the other relu'd part (plus epsilon) recovers each original component there,
    # and yields (approximately) zero everywhere else.
    imag_relu = tf.nn.relu(tf.math.imag(z))
    real_relu = tf.nn.relu(tf.math.real(z))
    ret_real = imag_relu*real_relu / (imag_relu + epsilon)
    ret_imag = imag_relu*real_relu / (real_relu + epsilon)
    ret_val = tf.complex(ret_real, ret_imag)
    return ret_val
def crelu(z: Tensor, alpha: float = 0.0, max_value: Optional[float] = None, threshold: float = 0) -> Tensor:
    """
    Alias of cart_relu: a real-valued ReLU applied separately to the real and
    imaginary parts of z.
    """
    return cart_relu(z, alpha=alpha, max_value=max_value, threshold=threshold)
def complex_cardioid(z: Tensor) -> Tensor:
    """
    Complex cardioid presented in "Better than Real: Complex-valued Neural Nets for MRI Fingerprinting"
        from V. Patrick (2017).
    Attenuates the magnitude according to the phase while keeping the phase itself;
    for real-valued inputs it reduces to the ReLU.
    """
    scale = 1 + tf.math.cos(tf.math.angle(z))
    return tf.cast(scale, dtype=z.dtype) * z / 2.
"""
Complex input, real output
"""
def cast_to_real(z: Tensor) -> Tensor:
    # Drop the imaginary part by casting to the real counterpart of z's dtype
    # (e.g. complex64 -> float32). Real inputs pass through unchanged.
    return tf.cast(z, z.dtype.real_dtype)
def sigmoid_real(z: Tensor) -> Tensor:
    # Real-valued sigmoid applied to the sum of the real and imaginary parts of z.
    return tf.keras.activations.sigmoid(tf.math.real(z) + tf.math.imag(z))
def softmax_real_with_abs(z: Tensor, axis=-1) -> Tensor:
    """
    Applies the (real) softmax to the modulus of z, producing a real-valued
    output in range (0, 1) that sums to 1 along `axis` — usable as a probability
    distribution for classification.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if not z.dtype.is_complex:
        return tf.keras.activations.softmax(z, axis)
    return tf.keras.activations.softmax(tf.math.abs(z), axis)
def softmax_real_with_avg(z: Tensor, axis=-1) -> Tensor:
    """
    Averages the real softmax of the real part and of the imaginary part of z,
    producing a real-valued output in range (0, 1) that sums to 1 along `axis`.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if z.dtype.is_complex:
        # Bug fix: both terms used tf.math.real(z), which collapses
        # 0.5*(s+s) to softmax(real(z)) and silently discards the imaginary part.
        return 0.5 * (tf.keras.activations.softmax(tf.math.real(z), axis) + tf.keras.activations.softmax(
            tf.math.imag(z), axis))
    else:
        return tf.keras.activations.softmax(z, axis)
def softmax_real_with_mult(z: Tensor, axis=-1) -> Tensor:
    """
    Multiplies the real softmax of the real part with the real softmax of the
    imaginary part of z, producing a real-valued output.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if z.dtype.is_complex:
        # Bug fix: both factors used tf.math.real(z) (softmax(real)**2),
        # silently discarding the imaginary part.
        return tf.keras.activations.softmax(tf.math.real(z), axis) * tf.keras.activations.softmax(tf.math.imag(z), axis)
    else:
        return tf.keras.activations.softmax(z, axis)
def softmax_of_softmax_real_with_mult(z: Tensor, axis=-1) -> Tensor:
    """
    Applies a second softmax on top of the product of the real softmax of the
    real part and of the imaginary part of z, producing a real-valued output.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if z.dtype.is_complex:
        # Bug fix: both inner factors used tf.math.real(z), silently discarding the
        # imaginary part.
        return tf.keras.activations.softmax(
            tf.keras.activations.softmax(tf.math.real(z), axis) * tf.keras.activations.softmax(tf.math.imag(z), axis),
            axis)
    else:
        return tf.keras.activations.softmax(z, axis)
def softmax_of_softmax_real_with_avg(z: Tensor, axis=-1) -> Tensor:
    """
    Applies a second softmax on top of the sum of the real softmax of the real
    part and of the imaginary part of z, producing a real-valued output.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if z.dtype.is_complex:
        # Bug fix: both inner terms used tf.math.real(z), silently discarding the
        # imaginary part.
        return tf.keras.activations.softmax(
            tf.keras.activations.softmax(tf.math.real(z), axis) + tf.keras.activations.softmax(tf.math.imag(z), axis),
            axis)
    else:
        return tf.keras.activations.softmax(z, axis)
def softmax_real_by_parameter(z: Tensor, axis=-1, params: Optional[dict] = None) -> Tensor:
    """
    Stacks the real softmax of several real representations of z (modulus,
    phase, real part, imaginary part), selected through the boolean flags of
    ``params``.

    :param z: Input tensor.
    :param axis: axis along which each softmax is computed.
    :param params: dict mapping any of 'abs'/'angle'/'real'/'imag' to a boolean;
        defaults to all four enabled.
    :return: Tensor stacking the selected softmax outputs.
    """
    if params is None:
        params = {
            'abs': True,
            'angle': True,
            'real': True,
            'imag': True
        }
    result = []
    # Bug fix: iterating a dict directly yields only keys, so the original
    # `for k, v in params` raised ValueError on unpacking. `.items()` provides
    # the (key, flag) pairs the loop body expects.
    for k, v in params.items():
        if k == 'abs' and v:
            result.append(tf.keras.activations.softmax(tf.math.abs(z), axis))
        if k == 'angle' and v:
            result.append(tf.keras.activations.softmax(tf.math.angle(z), axis))
        if k == 'real' and v:
            result.append(tf.keras.activations.softmax(tf.math.real(z), axis))
        if k == 'imag' and v:
            result.append(tf.keras.activations.softmax(tf.math.imag(z), axis))
    return tf.convert_to_tensor(result)
def convert_to_real_with_abs(z: Tensor) -> Tensor:
    """
    Returns the modulus of complex inputs; real inputs pass through unchanged.
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if not z.dtype.is_complex:
        return z
    return tf.math.abs(z)
def softmax_real_with_polar(z: Tensor, axis=-1) -> Tensor:
    """
    Averages the real softmax of the modulus and of the phase of z, producing a
    real-valued output in range (0, 1) that sums to 1 along `axis`.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if not z.dtype.is_complex:
        return tf.keras.activations.softmax(z, axis)
    amp_part = tf.keras.activations.softmax(tf.math.abs(z), axis)
    pha_part = tf.keras.activations.softmax(tf.math.angle(z), axis)
    return 0.5 * (amp_part + pha_part)
"""
etf Functions
"""
# Elementary Transcendental Functions (ETF): thin wrappers over tf's complex math ops.
def etf_circular_tan(z: Tensor) -> Tensor:
    # Element-wise complex tangent.
    return tf.math.tan(z)
def etf_circular_sin(z: Tensor) -> Tensor:
    # Element-wise complex sine.
    return tf.math.sin(z)
def etf_inv_circular_atan(z: Tensor) -> Tensor:
    # Element-wise complex arc-tangent.
    return tf.math.atan(z)
def etf_inv_circular_asin(z: Tensor) -> Tensor:
    # Element-wise complex arc-sine.
    return tf.math.asin(z)
def etf_inv_circular_acos(z: Tensor) -> Tensor:
    # Element-wise complex arc-cosine.
    return tf.math.acos(z)
def etf_circular_tanh(z: Tensor) -> Tensor:
    # Element-wise complex hyperbolic tangent.
    return tf.math.tanh(z)
def etf_circular_sinh(z: Tensor) -> Tensor:
    # Element-wise complex hyperbolic sine.
    return tf.math.sinh(z)
def etf_inv_circular_atanh(z: Tensor) -> Tensor:
    # Element-wise complex inverse hyperbolic tangent.
    return tf.math.atanh(z)
def etf_inv_circular_asinh(z: Tensor) -> Tensor:
    # Element-wise complex inverse hyperbolic sine.
    return tf.math.asinh(z)
"""
Phasor Networks
"""
def georgiou_cdbp(z:Tensor, r: float = 1, c: float = 1e-3) -> Tensor:
    """
    Activation function proposed by G. M. Georgioy and C. Koutsougeras in
        https://ieeexplore.ieee.org/abstract/document/142037
    :param z: input tensor
    :param r: scaling of the modulus in the denominator
    :param c: small constant preventing division by zero at z == 0
    """
    # Keeps the phase of z while squashing the modulus to |z| / (c + |z|/r) < r.
    return z / tf.cast(c + tf.math.abs(z)/r, dtype=z.dtype)
def complex_signum(z: Tensor, k: Optional[int] = None) -> Tensor:
    """
    Complex signum activation function is very similar to mvn_activation.
    Maps the input onto the unit circle; with `k` given, the phase is quantized
    into k discrete sectors of width 2*pi/k.
    For a detailed explanation refer to:
        https://ieeexplore.ieee.org/abstract/document/548176
    :param z: input tensor
    :param k: optional number of phase sectors; None keeps the exact phase
    """
    if k:
        # values = np.linspace(pi / k, 2 * pi - pi / k, k)
        # NOTE(review): floor() on angles in (-pi, pi] yields negative sector
        # indices for negative phases — confirm this is the intended numbering.
        angle_cast = tf.math.floor(tf.math.angle(z) * k / (2 * pi))
        # import pdb; pdb.set_trace()
        return tf.math.exp(tf.complex(
            tf.zeros(tf.shape(z), dtype=z.dtype.real_dtype), angle_cast * 2 * pi / k))
    else:
        # exp(j * angle(z)): unit-modulus output keeping the exact phase.
        return tf.math.exp(tf.complex(tf.zeros(tf.shape(z), dtype=z.dtype.real_dtype), tf.math.angle(z)))
def mvn_activation(z: Tensor, k: Optional[int] = None) -> Tensor:
    """
    Function inspired by Naum Aizenberg.
    A multi-valued neuron (MVN) is a neural element with n inputs and one output lying on the unit circle,
        and with complex-valued weights.
    With `k` given, the phase is quantized to the center of one of k sectors
    (note the `+ 0.5` below, unlike `complex_signum` which snaps to sector edges).
    Works:
        https://link.springer.com/article/10.1007%2FBF01068667
        http://pefmath2.etf.rs/files/93/399.pdf
    :param z: input tensor
    :param k: optional number of phase sectors; None keeps the exact phase
    """
    if k:
        # values = np.linspace(pi / k, 2 * pi - pi / k, k)
        angle_cast = tf.math.floor(tf.math.angle(z) * k / (2 * pi))
        # import pdb; pdb.set_trace()
        return tf.math.exp(tf.complex(
            tf.zeros(tf.shape(z), dtype=z.dtype.real_dtype), (angle_cast + 0.5) * 2 * pi / k))
    else:
        # exp(j * angle(z)): unit-modulus output keeping the exact phase.
        return tf.math.exp(tf.complex(tf.zeros(tf.shape(z), dtype=z.dtype.real_dtype), tf.math.angle(z)))
"""
TYPE A: Cartesian form.
"""
# TODO: shall I use tf.nn or tf.keras.activation modules?
# https://stackoverflow.com/questions/54761088/tf-nn-relu-vs-tf-keras-activations-relu
# nn has leaky relu, activation doesn't
def cart_sigmoid(z: Tensor) -> Tensor:
    """
    Applies a real-valued sigmoid independently to the real and imaginary parts:
        sigmoid(x) + j * sigmoid(y), with z = x + j * y
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/sigmoid
    :param z: Tensor to be used as input of the activation function
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.sigmoid(tf.math.real(z))
    im_part = tf.keras.activations.sigmoid(tf.math.imag(z))
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
def cart_elu(z: Tensor, alpha=1.0) -> Tensor:
    """
    Exponential linear unit (x if x > 0, alpha * (exp(x)-1) otherwise) applied
    separately to the real and imaginary parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/elu
    :param z: Input tensor.
    :param alpha: A scalar, slope of negative section.
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.elu(tf.math.real(z), alpha)
    im_part = tf.keras.activations.elu(tf.math.imag(z), alpha)
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
def cart_exponential(z: Tensor) -> Tensor:
    """
    Exponential activation exp(x) applied separately to the real and imaginary
    parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/exponential
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.exponential(tf.math.real(z))
    im_part = tf.keras.activations.exponential(tf.math.imag(z))
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
def cart_hard_sigmoid(z: Tensor) -> Tensor:
    """
    Hard sigmoid applied separately to the real and imaginary parts of z.
    Faster to compute than the smooth sigmoid:
        0                .. x < -2.5
        1                .. x > 2.5
        0.2 * x + 0.5    .. -2.5 <= x <= 2.5
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/hard_sigmoid
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.hard_sigmoid(tf.math.real(z))
    im_part = tf.keras.activations.hard_sigmoid(tf.math.imag(z))
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
def cart_relu(z: Tensor, alpha: float = 0.0, max_value: Optional[float] = None, threshold: float = 0) -> Tensor:
    """
    Rectified Linear Unit applied separately to the real and imaginary parts of z.
    With default values it returns element-wise max(x, 0); otherwise:
        f(x) = max_value                .. x >= max_value
        f(x) = x                        .. threshold <= x < max_value
        f(x) = alpha * (x - threshold)  .. otherwise
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/relu
    :param z: Tensor -- Input tensor.
    :param alpha: float -- A float that governs the slope for values lower than the threshold (default 0.0).
    :param max_value: Optional float -- Saturation threshold (the largest value the function will return)
        (default None).
    :param threshold: float -- Threshold below which values are damped or set to zero (default 0).
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.relu(tf.math.real(z), alpha, max_value, threshold)
    im_part = tf.keras.activations.relu(tf.math.imag(z), alpha, max_value, threshold)
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
def cart_leaky_relu(z: Tensor, alpha=0.2, name=None) -> Tensor:
    """
    Leaky ReLU applied separately to the real and imaginary parts of z.
    https://www.tensorflow.org/api_docs/python/tf/nn/leaky_relu
    http://robotics.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf
    :param z: Input tensor.
    :param alpha: Slope of the activation function at x < 0. Default: 0.2
    :param name: A name for the operation (optional).
    :return: Tensor result of the applied activation function
    """
    re_part = tf.nn.leaky_relu(tf.math.real(z), alpha, name)
    im_part = tf.nn.leaky_relu(tf.math.imag(z), alpha, name)
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
def cart_selu(z: Tensor) -> Tensor:
    """
    Scaled Exponential Linear Unit (scale * elu(x, alpha)) applied separately
    to the real and imaginary parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/selu
    https://arxiv.org/abs/1706.02515
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.selu(tf.math.real(z))
    im_part = tf.keras.activations.selu(tf.math.imag(z))
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
def cart_softplus(z: Tensor) -> Tensor:
    """
    Softplus (log(exp(x) + 1)) applied separately to the real and imaginary
    parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softplus
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.softplus(tf.math.real(z))
    im_part = tf.keras.activations.softplus(tf.math.imag(z))
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
def cart_softsign(z: Tensor) -> Tensor:
    """
    Softsign (x / (abs(x) + 1)) applied separately to the real and imaginary
    parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softsign
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.softsign(tf.math.real(z))
    im_part = tf.keras.activations.softsign(tf.math.imag(z))
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
def cart_tanh(z: Tensor) -> Tensor:
    """
    Hyperbolic tangent applied separately to the real and imaginary parts of z:
        tanh(x) = sinh(x)/cosh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
    Its derivative is 1 - tanh^2, which keeps backprop cheap.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/tanh
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.tanh(tf.math.real(z))
    im_part = tf.keras.activations.tanh(tf.math.imag(z))
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
# Classification
def cart_softmax(z: Tensor, axis=-1) -> Tensor:
    """
    Softmax (exp(x)/tf.reduce_sum(exp(x))) applied separately to the real and
    imaginary parts of z. Often used as the activation of the last layer of a
    classification network, since each part can be read as a distribution.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    re_part = tf.keras.activations.softmax(tf.math.real(z), axis)
    im_part = tf.keras.activations.softmax(tf.math.imag(z), axis)
    return tf.cast(tf.complex(re_part, im_part), dtype=z.dtype)
"""
TYPE B: Polar form.
"""
# For all ReLU functions, the polar form makes no real sense. If we keep the phase because abs(z) > 0
def _apply_pol(z: Tensor, amp_fun: Callable[[Tensor], Tensor],
               pha_fun: Optional[Callable[[Tensor], Tensor]] = None) -> Tensor:
    """Apply amp_fun to |z| (and optionally pha_fun to angle(z)), then rebuild
    the complex number from the transformed polar coordinates."""
    magnitude = amp_fun(tf.math.abs(z))
    phase = tf.math.angle(z)
    if pha_fun is not None:
        phase = pha_fun(phase)
    real_part = magnitude * tf.math.cos(phase)
    imag_part = magnitude * tf.math.sin(phase)
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def pol_tanh(z: Tensor) -> Tensor:
    """
    Hyperbolic tangent applied to the modulus of z; the phase is left untouched.
    Its derivative is 1 - tanh^2, which keeps backprop cheap.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/tanh
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    return _apply_pol(z, tf.keras.activations.tanh)
def pol_sigmoid(z: Tensor) -> Tensor:
    """
    Sigmoid applied to the modulus of z; the phase is left untouched.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/sigmoid
    :param z: Tensor to be used as input of the activation function
    :return: Tensor result of the applied activation function
    """
    return _apply_pol(z, tf.keras.activations.sigmoid)
def pol_selu(z: Tensor) -> Tensor:
    """
    Applies Scaled Exponential Linear Unit (scale * elu(x, alpha)) to the
    absolute value of z, keeping the phase unchanged.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/selu
    https://arxiv.org/abs/1706.02515
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    Logic:
        I must mantain the phase (angle) so: cos(theta) = x_0/r_0 = x_1/r_1.
        For real case, x_0 = r_0 so it also works.
    """
    r_0 = tf.abs(z)
    r_1 = tf.keras.activations.selu(r_0)
    # Robustness fix: at |z| == 0 the plain ratio r_1 / r_0 was 0/0 = NaN even
    # though selu(0) == 0; divide_no_nan keeps the output at exactly 0 there.
    factor = tf.math.divide_no_nan(r_1, r_0)
    return tf.cast(tf.complex(tf.math.real(z) * factor, tf.math.imag(z) * factor), dtype=z.dtype)
# Maps activation names (strings usable in layer configs / keras serialization)
# to the callables defined above; registered with keras' custom objects in
# cvnn/__init__.py so models can reference them by name.
act_dispatcher = {
    'linear': linear,
    # Complex input, real output
    'cast_to_real': cast_to_real,
    'convert_to_real_with_abs': convert_to_real_with_abs,
    'sigmoid_real': sigmoid_real,
    'softmax_real_with_abs': softmax_real_with_abs,
    'softmax_real_with_avg': softmax_real_with_avg,
    'softmax_real_with_mult': softmax_real_with_mult,
    'softmax_of_softmax_real_with_mult': softmax_of_softmax_real_with_mult,
    'softmax_of_softmax_real_with_avg': softmax_of_softmax_real_with_avg,
    'softmax_real_with_polar': softmax_real_with_polar,
    # Phasor networks
    'georgiou_cdbp': georgiou_cdbp,
    'mvn_activation': mvn_activation,
    'complex_signum': complex_signum,
    # Type A (cartesian)
    'cart_sigmoid': cart_sigmoid,
    'cart_elu': cart_elu,
    'cart_exponential': cart_exponential,
    'cart_hard_sigmoid': cart_hard_sigmoid,
    'cart_relu': cart_relu,
    'cart_leaky_relu': cart_leaky_relu,
    'cart_selu': cart_selu,
    'cart_softplus': cart_softplus,
    'cart_softsign': cart_softsign,
    'cart_tanh': cart_tanh,
    'cart_softmax': cart_softmax,
    # Type B (polar)
    'pol_tanh': pol_tanh,
    'pol_sigmoid': pol_sigmoid,
    'pol_selu': pol_selu,
    # Elementary Transcendental Functions (ETF)
    'etf_circular_tan': etf_circular_tan,
    'etf_circular_sin': etf_circular_sin,
    'etf_inv_circular_atan': etf_inv_circular_atan,
    'etf_inv_circular_asin': etf_inv_circular_asin,
    'etf_inv_circular_acos': etf_inv_circular_acos,
    'etf_circular_tanh': etf_circular_tanh,
    'etf_circular_sinh': etf_circular_sinh,
    'etf_inv_circular_atanh': etf_inv_circular_atanh,
    'etf_inv_circular_asinh': etf_inv_circular_asinh,
    # ReLU
    'modrelu': modrelu,
    'crelu': crelu,
    'zrelu': zrelu,
    'complex_cardioid': complex_cardioid
}
if __name__ == '__main__':
    # Manual smoke test: run each ReLU variant on a small complex vector.
    # The commented-out string below additionally plots how georgiou_cdbp moves
    # points with respect to the unit circle.
    x = tf.constant([-2, 1.0, 0.0, 1.0, -3, 0.8, 0.1], dtype=tf.float32)
    y = tf.constant([-2.5, -1.5, 0.0, 1.0, 2, 0.4, -0.4], dtype=tf.float32)
    z = tf.complex(x, y)
    result = crelu(z)
    result = modrelu(z, 4)
    result = zrelu(z)
    result = complex_cardioid(z)
    """import matplotlib.pyplot as plt
    import numpy as np
    x = tf.constant([-2, 1.0, 0.0, 1.0, -3, 0.8, 0.1], dtype=tf.float32)
    y = tf.constant([-2.5, -1.5, 0.0, 1.0, 2, 0.4, -0.4], dtype=tf.float32)
    z = tf.complex(x, y)
    result = georgiou_cdbp(z)
    ax = plt.axes()
    ax.scatter(tf.math.real(z), tf.math.imag(z), color='red')
    ax.scatter(tf.math.real(result), tf.math.imag(result), color='blue')
    for x, y, dx, dy in zip(tf.math.real(z), tf.math.imag(z),
                            tf.math.real(result) - tf.math.real(z),
                            tf.math.imag(result) - tf.math.imag(z)):
        ax.arrow(x, y, dx, dy, length_includes_head=True, head_width=0.1)
    t = np.linspace(0, np.pi * 2, 100)
    ax.plot(np.cos(t), np.sin(t), linewidth=1)
    yabs_max = abs(max(ax.get_ylim(), key=abs))
    xabs_max = abs(max(ax.get_xlim(), key=abs))
    axis_max = max(yabs_max, xabs_max)
    ax.set_ylim(ymin=-axis_max, ymax=axis_max)
    ax.set_xlim(xmin=-axis_max, xmax=axis_max)
    plt.show()"""
# Module metadata.
__author__ = 'J. Agustin BARRACHINA'
__version__ = '0.0.21'
__maintainer__ = 'J. Agustin BARRACHINA'
__email__ = 'joseagustin.barra@gmail.com; jose-agustin.barrachina@centralesupelec.fr'
| 24,929 | 39.080386 | 127 | py |
cvnn | cvnn-master/cvnn/__init__.py | import logging
import colorlog
import re
import os
from cvnn.utils import create_folder
from tensorflow.keras.utils import get_custom_objects
from cvnn.activations import act_dispatcher
from cvnn.initializers import init_dispatcher
# Register the package's activations and initializers with keras so that models
# (and saved configs) can reference them by their string names.
get_custom_objects().update(act_dispatcher)  # Makes my activation functions usable with a string
get_custom_objects().update(init_dispatcher)
def get_version() -> str:
    """
    Read the package version from the ``_version.py`` file next to this module.

    Looks first for a ``__version__ = "..."`` assignment and falls back to a
    JSON-style ``"version": "..."`` entry.

    :return: the version string
    :raises RuntimeError: if no version string can be found
    """
    versionfile = os.path.split(os.path.realpath(__file__))[0] + "/_version.py"
    # Bug fix: the file handle was opened inline and never closed (resource leak);
    # the `with` block guarantees it is released.
    with open(versionfile, "rt") as fp:
        verstrline = fp.read()
    VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
    mo = re.search(VSRE, verstrline, re.M)
    if mo:
        return mo.group(1)
    VSRE = r"\"version\": ['\"]([^'\"]*)['\"]"
    mo = re.search(VSRE, verstrline, re.M)
    if mo:
        return mo.group(1)
    raise RuntimeError("Unable to find version string in %s." % (versionfile,))
# How to comment script header
# https://medium.com/@rukavina.andrei/how-to-write-a-python-script-header-51d3cec13731
__author__ = 'J. Agustin BARRACHINA'
__copyright__ = 'Copyright 2020, {project_name}'
__credits__ = ['{credit_list}']
__license__ = '{license}'
__version__ = get_version()
__maintainer__ = 'J. Agustin BARRACHINA'
__email__ = 'joseagustin.barra@gmail.com; jose-agustin.barrachina@centralesupelec.fr'
__status__ = '{dev_status}'
# logging.getLogger('tensorflow').disabled = True  # Removes https://github.com/tensorflow/tensorflow/issues/41557
# Package-wide logger: colored console output at DEBUG level; the (disabled)
# lines show how to additionally log into a file.
STRING_FORMATTER = "%(asctime)s — %(levelname)s - %(module)s::%(funcName)s line %(lineno)s — %(message)s"
# file_handler = logging.FileHandler(create_folder("./log/logs/") / "logs.log")
# formatter = logging.Formatter(STRING_FORMATTER)
# file_handler.setFormatter(formatter)
# https://github.com/borntyping/python-colorlog
# https://stackoverflow.com/a/23964880/5931672
console_handler = colorlog.StreamHandler()
console_handler.setFormatter(colorlog.ColoredFormatter('%(log_color)s' + STRING_FORMATTER))
logger = colorlog.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
# logger.addHandler(file_handler)
| 2,139 | 32.968254 | 117 | py |
cvnn | cvnn-master/cvnn/real_equiv_tools.py | import sys
import numpy as np
from tensorflow.keras import Sequential
from pdb import set_trace
from cvnn import logger
import cvnn.layers as layers
from cvnn.layers.core import ComplexLayer
from typing import Type, List
from typing import Optional
# Accepted values for the `equiv_technique` argument of the functions below
# (strategy used to size the real-valued equivalent of a complex network).
EQUIV_TECHNIQUES = {
    "np", "alternate_tp", "ratio_tp", "none"
}
def get_real_equivalent_multiplier(layers_shape, classifier, equiv_technique, bias_adjust: bool = False):
"""
Returns an array (output_multiplier) of size `self.shape` (number of hidden layers + output layer)
one must multiply the real valued equivalent layer
In other words, the real valued equivalent layer 'i' will have:
neurons_real_valued_layer[i] = output_multiplier[i] * neurons_complex_valued_layer[i]
:param layers_shape:
:param classifier: Boolean (default = True) weather the model's task is to classify (True) or
a regression task (False)
:param equiv_technique: Used to define the strategy of the capacity equivalent model.
This parameter is ignored if capacity_equivalent=False
- 'np': double all layer size (except the last one if classifier=True)
- 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'
- 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between
multiplying by 2 or 1. Special case in the middle is treated as a compromise between the two.
:return: output_multiplier
"""
dense_layers = [d for d in layers_shape if isinstance(d, layers.ComplexDense)] # Keep only dense layers
return get_real_equivalent_multiplier_from_shape(_parse_sizes(dense_layers), classifier=classifier,
equiv_technique=equiv_technique, bias_adjust=bias_adjust)
def get_real_equivalent_multiplier_from_shape(layers_shape: List[int], equiv_technique: str,
classifier: bool = True, bias_adjust: bool = False):
equiv_technique = equiv_technique.lower()
if equiv_technique not in EQUIV_TECHNIQUES:
raise ValueError(f"Unknown equiv_technique {equiv_technique}")
if equiv_technique == "alternate_tp":
output_multiplier = _get_alternate_capacity_equivalent(layers_shape, classifier)
elif equiv_technique == "ratio_tp":
output_multiplier = _get_ratio_capacity_equivalent(layers_shape, classifier,
bias_adjust=bias_adjust)
elif equiv_technique == "np":
output_multiplier = 2 * np.ones(len(layers_shape)-1).astype(int)
if classifier:
output_multiplier[-1] = 1
elif equiv_technique == "none":
output_multiplier = np.ones(len(layers_shape) - 1).astype(int)
else:
raise ValueError(f"Unknown equiv_technique {equiv_technique} but listed on {EQUIV_TECHNIQUES}.")
return output_multiplier
def get_real_equivalent(complex_model: Type[Sequential], classifier: bool = True, capacity_equivalent: bool = True,
                        equiv_technique: str = 'ratio', name: Optional[str] = None):
    """
    Build and compile a real-valued ``Sequential`` model equivalent to ``complex_model``.

    Each complex layer is replaced by its real counterpart (``layer.get_real_equivalent()``) and
    the input feature dimension is doubled so real and imaginary parts feed separate channels.

    :param complex_model: A ``tf.keras.Sequential`` model made of cvnn ``ComplexLayer`` layers.
    :param classifier: True (default) when the task is classification, in which case the output
        layer keeps its original size.
    :param capacity_equivalent: True (default) to size the real model so it matches the number of
        real-valued trainable parameters of ``complex_model``; False to simply double every layer.
    :param equiv_technique: 'ratio' or 'alternate'; sizing strategy used when
        ``capacity_equivalent`` is True.
    :param name: Name for the new model. Defaults to ``"<complex_model.name>_real_equiv"``.
    :return: A compiled real-valued ``Sequential`` model.
    """
    assert isinstance(complex_model, Sequential), "Sorry, only sequential models supported for the moment"
    equiv_technique = equiv_technique.lower()
    if equiv_technique not in {"ratio", "alternate"}:
        logger.error("Invalid `equivalent_technique` argument: " + equiv_technique)
        sys.exit(-1)
    # Double the feature dimension: a complex input of n features becomes 2n real features.
    real_input_shape = [inp for inp in complex_model.layers[0].input_shape if inp is not None]
    real_input_shape[-1] = real_input_shape[-1]*2
    real_shape = [layers.ComplexInput(input_shape=real_input_shape,
                                      dtype=complex_model.layers[0].input.dtype.real_dtype)]
    # BUGFIX: `capacity_equivalent` (a bool) used to be passed as the `equiv_technique` parameter
    # of get_real_equivalent_multiplier, which crashed on `equiv_technique.lower()`. Map both
    # flags to the technique key that function expects ('ratio'/'alternate' -> '*_tp' when
    # capacity-equivalent, plain doubling 'np' otherwise).
    technique = equiv_technique + "_tp" if capacity_equivalent else "np"
    output_multiplier = get_real_equivalent_multiplier(complex_model.layers, classifier, technique)
    counter = 0
    for layer in complex_model.layers:
        if isinstance(layer, ComplexLayer):
            if isinstance(layer, layers.ComplexDense):  # TODO: Check if I can do this with kargs or sth
                real_shape.append(layer.get_real_equivalent(
                    output_multiplier=output_multiplier[counter]))
                counter += 1
            else:
                real_shape.append(layer.get_real_equivalent())
        else:
            sys.exit("Layer " + str(layer) + " unknown")
    assert counter == len(output_multiplier)
    if name is None:
        name = f"{complex_model.name}_real_equiv"
    real_equiv = Sequential(real_shape, name=name)
    # Fresh optimizer of the same class (no shared state with the complex model's optimizer).
    real_equiv.compile(optimizer=complex_model.optimizer.__class__(), loss=complex_model.loss,
                       metrics=['accuracy'])
    return real_equiv
def _parse_sizes(dense_layers):
assert len(dense_layers[0].input_shape) == 2, "Possibly a bug of cvnn. Please report it to github issues"
model_in_c = dense_layers[0].input_shape[-1] # -1 not to take the None part
model_out_c = dense_layers[-1].units
x_c = [dense_layers[i].units for i in range(len(dense_layers[:-1]))]
x_c.insert(0, model_in_c)
x_c.append(model_out_c)
return x_c
def _get_ratio_capacity_equivalent(layers_shape, classification: bool = True, bias_adjust: bool = True):
"""
Generates output_multiplier keeping not only the same capacity but keeping a constant ratio between the
model's layers
This helps keeps the 'aspect' or shape of the model my making:
neurons_real_layer_i = ratio * neurons_complex_layer_i
:param layers_shape:
:param classification: True (default) if the model is a classification model. False otherwise.
:param bias_adjust: True (default) if taking into account the bias as a trainable parameter. If not it will
only match the real valued parameters of the weights
"""
p_c = 0
for i in range(len(layers_shape[:-1])):
p_c += 2 * layers_shape[i] * layers_shape[i+1]
model_in_c = layers_shape[0]
model_out_c = layers_shape[-1]
x_c = layers_shape[1:-1]
if bias_adjust:
p_c = p_c + 2 * np.sum(x_c) + 2 * model_out_c
model_in_r = 2 * model_in_c
model_out_r = model_out_c if classification else 2 * model_out_c
# Quadratic equation
if len(x_c) > 1:
quadratic_c = float(-p_c)
quadratic_b = float(model_in_r * x_c[0] + model_out_r * x_c[-1])
if bias_adjust:
quadratic_b = quadratic_b + np.sum(x_c) + model_out_c
quadratic_a = float(np.sum([x_c[i] * x_c[i + 1] for i in range(len(x_c) - 1)]))
# The result MUST be positive so I use the '+' solution
ratio = (-quadratic_b + np.sqrt(quadratic_b ** 2 - 4 * quadratic_c * quadratic_a)) / (2 * quadratic_a)
if not 1 <= ratio < 2:
logger.error("Ratio {} has a weird value. This function must have a bug.".format(ratio))
else:
ratio = 2 * (model_in_c + model_out_c) / (model_in_r + model_out_r)
return [ratio] * len(x_c) + [1 if classification else 2]
def _get_alternate_capacity_equivalent(layers_shape, classification: bool = True):
    """
    Generates output_multiplier using the alternate method described in https://arxiv.org/abs/1811.12351 which
    doubles or not the layer if it's neighbor was doubled or not (making the opposite).
    The code fills output_multiplier from both senses:
        output_multiplier = [ ... , .... ]
                             --->    <---
    If when both ends meet there's not a coincidence (example: [..., 1, 1, ...]) then
    the code will find a compromise between the two to keep the same real valued trainable parameters.

    :param layers_shape: Neuron count per layer, input size included as first element.
    :param classification: True (default) for classification (output multiplier 1), False for
        regression (output multiplier 2).
    :return: output_multiplier for the hidden + output layers (input entry dropped).
    """
    output_multiplier = np.zeros(len(layers_shape))
    output_multiplier[0] = 2  # Sets input multiplier
    output_multiplier[-1] = 1 if classification else 2  # Output multiplier
    i: int = 1
    while i < (len(layers_shape) - i):  # Fill the hidden layers (from 1 to len()-1)
        # Alternate with the already-fixed neighbor on each side.
        output_multiplier[i] = 2 if output_multiplier[i - 1] == 1 else 1  # From beginning
        output_multiplier[-1 - i] = 2 if output_multiplier[-i] == 1 else 1  # From the end
        # Middle element whose two neighbors disagree -> needs a fractional compromise.
        index_in_middle_with_diff_borders = i == len(layers_shape) - i - 1 and output_multiplier[i - 1] != output_multiplier[i + 1]
        # NOTE(review): `i == len(layers_shape) - i` can never hold inside this loop
        # (the while condition requires i < len - i), so this flag looks permanently
        # False and the `else` branch below appears unreachable — confirm intent.
        subsequent_indexes_are_equal = i == len(layers_shape) - i and output_multiplier[i] == output_multiplier[i + 1]
        if index_in_middle_with_diff_borders or subsequent_indexes_are_equal:
            m_inf = layers_shape[i - 1]  # This is because dense_layers are len(output_multiplier) - 1
            m_sup = layers_shape[i + 1]
            if i == len(layers_shape) - i - 1:  # index_in_middle_with_diff_borders
                coef_sup = output_multiplier[i + 1]
                coef_inf = output_multiplier[i - 1]
            else:  # subsequent_indexes_are_equal
                coef_sup = output_multiplier[i + 1]
                coef_inf = output_multiplier[i]
            # Compromise multiplier that equates the real-valued parameter count on both sides.
            output_multiplier[i] = 2 * (m_inf + m_sup) / (coef_inf * m_inf + coef_sup * m_sup)
        i += 1
    return output_multiplier[1:]
| 9,341 | 53.631579 | 131 | py |
cvnn | cvnn-master/cvnn/layers/pooling.py | import tensorflow as tf
from packaging import version
from tensorflow.keras.layers import Layer
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils import conv_utils
if version.parse(tf.__version__) < version.parse("2.6.0"):
from tensorflow.python.keras.engine.input_spec import InputSpec
else:
from tensorflow.keras.layers import InputSpec
from tensorflow.python.framework import tensor_shape
from abc import abstractmethod
# Typing
from typing import Union, Optional, Tuple
# Own models
from cvnn.layers.core import ComplexLayer
from cvnn.layers.core import DEFAULT_COMPLEX_TYPE
class ComplexPooling2D(Layer, ComplexLayer):
    """
    Pooling layer for arbitrary pooling functions, for 2D inputs (e.g. images).
    Abstract class. This class only exists for code reuse. It will never be an exposed API.
    """

    def __init__(self, pool_size: Union[int, Tuple[int, int]] = (2, 2),
                 strides: Optional[Union[int, Tuple[int, int]]] = None,
                 padding: str = 'valid', data_format: Optional[str] = None,
                 name: Optional[str] = None, dtype=DEFAULT_COMPLEX_TYPE, **kwargs):
        """
        :param pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
            specifying the size of the pooling window.
            Can be a single integer to specify the same value for all spatial dimensions.
        :param strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation.
            Can be a single integer to specify the same value for all spatial dimensions.
        :param padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive.
        :param data_format: A string, one of `channels_last` (default) or `channels_first`.
            The ordering of the dimensions in the inputs.
            `channels_last` corresponds to inputs with shape
            `(batch, height, width, channels)` while `channels_first` corresponds to inputs with
            shape `(batch, channels, height, width)`.
        :param name: A string, the name of the layer.
        """
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        super(ComplexPooling2D, self).__init__(name=name, **kwargs)
        if data_format is None:
            data_format = backend.image_data_format()
        if strides is None:  # Keras convention: strides default to the pool size.
            strides = pool_size
        self.pool_size = conv_utils.normalize_tuple(pool_size, 2,
                                                    'pool_size')  # Values are checked here. No need to check them later
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=4)

    @abstractmethod
    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Actual pooling op (e.g. max or average); implemented by subclasses."""
        pass

    def call(self, inputs, **kwargs):
        # Build the 4D ksize/strides expected by tf.nn pooling ops
        # (no pooling over batch or channel dimensions).
        if self.data_format == 'channels_last':
            pool_shape = (1,) + self.pool_size + (1,)
            strides = (1,) + self.strides + (1,)
        else:
            pool_shape = (1, 1) + self.pool_size
            strides = (1, 1) + self.strides
        outputs = self.pool_function(
            inputs,
            ksize=pool_shape,
            strides=strides,
            padding=self.padding.upper(),
            data_format=conv_utils.convert_data_format(self.data_format, 4))
        return outputs

    def compute_output_shape(self, input_shape):
        """Reduce both spatial dimensions according to pool size, padding and strides."""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        if self.data_format == 'channels_first':
            rows = input_shape[2]
            cols = input_shape[3]
        else:
            rows = input_shape[1]
            cols = input_shape[2]
        rows = conv_utils.conv_output_length(rows, self.pool_size[0], self.padding,
                                             self.strides[0])
        cols = conv_utils.conv_output_length(cols, self.pool_size[1], self.padding,
                                             self.strides[1])
        if self.data_format == 'channels_first':
            return tensor_shape.TensorShape(
                [input_shape[0], input_shape[1], rows, cols])
        else:
            return tensor_shape.TensorShape(
                [input_shape[0], rows, cols, input_shape[3]])

    def get_config(self):
        """Return the constructor arguments needed to re-create this layer."""
        config = super(ComplexPooling2D, self).get_config()
        config.update({
            'pool_size': self.pool_size,
            'padding': self.padding,
            'strides': self.strides,
            'data_format': self.data_format,
            'dtype': self.my_dtype
        })
        return config
class ComplexMaxPooling2D(ComplexPooling2D):
    """
    Max pooling operation for 2D spatial data.
    Works for complex dtype using the absolute value to get the max.
    """

    def __init__(self, pool_size: Union[int, Tuple[int, int]] = (2, 2),
                 strides: Optional[Union[int, Tuple[int, int]]] = None,
                 padding: str = 'valid', data_format: Optional[str] = None,
                 name: Optional[str] = None, **kwargs):
        super(ComplexMaxPooling2D, self).__init__(pool_size=pool_size, strides=strides, padding=padding,
                                                  data_format=data_format, name=name, **kwargs)
        self.argmax = None  # Flat winning indices, refreshed on every forward pass; see get_max_index().

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Select, in each window, the input element of largest modulus (plain max for real inputs)."""
        # The max is calculated with the absolute value. This will still work on real values.
        if inputs.dtype.is_complex:
            abs_in = tf.math.abs(inputs)
        else:
            abs_in = inputs
        output, argmax = tf.nn.max_pool_with_argmax(input=abs_in, ksize=ksize, strides=strides,
                                                    padding=padding, data_format=data_format,
                                                    include_batch_in_index=True)
        self.argmax = argmax
        shape = tf.shape(output)
        # `argmax` indexes the flattened input (include_batch_in_index=True), so gather the
        # original (possibly complex) values at the winning positions and restore the shape.
        tf_res = tf.reshape(tf.gather(tf.reshape(inputs, [-1]), argmax), shape)
        # assert np.all(tf_res == output) # For debugging when the input is real only!
        assert tf_res.dtype == inputs.dtype
        return tf_res

    def get_real_equivalent(self):
        """Return a fresh copy configured identically; max pooling works unchanged on real data."""
        return ComplexMaxPooling2D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")

    def get_max_index(self):
        """Return the argmax tensor of the latest forward pass (for e.g. max unpooling)."""
        if self.argmax is None:
            raise AttributeError("Variable argmax did not exist, call at least once the max-pooling layer")
        return self.argmax  # TODO: Shall I check this is use only once?
class ComplexMaxPooling2DWithArgmax(ComplexMaxPooling2D):
    """
    Max pooling operation for 2D spatial data and outputs both max values and indices.
    This class is equivalent to ComplexMaxPooling2D but that also outputs indices.
    Useful to perform Max Unpooling using ComplexUnPooling2D.
    Works for complex dtype using the absolute value to get the max.
    """

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """
        :param inputs: A Tensor. Input to pool over.
        :param ksize: An int or list of ints that has length 1, 2 or 4.
            The size of the window for each dimension of the input tensor.
        :param strides: An int or list of ints that has length 1, 2 or 4.
            The stride of the sliding window for each dimension of the input tensor.
        :param padding: A string from: "SAME", "VALID". The type of padding algorithm to use.
        :param data_format: An optional string, must be set to "NHWC". Defaults to "NHWC".
            Specify the data format of the input and output data.
        :return: A tuple of Tensor objects (output, argmax).
            - output A Tensor. Has the same type as input.
            - argmax A Tensor. The indices in argmax are flattened (Complains directly to TensorFlow)
        """
        # The max is calculated with the absolute value. This will still work on real values.
        if inputs.dtype.is_complex:
            abs_in = tf.math.abs(inputs)
        else:
            abs_in = inputs
        output, argmax = tf.nn.max_pool_with_argmax(input=abs_in, ksize=ksize, strides=strides,
                                                    padding=padding, data_format=data_format,
                                                    include_batch_in_index=True)
        shape = tf.shape(output)
        # Gather the original (possibly complex) values at the winning flat indices; unlike the
        # parent class, the argmax is returned to the caller instead of being stored.
        tf_res = tf.reshape(tf.gather(tf.reshape(inputs, [-1]), argmax), shape)
        # assert np.all(tf_res == output) # For debugging when the input is real only!
        assert tf_res.dtype == inputs.dtype
        return tf_res, argmax
class ComplexAvgPooling2D(ComplexPooling2D):
    """Average pooling for 2D data; real and imaginary parts are pooled independently."""

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        # mean(z) = mean(Re z) + j * mean(Im z), so each Cartesian component is pooled on its own.
        real_avg = tf.nn.avg_pool2d(input=tf.math.real(inputs), ksize=ksize, strides=strides,
                                    padding=padding, data_format=data_format)
        imag_avg = tf.nn.avg_pool2d(input=tf.math.imag(inputs), ksize=ksize, strides=strides,
                                    padding=padding, data_format=data_format)
        return tf.complex(real_avg, imag_avg) if inputs.dtype.is_complex else real_avg

    def get_real_equivalent(self):
        # Average pooling behaves the same on real data; only the name marks the copy.
        return ComplexAvgPooling2D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")
class ComplexCircularAvgPooling2D(ComplexPooling2D):
    """
    (Not implemented yet) Average pooling meant to average the modulus arithmetically and the
    phase with the circular mean (https://en.wikipedia.org/wiki/Circular_mean).
    For a working polar/circular-mean average pooling see ComplexPolarAvgPooling2D.
    """

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """
        :raises NotImplementedError: always; the circular phase average is not implemented.
        """
        # BUGFIX: the original body called the non-existent `tf.math.phase`, referenced an
        # undefined variable `z`, and raised the misspelled `NotImplemetedError` (a NameError).
        # Fail fast with the intended exception instead of crashing with an unrelated error.
        raise NotImplementedError("ComplexCircularAvgPooling2D is not implemented yet; "
                                  "use ComplexPolarAvgPooling2D instead.")

    def get_real_equivalent(self):
        # NOTE(review): mirrors the original behavior of returning a plain ComplexAvgPooling2D
        # (real inputs carry no phase) — confirm once pool_function is implemented.
        return ComplexAvgPooling2D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")
class ComplexPolarAvgPooling2D(ComplexPooling2D):
    """
    Average pooling in polar form: the modulus is averaged arithmetically while the phase is
    averaged with the circular mean (https://en.wikipedia.org/wiki/Circular_mean).
    Falls back to the plain modulus average for real (non-complex) inputs.
    """

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Pool modulus and phase separately, then rebuild the complex output from polar form."""
        inputs_abs = tf.math.abs(inputs)
        output_abs = tf.nn.avg_pool2d(input=inputs_abs, ksize=ksize, strides=strides,
                                      padding=padding, data_format=data_format)
        # Use circular mean
        inputs_angle = tf.math.angle(inputs)
        unit_x = tf.math.cos(inputs_angle)  # Convert all angles to corresponding points on the unit circle
        unit_y = tf.math.sin(inputs_angle)  # convert polar coordinates to Cartesian coordinates.
        avg_unit_x = tf.nn.avg_pool2d(input=unit_x, ksize=ksize, strides=strides,  # Then compute the arithmetic
                                      padding=padding, data_format=data_format)  # mean of these points.
        avg_unit_y = tf.nn.avg_pool2d(input=unit_y, ksize=ksize, strides=strides,
                                      padding=padding, data_format=data_format)
        # The angle is a reasonable mean of the input angles.
        output_angle = tf.math.angle(tf.complex(avg_unit_x, avg_unit_y))
        # Unknown result. If the angles are uniformly distributed on the circle,
        # then the resulting radius will be 0, and there is no circular mean.
        if inputs.dtype.is_complex:
            # Re-assemble from polar form: z = |z| * (cos(angle) + j*sin(angle)).
            output = tf.complex(output_abs * tf.math.cos(output_angle), output_abs * tf.math.sin(output_angle))
        else:
            output = output_abs
        return output

    def get_real_equivalent(self):
        """Return a fresh copy configured identically (phase is trivially zero for real data)."""
        return ComplexPolarAvgPooling2D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                        data_format=self.data_format, name=self.name + "_real_equiv")
class ComplexUnPooling2D(Layer, ComplexLayer):
    """
    Performs UnPooling as explained in:
    https://www.oreilly.com/library/view/hands-on-convolutional-neural/9781789130331/6476c4d5-19f2-455f-8590-c6f99504b7a5.xhtml
    This class was inspired to recreate the CV-FCN model of https://www.mdpi.com/2072-4292/11/22/2653
    As far as I am concerned this class should work for any dimensional input but I have not tested it
    (and you need the argmax which I only implemented the 2D case).
    """

    def __init__(self, desired_output_shape=None, upsampling_factor: Optional[int] = None, name=None,
                 dtype=DEFAULT_COMPLEX_TYPE, dynamic=False, **kwargs):
        """
        :param desired_output_shape: tf.TensorShape (or equivalent like tuple or list).
            The expected output shape without the batch size.
            Meaning that for a 2D image to be enlarged, this is size 3 of the form HxWxC or CxHxW.
        :param upsampling_factor: Integer. The factor to which enlarge the image,
            for example, if upsampling_factor=2, an input image of size 32x32 will be 64x64.
            This parameter is ignored if desired_output_shape is used or if the output shape is
            given to the call function.
        :param dynamic: Forwarded to `tf.keras.layers.Layer` (run the layer eagerly when True).
        :raises ValueError: If desired_output_shape is partially defined or not of length 3, or if
            upsampling_factor is neither None nor an int.
        """
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        if desired_output_shape is not None:
            if not tf.TensorShape(desired_output_shape).is_fully_defined():
                raise ValueError(f"desired_output_shape must be fully defined, got {desired_output_shape}")
            elif len(desired_output_shape) != 3:
                raise ValueError(f"desired_output_shape expected to be size 3 and got size {len(desired_output_shape)}")
        self.desired_output_shape = desired_output_shape
        if upsampling_factor is None or isinstance(upsampling_factor, int):
            self.upsampling_factor = upsampling_factor
        else:
            raise ValueError(f"Unsupported upsampling_factor = {upsampling_factor}")
        # The layer has no trainable weights; it only scatters pooled values back.
        super(ComplexUnPooling2D, self).__init__(trainable=False, name=name, dtype=self.my_dtype.real_dtype,
                                                 dynamic=dynamic, **kwargs)

    def call(self, inputs, **kwargs):
        """
        TODO: Still has a bug, if argmax has coincident indexes. Don't think this is desired (but might).
        :param inputs: A list (input, argmax) or (input, argmax, output_shape).
            - input: A Tensor.
            - argmax: A Tensor. The indices in argmax are flattened (Complains directly to TensorFlow)
            - output_shape (Optional): A tf.TensorShape (or equivalent like tuple or list).
              The expected output shape without the batch size.
              Meaning that for a 2D image to be enlarged, this is size 3 of the form HxWxC or CxHxW
        :return: The unpooled tensor (batch dimension prepended to the output shape).
        :raises ValueError: If inputs is not a list of size 2 or 3, or if no output shape can be
            deduced from the 3rd input, `desired_output_shape` or `upsampling_factor`.
        """
        if not isinstance(inputs, list):
            raise ValueError('This layer should be called on a list of inputs.')
        if len(inputs) == 2:
            inputs_values, unpool_mat = inputs
            output_shape = self.desired_output_shape
        elif len(inputs) == 3:
            inputs_values, unpool_mat, output_shape = inputs
        else:
            raise ValueError(f'inputs = {inputs} must have size 2 or 3 and had size {len(inputs)}')
        # https://stackoverflow.com/a/42549265/5931672
        # https://github.com/tensorflow/addons/issues/632#issuecomment-482580850
        # When the output shape is not (fully) known, derive it from the upsampling factor instead.
        if not tf.TensorShape(output_shape).is_fully_defined():
            if self.upsampling_factor is None:
                raise ValueError('output_shape should be passed as 3rd element or either desired_output_shape '
                                 'or upsampling_factor should be passed on construction')
            if inputs_values.get_shape()[1:].is_fully_defined():
                output_shape = tf.tile(inputs_values,
                                       [1, self.upsampling_factor, self.upsampling_factor, 1]).get_shape()[1:]
            else:
                output_shape = tf.shape(inputs_values)[1:]
        elif self.upsampling_factor is not None:
            tf.print("WARNING: Ignoring self.upsampling_factor parameter")
        # Scatter the pooled values to their argmax positions in one flat buffer, then reshape.
        flat_output_shape = tf.reduce_prod(output_shape)
        shape = (tf.shape(inputs_values)[0] * flat_output_shape,)
        updates = tf.reshape(inputs_values, [-1])
        indices = tf.expand_dims(tf.reshape(unpool_mat, [-1]), axis=-1)
        ret = tf.scatter_nd(indices, updates, shape=shape)
        desired_output_shape_with_batch = tf.concat([[tf.shape(inputs_values)[0]], output_shape], axis=0)
        ret = tf.reshape(ret, shape=desired_output_shape_with_batch)
        return ret

    def get_real_equivalent(self):
        # BUGFIX: the dtype was previously passed as the `dynamic` argument (`dynamic=self.dtype`);
        # forward the actual `dynamic` flag and preserve the upsampling factor as well.
        return ComplexUnPooling2D(desired_output_shape=self.desired_output_shape,
                                  upsampling_factor=self.upsampling_factor, name=self.name,
                                  dtype=self.my_dtype.real_dtype, dynamic=self.dynamic)

    def get_config(self):
        """Return the constructor arguments needed to re-create this layer."""
        config = super(ComplexUnPooling2D, self).get_config()
        config.update({
            'desired_output_shape': self.desired_output_shape,
            'upsampling_factor': self.upsampling_factor,  # BUGFIX: previously lost on serialization
            'name': self.name,
            'dtype': self.my_dtype,
            'dynamic': False,
        })
        return config
"""
3D Pooling
"""
class ComplexPooling3D(Layer, ComplexLayer):
    """
    Pooling layer for arbitrary pooling functions, for 3D inputs (e.g. volumes or image stacks).
    Abstract class. This class only exists for code reuse; subclasses implement `pool_function`.
    """

    def __init__(self, pool_size=(2, 2, 1), strides=None,
                 padding='valid', data_format='channels_last',
                 name=None, dtype=DEFAULT_COMPLEX_TYPE, **kwargs):
        """
        :param pool_size: An integer or tuple/list of 3 integers specifying the pooling window size.
        :param strides: An integer or tuple/list of 3 integers. Defaults to pool_size when None.
        :param padding: A string, 'valid' or 'same'. Case-insensitive.
        :param data_format: A string, 'channels_last' (default) or 'channels_first'.
        :param name: A string, the name of the layer.
        """
        self.my_dtype = dtype
        super(ComplexPooling3D, self).__init__(name=name, **kwargs)
        if data_format is None:
            data_format = backend.image_data_format()
        if strides is None:  # Keras convention: strides default to the pool size.
            strides = pool_size
        self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
        self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=5)

    @abstractmethod
    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Actual pooling op; implemented by subclasses."""
        pass

    def call(self, inputs, **kwargs):
        outputs = self.pool_function(
            inputs,
            self.pool_size,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=conv_utils.convert_data_format(self.data_format, 5))
        return outputs

    def compute_output_shape(self, input_shape):
        """Reduce all three spatial dimensions according to pool size, padding and strides."""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        if self.data_format == 'channels_first':
            deps = input_shape[-3]
            rows = input_shape[-2]
            cols = input_shape[-1]
        else:
            deps = input_shape[-4]
            rows = input_shape[-3]
            cols = input_shape[-2]
        # BUGFIX: the depth dimension was previously returned unchanged and rows/cols used
        # pool_size[0]/pool_size[1]. Each of the three spatial dims must be reduced with its
        # own pool size and stride.
        deps = conv_utils.conv_output_length(deps, self.pool_size[0], self.padding, self.strides[0])
        rows = conv_utils.conv_output_length(rows, self.pool_size[1], self.padding, self.strides[1])
        cols = conv_utils.conv_output_length(cols, self.pool_size[2], self.padding, self.strides[2])
        if self.data_format == 'channels_first':
            return tensor_shape.TensorShape(
                [input_shape[0], input_shape[1], deps, rows, cols])
        else:
            return tensor_shape.TensorShape(
                [input_shape[0], deps, rows, cols, input_shape[-1]])

    def get_config(self):
        """Return the constructor arguments needed to re-create this layer."""
        config = super(ComplexPooling3D, self).get_config()
        config.update({
            'strides': self.strides,
            'pool_size': self.pool_size,
            'padding': self.padding,
            'data_format': self.data_format,
            'dtype': self.my_dtype
        })
        return config
class ComplexAvgPooling3D(ComplexPooling3D):
    """Average pooling over 3 spatial dimensions; real and imaginary parts are pooled independently."""

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        # mean(z) = mean(Re z) + j * mean(Im z), so each Cartesian component is pooled on its own.
        real_avg = tf.nn.avg_pool3d(input=tf.math.real(inputs), ksize=ksize, strides=strides,
                                    padding=padding, data_format=data_format)
        imag_avg = tf.nn.avg_pool3d(input=tf.math.imag(inputs), ksize=ksize, strides=strides,
                                    padding=padding, data_format=data_format)
        return tf.complex(real_avg, imag_avg) if inputs.dtype.is_complex else real_avg

    def get_real_equivalent(self):
        # Average pooling behaves the same on real data; only the name marks the copy.
        return ComplexAvgPooling3D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")
"""
1D Pooling
"""
class ComplexPooling1D(Layer, ComplexLayer):
    """
    Pooling layer for arbitrary pooling functions, for 1D (temporal) inputs.
    Abstract class. This class only exists for code reuse; subclasses implement `pool_function`.
    """

    def __init__(self, pool_size=2, strides=None,
                 padding='valid', data_format='channels_last',
                 name=None, dtype=DEFAULT_COMPLEX_TYPE, **kwargs):
        """
        :param pool_size: An integer (or tuple/list of 1 integer), size of the pooling window.
        :param strides: An integer (or tuple/list of 1 integer). Defaults to pool_size when None.
        :param padding: A string, 'valid' or 'same'. Case-insensitive.
        :param data_format: A string, 'channels_last' (default) or 'channels_first'.
        :param name: A string, the name of the layer.
        """
        # NOTE(review): unlike ComplexPooling2D, `dtype` is stored as given (not normalized with
        # tf.dtypes.as_dtype), so `my_dtype` may be a plain string here — confirm if intended.
        self.my_dtype = dtype
        super(ComplexPooling1D, self).__init__(name=name, **kwargs)
        if data_format is None:
            data_format = backend.image_data_format()
        if strides is None:  # Keras convention: strides default to the pool size.
            strides = pool_size
        self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
        self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=3)

    @abstractmethod
    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Actual pooling op; implemented by subclasses."""
        pass

    def call(self, inputs, **kwargs):
        outputs = self.pool_function(
            inputs,
            self.pool_size,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=conv_utils.convert_data_format(self.data_format, 3))
        return outputs

    def compute_output_shape(self, input_shape):
        """Reduce the temporal dimension according to pool size, padding and strides."""
        input_shape = tf.TensorShape(input_shape).as_list()
        if self.data_format == 'channels_first':
            steps = input_shape[2]
            features = input_shape[1]
        else:
            steps = input_shape[1]
            features = input_shape[2]
        length = conv_utils.conv_output_length(steps,
                                               self.pool_size[0],
                                               self.padding,
                                               self.strides[0])
        if self.data_format == 'channels_first':
            return tf.TensorShape([input_shape[0], features, length])
        else:
            return tf.TensorShape([input_shape[0], length, features])

    def get_config(self):
        """Return the constructor arguments needed to re-create this layer."""
        config = super(ComplexPooling1D, self).get_config()
        config.update({
            'strides': self.strides,
            'pool_size': self.pool_size,
            'padding': self.padding,
            'data_format': self.data_format,
            'dtype': self.my_dtype
        })
        return config
class ComplexAvgPooling1D(ComplexPooling1D):
    """Average pooling for temporal (1D) data; real and imaginary parts are pooled independently."""

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        # mean(z) = mean(Re z) + j * mean(Im z), so each Cartesian component is pooled on its own.
        real_avg = tf.nn.avg_pool1d(input=tf.math.real(inputs), ksize=ksize, strides=strides,
                                    padding=padding, data_format=data_format)
        imag_avg = tf.nn.avg_pool1d(input=tf.math.imag(inputs), ksize=ksize, strides=strides,
                                    padding=padding, data_format=data_format)
        return tf.complex(real_avg, imag_avg) if inputs.dtype.is_complex else real_avg

    def get_real_equivalent(self):
        # Average pooling behaves the same on real data; only the name marks the copy.
        return ComplexAvgPooling1D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")
| 24,753 | 47.253411 | 138 | py |
cvnn | cvnn-master/cvnn/layers/convolutional.py | import six
import functools
import tensorflow as tf
from packaging import version
from tensorflow.keras import activations
from tensorflow.keras import backend
from tensorflow.keras import constraints
from tensorflow.keras import initializers
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Layer
from tensorflow.python.keras.utils import conv_utils
if version.parse(tf.__version__) < version.parse("2.6.0"):
from tensorflow.python.keras.engine.input_spec import InputSpec
else:
from tensorflow.keras.layers import InputSpec
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
# Own modules
from cvnn.layers.core import ComplexLayer
from cvnn.initializers import ComplexGlorotUniform, Zeros, ComplexInitializer, INIT_TECHNIQUES
from cvnn import logger
from cvnn.layers.core import DEFAULT_COMPLEX_TYPE
class ComplexConv(Layer, ComplexLayer):
"""
Almost exact copy of
https://github.com/tensorflow/tensorflow/blob/v2.4.0/tensorflow/python/keras/layers/convolutional.py#L52
Abstract N-D complex convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Note: layer attributes cannot be modified after the layer has been called
once (except the `trainable` attribute).
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input. `"causal"` results in causal
(dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch_size, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
groups: A positive integer specifying the number of groups in which the
input is split along the channel axis. Each group is convolved
separately with `filters / groups` filters. The output is the
concatenation of all the `groups` results along the channel axis.
Input channels and `filters` must both be divisible by `groups`.
activation: Activation function to use.
If you don't specify anything, no activation is applied.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
ATTENTION: Not yet implemented! This parameter will have no effect.
bias_regularizer: Optional regularizer for the bias vector.
ATTENTION: Not yet implemented! This parameter will have no effect.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
:param init_technique: One of 'mirror' or 'zero_imag'. Tells the initializer how to init complex number if
the initializer was tensorflow's built in initializers (not supporting complex numbers).
- 'mirror': Uses the initializer for both real and imaginary part.
Note that some initializers such as Glorot or He will lose it's property if initialized this way.
- 'zero_imag': Initializer real part and let imaginary part to zero.
"""
def __init__(self, rank, filters, kernel_size, dtype=DEFAULT_COMPLEX_TYPE, strides=1, padding='valid', data_format=None, dilation_rate=1,
groups=1, activation=None, use_bias=True,
kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
kernel_regularizer=None, bias_regularizer=None, # TODO: Not yet working
activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
init_technique: str = 'mirror',
trainable=True, name=None, conv_op=None, **kwargs):
if kernel_regularizer is not None or bias_regularizer is not None:
logger.warning(f"Sorry, regularizers are not implemented yet, this parameter will take no effect")
super(ComplexConv, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
self.my_dtype = tf.dtypes.as_dtype(dtype)
# I use no default dtype to make sure I don't forget to give it to my ComplexConv layers
if isinstance(filters, float):
filters = int(filters)
self.filters = filters
self.groups = groups or 1
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=self.rank + 2)
self._validate_init()
self._is_causal = self.padding == 'causal'
self._channels_first = self.data_format == 'channels_first'
self._tf_data_format = conv_utils.convert_data_format(
self.data_format, self.rank + 2)
self.init_technique = init_technique.lower()
def _validate_init(self):
if self.filters is not None and self.filters % self.groups != 0:
raise ValueError(
'The number of filters must be evenly divisible by the number of '
'groups. Received: groups={}, filters={}'.format(
self.groups, self.filters))
if not all(self.kernel_size):
raise ValueError('The argument `kernel_size` cannot contain 0(s). '
'Received: %s' % (self.kernel_size,))
if (self.padding == 'causal' and not isinstance(self, (ComplexConv1D))):
raise ValueError('Causal padding is only supported for `Conv1D`'
'and `SeparableConv1D`.')
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
if input_channel % self.groups != 0:
raise ValueError(
f'The number of input channels must be evenly divisible by the number '
f'of groups. Received groups={self.groups}, but the input has {input_channel} channels '
f'(full input shape is {input_shape}).')
kernel_shape = self.kernel_size + (input_channel // self.groups, self.filters)
if self.my_dtype.is_complex:
i_kernel_dtype = self.my_dtype if isinstance(self.kernel_initializer,
ComplexInitializer) else self.my_dtype.real_dtype
i_bias_dtype = self.my_dtype if isinstance(self.bias_initializer,
ComplexInitializer) else self.my_dtype.real_dtype
i_kernel_initializer = self.kernel_initializer
i_bias_initializer = self.bias_initializer
if not isinstance(self.kernel_initializer, ComplexInitializer):
tf.print(f"WARNING: you are using a Tensorflow Initializer for complex numbers. "
f"Using {self.init_technique} method.")
if self.init_technique in INIT_TECHNIQUES:
if self.init_technique == 'zero_imag':
# This section is done to initialize with tf initializers, making imaginary part zero
i_kernel_initializer = initializers.Zeros()
i_bias_initializer = initializers.Zeros()
else:
raise ValueError(f"Unsuported init_technique {self.init_technique}, "
f"supported techniques are {INIT_TECHNIQUES}")
self.kernel_r = tf.Variable(
initial_value=self.kernel_initializer(shape=kernel_shape, dtype=i_kernel_dtype),
name='kernel_r',
constraint=self.kernel_constraint,
trainable=True
) # TODO: regularizer=self.kernel_regularizer,
self.kernel_i = tf.Variable(
initial_value=i_kernel_initializer(shape=kernel_shape, dtype=i_kernel_dtype),
name='kernel_i',
constraint=self.kernel_constraint,
trainable=True
) # TODO: regularizer=self.kernel_regularizer
if self.use_bias:
self.bias_r = tf.Variable(
initial_value=self.bias_initializer(shape=(self.filters,), dtype=i_bias_dtype),
name='bias_r',
trainable=True
)
self.bias_i = tf.Variable(
initial_value=i_bias_initializer(shape=(self.filters,), dtype=i_bias_dtype),
name='bias_i',
constraint=self.bias_constraint,
trainable=True
) # TODO: regularizer=self.bias_regularizer
else:
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.my_dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.my_dtype)
if not self.use_bias:
self.bias = None
channel_axis = self._get_channel_axis()
self.input_spec = InputSpec(min_ndim=self.rank + 2,
axes={channel_axis: input_channel})
self.built = True
def convolution_op(self, inputs, kernel):
# Convert Keras formats to TF native formats.
if self.padding == 'causal':
tf_padding = 'VALID' # Causal padding handled in `call`.
elif isinstance(self.padding, str):
tf_padding = self.padding.upper()
else:
tf_padding = self.padding
return tf.nn.convolution(
inputs,
kernel,
strides=list(self.strides),
padding=tf_padding,
dilations=list(self.dilation_rate),
data_format=self._tf_data_format,
name=self.__class__.__name__)
    def call(self, inputs):
        """
        Calls convolution, this function is divided in 4:
            1. Input parser/verification
            2. Convolution
            3. Bias
            4. Activation Function
        :param inputs: Input tensor; cast to the layer dtype if it differs.
        :returns: A tensor of rank 4+ representing `activation(conv2d(inputs, kernel) + bias)`.
        """
        # 1. Input verification: warn on dtype mismatch, then cast to the layer dtype.
        if inputs.dtype != self.my_dtype:
            tf.print(f"WARNING: {self.name} - Expected input to be {self.my_dtype}, but received {inputs.dtype}.")
            if self.my_dtype.is_complex and inputs.dtype.is_floating:
                tf.print("\tThis is normally fixed using ComplexInput() "
                         "at the start (tf casts input automatically to real).")
            inputs = tf.cast(inputs, self.my_dtype)
        if self._is_causal:  # Apply causal padding to inputs for Conv1D.
            inputs = tf.pad(inputs, self._compute_causal_padding(inputs))
        # 2. Convolution: implemented with 4 real convolutions using
        #    (a+ib)*(c+id) = (ac - bd) + i(ad + bc).
        inputs_r = tf.math.real(inputs)
        inputs_i = tf.math.imag(inputs)
        if self.my_dtype.is_complex:
            kernel_r = self.kernel_r
            kernel_i = self.kernel_i
            if self.use_bias:
                bias = tf.complex(self.bias_r, self.bias_i)
        else:
            # Real dtype: imaginary parts are expected to be zero.
            kernel_r = tf.math.real(self.kernel)
            kernel_i = tf.math.imag(self.kernel)  # TODO: Check they are all zero
            if self.use_bias:
                bias = self.bias
        real_outputs = self.convolution_op(inputs_r, kernel_r) - self.convolution_op(inputs_i, kernel_i)
        imag_outputs = self.convolution_op(inputs_r, kernel_i) + self.convolution_op(inputs_i, kernel_r)
        outputs = tf.cast(tf.complex(real_outputs, imag_outputs), dtype=self.my_dtype)
        # 3. Add bias
        if self.use_bias:
            output_rank = outputs.shape.rank
            if self.rank == 1 and self._channels_first:
                # tf.nn.bias_add does not accept a 1D input tensor.
                bias = tf.reshape(bias, (1, self.filters, 1))
                outputs += bias
            else:
                # Handle multiple batch dimensions.
                if output_rank is not None and output_rank > 2 + self.rank:

                    def _apply_fn(o):
                        # TODO: Will this bias be visible? Horrible
                        return tf.nn.bias_add(o, bias, data_format=self._tf_data_format)

                    outputs = nn_ops.squeeze_batch_dims(
                        outputs, _apply_fn, inner_rank=self.rank + 1)
                else:
                    outputs = tf.nn.bias_add(
                        outputs, bias, data_format=self._tf_data_format)
        # 4. Activation function
        if self.activation is not None:
            outputs = self.activation(outputs)
        return outputs
def _spatial_output_shape(self, spatial_input_shape):
return [
conv_utils.conv_output_length(
length,
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
for i, length in enumerate(spatial_input_shape)
]
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
batch_rank = len(input_shape) - self.rank - 1
if self.data_format == 'channels_last':
return tf.TensorShape(
input_shape[:batch_rank]
+ self._spatial_output_shape(input_shape[batch_rank:-1])
+ [self.filters])
else:
return tf.TensorShape(
input_shape[:batch_rank] + [self.filters] +
self._spatial_output_shape(input_shape[batch_rank + 1:]))
def _recreate_conv_op(self, inputs): # pylint: disable=unused-argument
return False
def get_config(self):
config = super(ComplexConv, self).get_config()
config.update({
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'groups': self.groups,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dtype': self.my_dtype
})
return config
def _compute_causal_padding(self, inputs):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if getattr(inputs.shape, 'ndims', None) is None:
batch_rank = 1
else:
batch_rank = len(inputs.shape) - 2
if self.data_format == 'channels_last':
causal_padding = [[0, 0]] * batch_rank + [[left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0]] * batch_rank + [[0, 0], [left_pad, 0]]
return causal_padding
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return -1 - self.rank
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
def get_real_equivalent(self):
# TODO: Shall I check it's not already complex?
return ComplexConv(rank=self.rank, filters=self.filters, kernel_size=self.kernel_size,
dtype=self.my_dtype.real_dtype, strides=self.strides, padding=self.padding,
data_format=self.data_format, dilation_rate=self.dilation_rate, groups=self.groups,
activation=self.activation, use_bias=self.use_bias,
kernel_initializer=self.kernel_initializer, bias_initializer=self.bias_initializer,
kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer,
activity_regularizer=self.activity_regularizer, kernel_constraint=self.kernel_constraint,
bias_constraint=self.bias_constraint, trainable=self.trainable,
name=self.name + "_real_equiv")
class ComplexConv1D(ComplexConv):
    """1D complex-valued convolution layer (e.g. temporal convolution).

    Thin wrapper around :class:`ComplexConv` with ``rank=1``; see the base
    class for the meaning of every parameter.
    """

    def __init__(self, filters, kernel_size, strides=1, padding='valid', dtype=DEFAULT_COMPLEX_TYPE,
                 data_format='channels_last', dilation_rate=1, groups=1, activation=None, use_bias=True,
                 kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
                 kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, **kwargs):
        forwarded = dict(
            rank=1, dtype=dtype, filters=filters, kernel_size=kernel_size,
            strides=strides, padding=padding, data_format=data_format,
            dilation_rate=dilation_rate, groups=groups,
            activation=activations.get(activation), use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint))
        super(ComplexConv1D, self).__init__(**forwarded, **kwargs)
class ComplexConv2D(ComplexConv):
    """2D complex-valued convolution layer (e.g. spatial convolution over images).

    Creates a convolution kernel that is convolved with the layer input to
    produce a tensor of outputs. If `use_bias` is True, a bias vector is
    created and added to the outputs. Finally, if `activation` is not `None`,
    it is applied to the outputs as well.
    When using this layer as the first layer in a model, provide the keyword
    argument `input_shape` (tuple of integers, sample axis not included),
    e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in
    `data_format="channels_last"`.

    Input shape:
      4+D tensor with shape `batch_shape + (channels, rows, cols)` if
      `data_format='channels_first'`, or `batch_shape + (rows, cols, channels)`
      if `data_format='channels_last'`.
    Output shape:
      4+D tensor with shape `batch_shape + (filters, new_rows, new_cols)` if
      `data_format='channels_first'`, or `batch_shape + (new_rows, new_cols,
      filters)` if `data_format='channels_last'`. `rows` and `cols` values
      might have changed due to padding.
    Returns:
      A tensor of rank 4+ representing `activation(conv2d(inputs, kernel) + bias)`.
    Raises:
      ValueError: if `padding` is `"causal"`.
      ValueError: when both `strides > 1` and `dilation_rate > 1`.
    """

    def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1),
                 groups=1, activation=None, use_bias=True, dtype=DEFAULT_COMPLEX_TYPE,
                 kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
                 kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, **kwargs):
        """
        :param filters: Integer, dimensionality of the output space (number of output filters).
        :param kernel_size: Int or tuple/list of 2 ints: height and width of the convolution window.
        :param strides: Int or tuple/list of 2 ints: strides along height and width.
            Any stride value != 1 is incompatible with any `dilation_rate` value != 1.
        :param padding: one of `"valid"` (no padding) or `"same"` (pad evenly so the
            output keeps the input height/width). Case-insensitive.
        :param data_format: `channels_last` (default, `(batch, height, width, channels)`)
            or `channels_first` (`(batch, channels, height, width)`); defaults to the
            `image_data_format` value in your Keras config.
        :param dilation_rate: Int or tuple/list of 2 ints: dilation rate for dilated convolution.
            Any value != 1 is incompatible with any stride value != 1.
        :param groups: Positive integer; the input is split into `groups` along the channel
            axis and each group is convolved with `filters / groups` filters. Input
            channels and `filters` must both be divisible by `groups`.
        :param activation: Activation function to use. If you don't specify anything, no
            activation is applied. For complex :code:`dtype`, this must be a
            :code:`cvnn.activations` module.
        :param use_bias: Boolean, whether the layer uses a bias vector.
        :param kernel_initializer: Initializer for the `kernel` weights matrix (see `keras.initializers`).
        :param bias_initializer: Initializer for the bias vector (see `keras.initializers`).
        :param kernel_regularizer: Regularizer for the kernel weights (see `keras.regularizers`).
        :param bias_regularizer: Regularizer for the bias vector (see `keras.regularizers`).
        :param activity_regularizer: Regularizer for the layer output (see `keras.regularizers`).
        :param kernel_constraint: Constraint applied to the kernel matrix (see `keras.constraints`).
        :param bias_constraint: Constraint applied to the bias vector (see `keras.constraints`).
        """
        forwarded = dict(
            rank=2, dtype=dtype, filters=filters, kernel_size=kernel_size,
            strides=strides, padding=padding, data_format=data_format,
            dilation_rate=dilation_rate, groups=groups,
            activation=activations.get(activation), use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint))
        super(ComplexConv2D, self).__init__(**forwarded, **kwargs)
class ComplexConv3D(ComplexConv):
    """3D complex-valued convolution layer (e.g. spatial convolution over volumes).

    Creates a convolution kernel that is convolved with the layer input to
    produce a tensor of outputs. If `use_bias` is True, a bias vector is
    created and added to the outputs. Finally, if `activation` is not `None`,
    it is applied to the outputs as well.
    When using this layer as the first layer in a model, provide the keyword
    argument `input_shape` (tuple of integers, sample axis not included),
    e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 single-channel
    volumes in `data_format="channels_last"`.

    Arguments:
      filters: Integer, dimensionality of the output space (number of output
        filters in the convolution).
      kernel_size: Int or tuple/list of 3 ints: depth, height and width of the
        3D convolution window.
      strides: Int or tuple/list of 3 ints: strides along each spatial
        dimension. Any stride value != 1 is incompatible with any
        `dilation_rate` value != 1.
      padding: one of `"valid"` (no padding) or `"same"` (pad evenly so the
        output keeps the input spatial size). Case-insensitive.
      data_format: `channels_last` (default, `batch_shape + (d1, d2, d3,
        channels)`) or `channels_first` (`batch_shape + (channels, d1, d2,
        d3)`); defaults to the `image_data_format` value in your Keras config.
      dilation_rate: Int or tuple/list of 3 ints: dilation rate for dilated
        convolution. Any value != 1 is incompatible with any stride value != 1.
      groups: Positive integer; the input is split into `groups` along the
        channel axis, each convolved with `filters / groups` filters. Input
        channels and `filters` must both be divisible by `groups`.
      activation: Activation function to use; None means no activation.
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer for the kernel weights (see `keras.regularizers`).
      bias_regularizer: Regularizer for the bias vector (see `keras.regularizers`).
      activity_regularizer: Regularizer for the layer output (see `keras.regularizers`).
      kernel_constraint: Constraint applied to the kernel matrix (see `keras.constraints`).
      bias_constraint: Constraint applied to the bias vector (see `keras.constraints`).

    Input shape:
      5+D tensor with shape `batch_shape + (channels, conv_dim1, conv_dim2,
      conv_dim3)` if `data_format='channels_first'`, or `batch_shape +
      (conv_dim1, conv_dim2, conv_dim3, channels)` if
      `data_format='channels_last'`.
    Output shape:
      5+D tensor with shape `batch_shape + (filters, new_conv_dim1,
      new_conv_dim2, new_conv_dim3)` if `data_format='channels_first'`, or
      `batch_shape + (new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)`
      if `data_format='channels_last'`. The spatial dims might have changed
      due to padding.
    Returns:
      A tensor of rank 5+ representing `activation(conv3d(inputs, kernel) + bias)`.
    Raises:
      ValueError: if `padding` is `"causal"`.
      ValueError: when both `strides > 1` and `dilation_rate > 1`.
    """

    def __init__(self,
                 filters, kernel_size, dtype=DEFAULT_COMPLEX_TYPE, strides=(1, 1, 1), padding='valid', data_format=None,
                 dilation_rate=(1, 1, 1), groups=1, activation=None, use_bias=True,
                 kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
                 kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, **kwargs):
        forwarded = dict(
            rank=3, dtype=dtype, filters=filters, kernel_size=kernel_size,
            strides=strides, padding=padding, data_format=data_format,
            dilation_rate=dilation_rate, groups=groups,
            activation=activations.get(activation), use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint))
        super(ComplexConv3D, self).__init__(**forwarded, **kwargs)
class ComplexConv2DTranspose(ComplexConv2D):
"""
Transposed convolution layer. Sometimes (wrongly) called Deconvolution.
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`).
bias_initializer: Initializer for the bias vector (
see `keras.initializers`).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`(batch_size, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
If `output_padding` is specified:
```
new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
output_padding[0])
new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
output_padding[1])
```
Returns:
A tensor of rank 4 representing
`activation(conv2dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid', dtype=DEFAULT_COMPLEX_TYPE,
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=ComplexGlorotUniform(),
bias_initializer=Zeros(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(ComplexConv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding, dtype=dtype,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(self.output_padding, 2, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError(f'Stride {self.strides} must be greater than output padding {self.output_padding}')
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError(f'Inputs should have rank 4. Received input shape: {input_shape}')
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
if self.my_dtype.is_complex:
self.kernel_r = tf.Variable(
initial_value=self.kernel_initializer(shape=kernel_shape, dtype=self.my_dtype),
name='kernel_r',
constraint=self.kernel_constraint,
trainable=True
) # TODO: regularizer=self.kernel_regularizer,
self.kernel_i = tf.Variable(
initial_value=self.kernel_initializer(shape=kernel_shape, dtype=self.my_dtype),
name='kernel_i',
constraint=self.kernel_constraint,
trainable=True
) # TODO: regularizer=self.kernel_regularizer
if self.use_bias:
self.bias_r = tf.Variable(
initial_value=self.bias_initializer(shape=(self.filters,), dtype=self.my_dtype),
name='bias_r',
trainable=True
)
self.bias_i = tf.Variable(
initial_value=self.bias_initializer(shape=(self.filters,), dtype=self.my_dtype),
name='bias_i',
constraint=self.bias_constraint,
trainable=True
) # TODO: regularizer=self.bias_regularizer
else:
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.my_dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.my_dtype)
if not self.use_bias:
self.bias = None
self.built = True
    def call(self, inputs):
        """Apply the complex transposed convolution to ``inputs``.

        The complex product is expanded into four real-valued transposed
        convolutions: with input x = xr + i*xi and kernel k = kr + i*ki,
        Re(out) = xr*kr - xi*ki and Im(out) = xi*kr + xr*ki.
        """
        inputs_shape = tf.shape(inputs)
        batch_size = inputs_shape[0]
        if self.data_format == 'channels_first':
            h_axis, w_axis = 2, 3
        else:
            h_axis, w_axis = 1, 2
        # Prefer the static height/width when known; tf.shape() alone would
        # lose the static shape information.
        height, width = None, None
        if inputs.shape.rank is not None:
            dims = inputs.shape.as_list()
            height = dims[h_axis]
            width = dims[w_axis]
        height = height if height is not None else inputs_shape[h_axis]
        width = width if width is not None else inputs_shape[w_axis]
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.strides
        if self.output_padding is None:
            out_pad_h = out_pad_w = None
        else:
            out_pad_h, out_pad_w = self.output_padding
        # Infer the dynamic output shape:
        out_height = conv_utils.deconv_output_length(height,
                                                     kernel_h,
                                                     padding=self.padding,
                                                     output_padding=out_pad_h,
                                                     stride=stride_h,
                                                     dilation=self.dilation_rate[0])
        out_width = conv_utils.deconv_output_length(width,
                                                    kernel_w,
                                                    padding=self.padding,
                                                    output_padding=out_pad_w,
                                                    stride=stride_w,
                                                    dilation=self.dilation_rate[1])
        if self.data_format == 'channels_first':
            output_shape = (batch_size, self.filters, out_height, out_width)
        else:
            output_shape = (batch_size, out_height, out_width, self.filters)
        output_shape_tensor = tf.stack(output_shape)
        # Deconvolution part: split the input (and kernel) into real and
        # imaginary components and convolve each combination separately.
        inputs_r = tf.math.real(inputs)
        inputs_i = tf.math.imag(inputs)
        if self.my_dtype.is_complex:
            kernel_r = self.kernel_r
            kernel_i = self.kernel_i
            if self.use_bias:
                bias = tf.complex(self.bias_r, self.bias_i)
        else:
            kernel_r = tf.math.real(self.kernel)
            kernel_i = tf.math.imag(self.kernel)
            if self.use_bias:
                bias = self.bias
        # Naming: real_outputs_<input part>_<kernel part>, e.g. ri_rk is
        # real input convolved with real kernel.
        real_outputs_ri_rk = backend.conv2d_transpose(
            inputs_r,
            kernel_r,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        real_outputs_ii_ik = backend.conv2d_transpose(
            inputs_i,
            kernel_i,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        real_outputs_ri_ik = backend.conv2d_transpose(
            inputs_r,
            kernel_i,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        real_outputs_ii_rk = backend.conv2d_transpose(
            inputs_i,
            kernel_r,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        # Recombine per the complex multiplication rule.
        real_outputs = real_outputs_ri_rk - real_outputs_ii_ik
        imag_outputs = real_outputs_ii_rk + real_outputs_ri_ik
        outputs = tf.cast(tf.complex(real_outputs, imag_outputs), dtype=self.my_dtype)
        if not tf.executing_eagerly():
            # Infer the static output shape:
            out_shape = self.compute_output_shape(inputs.shape)
            outputs.set_shape(out_shape)
        # Apply bias
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, bias, data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
        # Apply activation function
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
return tf.TensorShape(output_shape)
def get_config(self):
config = super(ComplexConv2DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
| 51,395 | 49.636453 | 141 | py |
cvnn | cvnn-master/cvnn/layers/core.py | from abc import ABC, abstractmethod
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Flatten, Dense, InputLayer, Layer
from tensorflow.python.keras import backend as K
from tensorflow.keras import initializers
import tensorflow_probability as tfp
from tensorflow import TensorShape, Tensor
# from keras.utils import control_flow_util
# typing
from typing import Optional, Union, List, Tuple
# Own modules
from cvnn.activations import t_activation
from cvnn.initializers import ComplexGlorotUniform, Zeros, Ones, ComplexInitializer, INIT_TECHNIQUES
t_input = Union[Tensor, tuple, list]
t_input_shape = Union[TensorShape, List[TensorShape]]
DEFAULT_COMPLEX_TYPE = tf.as_dtype(np.complex64)
class ComplexLayer(ABC):
    """Abstract marker for layers that can produce a real-valued counterpart."""

    @abstractmethod
    def get_real_equivalent(self):
        """
        :return: Gets a real-valued COPY of the Complex Layer.
        """
        pass
def complex_input(shape=None, batch_size=None, name=None, dtype=DEFAULT_COMPLEX_TYPE,
                  sparse=False, tensor=None, ragged=False, **kwargs):
    """
    `complex_input()` is used to instantiate a Keras tensor.
    A Keras tensor is a TensorFlow symbolic tensor object,
    which we augment with certain attributes that allow us to build a Keras model
    just by knowing the inputs and outputs of the model.
    For instance, if `a`, `b` and `c` are Keras tensors,
    it becomes possible to do:
    `model = Model(input=[a, b], output=c)`
    Arguments:
        shape: A shape tuple (integers), not including the batch size.
            For instance, `shape=(32,)` indicates that the expected input
            will be batches of 32-dimensional vectors. Elements of this tuple
            can be None; 'None' elements represent dimensions where the shape is
            not known.
        batch_size: optional static batch size (integer).
        name: An optional name string for the layer.
            Should be unique in a model (do not reuse the same name twice).
            It will be autogenerated if it isn't provided.
        dtype: The data type expected by the input
        sparse: A boolean specifying whether the placeholder to be created is
            sparse. Only one of 'ragged' and 'sparse' can be True. Note that,
            if `sparse` is False, sparse tensors can still be passed into the
            input - they will be densified with a default value of 0.
        tensor: Optional existing tensor to wrap into the `Input` layer.
            If set, the layer will use the `tf.TypeSpec` of this tensor rather
            than creating a new placeholder tensor.
        ragged: A boolean specifying whether the placeholder to be created is
            ragged. Only one of 'ragged' and 'sparse' can be True. In this case,
            values of 'None' in the 'shape' argument represent ragged dimensions.
            For more information about RaggedTensors, see
            [this guide](https://www.tensorflow.org/guide/ragged_tensors).
        **kwargs: deprecated arguments support. Supports `batch_shape` and
            `batch_input_shape`.
    Returns:
      A `tensor`.
    Example:
    ```python
    # this is a logistic regression in Keras
    x = complex_input(shape=(32,))
    y = Dense(16, activation='softmax')(x)
    model = Model(x, y)
    ```
    Note that even if eager execution is enabled,
    `Input` produces a symbolic tensor (i.e. a placeholder).
    This symbolic tensor can be used with other
    TensorFlow ops, as such:
    ```python
    x = complex_input(shape=(32,))
    y = tf.square(x)
    ```
    Raises:
        ValueError: If both `sparse` and `ragged` are provided.
        ValueError: If both `shape` and (`batch_input_shape` or `batch_shape`) are provided.
        ValueError: If both `shape` and `tensor` are None.
        ValueError: if any unrecognized parameters are provided.
    """
    # Validate mutually exclusive / required arguments before building config.
    if sparse and ragged:
        raise ValueError(
            'Cannot set both sparse and ragged to True in a Keras input.')
    dtype = tf.as_dtype(dtype)
    input_layer_config = {'name': name, 'dtype': dtype.name, 'sparse': sparse,
                          'ragged': ragged, 'input_tensor': tensor}
    # `batch_shape` is the deprecated spelling of `batch_input_shape`.
    batch_input_shape = kwargs.pop('batch_input_shape',
                                   kwargs.pop('batch_shape', None))
    if shape is not None and batch_input_shape is not None:
        raise ValueError('Only provide the `shape` OR `batch_input_shape` argument '
                         'to Input, not both at the same time.')
    if batch_input_shape is None and shape is None and tensor is None:
        raise ValueError('Please provide to Input either a `shape`'
                         ' or a `tensor` argument. Note that '
                         '`shape` does not include the batch '
                         'dimension.')
    if kwargs:
        raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
    if batch_input_shape:
        shape = batch_input_shape[1:]
        input_layer_config.update({'batch_input_shape': batch_input_shape})
    else:
        input_layer_config.update(
            {'batch_size': batch_size, 'input_shape': shape})
    input_layer = ComplexInput(**input_layer_config)
    # Return tensor including `_keras_history`.
    # Note that in this case train_output and test_output are the same pointer.
    outputs = input_layer._inbound_nodes[0].output_tensors
    if isinstance(outputs, list) and len(outputs) == 1:
        return outputs[0]
    else:
        return outputs
class ComplexInput(InputLayer, ComplexLayer):
    """Input layer whose default dtype is complex (``DEFAULT_COMPLEX_TYPE``),
    so downstream complex layers receive complex tensors instead of having
    the input silently cast to float."""

    def __init__(self, input_shape=None, batch_size=None, dtype=DEFAULT_COMPLEX_TYPE, input_tensor=None, sparse=False,
                 name=None, ragged=False, **kwargs):
        super(ComplexInput, self).__init__(input_shape=input_shape, batch_size=batch_size, dtype=dtype,
                                           input_tensor=input_tensor, sparse=sparse,
                                           name=name, ragged=ragged, **kwargs
                                           )

    def get_real_equivalent(self):
        # A real-valued network carries the same information with twice as
        # many real features, hence the doubled last dimension.
        real_input_shape = self.input_shape[:-1] + (self.input_shape[-1] * 2,)
        # BUG FIX: the real equivalent must use the real counterpart of the
        # dtype (previously the complex dtype was passed through), matching
        # the other get_real_equivalent implementations in this module.
        return ComplexInput(input_shape=real_input_shape, batch_size=self.batch_size,
                            dtype=tf.dtypes.as_dtype(self.dtype).real_dtype,
                            input_tensor=self.input_tensor, sparse=self.sparse, name=self.name + "_real_equiv",
                            ragged=self.ragged)
class ComplexFlatten(Flatten, ComplexLayer):
    """Flatten layer that supports complex tensors by flattening the real and
    imaginary parts independently and recombining them afterwards."""

    def call(self, inputs: t_input):
        flat_real = super(ComplexFlatten, self).call(tf.math.real(inputs))
        flat_imag = super(ComplexFlatten, self).call(tf.math.imag(inputs))
        # Recombine, then cast back so real inputs keep their original dtype.
        return tf.cast(tf.complex(flat_real, flat_imag), inputs.dtype)

    def get_real_equivalent(self):
        # Flatten holds no weights, so a fresh instance is a full copy.
        return ComplexFlatten(name=self.name + "_real_equiv")
class ComplexDense(Dense, ComplexLayer):
    """
    Fully connected complex-valued layer.

    Implements the operation:
        activation(input * weights + bias)

    * where data types can be either complex or real.
    * activation is the element-wise activation function passed as the activation argument,
    * weights is a matrix created by the layer
    * bias is a bias vector created by the layer
    """

    def __init__(self, units: int, activation: t_activation = None, use_bias: bool = True,
                 kernel_initializer="ComplexGlorotUniform",
                 bias_initializer="Zeros",
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 dtype=DEFAULT_COMPLEX_TYPE,
                 init_technique: str = 'mirror',
                 **kwargs):
        """
        :param units: Positive integer, dimensionality of the output space.
        :param activation: Activation function to use.
            Either from keras.activations or cvnn.activations. For complex dtype, only cvnn.activations module supported.
            If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x).
        :param use_bias: Boolean, whether the layer uses a bias vector.
        :param kernel_initializer: Initializer for the kernel weights matrix.
            Recommended to use a `ComplexInitializer` such as `cvnn.initializers.ComplexGlorotUniform()` (default)
        :param bias_initializer: Initializer for the bias vector.
            Recommended to use a `ComplexInitializer` such as `cvnn.initializers.Zeros()` (default)
        :param dtype: Dtype of the input and layer.
        :param init_technique: One of 'mirror' or 'zero_imag'. Tells the initializer how to init complex number if
            the initializer was tensorflow's built in initializers (not supporting complex numbers).
            - 'mirror': Uses the initializer for both real and imaginary part.
                Note that some initializers such as Glorot or He will lose it's property if initialized this way.
            - 'zero_imag': Initializer real part and let imaginary part to zero.
        """
        if activation is None:
            activation = "linear"
        super(ComplexDense, self).__init__(units, activation=activation, use_bias=use_bias,
                                           kernel_initializer=kernel_initializer,
                                           bias_initializer=bias_initializer,
                                           kernel_constraint=kernel_constraint, kernel_regularizer=kernel_regularizer,
                                           **kwargs)
        # !Cannot override dtype of the layer because it has a read-only @property
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        self.init_technique = init_technique.lower()

    def build(self, input_shape):
        """Create kernel and bias variables.

        For a complex ``my_dtype`` the kernel is stored as two real-valued
        variables ``w_r``/``w_i`` (and the bias as ``b_r``/``b_i``); otherwise
        a single ``w``/``b`` pair is created.
        """
        if self.my_dtype.is_complex:
            # ComplexInitializers can be called with a complex dtype directly;
            # plain tf initializers only support the real counterpart.
            i_kernel_dtype = self.my_dtype if isinstance(self.kernel_initializer,
                                                         ComplexInitializer) else self.my_dtype.real_dtype
            i_bias_dtype = self.my_dtype if isinstance(self.bias_initializer,
                                                       ComplexInitializer) else self.my_dtype.real_dtype
            i_kernel_initializer = self.kernel_initializer
            i_bias_initializer = self.bias_initializer
            if not isinstance(self.kernel_initializer, ComplexInitializer):
                tf.print(f"WARNING: you are using a Tensorflow Initializer for complex numbers. "
                         f"Using {self.init_technique} method.")
                if self.init_technique in INIT_TECHNIQUES:
                    if self.init_technique == 'zero_imag':
                        # Initialize with the tf initializer, imaginary part zero.
                        i_kernel_initializer = initializers.Zeros()
                        i_bias_initializer = initializers.Zeros()
                else:
                    raise ValueError(f"Unsuported init_technique {self.init_technique}, "
                                     f"supported techniques are {INIT_TECHNIQUES}")
            self.w_r = self.add_weight('kernel_r',
                                       shape=(input_shape[-1], self.units),
                                       dtype=self.my_dtype.real_dtype,
                                       initializer=self.kernel_initializer,
                                       trainable=True,
                                       constraint=self.kernel_constraint, regularizer=self.kernel_regularizer)
            # BUG FIX: the imaginary kernel must use i_kernel_initializer so
            # that the 'zero_imag' technique actually zeroes it; previously
            # self.kernel_initializer was used, silently ignoring the option.
            self.w_i = self.add_weight('kernel_i',
                                       shape=(input_shape[-1], self.units),
                                       dtype=self.my_dtype.real_dtype,
                                       initializer=i_kernel_initializer,
                                       trainable=True,
                                       constraint=self.kernel_constraint, regularizer=self.kernel_regularizer)
            if self.use_bias:
                self.b_r = tf.Variable(
                    name='bias_r',
                    initial_value=self.bias_initializer(shape=(self.units,), dtype=i_bias_dtype),
                    trainable=self.use_bias
                )
                self.b_i = tf.Variable(
                    name='bias_i',
                    initial_value=i_bias_initializer(shape=(self.units,), dtype=i_bias_dtype),
                    trainable=self.use_bias
                )
        else:
            # TODO: For Complex you should probably want to use MY init for real keras. DO sth! at least error message
            self.w = self.add_weight('kernel',
                                     shape=(input_shape[-1], self.units),
                                     dtype=self.my_dtype,
                                     initializer=self.kernel_initializer,
                                     trainable=True,
                                     constraint=self.kernel_constraint, regularizer=self.kernel_regularizer)
            if self.use_bias:
                self.b = self.add_weight('bias', shape=(self.units,), dtype=self.my_dtype,
                                         initializer=self.bias_initializer, trainable=self.use_bias)

    def call(self, inputs: t_input):
        """Compute ``activation(inputs @ w + b)``, assembling the complex
        kernel/bias from their real and imaginary parts when needed."""
        if inputs.dtype != self.my_dtype:
            tf.print(f"WARNING: {self.name} - Expected input to be {self.my_dtype}, but received {inputs.dtype}.")
            if self.my_dtype.is_complex and inputs.dtype.is_floating:
                tf.print("\tThis is normally fixed using ComplexInput() "
                         "at the start (tf casts input automatically to real).")
            inputs = tf.cast(inputs, self.my_dtype)
        if self.my_dtype.is_complex:
            w = tf.complex(self.w_r, self.w_i)
            if self.use_bias:
                b = tf.complex(self.b_r, self.b_i)
        else:
            w = self.w
            if self.use_bias:
                b = self.b
        out = tf.matmul(inputs, w)
        if self.use_bias:
            out = out + b
        return self.activation(out)

    def get_real_equivalent(self, output_multiplier=2):
        """Return a real-dtype copy with ``units * output_multiplier`` units
        (a real network needs more units to match complex capacity)."""
        return ComplexDense(units=int(round(self.units * output_multiplier)),
                            activation=self.activation, use_bias=self.use_bias,
                            kernel_initializer=self.kernel_initializer, bias_initializer=self.bias_initializer,
                            kernel_constraint=self.kernel_constraint, kernel_regularizer=self.kernel_regularizer,
                            dtype=self.my_dtype.real_dtype, name=self.name + "_real_equiv")

    def get_config(self):
        config = super(ComplexDense, self).get_config()
        config.update({
            'dtype': self.my_dtype,
            'init_technique': self.init_technique
        })
        return config
class ComplexDropout(Layer, ComplexLayer):
    """
    Applies Dropout to the input.
    It works also with complex inputs!
    The Dropout layer randomly sets input units to 0 with a frequency of `rate`
    at each step during training time, which helps prevent overfitting.
    Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over
    all inputs is unchanged.
    Note that the Dropout layer only applies when `training` is set to True
    such that no values are dropped during inference. When using `model.fit`,
    `training` will be appropriately set to True automatically, and in other
    contexts, you can set the kwarg explicitly to True when calling the layer.
    (This is in contrast to setting `trainable=False` for a Dropout layer.
    `trainable` does not affect the layer's behavior, as Dropout does
    not have any variables/weights that can be frozen during training.)
    """

    def __init__(self, rate: float, noise_shape=None, seed: Optional[int] = None, **kwargs):
        """
        :param rate: Float between 0 and 1. Fraction of the input units to drop.
        :param noise_shape: 1D integer tensor representing the shape of the binary dropout mask that
            will be multiplied with the input.
            For instance, if your inputs have shape `(batch_size, timesteps, features)` and you want the dropout
            mask to be the same for all timesteps, you can use `noise_shape=(batch_size, 1, features)`.
        :param seed: A Python integer to use as random seed.
        """
        super(ComplexDropout, self).__init__(**kwargs)
        if isinstance(rate, (int, float)) and not 0 <= rate <= 1:
            raise ValueError(f'Invalid value {rate} received for `rate`, expected a value between 0 and 1.')
        self.rate = rate
        self.seed = seed
        self.noise_shape = noise_shape

    def _get_noise_shape(self, inputs):
        # Resolve `None` entries in self.noise_shape against the concrete
        # input shape, allowing custom noise shapes with dynamic dimensions.
        if self.noise_shape is None:
            return None
        concrete_inputs_shape = tf.shape(inputs)
        noise_shape = []
        for i, value in enumerate(self.noise_shape):
            noise_shape.append(concrete_inputs_shape[i] if value is None else value)
        return tf.convert_to_tensor(noise_shape)

    def call(self, inputs, training=None):
        """
        :param inputs: Input tensor (of any rank).
        :param training: Python boolean indicating whether the layer should behave in training mode (adding dropout)
            or in inference mode (doing nothing).
        """
        if training is None:
            training = K.learning_phase()
            tf.print(f"Training was None and now is {training}")
            # This is used for my own debugging, I don't know WHEN this happens,
            # I trust K.learning_phase() returns a correct boolean.
        if not training:
            return inputs
        # A real-valued mask is drawn once and applied to both components,
        # so real and imaginary parts are dropped together.
        # BUG FIX: use _get_noise_shape(inputs) (previously the raw
        # self.noise_shape was passed, so a noise shape containing `None`
        # entries would crash tf.nn.dropout).
        drop_filter = tf.nn.dropout(tf.ones(tf.shape(inputs)), rate=self.rate,
                                    noise_shape=self._get_noise_shape(inputs), seed=self.seed)
        y_out = tf.multiply(tf.cast(drop_filter, dtype=inputs.dtype), inputs)
        y_out = tf.cast(y_out, dtype=inputs.dtype)
        return y_out

    def compute_output_shape(self, input_shape):
        # Dropout never changes the shape.
        return input_shape

    def get_real_equivalent(self):
        # Dropout is dtype agnostic, so the real equivalent is a plain copy.
        return ComplexDropout(rate=self.rate, seed=self.seed, noise_shape=self.noise_shape,
                              name=self.name + "_real_equiv")

    def get_config(self):
        config = super(ComplexDropout, self).get_config()
        config.update({
            'rate': self.rate,
            'noise_shape': self.noise_shape,
            'seed': self.seed
        })
        return config
class ComplexBatchNormalization(Layer, ComplexLayer):
    """
    Complex Batch-Normalization as defined in section 3.5 of https://arxiv.org/abs/1705.09792

    The input is whitened with the inverse square root of the 2x2 covariance
    matrix of its real and imaginary parts, then optionally scaled (``gamma``)
    and shifted (``beta``).
    """

    def __init__(self, axis: Union[List[int], Tuple[int], int] = -1, momentum: float = 0.99,
                 center: bool = True, scale: bool = True, epsilon: float = 0.001,
                 beta_initializer=Zeros(), gamma_initializer=Ones(), dtype=DEFAULT_COMPLEX_TYPE,
                 moving_mean_initializer=Zeros(), moving_variance_initializer=Ones(), cov_method: int = 2,
                 **kwargs):
        """
        :param axis: Axis (or axes) that hold the per-feature statistics.
        :param momentum: Momentum for the moving mean / variance.
        :param center: If True, add a learnable offset ``beta``.
        :param scale: If True, multiply by a learnable ``gamma``.
        :param epsilon: Small float added to the covariance for stability.
        :param cov_method: 1 or 2; two equivalent ways of computing the
            2x2 real/imag covariance (kept for comparison/debugging).
        """
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        self.epsilon = epsilon
        self.cov_method = cov_method
        if isinstance(axis, int):
            axis = [axis]
        self.axis = list(axis)
        super(ComplexBatchNormalization, self).__init__(**kwargs)
        self.momentum = momentum
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(moving_variance_initializer)
        self.center = center
        self.scale = scale

    def build(self, input_shape):
        """Create gamma/beta and the moving mean/covariance variables."""
        self.epsilon_matrix = tf.eye(2, dtype=self.my_dtype.real_dtype) * self.epsilon
        # Cast the negative indices to positive
        self.axis = [len(input_shape) + ax if ax < 0 else ax for ax in self.axis]
        # Axes the statistics are reduced over (everything not in self.axis).
        self.used_axis = [ax for ax in range(0, len(input_shape)) if ax not in self.axis]
        desired_shape = [input_shape[ax] for ax in self.axis]
        if self.my_dtype.is_complex:
            self.gamma_r = tf.Variable(
                name='gamma_r',
                initial_value=self.gamma_initializer(shape=tuple(desired_shape), dtype=self.my_dtype),
                trainable=True
            )
            self.gamma_i = tf.Variable(
                name='gamma_i',
                initial_value=Zeros()(shape=tuple(desired_shape), dtype=self.my_dtype),
                trainable=True
            )  # I think I just need to scale with gamma, so by default I leave the imag part to zero
            self.beta_r = tf.Variable(
                name="beta_r",
                initial_value=self.beta_initializer(shape=desired_shape, dtype=self.my_dtype),
                trainable=True
            )
            self.beta_i = tf.Variable(
                name="beta_i",
                initial_value=self.beta_initializer(shape=desired_shape, dtype=self.my_dtype),
                trainable=True
            )
            self.moving_mean = tf.Variable(
                name='moving_mean',
                initial_value=tf.complex(real=self.moving_mean_initializer(shape=desired_shape,
                                                                           dtype=self.my_dtype),
                                         imag=self.moving_mean_initializer(shape=desired_shape,
                                                                           dtype=self.my_dtype)),
                trainable=False
            )
            self.moving_var = tf.Variable(
                name='moving_var',
                initial_value=tf.eye(2) * self.moving_variance_initializer(shape=tuple(desired_shape) + (2, 2),
                                                                           dtype=self.my_dtype) / tf.math.sqrt(2.),
                trainable=False
            )
        else:
            self.gamma = tf.Variable(
                name='gamma',
                initial_value=self.gamma_initializer(shape=tuple(desired_shape), dtype=self.my_dtype),
                trainable=True
            )
            self.beta = tf.Variable(
                name="beta",
                initial_value=self.beta_initializer(shape=desired_shape, dtype=self.my_dtype),
                trainable=True
            )
            self.moving_mean = tf.Variable(
                name='moving_mean',
                initial_value=self.moving_mean_initializer(shape=desired_shape, dtype=self.my_dtype),
                trainable=False
            )
            self.moving_var = tf.Variable(
                name='moving_var',
                initial_value=tf.eye(2, dtype=self.my_dtype) * self.moving_variance_initializer(
                    shape=tuple(desired_shape) + (2, 2),
                    dtype=self.my_dtype),
                trainable=False
            )

    def call(self, inputs, training=None):
        """Normalize inputs with batch statistics (training) or the moving
        statistics (inference), then apply optional gamma scale / beta shift."""
        if inputs.dtype != self.my_dtype:
            tf.print(f"Warning: Expecting input dtype {self.my_dtype} but got {inputs.dtype}. "
                     f"Automatic cast will be done.")
            inputs = tf.cast(inputs, dtype=self.my_dtype)
        if training is None:
            training = K.learning_phase()
            tf.print(f"Training was None and now is {training}")
            # This is used for my own debugging, I don't know WHEN this happens,
            # I trust K.learning_phase() returns a correct boolean.
        if training:
            # First get the mean and var
            mean = tf.math.reduce_mean(inputs, axis=self.used_axis)
            if self.cov_method == 1:
                # Stack real/imag along features, then gather the per-feature
                # 2x2 sub-blocks of the big covariance matrix.
                X_20 = tf.concat((tf.math.real(inputs), tf.math.imag(inputs)), axis=-1)
                var_20_20 = tfp.stats.covariance(X_20, sample_axis=self.used_axis, event_axis=-1)
                valu = int(var_20_20.shape[-1] / 2)
                indices = [([[i, i], [i, i + valu]], [[i + valu, i], [i + valu, i + valu]]) for i in range(0, valu)]
                var = tf.gather_nd(var_20_20, indices=indices)
            elif self.cov_method == 2:
                # Stack real/imag as a trailing 2-dim event axis directly.
                X_10_2 = tf.stack((tf.math.real(inputs), tf.math.imag(inputs)), axis=-1)
                var_10_2_2 = tfp.stats.covariance(X_10_2, sample_axis=self.used_axis, event_axis=-1)
                var = var_10_2_2
            else:
                # BUG FIX: this previously referenced the nonexistent
                # self.method, raising AttributeError instead of ValueError.
                raise ValueError(f"Method {self.cov_method} not implemented")
            # Update the moving statistics with the batch statistics.
            self.moving_mean.assign(self.momentum * self.moving_mean + (1. - self.momentum) * mean)
            self.moving_var.assign(self.moving_var * self.momentum + var * (1. - self.momentum))
            out = self._normalize(inputs, var, mean)
        else:
            out = self._normalize(inputs, self.moving_var, self.moving_mean)
        if self.scale:
            if self.my_dtype.is_complex:
                gamma = tf.complex(self.gamma_r, self.gamma_i)  # TODO: Should this be real valued?
            else:
                gamma = self.gamma
            out = gamma * out
        if self.center:
            if self.my_dtype.is_complex:
                beta = tf.complex(self.beta_r, self.beta_i)
            else:
                beta = self.beta
            out = out + beta
        return out

    def _normalize(self, inputs, var, mean):
        """
        :param inputs: Tensor
        :param var: Tensor of shape [..., 2, 2], if inputs dtype is real, var[slice] = [[var_slice, 0], [0, 0]]
        :param mean: Tensor with the mean in the corresponding dtype (same shape as inputs)
        """
        complex_zero_mean = inputs - mean
        # Inv and sqrtm is done over 2 inner most dimension [..., M, M] so it should be [..., 2, 2] for us.
        inv_sqrt_var = tf.linalg.sqrtm(tf.linalg.inv(var + self.epsilon_matrix))  # var^(-1/2)
        # Separate real and imag so I go from shape [...] to [..., 2]
        zero_mean = tf.stack((tf.math.real(complex_zero_mean), tf.math.imag(complex_zero_mean)), axis=-1)
        # I expand dims to make the mult of matrix [..., 2, 2] and [..., 2, 1] resulting in [..., 2, 1]
        inputs_hat = tf.matmul(inv_sqrt_var, tf.expand_dims(zero_mean, axis=-1))
        # Then I squeeze to remove the last shape so I go from [..., 2, 1] to [..., 2].
        # Use reshape and not squeeze in case I have 1 channel for example.
        squeeze_inputs_hat = tf.reshape(inputs_hat, shape=tf.shape(inputs_hat)[:-1])
        # Get complex data
        complex_inputs_hat = tf.cast(tf.complex(squeeze_inputs_hat[..., 0], squeeze_inputs_hat[..., 1]),
                                     dtype=self.my_dtype)
        return complex_inputs_hat

    def get_real_equivalent(self):
        # BUG FIX: pass the scalar self.epsilon (epsilon_matrix is a 2x2
        # tensor that only exists after build) and the real counterpart of
        # the dtype, consistent with the other layers' real equivalents.
        return ComplexBatchNormalization(axis=self.axis, momentum=self.momentum, center=self.center, scale=self.scale,
                                         beta_initializer=self.beta_initializer, epsilon=self.epsilon,
                                         gamma_initializer=self.gamma_initializer, dtype=self.my_dtype.real_dtype,
                                         moving_mean_initializer=self.moving_mean_initializer,
                                         moving_variance_initializer=self.moving_variance_initializer)

    def get_config(self):
        config = super(ComplexBatchNormalization, self).get_config()
        config.update({
            'axis': self.axis,
            'momentum': self.momentum,
            'center': self.center,
            'scale': self.scale,
            'epsilon': self.epsilon,           # previously lost on round-trip
            'cov_method': self.cov_method,     # previously lost on round-trip
            'beta_initializer': self.beta_initializer,
            'gamma_initializer': self.gamma_initializer,
            'dtype': self.my_dtype,
            'moving_mean_initializer': self.moving_mean_initializer,
            'moving_variance_initializer': self.moving_variance_initializer
        })
        return config
| 29,931 | 49.560811 | 135 | py |
cvnn | cvnn-master/cvnn/layers/upsampling.py | import tensorflow as tf
from tensorflow.keras import backend
from tensorflow.keras.layers import UpSampling2D
from typing import Optional, Union, Tuple
from cvnn.layers.core import ComplexLayer
from cvnn.layers.core import DEFAULT_COMPLEX_TYPE
class ComplexUpSampling2D(UpSampling2D, ComplexLayer):
    def __init__(self, size: Union[int, Tuple[int, int]] = (2, 2),
                 data_format: Optional[str] = None, interpolation: str = 'nearest',
                 align_corners: bool = False, dtype=DEFAULT_COMPLEX_TYPE, **kwargs):
        """
        :param size: Int, or tuple of 2 integers. The upsampling factors for rows and columns.
        :param data_format: string, one of channels_last (default) or channels_first.
            The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape
            (batch_size, height, width, channels) while channels_first corresponds to inputs with shape
            (batch_size, channels, height, width).
        :param interpolation: A string, one of nearest or bilinear.
        :param align_corners: if True, the corner pixels of the input and output tensors are aligned,
            and thus preserving the values at those pixels.
            Example of align corners: https://discuss.pytorch.org/t/what-we-should-use-align-corners-false/22663/9
        """
        self.factor_upsample = size
        # NOTE(review): align_corners is accepted and serialized but not used
        # by call() below — confirm whether backend.resize_images should
        # honour it, or the parameter should be deprecated.
        self.align_corners = align_corners
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        super(ComplexUpSampling2D, self).__init__(size=size, data_format=data_format, interpolation=interpolation,
                                                  dtype=self.my_dtype.real_dtype, **kwargs)

    def call(self, inputs):
        # Upsample real and imaginary parts independently and recombine.
        result = tf.complex(
            backend.resize_images(tf.math.real(inputs), self.size[0], self.size[1], self.data_format,
                                  interpolation=self.interpolation),
            backend.resize_images(tf.math.imag(inputs), self.size[0], self.size[1], self.data_format,
                                  interpolation=self.interpolation),
        )
        # Integer inputs cannot be cast back to an integer complex type.
        casted_value = inputs.dtype if not inputs.dtype.is_integer else tf.float32
        return tf.cast(result, dtype=casted_value)

    def get_real_equivalent(self):
        return ComplexUpSampling2D(size=self.factor_upsample, data_format=self.data_format,
                                   interpolation=self.interpolation, dtype=self.my_dtype.real_dtype)

    def get_config(self):
        # BUG FIX: the config previously stored the upsample factor under the
        # key 'factor_upsample', which __init__ does not accept, so
        # from_config(get_config()) failed.  The base class already
        # serializes the factor as 'size'.
        config = super(ComplexUpSampling2D, self).get_config()
        config.update({
            'dtype': self.my_dtype,
            'align_corners': self.align_corners
        })
        return config
if __name__ == "__main__":
    # Quick manual check: upsample a complex identity-like image by (3, 5).
    image = tf.constant([
        [1., 0., 0, 0, 0],
        [0, 1., 0, 0, 0],
        [0, 0, 1., 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
    ])
    image = tf.complex(image, image)
    image = image[tf.newaxis, ..., tf.newaxis]
    result = ComplexUpSampling2D([3, 5])(image)
    # Removed the leftover `pdb.set_trace()` debugger breakpoint; report the
    # result instead.
    print(result.shape)
| 3,007 | 43.895522 | 114 | py |
eco-dqn | eco-dqn-master/src/networks/mpnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class MPNN(nn.Module):
    """Message-Passing Neural Network over dense graph observations.

    The observation concatenates, per node, ``n_obs_in`` node features with a
    row of the (weighted) adjacency matrix.  Node embeddings are initialised
    from the node features, refined through ``n_layers`` message-passing
    rounds, and reduced to per-node outputs by a readout layer.
    """

    def __init__(self,
                 n_obs_in=7,
                 n_layers=3,
                 n_features=64,
                 tied_weights=False,
                 n_hid_readout=None):
        """
        :param n_obs_in: Number of observation features per node.
        :param n_layers: Number of message-passing rounds.
        :param n_features: Width of node/edge embeddings.
        :param tied_weights: If True, reuse one update layer for every round.
        :param n_hid_readout: Hidden layer sizes of the readout MLP.
            Defaults to none (replaces the old mutable ``[]`` default).
        """
        super().__init__()
        if n_hid_readout is None:
            n_hid_readout = []

        self.n_obs_in = n_obs_in
        self.n_layers = n_layers
        self.n_features = n_features
        self.tied_weights = tied_weights

        self.node_init_embedding_layer = nn.Sequential(
            nn.Linear(n_obs_in, n_features, bias=False),
            nn.ReLU()
        )

        self.edge_embedding_layer = EdgeAndNodeEmbeddingLayer(n_obs_in, n_features)

        if self.tied_weights:
            self.update_node_embedding_layer = UpdateNodeEmbeddingLayer(n_features)
        else:
            self.update_node_embedding_layer = nn.ModuleList(
                [UpdateNodeEmbeddingLayer(n_features) for _ in range(self.n_layers)])

        self.readout_layer = ReadoutLayer(n_features, n_hid_readout)

    @torch.no_grad()
    def get_normalisation(self, adj):
        """Return per-node neighbour counts (clamped to >= 1) as a float tensor."""
        norm = torch.sum((adj != 0), dim=1).unsqueeze(-1)
        norm[norm == 0] = 1  # avoid division by zero for isolated nodes
        return norm.float()

    def forward(self, obs):
        """
        :param obs: Observation of shape (n_nodes + n_obs_in, n_nodes), or a
            batch of them with a leading batch dimension.
        :return: Per-node outputs from the readout layer (squeezed).
        """
        if obs.dim() == 2:
            obs = obs.unsqueeze(0)

        # BUG FIX: transpose out-of-place.  The previous in-place
        # ``obs.transpose_(-1, -2)`` permanently mutated the caller's tensor
        # whenever a batched (3-D) observation was passed in.
        obs = obs.transpose(-1, -2)

        # Split per-node observation features from the adjacency rows.
        node_features = obs[:, :, 0:self.n_obs_in]
        adj = obs[:, :, self.n_obs_in:]

        norm = self.get_normalisation(adj)

        init_node_embeddings = self.node_init_embedding_layer(node_features)
        edge_embeddings = self.edge_embedding_layer(node_features, adj, norm)

        # Initialise embeddings.
        current_node_embeddings = init_node_embeddings

        if self.tied_weights:
            for _ in range(self.n_layers):
                current_node_embeddings = self.update_node_embedding_layer(current_node_embeddings,
                                                                           edge_embeddings,
                                                                           norm,
                                                                           adj)
        else:
            for i in range(self.n_layers):
                current_node_embeddings = self.update_node_embedding_layer[i](current_node_embeddings,
                                                                              edge_embeddings,
                                                                              norm,
                                                                              adj)

        out = self.readout_layer(current_node_embeddings)
        out = out.squeeze()

        return out
class EdgeAndNodeEmbeddingLayer(nn.Module):
    """Embed each edge from its weight plus the incident node's observation,
    then mean-aggregate the embedded edges per node (together with a
    normalisation feature) into one edge embedding per node."""

    def __init__(self, n_obs_in, n_features):
        """
        :param n_obs_in: Number of per-node observation features.
        :param n_features: Width of the edge embedding produced per node.
        """
        super().__init__()
        self.n_obs_in = n_obs_in
        self.n_features = n_features
        # Edge input = 1 adjacency weight + n_obs_in node features; the output
        # is n_features - 1 because the last slot is filled with the
        # normalisation term appended in forward().
        self.edge_embedding_NN = nn.Linear(int(n_obs_in+1), n_features-1, bias=False)
        self.edge_feature_NN = nn.Linear(n_features, n_features, bias=False)

    def forward(self, node_features, adj, norm):
        """
        :param node_features: (batch, n_nodes, n_obs_in) node observations.
        :param adj: (batch, n_nodes, n_nodes) weighted adjacency matrix.
        :param norm: (batch, n_nodes, 1) neighbour counts (>= 1) for averaging.
        :return: (batch, n_nodes, n_features) per-node edge embeddings.
        """
        # Per (i, j) pair build [adj_ij, features of node j]:
        # shape (batch, n_nodes, n_nodes, n_obs_in + 1).
        edge_features = torch.cat([adj.unsqueeze(-1),
                                   node_features.unsqueeze(-2).transpose(-2, -3).repeat(1, adj.shape[-2], 1, 1)],
                                  dim=-1)

        # Zero the rows of non-edges so they don't contribute to the sum below.
        edge_features *= (adj.unsqueeze(-1)!=0).float()

        # Flatten the (i, j) pair axes so the Linear can run over all pairs.
        edge_features_unrolled = torch.reshape(edge_features, (edge_features.shape[0], edge_features.shape[1] * edge_features.shape[1], edge_features.shape[-1]))
        embedded_edges_unrolled = F.relu(self.edge_embedding_NN(edge_features_unrolled))
        # Restore the (batch, i, j, feature) layout.
        embedded_edges_rolled = torch.reshape(embedded_edges_unrolled,
                                              (adj.shape[0], adj.shape[1], adj.shape[1], self.n_features-1))
        # Mean over neighbours j (sum divided by neighbour count).
        embedded_edges = embedded_edges_rolled.sum(dim=2) / norm

        # Append the max-normalised neighbour count as the final feature.
        edge_embeddings = F.relu(self.edge_feature_NN(torch.cat([embedded_edges, norm / norm.max()],dim=-1)))

        return edge_embeddings
class UpdateNodeEmbeddingLayer(nn.Module):
    """One round of message passing.

    Aggregates neighbouring node embeddings (adjacency-weighted mean), combines
    them with the static edge embeddings into messages, then updates each node
    embedding from (current embedding, message).
    """
    def __init__(self, n_features):
        super().__init__()
        self.message_layer = nn.Linear(2 * n_features, n_features, bias=False)
        self.update_layer = nn.Linear(2 * n_features, n_features, bias=False)
    def forward(self, current_node_embeddings, edge_embeddings, norm, adj):
        neighbour_mean = torch.matmul(adj, current_node_embeddings) / norm
        msg_input = torch.cat([neighbour_mean, edge_embeddings], dim=-1)
        messages = F.relu(self.message_layer(msg_input))
        update_input = torch.cat([current_node_embeddings, messages], dim=-1)
        return F.relu(self.update_layer(update_input))
class ReadoutLayer(nn.Module):
    """Maps per-node embeddings to one scalar per node.

    A shared MLP sees the concatenation of (i) a graph-level pooled embedding
    broadcast to every node and (ii) the node's own embedding.
    """
    def __init__(self, n_features, n_hid=[], bias_pool=False, bias_readout=True):
        super().__init__()
        self.layer_pooled = nn.Linear(int(n_features), int(n_features), bias=bias_pool)
        if type(n_hid) != list:
            n_hid = [n_hid]
        # MLP widths: 2*n_features input -> hidden layers -> scalar output.
        widths = [2 * n_features] + n_hid + [1]
        self.layers_readout = nn.ModuleList(
            [nn.Linear(w_in, w_out, bias=bias_readout) for w_in, w_out in zip(widths, widths[1:])]
        )
    def forward(self, node_embeddings):
        n_nodes = node_embeddings.shape[1]
        # Graph-level context: mean-pool node embeddings, then project.
        pooled = self.layer_pooled(node_embeddings.sum(dim=1) / n_nodes)
        pooled_per_node = pooled.repeat(1, 1, n_nodes).view(node_embeddings.shape)
        features = F.relu(torch.cat([pooled_per_node, node_embeddings], dim=-1))
        # ReLU between layers; no activation after the final layer.
        *hidden, final = self.layers_readout
        for fc in hidden:
            features = F.relu(fc(features))
        return final(features)
eco-dqn | eco-dqn-master/src/envs/spinsystem.py | from abc import ABC, abstractmethod
from collections import namedtuple
from operator import matmul
import numpy as np
import torch.multiprocessing as mp
from numba import jit, float64, int64
from src.envs.utils import (EdgeType,
RewardSignal,
ExtraAction,
OptimisationTarget,
Observable,
SpinBasis,
DEFAULT_OBSERVABLES,
GraphGenerator,
RandomGraphGenerator,
HistoryBuffer)
# A container for get_result function below. Works just like tuple, but prettier.
# NOTE(review): no 'get_result' function is defined in this module and
# ActionResult appears unused here — confirm external users before removing.
ActionResult = namedtuple("action_result", ("snapshot","observation","reward","is_done","info"))
class SpinSystemFactory(object):
    '''
    Factory class for returning new SpinSystem.
    '''
    @staticmethod
    def get(graph_generator=None,
            max_steps=20,
            observables = DEFAULT_OBSERVABLES,
            reward_signal = RewardSignal.DENSE,
            extra_action = ExtraAction.PASS,
            optimisation_target = OptimisationTarget.ENERGY,
            spin_basis = SpinBasis.SIGNED,
            norm_rewards=False,
            memory_length=None,  # None means an infinite memory.
            horizon_length=None,  # None means an infinite horizon.
            stag_punishment=None,  # None means no punishment for re-visiting states.
            basin_reward=None,  # None means no reward for reaching a local minima.
            reversible_spins=True,  # Whether the spins can be flipped more than once (i.e. True-->Georgian MDP).
            init_snap=None,
            seed=None):
        """Construct the SpinSystem variant matching the graph generator.

        Both variants take an identical argument list; only the concrete class
        differs, chosen by whether the generator produces biased graphs.  The
        previous duplicated call sites are collapsed into one.
        """
        system_cls = SpinSystemBiased if graph_generator.biased else SpinSystemUnbiased
        return system_cls(graph_generator, max_steps,
                          observables, reward_signal, extra_action, optimisation_target, spin_basis,
                          norm_rewards, memory_length, horizon_length, stag_punishment, basin_reward,
                          reversible_spins,
                          init_snap, seed)
class SpinSystemBase(ABC):
'''
SpinSystemBase implements the functionality of a SpinSystem that is common to both
biased and unbiased systems. Methods that require significant enough changes between
these two case to not readily be served by an 'if' statement are left abstract, to be
implemented by a specialised subclass.
'''
# Note these are defined at the class level of SpinSystem to ensure that SpinSystem
# can be pickled.
    class action_space():
        # Minimal gym-style discrete action space: one action per flippable spin
        # (plus any extra action).
        def __init__(self, n_actions):
            self.n = n_actions
            self.actions = np.arange(self.n)
        def sample(self, n=1):
            # Uniformly sample n actions (with replacement).
            return np.random.choice(self.actions, n)
    class observation_space():
        # Minimal gym-style observation space: only the shape is exposed.
        def __init__(self, n_spins, n_observables):
            self.shape = [n_spins, n_observables]
    def __init__(self,
                 graph_generator=None,
                 max_steps=20,
                 observables=DEFAULT_OBSERVABLES,
                 reward_signal = RewardSignal.DENSE,
                 extra_action = ExtraAction.PASS,
                 optimisation_target=OptimisationTarget.ENERGY,
                 spin_basis=SpinBasis.SIGNED,
                 norm_rewards=False,
                 memory_length=None,  # None means an infinite memory.
                 horizon_length=None,  # None means an infinite horizon.
                 stag_punishment=None,
                 basin_reward=None,
                 reversible_spins=False,
                 init_snap=None,
                 seed=None):
        '''
        Initialise the spin system.

        Args:
            graph_generator: A GraphGenerator (or subclass) providing coupling
                matrices (and biases, if biased).  A default random generator
                is used when None.
            max_steps: Maximum number of steps before termination.
            observables: Ordered observables; the first MUST be SPIN_STATE.
            reward_signal: RewardSignal enum determining how and when rewards
                are returned.
            extra_action: ExtraAction enum determining if and what additional
                action is allowed, beyond simply flipping spins.
            optimisation_target: Optimise energy or max-cut.
            spin_basis: Whether spins are exposed as {-1,1} or {0,1}.
            norm_rewards: Divide rewards by the number of spins.
            memory_length / horizon_length: Finite-memory / finite-horizon
                settings (None --> infinite).
            stag_punishment: Penalty for re-visiting states (None --> off).
            basin_reward: Reward for reaching a local minimum (None --> off).
            reversible_spins: Whether spins can be flipped more than once.
            init_snap: Optional snapshot to load the spin system into a
                pre-configured state for MCTS.
            seed: Optional random seed.
        '''
        if seed != None:
            np.random.seed(seed)
        # Ensure first observable is the spin state.
        # This allows us to access the spins as self.state[0,:self.n_spins.]
        assert observables[0] == Observable.SPIN_STATE, "First observable must be Observation.SPIN_STATE."
        self.observables = list(enumerate(observables))
        self.extra_action = extra_action
        if graph_generator!=None:
            assert isinstance(graph_generator,GraphGenerator), "graph_generator must be a GraphGenerator implementation."
            self.gg = graph_generator
        else:
            # provide a default graph generator if one is not passed
            self.gg = RandomGraphGenerator(n_spins=20,
                                           edge_type=EdgeType.DISCRETE,
                                           biased=False,
                                           extra_action=(extra_action!=extra_action.NONE))
        self.n_spins = self.gg.n_spins  # Total number of spins in episode
        self.max_steps = max_steps  # Number of actions before reset
        self.reward_signal = reward_signal
        self.norm_rewards = norm_rewards
        self.n_actions = self.n_spins
        if extra_action != ExtraAction.NONE:
            self.n_actions+=1
        self.action_space = self.action_space(self.n_actions)
        self.observation_space = self.observation_space(self.n_spins, len(self.observables))
        self.current_step = 0
        if self.gg.biased:
            self.matrix, self.bias = self.gg.get()
        else:
            self.matrix = self.gg.get()
            self.bias = None
        self.optimisation_target = optimisation_target
        self.spin_basis = spin_basis
        self.memory_length = memory_length
        self.horizon_length = horizon_length if horizon_length is not None else self.max_steps
        self.stag_punishment = stag_punishment
        self.basin_reward = basin_reward
        self.reversible_spins = reversible_spins
        # reset() already recomputes score/best_* below; the re-assignments
        # here repeat that work with a fresh score calculation.
        self.reset()
        self.score = self.calculate_score()
        if self.reward_signal == RewardSignal.SINGLE:
            self.init_score = self.score
        self.best_score = self.score
        # NOTE(review): unlike reset(), this stores a view of the full action
        # row (no .copy(), no [:self.n_spins] slice) — confirm intended.
        self.best_spins = self.state[0,:]
        if init_snap != None:
            # NOTE(review): load_snapshot is not defined in this class — it is
            # presumably provided elsewhere; verify before using init_snap.
            self.load_snapshot(init_snap)
    def reset(self, spins=None):
        """
        Draw a fresh graph from the generator and reinitialise the episode.

        Args:
            spins: Optional initial spin configuration; random (reversible) or
                all +1 (irreversible) when None.
        Returns:
            The initial observation.
        """
        self.current_step = 0
        if self.gg.biased:
            # self.matrix, self.bias = self.gg.get(with_padding=(self.extra_action != ExtraAction.NONE))
            self.matrix, self.bias = self.gg.get()
        else:
            # self.matrix = self.gg.get(with_padding=(self.extra_action != ExtraAction.NONE))
            self.matrix = self.gg.get()
        self._reset_graph_observables()
        # Probe rewards from the all-up configuration to find the largest
        # immediate reward magnitude (used to normalise observables).
        spinsOne = np.array([1] * self.n_spins)
        local_rewards_available = self.get_immeditate_rewards_avaialable(spinsOne)
        local_rewards_available = local_rewards_available[np.nonzero(local_rewards_available)]
        if local_rewards_available.size == 0:
            # We've generated an empty graph, this is pointless, try again.
            # NOTE(review): the recursive call's return value is discarded and
            # execution continues below with the regenerated matrix — works,
            # but redoes the recursive call's state/score setup.
            self.reset()
        else:
            self.max_local_reward_available = np.max(local_rewards_available)
        self.state = self._reset_state(spins)
        self.score = self.calculate_score()
        if self.reward_signal == RewardSignal.SINGLE:
            self.init_score = self.score
        self.best_score = self.score
        self.best_obs_score = self.score
        self.best_spins = self.state[0, :self.n_spins].copy()
        self.best_obs_spins = self.state[0, :self.n_spins].copy()
        if self.memory_length is not None:
            # Finite memory: fill buffers with the initial score/spins.
            self.score_memory = np.array([self.best_score] * self.memory_length)
            self.spins_memory = np.array([self.best_spins] * self.memory_length)
            self.idx_memory = 1
        # NOTE(review): second call — the observables were already reset above.
        self._reset_graph_observables()
        if self.stag_punishment is not None or self.basin_reward is not None:
            self.history_buffer = HistoryBuffer()
        return self.get_observation()
def _reset_graph_observables(self):
# Reset observed adjacency matrix
if self.extra_action != self.extra_action.NONE:
# Pad adjacency matrix for disconnected extra-action spins of value 0.
self.matrix_obs = np.zeros((self.matrix.shape[0] + 1, self.matrix.shape[0] + 1))
self.matrix_obs [:-1, :-1] = self.matrix
else:
self.matrix_obs = self.matrix
# Reset observed bias vector,
if self.gg.biased:
if self.extra_action != self.extra_action.NONE:
# Pad bias for disconnected extra-action spins of value 0.
self.bias_obs = np.concatenate((self.bias, [0]))
else:
self.bias_obs = self.bias
    def _reset_state(self, spins=None):
        """Build the (n_observables, n_actions) state array for a new episode,
        with row 0 holding the signed spin configuration."""
        state = np.zeros((self.observation_space.shape[1], self.n_actions))
        if spins is None:
            if self.reversible_spins:
                # For reversible spins, initialise randomly to {+1,-1}.
                state[0, :self.n_spins] = 2 * np.random.randint(2, size=self.n_spins) - 1
            else:
                # For irreversible spins, initialise all to +1 (i.e. allowed to be flipped).
                state[0, :self.n_spins] = 1
        else:
            state[0, :] = self._format_spins_to_signed(spins)
        state = state.astype('float')
        # If any observables other than "immediate energy available" require setting to values other than
        # 0 at this stage, we should use a 'for k,v in enumerate(self.observables)' loop.
        for idx, obs in self.observables:
            if obs==Observable.IMMEDIATE_REWARD_AVAILABLE:
                state[idx, :self.n_spins] = self.get_immeditate_rewards_avaialable(spins=state[0, :self.n_spins]) / self.max_local_reward_available
            elif obs==Observable.NUMBER_OF_GREEDY_ACTIONS_AVAILABLE:
                immeditate_rewards_avaialable = self.get_immeditate_rewards_avaialable(spins=state[0, :self.n_spins])
                state[idx, :self.n_spins] = 1 - np.sum(immeditate_rewards_avaialable <= 0) / self.n_spins
        return state
def _get_spins(self, basis=SpinBasis.SIGNED):
spins = self.state[0, :self.n_spins]
if basis == SpinBasis.SIGNED:
pass
elif basis == SpinSystemBiased:
# convert {1,-1} --> {0,1}
spins[0, :] = (1 - spins[0, :]) / 2
else:
raise NotImplementedError("Unrecognised SpinBasis")
return spins
    def calculate_best_energy(self):
        """Brute-force the optimal configuration (exponential in n_spins),
        parallelised over processes for systems with more than 10 spins."""
        if self.n_spins <= 10:
            # Generally, for small systems the time taken to start multiple processes is not worth it.
            # NOTE(review): calculate_best_brute is not defined in the visible
            # code — presumably provided elsewhere; confirm.
            res = self.calculate_best_brute()
        else:
            # Start up processing pool
            # NOTE(review): true division leaves n_cpu a float; modern numpy
            # rejects a float 'num' in np.linspace below — verify on the
            # targeted numpy version.
            n_cpu = int(mp.cpu_count()) / 2
            pool = mp.Pool(mp.cpu_count())
            # Split up state trials across the number of cpus
            iMax = 2 ** (self.n_spins)
            args = np.round(np.linspace(0, np.ceil(iMax / n_cpu) * n_cpu, n_cpu + 1))
            arg_pairs = [list(args) for args in zip(args, args[1:])]
            # Try all the states.
            # res = pool.starmap(self._calc_over_range, arg_pairs)
            try:
                res = pool.starmap(self._calc_over_range, arg_pairs)
                # Return the best solution,
                idx_best = np.argmin([e for e, s in res])
                res = res[idx_best]
            except Exception as e:
                # Falling back to single-thread implementation.
                # res = self.calculate_best_brute()
                res = self._calc_over_range(0, 2 ** (self.n_spins))
            finally:
                # No matter what happens, make sure we tidy up after outselves.
                pool.close()
        if self.spin_basis == SpinBasis.BINARY:
            # convert {1,-1} --> {0,1}
            best_score, best_spins = res
            best_spins = (1 - best_spins) / 2
            res = best_score, best_spins
        if self.optimisation_target == OptimisationTarget.CUT:
            best_energy, best_spins = res
            best_cut = self.calculate_cut(best_spins)
            res = best_cut, best_spins
        elif self.optimisation_target == OptimisationTarget.ENERGY:
            pass
        else:
            raise NotImplementedError()
        return res
    def seed(self, seed):
        # NOTE(review): the 'seed' argument is ignored and the stored value is
        # returned; also, until set_seed() is called, self.seed is this bound
        # method itself.  Confirm the intended gym-style semantics.
        return self.seed
    def set_seed(self, seed):
        # NOTE(review): assigning self.seed here shadows the seed() method on
        # this instance.
        self.seed = seed
        np.random.seed(seed)
    def step(self, action):
        """Flip the chosen spin (or take the extra action) and advance one step.

        Args:
            action: Spin index to flip, or self.n_spins for the extra action.
        Returns:
            (observation, reward, done, None) gym-style tuple.
        Raises:
            NotImplementedError: If called after the episode already ended.
        """
        done = False
        rew = 0  # Default reward to zero.
        randomised_spins = False
        self.current_step += 1
        if self.current_step > self.max_steps:
            print("The environment has already returned done. Stop it!")
            raise NotImplementedError
        new_state = np.copy(self.state)
        ############################################################
        # 1. Performs the action and calculates the score change. #
        ############################################################
        if action==self.n_spins:
            if self.extra_action == ExtraAction.PASS:
                delta_score = 0
            if self.extra_action == ExtraAction.RANDOMISE:
                # Randomise the spin configuration.
                randomised_spins = True
                random_actions = np.random.choice([1, -1], self.n_spins)
                new_state[0, :] = self.state[0, :] * random_actions
                new_score = self.calculate_score(new_state[0, :])
                delta_score = new_score - self.score
                self.score = new_score
        else:
            # Perform the action and calculate the score change.
            new_state[0,action] = -self.state[0,action]
            if self.gg.biased:
                delta_score = self._calculate_score_change(new_state[0,:self.n_spins], self.matrix, self.bias, action)
            else:
                delta_score = self._calculate_score_change(new_state[0,:self.n_spins], self.matrix, action)
            self.score += delta_score
        #############################################################################################
        # 2. Calculate reward for action and update any memory buffers.                             #
        #    a) Calculate reward (always w.r.t best observable score).                              #
        #    b) If new global best has been found: update best ever score and spin parameters.      #
        #    c) If the memory buffer is finite (i.e. self.memory_length is not None):               #
        #          - Add score/spins to their respective buffers.                                   #
        #          - Update best observable score and spins w.r.t. the new buffers.                 #
        #       else (if the memory is infinite):                                                   #
        #          - If new best has been found: update best observable score and spin parameters.  #
        #############################################################################################
        self.state = new_state
        immeditate_rewards_avaialable = self.get_immeditate_rewards_avaialable()
        if self.score > self.best_obs_score:
            if self.reward_signal == RewardSignal.BLS:
                rew = self.score - self.best_obs_score
            elif self.reward_signal == RewardSignal.CUSTOM_BLS:
                rew = self.score - self.best_obs_score
                rew = rew / (rew + 0.1)
        if self.reward_signal == RewardSignal.DENSE:
            rew = delta_score
        elif self.reward_signal == RewardSignal.SINGLE and done:
            # NOTE(review): 'done' is always False at this point (it is only
            # set True in section 4 below), so the SINGLE reward is never
            # emitted — confirm whether this branch should run after the
            # termination check.
            rew = self.score - self.init_score
        if self.norm_rewards:
            rew /= self.n_spins
        if self.stag_punishment is not None or self.basin_reward is not None:
            visiting_new_state = self.history_buffer.update(action)
            if self.stag_punishment is not None:
                if not visiting_new_state:
                    rew -= self.stag_punishment
            if self.basin_reward is not None:
                if np.all(immeditate_rewards_avaialable <= 0):
                    # All immediate score changes are +ive <--> we are in a local minima.
                    if visiting_new_state:
                        # #####TEMP####
                        # if self.reward_signal != RewardSignal.BLS or (self.score > self.best_obs_score):
                        # ####TEMP####
                        rew += self.basin_reward
        if self.score > self.best_score:
            self.best_score = self.score
            self.best_spins = self.state[0, :self.n_spins].copy()
        if self.memory_length is not None:
            # For case of finite memory length.
            self.score_memory[self.idx_memory] = self.score
            self.spins_memory[self.idx_memory] = self.state[0, :self.n_spins]
            self.idx_memory = (self.idx_memory + 1) % self.memory_length
            self.best_obs_score = self.score_memory.max()
            self.best_obs_spins = self.spins_memory[self.score_memory.argmax()].copy()
        else:
            self.best_obs_score = self.best_score
            self.best_obs_spins = self.best_spins.copy()
        #############################################################################################
        # 3. Updates the state of the system (except self.state[0,:] as this is always the spin     #
        #    configuration and has already been done.                                               #
        #    a) Update self.state local features to reflect the chosen action.                      #
        #    b) Update global features in self.state (always w.r.t. best observable score/spins)    #
        #############################################################################################
        for idx, observable in self.observables:
            ### Local observables ###
            if observable==Observable.IMMEDIATE_REWARD_AVAILABLE:
                self.state[idx, :self.n_spins] = immeditate_rewards_avaialable / self.max_local_reward_available
            elif observable==Observable.TIME_SINCE_FLIP:
                self.state[idx, :] += (1. / self.max_steps)
                if randomised_spins:
                    self.state[idx, :] = self.state[idx, :] * (random_actions > 0)
                else:
                    self.state[idx, action] = 0
            ### Global observables ###
            elif observable==Observable.EPISODE_TIME:
                self.state[idx, :] += (1. / self.max_steps)
            elif observable==Observable.TERMINATION_IMMANENCY:
                # Update 'Immanency of episode termination'
                self.state[idx, :] = max(0, ((self.current_step - self.max_steps) / self.horizon_length) + 1)
            elif observable==Observable.NUMBER_OF_GREEDY_ACTIONS_AVAILABLE:
                self.state[idx, :] = 1 - np.sum(immeditate_rewards_avaialable <= 0) / self.n_spins
            elif observable==Observable.DISTANCE_FROM_BEST_SCORE:
                self.state[idx, :] = np.abs(self.score - self.best_obs_score) / self.max_local_reward_available
            elif observable==Observable.DISTANCE_FROM_BEST_STATE:
                # Scalar Hamming-style count broadcast over the whole row.
                self.state[idx, :self.n_spins] = np.count_nonzero(self.best_obs_spins[:self.n_spins] - self.state[0, :self.n_spins])
        #############################################################################################
        # 4. Check termination criteria.                                                            #
        #############################################################################################
        if self.current_step == self.max_steps:
            # Maximum number of steps taken --> done.
            # print("Done : maximum number of steps taken")
            done = True
        if not self.reversible_spins:
            if len((self.state[0, :self.n_spins] > 0).nonzero()[0]) == 0:
                # If no more spins to flip --> done.
                # print("Done : no more spins to flip")
                done = True
        return (self.get_observation(), rew, done, None)
def get_observation(self):
state = self.state.copy()
if self.spin_basis == SpinBasis.BINARY:
# convert {1,-1} --> {0,1}
state[0,:] = (1-state[0,:])/2
if self.gg.biased:
return np.vstack((state, self.matrix_obs, self.bias_obs))
else:
return np.vstack((state, self.matrix_obs))
def get_immeditate_rewards_avaialable(self, spins=None):
if spins is None:
spins = self._get_spins()
if self.optimisation_target==OptimisationTarget.ENERGY:
immediate_reward_function = lambda *args: -1*self._get_immeditate_energies_avaialable_jit(*args)
elif self.optimisation_target==OptimisationTarget.CUT:
immediate_reward_function = self._get_immeditate_cuts_avaialable_jit
else:
raise NotImplementedError("Optimisation target {} not recognised.".format(self.optimisation_ta))
spins = spins.astype('float64')
matrix = self.matrix.astype('float64')
if self.gg.biased:
bias = self.bias.astype('float64')
return immediate_reward_function(spins,matrix,bias)
else:
return immediate_reward_function(spins,matrix)
def get_allowed_action_states(self):
if self.reversible_spins:
# If MDP is reversible, both actions are allowed.
if self.spin_basis == SpinBasis.BINARY:
return (0,1)
elif self.spin_basis == SpinBasis.SIGNED:
return (1,-1)
else:
# If MDP is irreversible, only return the state of spins that haven't been flipped.
if self.spin_basis==SpinBasis.BINARY:
return 0
if self.spin_basis==SpinBasis.SIGNED:
return 1
def calculate_score(self, spins=None):
if self.optimisation_target==OptimisationTarget.CUT:
score = self.calculate_cut(spins)
elif self.optimisation_target==OptimisationTarget.ENERGY:
score = -1.*self.calculate_energy(spins)
else:
raise NotImplementedError
return score
def _calculate_score_change(self, new_spins, matrix, action):
if self.optimisation_target==OptimisationTarget.CUT:
delta_score = self._calculate_cut_change(new_spins, matrix, action)
elif self.optimisation_target == OptimisationTarget.ENERGY:
delta_score = -1. * self._calculate_energy_change(new_spins, matrix, action)
else:
raise NotImplementedError
return delta_score
def _format_spins_to_signed(self, spins):
if self.spin_basis == SpinBasis.BINARY:
if not np.isin(spins, [0, 1]).all():
raise Exception("SpinSystem is configured for binary spins ([0,1]).")
# Convert to signed spins for calculation.
spins = 2 * spins - 1
elif self.spin_basis == SpinBasis.SIGNED:
if not np.isin(spins, [-1, 1]).all():
raise Exception("SpinSystem is configured for signed spins ([-1,1]).")
return spins
    @abstractmethod
    def calculate_energy(self, spins=None):
        """Ising energy of *spins* (defaults to the current configuration)."""
        raise NotImplementedError
    @abstractmethod
    def calculate_cut(self, spins=None):
        """Cut value of *spins* (defaults to the current configuration)."""
        raise NotImplementedError
    @abstractmethod
    def get_best_cut(self):
        """Best cut found this episode (only valid for the CUT target)."""
        raise NotImplementedError
    @abstractmethod
    def _calc_over_range(self, i0, iMax):
        """Brute-force configurations i0..iMax-1; return (best_energy, spins)."""
        raise NotImplementedError
    @abstractmethod
    def _calculate_energy_change(self, new_spins, matrix, action):
        """Energy delta caused by the already-applied flip of spin *action*."""
        raise NotImplementedError
    @abstractmethod
    def _calculate_cut_change(self, new_spins, matrix, action):
        """Cut delta caused by the already-applied flip of spin *action*."""
        raise NotImplementedError
##########
# Classes for implementing the calculation methods with/without biases.
##########
class SpinSystemUnbiased(SpinSystemBase):
    """SpinSystem with couplings only (no per-spin bias field); numerical
    kernels are numba-jitted static methods."""
    def calculate_energy(self, spins=None):
        # E = -s^T J s / 2, computed on float64 copies for the jit kernel.
        if spins is None:
            spins = self._get_spins()
        else:
            spins = self._format_spins_to_signed(spins)
        spins = spins.astype('float64')
        matrix = self.matrix.astype('float64')
        return self._calculate_energy_jit(spins, matrix)
    def calculate_cut(self, spins=None):
        # Cut = (1/4) * sum_ij J_ij (1 - s_i s_j).
        if spins is None:
            spins = self._get_spins()
        else:
            spins = self._format_spins_to_signed(spins)
        return (1/4) * np.sum( np.multiply( self.matrix, 1 - np.outer(spins, spins) ) )
    def get_best_cut(self):
        if self.optimisation_target==OptimisationTarget.CUT:
            return self.best_score
        else:
            raise NotImplementedError("Can't return best cut when optimisation target is set to energy.")
    def _calc_over_range(self, i0, iMax):
        # Enumerate configurations i0..iMax-1 via their binary representation,
        # mapped to signed spins.  NOTE(review): materialises the whole list in
        # memory — exponential in n_spins.
        list_spins = [2 * np.array([int(x) for x in list_string]) - 1
                      for list_string in
                      [list(np.binary_repr(i, width=self.n_spins))
                       for i in range(int(i0), int(iMax))]]
        matrix = self.matrix.astype('float64')
        return self.__calc_over_range_jit(list_spins, matrix)
    @staticmethod
    @jit(float64(float64[:],float64[:,:],int64), nopython=True)
    def _calculate_energy_change(new_spins, matrix, action):
        # Delta-E for a single flip (new_spins already contains the flip).
        return -2 * new_spins[action] * matmul(new_spins.T, matrix[:, action])
    @staticmethod
    @jit(float64(float64[:],float64[:,:],int64), nopython=True)
    def _calculate_cut_change(new_spins, matrix, action):
        return -1 * new_spins[action] * matmul(new_spins.T, matrix[:, action])
    @staticmethod
    @jit(float64(float64[:],float64[:,:]), nopython=True)
    def _calculate_energy_jit(spins, matrix):
        return - matmul(spins.T, matmul(matrix, spins)) / 2
    @staticmethod
    @jit(parallel=True)
    def __calc_over_range_jit(list_spins, matrix):
        # NOTE(review): jitted without nopython=True — may fall back to numba
        # object mode on unsupported constructs.
        energy = 1e50
        best_spins = None
        for spins in list_spins:
            spins = spins.astype('float64')
            # This is self._calculate_energy_jit without calling to the class or self so jit can do its thing.
            current_energy = - matmul(spins.T, matmul(matrix, spins)) / 2
            if current_energy < energy:
                energy = current_energy
                best_spins = spins
        return energy, best_spins
    @staticmethod
    @jit(float64[:](float64[:],float64[:,:]), nopython=True)
    def _get_immeditate_energies_avaialable_jit(spins, matrix):
        # Per-spin energy change available from flipping each spin.
        return 2 * spins * matmul(matrix, spins)
    @staticmethod
    @jit(float64[:](float64[:],float64[:,:]), nopython=True)
    def _get_immeditate_cuts_avaialable_jit(spins, matrix):
        return spins * matmul(matrix, spins)
class SpinSystemBiased(SpinSystemBase):
    """SpinSystem with couplings and a per-spin bias field.  MaxCut is not
    defined for biased systems, so all cut-related methods raise."""
    def calculate_energy(self, spins=None):
        if type(spins) == type(None):
            spins = self._get_spins()
        spins = spins.astype('float64')
        matrix = self.matrix.astype('float64')
        bias = self.bias.astype('float64')
        return self._calculate_energy_jit(spins, matrix, bias)
    def calculate_cut(self, spins=None):
        raise NotImplementedError("MaxCut not defined/implemented for biased SpinSystems.")
    def get_best_cut(self):
        raise NotImplementedError("MaxCut not defined/implemented for biased SpinSystems.")
    def _calc_over_range(self, i0, iMax):
        # Enumerate configurations i0..iMax-1 via binary representations
        # mapped to signed spins (materialised in memory).
        list_spins = [2 * np.array([int(x) for x in list_string]) - 1
                      for list_string in
                      [list(np.binary_repr(i, width=self.n_spins))
                       for i in range(int(i0), int(iMax))]]
        matrix = self.matrix.astype('float64')
        bias = self.bias.astype('float64')
        return self.__calc_over_range_jit(list_spins, matrix, bias)
    @staticmethod
    @jit(nopython=True)
    def _calculate_energy_change(new_spins, matrix, bias, action):
        # NOTE(review): the overall sign here (+2) is opposite to the unbiased
        # class's _calculate_energy_change (-2), and the energy convention in
        # _calculate_energy_jit below is also negated relative to the unbiased
        # class.  Confirm the intended sign conventions agree.
        return 2 * new_spins[action] * (matmul(new_spins.T, matrix[:, action]) + bias[action])
    @staticmethod
    @jit(nopython=True)
    def _calculate_cut_change(new_spins, matrix, bias, action):
        raise NotImplementedError("MaxCut not defined/implemented for biased SpinSystems.")
    @staticmethod
    @jit(nopython=True)
    def _calculate_energy_jit(spins, matrix, bias):
        return matmul(spins.T, matmul(matrix, spins))/2 + matmul(spins.T, bias)
    @staticmethod
    @jit(parallel=True)
    def __calc_over_range_jit(list_spins, matrix, bias):
        energy = 1e50
        best_spins = None
        for spins in list_spins:
            spins = spins.astype('float64')
            # This is self._calculate_energy_jit without calling to the class or self so jit can do its thing.
            current_energy = -( matmul(spins.T, matmul(matrix, spins))/2 + matmul(spins.T, bias))
            if current_energy < energy:
                energy = current_energy
                best_spins = spins
        return energy, best_spins
    @staticmethod
    @jit(nopython=True)
    def _get_immeditate_energies_avaialable_jit(spins, matrix, bias):
        # NOTE(review): negated relative to the unbiased class's version —
        # verify the sign convention is intentional.
        return - (2 * spins * (matmul(matrix, spins) + bias))
    @staticmethod
    @jit(nopython=True)
    def _get_immeditate_cuts_avaialable_jit(spins, matrix, bias):
        raise NotImplementedError("MaxCut not defined/implemented for biased SpinSystems.")
eco-dqn | eco-dqn-master/src/agents/solver.py | from abc import ABC, abstractmethod
import numpy as np
import torch
class SpinSolver(ABC):
    """Abstract base class for agents solving SpinSystem Ising problems."""
    def __init__(self, env, record_cut=False, record_rewards=False, record_qs=False, verbose=False):
        """Base initialisation of a SpinSolver.
        Args:
            env (SpinSystem): The environment (an instance of SpinSystem) with
                which the agent interacts.
            record_cut (bool, optional): Record the cut value at each step.
            record_rewards (bool, optional): Record the reward at each step.
            record_qs (bool, optional): Record predicted Q-values at each step.
            verbose (bool, optional): The logging verbosity.
        Attributes:
            env (SpinSystem): The environment (an instance of SpinSystem) with
                which the agent interacts.
            verbose (bool): The logging verbosity.
            total_reward (float): The cumulative total reward received.
        """
        self.env = env
        self.verbose = verbose
        self.record_cut = record_cut
        self.record_rewards = record_rewards
        self.record_qs = record_qs
        self.total_reward = 0
    def reset(self):
        # Clear the cumulative reward and re-initialise the environment.
        self.total_reward = 0
        self.env.reset()
    def solve(self, *args):
        """Solve the SpinSystem by flipping individual spins until termination.
        Args:
            *args: The arguments passed through to the 'step' method to take the
                next action. The implementation of 'step' depends on the
                solver instance used.
        Returns:
            (float): The cumulative total reward received.
        """
        done = False
        while not done:
            reward, done = self.step(*args)
            self.total_reward += reward
        return self.total_reward
    @abstractmethod
    def step(self, *args):
        """Take the next step (flip the next spin).
        The implementation of 'step' depends on the solver instance used.
        Args:
            *args: The arguments passed through to the 'step' method to take the
                next action. The implementation of 'step' depends on the
                solver instance used.
        Raises:
            NotImplementedError: Every subclass of SpinSolver must implement the
                step method.
        """
        raise NotImplementedError()
class Greedy(SpinSolver):
    """A greedy solver for a SpinSystem."""
    def __init__(self, *args, **kwargs):
        """Initialise a greedy solver.
        Args:
            *args: Passed through to the SpinSolver constructor.
        """
        super().__init__(*args, **kwargs)
    def step(self):
        """Take the action which maximises the immediate reward.
        Returns:
            reward (float): The reward received.
            done (bool): Whether the environment is in a terminal state after
                the action is taken.
        """
        rewards_avaialable = self.env.get_immeditate_rewards_avaialable()
        if self.env.reversible_spins:
            action = rewards_avaialable.argmax()
        else:
            masked_rewards_avaialable = rewards_avaialable.copy()
            # Exclude spins that may no longer be flipped.  Bug fix: use -inf
            # rather than the previous magic value of -100, which would have
            # mis-masked whenever legitimate rewards fell below -100.
            np.putmask(masked_rewards_avaialable,
                       self.env.get_observation()[0, :] != self.env.get_allowed_action_states(),
                       -np.inf)
            action = masked_rewards_avaialable.argmax()
        # Terminate (no action) once the best available move would lower the
        # score; note this checks the unmasked reward of the chosen action.
        if rewards_avaialable[action] < 0:
            action = None
            reward = 0
            done = True
        else:
            observation, reward, done, _ = self.env.step(action)
        return reward, done
class Random(SpinSolver):
    """A random solver for a SpinSystem."""
    def step(self):
        """Take a random action.
        Returns:
            reward (float): The reward received.
            done (bool): Whether the environment is in a terminal state after
                the action is taken.
        """
        # NOTE(review): action_space.sample() returns a length-1 ndarray, so
        # env.step receives an array rather than an int — relies on numpy
        # fancy indexing inside the environment; confirm intended.
        observation, reward, done, _ = self.env.step(self.env.action_space.sample())
        return reward, done
class Network(SpinSolver):
    """A network-only solver for a SpinSystem."""
    # Exploration rate for epsilon-greedy action selection (0 --> pure greedy).
    epsilon = 0.
    def __init__(self, network, *args, **kwargs):
        """Initialise a network-only solver.
        Args:
            network: The network.
            *args: Passed through to the SpinSolver constructor.
        Attributes:
            current_observation: The last observation of the environment, used
                to choose the next action.
            history: Per-step records (action plus any requested extras).
        """
        super().__init__(*args, **kwargs)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.network = network.to(self.device)
        self.network.eval()
        self.current_observation = self.env.get_observation()
        self.current_observation = torch.FloatTensor(self.current_observation).to(self.device)
        self.history = []
    def reset(self, spins=None, clear_history=True):
        # Reset the environment (optionally to a given spin configuration) and
        # refresh the cached observation tensor.
        if spins is None:
            self.current_observation = self.env.reset()
        else:
            self.current_observation = self.env.reset(spins)
        self.current_observation = torch.FloatTensor(self.current_observation).to(self.device)
        self.total_reward = 0
        if clear_history:
            self.history = []
    @torch.no_grad()
    def step(self):
        """Choose an action epsilon-greedily from the network's Q-values,
        apply it, and append a record to self.history."""
        # Q-values predicted by the network.
        qs = self.network(self.current_observation)
        if self.env.reversible_spins:
            if np.random.uniform(0, 1) >= self.epsilon:
                # Action that maximises Q function
                action = qs.argmax().item()
            else:
                # Random action
                action = np.random.randint(0, self.env.action_space.n)
        else:
            x = (self.current_observation[0, :] == self.env.get_allowed_action_states()).nonzero()
            if np.random.uniform(0, 1) >= self.epsilon:
                action = x[qs[x].argmax().item()].item()
                # Allowed action that maximises Q function
            else:
                # Random allowed action
                action = x[np.random.randint(0, len(x))].item()
        # NOTE(review): action is always assigned above, so the None branch
        # below appears unreachable in this implementation.
        if action is not None:
            observation, reward, done, _ = self.env.step(action)
            self.current_observation = torch.FloatTensor(observation).to(self.device)
        else:
            reward = 0
            done = True
        # NOTE(review): when neither record_cut nor record_rewards is set, the
        # record contains only the action — record_qs and the immediate-rewards
        # entry are silently skipped in that case.  Confirm intended.
        if not self.record_cut and not self.record_rewards:
            record = [action]
        else:
            record = [action]
            if self.record_cut:
                record += [self.env.calculate_cut()]
            if self.record_rewards:
                record += [reward]
            if self.record_qs:
                record += [qs]
            record += [self.env.get_immeditate_rewards_avaialable()]
        self.history.append(record)
        return reward, done
| 6,973 | 31.138249 | 100 | py |
eco-dqn | eco-dqn-master/src/agents/dqn/utils.py | import math
import pickle
import random
import threading
from collections import namedtuple
from enum import Enum
import numpy as np
import torch
# One environment step as stored in the replay buffer.
Transition = namedtuple(
    'Transition', ('state', 'action', 'reward', 'state_next', 'done')
)
class TestMetric(Enum):
    # Metrics by which evaluation episodes can be scored.
    CUMULATIVE_REWARD = 1
    BEST_ENERGY = 2
    ENERGY_ERROR = 3
    MAX_CUT = 4
    FINAL_CUT = 5
def set_global_seed(seed, env):
    """Seed every source of randomness: stdlib, numpy, torch and the environment."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    env.set_seed(seed)
class ReplayBuffer:
    """Uniform experience replay with asynchronous batch pre-fetching.

    While the trainer consumes one batch, the next batch is sampled, stacked
    and moved to the target device on a background thread.
    """

    def __init__(self, capacity):
        self._capacity = capacity
        self._memory = {}
        self._position = 0
        # Book-keeping for the asynchronously prepared batch.
        self.next_batch_process = None
        self.next_batch_size = None
        self.next_batch_device = None
        self.next_batch = None

    def add(self, *args):
        """
        Saves a transition.
        """
        if self.next_batch_process is not None:
            # Don't add to the buffer when sampling from it.
            self.next_batch_process.join()
        self._memory[self._position] = Transition(*args)
        self._position = (self._position + 1) % self._capacity

    def _prepare_sample(self, batch_size, device=None):
        # Runs on a background thread: sample, stack and transfer one batch.
        self.next_batch_size = batch_size
        self.next_batch_device = device
        batch = random.sample(list(self._memory.values()), batch_size)
        self.next_batch = [torch.stack(tensors).to(device) for tensors in zip(*batch)]
        self.next_batch_ready = True

    def launch_sample(self, *args):
        self.next_batch_process = threading.Thread(target=self._prepare_sample, args=args)
        self.next_batch_process.start()

    def sample(self, batch_size, device=None):
        """
        Samples a batch of Transitions, with the tensors already stacked
        and transfered to the specified device.
        Return a list of tensors in the order specified in Transition.
        """
        if self.next_batch_process is not None:
            self.next_batch_process.join()
        else:
            # Nothing pre-fetched yet: launch one and retry.
            # BUGFIX: the recursion's result must be returned; previously
            # execution fell through and could hand back a stale batch
            # without joining the freshly launched worker.
            self.launch_sample(batch_size, device)
            return self.sample(batch_size, device)
        if self.next_batch_size == batch_size and self.next_batch_device == device:
            next_batch = self.next_batch
            # Pre-fetch the following batch before returning this one.
            self.launch_sample(batch_size, device)
            return next_batch
        else:
            # Pre-fetched batch does not match the request: prepare a new one.
            # BUGFIX: previously the recursive call's result was discarded and
            # sample() returned None whenever batch_size/device changed.
            self.launch_sample(batch_size, device)
            return self.sample(batch_size, device)

    def __len__(self):
        return len(self._memory)
class PrioritisedReplayBuffer:
    """Rank-based prioritised experience replay.

    Transitions live in a 1-indexed binary max-heap keyed on their last
    TD-error; a transition's heap position (rank) determines its sampling
    priority p_i = 1 / rank_i, and samples are importance-weighted by beta.
    """

    def __init__(self, capacity=10000, alpha=0.7, beta0=0.5):
        # The capacity of the replay buffer.
        self._capacity = capacity
        # A binary (max-)heap of the buffer contents, sorted by the td_error <--> priority.
        # Heap positions are 1-indexed to simplify parent/child index arithmetic.
        self.priority_heap = {}  # heap_position : [buffer_position, td_error, transition]
        # Maps a buffer position (when the transition was added) to the position of the
        # transition in the priority_heap.
        self.buffer2heap = {}  # buffer_position : heap_position
        # The current position in the replay buffer. Starts at 1 for ease of binary-heap calcs.
        self._buffer_position = 1
        # Flag for when the replay buffer reaches max capacity.
        self.full = False
        self.alpha = alpha
        self.beta = beta0
        self.beta_step = 0
        self.partitions = []
        self.probabilities = []
        self.__partitions_fixed = False

    def __get_max_td_err(self):
        # BUGFIX: the heap is 1-indexed, so the maximum priority is at the
        # root, index 1.  The old lookup used index 0, which never exists and
        # made this always return the default of 1.
        try:
            return self.priority_heap[1][1]
        except KeyError:
            # Nothing exists in the priority heap yet!
            return 1

    def add(self, *args):
        """
        Add the transition described by *args : (state, action, reward, state_next, done), to the
        memory.
        """
        # By default a new transition has equal highest priority in the heap.
        trans = [self._buffer_position, self.__get_max_td_err(), Transition(*args)]
        try:
            # Find the heap position of the transition to be replaced
            heap_pos = self.buffer2heap[self._buffer_position]
            self.full = True  # We found a transition in this buffer slot --> the memory is at capacity.
        except KeyError:
            # No transition in the buffer slot, therefore we will be adding one fresh.
            heap_pos = self._buffer_position
        # Update the heap, associated data structures and re-sort.
        self.__update_heap(heap_pos, trans)
        self.up_heap(heap_pos)
        if self.full:
            self.down_heap(heap_pos)
        # Iterate to the next buffer position.
        self._buffer_position = (self._buffer_position % self._capacity) + 1

    def __update_heap(self, heap_pos, val):
        """
        heapList[heap_pos] <-- val = [buffer_position, td_error, transition]
        """
        self.priority_heap[heap_pos] = val
        self.buffer2heap[val[0]] = heap_pos

    def up_heap(self, i):
        """
        Iteratively swap heap items with their parents until they are in the correct order.
        """
        if i >= 2:
            i_parent = i // 2
            if self.priority_heap[i_parent][1] < self.priority_heap[i][1]:
                tmp = self.priority_heap[i]
                self.__update_heap(i, self.priority_heap[i_parent])
                self.__update_heap(i_parent, tmp)
                self.up_heap(i_parent)

    def down_heap(self, i):
        """
        Iteratively swap heap items with their children until they are in the correct order.
        """
        i_largest = i
        left = 2 * i
        right = 2 * i + 1
        size = self._capacity if self.full else len(self)
        # BUGFIX: with 1-indexed positions the valid heap range is 1..size
        # inclusive, so a child sitting exactly at `size` must be considered
        # (the old `<` comparison never compared against the last element).
        if left <= size and self.priority_heap[left][1] > self.priority_heap[i_largest][1]:
            i_largest = left
        if right <= size and self.priority_heap[right][1] > self.priority_heap[i_largest][1]:
            i_largest = right
        if i_largest != i:
            tmp = self.priority_heap[i]
            self.__update_heap(i, self.priority_heap[i_largest])
            self.__update_heap(i_largest, tmp)
            self.down_heap(i_largest)

    def rebalance(self):
        """
        Rebalance priority_heap with a full O(n log n) re-sort.
        """
        sort_array = sorted(self.priority_heap.values(), key=lambda x: x[1], reverse=True)
        # reconstruct priority_queue
        self.priority_heap.clear()
        self.buffer2heap.clear()
        # BUGFIX: iterate over the number of stored transitions rather than the
        # capacity (the old loop raised IndexError on a partially full buffer).
        n = len(sort_array)
        count = 1
        while count <= n:
            self.__update_heap(count, sort_array[count - 1])
            count += 1
        # Sift every internal node, down to AND INCLUDING the root (index 1).
        # BUGFIX: the old range(..., 1, -1) skipped the root.
        for i in range(n // 2, 0, -1):
            self.down_heap(i)

    def update_partitions(self, num_partitions):
        # P(t_i) = p_i^alpha / Sum_k(p_k^alpha), where the priority p_i = 1 / rank_i.
        priorities = [math.pow(rank, -self.alpha) for rank in range(1, len(self.priority_heap) + 1)]
        priorities_sum = sum(priorities)
        probabilities = dict(
            [(rank0index + 1, priority / priorities_sum) for rank0index, priority in enumerate(priorities)])
        # Split ranks into num_partitions segments of (approximately) equal
        # cumulative probability; one transition is later drawn per segment.
        partitions = [1]
        partition_num = 1
        cum_probabilty = 0
        next_boundary = partition_num / num_partitions
        rank = 1
        while partition_num < num_partitions:
            cum_probabilty += probabilities[rank]
            rank += 1
            if cum_probabilty >= next_boundary:
                partitions.append(rank)
                partition_num += 1
                next_boundary = partition_num / num_partitions
        partitions.append(len(self.priority_heap))
        partitions = [(a, b) for a, b in zip(partitions, partitions[1:])]
        return partitions, probabilities

    def update_priorities(self, buffer_positions, td_error):
        """Assign new TD-errors to the given buffer positions and re-sort the heap."""
        for buf_id, td_err in zip(buffer_positions, td_error):
            heap_id = self.buffer2heap[buf_id]
            [buf_pos, _, trans] = self.priority_heap[heap_id]
            self.priority_heap[heap_id] = [buf_pos, td_err, trans]
            self.down_heap(heap_id)
            self.up_heap(heap_id)

    def sample(self, batch_size, device=None):
        """Sample a rank-prioritised batch.

        Returns (batch, importance_weights, buffer_positions); the weights are a
        column vector normalised by their maximum.
        """
        if batch_size != len(self.partitions) or not self.__partitions_fixed:
            self.partitions, self.probabilities = self.update_partitions(batch_size)
            if self.full:
                # Once the buffer is full, the partitions no longer need to be updated
                # (as they depend only on the number of stored transitions and alpha).
                self.__partitions_fixed = True
        self.beta = min(self.beta + self.beta_step, 1)
        batch_ranks = [np.random.randint(low, high) for low, high in self.partitions]
        batch_buffer_positions, batch_td_errors, batch_transitions = zip(*[self.priority_heap[rank] for rank in batch_ranks])
        batch = [torch.stack(tensors).to(device) for tensors in zip(*batch_transitions)]
        N = self._capacity if self.full else len(self)
        # Note this is a column vector to match the dimensions of weights and td_target in dqn.train_step(...)
        sample_probs = torch.FloatTensor([[self.probabilities[rank]] for rank in batch_ranks])
        weights = (N * sample_probs).pow(-self.beta)
        weights /= weights.max()
        return batch, weights.to(device), batch_buffer_positions

    def configure_beta_anneal_time(self, beta_max_at_samples):
        """Linearly anneal beta to 1 over the given number of sample() calls."""
        self.beta_step = (1 - self.beta) / beta_max_at_samples

    def __len__(self):
        return len(self.priority_heap)
class Logger:
    """Collects named scalar time-series in memory, pickling them to disk in chunks."""

    def __init__(self):
        self._memory = {}
        self._saves = 0
        self._maxsize = 1000000
        self._dumps = 0

    def add_scalar(self, name, data, timestep):
        """Record one (value, timestep) pair under *name*."""
        if isinstance(data, torch.Tensor):
            data = data.item()
        series = self._memory.setdefault(name, [])
        series.append([data, timestep])
        self._saves += 1
        # Spill everything to a numbered pickle once the in-memory store fills up.
        if self._saves == self._maxsize - 1:
            filename = 'log_data_{}.pkl'.format((self._dumps + 1) * self._maxsize)
            with open(filename, 'wb') as handle:
                pickle.dump(self._memory, handle, pickle.HIGHEST_PROTOCOL)
            self._dumps += 1
            self._saves = 0
            self._memory = {}

    def save(self):
        """Write everything currently held in memory to 'log_data.pkl'."""
        with open('log_data.pkl', 'wb') as handle:
            pickle.dump(self._memory, handle, pickle.HIGHEST_PROTOCOL)
| 10,733 | 33.850649 | 125 | py |
eco-dqn | eco-dqn-master/src/agents/dqn/dqn.py | """
Implements a DQN learning agent.
"""
import os
import pickle
import random
import time
from copy import deepcopy
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from src.agents.dqn.utils import ReplayBuffer, Logger, TestMetric, set_global_seed
from src.envs.utils import ExtraAction
class DQN:
    """
    # Required parameters.
    envs : List of environments to use.
    network : Choice of neural network.

    # Initial network parameters.
    init_network_params : Pre-trained network to load upon initialisation.
    init_weight_std : Standard deviation of initial network weights.

    # DQN parameters
    double_dqn : Whether to use double DQN (DDQN).
    update_target_frequency : How often to update the DDQN target network.
    gamma : Discount factor.
    clip_Q_targets : Whether negative Q targets are clipped (generally True/False for irreversible/reversible agents).

    # Replay buffer.
    replay_start_size : The capacity of the replay buffer at which training can begin.
    replay_buffer_size : Maximum buffer capacity.
    minibatch_size : Minibatch size.
    update_frequency : Number of environment steps taken between parameter update steps.

    # Learning rate
    update_learning_rate : Whether to dynamically update the learning rate (if False, initial_learning_rate is always used).
    initial_learning_rate : Initial learning rate.
    peak_learning_rate : The maximum learning rate.
    peak_learning_rate_step : The timestep (from the start, not from when training starts) at which the peak_learning_rate is found.
    final_learning_rate : The final learning rate.
    final_learning_rate_step : The timestep of the final learning rate.

    # Optional regularization.
    max_grad_norm : The norm grad to clip gradients to (None means no clipping).
    weight_decay : The weight decay term for regularisation.

    # Exploration
    update_exploration : Whether to update the exploration rate (False would tend to be used with NoisyNet layers).
    initial_exploration_rate : Inital exploration rate.
    final_exploration_rate : Final exploration rate.
    final_exploration_step : Timestep at which the final exploration rate is reached.

    # Loss function
    adam_epsilon : epsilon for ADAM optimisation.
    loss="mse" : Loss function to use.

    # Saving the agent
    save_network_frequency : Frequency with which the network parameters are saved.
    network_save_path : Folder into which the network parameters are saved.

    # Testing the agent
    evaluate : Whether to test the agent during training.
    test_envs : List of test environments. None means the training environments (envs) are used.
    test_episodes : Number of episodes at each test point.
    test_frequency : Frequency of tests.
    test_save_path : Folder into which the test scores are saved.
    test_metric : The metric used to quantify performance.

    # Other
    logging : Whether to log.
    seed : The global seed to set. None means randomly selected.
    """

    def __init__(
        self,
        envs,
        network,
        # Initial network parameters.
        init_network_params = None,
        init_weight_std = None,
        # DQN parameters
        double_dqn = True,
        update_target_frequency=10000,
        gamma=0.99,
        clip_Q_targets=False,
        # Replay buffer.
        replay_start_size=50000,
        replay_buffer_size=1000000,
        minibatch_size=32,
        update_frequency=1,
        # Learning rate
        update_learning_rate = True,
        initial_learning_rate = 0,
        peak_learning_rate = 1e-3,
        peak_learning_rate_step = 10000,
        final_learning_rate = 5e-5,
        final_learning_rate_step = 200000,
        # Optional regularization.
        max_grad_norm=None,
        weight_decay=0,
        # Exploration
        update_exploration=True,
        initial_exploration_rate=1,
        final_exploration_rate=0.1,
        final_exploration_step=1000000,
        # Loss function
        adam_epsilon=1e-8,
        loss="mse",
        # Saving the agent
        save_network_frequency=10000,
        network_save_path='network',
        # Testing the agent
        evaluate=True,
        test_envs=None,
        test_episodes=20,
        test_frequency=10000,
        test_save_path='test_scores',
        test_metric=TestMetric.ENERGY_ERROR,
        # Other
        logging=True,
        seed=None
    ):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.double_dqn = double_dqn
        self.replay_start_size = replay_start_size
        self.replay_buffer_size = replay_buffer_size
        self.gamma = gamma
        self.clip_Q_targets = clip_Q_targets
        self.update_target_frequency = update_target_frequency
        self.minibatch_size = minibatch_size
        self.update_learning_rate = update_learning_rate
        self.initial_learning_rate = initial_learning_rate
        self.peak_learning_rate = peak_learning_rate
        self.peak_learning_rate_step = peak_learning_rate_step
        self.final_learning_rate = final_learning_rate
        self.final_learning_rate_step = final_learning_rate_step
        self.max_grad_norm = max_grad_norm
        self.weight_decay = weight_decay
        self.update_frequency = update_frequency
        # BUGFIX: a trailing comma here previously made this a 1-tuple, which
        # is always truthy, so `if self.update_exploration:` in learn() could
        # never be disabled by passing update_exploration=False.
        self.update_exploration = update_exploration
        self.initial_exploration_rate = initial_exploration_rate
        self.epsilon = self.initial_exploration_rate
        self.final_exploration_rate = final_exploration_rate
        self.final_exploration_step = final_exploration_step
        self.adam_epsilon = adam_epsilon
        self.logging = logging
        if callable(loss):
            self.loss = loss
        else:
            try:
                self.loss = {'huber': F.smooth_l1_loss, 'mse': F.mse_loss}[loss]
            except KeyError:
                raise ValueError("loss must be 'huber', 'mse' or a callable")
        if type(envs) != list:
            envs = [envs]
        self.envs = envs
        self.env, self.acting_in_reversible_spin_env = self.get_random_env()
        # One replay buffer per distinct action-space size (graph size).
        self.replay_buffers = {}
        for n_spins in set([env.action_space.n for env in self.envs]):
            self.replay_buffers[n_spins] = ReplayBuffer(self.replay_buffer_size)
        self.replay_buffer = self.get_replay_buffer_for_env(self.env)
        # BUGFIX: random.randint requires integer bounds (1e6 is a float,
        # which raises on Python >= 3.12).
        self.seed = random.randint(0, int(1e6)) if seed is None else seed
        for env in self.envs:
            set_global_seed(self.seed, env)
        self.network = network().to(self.device)
        self.init_network_params = init_network_params
        self.init_weight_std = init_weight_std
        if self.init_network_params is not None:
            print("Pre-loading network parameters from {}.\n".format(init_network_params))
            self.load(init_network_params)
        else:
            if self.init_weight_std is not None:
                def init_weights(m):
                    if type(m) == torch.nn.Linear:
                        print("Setting weights for", m)
                        m.weight.normal_(0, init_weight_std)
                with torch.no_grad():
                    self.network.apply(init_weights)
        self.target_network = network().to(self.device)
        self.target_network.load_state_dict(self.network.state_dict())
        for param in self.target_network.parameters():
            param.requires_grad = False
        self.optimizer = optim.Adam(self.network.parameters(), lr=self.initial_learning_rate, eps=self.adam_epsilon,
                                    weight_decay=self.weight_decay)
        self.evaluate = evaluate
        if test_envs in [None, [None]]:
            # By default, test on the same environment(s) as are trained on.
            self.test_envs = self.envs
        else:
            if type(test_envs) != list:
                test_envs = [test_envs]
            self.test_envs = test_envs
        self.test_episodes = int(test_episodes)
        self.test_frequency = test_frequency
        self.test_save_path = test_save_path
        self.test_metric = test_metric
        self.losses_save_path = os.path.join(os.path.split(self.test_save_path)[0], "losses.pkl")
        if not self.acting_in_reversible_spin_env:
            for env in self.envs:
                assert env.extra_action == ExtraAction.NONE, "For deterministic MDP, no extra action is allowed."
            for env in self.test_envs:
                assert env.extra_action == ExtraAction.NONE, "For deterministic MDP, no extra action is allowed."
        self.allowed_action_state = self.env.get_allowed_action_states()
        self.save_network_frequency = save_network_frequency
        self.network_save_path = network_save_path
def get_random_env(self, envs=None):
if envs is None:
env = random.sample(self.envs, k=1)[0]
else:
env = random.sample(envs, k=1)[0]
return env, env.reversible_spins
def get_replay_buffer_for_env(self, env):
return self.replay_buffers[env.action_space.n]
def get_random_replay_buffer(self):
return random.sample(self.replay_buffers.items(), k=1)[0][1]
    def learn(self, timesteps, verbose=False):
        """Run the main training loop for *timesteps* environment steps.

        Interacts with randomly-chosen training environments, fills the replay
        buffers, trains once all buffers hold replay_start_size transitions,
        periodically evaluates/saves the network, and finally pickles the test
        scores and losses to disk.
        """
        if self.logging:
            logger = Logger()
        # Initialise the state
        state = torch.as_tensor(self.env.reset())
        score = 0
        losses_eps = []
        t1 = time.time()
        test_scores = []
        losses = []
        # Training starts only once every replay buffer holds replay_start_size transitions.
        is_training_ready = False
        for timestep in range(timesteps):
            if not is_training_ready:
                if all([len(rb)>=self.replay_start_size for rb in self.replay_buffers.values()]):
                    print('\nAll buffers have {} transitions stored - training is starting!\n'.format(
                        self.replay_start_size))
                    is_training_ready=True
            # Choose action
            action = self.act(state.to(self.device).float(), is_training_ready=is_training_ready)
            # Update epsilon
            if self.update_exploration:
                self.update_epsilon(timestep)
            # Update learning rate
            if self.update_learning_rate:
                self.update_lr(timestep)
            # Perform action in environment
            state_next, reward, done, _ = self.env.step(action)
            score += reward
            # Store transition in replay buffer
            action = torch.as_tensor([action], dtype=torch.long)
            reward = torch.as_tensor([reward], dtype=torch.float)
            state_next = torch.as_tensor(state_next)
            done = torch.as_tensor([done], dtype=torch.float)
            self.replay_buffer.add(state, action, reward, state_next, done)
            if done:
                # Reinitialise the state
                if verbose:
                    loss_str = "{:.2e}".format(np.mean(losses_eps)) if is_training_ready else "N/A"
                    print("timestep : {}, episode time: {}, score : {}, mean loss: {}, time : {} s".format(
                        (timestep+1),
                         self.env.current_step,
                         np.round(score,3),
                         loss_str,
                         round(time.time() - t1, 3)))
                if self.logging:
                    logger.add_scalar('Episode_score', score, timestep)
                # Episode over: pick a fresh random environment (and its buffer).
                self.env, self.acting_in_reversible_spin_env = self.get_random_env()
                self.replay_buffer = self.get_replay_buffer_for_env(self.env)
                state = torch.as_tensor(self.env.reset())
                score = 0
                losses_eps = []
                t1 = time.time()
            else:
                state = state_next
            if is_training_ready:
                # Update the main network
                if timestep % self.update_frequency == 0:
                    # Sample a batch of transitions
                    transitions = self.get_random_replay_buffer().sample(self.minibatch_size, self.device)
                    # Train on selected batch
                    loss = self.train_step(transitions)
                    losses.append([timestep,loss])
                    losses_eps.append(loss)
                    if self.logging:
                        logger.add_scalar('Loss', loss, timestep)
                # Periodically update target network
                if timestep % self.update_target_frequency == 0:
                    self.target_network.load_state_dict(self.network.state_dict())
            if (timestep+1) % self.test_frequency == 0 and self.evaluate and is_training_ready:
                test_score = self.evaluate_agent()
                print('\nTest score: {}\n'.format(np.round(test_score,3)))
                # Higher is better for cut/reward metrics; lower for energy metrics.
                if self.test_metric in [TestMetric.FINAL_CUT,TestMetric.MAX_CUT,TestMetric.CUMULATIVE_REWARD]:
                    best_network = all([test_score > score for t,score in test_scores])
                elif self.test_metric in [TestMetric.ENERGY_ERROR, TestMetric.BEST_ENERGY]:
                    best_network = all([test_score < score for t, score in test_scores])
                else:
                    raise NotImplementedError("{} is not a recognised TestMetric".format(self.test_metric))
                if best_network:
                    path = self.network_save_path
                    path_main, path_ext = os.path.splitext(path)
                    path_main += "_best"
                    if path_ext == '':
                        path_ext += '.pth'
                    self.save(path_main + path_ext)
                test_scores.append([timestep+1,test_score])
            if (timestep + 1) % self.save_network_frequency == 0 and is_training_ready:
                path = self.network_save_path
                path_main, path_ext = os.path.splitext(path)
                path_main += str(timestep+1)
                if path_ext == '':
                    path_ext += '.pth'
                self.save(path_main+path_ext)
        if self.logging:
            logger.save()
        # Persist test scores and losses for later analysis.
        path = self.test_save_path
        if os.path.splitext(path)[-1] == '':
            path += '.pkl'
        with open(path, 'wb+') as output:
            pickle.dump(np.array(test_scores), output, pickle.HIGHEST_PROTOCOL)
            if verbose:
                print('test_scores saved to {}'.format(path))
        with open(self.losses_save_path, 'wb+') as output:
            pickle.dump(np.array(losses), output, pickle.HIGHEST_PROTOCOL)
            if verbose:
                print('losses saved to {}'.format(self.losses_save_path))
@torch.no_grad()
def __only_bad_actions_allowed(self, state, network):
x = (state[0, :] == self.allowed_action_state).nonzero()
q_next = network(state.to(self.device).float())[x].max()
return True if q_next < 0 else False
    def train_step(self, transitions):
        """Perform one gradient step on a batch of transitions.

        transitions : (states, actions, rewards, states_next, dones) stacked tensors.
        Returns the scalar loss value.
        """
        states, actions, rewards, states_next, dones = transitions
        if self.acting_in_reversible_spin_env:
            # Calculate target Q
            with torch.no_grad():
                if self.double_dqn:
                    # DDQN: select with the online network, evaluate with the target network.
                    greedy_actions = self.network(states_next.float()).argmax(1, True)
                    q_value_target = self.target_network(states_next.float()).gather(1, greedy_actions)
                else:
                    q_value_target = self.target_network(states_next.float()).max(1, True)[0]
        else:
            target_preds = self.target_network(states_next.float())
            # NOTE(review): this allowed-action mask indexes states_next[:, 0, :],
            # whereas predict() uses states[:, :, 0] for the same check -- the two
            # disagree about the observation layout; confirm which axis holds spins.
            disallowed_actions_mask = (states_next[:, 0, :] != self.allowed_action_state)
            # Calculate target Q, selecting ONLY ALLOWED ACTIONS greedily.
            with torch.no_grad():
                if self.double_dqn:
                    network_preds = self.network(states_next.float())
                    # Set the Q-value of disallowed actions to a large negative number (-10000) so they are not selected.
                    network_preds_allowed = network_preds.masked_fill(disallowed_actions_mask,-10000)
                    greedy_actions = network_preds_allowed.argmax(1, True)
                    q_value_target = target_preds.gather(1, greedy_actions)
                else:
                    q_value_target = target_preds.masked_fill(disallowed_actions_mask,-10000).max(1, True)[0]
        if self.clip_Q_targets:
            # Optionally clamp negative targets to zero.
            q_value_target[q_value_target < 0] = 0
        # Calculate TD target
        td_target = rewards + (1 - dones) * self.gamma * q_value_target
        # Calculate Q value
        q_value = self.network(states.float()).gather(1, actions)
        # Calculate loss
        loss = self.loss(q_value, td_target, reduction='mean')
        # Update weights
        self.optimizer.zero_grad()
        loss.backward()
        if self.max_grad_norm is not None: #Optional gradient clipping
            torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.max_grad_norm)
        self.optimizer.step()
        return loss.item()
def act(self, state, is_training_ready=True):
if is_training_ready and random.uniform(0, 1) >= self.epsilon:
# Action that maximises Q function
action = self.predict(state)
else:
if self.acting_in_reversible_spin_env:
# Random random spin.
action = np.random.randint(0, self.env.action_space.n)
else:
# Flip random spin from that hasn't yet been flipped.
x = (state[0, :] == self.allowed_action_state).nonzero()
action = x[np.random.randint(0, len(x))].item()
return action
def update_epsilon(self, timestep):
eps = self.initial_exploration_rate - (self.initial_exploration_rate - self.final_exploration_rate) * (
timestep / self.final_exploration_step
)
self.epsilon = max(eps, self.final_exploration_rate)
def update_lr(self, timestep):
if timestep <= self.peak_learning_rate_step:
lr = self.initial_learning_rate - (self.initial_learning_rate - self.peak_learning_rate) * (
timestep / self.peak_learning_rate_step
)
elif timestep <= self.final_learning_rate_step:
lr = self.peak_learning_rate - (self.peak_learning_rate - self.final_learning_rate) * (
(timestep - self.peak_learning_rate_step) / (self.final_learning_rate_step - self.peak_learning_rate_step)
)
else:
lr = None
if lr is not None:
for g in self.optimizer.param_groups:
g['lr'] = lr
    @torch.no_grad()
    def predict(self, states, acting_in_reversible_spin_env=None):
        """Greedy action(s) for a single state (1-D network output) or a batch.

        Returns an int for a single state, or a numpy array of ints for a batch.
        In an irreversible-spin environment only still-allowed actions are chosen.
        """
        if acting_in_reversible_spin_env is None:
            acting_in_reversible_spin_env = self.acting_in_reversible_spin_env
        qs = self.network(states)
        if acting_in_reversible_spin_env:
            if qs.dim() == 1:
                actions = qs.argmax().item()
            else:
                actions = qs.argmax(1, True).squeeze(1).cpu().numpy()
            return actions
        else:
            if qs.dim() == 1:
                x = (states[0, :] == self.allowed_action_state).nonzero()
                actions = x[qs[x].argmax().item()].item()
            else:
                # NOTE(review): this indexes states[:, :, 0] while train_step()
                # uses states_next[:, 0, :] for the same allowed-action check;
                # one of the two appears inconsistent -- confirm the observation layout.
                disallowed_actions_mask = (states[:, :, 0] != self.allowed_action_state)
                qs_allowed = qs.masked_fill(disallowed_actions_mask, -10000)
                actions = qs_allowed.argmax(1, True).squeeze(1).cpu().numpy()
            return actions
    @torch.no_grad()
    def evaluate_agent(self, batch_size=None):
        """
        Evaluates agent's current performance. Run multiple evaluations at once
        so the network predictions can be done in batches.

        Returns the mean score over self.test_episodes episodes, where the score
        is defined by self.test_metric.
        """
        if batch_size is None:
            batch_size = self.minibatch_size
        i_test = 0   # Episodes started.
        i_comp = 0   # Episodes completed.
        test_scores = []
        batch_scores = [0]*batch_size
        # One slot per concurrently-running episode; None marks a free slot.
        test_envs = np.array([None]*batch_size)
        obs_batch = []
        while i_comp < self.test_episodes:
            # Fill any free slots with fresh episodes.
            for i, env in enumerate(test_envs):
                if env is None and i_test < self.test_episodes:
                    # NOTE(review): testing_in_reversible_spin_env is bound here and
                    # reused below across iterations; it is always set on the first
                    # pass (all slots start empty), but later passes may reuse a
                    # value from a previously selected test env.
                    test_env, testing_in_reversible_spin_env = self.get_random_env(self.test_envs)
                    obs = test_env.reset()
                    test_env = deepcopy(test_env)
                    test_envs[i] = test_env
                    obs_batch.append(obs)
                    i_test += 1
            actions = self.predict(torch.FloatTensor(np.array(obs_batch)).to(self.device),
                                   testing_in_reversible_spin_env)
            obs_batch = []
            i = 0
            # Step every active episode by one action.
            for env, action in zip(test_envs, actions):
                if env is not None:
                    obs, rew, done, info = env.step(action)
                    if self.test_metric == TestMetric.CUMULATIVE_REWARD:
                        batch_scores[i] += rew
                    if done:
                        # Episode finished: record its score per the chosen metric.
                        if self.test_metric == TestMetric.BEST_ENERGY:
                            batch_scores[i] = env.best_energy
                        elif self.test_metric == TestMetric.ENERGY_ERROR:
                            batch_scores[i] = abs(env.best_energy - env.calculate_best()[0])
                        elif self.test_metric == TestMetric.MAX_CUT:
                            batch_scores[i] = env.get_best_cut()
                        elif self.test_metric == TestMetric.FINAL_CUT:
                            batch_scores[i] = env.calculate_cut()
                        test_scores.append(batch_scores[i])
                        if self.test_metric == TestMetric.CUMULATIVE_REWARD:
                            batch_scores[i] = 0
                        i_comp += 1
                        test_envs[i] = None
                    else:
                        obs_batch.append(obs)
                i += 1
        if self.test_metric == TestMetric.ENERGY_ERROR:
            print("\n{}/{} graphs solved optimally".format(np.count_nonzero(np.array(test_scores)==0),self.test_episodes), end="")
        return np.mean(test_scores)
def save(self, path='network.pth'):
if os.path.splitext(path)[-1]=='':
path + '.pth'
torch.save(self.network.state_dict(), path)
def load(self,path):
self.network.load_state_dict(torch.load(path,map_location=self.device)) | 22,538 | 37.396934 | 132 | py |
eco-dqn | eco-dqn-master/experiments/utils.py | import os
import pickle
import networkx as nx
import time
import numpy as np
import scipy as sp
import pandas as pd
import torch
from collections import namedtuple
from copy import deepcopy
import src.envs.core as ising_env
from src.envs.utils import (SingleGraphGenerator, SpinBasis)
from src.agents.solver import Network, Greedy
####################################################
# TESTING ON GRAPHS
####################################################
def test_network(network, env_args, graphs_test, device=None, step_factor=1, batched=True,
                 n_attempts=50, return_raw=False, return_history=False, max_batch_size=None):
    """Test *network* on a list of graphs, dispatching to the batched or
    sequential implementation."""
    if not batched:
        if max_batch_size is not None:
            print("Warning: max_batch_size argument will be ignored for when batched=False.")
        return __test_network_sequential(network, env_args, graphs_test, step_factor,
                                         n_attempts, return_raw, return_history)
    return __test_network_batched(network, env_args, graphs_test, device, step_factor,
                                  n_attempts, return_raw, return_history, max_batch_size)
def __test_network_batched(network, env_args, graphs_test, device=None, step_factor=1,
                           n_attempts=50, return_raw=False, return_history=False, max_batch_size=None):
    """Test the network on each graph, running up to max_batch_size episodes in
    parallel so the network forward passes are batched, and compare against
    greedy baselines.

    Returns a results DataFrame, optionally followed by raw per-attempt results
    and/or per-step history DataFrames (depending on return_raw/return_history).
    """
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        torch.device(device)
    # HELPER FUNCTION FOR NETWORK TESTING
    acting_in_reversible_spin_env = env_args['reversible_spins']
    if env_args['reversible_spins']:
        # If MDP is reversible, both actions are allowed.
        if env_args['spin_basis'] == SpinBasis.BINARY:
            allowed_action_state = (0, 1)
        elif env_args['spin_basis'] == SpinBasis.SIGNED:
            allowed_action_state = (1, -1)
    else:
        # If MDP is irreversible, only return the state of spins that haven't been flipped.
        if env_args['spin_basis'] == SpinBasis.BINARY:
            allowed_action_state = 0
        if env_args['spin_basis'] == SpinBasis.SIGNED:
            allowed_action_state = 1
    def predict(states):
        # Greedy action(s) w.r.t. the network, restricted to allowed actions
        # in the irreversible case. Handles single (1-D output) and batched input.
        qs = network(states)
        if acting_in_reversible_spin_env:
            if qs.dim() == 1:
                actions = [qs.argmax().item()]
            else:
                actions = qs.argmax(1, True).squeeze(1).cpu().numpy()
            return actions
        else:
            if qs.dim() == 1:
                x = (states.squeeze()[:,0] == allowed_action_state).nonzero()
                actions = [x[qs[x].argmax().item()].item()]
            else:
                disallowed_actions_mask = (states[:, :, 0] != allowed_action_state)
                qs_allowed = qs.masked_fill(disallowed_actions_mask, -1000)
                actions = qs_allowed.argmax(1, True).squeeze(1).cpu().numpy()
            return actions
    # NETWORK TESTING
    results = []
    results_raw = []
    if return_history:
        history = []
    # Only reversible agents benefit from multiple random initialisations.
    n_attempts = n_attempts if env_args["reversible_spins"] else 1
    for j, test_graph in enumerate(graphs_test):
        i_comp = 0
        i_batch = 0
        t_total = 0
        n_spins = test_graph.shape[0]
        n_steps = int(n_spins * step_factor)
        test_env = ising_env.make("SpinSystem",
                                  SingleGraphGenerator(test_graph),
                                  n_steps,
                                  **env_args)
        print("Running greedy solver with +1 initialisation of spins...", end="...")
        # Calculate the greedy cut with all spins initialised to +1
        greedy_env = deepcopy(test_env)
        greedy_env.reset(spins=np.array([1] * test_graph.shape[0]))
        greedy_agent = Greedy(greedy_env)
        greedy_agent.solve()
        greedy_single_cut = greedy_env.get_best_cut()
        greedy_single_spins = greedy_env.best_spins
        print("done.")
        if return_history:
            actions_history = []
            rewards_history = []
            scores_history = []
        best_cuts = []
        init_spins = []
        best_spins = []
        greedy_cuts = []
        greedy_spins = []
        while i_comp < n_attempts:
            # Size of this batch of parallel episodes.
            if max_batch_size is None:
                batch_size = n_attempts
            else:
                batch_size = min(n_attempts - i_comp, max_batch_size)
            i_comp_batch = 0
            if return_history:
                actions_history_batch = [[None]*batch_size]
                rewards_history_batch = [[None] * batch_size]
                scores_history_batch = []
            test_envs = [None] * batch_size
            best_cuts_batch = [-1e3] * batch_size
            init_spins_batch = [[] for _ in range(batch_size)]
            best_spins_batch = [[] for _ in range(batch_size)]
            greedy_envs = [None] * batch_size
            greedy_cuts_batch = []
            greedy_spins_batch = []
            obs_batch = [None] * batch_size
            print("Preparing batch of {} environments for graph {}.".format(batch_size,j), end="...")
            for i in range(batch_size):
                env = deepcopy(test_env)
                obs_batch[i] = env.reset()
                test_envs[i] = env
                greedy_envs[i] = deepcopy(env)
                init_spins_batch[i] = env.best_spins
            if return_history:
                scores_history_batch.append([env.calculate_score() for env in test_envs])
            print("done.")
            # Calculate the max cut acting w.r.t. the network
            t_start = time.time()
            # pool = mp.Pool(processes=16)
            k = 0
            while i_comp_batch < batch_size:
                t1 = time.time()
                # Note: Do not convert list of np.arrays to FloatTensor, it is very slow!
                # see: https://github.com/pytorch/pytorch/issues/13918
                # Hence, here we convert a list of np arrays to a np array.
                obs_batch = torch.FloatTensor(np.array(obs_batch)).to(device)
                actions = predict(obs_batch)
                obs_batch = []
                if return_history:
                    scores = []
                    rewards = []
                i = 0
                # Step every still-active episode; finished episodes free their slot.
                for env, action in zip(test_envs,actions):
                    if env is not None:
                        obs, rew, done, info = env.step(action)
                        if return_history:
                            scores.append(env.calculate_score())
                            rewards.append(rew)
                        if not done:
                            obs_batch.append(obs)
                        else:
                            best_cuts_batch[i] = env.get_best_cut()
                            best_spins_batch[i] = env.best_spins
                            i_comp_batch += 1
                            i_comp += 1
                            test_envs[i] = None
                    i+=1
                k+=1
                if return_history:
                    actions_history_batch.append(actions)
                    scores_history_batch.append(scores)
                    rewards_history_batch.append(rewards)
                # print("\t",
                #       "Par. steps :", k,
                #       "Env steps : {}/{}".format(k/batch_size,n_steps),
                #       'Time: {0:.3g}s'.format(time.time()-t1))
            t_total += (time.time() - t_start)
            i_batch+=1
            print("Finished agent testing batch {}.".format(i_batch))
            if env_args["reversible_spins"]:
                # Greedy baseline from each random initialisation used by the agent.
                print("Running greedy solver with {} random initialisations of spins for batch {}...".format(batch_size, i_batch), end="...")
                for env in greedy_envs:
                    Greedy(env).solve()
                    cut = env.get_best_cut()
                    greedy_cuts_batch.append(cut)
                    greedy_spins_batch.append(env.best_spins)
                print("done.")
            if return_history:
                actions_history += actions_history_batch
                rewards_history += rewards_history_batch
                scores_history += scores_history_batch
            best_cuts += best_cuts_batch
            init_spins += init_spins_batch
            best_spins += best_spins_batch
            if env_args["reversible_spins"]:
                greedy_cuts += greedy_cuts_batch
                greedy_spins += greedy_spins_batch
            # print("\tGraph {}, par. steps: {}, comp: {}/{}".format(j, k, i_comp, batch_size),
            #       end="\r" if n_spins<100 else "")
        # Aggregate agent and greedy results for this graph.
        i_best = np.argmax(best_cuts)
        best_cut = best_cuts[i_best]
        sol = best_spins[i_best]
        mean_cut = np.mean(best_cuts)
        if env_args["reversible_spins"]:
            idx_best_greedy = np.argmax(greedy_cuts)
            greedy_random_cut = greedy_cuts[idx_best_greedy]
            greedy_random_spins = greedy_spins[idx_best_greedy]
            greedy_random_mean_cut = np.mean(greedy_cuts)
        else:
            greedy_random_cut = greedy_single_cut
            greedy_random_spins = greedy_single_spins
            greedy_random_mean_cut = greedy_single_cut
        print('Graph {}, best(mean) cut: {}({}), greedy cut (rand init / +1 init) : {} / {}. ({} attempts in {}s)\t\t\t'.format(
            j, best_cut, mean_cut, greedy_random_cut, greedy_single_cut, n_attempts, np.round(t_total,2)))
        results.append([best_cut, sol,
                        mean_cut,
                        greedy_single_cut, greedy_single_spins,
                        greedy_random_cut, greedy_random_spins,
                        greedy_random_mean_cut,
                        t_total/(n_attempts)])
        results_raw.append([init_spins,
                            best_cuts, best_spins,
                            greedy_cuts, greedy_spins])
        if return_history:
            history.append([np.array(actions_history).T.tolist(),
                            np.array(scores_history).T.tolist(),
                            np.array(rewards_history).T.tolist()])
    results = pd.DataFrame(data=results, columns=["cut", "sol",
                                                  "mean cut",
                                                  "greedy (+1 init) cut", "greedy (+1 init) sol",
                                                  "greedy (rand init) cut", "greedy (rand init) sol",
                                                  "greedy (rand init) mean cut",
                                                  "time"])
    results_raw = pd.DataFrame(data=results_raw, columns=["init spins",
                                                          "cuts", "sols",
                                                          "greedy cuts", "greedy sols"])
    if return_history:
        history = pd.DataFrame(data=history, columns=["actions", "scores", "rewards"])
    if return_raw==False and return_history==False:
        return results
    else:
        ret = [results]
        if return_raw:
            ret.append(results_raw)
        if return_history:
            ret.append(history)
        return ret
def __test_network_sequential(network, env_args, graphs_test, step_factor=1,
                              n_attempts=50, return_raw=False, return_history=False):
    """Sequentially evaluate ``network`` on every graph in ``graphs_test``.

    Slower, single-environment fallback to the batched test routine.  Each
    graph is attempted ``n_attempts`` times (once only when spins are
    irreversible) and the best cut found is recorded alongside two greedy
    baselines: one started from the all-"+1" spin configuration and one
    restarted from each random initialisation.

    Returns a DataFrame with one row per graph: best cut and solution, the
    two greedy baselines, and the mean wall-clock solve time per attempt.
    """
    if return_raw or return_history:
        raise NotImplementedError("I've not got to this yet! Used the batched test script (it's faster anyway).")

    results = []

    # With irreversible spins an attempt cannot be repeated usefully, so
    # force a single attempt per graph.
    n_attempts = n_attempts if env_args["reversible_spins"] else 1

    for i, test_graph in enumerate(graphs_test):

        # Episode length scales with the graph size.
        n_steps = int(test_graph.shape[0] * step_factor)

        # Sentinel scores: any real cut will beat -1e3.
        best_cut = -1e3
        best_spins = []

        greedy_random_cut = -1e3
        greedy_random_spins = []

        greedy_single_cut = -1e3
        greedy_single_spins = []

        times = []

        test_env = ising_env.make("SpinSystem",
                                  SingleGraphGenerator(test_graph),
                                  n_steps,
                                  **env_args)

        net_agent = Network(network, test_env,
                            record_cut=False, record_rewards=False, record_qs=False)

        # Baseline 1: greedy search from the all-"+1" spin configuration,
        # computed once per graph on an independent copy of the environment.
        greedy_env = deepcopy(test_env)
        greedy_env.reset(spins=np.array([1] * test_graph.shape[0]))

        greedy_agent = Greedy(greedy_env)
        greedy_agent.solve()

        greedy_single_cut = greedy_env.get_best_cut()
        greedy_single_spins = greedy_env.best_spins

        for k in range(n_attempts):
            # NOTE(review): presumably reset() re-randomises test_env's spin
            # state in place, so the deepcopy below hands the greedy baseline
            # the *same* random initialisation — confirm against Network.reset.
            net_agent.reset(clear_history=True)
            greedy_env = deepcopy(test_env)
            greedy_agent = Greedy(greedy_env)

            tstart = time.time()
            net_agent.solve()
            times.append(time.time() - tstart)

            # Track the best cut the network agent has found on this graph.
            cut = test_env.get_best_cut()
            if cut > best_cut:
                best_cut = cut
                best_spins = test_env.best_spins

            # Baseline 2: greedy search from the same random initialisation.
            greedy_agent.solve()

            greedy_cut = greedy_env.get_best_cut()
            if greedy_cut > greedy_random_cut:
                greedy_random_cut = greedy_cut
                greedy_random_spins = greedy_env.best_spins

            # print('\nGraph {}, attempt : {}/{}, best cut : {}, greedy cut (rand init / +1 init) : {} / {}\t\t\t'.format(
            #     i + 1, k, n_attemps, best_cut, greedy_random_cut, greedy_single_cut),
            #     end="\r")
            print('\nGraph {}, attempt : {}/{}, best cut : {}, greedy cut (rand init / +1 init) : {} / {}\t\t\t'.format(
                i + 1, k, n_attempts, best_cut, greedy_random_cut, greedy_single_cut),
                end=".")

        results.append([best_cut, best_spins,
                        greedy_single_cut, greedy_single_spins,
                        greedy_random_cut, greedy_random_spins,
                        np.mean(times)])

    return pd.DataFrame(data=results, columns=["cut", "sol",
                                               "greedy (+1 init) cut", "greedy (+1 init) sol",
                                               "greedy (rand init) cut", "greedy (rand init) sol",
                                               "time"])
####################################################
# LOADING GRAPHS
####################################################
# Container for a benchmark graph together with its best-known value/solution.
Graph = namedtuple('Graph', 'name n_vertices n_edges matrix bk_val bk_sol')

def load_graph(graph_dir, graph_name):
    """Load a benchmark graph and its best-known cut value and solution.

    Expects three files under ``graph_dir``:
      - ``instances/<name>.mc``: first (non-blank) line is
        "n_vertices n_edges", followed by one "i j w" line per edge
        (1-indexed vertices, integer weights).
      - ``bkvl/<name>.bkvl``: best-known cut value (a single float).
      - ``bksol/<name>.bksol``: best-known solution as a 0/1 string.

    Returns:
        A ``Graph`` namedtuple.  ``matrix`` is the symmetric (dense, float)
        adjacency matrix.  ``bk_sol`` carries one extra random trailing spin
        representing the 'no-action' option.
    """
    inst_loc = os.path.join(graph_dir, 'instances', graph_name + '.mc')
    val_loc = os.path.join(graph_dir, 'bkvl', graph_name + '.bkvl')
    sol_loc = os.path.join(graph_dir, 'bksol', graph_name + '.bksol')

    matrix = None

    with open(inst_loc) as f:
        for line in f:
            # split() (rather than split(' ')) tolerates repeated spaces
            # and tabs; blank lines are skipped instead of crashing int().
            tokens = line.split()
            if not tokens:
                continue
            arr = list(map(int, tokens))
            if len(arr) == 2:  # header: number of vertices and edges
                n_vertices, n_edges = arr
                matrix = np.zeros((n_vertices, n_vertices))
            else:
                assert type(matrix) == np.ndarray, 'First line in file should define graph dimensions.'
                i, j, w = arr[0] - 1, arr[1] - 1, arr[2]  # to 0-indexed
                matrix[[i, j], [j, i]] = w  # set both (i,j) and (j,i)

    with open(val_loc) as f:
        bk_val = float(f.readline())

    with open(sol_loc) as f:
        bk_sol_str = f.readline().strip()
        # Final (random) spin is the 'no-action' placeholder.
        bk_sol = np.array([int(x) for x in list(bk_sol_str)] + [np.random.choice([0, 1])])

    return Graph(graph_name, n_vertices, n_edges, matrix, bk_val, bk_sol)
def load_graph_set(graph_save_loc):
    """Load a pickled list of graphs and return them as dense numpy arrays.

    Accepts entries that are already numpy arrays, networkx graphs, or
    scipy CSR sparse matrices; the latter two are densified.

    Args:
        graph_save_loc: Path to a pickle file containing a list of graphs.

    Returns:
        List of dense numpy adjacency matrices.
    """
    def graph_to_array(g):
        if isinstance(g, np.ndarray):
            return g  # already dense: pass straight through
        if type(g) == nx.Graph:
            g = nx.to_numpy_array(g)
        elif type(g) == sp.sparse.csr_matrix:
            g = g.toarray()
        return g

    # Bug fix: the original pickle.load(open(...)) leaked the file handle;
    # the context manager guarantees it is closed.
    with open(graph_save_loc, 'rb') as f:
        graphs_test = pickle.load(f)

    graphs_test = [graph_to_array(g) for g in graphs_test]
    print('{} target graphs loaded from {}'.format(len(graphs_test), graph_save_loc))
    return graphs_test
####################################################
# FILE UTILS
####################################################
def mk_dir(export_dir, quite=False):
    """Create ``export_dir`` (and any missing parents) if it does not exist.

    NOTE(review): the ``quite`` argument (presumably a typo for ``quiet``)
    is kept for backward compatibility but is not used.
    """
    import errno  # local import keeps this helper self-contained

    if not os.path.exists(export_dir):
        try:
            os.makedirs(export_dir)
            print('created dir: ', export_dir)
        except OSError as exc:  # Guard against race condition
            # Bug fix: the original compared against ``exc.errno.EEXIST``,
            # an attribute lookup on an int that raises AttributeError.
            # The intended constant lives in the ``errno`` module.
            if exc.errno != errno.EEXIST:
                raise
        except Exception:
            pass
else:
print('dir already exists: ', export_dir) | 16,669 | 36.209821 | 141 | py |
eco-dqn | eco-dqn-master/experiments/BA_60spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_60spin/eco",
        graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained ECO-DQN agent on the BA 60-spin validation set.

    Loads the best checkpoint from ``<save_loc>/network`` and writes the test
    results (summary, raw data and per-step history) as pickles into
    ``<save_loc>/data``.

    Args:
        save_loc: Experiment folder containing 'network/' and 'data/'.
        graph_save_loc: Pickle file with the list of validation graphs.
        batched: Whether to run multiple attempts per graph in parallel.
        max_batch_size: Cap on parallel environments (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # NOTE(review): removed an unused 'test_save_path' local here.
    network_save_path = os.path.join(network_folder, 'network_best.pth')

    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # Each episode lasts step_factor * n_spins environment steps.
    # (An unused 'gamma' local was removed — it played no role in testing.)
    step_factor = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                # 1/60 — presumably normalised by the 60 spins per graph;
                # TODO confirm against the training configuration.
                'basin_reward': 1. / 60,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway env used only to infer the observation shape below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): the original called torch.device(device) here and
    # discarded the result — a no-op, now removed.
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # File names derive from the graph set's base name (hoisted from three
    # duplicated splitext/split expressions).
    graph_basename = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_basename + ".pkl"
    results_raw_fname = "results_" + graph_basename + "_raw.pkl"
    history_fname = "results_" + graph_basename + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,506 | 35.942623 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_60spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_60spin/s2v",
        graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained S2V-DQN agent on the BA 60-spin validation set.

    Loads the best checkpoint from ``<save_loc>/network`` and writes the test
    results (summary, raw data and per-step history) as pickles into
    ``<save_loc>/data``.

    Args:
        save_loc: Experiment folder containing 'network/' and 'data/'.
        graph_save_loc: Pickle file with the list of validation graphs.
        batched: Whether to run multiple attempts per graph in parallel.
        max_batch_size: Cap on parallel environments (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # NOTE(review): removed an unused 'test_save_path' local here.
    network_save_path = os.path.join(network_folder, 'network_best.pth')

    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V setting: one (irreversible) flip per spin, so the episode length
    # equals the number of spins.  (An unused 'gamma' local was removed.)
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway env used only to infer the observation shape below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): the original called torch.device(device) here and
    # discarded the result — a no-op, now removed.
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # File names derive from the graph set's base name (hoisted from three
    # duplicated splitext/split expressions).
    graph_basename = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_basename + ".pkl"
    results_raw_fname = "results_" + graph_basename + "_raw.pkl"
    history_fname = "results_" + graph_basename + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,485 | 35.770492 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_20spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_20spin/eco",
        graph_save_loc="_graphs/validation/ER_20spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained ECO-DQN agent on the ER 20-spin validation set.

    Loads the best checkpoint from ``<save_loc>/network`` and writes the test
    results (summary, raw data and per-step history) as pickles into
    ``<save_loc>/data``.

    Args:
        save_loc: Experiment folder containing 'network/' and 'data/'.
        graph_save_loc: Pickle file with the list of validation graphs.
        batched: Whether to run multiple attempts per graph in parallel.
        max_batch_size: Cap on parallel environments (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # NOTE(review): removed an unused 'test_save_path' local here.
    network_save_path = os.path.join(network_folder, 'network_best.pth')

    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # Each episode lasts step_factor * n_spins environment steps.
    # (An unused 'gamma' local was removed — it played no role in testing.)
    step_factor = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                # 1/20 — presumably normalised by the 20 spins per graph;
                # TODO confirm against the training configuration.
                'basin_reward': 1. / 20,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway env used only to infer the observation shape below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): the original called torch.device(device) here and
    # discarded the result — a no-op, now removed.
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # File names derive from the graph set's base name (hoisted from three
    # duplicated splitext/split expressions).
    graph_basename = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_basename + ".pkl"
    results_raw_fname = "results_" + graph_basename + "_raw.pkl"
    history_fname = "results_" + graph_basename + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,507 | 35.95082 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_20spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_20spin/s2v",
        graph_save_loc="_graphs/validation/ER_20spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained S2V-DQN agent on the ER 20-spin validation set.

    Loads the best checkpoint from ``<save_loc>/network`` and writes the test
    results (summary, raw data and per-step history) as pickles into
    ``<save_loc>/data``.

    Args:
        save_loc: Experiment folder containing 'network/' and 'data/'.
        graph_save_loc: Pickle file with the list of validation graphs.
        batched: Whether to run multiple attempts per graph in parallel.
        max_batch_size: Cap on parallel environments (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # NOTE(review): removed an unused 'test_save_path' local here.
    network_save_path = os.path.join(network_folder, 'network_best.pth')

    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V setting: one (irreversible) flip per spin, so the episode length
    # equals the number of spins.  (An unused 'gamma' local was removed.)
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway env used only to infer the observation shape below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): the original called torch.device(device) here and
    # discarded the result — a no-op, now removed.
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # File names derive from the graph set's base name (hoisted from three
    # duplicated splitext/split expressions).
    graph_basename = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_basename + ".pkl"
    results_raw_fname = "results_" + graph_basename + "_raw.pkl"
    history_fname = "results_" + graph_basename + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,486 | 35.778689 | 108 | py |
eco-dqn | eco-dqn-master/experiments/pretrained_agent/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set, mk_dir
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="pretrained_agent/eco",
        network_save_loc="experiments_new/pretrained_agent/networks/eco/network_best_ER_200spin.pth",
        graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None,
        step_factor=None,
        n_attemps=50):
    """Evaluate a pre-trained ECO-DQN agent from an explicit checkpoint path.

    Results (summary, raw data and per-step history) are pickled directly
    into ``save_loc``, which is created if necessary.

    Args:
        save_loc: Output folder for the result pickles (created via mk_dir).
        network_save_loc: Path to the trained network's state_dict (.pth).
        graph_save_loc: Pickle file with the list of test graphs.
        batched: Whether to run multiple attempts per graph in parallel.
        max_batch_size: Cap on parallel environments (None = no cap).
        step_factor: Episode length multiplier (defaults to 2 when None).
        n_attemps: Attempts per graph (name kept — typo for 'n_attempts' —
            to preserve the public keyword interface).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # FOLDER LOCATIONS
    ####################################################

    print("save location :", save_loc)
    print("network params :", network_save_loc)
    mk_dir(save_loc)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    if step_factor is None:
        step_factor = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway env used only to infer the observation shape below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): the original called torch.device(device) here and
    # discarded the result — a no-op, now removed.
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_loc, map_location=device))
    # Inference only: freeze parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True, n_attempts=n_attemps,
                                                 batched=batched, max_batch_size=max_batch_size)

    # File names derive from the graph set's base name (hoisted from three
    # duplicated splitext/split expressions).
    graph_basename = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_basename + ".pkl"
    results_raw_fname = "results_" + graph_basename + "_raw.pkl"
    history_fname = "results_" + graph_basename + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(save_loc, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,430 | 36.550847 | 108 | py |
eco-dqn | eco-dqn-master/experiments/pretrained_agent/test_s2v.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tests an agent.
"""
import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set, mk_dir
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="pretrained_agent/s2v",
        network_save_loc="experiments_new/pretrained_agent/networks/s2v/network_best_ER_200spin.pth",
        graph_save_loc="_graphs/benchmarks/ising_125spin_graphs.pkl",
        batched=True,
        max_batch_size=5):
    """Evaluate a pre-trained S2V-DQN agent from an explicit checkpoint path.

    Results (summary, raw data and per-step history) are pickled directly
    into ``save_loc``, which is created if necessary.

    Args:
        save_loc: Output folder for the result pickles (created via mk_dir).
        network_save_loc: Path to the trained network's state_dict (.pth).
        graph_save_loc: Pickle file with the list of test graphs.
        batched: Whether to run multiple attempts per graph in parallel.
        max_batch_size: Cap on parallel environments.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # FOLDER LOCATIONS
    ####################################################

    print("save location :", save_loc)
    print("network params :", network_save_loc)
    mk_dir(save_loc)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V setting: one (irreversible) flip per spin, so the episode length
    # equals the number of spins.
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway env used only to infer the observation shape below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): the original called torch.device(device) here and
    # discarded the result — a no-op, now removed.
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_loc, map_location=device))
    # Inference only: freeze parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True, n_attempts=50,
                                                 batched=batched, max_batch_size=max_batch_size)

    # File names derive from the graph set's base name (hoisted from three
    # duplicated splitext/split expressions).
    graph_basename = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_basename + ".pkl"
    results_raw_fname = "results_" + graph_basename + "_raw.pkl"
    history_fname = "results_" + graph_basename + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(save_loc, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,395 | 35.330579 | 108 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.