code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Text
from rich.console import Console
from ruth.constants import TEXT
from ruth.nlu.featurizers.sparse_featurizers.constants import (
CLASS_FEATURIZER_UNIQUE_NAME,
)
from ruth.nlu.featurizers.sparse_featurizers.sparse_featurizer import SparseFeaturizer
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.feature import Feature
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from sklearn.feature_extraction.text import TfidfVectorizer
logger = logging.getLogger(__name__)
console = Console()
class TfidfVectorFeaturizer(SparseFeaturizer):
    """Sparse featurizer that turns message text into TF-IDF vectors.

    Thin wrapper around scikit-learn's ``TfidfVectorizer``. Hyperparameters
    are read from ``element_config`` (merged with ``defaults`` by the
    framework base class).
    """

    # Default TfidfVectorizer hyperparameters; see the scikit-learn docs for
    # the meaning of each key.
    defaults = {
        "analyzer": "word",
        "stop_words": None,
        "min_df": 1,
        "max_df": 1.0,
        "ngram_range": (1, 1),
        "lowercase": True,
        "max_features": None,
        "norm": "l2",
        "use_idf": True,
    }

    def __init__(
        self,
        element_config: Optional[Dict[Text, Any]],
        vectorizer: Optional["TfidfVectorizer"] = None,
    ):
        """
        :param element_config: Pipeline element configuration.
        :param vectorizer: Optionally a pre-fitted vectorizer (supplied when
            loading a persisted model).
        """
        super().__init__(element_config)
        self.vectorizer = vectorizer
        self._load_params()
        self._verify_analyzer()

    def _load_params(self) -> None:
        """Copy hyperparameters out of the element config onto attributes."""
        self.analyzer = self.element_config["analyzer"]
        self.stop_words = self.element_config["stop_words"]
        self.min_df = self.element_config["min_df"]
        self.max_df = self.element_config["max_df"]
        self.ngram_range = self.element_config["ngram_range"]
        self.lowercase = self.element_config["lowercase"]
        self.max_features = self.element_config["max_features"]
        self.norm = self.element_config["norm"]
        self.use_idf = self.element_config["use_idf"]

    def _verify_analyzer(self) -> None:
        """Warn about configuration combinations that are probably mistakes."""
        if self.analyzer != "word":
            if self.stop_words is not None:
                logger.warning(
                    "You specified the character wise analyzer."
                    " So stop words will be ignored."
                )
            if self.ngram_range[1] == 1:
                logger.warning(
                    "You specified the character wise analyzer"
                    " but max n-gram is set to 1."
                    " So, the vocabulary will only contain"
                    " the single characters. "
                )

    @staticmethod
    def _build_vectorizer(
        parameters: Dict[Text, Any], vocabulary=None
    ) -> TfidfVectorizer:
        """Construct a ``TfidfVectorizer`` from a parameter dict.

        Note: the ``vocabulary`` parameter was previously misspelled
        ``vacabulary``; it was only ever left at its default, so the rename
        is safe.
        """
        return TfidfVectorizer(
            analyzer=parameters["analyzer"],
            stop_words=parameters["stop_words"],
            min_df=parameters["min_df"],
            max_df=parameters["max_df"],
            ngram_range=parameters["ngram_range"],
            lowercase=parameters["lowercase"],
            max_features=parameters["max_features"],
            norm=parameters["norm"],
            use_idf=parameters["use_idf"],
            vocabulary=vocabulary,
        )

    def train(self, training_data: TrainData) -> TfidfVectorizer:
        """Fit the vectorizer on the training texts and attach features.

        :param training_data: Training corpus.
        :return: The fitted vectorizer.
        """
        self.vectorizer = self._build_vectorizer(
            parameters={
                "analyzer": self.analyzer,
                "stop_words": self.stop_words,
                "min_df": self.min_df,
                "max_df": self.max_df,
                "ngram_range": self.ngram_range,
                "lowercase": self.lowercase,
                "max_features": self.max_features,
                "norm": self.norm,
                "use_idf": self.use_idf,
            }
        )
        self.vectorizer.fit(self.get_data(training_data))
        features = self._get_featurizer_data(training_data)
        # Fixed: previously the TrainData object itself was passed here even
        # though _add_featurizer_data zips over individual RuthData examples
        # (its own annotation and _get_featurizer_data both expect
        # training_examples).
        self._add_featurizer_data(training_data.training_examples, features)
        return self.vectorizer

    def parse(self, message: RuthData) -> None:
        """Featurize a single incoming message in place."""
        feature = self.vectorizer.transform([message.get(TEXT)])
        message.add_features(
            Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
        )

    def _check_attribute_vocabulary(self) -> bool:
        """Checks if trained vocabulary exists in attribute's count vectorizer."""
        # hasattr never raises here (it returns False for None as well), so
        # the previous try/except wrapper was dead code.
        return hasattr(self.vectorizer, "vocabulary_")

    def create_vector(self, examples: List[RuthData]):
        """Transform each example's text into a sparse TF-IDF matrix."""
        return [
            self.vectorizer.transform([message.get(TEXT)]) for message in examples
        ]

    def _get_featurizer_data(self, training_data: TrainData):
        """Return per-example features, or [] when the vectorizer is unfitted."""
        if self._check_attribute_vocabulary():
            return self.create_vector(training_data.training_examples)
        return []

    def _add_featurizer_data(
        self, training_examples: List[RuthData], features
    ) -> None:
        """Attach one Feature per example (no-op when ``features`` is empty)."""
        for message, feature in zip(training_examples, features):
            message.add_features(
                Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
            )

    def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Text]:
        """Pickle the fitted vectorizer into ``model_dir``; return metadata."""
        file_name = file_name + ".pkl"
        if self.vectorizer:
            featurizer_path = Path(model_dir) / file_name
            json_pickle(featurizer_path, self.vectorizer)
        return {"file_name": file_name}

    @classmethod
    def load(
        cls, meta: Dict[Text, Any], model_dir: Path, **kwargs: Any
    ) -> "TfidfVectorFeaturizer":
        """Restore a persisted featurizer; fall back to an unfitted one when
        no pickle file exists."""
        file_name = meta.get("file_name")
        featurizer_file = model_dir / file_name
        if not featurizer_file.exists():
            return cls(meta)
        vectorizer = json_unpickle(featurizer_file)
        return cls(meta, vectorizer)
import os
import random
import uuid
from time import time
from urllib import request
import requests
import torch
import torch.nn.functional as F
import progressbar
import torchaudio
from ruth_tts_transformer.models.classifier import AudioMiniEncoderWithClassifierHead
from ruth_tts_transformer.models.diffusion_decoder import DiffusionTts
from ruth_tts_transformer.models.autoregressive import UnifiedVoice
from tqdm import tqdm
from ruth_tts_transformer.models.arch_util import TorchMelSpectrogram
from ruth_tts_transformer.models.clvp import CLVP
from ruth_tts_transformer.models.random_latent_generator import RandomLatentConverter
from ruth_tts_transformer.models.vocoder import UnivNetGenerator
from ruth_tts_transformer.utils.audio import wav_to_univnet_mel, denormalize_tacotron_mel
from ruth_tts_transformer.utils.diffusion import SpacedDiffusion, space_timesteps, get_named_beta_schedule
from ruth_tts_transformer.utils.tokenizer import VoiceBpeTokenizer
from ruth_tts_transformer.utils.wav2vec_alignment import Wav2VecAlignment
# Shared progress bar instance driven by the urlretrieve reporthook in
# download_models().
pbar = None
# Default on-disk cache location for downloaded model weights.
DEFAULT_MODELS_DIR = os.path.join(os.path.expanduser('~'), '.cache', 'tortoise', 'models')
# Overridable via the TORTOISE_MODELS_DIR environment variable.
MODELS_DIR = os.environ.get('TORTOISE_MODELS_DIR', DEFAULT_MODELS_DIR)
# Model file name -> download URL for every weight file Tortoise needs.
MODELS = {
    'autoregressive.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/autoregressive.pth',
    'classifier.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/classifier.pth',
    'clvp2.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/clvp2.pth',
    'diffusion_decoder.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/diffusion_decoder.pth',
    'vocoder.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/vocoder.pth',
    'rlg_auto.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/rlg_auto.pth',
    'rlg_diffuser.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/rlg_diffuser.pth',
}
def download_models(specific_models=None):
    """
    Call to download all the models that Tortoise uses.

    :param specific_models: Optional collection of model file names; when
        given, only those entries of MODELS are fetched. Files already
        present on disk are never re-downloaded.
    """
    os.makedirs(MODELS_DIR, exist_ok=True)

    def show_progress(block_num, block_size, total_size):
        # urlretrieve reporthook: lazily creates the module-level progress
        # bar on the first chunk and tears it down once the download is done.
        global pbar
        if pbar is None:
            pbar = progressbar.ProgressBar(maxval=total_size)
            pbar.start()
        downloaded = block_num * block_size
        if downloaded < total_size:
            pbar.update(downloaded)
        else:
            pbar.finish()
            pbar = None
    for model_name, url in MODELS.items():
        if specific_models is not None and model_name not in specific_models:
            continue
        model_path = os.path.join(MODELS_DIR, model_name)
        if os.path.exists(model_path):
            # Already cached.
            continue
        request.urlretrieve(url, model_path, show_progress)
def get_model_path(model_name, models_dir=MODELS_DIR):
    """Return the local path for *model_name*, downloading it on demand.

    Downloads are only attempted for the default models directory; a custom
    ``models_dir`` is assumed to be user-managed.

    :raises ValueError: if *model_name* is not a known model.
    """
    if model_name not in MODELS:
        raise ValueError(f'Model {model_name} not found in available models.')
    path = os.path.join(models_dir, model_name)
    needs_fetch = models_dir == MODELS_DIR and not os.path.exists(path)
    if needs_fetch:
        download_models([model_name])
    return path
def pad_or_truncate(t, length):
    """Force the last dimension of *t* to exactly *length* entries.

    Shorter tensors are right-padded with zeros; longer ones are clipped.
    The tensor is returned unchanged when it already has the right length.
    """
    current = t.shape[-1]
    if current == length:
        return t
    if current < length:
        return F.pad(t, (0, length - current))
    return t[..., :length]
def load_discrete_vocoder_diffuser(trained_diffusion_steps=4000, desired_diffusion_steps=200, cond_free=True,
                                   cond_free_k=1):
    """
    Helper function to load a GaussianDiffusion instance configured for use as a vocoder.
    """
    timesteps = space_timesteps(trained_diffusion_steps, [desired_diffusion_steps])
    betas = get_named_beta_schedule('linear', trained_diffusion_steps)
    return SpacedDiffusion(
        use_timesteps=timesteps,
        model_mean_type='epsilon',
        model_var_type='learned_range',
        loss_type='mse',
        betas=betas,
        conditioning_free=cond_free,
        conditioning_free_k=cond_free_k,
    )
def format_conditioning(clip, cond_length=132300, device='cuda'):
    """
    Converts the given conditioning signal to a MEL spectrogram and clips it as expected by the models.

    Clips longer than *cond_length* samples are randomly cropped; shorter
    ones are zero-padded on the right.
    """
    surplus = clip.shape[-1] - cond_length
    if surplus < 0:
        clip = F.pad(clip, pad=(0, -surplus))
    elif surplus > 0:
        start = random.randint(0, surplus)
        clip = clip[:, start:start + cond_length]
    mel = TorchMelSpectrogram()(clip.unsqueeze(0)).squeeze(0)
    return mel.unsqueeze(0).to(device)
def fix_autoregressive_output(codes, stop_token, complain=True):
    """
    This function performs some padding on coded audio that fixes a mismatch issue between what the diffusion model was
    trained on and what the autoregressive code generator creates (which has no padding or end).
    This is highly specific to the DVAE being used, so this particular coding will not necessarily work if used with
    a different DVAE. This can be inferred by feeding a audio clip padded with lots of zeros on the end through the DVAE
    and copying out the last few codes.
    Failing to do this padding will produce speech with a harsh end that sounds like "BLAH" or similar.

    Mutates *codes* in place and returns it.
    """
    stop_positions = (codes == stop_token).nonzero()
    if len(stop_positions) == 0:
        # No stop token: generation likely ran past the length limit.
        if complain:
            print("No stop tokens found in one of the generated voice clips. This typically means the spoken audio is "
                  "too long. In some cases, the output will still be good, though. Listen to it and if it is missing words, "
                  "try breaking up your input text.")
        return codes
    # Overwrite the stop token and everything after the first one with the
    # "silence" code, then terminate with the fixed DVAE end sequence.
    codes[stop_positions] = 83
    first_stop = stop_positions.min().item()
    codes[first_stop:] = 83
    if first_stop - 3 < codes.shape[0]:
        codes[-3] = 45
        codes[-2] = 45
        codes[-1] = 248
    return codes
def do_spectrogram_diffusion(diffusion_model, diffuser, latents, conditioning_latents, temperature=1, verbose=True):
    """
    Uses the specified diffusion model to convert discrete codes into a spectrogram.
    """
    with torch.no_grad():
        # This diffusion model converts from 22kHz spectrogram codes to a
        # 24kHz spectrogram signal: 4 output frames per code, resampled.
        seq_len = latents.shape[1] * 4 * 24000 // 22050
        shape = (latents.shape[0], 100, seq_len)
        embeddings = diffusion_model.timestep_independent(latents, conditioning_latents, seq_len, False)
        noise = torch.randn(shape, device=latents.device) * temperature
        mel = diffuser.p_sample_loop(
            diffusion_model, shape, noise=noise,
            model_kwargs={'precomputed_aligned_embeddings': embeddings},
            progress=verbose)
        return denormalize_tacotron_mel(mel)[:, :, :seq_len]
def classify_audio_clip(clip):
    """
    Returns whether or not Tortoises' classifier thinks the given clip came from Tortoise.
    :param clip: torch tensor containing audio waveform data (get it from load_audio)
    :return: True if the clip was classified as coming from Tortoise and false if it was classified as real.
    """
    detector = AudioMiniEncoderWithClassifierHead(
        2, spec_dim=1, embedding_dim=512, depth=5, downsample_factor=4,
        resnet_blocks=2, attn_blocks=4, num_attn_heads=4, base_channels=32,
        dropout=0, kernel_size=5, distribute_zero_label=False)
    weights = torch.load(get_model_path('classifier.pth'), map_location=torch.device('cpu'))
    detector.load_state_dict(weights)
    probs = F.softmax(detector(clip.cpu().unsqueeze(0)), dim=-1)
    return probs[0][0]
def pick_best_batch_size_for_gpu():
    """
    Tries to pick a batch size that will fit in your GPU. These sizes aren't guaranteed to work, but they should give
    you a good shot.

    Returns 1 when no CUDA device is available.
    """
    if not torch.cuda.is_available():
        return 1
    _, free_bytes = torch.cuda.mem_get_info()
    free_gb = free_bytes / (1024 ** 3)
    # Thresholds (GiB free) -> batch size, largest first.
    for threshold, batch in ((14, 16), (10, 8), (7, 4)):
        if free_gb > threshold:
            return batch
    return 1
class TextToSpeech:
    """
    Main entry point into Tortoise.

    Holds the five models of the pipeline (autoregressive code generator,
    CLVP ranker, diffusion decoder, UnivNet vocoder, and lazily-loaded random
    latent generators) and moves each on/off the GPU around its use.
    """
    def __init__(self, autoregressive_batch_size=None, models_dir=MODELS_DIR, enable_redaction=True, device=None):
        """
        Constructor
        :param autoregressive_batch_size: Specifies how many samples to generate per batch. Lower this if you are seeing
                                          GPU OOM errors. Larger numbers generates slightly faster.
        :param models_dir: Where model weights are stored. This should only be specified if you are providing your own
                           models, otherwise use the defaults.
        :param enable_redaction: When true, text enclosed in brackets are automatically redacted from the spoken output
                                 (but are still rendered by the model). This can be used for prompt engineering.
                                 Default is true.
        :param device: Device to use when running the model. If omitted, the device will be automatically chosen.
        """
        self.models_dir = models_dir
        self.autoregressive_batch_size = pick_best_batch_size_for_gpu() if autoregressive_batch_size is None else autoregressive_batch_size
        self.enable_redaction = enable_redaction
        # NOTE(review): the `device` argument is never read — the device is
        # always auto-selected here. Confirm whether honoring `device` was
        # intended.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if self.enable_redaction:
            self.aligner = Wav2VecAlignment()
        self.tokenizer = VoiceBpeTokenizer()
        if os.path.exists(f'{models_dir}/autoregressive.ptt'):
            # Assume this is a traced directory.
            self.autoregressive = torch.jit.load(f'{models_dir}/autoregressive.ptt')
            self.diffusion = torch.jit.load(f'{models_dir}/diffusion_decoder.ptt')
        else:
            self.autoregressive = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2,
                                               layers=30,
                                               model_dim=1024,
                                               heads=16, number_text_tokens=255, start_text_token=255,
                                               checkpointing=False,
                                               train_solo_embeddings=False).cpu().eval()
            self.autoregressive.load_state_dict(torch.load(get_model_path('autoregressive.pth', models_dir)))
            self.diffusion = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
                                          in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False,
                                          num_heads=16,
                                          layer_drop=0, unconditioned_percentage=0).cpu().eval()
            self.diffusion.load_state_dict(torch.load(get_model_path('diffusion_decoder.pth', models_dir)))
        self.clvp = CLVP(dim_text=768, dim_speech=768, dim_latent=768, num_text_tokens=256, text_enc_depth=20,
                         text_seq_len=350, text_heads=12,
                         num_speech_tokens=8192, speech_enc_depth=20, speech_heads=12, speech_seq_len=430,
                         use_xformers=True).cpu().eval()
        self.clvp.load_state_dict(torch.load(get_model_path('clvp2.pth', models_dir)))
        self.vocoder = UnivNetGenerator().cpu()
        self.vocoder.load_state_dict(
            torch.load(get_model_path('vocoder.pth', models_dir), map_location=torch.device('cpu'))['model_g'])
        self.vocoder.eval(inference=True)
        # Random latent generators (RLGs) are loaded lazily.
        self.rlg_auto = None
        self.rlg_diffusion = None

    def get_conditioning_latents(self, voice_samples, return_mels=False):
        """
        Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent).
        These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic
        properties.
        :param voice_samples: List of 2 or more ~10 second reference clips, which should be torch tensors containing 22.05kHz waveform data.
        :param return_mels: When True, also return the raw autoregressive and
            diffusion conditioning inputs (used by tts() for re-ranking).
        """
        with torch.no_grad():
            # NOTE(review): this comprehension runs before the isinstance
            # check below, so a bare (non-list) tensor would already have
            # been iterated per-row here — confirm callers always pass a
            # list.
            voice_samples = [v.to(self.device) for v in voice_samples]
            auto_conds = []
            if not isinstance(voice_samples, list):
                voice_samples = [voice_samples]
            for vs in voice_samples:
                auto_conds.append(format_conditioning(vs, device=self.device))
            auto_conds = torch.stack(auto_conds, dim=1)
            # Models are moved to the GPU only around their use to limit
            # peak VRAM.
            self.autoregressive = self.autoregressive.to(self.device)
            auto_latent = self.autoregressive.get_conditioning(auto_conds)
            self.autoregressive = self.autoregressive.cpu()
            diffusion_conds = []
            for sample in voice_samples:
                # The diffuser operates at a sample rate of 24000 (except for the latent inputs)
                sample = torchaudio.functional.resample(sample, 22050, 24000)
                sample = pad_or_truncate(sample, 102400)
                cond_mel = wav_to_univnet_mel(sample.to(self.device), do_normalization=False, device=self.device)
                diffusion_conds.append(cond_mel)
            diffusion_conds = torch.stack(diffusion_conds, dim=1)
            self.diffusion = self.diffusion.to(self.device)
            diffusion_latent = self.diffusion.get_conditioning(diffusion_conds)
            self.diffusion = self.diffusion.cpu()
        if return_mels:
            return auto_latent, diffusion_latent, auto_conds, diffusion_conds
        else:
            return auto_latent, diffusion_latent

    def get_random_conditioning_latents(self):
        """Return (auto_latent, diffusion_latent) sampled from the random
        latent generators; used when no reference voice is supplied."""
        # Lazy-load the RLG models.
        if self.rlg_auto is None:
            self.rlg_auto = RandomLatentConverter(1024).eval()
            self.rlg_auto.load_state_dict(
                torch.load(get_model_path('rlg_auto.pth', self.models_dir), map_location=torch.device('cpu')))
            self.rlg_diffusion = RandomLatentConverter(2048).eval()
            self.rlg_diffusion.load_state_dict(
                torch.load(get_model_path('rlg_diffuser.pth', self.models_dir), map_location=torch.device('cpu')))
        with torch.no_grad():
            return self.rlg_auto(torch.tensor([0.0])), self.rlg_diffusion(torch.tensor([0.0]))

    def tts_with_preset(self, text, preset='fast', **kwargs):
        """
        Calls TTS with one of a set of preset generation parameters. Options:
            'ultra_fast': Produces speech at a speed which belies the name of this repo. (Not really, but it's definitely fastest).
            'fast': Decent quality speech at a decent inference rate. A good choice for mass inference.
            'standard': Very good quality. This is generally about as good as you are going to get.
            'high_quality': Use if you want the absolute best. This is not really worth the compute, though.
        """
        # Use generally found best tuning knobs for generation.
        settings = {'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0,
                    'top_p': .8,
                    'cond_free_k': 2.0, 'diffusion_temperature': 1.0}
        # Presets are defined here.
        presets = {
            'ultra_fast': {'num_autoregressive_samples': 16, 'diffusion_iterations': 30, 'cond_free': False},
            'fast': {'num_autoregressive_samples': 96, 'diffusion_iterations': 80},
            'standard': {'num_autoregressive_samples': 256, 'diffusion_iterations': 200},
            'high_quality': {'num_autoregressive_samples': 256, 'diffusion_iterations': 400},
        }
        settings.update(presets[preset])
        settings.update(kwargs)  # allow overriding of preset settings with kwargs
        return self.tts(text, **settings)

    def tts(self, text, voice_samples=None, conditioning_latents=None, k=1, verbose=True, use_deterministic_seed=None,
            return_deterministic_state=False,
            # autoregressive generation parameters follow
            num_autoregressive_samples=512, temperature=.8, length_penalty=1, repetition_penalty=2.0, top_p=.8,
            max_mel_tokens=500,
            # diffusion generation parameters follow
            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,
            **hf_generate_kwargs):
        """Render *text* as speech.

        Pipeline: tokenize the text, obtain conditioning latents (from
        reference clips, supplied latents, or the random generators), sample
        autoregressive speech-code candidates, rank them with CLVP, re-derive
        the autoregressive latents of the top k, run the diffusion decoder to
        a MEL spectrogram, and vocode with UnivNet. Bracketed text is
        redacted from the audio when redaction is enabled.

        :return: A wav tensor (list of tensors when k > 1); when
            return_deterministic_state is set, a tuple of that result and the
            (seed, text, voice_samples, conditioning_latents) state.
        """
        self.text = text
        deterministic_seed = self.deterministic_state(seed=use_deterministic_seed)
        text_tokens = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).to(self.device)
        text_tokens = F.pad(text_tokens, (0, 1))  # This may not be necessary.
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # raise would be more robust for input validation.
        assert text_tokens.shape[
                   -1] < 400, 'Too much text provided. Break the text up into separate segments and re-try inference.'
        auto_conds = None
        if voice_samples is not None:
            auto_conditioning, diffusion_conditioning, auto_conds, _ = self.get_conditioning_latents(voice_samples,
                                                                                                     return_mels=True)
        elif conditioning_latents is not None:
            auto_conditioning, diffusion_conditioning = conditioning_latents
        else:
            auto_conditioning, diffusion_conditioning = self.get_random_conditioning_latents()
        auto_conditioning = auto_conditioning.to(self.device)
        diffusion_conditioning = diffusion_conditioning.to(self.device)
        diffuser = load_discrete_vocoder_diffuser(desired_diffusion_steps=diffusion_iterations, cond_free=cond_free,
                                                  cond_free_k=cond_free_k)
        with torch.no_grad():
            samples = []
            num_batches = num_autoregressive_samples // self.autoregressive_batch_size
            stop_mel_token = self.autoregressive.stop_mel_token
            calm_token = 83  # This is the token for coding silence, which is fixed in place with "fix_autoregressive_output"
            self.autoregressive = self.autoregressive.to(self.device)
            if verbose:
                print("Generating autoregressive samples..")
            for b in tqdm(range(num_batches), disable=not verbose):
                codes = self.autoregressive.inference_speech(auto_conditioning, text_tokens,
                                                             do_sample=True,
                                                             top_p=top_p,
                                                             temperature=temperature,
                                                             num_return_sequences=self.autoregressive_batch_size,
                                                             length_penalty=length_penalty,
                                                             repetition_penalty=repetition_penalty,
                                                             max_generate_length=max_mel_tokens,
                                                             **hf_generate_kwargs)
                # Pad every candidate out to max_mel_tokens with stop tokens.
                padding_needed = max_mel_tokens - codes.shape[1]
                codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
                samples.append(codes)
            self.autoregressive = self.autoregressive.cpu()
            clip_results = []
            if verbose:
                print("Computing best candidates using CLVP")
            for batch in tqdm(samples, disable=not verbose):
                for i in range(batch.shape[0]):
                    batch[i] = fix_autoregressive_output(batch[i], stop_mel_token)
                clvp = self.clvp(text_tokens.repeat(batch.shape[0], 1), batch, return_loss=False)
                clip_results.append(clvp)
            clip_results = torch.cat(clip_results, dim=0)
            samples = torch.cat(samples, dim=0)
            # Keep the k candidates CLVP scores highest.
            best_results = samples[torch.topk(clip_results, k=k).indices]
            self.clvp = self.clvp.cpu()
            del samples
            # The diffusion model actually wants the last hidden layer from the autoregressive model as conditioning
            # inputs. Re-produce those for the top results. This could be made more efficient by storing all of these
            # results, but will increase memory usage.
            self.autoregressive = self.autoregressive.to(self.device)
            best_latents = self.autoregressive(auto_conditioning.repeat(k, 1), text_tokens.repeat(k, 1),
                                               torch.tensor([text_tokens.shape[-1]], device=text_tokens.device),
                                               best_results,
                                               torch.tensor([best_results.shape[
                                                                 -1] * self.autoregressive.mel_length_compression],
                                                            device=text_tokens.device),
                                               return_latent=True, clip_inputs=False)
            self.autoregressive = self.autoregressive.cpu()
            del auto_conditioning
            if verbose:
                print("Transforming autoregressive outputs into audio..")
            wav_candidates = []
            self.diffusion = self.diffusion.to(self.device)
            self.vocoder = self.vocoder.to(self.device)
            for b in range(best_results.shape[0]):
                codes = best_results[b].unsqueeze(0)
                latents = best_latents[b].unsqueeze(0)
                # Find the first occurrence of the "calm" token and trim the codes to that.
                ctokens = 0
                # NOTE(review): this loop variable shadows the `k` parameter.
                # `k` is not read again after this point, so behavior is
                # unaffected, but renaming the loop variable would be safer.
                for k in range(codes.shape[-1]):
                    if codes[0, k] == calm_token:
                        ctokens += 1
                    else:
                        ctokens = 0
                    if ctokens > 8:  # 8 tokens gives the diffusion model some "breathing room" to terminate speech.
                        latents = latents[:, :k]
                        break
                mel = do_spectrogram_diffusion(self.diffusion, diffuser, latents, diffusion_conditioning,
                                               temperature=diffusion_temperature, verbose=verbose)
                wav = self.vocoder.inference(mel)
                wav_candidates.append(wav.cpu())
            self.diffusion = self.diffusion.cpu()
            self.vocoder = self.vocoder.cpu()

            def potentially_redact(clip, text):
                # Strip bracketed spans from the rendered audio when
                # redaction is enabled.
                if self.enable_redaction:
                    return self.aligner.redact(clip.squeeze(1), text).unsqueeze(1)
                return clip
            wav_candidates = [potentially_redact(wav_candidate, text) for wav_candidate in wav_candidates]
            if len(wav_candidates) > 1:
                res = wav_candidates
            else:
                res = wav_candidates[0]
            if return_deterministic_state:
                return res, (deterministic_seed, text, voice_samples, conditioning_latents)
            else:
                return res

    def convert_to_audio(self, text, voice):
        """POST *text* to the Puretalk TTS endpoint and stream the response
        into ./audio.wav in the current working directory."""
        # get the audio file from post request
        response = requests.post("https://api.puretalk.ai/tts", json={"text": text, "voice_id": voice})
        # save audio file
        with open("audio.wav", "wb") as f:
            for chunk in response.iter_content(chunk_size=128):
                f.write(chunk)

    def deterministic_state(self, seed=None):
        """
        Sets the random seeds that tortoise uses to the current time() and returns that seed so results can be
        reproduced.
        """
        seed = int(time()) if seed is None else seed
        torch.manual_seed(seed)
        random.seed(seed)
        # Can't currently set this because of CUBLAS. TODO: potentially enable it if necessary.
        # torch.use_deterministic_algorithms(True)
        return seed
import torch
import torch.nn as nn
from ruth_tts_transformer.models.arch_util import Upsample, Downsample, normalization, zero_module, AttentionBlock
class ResBlock(nn.Module):
    """1-D residual block with optional up/down-sampling, used by the audio
    classifier.

    Structure: (norm -> SiLU -> conv) then (norm -> SiLU -> dropout ->
    zero-initialized conv), plus a skip connection that projects the input
    when the channel count changes.
    """
    def __init__(
        self,
        channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        up=False,
        down=False,
        kernel_size=3,
        do_checkpoint=True,
    ):
        super().__init__()
        self.channels = channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_scale_shift_norm = use_scale_shift_norm
        self.do_checkpoint = do_checkpoint
        # NOTE(review): assumes kernel_size is 3 or 5; any other value gets
        # padding=2, which would change the output length — confirm intended
        # usage.
        padding = 1 if kernel_size == 3 else 2
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding),
        )
        self.updown = up or down
        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding)
            ),
        )
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            # Fixed: the previous code passed `dims` as nn.Conv1d's
            # in_channels argument (a leftover from a conv_nd(dims, ...)
            # call), which broke the skip projection whenever
            # out_channels != channels. Argument order now matches the
            # parallel ResBlock in diffusion_decoder.py.
            self.skip_connection = nn.Conv1d(
                channels, self.out_channels, kernel_size, padding=padding
            )
        else:
            self.skip_connection = nn.Conv1d(channels, self.out_channels, 1)

    def forward(self, x):
        """Apply the residual block; `x` is (batch, channels, length)."""
        if self.updown:
            # Resample between the first norm/activation and the first conv.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        h = self.out_layers(h)
        return self.skip_connection(x) + h
class AudioMiniEncoder(nn.Module):
    """Encodes a spectrogram into a single embedding vector.

    A conv stem, `depth` stages of resnet blocks + downsampling (doubling
    channels each stage), a 1x1 projection to `embedding_dim`, then a stack
    of attention blocks; the embedding is read from the first position of
    the attended sequence.
    """
    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 base_channels=128,
                 depth=2,
                 resnet_blocks=2,
                 attn_blocks=4,
                 num_attn_heads=4,
                 dropout=0,
                 downsample_factor=2,
                 kernel_size=3):
        super().__init__()
        self.init = nn.Sequential(
            nn.Conv1d(spec_dim, base_channels, 3, padding=1)
        )
        channels = base_channels
        stages = []
        self.layers = depth
        for _ in range(depth):
            stages.extend(
                ResBlock(channels, dropout, do_checkpoint=False, kernel_size=kernel_size)
                for _ in range(resnet_blocks)
            )
            stages.append(Downsample(channels, use_conv=True, out_channels=channels * 2, factor=downsample_factor))
            channels *= 2
        self.res = nn.Sequential(*stages)
        self.final = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, embedding_dim, 1)
        )
        self.attn = nn.Sequential(
            *(AttentionBlock(embedding_dim, num_attn_heads, do_checkpoint=False)
              for _ in range(attn_blocks))
        )
        self.dim = embedding_dim

    def forward(self, x):
        """Return a (batch, embedding_dim) embedding for spectrogram *x*."""
        h = self.init(x)
        h = self.res(h)
        h = self.final(h)
        for attention in self.attn:
            h = attention(h)
        return h[:, :, 0]
class AudioMiniEncoderWithClassifierHead(nn.Module):
    """AudioMiniEncoder followed by a linear classification head.

    ``forward`` returns raw logits when no labels are given, otherwise the
    cross-entropy loss against the labels (optionally soft labels — see
    ``distribute_zero_label``).
    """
    def __init__(self, classes, distribute_zero_label=True, **kwargs):
        super().__init__()
        self.enc = AudioMiniEncoder(**kwargs)
        self.head = nn.Linear(self.enc.dim, classes)
        self.num_classes = classes
        self.distribute_zero_label = distribute_zero_label

    def forward(self, x, labels=None):
        logits = self.head(self.enc(x))
        if labels is None:
            return logits
        if not self.distribute_zero_label:
            return nn.functional.cross_entropy(logits, labels)
        # Distribute 20% of the probability mass on all classes when zero is
        # specified, to compensate for dataset noise.
        one_hot = nn.functional.one_hot(labels, num_classes=self.num_classes)
        is_zero = (labels == 0).unsqueeze(-1)
        extra_mass = torch.full_like(one_hot, dtype=torch.float, fill_value=.2 / (self.num_classes - 1))
        extra_mass[:, 0] = -.2
        soft_targets = one_hot + extra_mass * is_zero
        return nn.functional.cross_entropy(logits, soft_targets)
import math
import random
from abc import abstractmethod
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autocast
from ruth_tts_transformer.models.arch_util import normalization, AttentionBlock
def is_latent(t):
    """True when *t* holds float data, i.e. looks like a latent tensor."""
    return t.dtype == torch.float
def is_sequence(t):
    """True when *t* holds int64 data, i.e. looks like a token sequence."""
    return t.dtype == torch.long
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    exponents = torch.arange(start=0, end=half, dtype=torch.float32) / half
    freqs = torch.exp(-math.log(max_period) * exponents).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
    if dim % 2:
        # Odd dim: pad with a zero column so the width is exactly `dim`.
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb
class TimestepBlock(nn.Module):
    """Interface for modules whose forward pass also consumes a timestep
    embedding (dispatched by TimestepEmbedSequential)."""
    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """A Sequential that passes the timestep embedding to those children
    that accept it (TimestepBlock instances) and calls the rest normally."""
    def forward(self, x, emb):
        for module in self:
            if isinstance(module, TimestepBlock):
                x = module(x, emb)
            else:
                x = module(x)
        return x
class ResBlock(TimestepBlock):
    """Timestep-conditioned 1-D residual block.

    (norm -> SiLU -> conv), modulated by a projected timestep embedding —
    either added directly, or applied as scale/shift around the second norm
    when use_scale_shift_norm — then (norm -> SiLU -> dropout -> conv), plus
    a skip connection that projects the input when channels change.
    """
    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        dims=2,
        kernel_size=3,
        efficient_config=True,
        use_scale_shift_norm=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_scale_shift_norm = use_scale_shift_norm
        # Only odd kernels 1/3/5 are supported ('same' padding lookup).
        padding = {1: 0, 3: 1, 5: 2}[kernel_size]
        # efficient_config uses 1x1 convs for the input and skip projections.
        eff_kernel = 1 if efficient_config else 3
        eff_padding = 0 if efficient_config else 1
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, self.out_channels, eff_kernel, padding=eff_padding),
        )
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(
                emb_channels,
                # scale+shift needs twice the channels.
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding),
        )
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        else:
            self.skip_connection = nn.Conv1d(channels, self.out_channels, eff_kernel, padding=eff_padding)

    def forward(self, x, emb):
        """Apply the block to `x`, conditioned on timestep embedding `emb`."""
        h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over the trailing spatial dimension(s).
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = torch.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class DiffusionLayer(TimestepBlock):
    """One diffusion-decoder layer: a scale/shift-conditioned ResBlock
    followed by relative-position self-attention."""
    def __init__(self, model_channels, dropout, num_heads):
        super().__init__()
        self.resblk = ResBlock(model_channels, model_channels, dropout, model_channels, dims=1,
                               use_scale_shift_norm=True)
        self.attn = AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True)

    def forward(self, x, time_emb):
        # ResBlock consumes the timestep embedding; attention does not.
        y = self.resblk(x, time_emb)
        return self.attn(y)
class DiffusionTts(nn.Module):
    """
    Diffusion decoder that predicts mel-spectrogram noise (mean and variance)
    from a noised input, a timestep, and conditioning derived either from
    discrete codes or from autoregressive latents.

    Fix over the original: `get_grad_norm_parameter_groups` previously added
    `self.latent_conditioner.parameters()` to the 'code_converters' group
    twice; the duplicate is removed.
    """
    def __init__(
        self,
        model_channels=512,
        num_layers=8,
        in_channels=100,
        in_latent_channels=512,
        in_tokens=8193,
        out_channels=200,  # mean and variance
        dropout=0,
        use_fp16=False,
        num_heads=16,
        # Parameters for regularization.
        layer_drop=.1,
        unconditioned_percentage=.1,
        # This implements a mechanism similar to what is used in classifier-free training.
    ):
        super().__init__()
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.dropout = dropout
        self.num_heads = num_heads
        self.unconditioned_percentage = unconditioned_percentage
        self.enable_fp16 = use_fp16
        self.layer_drop = layer_drop
        self.inp_block = nn.Conv1d(in_channels, model_channels, 3, 1, 1)
        self.time_embed = nn.Sequential(
            nn.Linear(model_channels, model_channels),
            nn.SiLU(),
            nn.Linear(model_channels, model_channels),
        )
        # Either code_converter or latent_converter is used, depending on what type of conditioning data is fed.
        # This model is meant to be able to be trained on both for efficiency purposes - it is far less computationally
        # complex to generate tokens, while generating latents will normally mean propagating through a deep autoregressive
        # transformer network.
        self.code_embedding = nn.Embedding(in_tokens, model_channels)
        self.code_converter = nn.Sequential(
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
        )
        self.code_norm = normalization(model_channels)
        self.latent_conditioner = nn.Sequential(
            nn.Conv1d(in_latent_channels, model_channels, 3, padding=1),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
        )
        # Encodes reference speech clips into a conditioning vector ("minicoder").
        self.contextual_embedder = nn.Sequential(nn.Conv1d(in_channels, model_channels, 3, padding=1, stride=2),
                                                 nn.Conv1d(model_channels, model_channels * 2, 3, padding=1, stride=2),
                                                 AttentionBlock(model_channels * 2, num_heads,
                                                                relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels * 2, num_heads,
                                                                relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels * 2, num_heads,
                                                                relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels * 2, num_heads,
                                                                relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels * 2, num_heads,
                                                                relative_pos_embeddings=True, do_checkpoint=False))
        # Learned embedding substituted for the conditioning branch when it is dropped.
        self.unconditioned_embedding = nn.Parameter(torch.randn(1, model_channels, 1))
        self.conditioning_timestep_integrator = TimestepEmbedSequential(
            DiffusionLayer(model_channels, dropout, num_heads),
            DiffusionLayer(model_channels, dropout, num_heads),
            DiffusionLayer(model_channels, dropout, num_heads),
        )
        self.integrating_conv = nn.Conv1d(model_channels * 2, model_channels, kernel_size=1)
        self.mel_head = nn.Conv1d(model_channels, in_channels, kernel_size=3, padding=1)
        self.layers = nn.ModuleList([DiffusionLayer(model_channels, dropout, num_heads) for _ in range(num_layers)] +
                                    [ResBlock(model_channels, model_channels, dropout, dims=1,
                                              use_scale_shift_norm=True) for _ in range(3)])
        self.out = nn.Sequential(
            normalization(model_channels),
            nn.SiLU(),
            nn.Conv1d(model_channels, out_channels, 3, padding=1),
        )

    def get_grad_norm_parameter_groups(self):
        """Named parameter groups used for per-group gradient-norm logging."""
        groups = {
            'minicoder': list(self.contextual_embedder.parameters()),
            'layers': list(self.layers.parameters()),
            # FIX: latent_conditioner parameters were previously appended twice.
            'code_converters': list(self.code_embedding.parameters()) + list(self.code_converter.parameters()) + list(
                self.latent_conditioner.parameters()),
            'timestep_integrator': list(self.conditioning_timestep_integrator.parameters()) + list(
                self.integrating_conv.parameters()),
            'time_embed': list(self.time_embed.parameters()),
        }
        return groups

    def get_conditioning(self, conditioning_input):
        """
        Encode one or more reference clips into a single conditioning vector.

        :param conditioning_input: (b, in_channels, s) single clip or
            (b, n, in_channels, s) stack of clips per batch element.
        :return: (b, 2*model_channels) conditioning vector (mean over clips and time).
        """
        # NOTE(review): the 3-D case is unsqueezed to add a clip dimension —
        # assumes 3-D input is a single clip per batch element.
        speech_conditioning_input = conditioning_input.unsqueeze(1) if len(
            conditioning_input.shape) == 3 else conditioning_input
        conds = []
        for j in range(speech_conditioning_input.shape[1]):
            conds.append(self.contextual_embedder(speech_conditioning_input[:, j]))
        conds = torch.cat(conds, dim=-1)
        conds = conds.mean(dim=-1)
        return conds

    def timestep_independent(self, aligned_conditioning, conditioning_latent, expected_seq_len, return_code_pred):
        """
        Compute the timestep-independent conditioning embedding.

        :param aligned_conditioning: token sequence (b, s) or latent (b, s, in_latent_channels).
        :param conditioning_latent: (b, 2*model_channels) vector from get_conditioning().
        :param expected_seq_len: target time length to interpolate the embedding to.
        :param return_code_pred: also return a mel prediction from the embedding.
        """
        # Shuffle aligned_latent to BxCxS format
        if is_latent(aligned_conditioning):
            aligned_conditioning = aligned_conditioning.permute(0, 2, 1)
        cond_scale, cond_shift = torch.chunk(conditioning_latent, 2, dim=1)
        if is_latent(aligned_conditioning):
            code_emb = self.latent_conditioner(aligned_conditioning)
        else:
            code_emb = self.code_embedding(aligned_conditioning).permute(0, 2, 1)
            code_emb = self.code_converter(code_emb)
        # FiLM the code embedding with the speaker conditioning vector.
        code_emb = self.code_norm(code_emb) * (1 + cond_scale.unsqueeze(-1)) + cond_shift.unsqueeze(-1)
        unconditioned_batches = torch.zeros((code_emb.shape[0], 1, 1), device=code_emb.device)
        # Mask out the conditioning branch for whole batch elements, implementing something similar to classifier-free guidance.
        if self.training and self.unconditioned_percentage > 0:
            unconditioned_batches = torch.rand((code_emb.shape[0], 1, 1),
                                               device=code_emb.device) < self.unconditioned_percentage
            code_emb = torch.where(unconditioned_batches,
                                   self.unconditioned_embedding.repeat(aligned_conditioning.shape[0], 1, 1),
                                   code_emb)
        expanded_code_emb = F.interpolate(code_emb, size=expected_seq_len, mode='nearest')
        if not return_code_pred:
            return expanded_code_emb
        else:
            mel_pred = self.mel_head(expanded_code_emb)
            # Multiply mel_pred by !unconditioned_branches, which drops the gradient on unconditioned branches. This is because we don't want that gradient being used to train parameters through the codes_embedder as it unbalances contributions to that network from the MSE loss.
            mel_pred = mel_pred * unconditioned_batches.logical_not()
            return expanded_code_emb, mel_pred

    def forward(self, x, timesteps, aligned_conditioning=None, conditioning_latent=None,
                precomputed_aligned_embeddings=None, conditioning_free=False, return_code_pred=False):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param aligned_conditioning: an aligned latent or sequence of tokens providing useful data about the sample to be produced.
        :param conditioning_latent: a pre-computed conditioning latent; see get_conditioning().
        :param precomputed_aligned_embeddings: Embeddings returned from self.timestep_independent()
        :param conditioning_free: When set, all conditioning inputs (including tokens and conditioning_input) will not be considered.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert precomputed_aligned_embeddings is not None or (
                aligned_conditioning is not None and conditioning_latent is not None)
        assert not (
                return_code_pred and precomputed_aligned_embeddings is not None)  # These two are mutually exclusive.
        # Parameters skipped this pass; touched at the end so DDP sees them.
        unused_params = []
        if conditioning_free:
            code_emb = self.unconditioned_embedding.repeat(x.shape[0], 1, x.shape[-1])
            unused_params.extend(list(self.code_converter.parameters()) + list(self.code_embedding.parameters()))
            unused_params.extend(list(self.latent_conditioner.parameters()))
        else:
            if precomputed_aligned_embeddings is not None:
                code_emb = precomputed_aligned_embeddings
            else:
                code_emb, mel_pred = self.timestep_independent(aligned_conditioning, conditioning_latent, x.shape[-1],
                                                               True)
                if is_latent(aligned_conditioning):
                    unused_params.extend(
                        list(self.code_converter.parameters()) + list(self.code_embedding.parameters()))
                else:
                    unused_params.extend(list(self.latent_conditioner.parameters()))
            unused_params.append(self.unconditioned_embedding)
        time_emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
        code_emb = self.conditioning_timestep_integrator(code_emb, time_emb)
        x = self.inp_block(x)
        x = torch.cat([x, code_emb], dim=1)
        x = self.integrating_conv(x)
        for i, lyr in enumerate(self.layers):
            # Do layer drop where applicable. Do not drop first and last layers.
            if self.training and self.layer_drop > 0 and i != 0 and i != (
                    len(self.layers) - 1) and random.random() < self.layer_drop:
                unused_params.extend(list(lyr.parameters()))
            else:
                # First and last blocks will have autocast disabled for improved precision.
                with autocast(x.device.type, enabled=self.enable_fp16 and i != 0):
                    x = lyr(x, time_emb)
        x = x.float()
        out = self.out(x)
        # Involve probabilistic or possibly unused parameters in loss so we don't get DDP errors.
        extraneous_addition = 0
        for p in unused_params:
            extraneous_addition = extraneous_addition + p.mean()
        out = out + extraneous_addition * 0
        if return_code_pred:
            return out, mel_pred
        return out
if __name__ == '__main__':
    # Smoke test: one forward pass with token-sequence conditioning.
    # (Fixed: dataset-extraction metadata fused to the last line was removed.)
    clip = torch.randn(2, 100, 400)
    aligned_latent = torch.randn(2, 388, 512)
    aligned_sequence = torch.randint(0, 8192, (2, 100))
    cond = torch.randn(2, 100, 400)
    ts = torch.LongTensor([600, 600])
    model = DiffusionTts(512, layer_drop=.3, unconditioned_percentage=.5)
    # Test with latent aligned conditioning
    # o = model(clip, ts, aligned_latent, cond)
    # Test with sequence aligned conditioning
    o = model(clip, ts, aligned_sequence, cond)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from ruth_tts_transformer.models.arch_util import AttentionBlock
from ruth_tts_transformer.models.xtransformers import ContinuousTransformerWrapper, Encoder
def exists(val):
    # Any non-None value (including falsy ones like 0 or "") counts as present.
    return not (val is None)
def masked_mean(t, mask):
    """Mean of `t` over dim 1, counting only positions where `mask` is True.

    `mask` must broadcast against `t`; masked-out entries contribute zero to
    the sum and are excluded from the divisor.
    """
    zeroed = t.masked_fill(mask.logical_not(), 0.)
    total = zeroed.sum(dim=1)
    count = mask.sum(dim=1)
    return total / count
class CollapsingTransformer(nn.Module):
    """
    Transformer encoder that "collapses" a (batch, seq, model_dim) sequence
    into a single (batch, output_dims) vector by masked mean-pooling over the
    sequence dimension. During training a random fraction of positions
    (`mask_percentage`) is excluded from the pooled mean as regularization.
    """
    def __init__(self, model_dim, output_dims, heads, dropout, depth, mask_percentage=0, **encoder_kwargs):
        super().__init__()
        self.transformer = ContinuousTransformerWrapper(
            max_seq_len=-1,
            use_pos_emb=False,
            attn_layers=Encoder(
                dim=model_dim,
                depth=depth,
                heads=heads,
                ff_dropout=dropout,
                ff_mult=1,
                attn_dropout=dropout,
                use_rmsnorm=True,
                ff_glu=True,
                rotary_pos_emb=True,
                **encoder_kwargs,
            ))
        # Projects transformer output to `output_dims` (operates in BxCxT layout).
        self.pre_combiner = nn.Sequential(nn.Conv1d(model_dim, output_dims, 1),
                                          AttentionBlock(
                                              output_dims, num_heads=heads, do_checkpoint=False),
                                          nn.Conv1d(output_dims, output_dims, 1))
        self.mask_percentage = mask_percentage
    def forward(self, x, **transformer_kwargs):
        """:param x: (batch, seq, model_dim); returns (batch, output_dims)."""
        h = self.transformer(x, **transformer_kwargs)
        # Conv layers expect channels-first; permute in and back out.
        h = h.permute(0, 2, 1)
        h = self.pre_combiner(h).permute(0, 2, 1)
        if self.training:
            # Randomly drop positions from the pooled mean (regularization).
            mask = torch.rand_like(h.float()) > self.mask_percentage
        else:
            mask = torch.ones_like(h.float()).bool()
        return masked_mean(h, mask)
class ConvFormatEmbedding(nn.Module):
    """nn.Embedding wrapper that emits conv-style (batch, dim, seq) output
    instead of the usual (batch, seq, dim)."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.emb = nn.Embedding(*args, **kwargs)

    def forward(self, x):
        # Swap seq and channel axes so downstream Conv1d layers can consume it.
        return self.emb(x).transpose(1, 2)
class CVVP(nn.Module):
    """
    Contrastive model pairing a conditioning mel clip with an input mel clip
    (CLIP-style): each branch is embedded, collapsed to a single latent, and
    the latents are compared with a temperature-scaled dot product. With
    `return_loss=True` a symmetric cross-entropy (InfoNCE) loss is returned.
    """
    def __init__(
            self,
            model_dim=512,
            transformer_heads=8,
            dropout=.1,
            conditioning_enc_depth=8,
            cond_mask_percentage=0,
            mel_channels=80,
            mel_codes=None,
            speech_enc_depth=8,
            speech_mask_percentage=0,
            latent_multiplier=1,
    ):
        super().__init__()
        latent_dim = latent_multiplier*model_dim
        # Learned (log-space via exp()) temperature for the similarity logits.
        self.temperature = nn.Parameter(torch.tensor(1.))
        # Conditioning branch: two strided convs (4x downsample) then a collapsing transformer.
        self.cond_emb = nn.Sequential(nn.Conv1d(mel_channels, model_dim//2, kernel_size=5, stride=2, padding=2),
                                      nn.Conv1d(model_dim//2, model_dim, kernel_size=3, stride=2, padding=1))
        self.conditioning_transformer = CollapsingTransformer(
            model_dim, model_dim, transformer_heads, dropout, conditioning_enc_depth, cond_mask_percentage)
        self.to_conditioning_latent = nn.Linear(
            latent_dim, latent_dim, bias=False)
        # Speech branch embeds either raw mels (conv) or discrete mel codes (embedding).
        if mel_codes is None:
            self.speech_emb = nn.Conv1d(
                mel_channels, model_dim, kernel_size=5, padding=2)
        else:
            self.speech_emb = ConvFormatEmbedding(mel_codes, model_dim)
        self.speech_transformer = CollapsingTransformer(
            model_dim, latent_dim, transformer_heads, dropout, speech_enc_depth, speech_mask_percentage)
        self.to_speech_latent = nn.Linear(
            latent_dim, latent_dim, bias=False)
    def get_grad_norm_parameter_groups(self):
        """Named parameter groups used for per-group gradient-norm logging."""
        return {
            'conditioning': list(self.conditioning_transformer.parameters()),
            'speech': list(self.speech_transformer.parameters()),
        }
    def forward(
            self,
            mel_cond,
            mel_input,
            return_loss=False
    ):
        """
        :param mel_cond: (batch, mel_channels, time) conditioning mel.
        :param mel_input: (batch, mel_channels, time) input mel (or codes).
        :param return_loss: when True, return the symmetric contrastive loss;
            otherwise return per-pair similarity scores of shape (batch,).
        """
        cond_emb = self.cond_emb(mel_cond).permute(0, 2, 1)
        enc_cond = self.conditioning_transformer(cond_emb)
        cond_latents = self.to_conditioning_latent(enc_cond)
        speech_emb = self.speech_emb(mel_input).permute(0, 2, 1)
        enc_speech = self.speech_transformer(speech_emb)
        speech_latents = self.to_speech_latent(enc_speech)
        # L2-normalize both latents so the dot product is a cosine similarity.
        cond_latents, speech_latents = map(lambda t: F.normalize(
            t, p=2, dim=-1), (cond_latents, speech_latents))
        temp = self.temperature.exp()
        if not return_loss:
            # Diagonal (paired) similarities only.
            sim = einsum('n d, n d -> n', cond_latents,
                         speech_latents) * temp
            return sim
        # Full pairwise similarity matrix; matched pairs lie on the diagonal.
        sim = einsum('i d, j d -> i j', cond_latents,
                     speech_latents) * temp
        labels = torch.arange(
            cond_latents.shape[0], device=mel_input.device)
        loss = (F.cross_entropy(sim, labels) +
                F.cross_entropy(sim.t(), labels)) / 2
        return loss
if __name__ == '__main__':
    # Smoke test: contrastive loss between two random mel batches.
    # (Fixed: dataset-extraction metadata fused to the last line was removed.)
    clvp = CVVP()
    clvp(torch.randn(2, 80, 100),
         torch.randn(2, 80, 95),
         return_loss=True)
import torch
import torch.nn as nn
import torch.nn.functional as F
MAX_WAV_VALUE = 32768.0
class KernelPredictor(torch.nn.Module):
    ''' Kernel predictor for the location-variable convolutions'''
    def __init__(
            self,
            cond_channels,
            conv_in_channels,
            conv_out_channels,
            conv_layers,
            conv_kernel_size=3,
            kpnet_hidden_channels=64,
            kpnet_conv_size=3,
            kpnet_dropout=0.0,
            kpnet_nonlinear_activation="LeakyReLU",
            kpnet_nonlinear_activation_params={"negative_slope": 0.1},
    ):
        '''
        Args:
            cond_channels (int): number of channel for the conditioning sequence,
            conv_in_channels (int): number of channel for the input sequence,
            conv_out_channels (int): number of channel for the output sequence,
            conv_layers (int): number of layers
        '''
        super().__init__()
        self.conv_in_channels = conv_in_channels
        self.conv_out_channels = conv_out_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_layers = conv_layers
        # Total channels needed to emit one weight tensor / bias vector per layer.
        kpnet_kernel_channels = conv_in_channels * conv_out_channels * conv_kernel_size * conv_layers  # l_w
        kpnet_bias_channels = conv_out_channels * conv_layers  # l_b
        self.input_conv = nn.Sequential(
            nn.utils.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)),
            getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
        )
        self.residual_convs = nn.ModuleList()
        padding = (kpnet_conv_size - 1) // 2
        # Three residual conv stacks refine the hidden conditioning features.
        for _ in range(3):
            self.residual_convs.append(
                nn.Sequential(
                    nn.Dropout(kpnet_dropout),
                    nn.utils.weight_norm(
                        nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding,
                                  bias=True)),
                    getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
                    nn.utils.weight_norm(
                        nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding,
                                  bias=True)),
                    getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
                )
            )
        # Heads that emit the per-position convolution kernels and biases.
        self.kernel_conv = nn.utils.weight_norm(
            nn.Conv1d(kpnet_hidden_channels, kpnet_kernel_channels, kpnet_conv_size, padding=padding, bias=True))
        self.bias_conv = nn.utils.weight_norm(
            nn.Conv1d(kpnet_hidden_channels, kpnet_bias_channels, kpnet_conv_size, padding=padding, bias=True))
    def forward(self, c):
        '''
        Args:
            c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)

        Returns:
            kernels (Tensor): (batch, conv_layers, conv_in_channels,
                conv_out_channels, conv_kernel_size, cond_length)
            bias (Tensor): (batch, conv_layers, conv_out_channels, cond_length)
        '''
        batch, _, cond_length = c.shape
        c = self.input_conv(c)
        for residual_conv in self.residual_convs:
            # NOTE(review): .to(c.device) on every forward is redundant — the
            # submodules already move with the parent module. Harmless, but
            # could be dropped.
            residual_conv.to(c.device)
            c = c + residual_conv(c)
        k = self.kernel_conv(c)
        b = self.bias_conv(c)
        # Unflatten the packed channel dimension into per-layer weight tensors.
        kernels = k.contiguous().view(
            batch,
            self.conv_layers,
            self.conv_in_channels,
            self.conv_out_channels,
            self.conv_kernel_size,
            cond_length,
        )
        bias = b.contiguous().view(
            batch,
            self.conv_layers,
            self.conv_out_channels,
            cond_length,
        )
        return kernels, bias
    def remove_weight_norm(self):
        # Strip weight_norm wrappers for inference (must mirror __init__'s layout).
        nn.utils.remove_weight_norm(self.input_conv[0])
        nn.utils.remove_weight_norm(self.kernel_conv)
        nn.utils.remove_weight_norm(self.bias_conv)
        for block in self.residual_convs:
            nn.utils.remove_weight_norm(block[1])
            nn.utils.remove_weight_norm(block[3])
class LVCBlock(torch.nn.Module):
    '''the location-variable convolutions

    Upsamples the input by `stride`, then applies a stack of dilated convs
    whose kernels/biases are predicted per-position from the conditioning
    sequence (see KernelPredictor), with a gated activation unit.

    Fix over the original: the `dilations` default was a mutable list; it is
    now a tuple (same values, no shared-mutable-default pitfall).
    '''
    def __init__(
            self,
            in_channels,
            cond_channels,
            stride,
            dilations=(1, 3, 9, 27),
            lReLU_slope=0.2,
            conv_kernel_size=3,
            cond_hop_length=256,
            kpnet_hidden_channels=64,
            kpnet_conv_size=3,
            kpnet_dropout=0.0,
    ):
        super().__init__()
        self.cond_hop_length = cond_hop_length
        self.conv_layers = len(dilations)
        self.conv_kernel_size = conv_kernel_size
        # Predicts one kernel/bias pair per dilated conv layer; output width is
        # doubled because the GAU consumes sigmoid and tanh halves.
        self.kernel_predictor = KernelPredictor(
            cond_channels=cond_channels,
            conv_in_channels=in_channels,
            conv_out_channels=2 * in_channels,
            conv_layers=len(dilations),
            conv_kernel_size=conv_kernel_size,
            kpnet_hidden_channels=kpnet_hidden_channels,
            kpnet_conv_size=kpnet_conv_size,
            kpnet_dropout=kpnet_dropout,
            kpnet_nonlinear_activation_params={"negative_slope": lReLU_slope}
        )
        # Transposed conv that upsamples the waveform features by `stride`.
        self.convt_pre = nn.Sequential(
            nn.LeakyReLU(lReLU_slope),
            nn.utils.weight_norm(nn.ConvTranspose1d(in_channels, in_channels, 2 * stride, stride=stride,
                                                    padding=stride // 2 + stride % 2, output_padding=stride % 2)),
        )
        self.conv_blocks = nn.ModuleList()
        for dilation in dilations:
            self.conv_blocks.append(
                nn.Sequential(
                    nn.LeakyReLU(lReLU_slope),
                    nn.utils.weight_norm(nn.Conv1d(in_channels, in_channels, conv_kernel_size,
                                                   padding=dilation * (conv_kernel_size - 1) // 2, dilation=dilation)),
                    nn.LeakyReLU(lReLU_slope),
                )
            )
    def forward(self, x, c):
        ''' forward propagation of the location-variable convolutions.
        Args:
            x (Tensor): the input sequence (batch, in_channels, in_length)
            c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)
        Returns:
            Tensor: the output sequence (batch, in_channels, in_length)
        '''
        _, in_channels, _ = x.shape  # (B, c_g, L')
        x = self.convt_pre(x)  # (B, c_g, stride * L')
        kernels, bias = self.kernel_predictor(c)
        for i, conv in enumerate(self.conv_blocks):
            output = conv(x)  # (B, c_g, stride * L')
            k = kernels[:, i, :, :, :, :]  # (B, 2 * c_g, c_g, kernel_size, cond_length)
            b = bias[:, i, :, :]  # (B, 2 * c_g, cond_length)
            output = self.location_variable_convolution(output, k, b,
                                                        hop_size=self.cond_hop_length)  # (B, 2 * c_g, stride * L'): LVC
            # Gated activation unit: sigmoid gate * tanh value, residual add.
            x = x + torch.sigmoid(output[:, :in_channels, :]) * torch.tanh(
                output[:, in_channels:, :])  # (B, c_g, stride * L'): GAU
        return x
    def location_variable_convolution(self, x, kernel, bias, dilation=1, hop_size=256):
        ''' perform location-variable convolution operation on the input sequence (x) using the local convolution kernl.
        Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100.
        Args:
            x (Tensor): the input sequence (batch, in_channels, in_length).
            kernel (Tensor): the local convolution kernel (batch, in_channel, out_channels, kernel_size, kernel_length)
            bias (Tensor): the bias for the local convolution (batch, out_channels, kernel_length)
            dilation (int): the dilation of convolution.
            hop_size (int): the hop_size of the conditioning sequence.
        Returns:
            (Tensor): the output sequence after performing local convolution. (batch, out_channels, in_length).
        '''
        batch, _, in_length = x.shape
        batch, _, out_channels, kernel_size, kernel_length = kernel.shape
        assert in_length == (kernel_length * hop_size), "length of (x, kernel) is not matched"
        padding = dilation * int((kernel_size - 1) / 2)
        x = F.pad(x, (padding, padding), 'constant', 0)  # (batch, in_channels, in_length + 2*padding)
        # Slice the signal into per-kernel windows, then apply each window's
        # predicted kernel with a single einsum.
        x = x.unfold(2, hop_size + 2 * padding, hop_size)  # (batch, in_channels, kernel_length, hop_size + 2*padding)
        if hop_size < dilation:
            x = F.pad(x, (0, dilation), 'constant', 0)
        x = x.unfold(3, dilation,
                     dilation)  # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation)
        x = x[:, :, :, :, :hop_size]
        x = x.transpose(3, 4)  # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation)
        x = x.unfold(4, kernel_size, 1)  # (batch, in_channels, kernel_length, dilation, _, kernel_size)
        o = torch.einsum('bildsk,biokl->bolsd', x, kernel)
        o = o.to(memory_format=torch.channels_last_3d)
        bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d)
        o = o + bias
        o = o.contiguous().view(batch, out_channels, -1)
        return o
    def remove_weight_norm(self):
        # Strip weight_norm wrappers for inference (must mirror __init__'s layout).
        self.kernel_predictor.remove_weight_norm()
        nn.utils.remove_weight_norm(self.convt_pre[1])
        for block in self.conv_blocks:
            nn.utils.remove_weight_norm(block[1])
class UnivNetGenerator(nn.Module):
    """UnivNet Generator

    Converts a mel-spectrogram conditioning sequence plus a noise sequence
    into a waveform via a stack of location-variable convolution blocks.

    Fixes over the original: mutable list defaults for `dilations`/`strides`
    replaced with tuples, and two no-op self-assignments removed.
    """
    def __init__(self, noise_dim=64, channel_size=32, dilations=(1, 3, 9, 27), strides=(8, 8, 4), lReLU_slope=.2, kpnet_conv_size=3,
                 # Below are MEL configurations options that this generator requires.
                 hop_length=256, n_mel_channels=100):
        super(UnivNetGenerator, self).__init__()
        self.mel_channel = n_mel_channels
        self.noise_dim = noise_dim
        self.hop_length = hop_length
        self.res_stack = nn.ModuleList()
        # Each LVC block upsamples by its stride; the block's conditioning hop
        # is the cumulative upsampling factor so far.
        hop_length = 1
        for stride in strides:
            hop_length = stride * hop_length
            self.res_stack.append(
                LVCBlock(
                    channel_size,
                    n_mel_channels,
                    stride=stride,
                    dilations=dilations,
                    lReLU_slope=lReLU_slope,
                    cond_hop_length=hop_length,
                    kpnet_conv_size=kpnet_conv_size
                )
            )
        self.conv_pre = \
            nn.utils.weight_norm(nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode='reflect'))
        self.conv_post = nn.Sequential(
            nn.LeakyReLU(lReLU_slope),
            nn.utils.weight_norm(nn.Conv1d(channel_size, 1, 7, padding=3, padding_mode='reflect')),
            nn.Tanh(),
        )
    def forward(self, c, z):
        '''
        Args:
            c (Tensor): the conditioning sequence of mel-spectrogram (batch, mel_channels, in_length)
            z (Tensor): the noise sequence (batch, noise_dim, in_length)

        Returns:
            Tensor: waveform of shape (batch, 1, in_length * prod(strides))
        '''
        z = self.conv_pre(z)  # (B, c_g, L)
        for res_block in self.res_stack:
            # NOTE(review): .to(z.device) per forward is redundant (modules
            # move with the parent); kept for behavioral parity.
            res_block.to(z.device)
            z = res_block(z, c)  # (B, c_g, L * s_0 * ... * s_i)
        z = self.conv_post(z)  # (B, 1, L * 256)
        return z
    def eval(self, inference=False):
        # Overrides nn.Module.eval with an extra flag: weight norm is only
        # stripped for true inference, not for validation during training.
        super(UnivNetGenerator, self).eval()
        # don't remove weight norm while validation in training loop
        if inference:
            self.remove_weight_norm()
    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.conv_pre)
        for layer in self.conv_post:
            # Skip parameter-free layers (LeakyReLU, Tanh).
            if len(layer.state_dict()) != 0:
                nn.utils.remove_weight_norm(layer)
        for res_block in self.res_stack:
            res_block.remove_weight_norm()
    def inference(self, c, z=None):
        """Generate audio from mel `c`, padding the tail to suppress edge artifacts."""
        # pad input mel with zeros to cut artifact
        # see https://github.com/seungwonpark/melgan/issues/8
        zero = torch.full((c.shape[0], self.mel_channel, 10), -11.5129).to(c.device)
        mel = torch.cat((c, zero), dim=2)
        if z is None:
            z = torch.randn(c.shape[0], self.noise_dim, mel.size(2)).to(mel.device)
        audio = self.forward(mel, z)
        # Trim the samples produced by the padded mel frames.
        audio = audio[:, :, :-(self.hop_length * 10)]
        audio = audio.clamp(min=-1, max=1)
        return audio
if __name__ == '__main__':
    # Smoke test: generate audio from random mel + noise and check the
    # expected upsampling factor (8 * 8 * 4 = 256 samples per mel frame).
    # (Fixed: dataset-extraction metadata fused to the last line was removed.)
    model = UnivNetGenerator()
    c = torch.randn(3, 100, 10)
    z = torch.randn(3, 64, 10)
    print(c.shape)
    y = model(c, z)
    print(y.shape)
    assert y.shape == torch.Size([3, 1, 2560])
    pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(pytorch_total_params)
from functools import partial
import torch
import torch.nn.functional as F
from einops import rearrange
from rotary_embedding_torch import RotaryEmbedding, broadcat
from torch import nn
# helpers
def exists(val):
    # Any non-None value (including falsy ones like 0 or "") counts as present.
    return not (val is None)
def default(val, d):
    """Return `val` unless it is None, in which case return the fallback `d`."""
    if val is None:
        return d
    return val
def cast_tuple(val, depth = 1):
    """Coerce `val` to a tuple: lists/tuples are converted as-is, any other
    value is repeated `depth` times."""
    if isinstance(val, (list, tuple)):
        return tuple(val)
    return (val,) * depth
def max_neg_value(t):
    """Most negative finite value representable in `t`'s floating dtype
    (used as the masking value before a softmax)."""
    finfo = torch.finfo(t.dtype)
    return -finfo.max
def stable_softmax(t, dim = -1, alpha = 32 ** 2):
    """Numerically stabler softmax: work at 1/alpha scale, subtract the
    (detached) per-slice max, then rescale before the softmax."""
    scaled = t / alpha
    scaled = scaled - torch.amax(scaled, dim=dim, keepdim=True).detach()
    return torch.softmax(scaled * alpha, dim=dim)
def route_args(router, args, depth):
    """Split keyword args into per-layer (f_kwargs, g_kwargs) pairs.

    For each key present in both `args` and `router`, `router[key]` gives, per
    layer, a (to_f, to_g) boolean pair deciding which sub-block receives the
    value. Returns a list of `depth` (f_kwargs, g_kwargs) dict pairs.

    Fix over the original: the inner loop variable shadowed the `depth`
    parameter; renamed to `layer_idx`.
    """
    routed_args = [(dict(), dict()) for _ in range(depth)]
    matched_keys = [key for key in args.keys() if key in router]
    for key in matched_keys:
        val = args[key]
        for layer_idx, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
            new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
            routed_args[layer_idx] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
    return routed_args
# classes
class SequentialSequence(nn.Module):
    """Runs (f, g) layer pairs sequentially with residual connections,
    routing keyword arguments to each sub-block via `args_route`."""

    def __init__(self, layers, args_route = {}, layer_dropout = 0.):
        super().__init__()
        assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
        self.layers = layers
        self.args_route = args_route
        self.layer_dropout = layer_dropout

    def forward(self, x, **kwargs):
        routed = route_args(self.args_route, kwargs, len(self.layers))
        for (f, g), (f_kwargs, g_kwargs) in zip(self.layers, routed):
            # Residual around each sub-block.
            x = x + f(x, **f_kwargs)
            x = x + g(x, **g_kwargs)
        return x
class DivideMax(nn.Module):
    """Normalizes a tensor by its (detached) maximum along `dim`."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        # Detach so the normalizer does not receive gradient.
        peak = x.amax(dim=self.dim, keepdim=True).detach()
        return x / peak
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
    """Scales a wrapped block's output by a learned per-channel factor,
    initialized small for deep networks (https://arxiv.org/abs/2103.17239)."""

    def __init__(self, dim, depth, fn):
        super().__init__()
        # Deeper placement -> smaller initial scale, per the CaiT paper.
        if depth <= 18:
            init_eps = 0.1
        elif depth <= 24:
            init_eps = 1e-5
        else:
            init_eps = 1e-6
        self.scale = nn.Parameter(torch.full((1, 1, dim), init_eps))
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) * self.scale
# layer norm
class PreNorm(nn.Module):
    """Pre-normalization wrapper: LayerNorm before `fn`, with an optional
    second ("sandwich") LayerNorm after it."""

    def __init__(self, dim, fn, sandwich = False):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.norm_out(self.fn(self.norm(x), **kwargs))
# feed forward
class GEGLU(nn.Module):
    """Gated GELU: splits the last dim in half and multiplies the value half
    by GELU of the gate half."""

    def forward(self, x):
        value, gate = x.chunk(2, dim=-1)
        return value * F.gelu(gate)
class FeedForward(nn.Module):
    """GEGLU feed-forward block: Linear -> GEGLU -> Dropout -> Linear.

    Fix over the original: the default `mult = 4.` is a float, so
    `dim * mult * 2` produced float layer sizes, which modern PyTorch rejects
    when constructing nn.Linear. Sizes are now cast to int (same values for
    all integer-valued mults).
    """
    def __init__(self, dim, dropout = 0., mult = 4.):
        super().__init__()
        inner_dim = int(dim * mult)
        self.net = nn.Sequential(
            nn.Linear(dim, inner_dim * 2),  # doubled width: GEGLU halves it
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim)
        )

    def forward(self, x):
        return self.net(x)
# Attention
class Attention(nn.Module):
    """
    Multi-head (optionally causal) self-attention with an external padding
    mask. Masked positions are filled with the dtype's most negative value
    before the softmax.
    """
    def __init__(self, dim, seq_len, causal = True, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.seq_len = seq_len
        self.scale = dim_head ** -0.5
        self.causal = causal
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )
    def forward(self, x, mask = None):
        """
        :param x: (batch, seq, dim) input.
        :param mask: optional (batch, seq) boolean mask; False positions are
            excluded from attention (as keys).
        """
        b, n, _, h, device = *x.shape, self.heads, x.device
        softmax = torch.softmax
        # Single projection, then split into per-head q, k, v.
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
        q = q * self.scale
        dots = torch.einsum('b h i d, b h j d -> b h i j', q, k)
        mask_value = max_neg_value(dots)
        if exists(mask):
            # Broadcast the key-padding mask over heads and query positions.
            mask = rearrange(mask, 'b j -> b () () j')
            dots.masked_fill_(~mask, mask_value)
            del mask
        if self.causal:
            # Upper-triangular mask blocks attention to future positions.
            i, j = dots.shape[-2:]
            mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
            dots.masked_fill_(mask, mask_value)
        attn = softmax(dots, dim=-1)
        out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out
# main transformer class
class Transformer(nn.Module):
    """
    Stack of `depth` pre-norm, layer-scaled (attention, feed-forward) pairs
    executed with residual connections; the 'mask' kwarg is routed to the
    attention sub-blocks only.

    Fix over the original: dataset-extraction metadata fused onto the final
    line (which made the module syntactically invalid) was removed.
    """
    def __init__(
            self,
            *,
            dim,
            depth,
            seq_len,
            causal = True,
            heads = 8,
            dim_head = 64,
            ff_mult = 4,
            attn_dropout = 0.,
            ff_dropout = 0.,
            sparse_attn = False,
            sandwich_norm = False,
    ):
        super().__init__()
        layers = nn.ModuleList([])
        sparse_layer = cast_tuple(sparse_attn, depth)
        for ind, sparse_attn in zip(range(depth), sparse_layer):
            attn = Attention(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
            ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)
            layers.append(nn.ModuleList([
                LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
                LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
            ]))
        execute_type = SequentialSequence
        # Route the 'mask' kwarg to the attention block of every layer pair.
        route_attn = ((True, False),) * depth
        attn_route_map = {'mask': route_attn}
        self.layers = execute_type(layers, args_route = attn_route_map)

    def forward(self, x, **kwargs):
        return self.layers(x, **kwargs)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from ruth_tts_transformer.models.arch_util import CheckpointedXTransformerEncoder
from ruth_tts_transformer.models.transformer import Transformer
from ruth_tts_transformer.models.xtransformers import Encoder
def exists(val):
    # Any non-None value (including falsy ones like 0 or "") counts as present.
    return not (val is None)
def masked_mean(t, mask, dim = 1):
    """Mean of a (b, s, d) tensor over the sequence axis, counting only
    positions where the (b, s) boolean `mask` is True.

    NOTE(review): `dim` is accepted but unused — the reduction is hard-coded
    to axis 1; confirm before relying on it.
    """
    filled = t.masked_fill(mask[:, :, None].logical_not(), 0.)
    return filled.sum(dim=1) / mask.sum(dim=1)[..., None]
class CLVP(nn.Module):
    """
    CLIP model retrofitted for performing contrastive evaluation between tokenized audio data and the corresponding
    transcribed text.

    Originally from https://github.com/lucidrains/DALLE-pytorch/blob/main/dalle_pytorch/dalle_pytorch.py
    """

    def __init__(
            self,
            *,
            dim_text=512,
            dim_speech=512,
            dim_latent=512,
            num_text_tokens=256,
            text_enc_depth=6,
            text_seq_len=120,
            text_heads=8,
            num_speech_tokens=8192,
            speech_enc_depth=6,
            speech_heads=8,
            speech_seq_len=250,
            text_mask_percentage=0,
            voice_mask_percentage=0,
            wav_token_compression=1024,
            use_xformers=False,
    ):
        super().__init__()
        self.text_emb = nn.Embedding(num_text_tokens, dim_text)
        self.to_text_latent = nn.Linear(dim_text, dim_latent, bias=False)
        self.speech_emb = nn.Embedding(num_speech_tokens, dim_speech)
        self.to_speech_latent = nn.Linear(dim_speech, dim_latent, bias=False)
        # Two encoder variants: the x-transformers path uses rotary position
        # embeddings, the plain Transformer path uses learned absolute ones.
        if use_xformers:
            self.text_transformer = CheckpointedXTransformerEncoder(
                needs_permute=False,
                exit_permute=False,
                max_seq_len=-1,
                attn_layers=Encoder(
                    dim=dim_text,
                    depth=text_enc_depth,
                    heads=text_heads,
                    ff_dropout=.1,
                    ff_mult=2,
                    attn_dropout=.1,
                    use_rmsnorm=True,
                    ff_glu=True,
                    rotary_pos_emb=True,
                ))
            self.speech_transformer = CheckpointedXTransformerEncoder(
                needs_permute=False,
                exit_permute=False,
                max_seq_len=-1,
                attn_layers=Encoder(
                    dim=dim_speech,
                    depth=speech_enc_depth,
                    heads=speech_heads,
                    ff_dropout=.1,
                    ff_mult=2,
                    attn_dropout=.1,
                    use_rmsnorm=True,
                    ff_glu=True,
                    rotary_pos_emb=True,
                ))
        else:
            self.text_transformer = Transformer(causal=False, seq_len=text_seq_len, dim=dim_text, depth=text_enc_depth,
                                                heads=text_heads)
            self.speech_transformer = Transformer(causal=False, seq_len=speech_seq_len, dim=dim_speech,
                                                  depth=speech_enc_depth, heads=speech_heads)
        # Learned (log-space via exp()) temperature for the similarity logits.
        self.temperature = nn.Parameter(torch.tensor(1.))
        self.text_mask_percentage = text_mask_percentage
        self.voice_mask_percentage = voice_mask_percentage
        self.wav_token_compression = wav_token_compression
        self.xformers = use_xformers
        if not use_xformers:
            self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
            self.speech_pos_emb = nn.Embedding(num_speech_tokens, dim_speech)

    def forward(
            self,
            text,
            speech_tokens,
            return_loss=False
    ):
        """
        :param text: (b, text_seq) text token ids.
        :param speech_tokens: (b, speech_seq) speech token ids.
        :param return_loss: when True, return the symmetric contrastive
            (InfoNCE) loss; otherwise return per-pair similarities (b,).
        """
        b, device = text.shape[0], text.device
        # During training, randomly drop tokens from the pooled means as
        # regularization; at eval everything is kept.
        if self.training:
            text_mask = torch.rand_like(text.float()) > self.text_mask_percentage
            voice_mask = torch.rand_like(speech_tokens.float()) > self.voice_mask_percentage
        else:
            text_mask = torch.ones_like(text.float()).bool()
            voice_mask = torch.ones_like(speech_tokens.float()).bool()
        text_emb = self.text_emb(text)
        speech_emb = self.speech_emb(speech_tokens)
        if not self.xformers:
            text_emb += self.text_pos_emb(torch.arange(text.shape[1], device=device))
            speech_emb += self.speech_pos_emb(torch.arange(speech_emb.shape[1], device=device))
        enc_text = self.text_transformer(text_emb, mask=text_mask)
        enc_speech = self.speech_transformer(speech_emb, mask=voice_mask)
        # Pool each sequence to one vector (mask-aware mean), then project.
        text_latents = masked_mean(enc_text, text_mask, dim=1)
        speech_latents = masked_mean(enc_speech, voice_mask, dim=1)
        text_latents = self.to_text_latent(text_latents)
        speech_latents = self.to_speech_latent(speech_latents)
        # L2-normalize so the dot product is a cosine similarity.
        text_latents, speech_latents = map(lambda t: F.normalize(t, p=2, dim=-1), (text_latents, speech_latents))
        temp = self.temperature.exp()
        if not return_loss:
            # Diagonal (paired) similarities only.
            sim = einsum('n d, n d -> n', text_latents, speech_latents) * temp
            return sim
        # Full pairwise similarity matrix; matched pairs lie on the diagonal.
        sim = einsum('i d, j d -> i j', text_latents, speech_latents) * temp
        labels = torch.arange(b, device=device)
        loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
        return loss
if __name__ == '__main__':
    # Smoke test: contrastive loss and similarity between random token batches.
    # Fixes over the original: CLVP.forward takes (text, speech_tokens,
    # return_loss), but the old calls passed two extra length tensors
    # positionally (a TypeError at runtime); the stray length tensors are
    # dropped. Dataset-extraction metadata fused to the last line was removed.
    clip = CLVP(text_mask_percentage=.2, voice_mask_percentage=.2)
    clip(torch.randint(0, 256, (2, 120)),
         torch.randint(0, 8192, (2, 250)),
         return_loss=True)
    nonloss = clip(torch.randint(0, 256, (2, 120)),
                   torch.randint(0, 8192, (2, 250)),
                   return_loss=False)
    print(nonloss.shape)
import os
import functools
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
from ruth_tts_transformer.models.xtransformers import ContinuousTransformerWrapper, RelativePositionBias
def zero_module(module):
    """
    Zero every parameter of *module* in place and return the same module.
    Used to make the last layer of a residual branch start as an identity.
    """
    for param in module.parameters():
        param.detach().zero_()
    return module
class GroupNorm32(nn.GroupNorm):
    """GroupNorm computed in fp32, result cast back to the input dtype."""

    def forward(self, x):
        normed = super().forward(x.float())
        return normed.type(x.dtype)


def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    # Start from the widest reasonable group count for this channel width...
    if channels <= 16:
        groups = 8
    elif channels <= 64:
        groups = 16
    else:
        groups = 32
    # ...then halve until the channel count divides evenly.
    while channels % groups != 0:
        groups = int(groups / 2)
    assert groups > 2
    return GroupNorm32(groups, channels)
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
    """
    def __init__(self, n_heads):
        super().__init__()
        # Number of attention heads; channel width must be divisible by 3*n_heads.
        self.n_heads = n_heads
    def forward(self, qkv, mask=None, rel_pos=None):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :param mask: optional [N x T] mask; multiplied into the attention
            weights *after* the softmax (see inline note).
        :param rel_pos: optional relative-position bias module applied to the
            raw attention scores before the softmax.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        # Fold heads into the batch dim, then split channels into q, k, v.
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        # Scale q and k by ch**-0.25 each (total ch**-0.5) before the matmul.
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = torch.einsum(
            "bct,bcs->bts", q * scale, k * scale
        ) # More stable with f16 than dividing afterwards
        if rel_pos is not None:
            # rel_pos expects a (bs, heads, T, T) view of the scores.
            weight = rel_pos(weight.reshape(bs, self.n_heads, weight.shape[-2], weight.shape[-1])).reshape(bs * self.n_heads, weight.shape[-2], weight.shape[-1])
        # Softmax in fp32 for numerical stability, then cast back.
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        if mask is not None:
            # The proper way to do this is to mask before the softmax using -inf, but that doesn't work properly on CPUs.
            mask = mask.repeat(self.n_heads, 1).unsqueeze(1)
            weight = weight * mask
        a = torch.einsum("bts,bcs->bct", weight, v)
        # Unfold heads back into the channel dimension.
        return a.reshape(bs, -1, length)
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """
    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        do_checkpoint=True,
        relative_pos_embeddings=False,
    ):
        super().__init__()
        self.channels = channels
        # NOTE: stored but not consulted in forward() as written here.
        self.do_checkpoint = do_checkpoint
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.norm = normalization(channels)
        # Single 1x1 conv produces q, k and v stacked along channels.
        self.qkv = nn.Conv1d(channels, channels * 3, 1)
        # split heads before split qkv
        self.attention = QKVAttentionLegacy(self.num_heads)
        # Zero-initialized projection: the block starts as an identity mapping.
        self.proj_out = zero_module(nn.Conv1d(channels, channels, 1))
        if relative_pos_embeddings:
            self.relative_pos_embeddings = RelativePositionBias(scale=(channels // self.num_heads) ** .5, causal=False, heads=num_heads, num_buckets=32, max_distance=64)
        else:
            self.relative_pos_embeddings = None
    def forward(self, x, mask=None):
        """Self-attend over the flattened spatial dims; residual output keeps x's shape."""
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv, mask, self.relative_pos_embeddings)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)
class Upsample(nn.Module):
    """
    Nearest-neighbor temporal upsampling by *factor*, optionally followed by a
    length-preserving convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    """

    def __init__(self, channels, use_conv, out_channels=None, factor=4):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.factor = factor
        if use_conv:
            # Kernel 5 with padding 2 keeps the temporal length unchanged.
            self.conv = nn.Conv1d(self.channels, self.out_channels, 5, padding=2)

    def forward(self, x):
        assert x.shape[1] == self.channels
        upsampled = F.interpolate(x, scale_factor=self.factor, mode="nearest")
        return self.conv(upsampled) if self.use_conv else upsampled
class Downsample(nn.Module):
    """
    Temporal downsampling by *factor*, either via a strided convolution
    (use_conv=True) or average pooling.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    """

    def __init__(self, channels, use_conv, out_channels=None, factor=4, ksize=5, pad=2):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        if use_conv:
            self.op = nn.Conv1d(
                self.channels, self.out_channels, ksize, stride=factor, padding=pad
            )
        else:
            # Pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = nn.AvgPool1d(kernel_size=factor, stride=factor)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(nn.Module):
    """Residual block: GN -> SiLU -> conv, then GN -> SiLU -> dropout -> zeroed conv,
    summed with a (possibly projected) skip connection. Optional up/downsampling
    is applied between the first norm/activation and its convolution.
    """
    def __init__(
        self,
        channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        up=False,
        down=False,
        kernel_size=3,
    ):
        super().__init__()
        self.channels = channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        # NOTE: stored but not used by this forward() implementation.
        self.use_scale_shift_norm = use_scale_shift_norm
        # Length-preserving padding for kernel sizes 3 and 5.
        padding = 1 if kernel_size == 3 else 2
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding),
        )
        self.updown = up or down
        if up:
            self.h_upd = Upsample(channels, False)
            self.x_upd = Upsample(channels, False)
        elif down:
            self.h_upd = Downsample(channels, False)
            self.x_upd = Downsample(channels, False)
        else:
            self.h_upd = self.x_upd = nn.Identity()
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            # Zero-initialized: the residual branch starts as a no-op.
            zero_module(
                nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding)
            ),
        )
        # Skip path: identity when widths match, else a conv projection.
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = nn.Conv1d(
                channels, self.out_channels, kernel_size, padding=padding
            )
        else:
            self.skip_connection = nn.Conv1d(channels, self.out_channels, 1)
    def forward(self, x):
        if self.updown:
            # Resample between the first norm/activation and its convolution,
            # applying the same resampling to the skip input.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        h = self.out_layers(h)
        return self.skip_connection(x) + h
class AudioMiniEncoder(nn.Module):
    """Encode a spectrogram (b, spec_dim, s) into a single embedding (b, embedding_dim):
    conv stem -> depth x (resnet blocks + downsample) -> projection -> attention
    stack, finally taking the first temporal position as the embedding.
    """
    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 base_channels=128,
                 depth=2,
                 resnet_blocks=2,
                 attn_blocks=4,
                 num_attn_heads=4,
                 dropout=0,
                 downsample_factor=2,
                 kernel_size=3):
        super().__init__()
        self.init = nn.Sequential(
            nn.Conv1d(spec_dim, base_channels, 3, padding=1)
        )
        ch = base_channels
        res = []
        # Each depth level: resnet_blocks residual blocks, then downsample and
        # double the channel count.
        for l in range(depth):
            for r in range(resnet_blocks):
                res.append(ResBlock(ch, dropout, kernel_size=kernel_size))
            res.append(Downsample(ch, use_conv=True, out_channels=ch*2, factor=downsample_factor))
            ch *= 2
        self.res = nn.Sequential(*res)
        self.final = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            nn.Conv1d(ch, embedding_dim, 1)
        )
        attn = []
        for a in range(attn_blocks):
            attn.append(AttentionBlock(embedding_dim, num_attn_heads,))
        self.attn = nn.Sequential(*attn)
        self.dim = embedding_dim
    def forward(self, x):
        h = self.init(x)
        h = self.res(h)
        h = self.final(h)
        h = self.attn(h)
        # Use the first temporal position as the summary embedding.
        return h[:, :, 0]
# Default path to per-channel MEL normalization stats shipped with the package.
DEFAULT_MEL_NORM_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/mel_norms.pth')
class TorchMelSpectrogram(nn.Module):
    """Compute a log-compressed (and optionally per-channel normalized) MEL
    spectrogram from a waveform, using torchaudio's MelSpectrogram transform.
    """
    def __init__(self, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, mel_fmin=0, mel_fmax=8000,
                 sampling_rate=22050, normalize=False, mel_norm_file=DEFAULT_MEL_NORM_FILE):
        super().__init__()
        # These are the default tacotron values for the MEL spectrogram.
        self.filter_length = filter_length
        self.hop_length = hop_length
        self.win_length = win_length
        self.n_mel_channels = n_mel_channels
        self.mel_fmin = mel_fmin
        self.mel_fmax = mel_fmax
        self.sampling_rate = sampling_rate
        self.mel_stft = torchaudio.transforms.MelSpectrogram(n_fft=self.filter_length, hop_length=self.hop_length,
                                                             win_length=self.win_length, power=2, normalized=normalize,
                                                             sample_rate=self.sampling_rate, f_min=self.mel_fmin,
                                                             f_max=self.mel_fmax, n_mels=self.n_mel_channels,
                                                             norm="slaney")
        self.mel_norm_file = mel_norm_file
        # Optional per-mel-channel normalization divisors loaded from disk.
        if self.mel_norm_file is not None:
            self.mel_norms = torch.load(self.mel_norm_file)
        else:
            self.mel_norms = None
    def forward(self, inp):
        """inp: waveform (b, samples) or (b, 1, samples) -> MEL (b, n_mels, frames)."""
        if len(inp.shape) == 3:  # Automatically squeeze out the channels dimension if it is present (assuming mono-audio)
            inp = inp.squeeze(1)
        assert len(inp.shape) == 2
        # Lazily move the transform to the input's device.
        self.mel_stft = self.mel_stft.to(inp.device)
        mel = self.mel_stft(inp)
        # Perform dynamic range compression
        mel = torch.log(torch.clamp(mel, min=1e-5))
        if self.mel_norms is not None:
            self.mel_norms = self.mel_norms.to(mel.device)
            mel = mel / self.mel_norms.unsqueeze(0).unsqueeze(-1)
        return mel
class CheckpointedLayer(nn.Module):
    """
    Wraps a callable module. On forward(), keyword arguments are bound via
    functools.partial and only the positional tensor arguments flow through,
    which keeps the call compatible with torch checkpointing.
    """

    def __init__(self, wrap):
        super().__init__()
        self.wrap = wrap

    def forward(self, x, *args, **kwargs):
        # Gradient-requiring tensors must not arrive as kwargs: they would be
        # hidden from the checkpointing machinery and break the backward pass.
        for key, value in kwargs.items():
            assert not (isinstance(value, torch.Tensor) and value.requires_grad)
        bound = functools.partial(self.wrap, **kwargs)
        return bound(x, *args)
class CheckpointedXTransformerEncoder(nn.Module):
    """
    Wraps a ContinuousTransformerWrapper and applies CheckpointedLayer to each layer and permutes from channels-mid
    to channels-last that XTransformer expects.
    """
    def __init__(self, needs_permute=True, exit_permute=True, checkpoint=True, **xtransformer_kwargs):
        super().__init__()
        self.transformer = ContinuousTransformerWrapper(**xtransformer_kwargs)
        self.needs_permute = needs_permute
        self.exit_permute = exit_permute
        if not checkpoint:
            return
        # Wrap each attention block so its activations are recomputed in the
        # backward pass instead of stored (memory savings during training).
        for i in range(len(self.transformer.attn_layers.layers)):
            n, b, r = self.transformer.attn_layers.layers[i]
            self.transformer.attn_layers.layers[i] = nn.ModuleList([n, CheckpointedLayer(b), r])
    def forward(self, x, **kwargs):
        """Run the transformer, permuting (b, c, s) <-> (b, s, c) at the edges as configured."""
        if self.needs_permute:
            x = x.permute(0, 2, 1)
        h = self.transformer(x, **kwargs)
        if self.exit_permute:
            h = h.permute(0, 2, 1)
        # Fixed: this return statement previously had stray non-Python text
        # (fused packaging metadata) appended to the line, breaking the module.
        return h
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import GPT2Config, GPT2PreTrainedModel, LogitsProcessorList
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
from transformers.utils.model_parallel_utils import get_device_map, assert_device_map
from ruth_tts_transformer.models.arch_util import AttentionBlock
from ruth_tts_transformer.utils.typical_sampling import TypicalLogitsWarper
def null_position_embeddings(range, dim):
    """Stand-in positional embedding: all zeros of shape (b, s, dim) on range's device."""
    batch, seq = range.shape[0], range.shape[1]
    return torch.zeros((batch, seq, dim), device=range.device)
class ResBlock(nn.Module):
    """
    Basic residual convolutional block that uses GroupNorm:
    conv -> GN -> ReLU -> conv -> GN, added to the input and passed through ReLU.
    """

    def __init__(self, chan):
        super().__init__()
        layers = [
            nn.Conv1d(chan, chan, kernel_size=3, padding=1),
            nn.GroupNorm(chan // 8, chan),
            nn.ReLU(),
            nn.Conv1d(chan, chan, kernel_size=3, padding=1),
            nn.GroupNorm(chan // 8, chan),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        residual = self.net(x)
        return F.relu(residual + x)
class GPT2InferenceModel(GPT2PreTrainedModel):
    """Inference-only GPT-2 wrapper around the trained UnifiedVoice stack.

    Reuses the shared GPT transformer, the MEL positional embedding, the MEL
    token embedding and the norm+head projection. A pre-computed
    conditioning+text embedding is cached via store_mel_emb() and prepended to
    the sequence so HuggingFace generate() can drive MEL-code sampling.
    """
    def __init__(self, config, gpt, text_pos_emb, embeddings, norm, linear):
        super().__init__(config)
        self.transformer = gpt
        self.text_pos_embedding = text_pos_emb
        self.embeddings = embeddings
        # Final layer-norm followed by the projection to MEL-code logits.
        self.lm_head = nn.Sequential(norm, linear)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        # Must be populated by store_mel_emb() before calling forward/generate.
        self.cached_mel_emb = None
    def parallelize(self, device_map=None):
        """Spread transformer layers across the visible CUDA devices."""
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True
    def deparallelize(self):
        """Undo parallelize(): move everything back to CPU and free CUDA cache."""
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        torch.cuda.empty_cache()
    def get_output_embeddings(self):
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    def store_mel_emb(self, mel_emb):
        # Cache the conditioning+text embedding prefix used by forward().
        self.cached_mel_emb = mel_emb
    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        """Standard HF generation hook: trim inputs to the last token when a
        past cache exists and derive position_ids from the attention mask."""
        token_type_ids = kwargs.get("token_type_ids", None)
        # only last token for inputs_ids if past is defined in kwargs
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None
        return {
            "input_ids": input_ids,
            "past_key_values": past,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Embed tokens (prefixing the cached conditioning embedding on the
        first step), run the shared GPT stack, and project to MEL-code logits."""
        assert self.cached_mel_emb is not None
        assert inputs_embeds is None  # Not supported by this inference model.
        assert labels is None  # Training not supported by this inference model.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Create embedding
        mel_len = self.cached_mel_emb.shape[1]
        if input_ids.shape[1] != 1:
            # First generation step: input_ids include placeholder positions for
            # the cached prefix; embed only the trailing (MEL-token) part.
            text_inputs = input_ids[:, mel_len:]
            text_emb = self.embeddings(text_inputs)
            text_emb = text_emb + self.text_pos_embedding(text_emb)
            if self.cached_mel_emb.shape[0] != text_emb.shape[0]:
                # Expand the cached prefix for multi-sequence sampling.
                mel_emb = self.cached_mel_emb.repeat_interleave(text_emb.shape[0] // self.cached_mel_emb.shape[0], 0)
            else:
                mel_emb = self.cached_mel_emb
            emb = torch.cat([mel_emb, text_emb], dim=1)
        else:
            # Incremental step: embed the single new token with its fixed
            # positional embedding (offset past the cached prefix).
            emb = self.embeddings(input_ids)
            emb = emb + self.text_pos_embedding.get_fixed_embedding(attention_mask.shape[1] - mel_len,
                                                                    attention_mask.device)
        transformer_outputs = self.transformer(
            inputs_embeds=emb,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)
        lm_logits = self.lm_head(hidden_states)
        if not return_dict:
            return (lm_logits,) + transformer_outputs[1:]
        return CausalLMOutputWithCrossAttentions(
            loss=None,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )
    @staticmethod
    def _reorder_cache(past, beam_idx):
        """
        This function is used to re-order the :obj:`past_key_values` cache if
        :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
        called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past
        )
class ConditioningEncoder(nn.Module):
    """Encode a spectrogram (b, spec_dim, s) into a conditioning vector
    (b, embedding_dim): a 1x1 conv stem followed by a stack of attention
    blocks, summarized either by the mean over time or the first position.
    """
    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 attn_blocks=6,
                 num_attn_heads=4,
                 do_checkpointing=False,
                 mean=False):
        super().__init__()
        attn = []
        self.init = nn.Conv1d(spec_dim, embedding_dim, kernel_size=1)
        for a in range(attn_blocks):
            attn.append(AttentionBlock(embedding_dim, num_attn_heads))
        self.attn = nn.Sequential(*attn)
        self.dim = embedding_dim
        # NOTE: stored but not consulted in forward() as written here.
        self.do_checkpointing = do_checkpointing
        self.mean = mean
    def forward(self, x):
        h = self.init(x)
        h = self.attn(h)
        if self.mean:
            # Average over the temporal dimension.
            return h.mean(dim=2)
        else:
            # Use the first temporal position as the summary.
            return h[:, :, 0]
class LearnedPositionEmbeddings(nn.Module):
    """Learned absolute position embeddings with GPT-2-style initialization."""

    def __init__(self, seq_len, model_dim, init=.02):
        super().__init__()
        self.emb = nn.Embedding(seq_len, model_dim)
        # GPT-2 initializes embeddings from N(0, 0.02).
        self.emb.weight.data.normal_(mean=0.0, std=init)

    def forward(self, x):
        """Return embeddings for positions 0..x.shape[1]-1, shape (s, model_dim)."""
        positions = torch.arange(0, x.shape[1], device=x.device)
        return self.emb(positions)

    def get_fixed_embedding(self, ind, dev):
        """Return the embedding for one position *ind*, shaped (1, 1, model_dim)."""
        index = torch.tensor([ind], device=dev)
        return self.emb(index).unsqueeze(0)
def build_hf_gpt_transformer(layers, model_dim, heads, max_mel_seq_len, max_text_seq_len, checkpointing):
    """
    GPT-2 implemented by the HuggingFace library.

    Builds a GPT2Model sized for concatenated MEL+text sequences, disables its
    built-in positional and token embeddings (the caller supplies both), and
    returns (gpt, mel_pos_emb, text_pos_emb, None, None).
    """
    from transformers import GPT2Config, GPT2Model
    gpt_config = GPT2Config(vocab_size=256,  # Unused.
                            n_positions=max_mel_seq_len + max_text_seq_len,
                            n_ctx=max_mel_seq_len + max_text_seq_len,
                            n_embd=model_dim,
                            n_layer=layers,
                            n_head=heads,
                            gradient_checkpointing=checkpointing,
                            use_cache=not checkpointing)
    gpt = GPT2Model(gpt_config)
    # Override the built in positional embeddings
    del gpt.wpe
    # Replaced with a zero-embedding stub; real positions are added externally.
    gpt.wpe = functools.partial(null_position_embeddings, dim=model_dim)
    # Built-in token embeddings are unused.
    del gpt.wte
    return gpt, LearnedPositionEmbeddings(max_mel_seq_len, model_dim), LearnedPositionEmbeddings(max_text_seq_len,
                                                                                                 model_dim), \
           None, None
class MelEncoder(nn.Module):
    """Encode a raw MEL spectrogram (b, mel_channels, s) into a sequence of
    embeddings (b, s/4, channels) via two stride-2 conv stages interleaved
    with residual blocks (total temporal reduction: 4x).
    """
    def __init__(self, channels, mel_channels=80, resblocks_per_reduction=2):
        super().__init__()
        self.channels = channels
        self.encoder = nn.Sequential(nn.Conv1d(mel_channels, channels // 4, kernel_size=3, padding=1),
                                     nn.Sequential(*[ResBlock(channels // 4) for _ in range(resblocks_per_reduction)]),
                                     nn.Conv1d(channels // 4, channels // 2, kernel_size=3, stride=2, padding=1),
                                     nn.GroupNorm(channels // 16, channels // 2),
                                     nn.ReLU(),
                                     nn.Sequential(*[ResBlock(channels // 2) for _ in range(resblocks_per_reduction)]),
                                     nn.Conv1d(channels // 2, channels, kernel_size=3, stride=2, padding=1),
                                     nn.GroupNorm(channels // 8, channels),
                                     nn.ReLU(),
                                     nn.Sequential(*[ResBlock(channels) for _ in range(resblocks_per_reduction)]),
                                     )
        # Overall temporal downsampling factor of this encoder.
        self.reduction = 4
    def forward(self, x):
        for e in self.encoder:
            x = e(x)
        # Return channels-last: (b, s/4, channels).
        return x.permute(0, 2, 1)
class UnifiedVoice(nn.Module):
    """Autoregressive GPT over concatenated [conditioning, text, MEL-code]
    sequences: trains with dual text/MEL heads and performs inference through a
    GPT2InferenceModel wrapper (see inference_speech).
    """
    def __init__(self, layers=8, model_dim=512, heads=8, max_text_tokens=120, max_mel_tokens=250,
                 max_conditioning_inputs=1,
                 mel_length_compression=1024, number_text_tokens=256,
                 start_text_token=None, number_mel_codes=8194, start_mel_token=8192,
                 stop_mel_token=8193, train_solo_embeddings=False, use_mel_codes_as_input=True,
                 checkpointing=True, types=1):
        """
        Args:
            layers: Number of layers in transformer stack.
            model_dim: Operating dimensions of the transformer
            heads: Number of transformer heads. Must be divisible by model_dim. Recommend model_dim//64
            max_text_tokens: Maximum number of text tokens that will be encountered by model.
            max_mel_tokens: Maximum number of MEL tokens that will be encountered by model.
            max_conditioning_inputs: Maximum number of conditioning inputs provided to the model. If (1), conditioning input can be of format (b,80,s), otherwise (b,n,80,s).
            mel_length_compression: The factor between <number_input_samples> and <mel_tokens>. Used to compute MEL code padding given wav input length.
            number_text_tokens:
            start_text_token:
            stop_text_token:
            number_mel_codes:
            start_mel_token:
            stop_mel_token:
            train_solo_embeddings:
            use_mel_codes_as_input:
            checkpointing:
        """
        super().__init__()
        self.number_text_tokens = number_text_tokens
        # Text vocab is expanded by `types`; the start token sits just past it.
        self.start_text_token = number_text_tokens * types if start_text_token is None else start_text_token
        self.stop_text_token = 0
        self.number_mel_codes = number_mel_codes
        self.start_mel_token = start_mel_token
        self.stop_mel_token = stop_mel_token
        self.layers = layers
        self.heads = heads
        self.max_mel_tokens = max_mel_tokens
        self.max_text_tokens = max_text_tokens
        self.model_dim = model_dim
        self.max_conditioning_inputs = max_conditioning_inputs
        self.mel_length_compression = mel_length_compression
        self.conditioning_encoder = ConditioningEncoder(80, model_dim, num_attn_heads=heads)
        self.text_embedding = nn.Embedding(self.number_text_tokens * types + 1, model_dim)
        if use_mel_codes_as_input:
            self.mel_embedding = nn.Embedding(self.number_mel_codes, model_dim)
        else:
            self.mel_embedding = MelEncoder(model_dim, resblocks_per_reduction=1)
        self.gpt, self.mel_pos_embedding, self.text_pos_embedding, self.mel_layer_pos_embedding, self.text_layer_pos_embedding = \
            build_hf_gpt_transformer(layers, model_dim, heads, self.max_mel_tokens + 2 + self.max_conditioning_inputs,
                                     self.max_text_tokens + 2, checkpointing)
        if train_solo_embeddings:
            self.mel_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * .02, requires_grad=True)
            self.text_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * .02, requires_grad=True)
        else:
            self.mel_solo_embedding = 0
            self.text_solo_embedding = 0
        self.final_norm = nn.LayerNorm(model_dim)
        self.text_head = nn.Linear(model_dim, self.number_text_tokens * types + 1)
        self.mel_head = nn.Linear(model_dim, self.number_mel_codes)
        # Initialize the embeddings per the GPT-2 scheme
        embeddings = [self.text_embedding]
        if use_mel_codes_as_input:
            embeddings.append(self.mel_embedding)
        for module in embeddings:
            module.weight.data.normal_(mean=0.0, std=.02)
    def build_aligned_inputs_and_targets(self, input, start_token, stop_token):
        """Prefix inputs with start_token, suffix targets with stop_token (shift-by-one alignment)."""
        inp = F.pad(input, (1, 0), value=start_token)
        tar = F.pad(input, (0, 1), value=stop_token)
        return inp, tar
    def set_mel_padding(self, mel_input_tokens, wav_lengths):
        """
        Given mel tokens that are derived from a padded audio clip and the actual lengths of each batch element in
        that audio clip, reformats the tokens with STOP_MEL_TOKEN in place of the zero padding. This is required
        preformatting to create a working TTS model.
        """
        # Set padding areas within MEL (currently it is coded with the MEL code for <zero>).
        mel_lengths = torch.div(wav_lengths, self.mel_length_compression, rounding_mode='trunc')
        for b in range(len(mel_lengths)):
            actual_end = mel_lengths[
                b] + 1  # Due to the convolutional nature of how these tokens are generated, it would be best if the model predicts a token past the actual last token.
            if actual_end < mel_input_tokens.shape[-1]:
                mel_input_tokens[b, actual_end:] = self.stop_mel_token
        return mel_input_tokens
    def get_logits(self, speech_conditioning_inputs, first_inputs, first_head, second_inputs=None, second_head=None,
                   get_attns=False, return_latent=False):
        """Run the GPT over [cond, first, (second)] embeddings and project each
        segment's hidden states through its head; returns channels-first logits."""
        if second_inputs is not None:
            emb = torch.cat([speech_conditioning_inputs, first_inputs, second_inputs], dim=1)
        else:
            emb = torch.cat([speech_conditioning_inputs, first_inputs], dim=1)
        gpt_out = self.gpt(inputs_embeds=emb, return_dict=True, output_attentions=get_attns)
        if get_attns:
            return gpt_out.attentions
        enc = gpt_out.last_hidden_state[:, 1:]  # The first logit is tied to the speech_conditioning_input
        enc = self.final_norm(enc)
        if return_latent:
            return enc[:, speech_conditioning_inputs.shape[1]:speech_conditioning_inputs.shape[1] + first_inputs.shape[
                1]], enc[:, -second_inputs.shape[1]:]
        first_logits = enc[:, :first_inputs.shape[1]]
        first_logits = first_head(first_logits)
        first_logits = first_logits.permute(0, 2, 1)
        if second_inputs is not None:
            second_logits = enc[:, -second_inputs.shape[1]:]
            second_logits = second_head(second_logits)
            second_logits = second_logits.permute(0, 2, 1)
            return first_logits, second_logits
        else:
            return first_logits
    def get_conditioning(self, speech_conditioning_input):
        """Encode raw conditioning MEL(s) (b,80,s) or (b,n,80,s) into a single
        (b, model_dim) latent by averaging per-clip encodings."""
        speech_conditioning_input = speech_conditioning_input.unsqueeze(1) if len(
            speech_conditioning_input.shape) == 3 else speech_conditioning_input
        conds = []
        for j in range(speech_conditioning_input.shape[1]):
            conds.append(self.conditioning_encoder(speech_conditioning_input[:, j]))
        conds = torch.stack(conds, dim=1)
        conds = conds.mean(dim=1)
        return conds
    def forward(self, speech_conditioning_latent, text_inputs, text_lengths, mel_codes, wav_lengths, types=None,
                text_first=True, raw_mels=None, return_attentions=False,
                return_latent=False, clip_inputs=True):
        """
        Forward pass that uses both text and voice in either text conditioning mode or voice conditioning mode
        (actuated by `text_first`).
        speech_conditioning_input: MEL float tensor, (b,1024)
        text_inputs: long tensor, (b,t)
        text_lengths: long tensor, (b,)
        mel_inputs: long tensor, (b,m)
        wav_lengths: long tensor, (b,)
        raw_mels: MEL float tensor (b,80,s)
        If return_attentions is specified, only logits are returned.
        If return_latent is specified, loss & logits are not computed or returned. Only the predicted latents are returned.
        If clip_inputs is True, the inputs will be clipped to the smallest input size across each input modality.
        """
        # Types are expressed by expanding the text embedding space.
        if types is not None:
            text_inputs = text_inputs * (1 + types).unsqueeze(-1)
        if clip_inputs:
            # This model will receive micro-batches with a ton of padding for both the text and MELs. Ameliorate this by
            # chopping the inputs by the maximum actual length.
            max_text_len = text_lengths.max()
            text_inputs = text_inputs[:, :max_text_len]
            max_mel_len = wav_lengths.max() // self.mel_length_compression
            mel_codes = mel_codes[:, :max_mel_len]
            if raw_mels is not None:
                raw_mels = raw_mels[:, :, :max_mel_len * 4]
        mel_codes = self.set_mel_padding(mel_codes, wav_lengths)
        text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)
        mel_codes = F.pad(mel_codes, (0, 1), value=self.stop_mel_token)
        conds = speech_conditioning_latent.unsqueeze(1)
        text_inputs, text_targets = self.build_aligned_inputs_and_targets(text_inputs, self.start_text_token,
                                                                          self.stop_text_token)
        text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)
        mel_codes, mel_targets = self.build_aligned_inputs_and_targets(mel_codes, self.start_mel_token,
                                                                       self.stop_mel_token)
        if raw_mels is not None:
            mel_inp = F.pad(raw_mels, (0, 8))
        else:
            mel_inp = mel_codes
        mel_emb = self.mel_embedding(mel_inp)
        mel_emb = mel_emb + self.mel_pos_embedding(mel_codes)
        if text_first:
            text_logits, mel_logits = self.get_logits(conds, text_emb, self.text_head, mel_emb, self.mel_head,
                                                      get_attns=return_attentions, return_latent=return_latent)
            if return_latent:
                return mel_logits[:,
                       :-2]  # Despite the name, these are not logits. Strip off the two tokens added by this forward pass.
        else:
            mel_logits, text_logits = self.get_logits(conds, mel_emb, self.mel_head, text_emb, self.text_head,
                                                      get_attns=return_attentions, return_latent=return_latent)
            if return_latent:
                return text_logits[:,
                       :-2]  # Despite the name, these are not logits. Strip off the two tokens added by this forward pass.
        if return_attentions:
            return mel_logits
        loss_text = F.cross_entropy(text_logits, text_targets.long())
        loss_mel = F.cross_entropy(mel_logits, mel_targets.long())
        return loss_text.mean(), loss_mel.mean(), mel_logits
    def inference_speech(self, speech_conditioning_latent, text_inputs, input_tokens=None, num_return_sequences=1,
                         max_generate_length=None, typical_sampling=False, typical_mass=.9, **hf_generate_kwargs):
        """Autoregressively sample MEL codes for the given conditioning latent and
        text via HuggingFace generate(); returns only the generated code suffix."""
        seq_length = self.max_mel_tokens + self.max_text_tokens + 2
        if not hasattr(self, 'inference_model'):
            # TODO: Decouple gpt_config from this inference model.
            gpt_config = GPT2Config(vocab_size=self.max_mel_tokens,
                                    n_positions=seq_length,
                                    n_ctx=seq_length,
                                    n_embd=self.model_dim,
                                    n_layer=self.layers,
                                    n_head=self.heads,
                                    gradient_checkpointing=False,
                                    use_cache=True)
            self.inference_model = GPT2InferenceModel(gpt_config, self.gpt, self.mel_pos_embedding, self.mel_embedding,
                                                      self.final_norm, self.mel_head)
            self.gpt.wte = self.mel_embedding
        text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)
        text_inputs, text_targets = self.build_aligned_inputs_and_targets(text_inputs, self.start_text_token,
                                                                          self.stop_text_token)
        text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)
        conds = speech_conditioning_latent.unsqueeze(1)
        emb = torch.cat([conds, text_emb], dim=1)
        # Cache the conditioning+text prefix so the inference model can prepend it.
        self.inference_model.store_mel_emb(emb)
        fake_inputs = torch.full((emb.shape[0], conds.shape[1] + emb.shape[1],), fill_value=1, dtype=torch.long,
                                 device=text_inputs.device)
        fake_inputs[:, -1] = self.start_mel_token
        trunc_index = fake_inputs.shape[1]
        if input_tokens is None:
            inputs = fake_inputs
        else:
            assert num_return_sequences % input_tokens.shape[
                0] == 0, "The number of return sequences must be divisible by the number of input sequences"
            fake_inputs = fake_inputs.repeat(num_return_sequences, 1)
            input_tokens = input_tokens.repeat(num_return_sequences // input_tokens.shape[0], 1)
            inputs = torch.cat([fake_inputs, input_tokens], dim=1)
        logits_processor = LogitsProcessorList(
            [TypicalLogitsWarper(mass=typical_mass)]) if typical_sampling else LogitsProcessorList()
        max_length = trunc_index + self.max_mel_tokens - 1 if max_generate_length is None else trunc_index + max_generate_length
        gen = self.inference_model.generate(inputs, bos_token_id=self.start_mel_token, pad_token_id=self.stop_mel_token,
                                            eos_token_id=self.stop_mel_token,
                                            max_length=max_length, logits_processor=logits_processor,
                                            num_return_sequences=num_return_sequences, **hf_generate_kwargs)
        return gen[:, trunc_index:]
if __name__ == '__main__':
    # Smoke test for UnifiedVoice.
    # Fixed: forward() expects a conditioning *latent* of shape (b, model_dim),
    # not raw MELs -- derive it with get_conditioning(). The previous call to
    # gpt.text_forward() was removed because no such method exists on this
    # class (it raised AttributeError); stray fused metadata text on the final
    # line was also dropped.
    gpt = UnifiedVoice(model_dim=256, heads=4, train_solo_embeddings=True, use_mel_codes_as_input=True,
                       max_conditioning_inputs=4)
    cond_latent = gpt.get_conditioning(torch.randn(2, 3, 80, 800))
    l = gpt(cond_latent,
            torch.randint(high=120, size=(2, 120)),
            torch.tensor([32, 120]),
            torch.randint(high=8192, size=(2, 250)),
            torch.tensor([250 * 256, 195 * 256]))
import os
import subprocess
from glob import glob
import librosa
import torch
import torchaudio
import numpy as np
from scipy.io.wavfile import read
from ruth_tts_transformer.utils.stft import STFT
# Directory holding the bundled voice conditioning clips/latents.
BUILTIN_VOICES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../voices')
# First-run bootstrap: if the voices directory is missing, pull it from GitHub
# into the package directory at import time.
# NOTE(review): this shells out via os.system with an interpolated filesystem
# path and performs network/filesystem side effects on import; consider
# subprocess.run with an argument list and an explicit download step instead —
# confirm before changing, since callers may rely on the implicit bootstrap.
if not os.path.isdir(BUILTIN_VOICES_DIR):
    git_voice_download = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
    os.system(f"cd {git_voice_download} && git init && git remote add origin "
              f"https://github.com/prakashr7d/ruth-tts-files.git && "
              f"git pull origin main && git checkout main -f ")
def load_wav_to_torch(full_path):
    """Load a WAV file and scale samples to float32 full-scale range.

    Args:
        full_path: path to a .wav file readable by scipy.io.wavfile.

    Returns:
        (audio, sampling_rate): FloatTensor of samples divided by the dtype's
        full-scale value (so int PCM lands in [-1, 1]), plus the sample rate.

    Raises:
        NotImplementedError: if the sample dtype is not int16/int32/float16/float32.
    """
    sampling_rate, data = read(full_path)
    if data.dtype == np.int32:
        norm_fix = 2 ** 31
    elif data.dtype == np.int16:
        norm_fix = 2 ** 15
    elif data.dtype == np.float16 or data.dtype == np.float32:
        norm_fix = 1.
    else:
        # Bug fix: `NotImplemented` is a non-callable sentinel constant, so
        # `raise NotImplemented(...)` produced a confusing TypeError instead of
        # the intended exception. NotImplementedError is the correct class.
        raise NotImplementedError(f"Provided data dtype not supported: {data.dtype}")
    return (torch.FloatTensor(data.astype(np.float32)) / norm_fix, sampling_rate)
def load_audio(audiopath, sampling_rate):
    """Load a .wav or .mp3 file as a mono FloatTensor of shape (1, samples),
    resampled to *sampling_rate* and clipped to [-1, 1]."""
    if audiopath[-4:] == '.wav':
        audio, lsr = load_wav_to_torch(audiopath)
    elif audiopath[-4:] == '.mp3':
        audio, lsr = librosa.load(audiopath, sr=sampling_rate)
        audio = torch.FloatTensor(audio)
    else:
        assert False, f"Unsupported audio format provided: {audiopath[-4:]}"
    # Remove any channel data.
    if len(audio.shape) > 1:
        if audio.shape[0] < 5:
            # Channels-first layout: keep the first channel.
            audio = audio[0]
        else:
            # Otherwise expect channels-last with few channels; keep the first.
            assert audio.shape[1] < 5
            audio = audio[:, 0]
    if lsr != sampling_rate:
        audio = torchaudio.functional.resample(audio, lsr, sampling_rate)
    # Check some assumptions about audio range. This should be automatically fixed in load_wav_to_torch, but might not be in some edge cases, where we should squawk.
    # '2' is arbitrarily chosen since it seems like audio will often "overdrive" the [-1,1] bounds.
    # NOTE(review): `not torch.any(audio < 0)` also flags clips containing no
    # negative samples at all (e.g. silence / DC offset) — confirm intended.
    if torch.any(audio > 2) or not torch.any(audio < 0):
        print(f"Error with {audiopath}. Max={audio.max()} min={audio.min()}")
    audio.clip_(-1, 1)
    return audio.unsqueeze(0)
# Empirical dynamic range of Tacotron-style log-MELs; used to map spectrograms
# into and out of the [-1, 1] range.
TACOTRON_MEL_MAX = 2.3143386840820312
TACOTRON_MEL_MIN = -11.512925148010254


def denormalize_tacotron_mel(norm_mel):
    """Map a [-1, 1]-normalized MEL back to the raw Tacotron value range."""
    span = TACOTRON_MEL_MAX - TACOTRON_MEL_MIN
    return (norm_mel + 1) / 2 * span + TACOTRON_MEL_MIN


def normalize_tacotron_mel(mel):
    """Map a raw Tacotron-range MEL into [-1, 1]."""
    span = TACOTRON_MEL_MAX - TACOTRON_MEL_MIN
    return (mel - TACOTRON_MEL_MIN) / span * 2 - 1
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """Log-compress magnitudes: ``log(clamp(x, clip_val) * C)``.

    PARAMS
    ------
    C: compression factor
    """
    clamped = torch.clamp(x, min=clip_val)
    return torch.log(clamped * C)
def dynamic_range_decompression(x, C=1):
    """Invert ``dynamic_range_compression``: ``exp(x) / C``.

    PARAMS
    ------
    C: compression factor used to compress
    """
    expanded = torch.exp(x)
    return expanded / C
def get_voices(extra_voice_dirs=None):
    """Enumerate the available voices.

    Scans the built-in voices directory plus any ``extra_voice_dirs`` for
    subdirectories; each subdirectory name is a voice, mapped to its list of
    ``.wav``/``.mp3`` clips and precomputed ``.pth`` latent files.

    Args:
        extra_voice_dirs: Optional list of additional directories to scan.

    Returns:
        Dict mapping voice name -> list of file paths.
    """
    # BUG FIX: the original used a mutable default argument (``[]``), which is
    # shared across calls and could accumulate caller mutations. Use None as
    # the conventional sentinel instead.
    if extra_voice_dirs is None:
        extra_voice_dirs = []
    dirs = [BUILTIN_VOICES_DIR] + extra_voice_dirs
    voices = {}
    for d in dirs:
        subs = os.listdir(d)
        for sub in subs:
            subj = os.path.join(d, sub)
            if os.path.isdir(subj):
                voices[sub] = (list(glob(f'{subj}/*.wav'))
                               + list(glob(f'{subj}/*.mp3'))
                               + list(glob(f'{subj}/*.pth')))
    return voices
def load_voice(voice, extra_voice_dirs=None):
    """Resolve a voice name into conditioning inputs.

    Args:
        voice: Voice name; the special value ``'random'`` means "no reference".
        extra_voice_dirs: Optional list of additional voice directories.

    Returns:
        ``(clips, None)`` where clips is a list of loaded 22.05kHz audio
        tensors, ``(None, latents)`` when the voice ships a single precomputed
        ``.pth`` latent, or ``(None, None)`` for the 'random' voice.
    """
    if voice == 'random':
        return None, None
    # BUG FIX: avoid the shared mutable default argument ``[]`` of the original.
    if extra_voice_dirs is None:
        extra_voice_dirs = []
    voices = get_voices(extra_voice_dirs)
    paths = voices[voice]
    if len(paths) == 1 and paths[0].endswith('.pth'):
        return None, torch.load(paths[0])
    conds = []
    for cond_path in paths:
        c = load_audio(cond_path, 22050)
        conds.append(c)
    return conds, None
def load_voices(voices, extra_voice_dirs=None):
    """Load and combine several voices into one conditioning input.

    Raw-audio voices are concatenated; latent voices are averaged element-wise.
    Mixing the two kinds (or mixing 'random' with a concrete voice) is not
    supported.

    Args:
        voices: Sequence of voice names.
        extra_voice_dirs: Optional list of additional voice directories.

    Returns:
        ``(clips, None)`` for raw-audio voices, ``(None, (latent0, latent1))``
        for latent voices, or ``(None, None)`` when 'random' is requested.
    """
    # BUG FIX: avoid the shared mutable default argument ``[]`` of the original.
    if extra_voice_dirs is None:
        extra_voice_dirs = []
    latents = []
    clips = []
    for voice in voices:
        if voice == 'random':
            if len(voices) > 1:
                print("Cannot combine a random voice with a non-random voice. Just using a random voice.")
            return None, None
        clip, latent = load_voice(voice, extra_voice_dirs)
        if latent is None:
            assert len(
                latents) == 0, "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
            clips.extend(clip)
        elif clip is None:
            assert len(
                clips) == 0, "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
            latents.append(latent)
    if len(latents) == 0:
        return clips, None
    # Average each latent component across all requested voices.
    latents_0 = torch.stack([l[0] for l in latents], dim=0).mean(dim=0)
    latents_1 = torch.stack([l[1] for l in latents], dim=0).mean(dim=0)
    return None, (latents_0, latents_1)
class TacotronSTFT(torch.nn.Module):
    """Computes Tacotron-style log-mel spectrograms from raw waveforms."""

    def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
                 n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
                 mel_fmax=8000.0):
        super(TacotronSTFT, self).__init__()
        self.n_mel_channels = n_mel_channels
        self.sampling_rate = sampling_rate
        self.stft_fn = STFT(filter_length, hop_length, win_length)
        # Imported lazily so librosa is only required when this class is used.
        from librosa.filters import mel as librosa_mel_fn
        mel_basis = librosa_mel_fn(
            sr=sampling_rate, n_fft=filter_length, n_mels=n_mel_channels, fmin=mel_fmin, fmax=mel_fmax)
        mel_basis = torch.from_numpy(mel_basis).float()
        # Buffer (not Parameter): moves with .to(device) but is not trained.
        self.register_buffer('mel_basis', mel_basis)

    def spectral_normalize(self, magnitudes):
        """Log-compress linear-scale magnitudes."""
        output = dynamic_range_compression(magnitudes)
        return output

    def spectral_de_normalize(self, magnitudes):
        """Invert ``spectral_normalize``."""
        output = dynamic_range_decompression(magnitudes)
        return output

    def mel_spectrogram(self, y):
        """Computes mel-spectrograms from a batch of waves
        PARAMS
        ------
        y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
        RETURNS
        -------
        mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
        """
        # Loose range sanity check, then hard-clip to the expected [-1, 1].
        assert (torch.min(y.data) >= -10)
        assert (torch.max(y.data) <= 10)
        y = torch.clip(y, min=-1, max=1)
        magnitudes, phases = self.stft_fn.transform(y)
        magnitudes = magnitudes.data
        mel_output = torch.matmul(self.mel_basis, magnitudes)
        mel_output = self.spectral_normalize(mel_output)
        return mel_output
def wav_to_univnet_mel(wav, do_normalization=False, device='cuda'):
    """Convert a 24kHz waveform into the 100-channel mel used by the vocoder,
    optionally normalized into the [-1, 1] Tacotron range."""
    converter = TacotronSTFT(1024, 256, 1024, 100, 24000, 0, 12000).to(device)
    mel = converter.mel_spectrogram(wav)
    return normalize_tacotron_mel(mel) if do_normalization else mel
import re
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2FeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Processor
from ruth_tts_transformer.utils.audio import load_audio
def max_alignment(s1, s2, skip_character='~', record=None):
    """Align ``s1`` against ``s2`` as well as possible.

    Returns a string of the same length as ``s1`` in which every character of
    ``s1`` that could not be matched (in order) against ``s2`` is replaced by
    ``skip_character``. A memo dict keyed on suffix lengths keeps the dynamic
    program polynomial.
    """
    if record is None:
        record = {}
    assert skip_character not in s1, f"Found the skip character {skip_character} in the provided string, {s1}"
    if not s1:
        return ''
    if not s2:
        return skip_character * len(s1)
    if s1 == s2:
        return s1
    if s1[0] == s2[0]:
        return s1[0] + max_alignment(s1[1:], s2[1:], skip_character, record)

    def _scored(sub1, sub2, key):
        # Memoized recursion; the score counts matched (non-skip) characters.
        if key not in record:
            aligned = max_alignment(sub1, sub2, skip_character, record)
            record[key] = (aligned, len(aligned.replace(skip_character, '')))
        return record[key]

    take_s1, take_s1_score = _scored(s1, s2[1:], (len(s1), len(s2) - 1))
    take_s2, take_s2_score = _scored(s1[1:], s2, (len(s1) - 1, len(s2)))
    if take_s1_score > take_s2_score:
        return take_s1
    return skip_character + take_s2
class Wav2VecAlignment:
    """
    Uses wav2vec2 to perform audio<->text alignment.
    """
    def __init__(self, device='cuda'):
        # Models live on CPU between calls; align() moves the CTC model to
        # `device` only for the duration of inference.
        self.model = Wav2Vec2ForCTC.from_pretrained("jbetker/wav2vec2-large-robust-ft-libritts-voxpopuli").cpu()
        self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"facebook/wav2vec2-large-960h")
        self.tokenizer = Wav2Vec2CTCTokenizer.from_pretrained('jbetker/tacotron-symbols')
        self.device = device

    def align(self, audio, expected_text, audio_sample_rate=24000):
        """Return, for each character of ``expected_text``, the sample offset in
        ``audio`` where it is spoken, as inferred from wav2vec2 CTC logits."""
        orig_len = audio.shape[-1]
        with torch.no_grad():
            self.model = self.model.to(self.device)
            audio = audio.to(self.device)
            # wav2vec2 expects 16kHz, zero-mean, unit-variance input.
            audio = torchaudio.functional.resample(audio, audio_sample_rate, 16000)
            clip_norm = (audio - audio.mean()) / torch.sqrt(audio.var() + 1e-7)
            logits = self.model(clip_norm).logits
            self.model = self.model.cpu()
        logits = logits[0]
        pred_string = self.tokenizer.decode(logits.argmax(-1).tolist())
        # Align the expected text against what the model actually heard; chars
        # the model missed become '~' and are interpolated further below.
        fixed_expectation = max_alignment(expected_text.lower(), pred_string)
        # Approximate number of audio samples represented by one logit frame.
        w2v_compression = orig_len // logits.shape[0]
        expected_tokens = self.tokenizer.encode(fixed_expectation)
        expected_chars = list(fixed_expectation)
        if len(expected_tokens) == 1:
            return [0]  # The alignment is simple; there is only one token.
        expected_tokens.pop(0)  # The first token is a given.
        expected_chars.pop(0)
        alignments = [0]
        def pop_till_you_win():
            # Advance past '~' placeholders (recording -1 markers) and return
            # the next real token to look for, or None when exhausted.
            if len(expected_tokens) == 0:
                return None
            popped = expected_tokens.pop(0)
            popped_char = expected_chars.pop(0)
            while popped_char == '~':
                alignments.append(-1)
                if len(expected_tokens) == 0:
                    return None
                popped = expected_tokens.pop(0)
                popped_char = expected_chars.pop(0)
            return popped
        next_expected_token = pop_till_you_win()
        for i, logit in enumerate(logits):
            top = logit.argmax()
            if next_expected_token == top:
                alignments.append(i * w2v_compression)
                if len(expected_tokens) > 0:
                    next_expected_token = pop_till_you_win()
                else:
                    break
        pop_till_you_win()
        if not (len(expected_tokens) == 0 and len(alignments) == len(expected_text)):
            torch.save([audio, expected_text], 'alignment_debug.pth')
            assert False, "Something went wrong with the alignment algorithm. I've dumped a file, 'alignment_debug.pth' to" \
                          "your current working directory. Please report this along with the file so it can get fixed."
        # Now fix up alignments. Anything with -1 should be interpolated.
        alignments.append(orig_len)  # This'll get removed but makes the algorithm below more readable.
        for i in range(len(alignments)):
            if alignments[i] == -1:
                for j in range(i+1, len(alignments)):
                    if alignments[j] != -1:
                        next_found_token = j
                        break
                # Linearly interpolate sample offsets between the two nearest
                # characters that were actually matched.
                for j in range(i, next_found_token):
                    gap = alignments[next_found_token] - alignments[i-1]
                    alignments[j] = (j-i+1) * gap // (next_found_token-i+1) + alignments[i-1]
        return alignments[:-1]

    def redact(self, audio, expected_text, audio_sample_rate=24000):
        """Cut out of ``audio`` every span whose text is enclosed in [brackets]."""
        if '[' not in expected_text:
            return audio
        splitted = expected_text.split('[')
        fully_split = [splitted[0]]
        for spl in splitted[1:]:
            assert ']' in spl, 'Every "[" character must be paired with a "]" with no nesting.'
            fully_split.extend(spl.split(']'))
        # At this point, fully_split is a list of strings, with every other string being something that should be redacted.
        non_redacted_intervals = []
        last_point = 0
        for i in range(len(fully_split)):
            if i % 2 == 0:
                end_interval = max(0, last_point + len(fully_split[i]) - 1)
                non_redacted_intervals.append((last_point, end_interval))
            last_point += len(fully_split[i])
        bare_text = ''.join(fully_split)
        alignments = self.align(audio, bare_text, audio_sample_rate)
        # Stitch together only the audio spans whose text should be kept.
        output_audio = []
        for nri in non_redacted_intervals:
            start, stop = nri
            output_audio.append(audio[:, alignments[start]:alignments[stop]])
        return torch.cat(output_audio, dim=-1)
import os
import re
import inflect
import torch
from tokenizers import Tokenizer
# Regular expression matching whitespace:
from unidecode import unidecode
_whitespace_re = re.compile(r'\s+')  # one or more whitespace characters

# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]
def expand_abbreviations(text):
    """Replace known abbreviations (e.g. 'dr.') with their spoken form."""
    for pattern, spoken in _abbreviations:
        text = pattern.sub(spoken, text)
    return text
# inflect engine used to spell out numbers and ordinals in English.
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')  # e.g. 1,234,567
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')  # e.g. 3.14
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
    """re.sub callback: spell out a matched ordinal like '3rd' via inflect."""
    return _inflect.number_to_words(m.group(0))
def _expand_number(m):
    """re.sub callback: spell out a matched integer, reading values between
    1000 and 3000 as years (e.g. 1987 -> 'nineteen eighty seven')."""
    num = int(m.group(0))
    if num > 1000 and num < 3000:
        if num == 2000:
            return 'two thousand'
        elif num > 2000 and num < 2010:
            # 2001..2009 read as "two thousand one" etc.
            return 'two thousand ' + _inflect.number_to_words(num % 100)
        elif num % 100 == 0:
            # Round centuries, e.g. 1900 -> "nineteen hundred".
            return _inflect.number_to_words(num // 100) + ' hundred'
        else:
            # Read as paired two-digit groups, e.g. 1905 -> "nineteen oh five".
            return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
    else:
        return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
    """Expand all numeric expressions in ``text`` into spoken English words."""
    # Order matters: strip separators first, then currency, decimals, ordinals,
    # and finally bare integers.
    substitutions = (
        (_comma_number_re, _remove_commas),
        (_pounds_re, r'\1 pounds'),
        (_dollars_re, _expand_dollars),
        (_decimal_number_re, _expand_decimal_point),
        (_ordinal_re, _expand_ordinal),
        (_number_re, _expand_number),
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text
def expand_numbers(text):
    """Public alias for ``normalize_numbers``."""
    return normalize_numbers(text)
def lowercase(text):
    """Return ``text`` lowercased (plain ``str.lower`` semantics)."""
    return text.lower()
def collapse_whitespace(text):
    """Collapse every run of whitespace in ``text`` into a single space."""
    return re.sub(r'\s+', ' ', text)
def convert_to_ascii(text):
    """Transliterate unicode ``text`` to its closest ASCII form via unidecode."""
    return unidecode(text)
def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    for step in (lowercase, collapse_whitespace):
        text = step(text)
    return text
def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    for step in (convert_to_ascii, lowercase, collapse_whitespace):
        text = step(text)
    return text
def english_cleaners(text):
    '''Pipeline for English text, including number and abbreviation expansion.'''
    for step in (convert_to_ascii, lowercase, expand_numbers,
                 expand_abbreviations, collapse_whitespace):
        text = step(text)
    # Double quotes are stripped entirely as the final step.
    return text.replace('"', '')
def lev_distance(s1, s2):
    """Compute the Levenshtein edit distance between two sequences."""
    # Keep s1 as the shorter sequence so the DP row stays as small as possible.
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    previous_row = list(range(len(s1) + 1))
    for row_idx, ch2 in enumerate(s2):
        current_row = [row_idx + 1]
        for col_idx, ch1 in enumerate(s1):
            if ch1 == ch2:
                current_row.append(previous_row[col_idx])
            else:
                current_row.append(1 + min(previous_row[col_idx],
                                           previous_row[col_idx + 1],
                                           current_row[-1]))
        previous_row = current_row
    return previous_row[-1]
# Default BPE vocabulary shipped with the package (data/tokenizer.json).
DEFAULT_VOCAB_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/tokenizer.json')
class VoiceBpeTokenizer:
    """Thin wrapper around a HuggingFace ``tokenizers`` BPE model, adding the
    English text-cleaning pipeline used by the TTS frontend."""

    def __init__(self, vocab_file=DEFAULT_VOCAB_FILE):
        # NOTE(review): if vocab_file is None, self.tokenizer is never assigned
        # and encode/decode will raise AttributeError — confirm this is intended.
        if vocab_file is not None:
            self.tokenizer = Tokenizer.from_file(vocab_file)

    def preprocess_text(self, txt):
        """Apply the full English cleaning pipeline to raw input text."""
        txt = english_cleaners(txt)
        return txt

    def encode(self, txt):
        """Clean ``txt`` and return its BPE token ids ('[SPACE]' stands in for ' ')."""
        txt = self.preprocess_text(txt)
        txt = txt.replace(' ', '[SPACE]')
        return self.tokenizer.encode(txt).ids

    def decode(self, seq):
        """Convert token ids back to text, undoing the '[SPACE]' substitution
        and dropping '[STOP]'/'[UNK]' markers."""
        if isinstance(seq, torch.Tensor):
            seq = seq.cpu().numpy()
        txt = self.tokenizer.decode(seq, skip_special_tokens=False).replace(' ', '')
        txt = txt.replace('[SPACE]', ' ')
        txt = txt.replace('[STOP]', '')
        txt = txt.replace('[UNK]', '')
        return txt
import re
def split_and_recombine_text(text, desired_length=200, max_length=300):
    """Split text it into chunks of a desired length trying to keep sentences intact.

    Scans the text with a small cursor state machine: sentence boundaries are
    recorded as candidate split points, and a chunk is committed once it
    reaches ``desired_length`` (or is force-split once it hits ``max_length``).
    """
    # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii
    text = re.sub(r'\n\n+', '\n', text)
    text = re.sub(r'\s+', ' ', text)
    text = re.sub(r'[“”]', '"', text)
    rv = []
    in_quote = False
    current = ""
    split_pos = []
    pos = -1
    end_pos = len(text) - 1

    def seek(delta):
        # Move the cursor by `delta` chars, keeping `current` and the quote
        # state in sync (works in both directions).
        nonlocal pos, in_quote, current
        is_neg = delta < 0
        for _ in range(abs(delta)):
            if is_neg:
                pos -= 1
                current = current[:-1]
            else:
                pos += 1
                current += text[pos]
            if text[pos] == '"':
                in_quote = not in_quote
        return text[pos]

    def peek(delta):
        # Look ahead/behind without moving the cursor.
        p = pos + delta
        return text[p] if p < end_pos and p >= 0 else ""

    def commit():
        # Emit the accumulated chunk and reset the split-point candidates.
        nonlocal rv, current, split_pos
        rv.append(current)
        current = ""
        split_pos = []

    while pos < end_pos:
        c = seek(1)
        # do we need to force a split?
        if len(current) >= max_length:
            if len(split_pos) > 0 and len(current) > (desired_length / 2):
                # we have at least one sentence and we are over half the desired length, seek back to the last split
                d = pos - split_pos[-1]
                seek(-d)
            else:
                # no full sentences, seek back until we are not in the middle of a word and split there
                while c not in '!?.\n ' and pos > 0 and len(current) > desired_length:
                    c = seek(-1)
            commit()
        # check for sentence boundaries
        elif not in_quote and (c in '!?\n' or (c == '.' and peek(1) in '\n ')):
            # seek forward if we have consecutive boundary markers but still within the max length
            while pos < len(text) - 1 and len(current) < max_length and peek(1) in '!?.':
                c = seek(1)
            split_pos.append(pos)
            if len(current) >= desired_length:
                commit()
        # treat end of quote as a boundary if its followed by a space or newline
        elif in_quote and peek(1) == '"' and peek(2) in '\n ':
            seek(2)
            split_pos.append(pos)
    rv.append(current)

    # clean up, remove lines with only whitespace or punctuation
    rv = [s.strip() for s in rv]
    rv = [s for s in rv if len(s) > 0 and not re.match(r'^[\s\.,;:!?]*$', s)]
    return rv
if __name__ == '__main__':
    import os
    import unittest

    # Self-test harness: run this module directly to execute these cases.
    class Test(unittest.TestCase):
        def test_split_and_recombine_text(self):
            text = """
            This is a sample sentence.
            This is another sample sentence.
            This is a longer sample sentence that should force a split inthemiddlebutinotinthislongword.
            "Don't split my quote... please"
            """
            self.assertEqual(split_and_recombine_text(text, desired_length=20, max_length=40),
                             ['This is a sample sentence.',
                              'This is another sample sentence.',
                              'This is a longer sample sentence that',
                              'should force a split',
                              'inthemiddlebutinotinthislongword.',
                              '"Don\'t split my quote... please"'])

        def test_split_and_recombine_text_2(self):
            text = """
            When you are really angry sometimes you use consecutive exclamation marks!!!!!! Is this a good thing to do?!?!?!
            I don't know but we should handle this situation..........................
            """
            self.assertEqual(split_and_recombine_text(text, desired_length=30, max_length=50),
                             ['When you are really angry sometimes you use',
                              'consecutive exclamation marks!!!!!!',
                              'Is this a good thing to do?!?!?!',
                              'I don\'t know but we should handle this situation.'])

        def test_split_and_recombine_text_3(self):
            # Regression test against a long prose fixture shipped with the package.
            text_src = os.path.join(os.path.dirname(__file__), '../data/riding_hood.txt')
            with open(text_src, 'r') as f:
                text = f.read()
            self.assertEqual(
                split_and_recombine_text(text),
                [
                    'Once upon a time there lived in a certain village a little country girl, the prettiest creature who was ever seen. Her mother was excessively fond of her; and her grandmother doted on her still more. This good woman had a little red riding hood made for her.',
                    'It suited the girl so extremely well that everybody called her Little Red Riding Hood. One day her mother, having made some cakes, said to her, "Go, my dear, and see how your grandmother is doing, for I hear she has been very ill. Take her a cake, and this little pot of butter."',
                    'Little Red Riding Hood set out immediately to go to her grandmother, who lived in another village. As she was going through the wood, she met with a wolf, who had a very great mind to eat her up, but he dared not, because of some woodcutters working nearby in the forest.',
                    'He asked her where she was going. The poor child, who did not know that it was dangerous to stay and talk to a wolf, said to him, "I am going to see my grandmother and carry her a cake and a little pot of butter from my mother." "Does she live far off?" said the wolf "Oh I say,"',
                    'answered Little Red Riding Hood; "it is beyond that mill you see there, at the first house in the village." "Well," said the wolf, "and I\'ll go and see her too. I\'ll go this way and go you that, and we shall see who will be there first."',
                    'The wolf ran as fast as he could, taking the shortest path, and the little girl took a roundabout way, entertaining herself by gathering nuts, running after butterflies, and gathering bouquets of little flowers.',
                    'It was not long before the wolf arrived at the old woman\'s house. He knocked at the door: tap, tap. "Who\'s there?" "Your grandchild, Little Red Riding Hood," replied the wolf, counterfeiting her voice; "who has brought you a cake and a little pot of butter sent you by mother."',
                    'The good grandmother, who was in bed, because she was somewhat ill, cried out, "Pull the bobbin, and the latch will go up."',
                    'The wolf pulled the bobbin, and the door opened, and then he immediately fell upon the good woman and ate her up in a moment, for it been more than three days since he had eaten.',
                    'He then shut the door and got into the grandmother\'s bed, expecting Little Red Riding Hood, who came some time afterwards and knocked at the door: tap, tap. "Who\'s there?"',
                    'Little Red Riding Hood, hearing the big voice of the wolf, was at first afraid; but believing her grandmother had a cold and was hoarse, answered, "It is your grandchild Little Red Riding Hood, who has brought you a cake and a little pot of butter mother sends you."',
                    'The wolf cried out to her, softening his voice as much as he could, "Pull the bobbin, and the latch will go up." Little Red Riding Hood pulled the bobbin, and the door opened.',
                    'The wolf, seeing her come in, said to her, hiding himself under the bedclothes, "Put the cake and the little pot of butter upon the stool, and come get into bed with me." Little Red Riding Hood took off her clothes and got into bed.',
                    'She was greatly amazed to see how her grandmother looked in her nightclothes, and said to her, "Grandmother, what big arms you have!" "All the better to hug you with, my dear." "Grandmother, what big legs you have!" "All the better to run with, my child." "Grandmother, what big ears you have!"',
                    '"All the better to hear with, my child." "Grandmother, what big eyes you have!" "All the better to see with, my child." "Grandmother, what big teeth you have got!" "All the better to eat you up with." And, saying these words, this wicked wolf fell upon Little Red Riding Hood, and ate her all up.',
                ]
            )

    unittest.main()
import os
import random
import uuid
from urllib import request
import torch
import torch.nn.functional as F
import progressbar
import torchaudio
from ruth_tts_transformer.ruth_tts.models.classifier import AudioMiniEncoderWithClassifierHead
from ruth_tts_transformer.ruth_tts.models.cvvp import CVVP
from ruth_tts_transformer.ruth_tts.models.diffusion_decoder import DiffusionTts
from ruth_tts_transformer.ruth_tts.models.autoregressive import UnifiedVoice
from tqdm import tqdm
from ruth_tts_transformer.ruth_tts.models.arch_util import TorchMelSpectrogram
from ruth_tts_transformer.ruth_tts.models.clvp import CLVP
from ruth_tts_transformer.ruth_tts.models.random_latent_generator import RandomLatentConverter
from ruth_tts_transformer.ruth_tts.models.vocoder import UnivNetGenerator
from ruth_tts_transformer.ruth_tts.utils.audio import wav_to_univnet_mel, denormalize_tacotron_mel
from ruth_tts_transformer.ruth_tts.utils.diffusion import SpacedDiffusion, space_timesteps, get_named_beta_schedule
from ruth_tts_transformer.ruth_tts.utils.tokenizer import VoiceBpeTokenizer
from ruth_tts_transformer.ruth_tts.utils.wav2vec_alignment import Wav2VecAlignment
# Shared progressbar instance for urlretrieve's download report hook.
pbar = None


def download_models(specific_models=None):
    """
    Call to download all the models that Tortoise uses.

    Args:
        specific_models: Optional collection of model filenames; when given,
            only those models are fetched. Files already present under
            ``.models/`` are always skipped.
    """
    MODELS = {
        'autoregressive.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/autoregressive.pth',
        'classifier.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/classifier.pth',
        'clvp2.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/clvp2.pth',
        'cvvp.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/cvvp.pth',
        'diffusion_decoder.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/diffusion_decoder.pth',
        'vocoder.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/vocoder.pth',
        'rlg_auto.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/rlg_auto.pth',
        'rlg_diffuser.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/rlg_diffuser.pth',
    }
    os.makedirs('.models', exist_ok=True)

    def show_progress(block_num, block_size, total_size):
        # urlretrieve report hook: lazily create the bar, advance it, and tear
        # it down (resetting the module-level instance) when done.
        global pbar
        if pbar is None:
            pbar = progressbar.ProgressBar(maxval=total_size)
            pbar.start()
        downloaded = block_num * block_size
        if downloaded < total_size:
            pbar.update(downloaded)
        else:
            pbar.finish()
            pbar = None
    for model_name, url in MODELS.items():
        if specific_models is not None and model_name not in specific_models:
            continue
        if os.path.exists(f'.models/{model_name}'):
            continue
        print(f'Downloading {model_name} from {url}...')
        request.urlretrieve(url, f'.models/{model_name}', show_progress)
        print('Done.')
def pad_or_truncate(t, length):
    """
    Utility function for forcing <t> to have the specified sequence length, whether by clipping it or padding it with 0s.
    """
    current = t.shape[-1]
    if current == length:
        return t
    if current < length:
        return F.pad(t, (0, length - current))
    return t[..., :length]
def load_discrete_vocoder_diffuser(trained_diffusion_steps=4000, desired_diffusion_steps=200, cond_free=True, cond_free_k=1):
    """
    Helper function to load a GaussianDiffusion instance configured for use as a vocoder.

    :param trained_diffusion_steps: Number of steps the diffusion model was trained with.
    :param desired_diffusion_steps: Number of (spaced) steps to actually run at inference.
    :param cond_free: Whether to enable conditioning-free guidance.
    :param cond_free_k: Strength of conditioning-free guidance.
    """
    return SpacedDiffusion(use_timesteps=space_timesteps(trained_diffusion_steps, [desired_diffusion_steps]), model_mean_type='epsilon',
                           model_var_type='learned_range', loss_type='mse', betas=get_named_beta_schedule('linear', trained_diffusion_steps),
                           conditioning_free=cond_free, conditioning_free_k=cond_free_k)
def format_conditioning(clip, cond_length=132300):
    """
    Converts the given conditioning signal to a MEL spectrogram and clips it as expected by the models.

    Clips shorter than ``cond_length`` are zero-padded; longer clips have a
    random ``cond_length`` window sampled from them. The result is moved to CUDA.
    """
    gap = clip.shape[-1] - cond_length
    if gap < 0:
        clip = F.pad(clip, pad=(0, abs(gap)))
    elif gap > 0:
        # Sample a random window so repeated calls see varied context.
        rand_start = random.randint(0, gap)
        clip = clip[:, rand_start:rand_start + cond_length]
    mel_clip = TorchMelSpectrogram()(clip.unsqueeze(0)).squeeze(0)
    return mel_clip.unsqueeze(0).cuda()
def fix_autoregressive_output(codes, stop_token, complain=True):
    """Pad coded audio past its stop token so it matches what the diffusion
    model was trained on.

    The autoregressive generator emits codes with no trailing padding, while
    the diffusion model expects the specific end-padding its DVAE produces
    (code 83 filler followed by 45, 45, 248). This rewrites ``codes`` in place
    accordingly; without it the generated speech ends with a harsh artifact.
    Highly specific to the DVAE in use — the constants will not transfer to a
    different DVAE.
    """
    matches = (codes == stop_token).nonzero()
    if len(matches) == 0:
        # No stop token: likely over-long speech; return the codes unchanged.
        if complain:
            print("No stop tokens found in one of the generated voice clips. This typically means the spoken audio is "
                  "too long. In some cases, the output will still be good, though. Listen to it and if it is missing words, "
                  "try breaking up your input text.")
        return codes
    codes[matches] = 83
    first_stop = matches.min().item()
    codes[first_stop:] = 83
    if first_stop - 3 < codes.shape[0]:
        codes[-3] = 45
        codes[-2] = 45
        codes[-1] = 248
    return codes
def do_spectrogram_diffusion(diffusion_model, diffuser, latents, conditioning_latents, temperature=1, verbose=True):
    """
    Uses the specified diffusion model to convert discrete codes into a spectrogram.

    :param temperature: Scales the initial noise; lower values give more conservative output.
    :param verbose: When True, shows diffusion sampling progress.
    """
    with torch.no_grad():
        output_seq_len = latents.shape[1] * 4 * 24000 // 22050  # This diffusion model converts from 22kHz spectrogram codes to a 24kHz spectrogram signal.
        output_shape = (latents.shape[0], 100, output_seq_len)
        precomputed_embeddings = diffusion_model.timestep_independent(latents, conditioning_latents, output_seq_len, False)
        noise = torch.randn(output_shape, device=latents.device) * temperature
        mel = diffuser.p_sample_loop(diffusion_model, output_shape, noise=noise,
                                     model_kwargs={'precomputed_aligned_embeddings': precomputed_embeddings},
                                     progress=verbose)
        # Diffusion runs in normalized [-1, 1] mel space; map back to raw mels.
        return denormalize_tacotron_mel(mel)[:,:,:output_seq_len]
def classify_audio_clip(clip):
    """
    Returns whether or not Tortoises' classifier thinks the given clip came from Tortoise.
    :param clip: torch tensor containing audio waveform data (get it from load_audio)
    :return: True if the clip was classified as coming from Tortoise and false if it was classified as real.
    """
    # Downloads the classifier weights on first use; inference runs on CPU.
    download_models(['classifier.pth'])
    classifier = AudioMiniEncoderWithClassifierHead(2, spec_dim=1, embedding_dim=512, depth=5, downsample_factor=4,
                                                    resnet_blocks=2, attn_blocks=4, num_attn_heads=4, base_channels=32,
                                                    dropout=0, kernel_size=5, distribute_zero_label=False)
    classifier.load_state_dict(torch.load('.models/classifier.pth', map_location=torch.device('cpu')))
    clip = clip.cpu().unsqueeze(0)
    results = F.softmax(classifier(clip), dim=-1)
    # Probability assigned to the "generated by Tortoise" class.
    return results[0][0]
class TextToSpeech:
"""
Main entry point into Tortoise.
"""
def __init__(self, autoregressive_batch_size=16, models_dir='.models', enable_redaction=True):
    """
    Constructor
    :param autoregressive_batch_size: Specifies how many samples to generate per batch. Lower this if you are seeing
                                      GPU OOM errors. Larger numbers generates slightly faster.
    :param models_dir: Where model weights are stored. This should only be specified if you are providing your own
                       models, otherwise use the defaults.
    :param enable_redaction: When true, text enclosed in brackets are automatically redacted from the spoken output
                             (but are still rendered by the model). This can be used for prompt engineering.
                             Default is true.
    """
    self.autoregressive_batch_size = autoregressive_batch_size
    self.enable_redaction = enable_redaction
    if self.enable_redaction:
        self.aligner = Wav2VecAlignment()
    self.tokenizer = VoiceBpeTokenizer()
    # Ensure all model weights are present locally before loading them.
    download_models()
    if os.path.exists(f'{models_dir}/autoregressive.ptt'):
        # Assume this is a traced directory.
        self.autoregressive = torch.jit.load(f'{models_dir}/autoregressive.ptt')
        self.diffusion = torch.jit.load(f'{models_dir}/diffusion_decoder.ptt')
    else:
        # All models are kept on CPU here and moved to GPU only while in use.
        self.autoregressive = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2, layers=30,
                                           model_dim=1024,
                                           heads=16, number_text_tokens=255, start_text_token=255, checkpointing=False,
                                           train_solo_embeddings=False).cpu().eval()
        self.autoregressive.load_state_dict(torch.load(f'{models_dir}/autoregressive.pth'))
        self.diffusion = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
                                      in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False, num_heads=16,
                                      layer_drop=0, unconditioned_percentage=0).cpu().eval()
        self.diffusion.load_state_dict(torch.load(f'{models_dir}/diffusion_decoder.pth'))
    self.clvp = CLVP(dim_text=768, dim_speech=768, dim_latent=768, num_text_tokens=256, text_enc_depth=20,
                     text_seq_len=350, text_heads=12,
                     num_speech_tokens=8192, speech_enc_depth=20, speech_heads=12, speech_seq_len=430,
                     use_xformers=True).cpu().eval()
    self.clvp.load_state_dict(torch.load(f'{models_dir}/clvp2.pth'))
    self.cvvp = CVVP(model_dim=512, transformer_heads=8, dropout=0, mel_codes=8192, conditioning_enc_depth=8, cond_mask_percentage=0,
                     speech_enc_depth=8, speech_mask_percentage=0, latent_multiplier=1).cpu().eval()
    self.cvvp.load_state_dict(torch.load(f'{models_dir}/cvvp.pth'))
    self.vocoder = UnivNetGenerator().cpu()
    self.vocoder.load_state_dict(torch.load(f'{models_dir}/vocoder.pth')['model_g'])
    self.vocoder.eval(inference=True)
    # Random latent generators (RLGs) are loaded lazily.
    self.rlg_auto = None
    self.rlg_diffusion = None
def get_conditioning_latents(self, voice_samples, return_mels=False):
    """
    Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent).
    These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic
    properties.
    :param voice_samples: List of 2 or more ~10 second reference clips, which should be torch tensors containing 22.05kHz waveform data.
    :param return_mels: When True, also returns the intermediate conditioning mels.
    """
    with torch.no_grad():
        # BUG FIX: normalize to a list *before* iterating. The original ran
        # `[v.to('cuda') for v in voice_samples]` first, so a bare-tensor
        # argument was iterated over its first dimension (splitting it into
        # per-row tensors) and the isinstance() check below never fired.
        if not isinstance(voice_samples, list):
            voice_samples = [voice_samples]
        voice_samples = [v.to('cuda') for v in voice_samples]
        auto_conds = []
        for vs in voice_samples:
            auto_conds.append(format_conditioning(vs))
        auto_conds = torch.stack(auto_conds, dim=1)
        # Move the autoregressive model to GPU only for this computation.
        self.autoregressive = self.autoregressive.cuda()
        auto_latent = self.autoregressive.get_conditioning(auto_conds)
        self.autoregressive = self.autoregressive.cpu()

        diffusion_conds = []
        for sample in voice_samples:
            # The diffuser operates at a sample rate of 24000 (except for the latent inputs)
            sample = torchaudio.functional.resample(sample, 22050, 24000)
            sample = pad_or_truncate(sample, 102400)
            cond_mel = wav_to_univnet_mel(sample.to('cuda'), do_normalization=False)
            diffusion_conds.append(cond_mel)
        diffusion_conds = torch.stack(diffusion_conds, dim=1)

        self.diffusion = self.diffusion.cuda()
        diffusion_latent = self.diffusion.get_conditioning(diffusion_conds)
        self.diffusion = self.diffusion.cpu()

    if return_mels:
        return auto_latent, diffusion_latent, auto_conds, diffusion_conds
    else:
        return auto_latent, diffusion_latent
def get_random_conditioning_latents(self):
    """
    Draws a random (autoregressive_latent, diffusion_latent) pair from the
    random-latent-generator (RLG) models, for synthesizing with a random voice.
    """
    # Lazy-load the RLG models.
    if self.rlg_auto is None:
        # NOTE(review): these checkpoint paths are hard-coded to '.models/' while the
        # other models in this class are loaded from a configurable models_dir in
        # __init__ — confirm this divergence is intentional.
        self.rlg_auto = RandomLatentConverter(1024).eval()
        self.rlg_auto.load_state_dict(torch.load('.models/rlg_auto.pth', map_location=torch.device('cpu')))
        self.rlg_diffusion = RandomLatentConverter(2048).eval()
        self.rlg_diffusion.load_state_dict(torch.load('.models/rlg_diffuser.pth', map_location=torch.device('cpu')))
    with torch.no_grad():
        # A constant scalar input; the converters produce the randomness internally.
        return self.rlg_auto(torch.tensor([0.0])), self.rlg_diffusion(torch.tensor([0.0]))
def tts_with_preset(self, text, preset='fast', **kwargs):
    """
    Calls TTS with one of a set of preset generation parameters. Options:
    'ultra_fast': Produces speech at a speed which belies the name of this repo. (Not really, but it's definitely fastest).
    'fast': Decent quality speech at a decent inference rate. A good choice for mass inference.
    'standard': Very good quality. This is generally about as good as you are going to get.
    'high_quality': Use if you want the absolute best. This is not really worth the compute, though.

    Any extra keyword arguments are forwarded to tts() and take precedence over both
    the tuned defaults and the preset values.
    """
    # Use generally found best tuning knobs for generation.
    settings = {'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0,
                'top_p': .8,
                'cond_free_k': 2.0, 'diffusion_temperature': 1.0}
    # Presets are defined here.
    presets = {
        'ultra_fast': {'num_autoregressive_samples': 16, 'diffusion_iterations': 30, 'cond_free': False},
        'fast': {'num_autoregressive_samples': 96, 'diffusion_iterations': 80},
        'standard': {'num_autoregressive_samples': 256, 'diffusion_iterations': 200},
        'high_quality': {'num_autoregressive_samples': 256, 'diffusion_iterations': 400},
    }
    settings.update(presets[preset])
    # BUGFIX: apply caller overrides last. The original updated *kwargs* with the
    # defaults and preset values, silently clobbering anything the caller passed in
    # (e.g. a custom temperature).
    settings.update(kwargs)
    return self.tts(text, **settings)
def tts(self, text, voice_samples=None, conditioning_latents=None, k=1, verbose=True,
        # autoregressive generation parameters follow
        num_autoregressive_samples=512, temperature=.8, length_penalty=1, repetition_penalty=2.0, top_p=.8, max_mel_tokens=500,
        # CLVP & CVVP parameters
        clvp_cvvp_slider=.5,
        # diffusion generation parameters follow
        diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,
        **hf_generate_kwargs):
    """
    Produces an audio clip of the given text being spoken with the given reference voice.
    :param text: Text to be spoken.
    :param voice_samples: List of 2 or more ~10 second reference clips which should be torch tensors containing 22.05kHz waveform data.
    :param conditioning_latents: A tuple of (autoregressive_conditioning_latent, diffusion_conditioning_latent), which
                                 can be provided in lieu of voice_samples. This is ignored unless voice_samples=None.
                                 Conditioning latents can be retrieved via get_conditioning_latents().
    :param k: The number of returned clips. The most likely (as determined by Tortoises' CLVP and CVVP models) clips are returned.
    :param verbose: Whether or not to print log messages indicating the progress of creating a clip. Default=true.
    ~~AUTOREGRESSIVE KNOBS~~
    :param num_autoregressive_samples: Number of samples taken from the autoregressive model, all of which are filtered using CLVP+CVVP.
           As Tortoise is a probabilistic model, more samples means a higher probability of creating something "great".
    :param temperature: The softmax temperature of the autoregressive model.
    :param length_penalty: A length penalty applied to the autoregressive decoder. Higher settings causes the model to produce more terse outputs.
    :param repetition_penalty: A penalty that prevents the autoregressive decoder from repeating itself during decoding. Can be used to reduce the incidence
                               of long silences or "uhhhhhhs", etc.
    :param top_p: P value used in nucleus sampling. (0,1]. Lower values mean the decoder produces more "likely" (aka boring) outputs.
    :param max_mel_tokens: Restricts the output length. (0,600] integer. Each unit is 1/20 of a second.
    :param typical_sampling: Turns typical sampling on or off. This sampling mode is discussed in this paper: https://arxiv.org/abs/2202.00666
                             I was interested in the premise, but the results were not as good as I was hoping. This is off by default, but
                             could use some tuning.
    :param typical_mass: The typical_mass parameter from the typical_sampling algorithm.
    ~~CLVP-CVVP KNOBS~~
    :param clvp_cvvp_slider: Controls the influence of the CLVP and CVVP models in selecting the best output from the autoregressive model.
                             [0,1]. Values closer to 1 will cause Tortoise to emit clips that follow the text more. Values closer to
                             0 will cause Tortoise to emit clips that more closely follow the reference clip (e.g. the voice sounds more
                             similar).
    ~~DIFFUSION KNOBS~~
    :param diffusion_iterations: Number of diffusion steps to perform. [0,4000]. More steps means the network has more chances to iteratively refine
                                 the output, which should theoretically mean a higher quality output. Generally a value above 250 is not noticeably better,
                                 however.
    :param cond_free: Whether or not to perform conditioning-free diffusion. Conditioning-free diffusion performs two forward passes for
                      each diffusion step: one with the outputs of the autoregressive model and one with no conditioning priors. The output
                      of the two is blended according to the cond_free_k value below. Conditioning-free diffusion is the real deal, and
                      dramatically improves realism.
    :param cond_free_k: Knob that determines how to balance the conditioning free signal with the conditioning-present signal. [0,inf].
                        As cond_free_k increases, the output becomes dominated by the conditioning-free signal.
                        Formula is: output=cond_present_output*(cond_free_k+1)-cond_absenct_output*cond_free_k
    :param diffusion_temperature: Controls the variance of the noise fed into the diffusion model. [0,1]. Values at 0
                                  are the "mean" prediction of the diffusion network and will sound bland and smeared.
    ~~OTHER STUFF~~
    :param hf_generate_kwargs: The huggingface Transformers generate API is used for the autoregressive transformer.
                               Extra keyword args fed to this function get forwarded directly to that API. Documentation
                               here: https://huggingface.co/docs/transformers/internal/generation_utils
    :return: Generated audio clip(s) as a torch tensor. Shape 1,S if k=1 else, (k,1,S) where S is the sample length.
             Sample rate is 24kHz.
    """
    # --- Tokenize and validate the input text. --------------------------------
    text_tokens = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
    text_tokens = F.pad(text_tokens, (0, 1))  # This may not be necessary.
    assert text_tokens.shape[-1] < 400, 'Too much text provided. Break the text up into separate segments and re-try inference.'

    # --- Resolve conditioning: reference clips > precomputed latents > random voice. ---
    auto_conds = None
    if voice_samples is not None:
        auto_conditioning, diffusion_conditioning, auto_conds, _ = self.get_conditioning_latents(voice_samples, return_mels=True)
    elif conditioning_latents is not None:
        auto_conditioning, diffusion_conditioning = conditioning_latents
    else:
        auto_conditioning, diffusion_conditioning = self.get_random_conditioning_latents()
    auto_conditioning = auto_conditioning.cuda()
    diffusion_conditioning = diffusion_conditioning.cuda()

    diffuser = load_discrete_vocoder_diffuser(desired_diffusion_steps=diffusion_iterations, cond_free=cond_free, cond_free_k=cond_free_k)

    with torch.no_grad():
        # --- Stage 1: sample candidate mel-token sequences from the autoregressive model. ---
        # Models are moved onto the GPU only for their stage, then back to the CPU,
        # keeping peak VRAM bounded to one stage at a time.
        samples = []
        num_batches = num_autoregressive_samples // self.autoregressive_batch_size
        stop_mel_token = self.autoregressive.stop_mel_token
        calm_token = 83  # This is the token for coding silence, which is fixed in place with "fix_autoregressive_output"
        self.autoregressive = self.autoregressive.cuda()
        if verbose:
            print("Generating autoregressive samples..")
        for b in tqdm(range(num_batches), disable=not verbose):
            codes = self.autoregressive.inference_speech(auto_conditioning, text_tokens,
                                                         do_sample=True,
                                                         top_p=top_p,
                                                         temperature=temperature,
                                                         num_return_sequences=self.autoregressive_batch_size,
                                                         length_penalty=length_penalty,
                                                         repetition_penalty=repetition_penalty,
                                                         max_generate_length=max_mel_tokens,
                                                         **hf_generate_kwargs)
            # Pad every batch out to max_mel_tokens so all batches can be stacked later.
            padding_needed = max_mel_tokens - codes.shape[1]
            codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
            samples.append(codes)
        self.autoregressive = self.autoregressive.cpu()

        # --- Stage 2: rank candidates with CLVP (text match) and optionally CVVP (voice match). ---
        clip_results = []
        self.clvp = self.clvp.cuda()
        self.cvvp = self.cvvp.cuda()
        if verbose:
            print("Computing best candidates using CLVP and CVVP")
        for batch in tqdm(samples, disable=not verbose):
            for i in range(batch.shape[0]):
                batch[i] = fix_autoregressive_output(batch[i], stop_mel_token)
            clvp = self.clvp(text_tokens.repeat(batch.shape[0], 1), batch, return_loss=False)
            if auto_conds is not None:
                # CVVP is only usable when raw conditioning mels are available
                # (i.e. voice_samples was provided); average its score over clips.
                cvvp_accumulator = 0
                for cl in range(auto_conds.shape[1]):
                    cvvp_accumulator = cvvp_accumulator + self.cvvp(auto_conds[:, cl].repeat(batch.shape[0], 1, 1), batch, return_loss=False)
                cvvp = cvvp_accumulator / auto_conds.shape[1]
                clip_results.append(clvp * clvp_cvvp_slider + cvvp * (1-clvp_cvvp_slider))
            else:
                clip_results.append(clvp)
        clip_results = torch.cat(clip_results, dim=0)
        samples = torch.cat(samples, dim=0)
        best_results = samples[torch.topk(clip_results, k=k).indices]
        self.clvp = self.clvp.cpu()
        self.cvvp = self.cvvp.cpu()
        del samples

        # The diffusion model actually wants the last hidden layer from the autoregressive model as conditioning
        # inputs. Re-produce those for the top results. This could be made more efficient by storing all of these
        # results, but will increase memory usage.
        self.autoregressive = self.autoregressive.cuda()
        best_latents = self.autoregressive(auto_conditioning.repeat(k, 1), text_tokens.repeat(k, 1),
                                           torch.tensor([text_tokens.shape[-1]], device=text_tokens.device), best_results,
                                           torch.tensor([best_results.shape[-1]*self.autoregressive.mel_length_compression], device=text_tokens.device),
                                           return_latent=True, clip_inputs=False)
        self.autoregressive = self.autoregressive.cpu()
        del auto_conditioning

        # --- Stage 3: diffuse each winning candidate into a spectrogram, then vocode to audio. ---
        if verbose:
            print("Transforming autoregressive outputs into audio..")
        wav_candidates = []
        self.diffusion = self.diffusion.cuda()
        self.vocoder = self.vocoder.cuda()
        for b in range(best_results.shape[0]):
            codes = best_results[b].unsqueeze(0)
            latents = best_latents[b].unsqueeze(0)

            # Find the first occurrence of the "calm" token and trim the codes to that.
            # NOTE(review): this loop variable shadows the ``k`` parameter (number of
            # clips). Harmless as written because ``k`` is not read after this point,
            # but fragile — consider renaming the loop variable.
            ctokens = 0
            for k in range(codes.shape[-1]):
                if codes[0, k] == calm_token:
                    ctokens += 1
                else:
                    ctokens = 0
                if ctokens > 8:  # 8 tokens gives the diffusion model some "breathing room" to terminate speech.
                    latents = latents[:, :k]
                    break

            mel = do_spectrogram_diffusion(self.diffusion, diffuser, latents, diffusion_conditioning,
                                           temperature=diffusion_temperature, verbose=verbose)
            wav = self.vocoder.inference(mel)
            wav_candidates.append(wav.cpu())
        self.diffusion = self.diffusion.cpu()
        self.vocoder = self.vocoder.cpu()

        def potentially_redact(clip, text):
            # Optionally strip redacted spans from the waveform via the aligner.
            if self.enable_redaction:
                return self.aligner.redact(clip.squeeze(1), text).unsqueeze(1)
            return clip
        wav_candidates = [potentially_redact(wav_candidate, text) for wav_candidate in wav_candidates]
        if len(wav_candidates) > 1:
            return wav_candidates
        return wav_candidates[0]
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from ruth_tts_transformer.ruth_tts.models.arch_util import Upsample, Downsample, normalization, zero_module, AttentionBlock
class ResBlock(nn.Module):
    """
    1-D residual block with GroupNorm/SiLU pre-activation and optional
    up/downsampling on the residual path.

    :param channels: number of input channels.
    :param dropout: dropout probability applied before the final conv.
    :param out_channels: number of output channels (defaults to ``channels``).
    :param use_conv: use a spatial conv (rather than 1x1) for the skip projection.
    :param use_scale_shift_norm: kept for interface compatibility; unused here.
    :param dims: kept for interface compatibility with conv_nd-style callers.
    :param up/down: resample the signal inside the block.
    :param kernel_size: 3 or 5 (padding is derived accordingly).
    :param do_checkpoint: wrap the forward pass in activation checkpointing.
    """
    def __init__(
        self,
        channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        up=False,
        down=False,
        kernel_size=3,
        do_checkpoint=True,
    ):
        super().__init__()
        self.channels = channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_scale_shift_norm = use_scale_shift_norm
        self.do_checkpoint = do_checkpoint
        padding = 1 if kernel_size == 3 else 2

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding),
        )

        self.updown = up or down

        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            # BUGFIX: nn.Conv1d takes (in_channels, out_channels, kernel_size, ...).
            # The original passed ``dims`` as the first positional argument — a
            # leftover from guided-diffusion's conv_nd(dims, in, out, k) helper —
            # which made the skip projection's channel counts wrong whenever
            # out_channels != channels.
            self.skip_connection = nn.Conv1d(
                channels, self.out_channels, kernel_size, padding=padding
            )
        else:
            self.skip_connection = nn.Conv1d(channels, self.out_channels, 1)

    def forward(self, x):
        # Activation checkpointing trades compute for memory during training.
        if self.do_checkpoint:
            return checkpoint(
                self._forward, x
            )
        else:
            return self._forward(x)

    def _forward(self, x):
        if self.updown:
            # Apply norm+act, resample, then the conv — so both branches see the
            # resampled resolution.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        h = self.out_layers(h)
        return self.skip_connection(x) + h
class AudioMiniEncoder(nn.Module):
    """
    Collapses a (batch, spec_dim, time) spectrogram into a single embedding
    vector per batch element: a conv stem, ``depth`` stages of ResBlocks plus a
    2x downsample, a projection to ``embedding_dim``, attention blocks, and
    finally the feature at the first time position.
    """
    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 base_channels=128,
                 depth=2,
                 resnet_blocks=2,
                 attn_blocks=4,
                 num_attn_heads=4,
                 dropout=0,
                 downsample_factor=2,
                 kernel_size=3):
        super().__init__()
        # Stem: project spectrogram channels onto the base width.
        self.init = nn.Sequential(
            nn.Conv1d(spec_dim, base_channels, 3, padding=1)
        )
        ch = base_channels
        res = []
        self.layers = depth
        for _ in range(depth):
            # Each stage: ``resnet_blocks`` residual blocks, then a width-doubling downsample.
            for _ in range(resnet_blocks):
                res.append(ResBlock(ch, dropout, do_checkpoint=False, kernel_size=kernel_size))
            res.append(Downsample(ch, use_conv=True, out_channels=ch * 2, factor=downsample_factor))
            ch *= 2
        self.res = nn.Sequential(*res)
        # Project the final conv width down to the requested embedding size.
        self.final = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            nn.Conv1d(ch, embedding_dim, 1)
        )
        self.attn = nn.Sequential(*[
            AttentionBlock(embedding_dim, num_attn_heads, do_checkpoint=False)
            for _ in range(attn_blocks)
        ])
        self.dim = embedding_dim

    def forward(self, x):
        h = self.init(x)
        h = self.res(h)
        h = self.final(h)
        # Attention blocks are checkpointed individually to save memory.
        for blk in self.attn:
            h = checkpoint(blk, h)
        # The embedding is read off the first time position.
        return h[:, :, 0]
class AudioMiniEncoderWithClassifierHead(nn.Module):
    """
    AudioMiniEncoder topped with a linear classification head.

    With ``labels`` the forward pass returns a cross-entropy loss; without, it
    returns raw logits. ``distribute_zero_label`` softens label 0 by moving 20%
    of its probability mass uniformly onto the other classes.
    """
    def __init__(self, classes, distribute_zero_label=True, **kwargs):
        super().__init__()
        self.enc = AudioMiniEncoder(**kwargs)
        self.head = nn.Linear(self.enc.dim, classes)
        self.num_classes = classes
        self.distribute_zero_label = distribute_zero_label

    def forward(self, x, labels=None):
        logits = self.head(self.enc(x))
        if labels is None:
            return logits
        if self.distribute_zero_label:
            oh_labels = nn.functional.one_hot(labels, num_classes=self.num_classes)
            zeros_indices = (labels == 0).unsqueeze(-1)
            # Distribute 20% of the probability mass on all classes when zero is specified, to compensate for dataset noise.
            zero_extra_mass = torch.full_like(oh_labels, dtype=torch.float,
                                              fill_value=.2 / (self.num_classes - 1))
            zero_extra_mass[:, 0] = -.2
            zero_extra_mass = zero_extra_mass * zeros_indices
            oh_labels = oh_labels + zero_extra_mass
        else:
            oh_labels = labels
        return nn.functional.cross_entropy(logits, oh_labels)
import math
import random
from abc import abstractmethod
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autocast
from ruth_tts_transformer.ruth_tts.models.arch_util import normalization, AttentionBlock
def is_latent(t):
    """True when *t* carries float32 data, i.e. a continuous latent rather than a token sequence."""
    return t.dtype == torch.float32
def is_sequence(t):
    """True when *t* carries int64 data, i.e. a discrete token sequence rather than a latent."""
    return t.dtype == torch.int64
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    # Geometric frequency ladder from 1 down to ~1/max_period.
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
    ).to(device=timesteps.device)
    phases = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1)
    if dim % 2:
        # Odd target width: append a zero column so the output is exactly `dim` wide.
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
class TimestepBlock(nn.Module):
    """Abstract base for modules whose forward pass also consumes a timestep embedding."""

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """An nn.Sequential that forwards the timestep embedding to any child that accepts it."""

    def forward(self, x, emb):
        for layer in self:
            # Timestep-aware children receive the embedding; plain modules do not.
            x = layer(x, emb) if isinstance(layer, TimestepBlock) else layer(x)
        return x
class ResBlock(TimestepBlock):
    """
    1-D residual block conditioned on a timestep embedding.

    The embedding is projected and either added to the hidden state or, with
    ``use_scale_shift_norm``, split into (scale, shift) applied after the output
    normalization (FiLM-style). ``efficient_config`` swaps the input/skip
    projections to 1x1 convolutions.
    """
    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        dims=2,
        kernel_size=3,
        efficient_config=True,
        use_scale_shift_norm=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_scale_shift_norm = use_scale_shift_norm
        padding = {1: 0, 3: 1, 5: 2}[kernel_size]
        # "Efficient" configuration: 1x1 convs for the in/skip projections.
        eff_kernel = 1 if efficient_config else 3
        eff_padding = 0 if efficient_config else 1

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, self.out_channels, eff_kernel, padding=eff_padding),
        )
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(
                emb_channels,
                # Twice the width when emitting (scale, shift) pairs.
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding),
        )
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        else:
            self.skip_connection = nn.Conv1d(channels, self.out_channels, eff_kernel, padding=eff_padding)

    def forward(self, x, emb):
        hidden = self.in_layers(x)
        cond = self.emb_layers(emb).type(hidden.dtype)
        # Broadcast the embedding over the trailing (time) dimension(s).
        while len(cond.shape) < len(hidden.shape):
            cond = cond[..., None]
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = torch.chunk(cond, 2, dim=1)
            hidden = out_norm(hidden) * (1 + scale) + shift
            hidden = out_rest(hidden)
        else:
            hidden = hidden + cond
            hidden = self.out_layers(hidden)
        return self.skip_connection(x) + hidden
class DiffusionLayer(TimestepBlock):
    """A timestep-conditioned ResBlock followed by an attention block — the standard DiffusionTts layer."""

    def __init__(self, model_channels, dropout, num_heads):
        super().__init__()
        self.resblk = ResBlock(model_channels, model_channels, dropout, model_channels,
                               dims=1, use_scale_shift_norm=True)
        self.attn = AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True)

    def forward(self, x, time_emb):
        return self.attn(self.resblk(x, time_emb))
class DiffusionTts(nn.Module):
    """
    Diffusion decoder that denoises a mel spectrogram, conditioned on (a) a
    timestep, (b) either discrete mel codes or autoregressive latents, and (c) a
    reference-clip conditioning latent. Outputs ``out_channels`` channels
    (predicted mean and variance stacked along the channel axis).
    """
    def __init__(
            self,
            model_channels=512,
            num_layers=8,
            in_channels=100,
            in_latent_channels=512,
            in_tokens=8193,
            out_channels=200,  # mean and variance
            dropout=0,
            use_fp16=False,
            num_heads=16,
            # Parameters for regularization.
            layer_drop=.1,
            unconditioned_percentage=.1,  # This implements a mechanism similar to what is used in classifier-free training.
    ):
        super().__init__()

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.dropout = dropout
        self.num_heads = num_heads
        self.unconditioned_percentage = unconditioned_percentage
        self.enable_fp16 = use_fp16
        self.layer_drop = layer_drop

        # Projects the noisy mel input onto the model width.
        self.inp_block = nn.Conv1d(in_channels, model_channels, 3, 1, 1)
        # MLP applied to the sinusoidal timestep embedding.
        self.time_embed = nn.Sequential(
            nn.Linear(model_channels, model_channels),
            nn.SiLU(),
            nn.Linear(model_channels, model_channels),
        )

        # Either code_converter or latent_converter is used, depending on what type of conditioning data is fed.
        # This model is meant to be able to be trained on both for efficiency purposes - it is far less computationally
        # complex to generate tokens, while generating latents will normally mean propagating through a deep autoregressive
        # transformer network.
        self.code_embedding = nn.Embedding(in_tokens, model_channels)
        self.code_converter = nn.Sequential(
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
        )
        self.code_norm = normalization(model_channels)
        self.latent_conditioner = nn.Sequential(
            nn.Conv1d(in_latent_channels, model_channels, 3, padding=1),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
        )
        # Encodes reference-clip mels into the conditioning latent used by get_conditioning().
        self.contextual_embedder = nn.Sequential(nn.Conv1d(in_channels,model_channels,3,padding=1,stride=2),
                                                 nn.Conv1d(model_channels, model_channels*2,3,padding=1,stride=2),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False))
        # Learned embedding substituted for the conditioning branch when it is masked out.
        self.unconditioned_embedding = nn.Parameter(torch.randn(1,model_channels,1))
        self.conditioning_timestep_integrator = TimestepEmbedSequential(
            DiffusionLayer(model_channels, dropout, num_heads),
            DiffusionLayer(model_channels, dropout, num_heads),
            DiffusionLayer(model_channels, dropout, num_heads),
        )
        # Merges the concatenated [input, conditioning] channels back to model width.
        self.integrating_conv = nn.Conv1d(model_channels*2, model_channels, kernel_size=1)
        # Auxiliary head predicting a mel directly from the conditioning embedding.
        self.mel_head = nn.Conv1d(model_channels, in_channels, kernel_size=3, padding=1)

        self.layers = nn.ModuleList([DiffusionLayer(model_channels, dropout, num_heads) for _ in range(num_layers)] +
                                    [ResBlock(model_channels, model_channels, dropout, dims=1, use_scale_shift_norm=True) for _ in range(3)])

        self.out = nn.Sequential(
            normalization(model_channels),
            nn.SiLU(),
            nn.Conv1d(model_channels, out_channels, 3, padding=1),
        )

    def get_grad_norm_parameter_groups(self):
        # Named parameter groups used for per-group gradient-norm logging.
        groups = {
            'minicoder': list(self.contextual_embedder.parameters()),
            'layers': list(self.layers.parameters()),
            # NOTE(review): latent_conditioner's parameters are included twice here —
            # almost certainly a copy/paste slip; harmless for norm logging but worth fixing.
            'code_converters': list(self.code_embedding.parameters()) + list(self.code_converter.parameters()) + list(self.latent_conditioner.parameters()) + list(self.latent_conditioner.parameters()),
            'timestep_integrator': list(self.conditioning_timestep_integrator.parameters()) + list(self.integrating_conv.parameters()),
            'time_embed': list(self.time_embed.parameters()),
        }
        return groups

    def get_conditioning(self, conditioning_input):
        """Encode one or more reference mels into a single conditioning vector per batch element."""
        # Accept a single clip (3-D) or a stack of clips (4-D).
        speech_conditioning_input = conditioning_input.unsqueeze(1) if len(
            conditioning_input.shape) == 3 else conditioning_input
        conds = []
        for j in range(speech_conditioning_input.shape[1]):
            conds.append(self.contextual_embedder(speech_conditioning_input[:, j]))
        conds = torch.cat(conds, dim=-1)
        # Average over the time axis to collapse to one vector.
        conds = conds.mean(dim=-1)
        return conds

    def timestep_independent(self, aligned_conditioning, conditioning_latent, expected_seq_len, return_code_pred):
        """
        Compute the conditioning embedding that does not depend on the timestep,
        so samplers can reuse it across diffusion steps.
        """
        # Shuffle aligned_latent to BxCxS format
        if is_latent(aligned_conditioning):
            aligned_conditioning = aligned_conditioning.permute(0, 2, 1)

        # The conditioning latent supplies a (scale, shift) pair for the code embedding.
        cond_scale, cond_shift = torch.chunk(conditioning_latent, 2, dim=1)
        if is_latent(aligned_conditioning):
            code_emb = self.latent_conditioner(aligned_conditioning)
        else:
            code_emb = self.code_embedding(aligned_conditioning).permute(0, 2, 1)
            code_emb = self.code_converter(code_emb)
        code_emb = self.code_norm(code_emb) * (1 + cond_scale.unsqueeze(-1)) + cond_shift.unsqueeze(-1)

        unconditioned_batches = torch.zeros((code_emb.shape[0], 1, 1), device=code_emb.device)
        # Mask out the conditioning branch for whole batch elements, implementing something similar to classifier-free guidance.
        if self.training and self.unconditioned_percentage > 0:
            unconditioned_batches = torch.rand((code_emb.shape[0], 1, 1),
                                               device=code_emb.device) < self.unconditioned_percentage
            code_emb = torch.where(unconditioned_batches, self.unconditioned_embedding.repeat(aligned_conditioning.shape[0], 1, 1),
                                   code_emb)
        # Stretch the conditioning to the target sequence length.
        expanded_code_emb = F.interpolate(code_emb, size=expected_seq_len, mode='nearest')

        if not return_code_pred:
            return expanded_code_emb
        else:
            mel_pred = self.mel_head(expanded_code_emb)
            # Multiply mel_pred by !unconditioned_branches, which drops the gradient on unconditioned branches. This is because we don't want that gradient being used to train parameters through the codes_embedder as it unbalances contributions to that network from the MSE loss.
            mel_pred = mel_pred * unconditioned_batches.logical_not()
            return expanded_code_emb, mel_pred

    def forward(self, x, timesteps, aligned_conditioning=None, conditioning_latent=None, precomputed_aligned_embeddings=None, conditioning_free=False, return_code_pred=False):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param aligned_conditioning: an aligned latent or sequence of tokens providing useful data about the sample to be produced.
        :param conditioning_latent: a pre-computed conditioning latent; see get_conditioning().
        :param precomputed_aligned_embeddings: Embeddings returned from self.timestep_independent()
        :param conditioning_free: When set, all conditioning inputs (including tokens and conditioning_input) will not be considered.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert precomputed_aligned_embeddings is not None or (aligned_conditioning is not None and conditioning_latent is not None)
        assert not (return_code_pred and precomputed_aligned_embeddings is not None)  # These two are mutually exclusive.

        unused_params = []
        if conditioning_free:
            # Replace the conditioning with the learned "unconditioned" embedding.
            code_emb = self.unconditioned_embedding.repeat(x.shape[0], 1, x.shape[-1])
            unused_params.extend(list(self.code_converter.parameters()) + list(self.code_embedding.parameters()))
            unused_params.extend(list(self.latent_conditioner.parameters()))
        else:
            if precomputed_aligned_embeddings is not None:
                code_emb = precomputed_aligned_embeddings
            else:
                code_emb, mel_pred = self.timestep_independent(aligned_conditioning, conditioning_latent, x.shape[-1], True)
                # Only one of the two conditioning paths was exercised; track the other's params.
                if is_latent(aligned_conditioning):
                    unused_params.extend(list(self.code_converter.parameters()) + list(self.code_embedding.parameters()))
                else:
                    unused_params.extend(list(self.latent_conditioner.parameters()))
            unused_params.append(self.unconditioned_embedding)

        time_emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
        code_emb = self.conditioning_timestep_integrator(code_emb, time_emb)
        x = self.inp_block(x)
        x = torch.cat([x, code_emb], dim=1)
        x = self.integrating_conv(x)
        for i, lyr in enumerate(self.layers):
            # Do layer drop where applicable. Do not drop first and last layers.
            if self.training and self.layer_drop > 0 and i != 0 and i != (len(self.layers)-1) and random.random() < self.layer_drop:
                unused_params.extend(list(lyr.parameters()))
            else:
                # First and last blocks will have autocast disabled for improved precision.
                with autocast(x.device.type, enabled=self.enable_fp16 and i != 0):
                    x = lyr(x, time_emb)

        x = x.float()
        out = self.out(x)

        # Involve probabilistic or possibly unused parameters in loss so we don't get DDP errors.
        extraneous_addition = 0
        for p in unused_params:
            extraneous_addition = extraneous_addition + p.mean()
        out = out + extraneous_addition * 0

        if return_code_pred:
            return out, mel_pred
        return out
if __name__ == '__main__':
    # Smoke test: run a forward pass with randomly generated inputs at the shapes
    # the model expects (mel clip, conditioning mel, timesteps).
    clip = torch.randn(2, 100, 400)
    aligned_latent = torch.randn(2,388,512)
    aligned_sequence = torch.randint(0,8192,(2,100))
    cond = torch.randn(2, 100, 400)
    ts = torch.LongTensor([600, 600])
    model = DiffusionTts(512, layer_drop=.3, unconditioned_percentage=.5)
    # Test with latent aligned conditioning
    #o = model(clip, ts, aligned_latent, cond)
    # Test with sequence aligned conditioning
    o = model(clip, ts, aligned_sequence, cond)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from torch.utils.checkpoint import checkpoint
from ruth_tts_transformer.ruth_tts.models.arch_util import AttentionBlock
from ruth_tts_transformer.ruth_tts.models.xtransformers import ContinuousTransformerWrapper, Encoder
def exists(val):
    """Return True when *val* is not None (falsy values like 0 and '' count as existing)."""
    return val is not None
def masked_mean(t, mask):
    """Mean of *t* along dim 1, counting only positions where *mask* is True."""
    zeroed = t.masked_fill(~mask, 0.)
    return zeroed.sum(dim=1) / mask.sum(dim=1)
class CollapsingTransformer(nn.Module):
    """
    Encodes a sequence with a rotary-embedding transformer, projects it to
    ``output_dims`` via a small conv/attention stack, and collapses it to a
    single vector by masked mean-pooling. During training a random fraction
    (``mask_percentage``) of positions is excluded from the pool.
    """
    def __init__(self, model_dim, output_dims, heads, dropout, depth, mask_percentage=0, **encoder_kwargs):
        super().__init__()
        self.transformer = ContinuousTransformerWrapper(
            max_seq_len=-1,
            use_pos_emb=False,
            attn_layers=Encoder(
                dim=model_dim,
                depth=depth,
                heads=heads,
                ff_dropout=dropout,
                ff_mult=1,
                attn_dropout=dropout,
                use_rmsnorm=True,
                ff_glu=True,
                rotary_pos_emb=True,
                **encoder_kwargs,
            ))
        self.pre_combiner = nn.Sequential(
            nn.Conv1d(model_dim, output_dims, 1),
            AttentionBlock(output_dims, num_heads=heads, do_checkpoint=False),
            nn.Conv1d(output_dims, output_dims, 1))
        self.mask_percentage = mask_percentage

    def forward(self, x, **transformer_kwargs):
        encoded = self.transformer(x, **transformer_kwargs)
        # pre_combiner is conv-based, so flip to (B, C, S) and back.
        encoded = encoded.permute(0, 2, 1)
        encoded = checkpoint(self.pre_combiner, encoded).permute(0, 2, 1)
        if self.training:
            # Randomly exclude a fraction of positions from the pooled mean.
            keep = torch.rand_like(encoded.float()) > self.mask_percentage
        else:
            keep = torch.ones_like(encoded.float()).bool()
        return masked_mean(encoded, keep)
class ConvFormatEmbedding(nn.Module):
    """An nn.Embedding wrapper that emits conv-style (batch, channels, sequence) tensors."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.emb = nn.Embedding(*args, **kwargs)

    def forward(self, x):
        # nn.Embedding yields (B, S, C); convolutions expect (B, C, S).
        return self.emb(x).permute(0, 2, 1)
class CVVP(nn.Module):
    """
    Contrastive Voice-Voice Pretraining model: embeds a conditioning mel and a
    speech mel (spectrogram or discrete codes) into a shared latent space and
    scores their similarity, CLIP-style.
    """
    def __init__(
            self,
            model_dim=512,
            transformer_heads=8,
            dropout=.1,
            conditioning_enc_depth=8,
            cond_mask_percentage=0,
            mel_channels=80,
            mel_codes=None,
            speech_enc_depth=8,
            speech_mask_percentage=0,
            latent_multiplier=1,
    ):
        super().__init__()
        latent_dim = latent_multiplier*model_dim
        # Learned temperature scaling the similarity logits (CLIP-style).
        self.temperature = nn.Parameter(torch.tensor(1.))

        # Conditioning branch: strided convs (4x downsample) + collapsing transformer.
        self.cond_emb = nn.Sequential(nn.Conv1d(mel_channels, model_dim//2, kernel_size=5, stride=2, padding=2),
                                      nn.Conv1d(model_dim//2, model_dim, kernel_size=3, stride=2, padding=1))
        self.conditioning_transformer = CollapsingTransformer(model_dim, model_dim, transformer_heads, dropout, conditioning_enc_depth, cond_mask_percentage)
        self.to_conditioning_latent = nn.Linear(latent_dim, latent_dim, bias=False)

        # Speech branch: raw mels go through a conv; discrete mel codes go through an embedding.
        if mel_codes is None:
            self.speech_emb = nn.Conv1d(mel_channels, model_dim, kernel_size=5, padding=2)
        else:
            self.speech_emb = ConvFormatEmbedding(mel_codes, model_dim)
        self.speech_transformer = CollapsingTransformer(model_dim, latent_dim, transformer_heads, dropout, speech_enc_depth, speech_mask_percentage)
        self.to_speech_latent = nn.Linear(latent_dim, latent_dim, bias=False)

    def get_grad_norm_parameter_groups(self):
        # Named parameter groups used for per-group gradient-norm logging.
        return {
            'conditioning': list(self.conditioning_transformer.parameters()),
            'speech': list(self.speech_transformer.parameters()),
        }

    def forward(
            self,
            mel_cond,
            mel_input,
            return_loss=False
    ):
        """
        Score the (mel_cond, mel_input) pair.

        :param mel_cond: conditioning mel, (B, mel_channels, S).
        :param mel_input: speech mel (or code sequence, when built with mel_codes).
        :param return_loss: when True, return the symmetric InfoNCE loss over the
                            batch; otherwise return per-pair similarity scores.
        """
        cond_emb = self.cond_emb(mel_cond).permute(0,2,1)
        enc_cond = self.conditioning_transformer(cond_emb)
        cond_latents = self.to_conditioning_latent(enc_cond)

        speech_emb = self.speech_emb(mel_input).permute(0,2,1)
        enc_speech = self.speech_transformer(speech_emb)
        speech_latents = self.to_speech_latent(enc_speech)

        # L2-normalize both latents so dot products are cosine similarities.
        cond_latents, speech_latents = map(lambda t: F.normalize(t, p=2, dim=-1), (cond_latents, speech_latents))
        temp = self.temperature.exp()

        if not return_loss:
            # Diagonal (per-pair) similarity only.
            sim = einsum('n d, n d -> n', cond_latents, speech_latents) * temp
            return sim

        # Full pairwise similarity matrix; symmetric cross-entropy against the diagonal.
        sim = einsum('i d, j d -> i j', cond_latents, speech_latents) * temp
        labels = torch.arange(cond_latents.shape[0], device=mel_input.device)
        loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
        return loss
if __name__ == '__main__':
    # Smoke test: compute the contrastive loss on random mel batches.
    cvvp = CVVP()
    cvvp(torch.randn(2, 80, 100),
         torch.randn(2, 80, 95),
         return_loss=True)
import torch
import torch.nn as nn
import torch.nn.functional as F
# Peak amplitude of 16-bit PCM audio; used to scale between float waveforms and int16 samples.
MAX_WAV_VALUE = 32768.0
class KernelPredictor(torch.nn.Module):
    ''' Kernel predictor for the location-variable convolutions'''
    def __init__(
        self,
        cond_channels,
        conv_in_channels,
        conv_out_channels,
        conv_layers,
        conv_kernel_size=3,
        kpnet_hidden_channels=64,
        kpnet_conv_size=3,
        kpnet_dropout=0.0,
        kpnet_nonlinear_activation="LeakyReLU",
        kpnet_nonlinear_activation_params=None,
    ):
        '''
        Args:
            cond_channels (int): number of channel for the conditioning sequence,
            conv_in_channels (int): number of channel for the input sequence,
            conv_out_channels (int): number of channel for the output sequence,
            conv_layers (int): number of layers
            conv_kernel_size (int): kernel size of the predicted location-variable convolutions
            kpnet_hidden_channels (int): hidden width of the predictor network
            kpnet_conv_size (int): kernel size of the predictor's own convolutions
            kpnet_dropout (float): dropout inside the residual blocks
            kpnet_nonlinear_activation (str): name of an ``nn`` activation class
            kpnet_nonlinear_activation_params (dict): kwargs for that activation;
                defaults to ``{"negative_slope": 0.1}``
        '''
        super().__init__()
        # Fix for the mutable-default-argument pitfall; the effective default
        # is unchanged from the original code.
        if kpnet_nonlinear_activation_params is None:
            kpnet_nonlinear_activation_params = {"negative_slope": 0.1}
        self.conv_in_channels = conv_in_channels
        self.conv_out_channels = conv_out_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_layers = conv_layers
        # Total channels needed to emit one kernel / one bias per layer.
        kpnet_kernel_channels = conv_in_channels * conv_out_channels * conv_kernel_size * conv_layers  # l_w
        kpnet_bias_channels = conv_out_channels * conv_layers  # l_b
        self.input_conv = nn.Sequential(
            nn.utils.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)),
            getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
        )
        self.residual_convs = nn.ModuleList()
        padding = (kpnet_conv_size - 1) // 2
        for _ in range(3):
            self.residual_convs.append(
                nn.Sequential(
                    nn.Dropout(kpnet_dropout),
                    nn.utils.weight_norm(
                        nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding,
                                  bias=True)),
                    getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
                    nn.utils.weight_norm(
                        nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding,
                                  bias=True)),
                    getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
                )
            )
        self.kernel_conv = nn.utils.weight_norm(
            nn.Conv1d(kpnet_hidden_channels, kpnet_kernel_channels, kpnet_conv_size, padding=padding, bias=True))
        self.bias_conv = nn.utils.weight_norm(
            nn.Conv1d(kpnet_hidden_channels, kpnet_bias_channels, kpnet_conv_size, padding=padding, bias=True))
    def forward(self, c):
        '''
        Predict per-position convolution kernels and biases from conditioning.
        Args:
            c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)
        Returns:
            kernels (Tensor): (batch, conv_layers, conv_in_channels, conv_out_channels, conv_kernel_size, cond_length)
            bias (Tensor): (batch, conv_layers, conv_out_channels, cond_length)
        '''
        batch, _, cond_length = c.shape
        c = self.input_conv(c)
        for residual_conv in self.residual_convs:
            residual_conv.to(c.device)
            c = c + residual_conv(c)
        k = self.kernel_conv(c)
        b = self.bias_conv(c)
        kernels = k.contiguous().view(
            batch,
            self.conv_layers,
            self.conv_in_channels,
            self.conv_out_channels,
            self.conv_kernel_size,
            cond_length,
        )
        bias = b.contiguous().view(
            batch,
            self.conv_layers,
            self.conv_out_channels,
            cond_length,
        )
        return kernels, bias
    def remove_weight_norm(self):
        # Strip weight-norm reparameterization for inference.
        nn.utils.remove_weight_norm(self.input_conv[0])
        nn.utils.remove_weight_norm(self.kernel_conv)
        nn.utils.remove_weight_norm(self.bias_conv)
        for block in self.residual_convs:
            nn.utils.remove_weight_norm(block[1])
            nn.utils.remove_weight_norm(block[3])
class LVCBlock(torch.nn.Module):
    '''the location-variable convolutions

    Upsamples the signal by `stride` via a transposed conv, then applies a stack
    of gated dilated convolutions whose kernels/biases are predicted per
    conditioning frame by a KernelPredictor.
    '''
    def __init__(
        self,
        in_channels,
        cond_channels,
        stride,
        dilations=[1, 3, 9, 27],
        lReLU_slope=0.2,
        conv_kernel_size=3,
        cond_hop_length=256,
        kpnet_hidden_channels=64,
        kpnet_conv_size=3,
        kpnet_dropout=0.0,
    ):
        super().__init__()
        self.cond_hop_length = cond_hop_length
        self.conv_layers = len(dilations)
        self.conv_kernel_size = conv_kernel_size
        # Predicts 2*in_channels outputs per layer: one half gates the other (GAU).
        self.kernel_predictor = KernelPredictor(
            cond_channels=cond_channels,
            conv_in_channels=in_channels,
            conv_out_channels=2 * in_channels,
            conv_layers=len(dilations),
            conv_kernel_size=conv_kernel_size,
            kpnet_hidden_channels=kpnet_hidden_channels,
            kpnet_conv_size=kpnet_conv_size,
            kpnet_dropout=kpnet_dropout,
            kpnet_nonlinear_activation_params={"negative_slope": lReLU_slope}
        )
        # Transposed conv upsampling by `stride`; padding chosen to exactly
        # multiply the length by `stride` for both even and odd strides.
        self.convt_pre = nn.Sequential(
            nn.LeakyReLU(lReLU_slope),
            nn.utils.weight_norm(nn.ConvTranspose1d(in_channels, in_channels, 2 * stride, stride=stride,
                                                    padding=stride // 2 + stride % 2, output_padding=stride % 2)),
        )
        self.conv_blocks = nn.ModuleList()
        for dilation in dilations:
            self.conv_blocks.append(
                nn.Sequential(
                    nn.LeakyReLU(lReLU_slope),
                    nn.utils.weight_norm(nn.Conv1d(in_channels, in_channels, conv_kernel_size,
                                                   padding=dilation * (conv_kernel_size - 1) // 2, dilation=dilation)),
                    nn.LeakyReLU(lReLU_slope),
                )
            )
    def forward(self, x, c):
        ''' forward propagation of the location-variable convolutions.
        Args:
            x (Tensor): the input sequence (batch, in_channels, in_length)
            c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)
        Returns:
            Tensor: the output sequence (batch, in_channels, in_length)
        '''
        _, in_channels, _ = x.shape  # (B, c_g, L')
        x = self.convt_pre(x)  # (B, c_g, stride * L')
        kernels, bias = self.kernel_predictor(c)
        for i, conv in enumerate(self.conv_blocks):
            output = conv(x)  # (B, c_g, stride * L')
            k = kernels[:, i, :, :, :, :]  # (B, 2 * c_g, c_g, kernel_size, cond_length)
            b = bias[:, i, :, :]  # (B, 2 * c_g, cond_length)
            output = self.location_variable_convolution(output, k, b,
                                                        hop_size=self.cond_hop_length)  # (B, 2 * c_g, stride * L'): LVC
            # Gated activation unit: sigmoid half gates the tanh half.
            x = x + torch.sigmoid(output[:, :in_channels, :]) * torch.tanh(
                output[:, in_channels:, :])  # (B, c_g, stride * L'): GAU
        return x
    def location_variable_convolution(self, x, kernel, bias, dilation=1, hop_size=256):
        ''' perform location-variable convolution operation on the input sequence (x) using the local convolution kernl.
        Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100.
        Args:
            x (Tensor): the input sequence (batch, in_channels, in_length).
            kernel (Tensor): the local convolution kernel (batch, in_channel, out_channels, kernel_size, kernel_length)
            bias (Tensor): the bias for the local convolution (batch, out_channels, kernel_length)
            dilation (int): the dilation of convolution.
            hop_size (int): the hop_size of the conditioning sequence.
        Returns:
            (Tensor): the output sequence after performing local convolution. (batch, out_channels, in_length).
        '''
        batch, _, in_length = x.shape
        batch, _, out_channels, kernel_size, kernel_length = kernel.shape
        assert in_length == (kernel_length * hop_size), "length of (x, kernel) is not matched"
        padding = dilation * int((kernel_size - 1) / 2)
        x = F.pad(x, (padding, padding), 'constant', 0)  # (batch, in_channels, in_length + 2*padding)
        # Slice into overlapping windows, one window per conditioning frame.
        x = x.unfold(2, hop_size + 2 * padding, hop_size)  # (batch, in_channels, kernel_length, hop_size + 2*padding)
        if hop_size < dilation:
            x = F.pad(x, (0, dilation), 'constant', 0)
        x = x.unfold(3, dilation,
                     dilation)  # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation)
        x = x[:, :, :, :, :hop_size]
        x = x.transpose(3, 4)  # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation)
        x = x.unfold(4, kernel_size, 1)  # (batch, in_channels, kernel_length, dilation, _, kernel_size)
        # Apply the per-frame kernels in one einsum.
        o = torch.einsum('bildsk,biokl->bolsd', x, kernel)
        o = o.to(memory_format=torch.channels_last_3d)
        bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d)
        o = o + bias
        o = o.contiguous().view(batch, out_channels, -1)
        return o
    def remove_weight_norm(self):
        # Strip weight-norm reparameterization for inference.
        self.kernel_predictor.remove_weight_norm()
        nn.utils.remove_weight_norm(self.convt_pre[1])
        for block in self.conv_blocks:
            nn.utils.remove_weight_norm(block[1])
class UnivNetGenerator(nn.Module):
    """UnivNet Generator

    Vocoder that maps a mel-spectrogram (conditioning) plus a noise sequence to
    a waveform in [-1, 1], upsampling by prod(strides) (= hop_length).
    """
    def __init__(self, noise_dim=64, channel_size=32, dilations=[1,3,9,27], strides=[8,8,4], lReLU_slope=.2, kpnet_conv_size=3,
                 # Below are MEL configurations options that this generator requires.
                 hop_length=256, n_mel_channels=100):
        super(UnivNetGenerator, self).__init__()
        self.mel_channel = n_mel_channels
        self.noise_dim = noise_dim
        self.hop_length = hop_length
        # NOTE(review): the next two assignments are no-ops kept from the original.
        channel_size = channel_size
        kpnet_conv_size = kpnet_conv_size
        self.res_stack = nn.ModuleList()
        # Each LVC block sees the cumulative upsampling factor as its hop length.
        hop_length = 1
        for stride in strides:
            hop_length = stride * hop_length
            self.res_stack.append(
                LVCBlock(
                    channel_size,
                    n_mel_channels,
                    stride=stride,
                    dilations=dilations,
                    lReLU_slope=lReLU_slope,
                    cond_hop_length=hop_length,
                    kpnet_conv_size=kpnet_conv_size
                )
            )
        self.conv_pre = \
            nn.utils.weight_norm(nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode='reflect'))
        self.conv_post = nn.Sequential(
            nn.LeakyReLU(lReLU_slope),
            nn.utils.weight_norm(nn.Conv1d(channel_size, 1, 7, padding=3, padding_mode='reflect')),
            nn.Tanh(),
        )
    def forward(self, c, z):
        '''
        Args:
            c (Tensor): the conditioning sequence of mel-spectrogram (batch, mel_channels, in_length)
            z (Tensor): the noise sequence (batch, noise_dim, in_length)
        Returns:
            Tensor: waveform (batch, 1, in_length * hop_length), in [-1, 1].
        '''
        z = self.conv_pre(z)  # (B, c_g, L)
        for res_block in self.res_stack:
            res_block.to(z.device)
            z = res_block(z, c)  # (B, c_g, L * s_0 * ... * s_i)
        z = self.conv_post(z)  # (B, 1, L * 256)
        return z
    def eval(self, inference=False):
        # NOTE: overrides nn.Module.eval() with an extra flag; only strips
        # weight norm when actually deploying for inference.
        super(UnivNetGenerator, self).eval()
        # don't remove weight norm while validation in training loop
        if inference:
            self.remove_weight_norm()
    def remove_weight_norm(self):
        # Strip weight-norm reparameterization from every submodule.
        nn.utils.remove_weight_norm(self.conv_pre)
        for layer in self.conv_post:
            if len(layer.state_dict()) != 0:
                nn.utils.remove_weight_norm(layer)
        for res_block in self.res_stack:
            res_block.remove_weight_norm()
    def inference(self, c, z=None):
        # pad input mel with zeros to cut artifact
        # see https://github.com/seungwonpark/melgan/issues/8
        # -11.5129 ~= log(1e-5), i.e. silence in the log-mel domain.
        zero = torch.full((c.shape[0], self.mel_channel, 10), -11.5129).to(c.device)
        mel = torch.cat((c, zero), dim=2)
        if z is None:
            z = torch.randn(c.shape[0], self.noise_dim, mel.size(2)).to(mel.device)
        audio = self.forward(mel, z)
        # Drop the samples generated from the silence padding.
        audio = audio[:, :, :-(self.hop_length * 10)]
        audio = audio.clamp(min=-1, max=1)
        return audio
if __name__ == '__main__':
    # Smoke test: 10 mel frames * hop 256 should produce 2560 audio samples.
    model = UnivNetGenerator()
    c = torch.randn(3, 100, 10)
    z = torch.randn(3, 64, 10)
    print(c.shape)
    y = model(c, z)
    print(y.shape)
    assert y.shape == torch.Size([3, 1, 2560])
    # Report trainable parameter count.
    pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(pytorch_total_params)
from functools import partial
import torch
import torch.nn.functional as F
from einops import rearrange
from rotary_embedding_torch import RotaryEmbedding, broadcat
from torch import nn
# helpers
def exists(val):
    """Return True when *val* holds a value (i.e. is not None)."""
    return not (val is None)
def default(val, d):
    """Return *val* if it is not None, otherwise the fallback *d*."""
    if val is None:
        return d
    return val
def cast_tuple(val, depth = 1):
    """Coerce *val* to a tuple: tuples pass through, lists are converted,
    anything else is repeated *depth* times."""
    if isinstance(val, tuple):
        return val
    if isinstance(val, list):
        return tuple(val)
    return (val,) * depth
def max_neg_value(t):
    """Most-negative finite value representable in t's dtype (attention mask fill)."""
    info = torch.finfo(t.dtype)
    return -info.max
def stable_softmax(t, dim = -1, alpha = 32 ** 2):
    """Numerically stabilized softmax: scale down by *alpha*, subtract the
    detached max, rescale, then softmax along *dim*."""
    shifted = t / alpha
    shifted = shifted - shifted.amax(dim = dim, keepdim = True).detach()
    return torch.softmax(shifted * alpha, dim = dim)
def route_args(router, args, depth):
    """Split keyword arguments between the attention (f) and feed-forward (g)
    halves of each transformer layer.

    Args:
        router: maps an argument name to a per-layer sequence of
            (send_to_f, send_to_g) boolean pairs.
        args: keyword arguments supplied to the transformer forward pass.
        depth: number of (attention, feed-forward) layer pairs.

    Returns:
        A list of `depth` (f_kwargs, g_kwargs) tuples; unrouted args are dropped.
    """
    routed_args = [(dict(), dict()) for _ in range(depth)]
    matched_keys = [key for key in args.keys() if key in router]
    for key in matched_keys:
        val = args[key]
        # Bugfix(style): the original reused the name `depth` as this loop
        # index, shadowing the parameter; renamed to `layer_idx` for clarity.
        for layer_idx, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
            new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
            routed_args[layer_idx] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
    return routed_args
# classes
class SequentialSequence(nn.Module):
    """Runs (attention, feed-forward) layer pairs sequentially with residual
    connections, routing kwargs to each half via `route_args`."""
    def __init__(self, layers, args_route = {}, layer_dropout = 0.):
        super().__init__()
        assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
        self.layers = layers
        self.args_route = args_route
        # NOTE(review): layer_dropout is stored but never used in this class.
        self.layer_dropout = layer_dropout
    def forward(self, x, **kwargs):
        # Route kwargs (e.g. `mask`) to the correct half of each layer pair.
        args = route_args(self.args_route, kwargs, len(self.layers))
        layers_and_args = list(zip(self.layers, args))
        for (f, g), (f_args, g_args) in layers_and_args:
            # Residual connections around both halves.
            x = x + f(x, **f_args)
            x = x + g(x, **g_args)
        return x
class DivideMax(nn.Module):
    """Normalizes activations by dividing by their (detached) maximum along `dim`."""
    def __init__(self, dim):
        super().__init__()
        self.dim = dim
    def forward(self, x):
        peak = x.amax(dim = self.dim, keepdim = True).detach()
        return x / peak
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
    """Scales a wrapped layer's output by a learned per-channel factor
    (LayerScale, https://arxiv.org/abs/2103.17239); deeper layers start with
    smaller init so the residual branch contributes less initially."""
    def __init__(self, dim, depth, fn):
        super().__init__()
        # Depth-dependent initial scale, per the CaiT paper.
        if depth <= 18:
            init_eps = 0.1
        elif depth > 18 and depth <= 24:
            init_eps = 1e-5
        else:
            init_eps = 1e-6
        scale = torch.zeros(1, 1, dim).fill_(init_eps)
        self.scale = nn.Parameter(scale)
        self.fn = fn
    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) * self.scale
# layer norm
class PreNorm(nn.Module):
    """Pre-LayerNorm wrapper; with `sandwich=True` also normalizes the output
    (sandwich norm)."""
    def __init__(self, dim, fn, sandwich = False):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
        self.fn = fn
    def forward(self, x, **kwargs):
        x = self.norm(x)
        x = self.fn(x, **kwargs)
        return self.norm_out(x)
# feed forward
class GEGLU(nn.Module):
    """Gated GELU: splits the last dimension in half and gates one half with
    the GELU of the other."""
    def forward(self, x):
        value, gate = x.chunk(2, dim = -1)
        return value * F.gelu(gate)
class FeedForward(nn.Module):
    """Transformer feed-forward block with a GEGLU gate.

    Projects to ``2 * dim * mult`` (the factor of two feeds GEGLU's value/gate
    split), gates, applies dropout, then projects back to ``dim``.
    """
    def __init__(self, dim, dropout = 0., mult = 4.):
        super().__init__()
        # Bugfix: `mult` defaults to a float (4.), so `dim * mult` is a float;
        # nn.Linear requires integer feature counts. Cast once here.
        inner_dim = int(dim * mult)
        self.net = nn.Sequential(
            nn.Linear(dim, inner_dim * 2),
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim)
        )
    def forward(self, x):
        """Apply the block; shape (..., dim) -> (..., dim)."""
        return self.net(x)
# Attention
class Attention(nn.Module):
    """Standard multi-head self-attention, optionally causal, with an optional
    boolean padding mask of shape (batch, seq)."""
    def __init__(self, dim, seq_len, causal = True, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.seq_len = seq_len
        # 1/sqrt(dim_head) scaling applied to queries.
        self.scale = dim_head ** -0.5
        self.causal = causal
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )
    def forward(self, x, mask = None):
        """x: (b, n, dim); mask: optional (b, n) bool, True = keep."""
        b, n, _, h, device = *x.shape, self.heads, x.device
        softmax = torch.softmax
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        # Split heads: (b, n, h*d) -> (b, h, n, d).
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
        q = q * self.scale
        dots = torch.einsum('b h i d, b h j d -> b h i j', q, k)
        mask_value = max_neg_value(dots)
        if exists(mask):
            # Mask out attention *to* padded key positions.
            mask = rearrange(mask, 'b j -> b () () j')
            dots.masked_fill_(~mask, mask_value)
            del mask
        if self.causal:
            # Upper-triangular mask blocks attention to future positions.
            i, j = dots.shape[-2:]
            mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
            dots.masked_fill_(mask, mask_value)
        attn = softmax(dots, dim=-1)
        out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
        # Merge heads back: (b, h, n, d) -> (b, n, h*d).
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out
# main transformer class
class Transformer(nn.Module):
    """Stack of (Attention, FeedForward) pairs, each wrapped in PreNorm and
    LayerScale and executed sequentially with residuals. A `mask` kwarg is
    routed to the attention halves only."""
    def __init__(
        self,
        *,
        dim,
        depth,
        seq_len,
        causal = True,
        heads = 8,
        dim_head = 64,
        ff_mult = 4,
        attn_dropout = 0.,
        ff_dropout = 0.,
        sparse_attn = False,
        sandwich_norm = False,
    ):
        super().__init__()
        layers = nn.ModuleList([])
        sparse_layer = cast_tuple(sparse_attn, depth)
        # NOTE(review): the loop rebinds `sparse_attn` per layer but the value
        # is never used in the body — sparse attention appears unimplemented here.
        for ind, sparse_attn in zip(range(depth), sparse_layer):
            attn = Attention(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
            ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)
            layers.append(nn.ModuleList([
                LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
                LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
            ]))
        execute_type = SequentialSequence
        # Send `mask` to the attention (first) half of every layer pair.
        route_attn = ((True, False),) * depth
        attn_route_map = {'mask': route_attn}
        self.layers = execute_type(layers, args_route = attn_route_map)
    def forward(self, x, **kwargs):
        return self.layers(x, **kwargs)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from ruth_tts_transformer.ruth_tts.models.arch_util import CheckpointedXTransformerEncoder
from ruth_tts_transformer.ruth_tts.models.transformer import Transformer
from ruth_tts_transformer.ruth_tts.models.xtransformers import Encoder
def exists(val):
    """Return True when *val* is not None."""
    if val is None:
        return False
    return True
def masked_mean(t, mask, dim = 1):
    """Mean of *t* over the sequence axis, counting only True mask positions.

    t: (b, n, d); mask: (b, n) boolean.
    NOTE(review): the *dim* parameter is accepted but the reduction is
    hard-coded to dim=1, exactly as in the original.
    """
    zeroed = t.masked_fill(~mask[:, :, None], 0.)
    total = zeroed.sum(dim = 1)
    count = mask.sum(dim = 1)[..., None]
    return total / count
class CLVP(nn.Module):
    """
    CLIP model retrofitted for performing contrastive evaluation between tokenized audio data and the corresponding
    transcribed text.
    Originally from https://github.com/lucidrains/DALLE-pytorch/blob/main/dalle_pytorch/dalle_pytorch.py
    """
    def __init__(
            self,
            *,
            dim_text=512,
            dim_speech=512,
            dim_latent=512,
            num_text_tokens=256,
            text_enc_depth=6,
            text_seq_len=120,
            text_heads=8,
            num_speech_tokens=8192,
            speech_enc_depth=6,
            speech_heads=8,
            speech_seq_len=250,
            text_mask_percentage=0,
            voice_mask_percentage=0,
            wav_token_compression=1024,
            use_xformers=False,
    ):
        """Build the paired text/speech encoders and the shared latent projections.

        text_mask_percentage / voice_mask_percentage randomly drop tokens during
        training; use_xformers swaps the lucidrains Transformer for an
        x-transformers Encoder with rotary embeddings (no learned pos emb).
        """
        super().__init__()
        self.text_emb = nn.Embedding(num_text_tokens, dim_text)
        self.to_text_latent = nn.Linear(dim_text, dim_latent, bias=False)
        self.speech_emb = nn.Embedding(num_speech_tokens, dim_speech)
        self.to_speech_latent = nn.Linear(dim_speech, dim_latent, bias=False)
        if use_xformers:
            self.text_transformer = CheckpointedXTransformerEncoder(
                needs_permute=False,
                exit_permute=False,
                max_seq_len=-1,
                attn_layers=Encoder(
                    dim=dim_text,
                    depth=text_enc_depth,
                    heads=text_heads,
                    ff_dropout=.1,
                    ff_mult=2,
                    attn_dropout=.1,
                    use_rmsnorm=True,
                    ff_glu=True,
                    rotary_pos_emb=True,
                ))
            self.speech_transformer = CheckpointedXTransformerEncoder(
                needs_permute=False,
                exit_permute=False,
                max_seq_len=-1,
                attn_layers=Encoder(
                    dim=dim_speech,
                    depth=speech_enc_depth,
                    heads=speech_heads,
                    ff_dropout=.1,
                    ff_mult=2,
                    attn_dropout=.1,
                    use_rmsnorm=True,
                    ff_glu=True,
                    rotary_pos_emb=True,
                ))
        else:
            self.text_transformer = Transformer(causal=False, seq_len=text_seq_len, dim=dim_text, depth=text_enc_depth,
                                                heads=text_heads)
            self.speech_transformer = Transformer(causal=False, seq_len=speech_seq_len, dim=dim_speech,
                                                  depth=speech_enc_depth, heads=speech_heads)
        # Learned contrastive temperature (stored as a log; exponentiated in forward).
        self.temperature = nn.Parameter(torch.tensor(1.))
        self.text_mask_percentage = text_mask_percentage
        self.voice_mask_percentage = voice_mask_percentage
        self.wav_token_compression = wav_token_compression
        self.xformers = use_xformers
        if not use_xformers:
            self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
            # NOTE(review): sized by num_speech_tokens rather than speech_seq_len,
            # matching the original code — verify against upstream before changing.
            self.speech_pos_emb = nn.Embedding(num_speech_tokens, dim_speech)
    def forward(
            self,
            text,
            speech_tokens,
            return_loss=False
    ):
        """Contrastively compare token sequences.

        Args:
            text: (b, text_seq) integer token ids.
            speech_tokens: (b, speech_seq) integer token ids.
            return_loss: if True, return the symmetric CLIP loss over the
                batch; otherwise return per-pair similarity scores.
        """
        b, device = text.shape[0], text.device
        if self.training:
            # Random token dropout acts as augmentation during training.
            text_mask = torch.rand_like(text.float()) > self.text_mask_percentage
            voice_mask = torch.rand_like(speech_tokens.float()) > self.voice_mask_percentage
        else:
            text_mask = torch.ones_like(text.float()).bool()
            voice_mask = torch.ones_like(speech_tokens.float()).bool()
        text_emb = self.text_emb(text)
        speech_emb = self.speech_emb(speech_tokens)
        if not self.xformers:
            # Learned absolute position embeddings (rotary handles this otherwise).
            text_emb += self.text_pos_emb(torch.arange(text.shape[1], device=device))
            speech_emb += self.speech_pos_emb(torch.arange(speech_emb.shape[1], device=device))
        enc_text = self.text_transformer(text_emb, mask=text_mask)
        enc_speech = self.speech_transformer(speech_emb, mask=voice_mask)
        # Pool over unmasked positions only.
        text_latents = masked_mean(enc_text, text_mask, dim=1)
        speech_latents = masked_mean(enc_speech, voice_mask, dim=1)
        text_latents = self.to_text_latent(text_latents)
        speech_latents = self.to_speech_latent(speech_latents)
        # L2-normalize so dot products are cosine similarities.
        text_latents, speech_latents = map(lambda t: F.normalize(t, p=2, dim=-1), (text_latents, speech_latents))
        temp = self.temperature.exp()
        if not return_loss:
            # Similarity of each matched (text, speech) pair only.
            sim = einsum('n d, n d -> n', text_latents, speech_latents) * temp
            return sim
        # Full pairwise similarity matrix; the diagonal holds the true pairs.
        sim = einsum('i d, j d -> i j', text_latents, speech_latents) * temp
        labels = torch.arange(b, device=device)
        # Symmetric cross-entropy over rows and columns, as in CLIP.
        loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
        return loss
if __name__ == '__main__':
    # Smoke test for the contrastive model.
    clip = CLVP(text_mask_percentage=.2, voice_mask_percentage=.2)
    # Bugfix: CLVP.forward takes (text, speech_tokens, return_loss). The
    # original call also passed two length tensors left over from an older
    # signature, which made this smoke test raise a TypeError.
    clip(torch.randint(0, 256, (2, 120)),
         torch.randint(0, 8192, (2, 250)),
         return_loss=True)
    nonloss = clip(torch.randint(0, 256, (2, 120)),
                   torch.randint(0, 8192, (2, 250)),
                   return_loss=False)
    print(nonloss.shape)
import functools
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
from ruth_tts_transformer.ruth_tts.models.xtransformers import ContinuousTransformerWrapper, RelativePositionBias
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for param in module.parameters():
        param.detach().zero_()
    return module
class GroupNorm32(nn.GroupNorm):
    """GroupNorm computed in float32, with the result cast back to the input dtype."""
    def forward(self, x):
        out = super().forward(x.float())
        return out.type(x.dtype)
def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    # Scale the group count with channel width...
    if channels <= 16:
        groups = 8
    elif channels <= 64:
        groups = 16
    else:
        groups = 32
    # ...then halve until it divides the channel count evenly.
    while channels % groups != 0:
        groups = int(groups / 2)
    assert groups > 2
    return GroupNorm32(groups, channels)
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
    """
    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads
    def forward(self, qkv, mask=None, rel_pos=None):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :param mask: optional multiplicative mask applied to the softmaxed
            weights (post-softmax, see note below).
        :param rel_pos: optional relative-position bias module applied to the
            raw attention logits.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        # Fold heads into the batch dim, then split q/k/v along channels.
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        # Split the 1/sqrt(ch) scale across q and k for fp16 stability.
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = torch.einsum(
            "bct,bcs->bts", q * scale, k * scale
        )  # More stable with f16 than dividing afterwards
        if rel_pos is not None:
            weight = rel_pos(weight.reshape(bs, self.n_heads, weight.shape[-2], weight.shape[-1])).reshape(bs * self.n_heads, weight.shape[-2], weight.shape[-1])
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        if mask is not None:
            # The proper way to do this is to mask before the softmax using -inf, but that doesn't work properly on CPUs.
            mask = mask.repeat(self.n_heads, 1).unsqueeze(1)
            weight = weight * mask
        a = torch.einsum("bts,bcs->bct", weight, v)
        return a.reshape(bs, -1, length)
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """
    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        do_checkpoint=True,
        relative_pos_embeddings=False,
    ):
        super().__init__()
        self.channels = channels
        # NOTE(review): stored but not referenced in this class's forward.
        self.do_checkpoint = do_checkpoint
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.norm = normalization(channels)
        self.qkv = nn.Conv1d(channels, channels * 3, 1)
        # split heads before split qkv
        self.attention = QKVAttentionLegacy(self.num_heads)
        # Zero-init output projection so the block starts as identity.
        self.proj_out = zero_module(nn.Conv1d(channels, channels, 1))
        if relative_pos_embeddings:
            self.relative_pos_embeddings = RelativePositionBias(scale=(channels // self.num_heads) ** .5, causal=False, heads=num_heads, num_buckets=32, max_distance=64)
        else:
            self.relative_pos_embeddings = None
    def forward(self, x, mask=None):
        # Flatten any trailing spatial dims to one axis, attend, restore shape.
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv, mask, self.relative_pos_embeddings)
        h = self.proj_out(h)
        # Residual connection.
        return (x + h).reshape(b, c, *spatial)
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    """
    def __init__(self, channels, use_conv, out_channels=None, factor=4):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.factor = factor
        if use_conv:
            # Kernel 5 / padding 2 keeps the (upsampled) length unchanged.
            self.conv = nn.Conv1d(self.channels, self.out_channels, 5, padding=2)
    def forward(self, x):
        assert x.shape[1] == self.channels
        upsampled = F.interpolate(x, scale_factor=self.factor, mode="nearest")
        if not self.use_conv:
            return upsampled
        return self.conv(upsampled)
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    """
    def __init__(self, channels, use_conv, out_channels=None, factor=4, ksize=5, pad=2):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        if use_conv:
            # Learned strided-convolution downsampling.
            self.op = nn.Conv1d(
                self.channels, self.out_channels, ksize, stride=factor, padding=pad
            )
        else:
            # Average pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = nn.AvgPool1d(kernel_size=factor, stride=factor)
    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(nn.Module):
    """1-D residual block (norm -> SiLU -> conv, twice) with optional built-in
    up/downsampling applied to both the residual and skip paths."""
    def __init__(
        self,
        channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        up=False,
        down=False,
        kernel_size=3,
    ):
        super().__init__()
        self.channels = channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        # NOTE(review): stored but not used in this forward path.
        self.use_scale_shift_norm = use_scale_shift_norm
        # Same-length padding for kernel sizes 3 and 5.
        padding = 1 if kernel_size == 3 else 2
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding),
        )
        self.updown = up or down
        if up:
            self.h_upd = Upsample(channels, False)
            self.x_upd = Upsample(channels, False)
        elif down:
            self.h_upd = Downsample(channels, False)
            self.x_upd = Downsample(channels, False)
        else:
            self.h_upd = self.x_upd = nn.Identity()
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            # Zero-init final conv so the block starts as (resampled) identity.
            zero_module(
                nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding)
            ),
        )
        # Skip path: identity when shapes match, otherwise a (1x1 or kxk) conv.
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = nn.Conv1d(
                channels, self.out_channels, kernel_size, padding=padding
            )
        else:
            self.skip_connection = nn.Conv1d(channels, self.out_channels, 1)
    def forward(self, x):
        if self.updown:
            # Resample between the norm/activation and the first conv, and
            # resample the skip input identically.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        h = self.out_layers(h)
        return self.skip_connection(x) + h
class AudioMiniEncoder(nn.Module):
    """Compresses a spectrogram into a single embedding vector: conv stem ->
    residual/downsample stack -> projection -> attention stack -> first frame."""
    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 base_channels=128,
                 depth=2,
                 resnet_blocks=2,
                 attn_blocks=4,
                 num_attn_heads=4,
                 dropout=0,
                 downsample_factor=2,
                 kernel_size=3):
        super().__init__()
        self.init = nn.Sequential(
            nn.Conv1d(spec_dim, base_channels, 3, padding=1)
        )
        ch = base_channels
        res = []
        # Each depth level: `resnet_blocks` ResBlocks then a 2x-channel downsample.
        for l in range(depth):
            for r in range(resnet_blocks):
                res.append(ResBlock(ch, dropout, kernel_size=kernel_size))
            res.append(Downsample(ch, use_conv=True, out_channels=ch*2, factor=downsample_factor))
            ch *= 2
        self.res = nn.Sequential(*res)
        self.final = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            nn.Conv1d(ch, embedding_dim, 1)
        )
        attn = []
        for a in range(attn_blocks):
            attn.append(AttentionBlock(embedding_dim, num_attn_heads,))
        self.attn = nn.Sequential(*attn)
        self.dim = embedding_dim
    def forward(self, x):
        """x: (b, spec_dim, time) -> (b, embedding_dim)."""
        h = self.init(x)
        h = self.res(h)
        h = self.final(h)
        h = self.attn(h)
        # Use the first time step as the summary embedding.
        return h[:, :, 0]
class TorchMelSpectrogram(nn.Module):
    """Computes log-compressed mel spectrograms, optionally normalized by
    precomputed per-channel norms loaded from `mel_norm_file`."""
    def __init__(self, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, mel_fmin=0, mel_fmax=8000,
                 sampling_rate=22050, normalize=False, mel_norm_file='ruth-tts-files/data/mel_norms.pth'):
        super().__init__()
        # These are the default tacotron values for the MEL spectrogram.
        self.filter_length = filter_length
        self.hop_length = hop_length
        self.win_length = win_length
        self.n_mel_channels = n_mel_channels
        self.mel_fmin = mel_fmin
        self.mel_fmax = mel_fmax
        self.sampling_rate = sampling_rate
        self.mel_stft = torchaudio.transforms.MelSpectrogram(n_fft=self.filter_length, hop_length=self.hop_length,
                                                             win_length=self.win_length, power=2, normalized=normalize,
                                                             sample_rate=self.sampling_rate, f_min=self.mel_fmin,
                                                             f_max=self.mel_fmax, n_mels=self.n_mel_channels,
                                                             norm="slaney")
        self.mel_norm_file = mel_norm_file
        if self.mel_norm_file is not None:
            # Per-mel-channel normalization constants, shape presumably
            # (n_mel_channels,) — see the unsqueeze in forward().
            self.mel_norms = torch.load(self.mel_norm_file)
        else:
            self.mel_norms = None
    def forward(self, inp):
        """inp: (b, samples) or (b, 1, samples) mono audio -> (b, n_mels, frames)."""
        if len(inp.shape) == 3:  # Automatically squeeze out the channels dimension if it is present (assuming mono-audio)
            inp = inp.squeeze(1)
        assert len(inp.shape) == 2
        self.mel_stft = self.mel_stft.to(inp.device)
        mel = self.mel_stft(inp)
        # Perform dynamic range compression
        mel = torch.log(torch.clamp(mel, min=1e-5))
        if self.mel_norms is not None:
            self.mel_norms = self.mel_norms.to(mel.device)
            mel = mel / self.mel_norms.unsqueeze(0).unsqueeze(-1)
        return mel
class CheckpointedLayer(nn.Module):
    """
    Wraps a module. When forward() is called, passes kwargs that require_grad through torch.checkpoint() and bypasses
    checkpoint for all other args.
    """
    def __init__(self, wrap):
        super().__init__()
        self.wrap = wrap
    def forward(self, x, *args, **kwargs):
        # Grad-requiring kwargs would be invisible to checkpoint's recompute.
        for k, v in kwargs.items():
            assert not (isinstance(v, torch.Tensor) and v.requires_grad)  # This would screw up checkpointing.
        # Bind kwargs now; only x and positional args flow through checkpoint.
        partial = functools.partial(self.wrap, **kwargs)
        return torch.utils.checkpoint.checkpoint(partial, x, *args)
class CheckpointedXTransformerEncoder(nn.Module):
    """
    Wraps a ContinuousTransformerWrapper and applies CheckpointedLayer to each layer and permutes from channels-mid
    to channels-last that XTransformer expects.
    """
    def __init__(self, needs_permute=True, exit_permute=True, checkpoint=True, **xtransformer_kwargs):
        super().__init__()
        self.transformer = ContinuousTransformerWrapper(**xtransformer_kwargs)
        self.needs_permute = needs_permute
        self.exit_permute = exit_permute
        if not checkpoint:
            return
        # Each attn layer is a (norm, block, residual) triple; wrap the block.
        for i in range(len(self.transformer.attn_layers.layers)):
            n, b, r = self.transformer.attn_layers.layers[i]
            self.transformer.attn_layers.layers[i] = nn.ModuleList([n, CheckpointedLayer(b), r])
    def forward(self, x, **kwargs):
        # Optionally convert (b, c, s) -> (b, s, c) for the transformer...
        if self.needs_permute:
            x = x.permute(0,2,1)
        h = self.transformer(x, **kwargs)
        # ...and back on the way out.
        if self.exit_permute:
            h = h.permute(0,2,1)
        return h
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import GPT2Config, GPT2PreTrainedModel, LogitsProcessorList
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
from transformers.utils.model_parallel_utils import get_device_map, assert_device_map
from ruth_tts_transformer.ruth_tts.models.arch_util import AttentionBlock
from ruth_tts_transformer.ruth_tts.utils.typical_sampling import TypicalLogitsWarper
def null_position_embeddings(range, dim):
    """All-zero stand-in for position embeddings, shaped (batch, seq, dim)
    to match *range*'s leading dims and device."""
    batch, seq = range.shape[0], range.shape[1]
    return torch.zeros((batch, seq, dim), device=range.device)
class ResBlock(nn.Module):
    """
    Basic residual convolutional block that uses GroupNorm.
    """
    def __init__(self, chan):
        super().__init__()
        conv_args = dict(kernel_size=3, padding=1)
        self.net = nn.Sequential(
            nn.Conv1d(chan, chan, **conv_args),
            nn.GroupNorm(chan//8, chan),
            nn.ReLU(),
            nn.Conv1d(chan, chan, **conv_args),
            nn.GroupNorm(chan//8, chan),
        )
    def forward(self, x):
        residual = self.net(x) + x
        return F.relu(residual)
class GPT2InferenceModel(GPT2PreTrainedModel):
    def __init__(self, config, gpt, text_pos_emb, embeddings, norm, linear):
        """Assemble an inference-only GPT-2 LM from externally-built parts.

        Args:
            config: HuggingFace GPT2Config for the underlying transformer.
            gpt: the GPT-2 transformer stack.
            text_pos_emb: positional-embedding module for text tokens.
            embeddings: token embedding table.
            norm, linear: final norm + projection composed into the LM head.
        """
        super().__init__(config)
        self.transformer = gpt
        self.text_pos_embedding = text_pos_emb
        self.embeddings = embeddings
        self.lm_head = nn.Sequential(norm, linear)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        # Conditioning (mel) embedding set by store_mel_emb() before generation.
        self.cached_mel_emb = None
    def parallelize(self, device_map=None):
        """Spread the transformer layers across available CUDA devices.

        If *device_map* is None, a balanced map over all visible GPUs is built.
        """
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        # LM head lives on the same device as the first transformer shard.
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True
    def deparallelize(self):
        """Undo parallelize(): gather everything back onto the CPU."""
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        # Free the GPU memory the shards were occupying.
        torch.cuda.empty_cache()
    def get_output_embeddings(self):
        """Return the LM head (HuggingFace generation API hook)."""
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        """Replace the LM head (HuggingFace generation API hook)."""
        self.lm_head = new_embeddings
    def store_mel_emb(self, mel_emb):
        """Cache the conditioning (mel) embedding consumed by forward()."""
        self.cached_mel_emb = mel_emb
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
assert self.cached_mel_emb is not None
assert inputs_embeds is None # Not supported by this inference model.
assert labels is None # Training not supported by this inference model.
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Create embedding
mel_len = self.cached_mel_emb.shape[1]
if input_ids.shape[1] != 1:
text_inputs = input_ids[:, mel_len:]
text_emb = self.embeddings(text_inputs)
text_emb = text_emb + self.text_pos_embedding(text_emb)
if self.cached_mel_emb.shape[0] != text_emb.shape[0]:
mel_emb = self.cached_mel_emb.repeat_interleave(text_emb.shape[0]//self.cached_mel_emb.shape[0], 0)
else:
mel_emb = self.cached_mel_emb
emb = torch.cat([mel_emb, text_emb], dim=1)
else:
emb = self.embeddings(input_ids)
emb = emb + self.text_pos_embedding.get_fixed_embedding(attention_mask.shape[1]-mel_len, attention_mask.device)
transformer_outputs = self.transformer(
inputs_embeds=emb,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
if not return_dict:
return (lm_logits,) + transformer_outputs[1:]
return CausalLMOutputWithCrossAttentions(
loss=None,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
)
@staticmethod
def _reorder_cache(past, beam_idx):
"""
This function is used to re-order the :obj:`past_key_values` cache if
:meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
class ConditioningEncoder(nn.Module):
    """Collapses a (b, spec_dim, s) spectrogram into one embedding vector per
    batch item: a 1x1 conv projection followed by a stack of attention blocks,
    then either the mean over time or the first timestep."""

    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 attn_blocks=6,
                 num_attn_heads=4,
                 do_checkpointing=False,
                 mean=False):
        super().__init__()
        self.init = nn.Conv1d(spec_dim, embedding_dim, kernel_size=1)
        blocks = [AttentionBlock(embedding_dim, num_attn_heads) for _ in range(attn_blocks)]
        self.attn = nn.Sequential(*blocks)
        self.dim = embedding_dim
        self.do_checkpointing = do_checkpointing
        self.mean = mean

    def forward(self, x):
        h = self.attn(self.init(x))
        return h.mean(dim=2) if self.mean else h[:, :, 0]
class LearnedPositionEmbeddings(nn.Module):
    """Trainable absolute position embeddings, GPT-2 style (normal init)."""

    def __init__(self, seq_len, model_dim, init=.02):
        super().__init__()
        self.emb = nn.Embedding(seq_len, model_dim)
        # Initializing this way is standard for GPT-2
        self.emb.weight.data.normal_(mean=0.0, std=init)

    def forward(self, x):
        # Returns embeddings for positions 0..seq_len-1 of x, shape (s, dim).
        positions = torch.arange(0, x.shape[1], device=x.device)
        return self.emb(positions)

    def get_fixed_embedding(self, ind, dev):
        """Embedding of a single absolute position, shaped (1, 1, model_dim)."""
        return self.emb(torch.tensor([ind], device=dev)).unsqueeze(0)
def build_hf_gpt_transformer(layers, model_dim, heads, max_mel_seq_len, max_text_seq_len, checkpointing):
    """
    GPT-2 implemented by the HuggingFace library.

    Builds a GPT2Model whose built-in token and position embeddings are
    disabled (tokens are embedded by the caller; positions are zeroed via
    null_position_embeddings). Returns
    (gpt, mel_pos_embedding, text_pos_embedding, None, None) — the trailing
    Nones keep the tuple shape expected by UnifiedVoice.__init__.
    """
    from transformers import GPT2Config, GPT2Model
    gpt_config = GPT2Config(vocab_size=256, # Unused.
                            n_positions=max_mel_seq_len+max_text_seq_len,
                            n_ctx=max_mel_seq_len+max_text_seq_len,
                            n_embd=model_dim,
                            n_layer=layers,
                            n_head=heads,
                            gradient_checkpointing=checkpointing,
                            use_cache=not checkpointing)
    gpt = GPT2Model(gpt_config)
    # Override the built in positional embeddings
    # (assumes `functools` is imported at the top of this module — verify)
    del gpt.wpe
    gpt.wpe = functools.partial(null_position_embeddings, dim=model_dim)
    # Built-in token embeddings are unused.
    del gpt.wte
    return gpt, LearnedPositionEmbeddings(max_mel_seq_len, model_dim), LearnedPositionEmbeddings(max_text_seq_len, model_dim),\
           None, None
class MelEncoder(nn.Module):
    """Encodes a (b, mel_channels, s) spectrogram into (b, s//4, channels)
    features via two stride-2 convolutions (hence self.reduction = 4)."""

    def __init__(self, channels, mel_channels=80, resblocks_per_reduction=2):
        super().__init__()
        self.channels = channels

        def res_stack(chan):
            return nn.Sequential(*[ResBlock(chan) for _ in range(resblocks_per_reduction)])

        self.encoder = nn.Sequential(
            nn.Conv1d(mel_channels, channels // 4, kernel_size=3, padding=1),
            res_stack(channels // 4),
            nn.Conv1d(channels // 4, channels // 2, kernel_size=3, stride=2, padding=1),
            nn.GroupNorm(channels // 16, channels // 2),
            nn.ReLU(),
            res_stack(channels // 2),
            nn.Conv1d(channels // 2, channels, kernel_size=3, stride=2, padding=1),
            nn.GroupNorm(channels // 8, channels),
            nn.ReLU(),
            res_stack(channels),
        )
        self.reduction = 4

    def forward(self, x):
        for stage in self.encoder:
            x = stage(x)
        return x.permute(0, 2, 1)
class UnifiedVoice(nn.Module):
    """GPT-style autoregressive model over text tokens and MEL codes,
    conditioned on a speech latent. forward() computes training losses over
    both modalities; inference_speech() wraps the shared stack in
    GPT2InferenceModel so HuggingFace generate() can sample MEL codes."""
    def __init__(self, layers=8, model_dim=512, heads=8, max_text_tokens=120, max_mel_tokens=250, max_conditioning_inputs=1,
                 mel_length_compression=1024, number_text_tokens=256,
                 start_text_token=None, number_mel_codes=8194, start_mel_token=8192,
                 stop_mel_token=8193, train_solo_embeddings=False, use_mel_codes_as_input=True,
                 checkpointing=True, types=1):
        """
        Args:
            layers: Number of layers in transformer stack.
            model_dim: Operating dimensions of the transformer
            heads: Number of transformer heads. Must be divisible by model_dim. Recommend model_dim//64
            max_text_tokens: Maximum number of text tokens that will be encountered by model.
            max_mel_tokens: Maximum number of MEL tokens that will be encountered by model.
            max_conditioning_inputs: Maximum number of conditioning inputs provided to the model. If (1), conditioning input can be of format (b,80,s), otherwise (b,n,80,s).
            mel_length_compression: The factor between <number_input_samples> and <mel_tokens>. Used to compute MEL code padding given wav input length.
            number_text_tokens:
            start_text_token:
            stop_text_token:
            number_mel_codes:
            start_mel_token:
            stop_mel_token:
            train_solo_embeddings:
            use_mel_codes_as_input:
            checkpointing:
        """
        super().__init__()
        self.number_text_tokens = number_text_tokens
        self.start_text_token = number_text_tokens * types if start_text_token is None else start_text_token
        self.stop_text_token = 0
        self.number_mel_codes = number_mel_codes
        self.start_mel_token = start_mel_token
        self.stop_mel_token = stop_mel_token
        self.layers = layers
        self.heads = heads
        self.max_mel_tokens = max_mel_tokens
        self.max_text_tokens = max_text_tokens
        self.model_dim = model_dim
        self.max_conditioning_inputs = max_conditioning_inputs
        self.mel_length_compression = mel_length_compression
        self.conditioning_encoder = ConditioningEncoder(80, model_dim, num_attn_heads=heads)
        self.text_embedding = nn.Embedding(self.number_text_tokens*types+1, model_dim)
        if use_mel_codes_as_input:
            self.mel_embedding = nn.Embedding(self.number_mel_codes, model_dim)
        else:
            self.mel_embedding = MelEncoder(model_dim, resblocks_per_reduction=1)
        self.gpt, self.mel_pos_embedding, self.text_pos_embedding, self.mel_layer_pos_embedding, self.text_layer_pos_embedding = \
            build_hf_gpt_transformer(layers, model_dim, heads, self.max_mel_tokens+2+self.max_conditioning_inputs, self.max_text_tokens+2, checkpointing)
        if train_solo_embeddings:
            self.mel_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * .02, requires_grad=True)
            self.text_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * .02, requires_grad=True)
        else:
            # NOTE(review): the solo embeddings are not referenced anywhere in
            # this class's visible methods — possibly used by subclasses or
            # external training code; verify before removing.
            self.mel_solo_embedding = 0
            self.text_solo_embedding = 0
        self.final_norm = nn.LayerNorm(model_dim)
        self.text_head = nn.Linear(model_dim, self.number_text_tokens*types+1)
        self.mel_head = nn.Linear(model_dim, self.number_mel_codes)
        # Initialize the embeddings per the GPT-2 scheme
        embeddings = [self.text_embedding]
        if use_mel_codes_as_input:
            embeddings.append(self.mel_embedding)
        for module in embeddings:
            module.weight.data.normal_(mean=0.0, std=.02)
    def build_aligned_inputs_and_targets(self, input, start_token, stop_token):
        """Prepend start_token to build model inputs and append stop_token to
        build the shifted targets; both outputs are one longer than `input`."""
        inp = F.pad(input, (1,0), value=start_token)
        tar = F.pad(input, (0,1), value=stop_token)
        return inp, tar
    def set_mel_padding(self, mel_input_tokens, wav_lengths):
        """
        Given mel tokens that are derived from a padded audio clip and the actual lengths of each batch element in
        that audio clip, reformats the tokens with STOP_MEL_TOKEN in place of the zero padding. This is required
        preformatting to create a working TTS model.
        """
        # Set padding areas within MEL (currently it is coded with the MEL code for <zero>).
        mel_lengths = torch.div(wav_lengths, self.mel_length_compression, rounding_mode='trunc')
        for b in range(len(mel_lengths)):
            actual_end = mel_lengths[b] + 1  # Due to the convolutional nature of how these tokens are generated, it would be best if the model predicts a token past the actual last token.
            if actual_end < mel_input_tokens.shape[-1]:
                mel_input_tokens[b, actual_end:] = self.stop_mel_token
        return mel_input_tokens
    def get_logits(self, speech_conditioning_inputs, first_inputs, first_head, second_inputs=None, second_head=None, get_attns=False, return_latent=False):
        """Run [conditioning | first | second] embeddings through the GPT stack
        and project each segment with its head. Logits are permuted to
        (b, vocab, s) as expected by F.cross_entropy."""
        if second_inputs is not None:
            emb = torch.cat([speech_conditioning_inputs, first_inputs, second_inputs], dim=1)
        else:
            emb = torch.cat([speech_conditioning_inputs, first_inputs], dim=1)
        gpt_out = self.gpt(inputs_embeds=emb, return_dict=True, output_attentions=get_attns)
        if get_attns:
            return gpt_out.attentions
        enc = gpt_out.last_hidden_state[:, 1:]  # The first logit is tied to the speech_conditioning_input
        enc = self.final_norm(enc)
        if return_latent:
            return enc[:, speech_conditioning_inputs.shape[1]:speech_conditioning_inputs.shape[1]+first_inputs.shape[1]], enc[:, -second_inputs.shape[1]:]
        first_logits = enc[:, :first_inputs.shape[1]]
        first_logits = first_head(first_logits)
        first_logits = first_logits.permute(0,2,1)
        if second_inputs is not None:
            second_logits = enc[:, -second_inputs.shape[1]:]
            second_logits = second_head(second_logits)
            second_logits = second_logits.permute(0,2,1)
            return first_logits, second_logits
        else:
            return first_logits
    def get_conditioning(self, speech_conditioning_input):
        """Encode one or more conditioning spectrograms ((b,80,s) or (b,n,80,s))
        and average the per-clip embeddings into a single (b, model_dim) latent."""
        speech_conditioning_input = speech_conditioning_input.unsqueeze(1) if len(
            speech_conditioning_input.shape) == 3 else speech_conditioning_input
        conds = []
        for j in range(speech_conditioning_input.shape[1]):
            conds.append(self.conditioning_encoder(speech_conditioning_input[:, j]))
        conds = torch.stack(conds, dim=1)
        conds = conds.mean(dim=1)
        return conds
    def forward(self, speech_conditioning_latent, text_inputs, text_lengths, mel_codes, wav_lengths, types=None, text_first=True, raw_mels=None, return_attentions=False,
                return_latent=False, clip_inputs=True):
        """
        Forward pass that uses both text and voice in either text conditioning mode or voice conditioning mode
        (actuated by `text_first`).

        speech_conditioning_input: MEL float tensor, (b,1024)
        text_inputs: long tensor, (b,t)
        text_lengths: long tensor, (b,)
        mel_inputs:  long tensor, (b,m)
        wav_lengths: long tensor, (b,)
        raw_mels: MEL float tensor (b,80,s)

        If return_attentions is specified, only logits are returned.
        If return_latent is specified, loss & logits are not computed or returned. Only the predicted latents are returned.
        If clip_inputs is True, the inputs will be clipped to the smallest input size across each input modality.
        """
        # Types are expressed by expanding the text embedding space.
        if types is not None:
            text_inputs = text_inputs * (1+types).unsqueeze(-1)
        if clip_inputs:
            # This model will receive micro-batches with a ton of padding for both the text and MELs. Ameliorate this by
            # chopping the inputs by the maximum actual length.
            max_text_len = text_lengths.max()
            text_inputs = text_inputs[:, :max_text_len]
            max_mel_len = wav_lengths.max() // self.mel_length_compression
            mel_codes = mel_codes[:, :max_mel_len]
            if raw_mels is not None:
                raw_mels = raw_mels[:, :, :max_mel_len*4]
        mel_codes = self.set_mel_padding(mel_codes, wav_lengths)
        text_inputs = F.pad(text_inputs, (0,1), value=self.stop_text_token)
        mel_codes = F.pad(mel_codes, (0,1), value=self.stop_mel_token)
        conds = speech_conditioning_latent.unsqueeze(1)
        text_inputs, text_targets = self.build_aligned_inputs_and_targets(text_inputs, self.start_text_token, self.stop_text_token)
        text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)
        mel_codes, mel_targets = self.build_aligned_inputs_and_targets(mel_codes, self.start_mel_token, self.stop_mel_token)
        if raw_mels is not None:
            mel_inp = F.pad(raw_mels, (0, 8))
        else:
            mel_inp = mel_codes
        mel_emb = self.mel_embedding(mel_inp)
        mel_emb = mel_emb + self.mel_pos_embedding(mel_codes)
        if text_first:
            text_logits, mel_logits = self.get_logits(conds, text_emb, self.text_head, mel_emb, self.mel_head, get_attns=return_attentions, return_latent=return_latent)
            if return_latent:
                return mel_logits[:, :-2]  # Despite the name, these are not logits. Strip off the two tokens added by this forward pass.
        else:
            mel_logits, text_logits = self.get_logits(conds, mel_emb, self.mel_head, text_emb, self.text_head, get_attns=return_attentions, return_latent=return_latent)
            if return_latent:
                return text_logits[:, :-2]  # Despite the name, these are not logits. Strip off the two tokens added by this forward pass.
        if return_attentions:
            return mel_logits
        loss_text = F.cross_entropy(text_logits, text_targets.long())
        loss_mel = F.cross_entropy(mel_logits, mel_targets.long())
        return loss_text.mean(), loss_mel.mean(), mel_logits
    def inference_speech(self, speech_conditioning_latent, text_inputs, input_tokens=None, num_return_sequences=1,
                         max_generate_length=None, typical_sampling=False, typical_mass=.9, **hf_generate_kwargs):
        """Sample MEL codes for `text_inputs` conditioned on the given latent,
        using HuggingFace generate() via a lazily built GPT2InferenceModel.
        Returns only the newly generated tokens (prefix stripped)."""
        seq_length = self.max_mel_tokens + self.max_text_tokens + 2
        if not hasattr(self, 'inference_model'):
            # TODO: Decouple gpt_config from this inference model.
            gpt_config = GPT2Config(vocab_size=self.max_mel_tokens,
                                    n_positions=seq_length,
                                    n_ctx=seq_length,
                                    n_embd=self.model_dim,
                                    n_layer=self.layers,
                                    n_head=self.heads,
                                    gradient_checkpointing=False,
                                    use_cache=True)
            self.inference_model = GPT2InferenceModel(gpt_config, self.gpt, self.mel_pos_embedding, self.mel_embedding, self.final_norm, self.mel_head)
            self.gpt.wte = self.mel_embedding
        text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)
        text_inputs, text_targets = self.build_aligned_inputs_and_targets(text_inputs, self.start_text_token, self.stop_text_token)
        text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)
        conds = speech_conditioning_latent.unsqueeze(1)
        emb = torch.cat([conds, text_emb], dim=1)
        self.inference_model.store_mel_emb(emb)
        # Placeholder token ids standing in for the cached embedding prefix;
        # only the final start-of-mel token is meaningful.
        fake_inputs = torch.full((emb.shape[0], conds.shape[1] + emb.shape[1],), fill_value=1, dtype=torch.long,
                                 device=text_inputs.device)
        fake_inputs[:, -1] = self.start_mel_token
        trunc_index = fake_inputs.shape[1]
        if input_tokens is None:
            inputs = fake_inputs
        else:
            assert num_return_sequences % input_tokens.shape[0] == 0, "The number of return sequences must be divisible by the number of input sequences"
            fake_inputs = fake_inputs.repeat(num_return_sequences, 1)
            input_tokens = input_tokens.repeat(num_return_sequences // input_tokens.shape[0], 1)
            inputs = torch.cat([fake_inputs, input_tokens], dim=1)
        logits_processor = LogitsProcessorList([TypicalLogitsWarper(mass=typical_mass)]) if typical_sampling else LogitsProcessorList()
        max_length = trunc_index + self.max_mel_tokens - 1 if max_generate_length is None else trunc_index + max_generate_length
        gen = self.inference_model.generate(inputs, bos_token_id=self.start_mel_token, pad_token_id=self.stop_mel_token, eos_token_id=self.stop_mel_token,
                                            max_length=max_length, logits_processor=logits_processor,
                                            num_return_sequences=num_return_sequences, **hf_generate_kwargs)
        return gen[:, trunc_index:]
if __name__ == '__main__':
    # Smoke test: build a small model and run one training forward pass.
    gpt = UnifiedVoice(model_dim=256, heads=4, train_solo_embeddings=True, use_mel_codes_as_input=True, max_conditioning_inputs=4)
    # NOTE(review): forward() expects a conditioning *latent* as its first
    # argument; a (2, 3, 80, 800) spectrogram batch may need
    # gpt.get_conditioning() first — confirm against training code.
    l = gpt(torch.randn(2, 3, 80, 800),
            torch.randint(high=120, size=(2,120)),
            torch.tensor([32, 120]),
            torch.randint(high=8192, size=(2,250)),
            torch.tensor([250*256,195*256]))
    # BUG FIX: removed a trailing call to gpt.text_forward(...) —
    # UnifiedVoice defines no text_forward method, so that line always
    # raised AttributeError and the smoke test could never pass.
import os
from glob import glob
import librosa
import torch
import torchaudio
import numpy as np
from scipy.io.wavfile import read
from ruth_tts_transformer.ruth_tts.utils.stft import STFT
def load_wav_to_torch(full_path):
    """Read a WAV file and return (FloatTensor in [-1, 1], sampling_rate).

    Integer PCM data is normalized by its dtype's full-scale value; float
    data is assumed already normalized.

    Raises:
        NotImplementedError: if the WAV sample dtype is unsupported.
    """
    sampling_rate, data = read(full_path)
    if data.dtype == np.int32:
        norm_fix = 2 ** 31
    elif data.dtype == np.int16:
        norm_fix = 2 ** 15
    elif data.dtype == np.float16 or data.dtype == np.float32:
        norm_fix = 1.
    else:
        # BUG FIX: previously raised `NotImplemented(...)` — NotImplemented is
        # a sentinel constant, not an exception, so this path itself crashed
        # with a TypeError instead of reporting the unsupported dtype.
        raise NotImplementedError(f"Provided data dtype not supported: {data.dtype}")
    return (torch.FloatTensor(data.astype(np.float32)) / norm_fix, sampling_rate)
def load_audio(audiopath, sampling_rate):
    """Load a .wav or .mp3 file as a mono (1, T) float tensor clipped to
    [-1, 1], resampled to `sampling_rate` when the source rate differs."""
    ext = audiopath[-4:]
    if ext == '.wav':
        audio, lsr = load_wav_to_torch(audiopath)
    elif ext == '.mp3':
        decoded, lsr = librosa.load(audiopath, sr=sampling_rate)
        audio = torch.FloatTensor(decoded)
    else:
        assert False, f"Unsupported audio format provided: {audiopath[-4:]}"
    # Remove any channel data.
    if len(audio.shape) > 1:
        if audio.shape[0] < 5:
            audio = audio[0]
        else:
            assert audio.shape[1] < 5
            audio = audio[:, 0]
    if lsr != sampling_rate:
        audio = torchaudio.functional.resample(audio, lsr, sampling_rate)
    # Check some assumptions about audio range. This should be automatically fixed in load_wav_to_torch, but might not be in some edge cases, where we should squawk.
    # '2' is arbitrarily chosen since it seems like audio will often "overdrive" the [-1,1] bounds.
    if torch.any(audio > 2) or not torch.any(audio < 0):
        print(f"Error with {audiopath}. Max={audio.max()} min={audio.min()}")
    audio.clip_(-1, 1)
    return audio.unsqueeze(0)
# Empirical extrema of Tacotron-style log-mel values; used to map mels
# to/from the [-1, 1] range the models operate in.
TACOTRON_MEL_MAX = 2.3143386840820312
TACOTRON_MEL_MIN = -11.512925148010254


def denormalize_tacotron_mel(norm_mel):
    """Map a [-1, 1]-normalized mel back to the raw Tacotron range."""
    span = TACOTRON_MEL_MAX - TACOTRON_MEL_MIN
    return (norm_mel + 1) * 0.5 * span + TACOTRON_MEL_MIN


def normalize_tacotron_mel(mel):
    """Map a raw Tacotron-range mel into [-1, 1]."""
    span = TACOTRON_MEL_MAX - TACOTRON_MEL_MIN
    return 2 * ((mel - TACOTRON_MEL_MIN) / span) - 1
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """
    PARAMS
    ------
    C: compression factor
    """
    clamped = torch.clamp(x, min=clip_val)
    return torch.log(clamped * C)
def dynamic_range_decompression(x, C=1):
    """
    PARAMS
    ------
    C: compression factor used to compress
    """
    decompressed = torch.exp(x)
    return decompressed / C
def get_voices():
    """Scan ruth-tts-files/voices/* for voice folders; each maps to its list
    of .wav clips, then .mp3 clips, then .pth latent files."""
    root = 'ruth-tts-files/voices'
    voices = {}
    for name in os.listdir(root):
        candidate = os.path.join(root, name)
        if not os.path.isdir(candidate):
            continue
        found = []
        for pattern in ('*.wav', '*.mp3', '*.pth'):
            found.extend(glob(f'{candidate}/{pattern}'))
        voices[name] = found
    return voices
def load_voice(voice):
    """Resolve a voice name to (conditioning_clips, latent).

    'random' yields (None, None); a single .pth file yields a precomputed
    latent; otherwise every file is loaded as a 22.05 kHz conditioning clip.
    """
    if voice == 'random':
        return None, None
    paths = get_voices()[voice]
    if len(paths) == 1 and paths[0].endswith('.pth'):
        return None, torch.load(paths[0])
    conds = [load_audio(cond_path, 22050) for cond_path in paths]
    return conds, None
def load_voices(voices):
    """Combine several named voices into either a list of raw conditioning
    clips or a single averaged latent. Mixing clip-based and latent-based
    voices is not supported. Returns (clips, None) or (None, mean_latent);
    any 'random' entry short-circuits to (None, None)."""
    latents = []
    clips = []
    for voice in voices:
        if voice == 'random':
            print("Cannot combine a random voice with a non-random voice. Just using a random voice.")
            return None, None
        clip, latent = load_voice(voice)
        if latent is None:
            assert len(latents) == 0, "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
            clips.extend(clip)
        elif clip is None:
            # BUG FIX: this branch previously tested `voice is None` (never true
            # inside the loop) and asserted `len(voices) == 0`, so latent-only
            # voices were silently dropped and an empty clip list was returned.
            assert len(clips) == 0, "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
            latents.append(latent)
    if len(latents) == 0:
        return clips, None
    else:
        latents = torch.stack(latents, dim=0)
        return None, latents.mean(dim=0)
class TacotronSTFT(torch.nn.Module):
    """Computes log-compressed mel spectrograms from waveforms using a
    project-local STFT plus a librosa mel filterbank."""
    def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
                 n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
                 mel_fmax=8000.0):
        super(TacotronSTFT, self).__init__()
        self.n_mel_channels = n_mel_channels
        self.sampling_rate = sampling_rate
        self.stft_fn = STFT(filter_length, hop_length, win_length)
        from librosa.filters import mel as librosa_mel_fn
        mel_basis = librosa_mel_fn(
            sr=sampling_rate, n_fft=filter_length, n_mels=n_mel_channels, fmin=mel_fmin, fmax=mel_fmax)
        mel_basis = torch.from_numpy(mel_basis).float()
        # Registered as a buffer so it moves with .to()/.cuda() and is saved
        # with the state dict, but is not a trainable parameter.
        self.register_buffer('mel_basis', mel_basis)
    def spectral_normalize(self, magnitudes):
        # log-compress magnitudes (see dynamic_range_compression).
        output = dynamic_range_compression(magnitudes)
        return output
    def spectral_de_normalize(self, magnitudes):
        # inverse of spectral_normalize.
        output = dynamic_range_decompression(magnitudes)
        return output
    def mel_spectrogram(self, y):
        """Computes mel-spectrograms from a batch of waves
        PARAMS
        ------
        y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]

        RETURNS
        -------
        mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
        """
        assert(torch.min(y.data) >= -10)
        assert(torch.max(y.data) <= 10)
        # Inputs are tolerated up to +/-10 but hard-clipped to [-1, 1] before
        # the STFT.
        y = torch.clip(y, min=-1, max=1)
        magnitudes, phases = self.stft_fn.transform(y)
        magnitudes = magnitudes.data
        mel_output = torch.matmul(self.mel_basis, magnitudes)
        mel_output = self.spectral_normalize(mel_output)
        return mel_output
def wav_to_univnet_mel(wav, do_normalization=False):
    """Compute a 100-bin univnet-style mel (24 kHz, fmax 12 kHz) of `wav`,
    optionally normalized to [-1, 1]. Runs the STFT on CUDA."""
    stft = TacotronSTFT(1024, 256, 1024, 100, 24000, 0, 12000).cuda()
    mel = stft.mel_spectrogram(wav)
    return normalize_tacotron_mel(mel) if do_normalization else mel
import re
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2FeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Processor
from ruth_tts_transformer.ruth_tts.utils.audio import load_audio
def max_alignment(s1, s2, skip_character='~', record=None):
    """
    A clever function that aligns s1 to s2 as best it can. Wherever a character from s1 is not found in s2, a '~' is
    used to replace that character.

    Finally got to use my DP skills!

    `record` memoizes sub-problems keyed by (len(s1), len(s2)); those keys are
    only valid for one particular (s1, s2) pair, so the memo must be created
    fresh per top-level call. (BUG FIX: it used to be a mutable default
    argument, which leaked cached results between calls and returned wrong
    alignments for unrelated strings of the same lengths.)
    """
    if record is None:
        record = {}
    assert skip_character not in s1, f"Found the skip character {skip_character} in the provided string, {s1}"
    if len(s1) == 0:
        return ''
    if len(s2) == 0:
        return skip_character * len(s1)
    if s1 == s2:
        return s1
    if s1[0] == s2[0]:
        return s1[0] + max_alignment(s1[1:], s2[1:], skip_character, record)
    # Option A: skip s2[0] and keep aligning s1 against the rest of s2.
    take_s1_key = (len(s1), len(s2) - 1)
    if take_s1_key in record:
        take_s1, take_s1_score = record[take_s1_key]
    else:
        take_s1 = max_alignment(s1, s2[1:], skip_character, record)
        take_s1_score = len(take_s1.replace(skip_character, ''))
        record[take_s1_key] = (take_s1, take_s1_score)
    # Option B: give up on s1[0] (emit skip_character) and align the rest.
    take_s2_key = (len(s1) - 1, len(s2))
    if take_s2_key in record:
        take_s2, take_s2_score = record[take_s2_key]
    else:
        take_s2 = max_alignment(s1[1:], s2, skip_character, record)
        take_s2_score = len(take_s2.replace(skip_character, ''))
        record[take_s2_key] = (take_s2, take_s2_score)
    return take_s1 if take_s1_score > take_s2_score else skip_character + take_s2
class Wav2VecAlignment:
    """
    Uses wav2vec2 to perform audio<->text alignment.
    """
    def __init__(self):
        # Model is kept on CPU between calls; align() moves it to CUDA and back.
        self.model = Wav2Vec2ForCTC.from_pretrained("jbetker/wav2vec2-large-robust-ft-libritts-voxpopuli").cpu()
        # NOTE(review): feature_extractor is loaded but never used by the
        # visible methods (align normalizes the clip manually) — verify
        # whether it can be dropped.
        self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"facebook/wav2vec2-large-960h")
        self.tokenizer = Wav2Vec2CTCTokenizer.from_pretrained('jbetker/tacotron-symbols')
    def align(self, audio, expected_text, audio_sample_rate=24000):
        """Return, for each character of `expected_text`, the sample offset in
        `audio` where it occurs (characters the model cannot find are
        interpolated). Requires CUDA."""
        orig_len = audio.shape[-1]
        with torch.no_grad():
            self.model = self.model.cuda()
            audio = audio.to('cuda')
            # wav2vec2 expects 16 kHz, zero-mean/unit-variance input.
            audio = torchaudio.functional.resample(audio, audio_sample_rate, 16000)
            clip_norm = (audio - audio.mean()) / torch.sqrt(audio.var() + 1e-7)
            logits = self.model(clip_norm).logits
            self.model = self.model.cpu()
        logits = logits[0]
        pred_string = self.tokenizer.decode(logits.argmax(-1).tolist())
        # Replace characters of expected_text that wav2vec2 did not predict
        # with '~' so the greedy pass below can skip them.
        fixed_expectation = max_alignment(expected_text.lower(), pred_string)
        w2v_compression = orig_len // logits.shape[0]  # samples per logit frame
        expected_tokens = self.tokenizer.encode(fixed_expectation)
        expected_chars = list(fixed_expectation)
        if len(expected_tokens) == 1:
            return [0]  # The alignment is simple; there is only one token.
        expected_tokens.pop(0)  # The first token is a given.
        expected_chars.pop(0)
        alignments = [0]
        def pop_till_you_win():
            # Advance to the next non-'~' expected token, recording -1
            # (unknown position, interpolated later) for each skipped char.
            if len(expected_tokens) == 0:
                return None
            popped = expected_tokens.pop(0)
            popped_char = expected_chars.pop(0)
            while popped_char == '~':
                alignments.append(-1)
                if len(expected_tokens) == 0:
                    return None
                popped = expected_tokens.pop(0)
                popped_char = expected_chars.pop(0)
            return popped
        next_expected_token = pop_till_you_win()
        # Greedy scan over logit frames, matching each expected token to the
        # first frame whose argmax equals it.
        for i, logit in enumerate(logits):
            top = logit.argmax()
            if next_expected_token == top:
                alignments.append(i * w2v_compression)
                if len(expected_tokens) > 0:
                    next_expected_token = pop_till_you_win()
                else:
                    break
        pop_till_you_win()
        if not (len(expected_tokens) == 0 and len(alignments) == len(expected_text)):
            torch.save([audio, expected_text], 'alignment_debug.pth')
            assert False, "Something went wrong with the alignment algorithm. I've dumped a file, 'alignment_debug.pth' to" \
                          "your current working directory. Please report this along with the file so it can get fixed."
        # Now fix up alignments. Anything with -1 should be interpolated.
        alignments.append(orig_len)  # This'll get removed but makes the algorithm below more readable.
        for i in range(len(alignments)):
            if alignments[i] == -1:
                for j in range(i+1, len(alignments)):
                    if alignments[j] != -1:
                        next_found_token = j
                        break
                for j in range(i, next_found_token):
                    gap = alignments[next_found_token] - alignments[i-1]
                    alignments[j] = (j-i+1) * gap // (next_found_token-i+1) + alignments[i-1]
        return alignments[:-1]
    def redact(self, audio, expected_text, audio_sample_rate=24000):
        """Cut out of `audio` every span of `expected_text` enclosed in
        square brackets, using align() to find the sample boundaries."""
        if '[' not in expected_text:
            return audio
        splitted = expected_text.split('[')
        fully_split = [splitted[0]]
        for spl in splitted[1:]:
            assert ']' in spl, 'Every "[" character must be paired with a "]" with no nesting.'
            fully_split.extend(spl.split(']'))
        # At this point, fully_split is a list of strings, with every other string being something that should be redacted.
        non_redacted_intervals = []
        last_point = 0
        for i in range(len(fully_split)):
            if i % 2 == 0:
                end_interval = max(0, last_point + len(fully_split[i]) - 1)
                non_redacted_intervals.append((last_point, end_interval))
            last_point += len(fully_split[i])
        bare_text = ''.join(fully_split)
        alignments = self.align(audio, bare_text, audio_sample_rate)
        output_audio = []
        for nri in non_redacted_intervals:
            start, stop = nri
            output_audio.append(audio[:, alignments[start]:alignments[stop]])
        return torch.cat(output_audio, dim=-1)
import json
import re
import inflect
import requests
import torch
from tokenizers import Tokenizer
# Regular expression matching whitespace:
from unidecode import unidecode
# Pre-compiled pattern used by collapse_whitespace() to squeeze whitespace runs.
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [
    (re.compile('\\b%s\\.' % abbr, re.IGNORECASE), replacement)
    for abbr, replacement in [
        ('mrs', 'misess'),
        ('mr', 'mister'),
        ('dr', 'doctor'),
        ('st', 'saint'),
        ('co', 'company'),
        ('jr', 'junior'),
        ('maj', 'major'),
        ('gen', 'general'),
        ('drs', 'doctors'),
        ('rev', 'reverend'),
        ('lt', 'lieutenant'),
        ('hon', 'honorable'),
        ('sgt', 'sergeant'),
        ('capt', 'captain'),
        ('esq', 'esquire'),
        ('ltd', 'limited'),
        ('col', 'colonel'),
        ('ft', 'fort'),
    ]
]


def expand_abbreviations(text):
    """Replace abbreviated titles like 'Dr.' with their spelled-out words."""
    for pattern, replacement in _abbreviations:
        text = re.sub(pattern, replacement, text)
    return text
# Engine used to spell numbers out as English words (third-party `inflect`).
_inflect = inflect.engine()
# Numbers with thousands separators, e.g. "1,234".
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
# Decimals like "3.14".
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
# Pound amounts like "£5".
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
# Dollar amounts like "$3.50".
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
# Ordinals like "2nd".
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
# Any bare integer.
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
    """Strip thousands separators from a matched number: '1,234' -> '1234'."""
    digits = m.group(1)
    return digits.replace(',', '')
def _expand_decimal_point(m):
    """Render a matched decimal as words-friendly text: '3.14' -> '3 point 14'."""
    number = m.group(1)
    return number.replace('.', ' point ')
def _expand_dollars(m):
    """Spell out a matched dollar amount: '2.50' -> '2 dollars, 50 cents'."""
    amount = m.group(1)
    parts = amount.split('.')
    if len(parts) > 2:
        return amount + ' dollars'  # Unexpected format
    dollars = int(parts[0]) if parts[0] else 0
    cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
    dollar_unit = 'dollar' if dollars == 1 else 'dollars'
    cent_unit = 'cent' if cents == 1 else 'cents'
    if dollars and cents:
        return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
    if dollars:
        return '%s %s' % (dollars, dollar_unit)
    if cents:
        return '%s %s' % (cents, cent_unit)
    return 'zero dollars'
def _expand_ordinal(m):
    """Spell out an ordinal match such as '2nd' as English words."""
    ordinal = m.group(0)
    return _inflect.number_to_words(ordinal)
def _expand_number(m):
    """Spell out a matched integer; values in 1001-2999 get a year-style
    reading (e.g. 1984 -> 'nineteen eighty four')."""
    num = int(m.group(0))
    if not (1000 < num < 3000):
        return _inflect.number_to_words(num, andword='')
    if num == 2000:
        return 'two thousand'
    if 2000 < num < 2010:
        return 'two thousand ' + _inflect.number_to_words(num % 100)
    if num % 100 == 0:
        return _inflect.number_to_words(num // 100) + ' hundred'
    return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
def normalize_numbers(text):
    """Expand numerals, currency amounts and ordinals in `text` into words.
    Substitution order matters: separators are stripped and currency handled
    before plain decimals/integers."""
    for pattern, repl in (
        (_comma_number_re, _remove_commas),
        (_pounds_re, r'\1 pounds'),
        (_dollars_re, _expand_dollars),
        (_decimal_number_re, _expand_decimal_point),
        (_ordinal_re, _expand_ordinal),
        (_number_re, _expand_number),
    ):
        text = re.sub(pattern, repl, text)
    return text
def expand_numbers(text):
    """Alias for normalize_numbers(), kept for cleaner pipeline naming."""
    return normalize_numbers(text)
def lowercase(text):
    """Return `text` lowercased."""
    return text.lower()
def collapse_whitespace(text):
    """Collapse every run of whitespace down to a single space."""
    return re.sub(r'\s+', ' ', text)
def convert_to_ascii(text):
    """Transliterate `text` to its closest ASCII form via unidecode."""
    return unidecode(text)
def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    for step in (lowercase, collapse_whitespace):
        text = step(text)
    return text
def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    for step in (convert_to_ascii, lowercase, collapse_whitespace):
        text = step(text)
    return text
def english_cleaners(text):
    '''Pipeline for English text, including number and abbreviation expansion.'''
    for step in (convert_to_ascii, lowercase, expand_numbers,
                 expand_abbreviations, collapse_whitespace):
        text = step(text)
    return text.replace('"', '')
def lev_distance(s1, s2):
    """Levenshtein edit distance between two strings (O(len(s1)*len(s2)),
    O(min(len)) memory — rows are computed over the shorter string)."""
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s1) + 1))
    for i2, c2 in enumerate(s2):
        current = [i2 + 1]
        for i1, c1 in enumerate(s1):
            if c1 == c2:
                current.append(previous[i1])
            else:
                current.append(1 + min(previous[i1], previous[i1 + 1], current[-1]))
        previous = current
    return previous[-1]
class VoiceBpeTokenizer:
    """Thin wrapper around a HuggingFace ``tokenizers`` BPE vocabulary.

    Text is run through the English cleaning pipeline, spaces are mapped to
    the ``[SPACE]`` token before encoding, and special tokens are stripped
    out again on decode.
    """

    def __init__(self, vocab_file='ruth-tts-files/data/tokenizer.json'):
        # NOTE(review): when vocab_file is None, self.tokenizer is never set,
        # so encode/decode would raise AttributeError — confirm intended.
        if vocab_file is not None:
            self.tokenizer = Tokenizer.from_file(vocab_file)

    def preprocess_text(self, txt):
        """Apply the standard English text-cleaning pipeline."""
        return english_cleaners(txt)

    def encode(self, txt):
        """Clean *txt* and return its BPE token ids."""
        cleaned = self.preprocess_text(txt)
        cleaned = cleaned.replace(' ', '[SPACE]')
        return self.tokenizer.encode(cleaned).ids

    def decode(self, seq):
        """Convert a sequence of token ids (list, ndarray, or Tensor) to text."""
        if isinstance(seq, torch.Tensor):
            seq = seq.cpu().numpy()
        txt = self.tokenizer.decode(seq, skip_special_tokens=False).replace(' ', '')
        txt = txt.replace('[SPACE]', ' ')
        txt = txt.replace('[STOP]', '')
        txt = txt.replace('[UNK]', '')
        return txt
import re
def split_and_recombine_text(text, desired_length=200, max_length=300):
    """Split text it into chunks of a desired length trying to keep sentences intact.

    :param text: Input text; newlines and fancy quotes are normalized first.
    :param desired_length: Soft target length for each chunk.
    :param max_length: Hard ceiling; reaching it forces a split.
    :return: List of non-empty, stripped text chunks.
    """
    # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii
    text = re.sub(r'\n\n+', '\n', text)
    text = re.sub(r'\s+', ' ', text)
    text = re.sub(r'[“”]', '"', text)

    rv = []            # finished chunks
    in_quote = False   # tracks whether the cursor is inside a double-quoted span
    current = ""       # chunk being accumulated
    split_pos = []     # positions of sentence boundaries seen in `current`
    pos = -1           # cursor into `text`; seek(1) advances to index 0 first

    def seek(delta):
        # Move the cursor by `delta` characters (negative = backwards),
        # toggling the quote state for every '"' crossed either direction.
        # Returns (char at new pos, following char or "" at end of text).
        nonlocal pos, in_quote, text
        is_neg = delta < 0
        for _ in range(abs(delta)):
            if is_neg:
                pos -= 1
            else:
                pos += 1
            if text[pos] == '"':
                in_quote = not in_quote
        return text[pos], text[pos+1] if pos < len(text)-1 else ""

    def commit():
        # Flush the accumulated chunk and reset per-chunk state.
        nonlocal rv, current, split_pos
        rv.append(current)
        current = ""
        split_pos = []

    while pos < len(text) - 1:
        c, next_c = seek(1)
        current += c
        # do we need to force a split?
        if len(current) >= max_length:
            if len(split_pos) > 0 and len(current) > (desired_length / 2):
                # we have at least one sentence and we are over half the desired length, seek back to the last split
                d = pos - split_pos[-1]
                seek(-d)
                current = current[:-d]
            else:
                # no full sentences, seek back until we are not in the middle of a word and split there
                while c not in '!?.\n ' and pos > 0 and len(current) > desired_length:
                    c, _ = seek(-1)
                    current = current[:-1]
            commit()
        # check for sentence boundaries (never split inside a quoted span)
        elif not in_quote and (c in '!?\n' or (c == '.' and next_c in '\n ')):
            split_pos.append(pos)
            if len(current) >= desired_length:
                commit()
    # flush whatever remains after the last boundary
    rv.append(current)

    # clean up, drop blanks produced by trailing whitespace
    rv = [s.strip() for s in rv]
    rv = [s for s in rv if len(s) > 0]

    return rv
if __name__ == '__main__':
    # Self-test for split_and_recombine_text; run this module directly to execute.
    import unittest

    class Test(unittest.TestCase):
        def test_split_and_recombine_text(self):
            # Leading indentation inside the string is irrelevant: the function
            # collapses all whitespace before splitting.
            text = """
            This is a sample sentence.
            This is another sample sentence.
            This is a longer sample sentence that should force a split inthemiddlebutinotinthislongword.
            "Don't split my quote... please"
            """
            self.assertEqual(split_and_recombine_text(text, desired_length=20, max_length=40),
                             ['This is a sample sentence.',
                              'This is another sample sentence.',
                              'This is a longer sample sentence that',
                              'should force a split',
                              'inthemiddlebutinotinthislongword.',
                              '"Don\'t split my quote... please"'])

    unittest.main()
import os
import random
import uuid
from urllib import request
import torch
import torch.nn.functional as F
import progressbar
import torchaudio
from ruth_tts_transformer.ruth_tts.models.classifier import AudioMiniEncoderWithClassifierHead
from ruth_tts_transformer.ruth_tts.models.cvvp import CVVP
from ruth_tts_transformer.ruth_tts.models.diffusion_decoder import DiffusionTts
from ruth_tts_transformer.ruth_tts.models.autoregressive import UnifiedVoice
from tqdm import tqdm
from ruth_tts_transformer.ruth_tts.models.arch_util import TorchMelSpectrogram
from ruth_tts_transformer.ruth_tts.models.clvp import CLVP
from ruth_tts_transformer.ruth_tts.models.random_latent_generator import RandomLatentConverter
from ruth_tts_transformer.ruth_tts.models.vocoder import UnivNetGenerator
from ruth_tts_transformer.ruth_tts.utils.audio import wav_to_univnet_mel, denormalize_tacotron_mel
from ruth_tts_transformer.ruth_tts.utils.diffusion import SpacedDiffusion, space_timesteps, get_named_beta_schedule
from ruth_tts_transformer.ruth_tts.utils.tokenizer import VoiceBpeTokenizer
from ruth_tts_transformer.ruth_tts.utils.wav2vec_alignment import Wav2VecAlignment
pbar = None
def download_models(specific_models=None):
    """
    Call to download all the models that Tortoise uses.

    :param specific_models: Optional collection of model filenames; when given,
        only those models are fetched. Already-downloaded files are skipped.
    """
    MODELS = {
        'autoregressive.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/autoregressive.pth',
        'classifier.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/classifier.pth',
        'clvp2.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/clvp2.pth',
        'cvvp.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/cvvp.pth',
        'diffusion_decoder.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/diffusion_decoder.pth',
        'vocoder.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/vocoder.pth',
        'rlg_auto.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/rlg_auto.pth',
        'rlg_diffuser.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/rlg_diffuser.pth',
    }
    os.makedirs('.models', exist_ok=True)

    def show_progress(block_num, block_size, total_size):
        # urlretrieve reporthook; uses the module-level pbar so a fresh bar
        # is created per download and torn down when it completes.
        global pbar
        if pbar is None:
            pbar = progressbar.ProgressBar(maxval=total_size)
            pbar.start()
        downloaded = block_num * block_size
        if downloaded >= total_size:
            pbar.finish()
            pbar = None
        else:
            pbar.update(downloaded)

    for model_name, url in MODELS.items():
        wanted = specific_models is None or model_name in specific_models
        if not wanted or os.path.exists(f'.models/{model_name}'):
            continue
        print(f'Downloading {model_name} from {url}...')
        request.urlretrieve(url, f'.models/{model_name}', show_progress)
        print('Done.')
def pad_or_truncate(t, length):
    """
    Force <t> to the given trailing-dimension length, zero-padding on the
    right when too short and clipping when too long. Returns t unchanged
    (same object) when the length already matches.
    """
    current = t.shape[-1]
    if current < length:
        return F.pad(t, (0, length - current))
    if current > length:
        return t[..., :length]
    return t
def load_discrete_vocoder_diffuser(trained_diffusion_steps=4000, desired_diffusion_steps=200, cond_free=True, cond_free_k=1):
    """
    Helper to build a SpacedDiffusion instance configured for vocoder use:
    epsilon mean prediction, learned-range variance, MSE loss, linear betas.
    """
    timesteps = space_timesteps(trained_diffusion_steps, [desired_diffusion_steps])
    betas = get_named_beta_schedule('linear', trained_diffusion_steps)
    return SpacedDiffusion(
        use_timesteps=timesteps,
        model_mean_type='epsilon',
        model_var_type='learned_range',
        loss_type='mse',
        betas=betas,
        conditioning_free=cond_free,
        conditioning_free_k=cond_free_k,
    )
def format_conditioning(clip, cond_length=132300):
    """
    Convert a conditioning waveform to a MEL spectrogram, first zero-padding
    or randomly cropping it to exactly cond_length samples.
    """
    surplus = clip.shape[-1] - cond_length
    if surplus < 0:
        # Too short: pad the tail with zeros.
        clip = F.pad(clip, pad=(0, -surplus))
    elif surplus > 0:
        # Too long: take a random cond_length window.
        start = random.randint(0, surplus)
        clip = clip[:, start:start + cond_length]
    mel = TorchMelSpectrogram()(clip.unsqueeze(0)).squeeze(0)
    return mel.unsqueeze(0).cuda()
def fix_autoregressive_output(codes, stop_token, complain=True):
    """
    This function performs some padding on coded audio that fixes a mismatch issue between what the diffusion model was
    trained on and what the autoregressive code generator creates (which has no padding or end).
    This is highly specific to the DVAE being used, so this particular coding will not necessarily work if used with
    a different DVAE. This can be inferred by feeding a audio clip padded with lots of zeros on the end through the DVAE
    and copying out the last few codes.

    Failing to do this padding will produce speech with a harsh end that sounds like "BLAH" or similar.

    Mutates `codes` in place and returns it.
    """
    stop_positions = (codes == stop_token).nonzero()
    if len(stop_positions) == 0:
        if complain:
            print("No stop tokens found in one of the generated voice clips. This typically means the spoken audio is "
                  "too long. In some cases, the output will still be good, though. Listen to it and if it is missing words, "
                  "try breaking up your input text.")
        return codes
    # Overwrite everything from the first stop token onward with the silence
    # code (83), then terminate with the DVAE's canonical end-of-clip codes.
    codes[stop_positions] = 83
    first_stop = stop_positions.min().item()
    codes[first_stop:] = 83
    if first_stop - 3 < codes.shape[0]:
        codes[-3] = 45
        codes[-2] = 45
        codes[-1] = 248
    return codes
def do_spectrogram_diffusion(diffusion_model, diffuser, latents, conditioning_latents, temperature=1, verbose=True):
    """
    Uses the specified diffusion model to convert discrete codes into a spectrogram.
    Returns the denormalized mel, trimmed to the computed output length.
    """
    with torch.no_grad():
        # This diffusion model converts from 22kHz spectrogram codes to a
        # 24kHz spectrogram signal: 4x frame upsample, then rate conversion.
        output_seq_len = latents.shape[1] * 4 * 24000 // 22050
        output_shape = (latents.shape[0], 100, output_seq_len)
        precomputed = diffusion_model.timestep_independent(latents, conditioning_latents, output_seq_len, False)
        noise = torch.randn(output_shape, device=latents.device) * temperature
        mel = diffuser.p_sample_loop(
            diffusion_model, output_shape, noise=noise,
            model_kwargs={'precomputed_aligned_embeddings': precomputed},
            progress=verbose)
        return denormalize_tacotron_mel(mel)[:, :, :output_seq_len]
def classify_audio_clip(clip):
    """
    Returns whether or not Tortoises' classifier thinks the given clip came from Tortoise.
    :param clip: torch tensor containing audio waveform data (get it from load_audio)
    :return: probability that the clip was generated by Tortoise (index 0 of the softmax).
    """
    download_models(['classifier.pth'])
    classifier = AudioMiniEncoderWithClassifierHead(
        2, spec_dim=1, embedding_dim=512, depth=5, downsample_factor=4,
        resnet_blocks=2, attn_blocks=4, num_attn_heads=4, base_channels=32,
        dropout=0, kernel_size=5, distribute_zero_label=False)
    state = torch.load('.models/classifier.pth', map_location=torch.device('cpu'))
    classifier.load_state_dict(state)
    batch = clip.cpu().unsqueeze(0)
    probs = F.softmax(classifier(batch), dim=-1)
    return probs[0][0]
class TextToSpeech:
    """
    Main entry point into Tortoise. Owns all sub-models (autoregressive, CLVP,
    CVVP, diffusion decoder, vocoder) and shuttles each between CPU and GPU so
    only the model currently in use occupies GPU memory.
    """
    def __init__(self, autoregressive_batch_size=16, models_dir='.models', enable_redaction=True):
        """
        Constructor
        :param autoregressive_batch_size: Specifies how many samples to generate per batch. Lower this if you are seeing
                                          GPU OOM errors. Larger numbers generates slightly faster.
        :param models_dir: Where model weights are stored. This should only be specified if you are providing your own
                           models, otherwise use the defaults.
        :param enable_redaction: When true, text enclosed in brackets are automatically redacted from the spoken output
                                 (but are still rendered by the model). This can be used for prompt engineering.
                                 Default is true.
        """
        self.autoregressive_batch_size = autoregressive_batch_size
        self.enable_redaction = enable_redaction
        if self.enable_redaction:
            self.aligner = Wav2VecAlignment()
        self.tokenizer = VoiceBpeTokenizer()
        download_models()
        if os.path.exists(f'{models_dir}/autoregressive.ptt'):
            # Assume this is a traced directory.
            self.autoregressive = torch.jit.load(f'{models_dir}/autoregressive.ptt')
            self.diffusion = torch.jit.load(f'{models_dir}/diffusion_decoder.ptt')
        else:
            self.autoregressive = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2, layers=30,
                                               model_dim=1024,
                                               heads=16, number_text_tokens=255, start_text_token=255, checkpointing=False,
                                               train_solo_embeddings=False).cpu().eval()
            self.autoregressive.load_state_dict(torch.load(f'{models_dir}/autoregressive.pth'))
            self.diffusion = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
                                          in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False, num_heads=16,
                                          layer_drop=0, unconditioned_percentage=0).cpu().eval()
            self.diffusion.load_state_dict(torch.load(f'{models_dir}/diffusion_decoder.pth'))
        self.clvp = CLVP(dim_text=768, dim_speech=768, dim_latent=768, num_text_tokens=256, text_enc_depth=20,
                         text_seq_len=350, text_heads=12,
                         num_speech_tokens=8192, speech_enc_depth=20, speech_heads=12, speech_seq_len=430,
                         use_xformers=True).cpu().eval()
        self.clvp.load_state_dict(torch.load(f'{models_dir}/clvp2.pth'))
        self.cvvp = CVVP(model_dim=512, transformer_heads=8, dropout=0, mel_codes=8192, conditioning_enc_depth=8, cond_mask_percentage=0,
                         speech_enc_depth=8, speech_mask_percentage=0, latent_multiplier=1).cpu().eval()
        self.cvvp.load_state_dict(torch.load(f'{models_dir}/cvvp.pth'))
        self.vocoder = UnivNetGenerator().cpu()
        self.vocoder.load_state_dict(torch.load(f'{models_dir}/vocoder.pth')['model_g'])
        self.vocoder.eval(inference=True)

        # Random latent generators (RLGs) are loaded lazily.
        self.rlg_auto = None
        self.rlg_diffusion = None

    def get_conditioning_latents(self, voice_samples, return_mels=False):
        """
        Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent).
        These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic
        properties.
        :param voice_samples: List of 2 or more ~10 second reference clips, which should be torch tensors containing 22.05kHz waveform data.
        :param return_mels: When True, also returns the raw conditioning mels for the AR and diffusion models.
        """
        with torch.no_grad():
            voice_samples = [v.to('cuda') for v in voice_samples]

            auto_conds = []
            # NOTE(review): this isinstance check is dead code — the list
            # comprehension above has already made voice_samples a list. A
            # bare-tensor argument would be iterated row-wise by that
            # comprehension instead; confirm callers always pass a list.
            if not isinstance(voice_samples, list):
                voice_samples = [voice_samples]
            for vs in voice_samples:
                auto_conds.append(format_conditioning(vs))
            auto_conds = torch.stack(auto_conds, dim=1)
            # Move AR model to GPU just for this call, then back to CPU.
            self.autoregressive = self.autoregressive.cuda()
            auto_latent = self.autoregressive.get_conditioning(auto_conds)
            self.autoregressive = self.autoregressive.cpu()

            diffusion_conds = []
            for sample in voice_samples:
                # The diffuser operates at a sample rate of 24000 (except for the latent inputs)
                sample = torchaudio.functional.resample(sample, 22050, 24000)
                sample = pad_or_truncate(sample, 102400)
                cond_mel = wav_to_univnet_mel(sample.to('cuda'), do_normalization=False)
                diffusion_conds.append(cond_mel)
            diffusion_conds = torch.stack(diffusion_conds, dim=1)

            self.diffusion = self.diffusion.cuda()
            diffusion_latent = self.diffusion.get_conditioning(diffusion_conds)
            self.diffusion = self.diffusion.cpu()

        if return_mels:
            return auto_latent, diffusion_latent, auto_conds, diffusion_conds
        else:
            return auto_latent, diffusion_latent

    def get_random_conditioning_latents(self):
        """Sample random conditioning latents (an unseen "random voice")."""
        # Lazy-load the RLG models.
        if self.rlg_auto is None:
            self.rlg_auto = RandomLatentConverter(1024).eval()
            self.rlg_auto.load_state_dict(torch.load('.models/rlg_auto.pth', map_location=torch.device('cpu')))
            self.rlg_diffusion = RandomLatentConverter(2048).eval()
            self.rlg_diffusion.load_state_dict(torch.load('.models/rlg_diffuser.pth', map_location=torch.device('cpu')))
        with torch.no_grad():
            return self.rlg_auto(torch.tensor([0.0])), self.rlg_diffusion(torch.tensor([0.0]))

    def tts_with_preset(self, text, preset='fast', **kwargs):
        """
        Calls TTS with one of a set of preset generation parameters. Options:
            'ultra_fast': Produces speech at a speed which belies the name of this repo. (Not really, but it's definitely fastest).
            'fast': Decent quality speech at a decent inference rate. A good choice for mass inference.
            'standard': Very good quality. This is generally about as good as you are going to get.
            'high_quality': Use if you want the absolute best. This is not really worth the compute, though.
        """
        # Use generally found best tuning knobs for generation.
        # NOTE(review): these update() calls overwrite any caller-supplied
        # values for the same keys — confirm that precedence is intended.
        kwargs.update({'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0,
                       'top_p': .8,
                       'cond_free_k': 2.0, 'diffusion_temperature': 1.0})
        # Presets are defined here.
        presets = {
            'ultra_fast': {'num_autoregressive_samples': 16, 'diffusion_iterations': 30, 'cond_free': False},
            'fast': {'num_autoregressive_samples': 96, 'diffusion_iterations': 80},
            'standard': {'num_autoregressive_samples': 256, 'diffusion_iterations': 200},
            'high_quality': {'num_autoregressive_samples': 256, 'diffusion_iterations': 400},
        }
        kwargs.update(presets[preset])
        return self.tts(text, **kwargs)

    def tts(self, text, voice_samples=None, conditioning_latents=None, k=1, verbose=True,
            # autoregressive generation parameters follow
            num_autoregressive_samples=512, temperature=.8, length_penalty=1, repetition_penalty=2.0, top_p=.8, max_mel_tokens=500,
            # CLVP & CVVP parameters
            clvp_cvvp_slider=.5,
            # diffusion generation parameters follow
            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,
            **hf_generate_kwargs):
        """
        Produces an audio clip of the given text being spoken with the given reference voice.
        :param text: Text to be spoken.
        :param voice_samples: List of 2 or more ~10 second reference clips which should be torch tensors containing 22.05kHz waveform data.
        :param conditioning_latents: A tuple of (autoregressive_conditioning_latent, diffusion_conditioning_latent), which
                                     can be provided in lieu of voice_samples. This is ignored unless voice_samples=None.
                                     Conditioning latents can be retrieved via get_conditioning_latents().
        :param k: The number of returned clips. The most likely (as determined by Tortoises' CLVP and CVVP models) clips are returned.
        :param verbose: Whether or not to print log messages indicating the progress of creating a clip. Default=true.
        ~~AUTOREGRESSIVE KNOBS~~
        :param num_autoregressive_samples: Number of samples taken from the autoregressive model, all of which are filtered using CLVP+CVVP.
               As Tortoise is a probabilistic model, more samples means a higher probability of creating something "great".
        :param temperature: The softmax temperature of the autoregressive model.
        :param length_penalty: A length penalty applied to the autoregressive decoder. Higher settings causes the model to produce more terse outputs.
        :param repetition_penalty: A penalty that prevents the autoregressive decoder from repeating itself during decoding. Can be used to reduce the incidence
                                   of long silences or "uhhhhhhs", etc.
        :param top_p: P value used in nucleus sampling. (0,1]. Lower values mean the decoder produces more "likely" (aka boring) outputs.
        :param max_mel_tokens: Restricts the output length. (0,600] integer. Each unit is 1/20 of a second.
        ~~CLVP-CVVP KNOBS~~
        :param clvp_cvvp_slider: Controls the influence of the CLVP and CVVP models in selecting the best output from the autoregressive model.
                                 [0,1]. Values closer to 1 will cause Tortoise to emit clips that follow the text more. Values closer to
                                 0 will cause Tortoise to emit clips that more closely follow the reference clip (e.g. the voice sounds more
                                 similar).
        ~~DIFFUSION KNOBS~~
        :param diffusion_iterations: Number of diffusion steps to perform. [0,4000]. More steps means the network has more chances to iteratively refine
                                     the output, which should theoretically mean a higher quality output. Generally a value above 250 is not noticeably better,
                                     however.
        :param cond_free: Whether or not to perform conditioning-free diffusion. Conditioning-free diffusion performs two forward passes for
                          each diffusion step: one with the outputs of the autoregressive model and one with no conditioning priors. The output
                          of the two is blended according to the cond_free_k value below. Conditioning-free diffusion is the real deal, and
                          dramatically improves realism.
        :param cond_free_k: Knob that determines how to balance the conditioning free signal with the conditioning-present signal. [0,inf].
                            As cond_free_k increases, the output becomes dominated by the conditioning-free signal.
                            Formula is: output=cond_present_output*(cond_free_k+1)-cond_absenct_output*cond_free_k
        :param diffusion_temperature: Controls the variance of the noise fed into the diffusion model. [0,1]. Values at 0
                                      are the "mean" prediction of the diffusion network and will sound bland and smeared.
        ~~OTHER STUFF~~
        :param hf_generate_kwargs: The huggingface Transformers generate API is used for the autoregressive transformer.
                                   Extra keyword args fed to this function get forwarded directly to that API. Documentation
                                   here: https://huggingface.co/docs/transformers/internal/generation_utils
        :return: Generated audio clip(s) as a torch tensor. Shape 1,S if k=1 else, (k,1,S) where S is the sample length.
                 Sample rate is 24kHz.
        """
        text_tokens = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
        text_tokens = F.pad(text_tokens, (0, 1))  # This may not be necessary.
        assert text_tokens.shape[-1] < 400, 'Too much text provided. Break the text up into separate segments and re-try inference.'

        # Resolve conditioning: explicit samples > precomputed latents > random voice.
        auto_conds = None
        if voice_samples is not None:
            auto_conditioning, diffusion_conditioning, auto_conds, _ = self.get_conditioning_latents(voice_samples, return_mels=True)
        elif conditioning_latents is not None:
            auto_conditioning, diffusion_conditioning = conditioning_latents
        else:
            auto_conditioning, diffusion_conditioning = self.get_random_conditioning_latents()
        auto_conditioning = auto_conditioning.cuda()
        diffusion_conditioning = diffusion_conditioning.cuda()

        diffuser = load_discrete_vocoder_diffuser(desired_diffusion_steps=diffusion_iterations, cond_free=cond_free, cond_free_k=cond_free_k)

        with torch.no_grad():
            samples = []
            # NOTE(review): integer division silently drops the remainder when
            # num_autoregressive_samples is not a multiple of the batch size.
            num_batches = num_autoregressive_samples // self.autoregressive_batch_size
            stop_mel_token = self.autoregressive.stop_mel_token
            calm_token = 83  # This is the token for coding silence, which is fixed in place with "fix_autoregressive_output"
            self.autoregressive = self.autoregressive.cuda()
            if verbose:
                print("Generating autoregressive samples..")
            for b in tqdm(range(num_batches), disable=not verbose):
                codes = self.autoregressive.inference_speech(auto_conditioning, text_tokens,
                                                             do_sample=True,
                                                             top_p=top_p,
                                                             temperature=temperature,
                                                             num_return_sequences=self.autoregressive_batch_size,
                                                             length_penalty=length_penalty,
                                                             repetition_penalty=repetition_penalty,
                                                             max_generate_length=max_mel_tokens,
                                                             **hf_generate_kwargs)
                # Pad every batch out to max_mel_tokens so they can be concatenated later.
                padding_needed = max_mel_tokens - codes.shape[1]
                codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
                samples.append(codes)
            self.autoregressive = self.autoregressive.cpu()

            clip_results = []
            self.clvp = self.clvp.cuda()
            self.cvvp = self.cvvp.cuda()
            if verbose:
                print("Computing best candidates using CLVP and CVVP")
            for batch in tqdm(samples, disable=not verbose):
                for i in range(batch.shape[0]):
                    batch[i] = fix_autoregressive_output(batch[i], stop_mel_token)
                clvp = self.clvp(text_tokens.repeat(batch.shape[0], 1), batch, return_loss=False)
                if auto_conds is not None:
                    # Average the CVVP score over every provided reference clip.
                    cvvp_accumulator = 0
                    for cl in range(auto_conds.shape[1]):
                        cvvp_accumulator = cvvp_accumulator + self.cvvp(auto_conds[:, cl].repeat(batch.shape[0], 1, 1), batch, return_loss=False)
                    cvvp = cvvp_accumulator / auto_conds.shape[1]
                    clip_results.append(clvp * clvp_cvvp_slider + cvvp * (1-clvp_cvvp_slider))
                else:
                    clip_results.append(clvp)
            clip_results = torch.cat(clip_results, dim=0)
            samples = torch.cat(samples, dim=0)
            best_results = samples[torch.topk(clip_results, k=k).indices]
            self.clvp = self.clvp.cpu()
            self.cvvp = self.cvvp.cpu()
            del samples

            # The diffusion model actually wants the last hidden layer from the autoregressive model as conditioning
            # inputs. Re-produce those for the top results. This could be made more efficient by storing all of these
            # results, but will increase memory usage.
            self.autoregressive = self.autoregressive.cuda()
            best_latents = self.autoregressive(auto_conditioning.repeat(k, 1), text_tokens.repeat(k, 1),
                                               torch.tensor([text_tokens.shape[-1]], device=text_tokens.device), best_results,
                                               torch.tensor([best_results.shape[-1]*self.autoregressive.mel_length_compression], device=text_tokens.device),
                                               return_latent=True, clip_inputs=False)
            self.autoregressive = self.autoregressive.cpu()
            del auto_conditioning

            if verbose:
                print("Transforming autoregressive outputs into audio..")
            wav_candidates = []
            self.diffusion = self.diffusion.cuda()
            self.vocoder = self.vocoder.cuda()
            for b in range(best_results.shape[0]):
                codes = best_results[b].unsqueeze(0)
                latents = best_latents[b].unsqueeze(0)

                # Find the first occurrence of the "calm" token and trim the codes to that.
                # NOTE(review): this loop variable shadows the parameter `k`;
                # harmless here because `k` is not read again afterwards.
                ctokens = 0
                for k in range(codes.shape[-1]):
                    if codes[0, k] == calm_token:
                        ctokens += 1
                    else:
                        ctokens = 0
                    if ctokens > 8:  # 8 tokens gives the diffusion model some "breathing room" to terminate speech.
                        latents = latents[:, :k]
                        break

                mel = do_spectrogram_diffusion(self.diffusion, diffuser, latents, diffusion_conditioning,
                                               temperature=diffusion_temperature, verbose=verbose)
                wav = self.vocoder.inference(mel)
                wav_candidates.append(wav.cpu())
            self.diffusion = self.diffusion.cpu()
            self.vocoder = self.vocoder.cpu()

            def potentially_redact(clip, text):
                # Strip bracketed (redacted) spans from the rendered audio.
                if self.enable_redaction:
                    return self.aligner.redact(clip.squeeze(1), text).unsqueeze(1)
                return clip
            wav_candidates = [potentially_redact(wav_candidate, text) for wav_candidate in wav_candidates]
            if len(wav_candidates) > 1:
                return wav_candidates
            return wav_candidates[0]
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from ruth_tts_transformer.ruth_tts.models.arch_util import Upsample, Downsample, normalization, zero_module, AttentionBlock
class ResBlock(nn.Module):
    """1-D residual block with optional up/down-sampling.

    Structure: norm -> SiLU -> conv, optional resample, norm -> SiLU ->
    dropout -> zero-initialized conv, plus a skip connection that projects
    the input when the channel count changes.

    :param channels: Number of input channels.
    :param dropout: Dropout probability applied before the final conv.
    :param out_channels: Output channels; defaults to ``channels``.
    :param use_conv: When projecting the skip, use a full ``kernel_size`` conv
        instead of a 1x1 conv.
    :param use_scale_shift_norm: Stored for interface parity; not used by the
        forward pass of this block.
    :param dims: Kept for interface compatibility; this block is 1-D only and
        does not use it (beyond passing it to Upsample/Downsample).
    :param up / down: Enable up- or down-sampling inside the block.
    :param kernel_size: 3 or 5 (padding is derived assuming these values).
    :param do_checkpoint: Wrap the forward pass in gradient checkpointing.
    """
    def __init__(
            self,
            channels,
            dropout,
            out_channels=None,
            use_conv=False,
            use_scale_shift_norm=False,
            dims=2,
            up=False,
            down=False,
            kernel_size=3,
            do_checkpoint=True,
    ):
        super().__init__()
        self.channels = channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_scale_shift_norm = use_scale_shift_norm
        self.do_checkpoint = do_checkpoint
        padding = 1 if kernel_size == 3 else 2

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding),
        )

        self.updown = up or down

        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            # BUG FIX: nn.Conv1d takes (in_channels, out_channels, kernel_size).
            # The previous code passed `dims` as in_channels and out_channels as
            # the kernel size (a leftover from an upstream conv_nd(dims, ...)
            # helper), which broke any block with out_channels != channels.
            self.skip_connection = nn.Conv1d(
                channels, self.out_channels, kernel_size, padding=padding
            )
        else:
            # Same fix for the 1x1 projection path.
            self.skip_connection = nn.Conv1d(channels, self.out_channels, 1)

    def forward(self, x):
        """Run the block, optionally under gradient checkpointing."""
        if self.do_checkpoint:
            return checkpoint(
                self._forward, x
            )
        else:
            return self._forward(x)

    def _forward(self, x):
        # When resampling, apply the resample between the first norm/act and
        # the first conv, and resample the skip path identically.
        if self.updown:
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        h = self.out_layers(h)
        return self.skip_connection(x) + h
class AudioMiniEncoder(nn.Module):
    """Small convolutional encoder that maps a spectrogram to one embedding.

    Stack: init conv -> ``depth`` stages of (resnet_blocks x ResBlock +
    2x-channel Downsample) -> 1x1 projection to ``embedding_dim`` ->
    ``attn_blocks`` attention blocks. forward() returns the embedding at the
    first time position.
    """
    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 base_channels=128,
                 depth=2,
                 resnet_blocks=2,
                 attn_blocks=4,
                 num_attn_heads=4,
                 dropout=0,
                 downsample_factor=2,
                 kernel_size=3):
        super().__init__()
        self.init = nn.Sequential(
            nn.Conv1d(spec_dim, base_channels, 3, padding=1)
        )
        ch = base_channels
        res = []
        # NOTE: despite the name, self.layers stores the depth count, not modules.
        self.layers = depth
        for l in range(depth):
            for r in range(resnet_blocks):
                res.append(ResBlock(ch, dropout, do_checkpoint=False, kernel_size=kernel_size))
            # Each stage halves the temporal resolution (by downsample_factor)
            # and doubles the channel count.
            res.append(Downsample(ch, use_conv=True, out_channels=ch*2, factor=downsample_factor))
            ch *= 2
        self.res = nn.Sequential(*res)
        self.final = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            nn.Conv1d(ch, embedding_dim, 1)
        )
        attn = []
        for a in range(attn_blocks):
            attn.append(AttentionBlock(embedding_dim, num_attn_heads, do_checkpoint=False))
        self.attn = nn.Sequential(*attn)
        self.dim = embedding_dim

    def forward(self, x):
        """Encode spectrogram ``x`` -> [batch, embedding_dim] embedding."""
        h = self.init(x)
        h = self.res(h)
        h = self.final(h)
        # Attention blocks are checkpointed manually to save activation memory.
        for blk in self.attn:
            h = checkpoint(blk, h)
        # Use the first time position as the clip-level embedding.
        return h[:, :, 0]
class AudioMiniEncoderWithClassifierHead(nn.Module):
    """AudioMiniEncoder followed by a linear classification head.

    With labels, returns the cross-entropy loss; without, returns raw logits.
    """
    def __init__(self, classes, distribute_zero_label=True, **kwargs):
        super().__init__()
        self.enc = AudioMiniEncoder(**kwargs)
        self.head = nn.Linear(self.enc.dim, classes)
        self.num_classes = classes
        # When True, class-0 labels are softened into a distribution (see forward).
        self.distribute_zero_label = distribute_zero_label

    def forward(self, x, labels=None):
        """
        :param x: Spectrogram batch fed to the encoder.
        :param labels: Optional integer class labels; when given, the
                       cross-entropy loss is returned instead of logits.
        """
        h = self.enc(x)
        logits = self.head(h)
        if labels is None:
            return logits
        else:
            if self.distribute_zero_label:
                oh_labels = nn.functional.one_hot(labels, num_classes=self.num_classes)
                zeros_indices = (labels == 0).unsqueeze(-1)
                # Distribute 20% of the probability mass on all classes when zero is specified, to compensate for dataset noise.
                zero_extra_mass = torch.full_like(oh_labels, dtype=torch.float, fill_value=.2/(self.num_classes-1))
                zero_extra_mass[:, 0] = -.2
                # Only rows whose label is 0 get the softened distribution.
                zero_extra_mass = zero_extra_mass * zeros_indices
                oh_labels = oh_labels + zero_extra_mass
            else:
                oh_labels = labels
            loss = nn.functional.cross_entropy(logits, oh_labels)
            return loss
import math
import random
from abc import abstractmethod
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autocast
from ruth_tts_transformer.ruth_tts.models.arch_util import normalization, AttentionBlock
def is_latent(t):
    """Heuristic: float-typed tensors are conditioning latents."""
    return t.dtype == torch.float32
def is_sequence(t):
    """Heuristic: long-typed tensors are discrete token sequences."""
    return t.dtype == torch.int64
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    # Geometric frequency ladder running from 1 down toward 1/max_period.
    exponent = -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
    freqs = torch.exp(exponent).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
    if dim % 2:
        # Odd target dimension: append a zero column to reach exactly `dim`.
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
class TimestepBlock(nn.Module):
    """Interface for modules whose forward() also takes timestep embeddings."""
    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """A Sequential that forwards timestep embeddings to TimestepBlock children.

    Non-TimestepBlock children are called with the activations only.
    """
    def forward(self, x, emb):
        for module in self:
            x = module(x, emb) if isinstance(module, TimestepBlock) else module(x)
        return x
class ResBlock(TimestepBlock):
    """Timestep-conditioned 1-D residual block for the diffusion decoder.

    The timestep embedding is projected and injected either additively or as
    a scale/shift (FiLM-style) modulation when ``use_scale_shift_norm`` is set.
    Attribute names here are state_dict keys; do not rename.
    """
    def __init__(
            self,
            channels,
            emb_channels,
            dropout,
            out_channels=None,
            dims=2,
            kernel_size=3,
            efficient_config=True,
            use_scale_shift_norm=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_scale_shift_norm = use_scale_shift_norm
        # Only kernel sizes 1/3/5 are supported (padding lookup below).
        padding = {1: 0, 3: 1, 5: 2}[kernel_size]
        # "Efficient" mode uses 1x1 convs for the input and skip projections.
        eff_kernel = 1 if efficient_config else 3
        eff_padding = 0 if efficient_config else 1
        # NOTE(review): `dims` is accepted but unused — every conv here is Conv1d.

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, self.out_channels, eff_kernel, padding=eff_padding),
        )

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(
                emb_channels,
                # Scale-shift mode needs two vectors (scale and shift).
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        else:
            self.skip_connection = nn.Conv1d(channels, self.out_channels, eff_kernel, padding=eff_padding)

    def forward(self, x, emb):
        """
        :param x: [batch, channels, time] activations.
        :param emb: [batch, emb_channels] timestep embeddings.
        """
        h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over the trailing (time) dimension(s).
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM-style: modulate the normalized activations, then run the rest.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = torch.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class DiffusionLayer(TimestepBlock):
    """ResBlock + self-attention pair — the main repeating unit of DiffusionTts."""

    def __init__(self, model_channels, dropout, num_heads):
        super().__init__()
        self.resblk = ResBlock(model_channels, model_channels, dropout, model_channels,
                               dims=1, use_scale_shift_norm=True)
        self.attn = AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True)

    def forward(self, x, time_emb):
        # Timestep-conditioned residual pass, then attention over the sequence.
        return self.attn(self.resblk(x, time_emb))
class DiffusionTts(nn.Module):
    """
    Diffusion network predicting per-frame mel statistics (mean and variance —
    hence out_channels defaults to 2 * in_channels), conditioned on either
    aligned text tokens or autoregressive latents, plus a voice conditioning
    latent produced by get_conditioning().
    """
    def __init__(
            self,
            model_channels=512,
            num_layers=8,
            in_channels=100,
            in_latent_channels=512,
            in_tokens=8193,
            out_channels=200,  # mean and variance
            dropout=0,
            use_fp16=False,
            num_heads=16,
            # Parameters for regularization.
            layer_drop=.1,
            unconditioned_percentage=.1,  # This implements a mechanism similar to what is used in classifier-free training.
    ):
        super().__init__()
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.dropout = dropout
        self.num_heads = num_heads
        self.unconditioned_percentage = unconditioned_percentage
        self.enable_fp16 = use_fp16
        self.layer_drop = layer_drop
        self.inp_block = nn.Conv1d(in_channels, model_channels, 3, 1, 1)
        self.time_embed = nn.Sequential(
            nn.Linear(model_channels, model_channels),
            nn.SiLU(),
            nn.Linear(model_channels, model_channels),
        )
        # Either code_converter or latent_converter is used, depending on what type of conditioning data is fed.
        # This model is meant to be able to be trained on both for efficiency purposes - it is far less computationally
        # complex to generate tokens, while generating latents will normally mean propagating through a deep autoregressive
        # transformer network.
        self.code_embedding = nn.Embedding(in_tokens, model_channels)
        self.code_converter = nn.Sequential(
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
        )
        self.code_norm = normalization(model_channels)
        self.latent_conditioner = nn.Sequential(
            nn.Conv1d(in_latent_channels, model_channels, 3, padding=1),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
            AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),
        )
        # Reduces a reference mel clip to a (B, 2*model_channels) voice summary (see get_conditioning()).
        self.contextual_embedder = nn.Sequential(nn.Conv1d(in_channels,model_channels,3,padding=1,stride=2),
                                                 nn.Conv1d(model_channels, model_channels*2,3,padding=1,stride=2),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False),
                                                 AttentionBlock(model_channels*2, num_heads, relative_pos_embeddings=True, do_checkpoint=False))
        # Learned embedding substituted for the conditioning branch during unconditioned (classifier-free) passes.
        self.unconditioned_embedding = nn.Parameter(torch.randn(1,model_channels,1))
        self.conditioning_timestep_integrator = TimestepEmbedSequential(
            DiffusionLayer(model_channels, dropout, num_heads),
            DiffusionLayer(model_channels, dropout, num_heads),
            DiffusionLayer(model_channels, dropout, num_heads),
        )
        self.integrating_conv = nn.Conv1d(model_channels*2, model_channels, kernel_size=1)
        self.mel_head = nn.Conv1d(model_channels, in_channels, kernel_size=3, padding=1)
        self.layers = nn.ModuleList([DiffusionLayer(model_channels, dropout, num_heads) for _ in range(num_layers)] +
                                    [ResBlock(model_channels, model_channels, dropout, dims=1, use_scale_shift_norm=True) for _ in range(3)])
        self.out = nn.Sequential(
            normalization(model_channels),
            nn.SiLU(),
            nn.Conv1d(model_channels, out_channels, 3, padding=1),
        )

    def get_grad_norm_parameter_groups(self):
        """Named parameter groups for per-group gradient-norm logging."""
        groups = {
            'minicoder': list(self.contextual_embedder.parameters()),
            'layers': list(self.layers.parameters()),
            # BUGFIX: latent_conditioner parameters were previously listed twice in this group.
            'code_converters': list(self.code_embedding.parameters()) + list(self.code_converter.parameters()) + list(self.latent_conditioner.parameters()),
            'timestep_integrator': list(self.conditioning_timestep_integrator.parameters()) + list(self.integrating_conv.parameters()),
            'time_embed': list(self.time_embed.parameters()),
        }
        return groups

    def get_conditioning(self, conditioning_input):
        """Collapse one or more reference mel clips into a single (B, 2*model_channels) latent."""
        speech_conditioning_input = conditioning_input.unsqueeze(1) if len(
            conditioning_input.shape) == 3 else conditioning_input
        conds = []
        # Embed each reference clip independently, then average over all frames/clips.
        for j in range(speech_conditioning_input.shape[1]):
            conds.append(self.contextual_embedder(speech_conditioning_input[:, j]))
        conds = torch.cat(conds, dim=-1)
        conds = conds.mean(dim=-1)
        return conds

    def timestep_independent(self, aligned_conditioning, conditioning_latent, expected_seq_len, return_code_pred):
        """Compute the timestep-independent conditioning embedding (and optionally a mel prediction)."""
        # Shuffle aligned_latent to BxCxS format
        if is_latent(aligned_conditioning):
            aligned_conditioning = aligned_conditioning.permute(0, 2, 1)
        # The voice latent modulates the normalized code embedding FiLM-style.
        cond_scale, cond_shift = torch.chunk(conditioning_latent, 2, dim=1)
        if is_latent(aligned_conditioning):
            code_emb = self.latent_conditioner(aligned_conditioning)
        else:
            code_emb = self.code_embedding(aligned_conditioning).permute(0, 2, 1)
            code_emb = self.code_converter(code_emb)
        code_emb = self.code_norm(code_emb) * (1 + cond_scale.unsqueeze(-1)) + cond_shift.unsqueeze(-1)
        unconditioned_batches = torch.zeros((code_emb.shape[0], 1, 1), device=code_emb.device)
        # Mask out the conditioning branch for whole batch elements, implementing something similar to classifier-free guidance.
        if self.training and self.unconditioned_percentage > 0:
            unconditioned_batches = torch.rand((code_emb.shape[0], 1, 1),
                                               device=code_emb.device) < self.unconditioned_percentage
            code_emb = torch.where(unconditioned_batches, self.unconditioned_embedding.repeat(aligned_conditioning.shape[0], 1, 1),
                                   code_emb)
        expanded_code_emb = F.interpolate(code_emb, size=expected_seq_len, mode='nearest')
        if not return_code_pred:
            return expanded_code_emb
        else:
            mel_pred = self.mel_head(expanded_code_emb)
            # Multiply mel_pred by !unconditioned_branches, which drops the gradient on unconditioned branches. This is because we don't want that gradient being used to train parameters through the codes_embedder as it unbalances contributions to that network from the MSE loss.
            mel_pred = mel_pred * unconditioned_batches.logical_not()
            return expanded_code_emb, mel_pred

    def forward(self, x, timesteps, aligned_conditioning=None, conditioning_latent=None, precomputed_aligned_embeddings=None, conditioning_free=False, return_code_pred=False):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param aligned_conditioning: an aligned latent or sequence of tokens providing useful data about the sample to be produced.
        :param conditioning_latent: a pre-computed conditioning latent; see get_conditioning().
        :param precomputed_aligned_embeddings: Embeddings returned from self.timestep_independent()
        :param conditioning_free: When set, all conditioning inputs (including tokens and conditioning_input) will not be considered.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert precomputed_aligned_embeddings is not None or (aligned_conditioning is not None and conditioning_latent is not None)
        assert not (return_code_pred and precomputed_aligned_embeddings is not None)  # These two are mutually exclusive.
        # BUGFIX: conditioning_free + return_code_pred previously fell through to an
        # unbound `mel_pred` (NameError); fail fast with a clear message instead.
        assert not (conditioning_free and return_code_pred), "conditioning_free passes produce no code prediction"
        unused_params = []
        if conditioning_free:
            code_emb = self.unconditioned_embedding.repeat(x.shape[0], 1, x.shape[-1])
            unused_params.extend(list(self.code_converter.parameters()) + list(self.code_embedding.parameters()))
            unused_params.extend(list(self.latent_conditioner.parameters()))
        else:
            if precomputed_aligned_embeddings is not None:
                code_emb = precomputed_aligned_embeddings
            else:
                code_emb, mel_pred = self.timestep_independent(aligned_conditioning, conditioning_latent, x.shape[-1], True)
                if is_latent(aligned_conditioning):
                    unused_params.extend(list(self.code_converter.parameters()) + list(self.code_embedding.parameters()))
                else:
                    unused_params.extend(list(self.latent_conditioner.parameters()))
            unused_params.append(self.unconditioned_embedding)
        time_emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
        code_emb = self.conditioning_timestep_integrator(code_emb, time_emb)
        x = self.inp_block(x)
        x = torch.cat([x, code_emb], dim=1)
        x = self.integrating_conv(x)
        for i, lyr in enumerate(self.layers):
            # Do layer drop where applicable. Do not drop first and last layers.
            if self.training and self.layer_drop > 0 and i != 0 and i != (len(self.layers)-1) and random.random() < self.layer_drop:
                unused_params.extend(list(lyr.parameters()))
            else:
                # Autocast disabled for the first block for improved precision.
                # NOTE(review): the original comment also claimed the last block,
                # but the condition only exempts i == 0.
                with autocast(x.device.type, enabled=self.enable_fp16 and i != 0):
                    x = lyr(x, time_emb)
        x = x.float()
        out = self.out(x)
        # Involve probabilistic or possibly unused parameters in loss so we don't get DDP errors.
        extraneous_addition = 0
        for p in unused_params:
            extraneous_addition = extraneous_addition + p.mean()
        out = out + extraneous_addition * 0
        if return_code_pred:
            return out, mel_pred
        return out
if __name__ == '__main__':
    # Smoke test for DiffusionTts.
    clip = torch.randn(2, 100, 400)
    aligned_latent = torch.randn(2, 388, 512)
    aligned_sequence = torch.randint(0, 8192, (2, 100))
    cond = torch.randn(2, 100, 400)
    ts = torch.LongTensor([600, 600])
    model = DiffusionTts(512, layer_drop=.3, unconditioned_percentage=.5)
    # BUGFIX: forward() expects a conditioning *latent* of shape (B, 2*model_channels),
    # not a raw reference mel — derive it via get_conditioning() first. (The original
    # also had dataset-artifact text fused onto its last line.)
    cond_latent = model.get_conditioning(cond)
    # Test with latent aligned conditioning
    #o = model(clip, ts, aligned_latent, cond_latent)
    # Test with sequence aligned conditioning
    o = model(clip, ts, aligned_sequence, cond_latent)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from torch.utils.checkpoint import checkpoint
from ruth_tts_transformer.ruth_tts.models.arch_util import AttentionBlock
from ruth_tts_transformer.ruth_tts.models.xtransformers import ContinuousTransformerWrapper, Encoder
def exists(val):
    """True when `val` is anything other than None."""
    return not (val is None)
def masked_mean(t, mask):
    """Mean of `t` over dim 1, counting only positions where `mask` is True."""
    zeroed = t.masked_fill(~mask, 0.)
    return zeroed.sum(dim=1) / mask.sum(dim=1)
class CollapsingTransformer(nn.Module):
    """Transformer encoder that collapses a (B, T, C) sequence into a single
    (B, output_dims) vector via a randomly-masked mean pool.

    During training, a fraction (`mask_percentage`) of elements is excluded
    from the mean as a regularizer; at eval time all elements are averaged.
    """
    def __init__(self, model_dim, output_dims, heads, dropout, depth, mask_percentage=0, **encoder_kwargs):
        super().__init__()
        self.transformer = ContinuousTransformerWrapper(
            max_seq_len=-1,
            use_pos_emb=False,  # rotary embeddings below supply position information
            attn_layers=Encoder(
                dim=model_dim,
                depth=depth,
                heads=heads,
                ff_dropout=dropout,
                ff_mult=1,
                attn_dropout=dropout,
                use_rmsnorm=True,
                ff_glu=True,
                rotary_pos_emb=True,
                **encoder_kwargs,
            ))
        # Projects to output_dims and mixes across time once more before pooling.
        self.pre_combiner = nn.Sequential(nn.Conv1d(model_dim, output_dims, 1),
                                          AttentionBlock(output_dims, num_heads=heads, do_checkpoint=False),
                                          nn.Conv1d(output_dims, output_dims, 1))
        self.mask_percentage = mask_percentage

    def forward(self, x, **transformer_kwargs):
        h = self.transformer(x, **transformer_kwargs)
        # (B, T, C) -> (B, C, T) for the conv stack, then back.
        h = h.permute(0,2,1)
        # Gradient checkpointing trades recompute for activation memory here.
        h = checkpoint(self.pre_combiner, h).permute(0,2,1)
        if self.training:
            # Randomly drop elements from the mean as regularization.
            mask = torch.rand_like(h.float()) > self.mask_percentage
        else:
            mask = torch.ones_like(h.float()).bool()
        return masked_mean(h, mask)
class ConvFormatEmbedding(nn.Module):
    """Token embedding that emits conv-style (B, C, T) tensors instead of (B, T, C)."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.emb = nn.Embedding(*args, **kwargs)

    def forward(self, x):
        # (B, T, C) -> (B, C, T) so the result can feed Conv1d stacks directly.
        return self.emb(x).permute(0, 2, 1)
class CVVP(nn.Module):
    """Contrastive model scoring how well a candidate mel clip matches conditioning audio.

    Two collapsing-transformer towers embed the conditioning clip and the
    candidate clip into a shared latent space; similarity is a temperature-scaled
    dot product, trained with a symmetric cross-entropy (InfoNCE-style) loss.
    """

    def __init__(
            self,
            model_dim=512,
            transformer_heads=8,
            dropout=.1,
            conditioning_enc_depth=8,
            cond_mask_percentage=0,
            mel_channels=80,
            mel_codes=None,
            speech_enc_depth=8,
            speech_mask_percentage=0,
            latent_multiplier=1,
    ):
        super().__init__()
        latent_dim = latent_multiplier * model_dim
        self.temperature = nn.Parameter(torch.tensor(1.))
        # Conv stem downsamples the conditioning mel 4x in time.
        self.cond_emb = nn.Sequential(
            nn.Conv1d(mel_channels, model_dim // 2, kernel_size=5, stride=2, padding=2),
            nn.Conv1d(model_dim // 2, model_dim, kernel_size=3, stride=2, padding=1))
        self.conditioning_transformer = CollapsingTransformer(
            model_dim, model_dim, transformer_heads, dropout,
            conditioning_enc_depth, cond_mask_percentage)
        self.to_conditioning_latent = nn.Linear(latent_dim, latent_dim, bias=False)
        # Raw mels get a conv stem; discretized mel codes get an embedding table.
        if mel_codes is None:
            self.speech_emb = nn.Conv1d(mel_channels, model_dim, kernel_size=5, padding=2)
        else:
            self.speech_emb = ConvFormatEmbedding(mel_codes, model_dim)
        self.speech_transformer = CollapsingTransformer(
            model_dim, latent_dim, transformer_heads, dropout,
            speech_enc_depth, speech_mask_percentage)
        self.to_speech_latent = nn.Linear(latent_dim, latent_dim, bias=False)

    def get_grad_norm_parameter_groups(self):
        return {
            'conditioning': list(self.conditioning_transformer.parameters()),
            'speech': list(self.speech_transformer.parameters()),
        }

    def forward(
            self,
            mel_cond,
            mel_input,
            return_loss=False
    ):
        cond_tokens = self.cond_emb(mel_cond).permute(0, 2, 1)
        cond_latents = self.to_conditioning_latent(self.conditioning_transformer(cond_tokens))
        speech_tokens = self.speech_emb(mel_input).permute(0, 2, 1)
        speech_latents = self.to_speech_latent(self.speech_transformer(speech_tokens))
        # L2-normalize both sides so similarities behave like scaled cosines.
        cond_latents = F.normalize(cond_latents, p=2, dim=-1)
        speech_latents = F.normalize(speech_latents, p=2, dim=-1)
        temp = self.temperature.exp()
        if not return_loss:
            # Per-pair similarity, used at inference for re-ranking candidates.
            return einsum('n d, n d -> n', cond_latents, speech_latents) * temp
        # Full pairwise similarity matrix; diagonal entries are the matching pairs.
        sim = einsum('i d, j d -> i j', cond_latents, speech_latents) * temp
        labels = torch.arange(cond_latents.shape[0], device=mel_input.device)
        return (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
if __name__ == '__main__':
    # Smoke test for CVVP. (The original last line was corrupted by
    # dataset-artifact text fused onto it.)
    clvp = CVVP()
    clvp(torch.randn(2, 80, 100),
         torch.randn(2, 80, 95),
         return_loss=True)
import torch
import torch.nn as nn
import torch.nn.functional as F
MAX_WAV_VALUE = 32768.0  # full-scale magnitude of 16-bit PCM audio
class KernelPredictor(torch.nn.Module):
    ''' Kernel predictor for the location-variable convolutions.

    From a conditioning sequence (e.g. a mel spectrogram) it predicts, per
    time position, the convolution kernels and biases consumed by LVCBlock.
    '''

    def __init__(
        self,
        cond_channels,
        conv_in_channels,
        conv_out_channels,
        conv_layers,
        conv_kernel_size=3,
        kpnet_hidden_channels=64,
        kpnet_conv_size=3,
        kpnet_dropout=0.0,
        kpnet_nonlinear_activation="LeakyReLU",
        kpnet_nonlinear_activation_params=None,
    ):
        '''
        Args:
            cond_channels (int): number of channel for the conditioning sequence,
            conv_in_channels (int): number of channel for the input sequence,
            conv_out_channels (int): number of channel for the output sequence,
            conv_layers (int): number of layers
            kpnet_nonlinear_activation_params (dict | None): kwargs for the activation;
                defaults to {"negative_slope": 0.1}.
        '''
        super().__init__()
        # BUGFIX: the activation-params dict was a mutable default argument;
        # use a None sentinel and fall back to the historical default.
        if kpnet_nonlinear_activation_params is None:
            kpnet_nonlinear_activation_params = {"negative_slope": 0.1}

        self.conv_in_channels = conv_in_channels
        self.conv_out_channels = conv_out_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_layers = conv_layers

        # Total channels needed to pack all layers' kernels / biases per position.
        kpnet_kernel_channels = conv_in_channels * conv_out_channels * conv_kernel_size * conv_layers  # l_w
        kpnet_bias_channels = conv_out_channels * conv_layers  # l_b

        self.input_conv = nn.Sequential(
            nn.utils.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)),
            getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
        )

        self.residual_convs = nn.ModuleList()
        padding = (kpnet_conv_size - 1) // 2
        for _ in range(3):
            self.residual_convs.append(
                nn.Sequential(
                    nn.Dropout(kpnet_dropout),
                    nn.utils.weight_norm(
                        nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding,
                                  bias=True)),
                    getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
                    nn.utils.weight_norm(
                        nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding,
                                  bias=True)),
                    getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
                )
            )
        self.kernel_conv = nn.utils.weight_norm(
            nn.Conv1d(kpnet_hidden_channels, kpnet_kernel_channels, kpnet_conv_size, padding=padding, bias=True))
        self.bias_conv = nn.utils.weight_norm(
            nn.Conv1d(kpnet_hidden_channels, kpnet_bias_channels, kpnet_conv_size, padding=padding, bias=True))

    def forward(self, c):
        '''
        Args:
            c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)
        Returns:
            (kernels, bias):
                kernels (batch, conv_layers, in_channels, out_channels, kernel_size, cond_length)
                bias (batch, conv_layers, out_channels, cond_length)
        '''
        batch, _, cond_length = c.shape
        c = self.input_conv(c)
        for residual_conv in self.residual_convs:
            residual_conv.to(c.device)
            c = c + residual_conv(c)
        k = self.kernel_conv(c)
        b = self.bias_conv(c)
        # Unpack the flat channel dimension into per-layer kernel/bias tensors.
        kernels = k.contiguous().view(
            batch,
            self.conv_layers,
            self.conv_in_channels,
            self.conv_out_channels,
            self.conv_kernel_size,
            cond_length,
        )
        bias = b.contiguous().view(
            batch,
            self.conv_layers,
            self.conv_out_channels,
            cond_length,
        )
        return kernels, bias

    def remove_weight_norm(self):
        # Strip weight-norm reparameterization for inference-time export.
        nn.utils.remove_weight_norm(self.input_conv[0])
        nn.utils.remove_weight_norm(self.kernel_conv)
        nn.utils.remove_weight_norm(self.bias_conv)
        for block in self.residual_convs:
            nn.utils.remove_weight_norm(block[1])
            nn.utils.remove_weight_norm(block[3])
class LVCBlock(torch.nn.Module):
    '''The location-variable convolutions: upsample by `stride`, then apply a
    stack of gated convolutions whose kernels are predicted per-position from
    the conditioning sequence by a KernelPredictor.
    '''

    def __init__(
        self,
        in_channels,
        cond_channels,
        stride,
        dilations=(1, 3, 9, 27),  # BUGFIX: was a mutable list default
        lReLU_slope=0.2,
        conv_kernel_size=3,
        cond_hop_length=256,
        kpnet_hidden_channels=64,
        kpnet_conv_size=3,
        kpnet_dropout=0.0,
    ):
        super().__init__()

        self.cond_hop_length = cond_hop_length
        self.conv_layers = len(dilations)
        self.conv_kernel_size = conv_kernel_size

        # Kernels produce 2*in_channels so the output can be split into a
        # sigmoid gate and a tanh value (GAU).
        self.kernel_predictor = KernelPredictor(
            cond_channels=cond_channels,
            conv_in_channels=in_channels,
            conv_out_channels=2 * in_channels,
            conv_layers=len(dilations),
            conv_kernel_size=conv_kernel_size,
            kpnet_hidden_channels=kpnet_hidden_channels,
            kpnet_conv_size=kpnet_conv_size,
            kpnet_dropout=kpnet_dropout,
            kpnet_nonlinear_activation_params={"negative_slope": lReLU_slope}
        )

        self.convt_pre = nn.Sequential(
            nn.LeakyReLU(lReLU_slope),
            nn.utils.weight_norm(nn.ConvTranspose1d(in_channels, in_channels, 2 * stride, stride=stride,
                                                    padding=stride // 2 + stride % 2, output_padding=stride % 2)),
        )

        self.conv_blocks = nn.ModuleList()
        for dilation in dilations:
            self.conv_blocks.append(
                nn.Sequential(
                    nn.LeakyReLU(lReLU_slope),
                    nn.utils.weight_norm(nn.Conv1d(in_channels, in_channels, conv_kernel_size,
                                                   padding=dilation * (conv_kernel_size - 1) // 2, dilation=dilation)),
                    nn.LeakyReLU(lReLU_slope),
                )
            )

    def forward(self, x, c):
        ''' forward propagation of the location-variable convolutions.
        Args:
            x (Tensor): the input sequence (batch, in_channels, in_length)
            c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)
        Returns:
            Tensor: the output sequence (batch, in_channels, in_length)
        '''
        _, in_channels, _ = x.shape  # (B, c_g, L')
        x = self.convt_pre(x)  # (B, c_g, stride * L')
        kernels, bias = self.kernel_predictor(c)

        for i, conv in enumerate(self.conv_blocks):
            output = conv(x)  # (B, c_g, stride * L')

            k = kernels[:, i, :, :, :, :]  # (B, 2 * c_g, c_g, kernel_size, cond_length)
            b = bias[:, i, :, :]  # (B, 2 * c_g, cond_length)

            output = self.location_variable_convolution(output, k, b,
                                                        hop_size=self.cond_hop_length)  # (B, 2 * c_g, stride * L'): LVC
            # Gated activation unit: sigmoid gate * tanh value, added residually.
            x = x + torch.sigmoid(output[:, :in_channels, :]) * torch.tanh(
                output[:, in_channels:, :])  # (B, c_g, stride * L'): GAU

        return x

    def location_variable_convolution(self, x, kernel, bias, dilation=1, hop_size=256):
        ''' perform location-variable convolution operation on the input sequence (x) using the local convolution kernl.
        Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100.
        Args:
            x (Tensor): the input sequence (batch, in_channels, in_length).
            kernel (Tensor): the local convolution kernel (batch, in_channel, out_channels, kernel_size, kernel_length)
            bias (Tensor): the bias for the local convolution (batch, out_channels, kernel_length)
            dilation (int): the dilation of convolution.
            hop_size (int): the hop_size of the conditioning sequence.
        Returns:
            (Tensor): the output sequence after performing local convolution. (batch, out_channels, in_length).
        '''
        batch, _, in_length = x.shape
        batch, _, out_channels, kernel_size, kernel_length = kernel.shape
        assert in_length == (kernel_length * hop_size), "length of (x, kernel) is not matched"

        padding = dilation * int((kernel_size - 1) / 2)
        x = F.pad(x, (padding, padding), 'constant', 0)  # (batch, in_channels, in_length + 2*padding)
        # Carve the padded input into overlapping per-kernel windows with unfold.
        x = x.unfold(2, hop_size + 2 * padding, hop_size)  # (batch, in_channels, kernel_length, hop_size + 2*padding)

        if hop_size < dilation:
            x = F.pad(x, (0, dilation), 'constant', 0)
        x = x.unfold(3, dilation,
                     dilation)  # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation)
        x = x[:, :, :, :, :hop_size]
        x = x.transpose(3, 4)  # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation)
        x = x.unfold(4, kernel_size, 1)  # (batch, in_channels, kernel_length, dilation, _, kernel_size)

        # Batched per-position convolution as a single einsum contraction.
        o = torch.einsum('bildsk,biokl->bolsd', x, kernel)
        o = o.to(memory_format=torch.channels_last_3d)
        bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d)
        o = o + bias
        o = o.contiguous().view(batch, out_channels, -1)

        return o

    def remove_weight_norm(self):
        self.kernel_predictor.remove_weight_norm()
        nn.utils.remove_weight_norm(self.convt_pre[1])
        for block in self.conv_blocks:
            nn.utils.remove_weight_norm(block[1])
class UnivNetGenerator(nn.Module):
    """UnivNet Generator: maps (mel conditioning, noise) to a waveform through
    a stack of location-variable convolution (LVC) blocks."""

    def __init__(self, noise_dim=64, channel_size=32, dilations=(1, 3, 9, 27), strides=(8, 8, 4),
                 lReLU_slope=.2, kpnet_conv_size=3,
                 # Below are MEL configurations options that this generator requires.
                 hop_length=256, n_mel_channels=100):
        # BUGFIX: `dilations` / `strides` were mutable list defaults; also removed
        # two no-op self-assignments (channel_size, kpnet_conv_size) present before.
        super(UnivNetGenerator, self).__init__()
        self.mel_channel = n_mel_channels
        self.noise_dim = noise_dim
        self.hop_length = hop_length

        self.res_stack = nn.ModuleList()
        # Cumulative upsampling factor so each LVC block sees the right hop size.
        # (Renamed from a local `hop_length` that shadowed the parameter.)
        cumulative_hop = 1
        for stride in strides:
            cumulative_hop = stride * cumulative_hop
            self.res_stack.append(
                LVCBlock(
                    channel_size,
                    n_mel_channels,
                    stride=stride,
                    dilations=dilations,
                    lReLU_slope=lReLU_slope,
                    cond_hop_length=cumulative_hop,
                    kpnet_conv_size=kpnet_conv_size
                )
            )

        self.conv_pre = \
            nn.utils.weight_norm(nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode='reflect'))

        self.conv_post = nn.Sequential(
            nn.LeakyReLU(lReLU_slope),
            nn.utils.weight_norm(nn.Conv1d(channel_size, 1, 7, padding=3, padding_mode='reflect')),
            nn.Tanh(),
        )

    def forward(self, c, z):
        '''
        Args:
            c (Tensor): the conditioning sequence of mel-spectrogram (batch, mel_channels, in_length)
            z (Tensor): the noise sequence (batch, noise_dim, in_length)
        Returns:
            Tensor: waveform in [-1, 1], (batch, 1, in_length * prod(strides))
        '''
        z = self.conv_pre(z)  # (B, c_g, L)

        for res_block in self.res_stack:
            res_block.to(z.device)
            z = res_block(z, c)  # (B, c_g, L * s_0 * ... * s_i)

        z = self.conv_post(z)  # (B, 1, L * 256)

        return z

    def eval(self, inference=False):
        super(UnivNetGenerator, self).eval()
        # don't remove weight norm while validation in training loop
        if inference:
            self.remove_weight_norm()

    def remove_weight_norm(self):
        # Strip weight-norm reparameterization across the whole generator.
        nn.utils.remove_weight_norm(self.conv_pre)

        for layer in self.conv_post:
            if len(layer.state_dict()) != 0:
                nn.utils.remove_weight_norm(layer)

        for res_block in self.res_stack:
            res_block.remove_weight_norm()

    def inference(self, c, z=None):
        # pad input mel with zeros to cut artifact
        # see https://github.com/seungwonpark/melgan/issues/8
        zero = torch.full((c.shape[0], self.mel_channel, 10), -11.5129).to(c.device)
        mel = torch.cat((c, zero), dim=2)

        if z is None:
            z = torch.randn(c.shape[0], self.noise_dim, mel.size(2)).to(mel.device)

        audio = self.forward(mel, z)
        # Trim the samples generated from the artifact-suppression padding.
        audio = audio[:, :, :-(self.hop_length * 10)]
        audio = audio.clamp(min=-1, max=1)
        return audio
if __name__ == '__main__':
    # Smoke test for UnivNetGenerator. (The original last line was corrupted by
    # dataset-artifact text fused onto it.)
    model = UnivNetGenerator()

    c = torch.randn(3, 100, 10)
    z = torch.randn(3, 64, 10)
    print(c.shape)

    y = model(c, z)
    print(y.shape)
    assert y.shape == torch.Size([3, 1, 2560])

    pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(pytorch_total_params)
from functools import partial
import torch
import torch.nn.functional as F
from einops import rearrange
from rotary_embedding_torch import RotaryEmbedding, broadcat
from torch import nn
# helpers
def exists(val):
    """Return True unless `val` is None."""
    return not (val is None)
def default(val, d):
    """Return `val` unless it is None, in which case return the fallback `d`."""
    # None-check inlined so the helper is self-contained.
    return d if val is None else val
def cast_tuple(val, depth = 1):
    """Coerce `val` into a tuple; a scalar is repeated `depth` times."""
    if isinstance(val, tuple):
        return val
    if isinstance(val, list):
        return tuple(val)
    return (val,) * depth
def max_neg_value(t):
    """Most negative finite value representable in `t`'s dtype (used for masking)."""
    info = torch.finfo(t.dtype)
    return -info.max
def stable_softmax(t, dim = -1, alpha = 32 ** 2):
    """Numerically stabilized softmax: work on t/alpha, subtract the (detached) max, rescale."""
    scaled = t / alpha
    scaled = scaled - torch.amax(scaled, dim = dim, keepdim = True).detach()
    return (scaled * alpha).softmax(dim = dim)
def route_args(router, args, depth):
    """Split keyword arguments across the layers of a transformer stack.

    Args:
        router: dict mapping arg name -> per-layer tuple of (to_f, to_g) booleans.
        args: kwargs supplied to the stack.
        depth: number of layers.
    Returns:
        List of (f_kwargs, g_kwargs) dict pairs, one per layer.
    """
    routed_args = [(dict(), dict()) for _ in range(depth)]
    matched_keys = [key for key in args.keys() if key in router]

    for key in matched_keys:
        val = args[key]
        # BUGFIX(readability): the loop variable was named `depth`, shadowing the parameter.
        for layer_idx, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
            new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
            routed_args[layer_idx] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
    return routed_args
# classes
class SequentialSequence(nn.Module):
    """Runs (attention, feed-forward) residual pairs in order, routing kwargs per layer."""

    def __init__(self, layers, args_route = {}, layer_dropout = 0.):
        super().__init__()
        assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
        self.layers = layers
        self.args_route = args_route
        # NOTE(review): stored but never consulted in forward(); layer dropout
        # appears unimplemented here.
        self.layer_dropout = layer_dropout

    def forward(self, x, **kwargs):
        routed = route_args(self.args_route, kwargs, len(self.layers))
        for (attn_fn, ff_fn), (attn_kwargs, ff_kwargs) in zip(self.layers, routed):
            x = x + attn_fn(x, **attn_kwargs)
            x = x + ff_fn(x, **ff_kwargs)
        return x
class DivideMax(nn.Module):
    """Normalizes a tensor by its (detached) maximum along `dim`."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        # Detach so the normalizer contributes no gradient through the max.
        peak = x.amax(dim = self.dim, keepdim = True).detach()
        return x / peak
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
    """Residual-branch scaling from CaiT (https://arxiv.org/abs/2103.17239).

    Each branch output is multiplied by a learnable per-channel scale whose
    initial value shrinks with depth, so early training favors the identity path.
    """

    def __init__(self, dim, depth, fn):
        super().__init__()
        # Depth-dependent initial epsilon, following the paper's schedule.
        if depth <= 18:
            init_eps = 0.1
        elif depth > 18 and depth <= 24:
            init_eps = 1e-5
        else:
            init_eps = 1e-6
        self.scale = nn.Parameter(torch.zeros(1, 1, dim).fill_(init_eps))
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) * self.scale
# layer norm
class PreNorm(nn.Module):
    """Applies LayerNorm before `fn`; optionally a second "sandwich" norm after it."""

    def __init__(self, dim, fn, sandwich = False):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.norm_out(self.fn(self.norm(x), **kwargs))
# feed forward
class GEGLU(nn.Module):
    """Gated GELU: splits the last dim in half and gates one half with GELU of the other."""

    def forward(self, x):
        value, gate = x.chunk(2, dim = -1)
        return value * F.gelu(gate)
class FeedForward(nn.Module):
    """Position-wise feed-forward block with GEGLU gating.

    The first linear produces 2*inner_dim features; GEGLU halves that back to
    inner_dim before the output projection.
    """

    def __init__(self, dim, dropout = 0., mult = 4.):
        super().__init__()
        # BUGFIX: `mult` defaults to a float (4.), so `dim * mult` is a float;
        # nn.Linear requires integer feature counts — cast explicitly.
        inner_dim = int(dim * mult)
        self.net = nn.Sequential(
            nn.Linear(dim, inner_dim * 2),
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim)
        )

    def forward(self, x):
        return self.net(x)
# Attention
class Attention(nn.Module):
    """Multi-head (optionally causal) self-attention with an optional padding mask.

    `mask` is a (batch, seq) boolean tensor where True marks positions that may
    be attended to. The padding mask is applied before the causal mask.
    """
    def __init__(self, dim, seq_len, causal = True, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.seq_len = seq_len
        # Standard scaled dot-product scaling: 1/sqrt(dim_head).
        self.scale = dim_head ** -0.5

        self.causal = causal

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, mask = None):
        b, n, _, h, device = *x.shape, self.heads, x.device
        softmax = torch.softmax

        # Project once to q, k, v and split heads: (b, n, h*d) -> (b, h, n, d).
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)

        q = q * self.scale

        dots = torch.einsum('b h i d, b h j d -> b h i j', q, k)
        mask_value = max_neg_value(dots)

        # Padding mask: fill disallowed (False) key positions with -inf-like values.
        if exists(mask):
            mask = rearrange(mask, 'b j -> b () () j')
            dots.masked_fill_(~mask, mask_value)
            del mask

        # Causal mask: block attention to strictly-future positions (upper triangle).
        if self.causal:
            i, j = dots.shape[-2:]
            mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
            dots.masked_fill_(mask, mask_value)

        attn = softmax(dots, dim=-1)

        out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
        # Merge heads back: (b, h, n, d) -> (b, n, h*d).
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out
# main transformer class
class Transformer(nn.Module):
    """Stack of pre-norm, layer-scaled Attention + GEGLU FeedForward blocks.

    NOTE(review): `sparse_attn` is threaded through the constructor, but the
    per-layer flag is never consulted — sparse attention is not implemented
    here; every layer uses dense Attention.
    """

    def __init__(
        self,
        *,
        dim,
        depth,
        seq_len,
        causal = True,
        heads = 8,
        dim_head = 64,
        ff_mult = 4,
        attn_dropout = 0.,
        ff_dropout = 0.,
        sparse_attn = False,
        sandwich_norm = False,
    ):
        super().__init__()
        layers = nn.ModuleList([])
        sparse_layer = cast_tuple(sparse_attn, depth)

        for ind, sparse_attn in zip(range(depth), sparse_layer):
            attn = Attention(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
            ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)

            # Each residual branch is pre-normed and depth-scaled (LayerScale).
            layers.append(nn.ModuleList([
                LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
                LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
            ]))

        execute_type = SequentialSequence
        # Only the attention branch (first of each pair) receives the `mask` kwarg.
        route_attn = ((True, False),) * depth
        attn_route_map = {'mask': route_attn}

        self.layers = execute_type(layers, args_route = attn_route_map)

    def forward(self, x, **kwargs):
        # BUGFIX: this return line was corrupted by dataset-artifact text fused onto it.
        return self.layers(x, **kwargs)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from ruth_tts_transformer.ruth_tts.models.arch_util import CheckpointedXTransformerEncoder
from ruth_tts_transformer.ruth_tts.models.transformer import Transformer
from ruth_tts_transformer.ruth_tts.models.xtransformers import Encoder
def exists(val):
    """None-check helper: True for any non-None value."""
    return val is not None
def masked_mean(t, mask, dim = 1):
    """Mean of `t` over `dim`, counting only positions where `mask` is True.

    `t` is (b, n, d) and `mask` is (b, n); `dim` indexes the `n` axis of `t`.
    BUGFIX: `dim` was previously accepted but ignored (1 was hard-coded).
    """
    t = t.masked_fill(~mask[:, :, None], 0.)
    return t.sum(dim = dim) / mask.sum(dim = dim)[..., None]
class CLVP(nn.Module):
"""
CLIP model retrofitted for performing contrastive evaluation between tokenized audio data and the corresponding
transcribed text.
Originally from https://github.com/lucidrains/DALLE-pytorch/blob/main/dalle_pytorch/dalle_pytorch.py
"""
def __init__(
self,
*,
dim_text=512,
dim_speech=512,
dim_latent=512,
num_text_tokens=256,
text_enc_depth=6,
text_seq_len=120,
text_heads=8,
num_speech_tokens=8192,
speech_enc_depth=6,
speech_heads=8,
speech_seq_len=250,
text_mask_percentage=0,
voice_mask_percentage=0,
wav_token_compression=1024,
use_xformers=False,
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim_text)
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias=False)
self.speech_emb = nn.Embedding(num_speech_tokens, dim_speech)
self.to_speech_latent = nn.Linear(dim_speech, dim_latent, bias=False)
if use_xformers:
self.text_transformer = CheckpointedXTransformerEncoder(
needs_permute=False,
exit_permute=False,
max_seq_len=-1,
attn_layers=Encoder(
dim=dim_text,
depth=text_enc_depth,
heads=text_heads,
ff_dropout=.1,
ff_mult=2,
attn_dropout=.1,
use_rmsnorm=True,
ff_glu=True,
rotary_pos_emb=True,
))
self.speech_transformer = CheckpointedXTransformerEncoder(
needs_permute=False,
exit_permute=False,
max_seq_len=-1,
attn_layers=Encoder(
dim=dim_speech,
depth=speech_enc_depth,
heads=speech_heads,
ff_dropout=.1,
ff_mult=2,
attn_dropout=.1,
use_rmsnorm=True,
ff_glu=True,
rotary_pos_emb=True,
))
else:
self.text_transformer = Transformer(causal=False, seq_len=text_seq_len, dim=dim_text, depth=text_enc_depth,
heads=text_heads)
self.speech_transformer = Transformer(causal=False, seq_len=speech_seq_len, dim=dim_speech,
depth=speech_enc_depth, heads=speech_heads)
self.temperature = nn.Parameter(torch.tensor(1.))
self.text_mask_percentage = text_mask_percentage
self.voice_mask_percentage = voice_mask_percentage
self.wav_token_compression = wav_token_compression
self.xformers = use_xformers
if not use_xformers:
self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
self.speech_pos_emb = nn.Embedding(num_speech_tokens, dim_speech)
    def forward(
            self,
            text,
            speech_tokens,
            return_loss=False
    ):
        """
        Contrast a batch of text token sequences against speech token sequences.

        :param text: long tensor (b, t) of text tokens.
        :param speech_tokens: long tensor (b, s) of quantized speech codes.
        :param return_loss: if True, return the symmetric cross-entropy (InfoNCE)
            loss over the batch; otherwise return per-pair similarity scores (b,).
        """
        b, device = text.shape[0], text.device
        if self.training:
            # Randomly drop a fraction of tokens as augmentation during training.
            text_mask = torch.rand_like(text.float()) > self.text_mask_percentage
            voice_mask = torch.rand_like(speech_tokens.float()) > self.voice_mask_percentage
        else:
            text_mask = torch.ones_like(text.float()).bool()
            voice_mask = torch.ones_like(speech_tokens.float()).bool()
        text_emb = self.text_emb(text)
        speech_emb = self.speech_emb(speech_tokens)
        if not self.xformers:
            # Non-xformers path adds learned absolute positional embeddings.
            text_emb += self.text_pos_emb(torch.arange(text.shape[1], device=device))
            speech_emb += self.speech_pos_emb(torch.arange(speech_emb.shape[1], device=device))
        enc_text = self.text_transformer(text_emb, mask=text_mask)
        enc_speech = self.speech_transformer(speech_emb, mask=voice_mask)
        # Pool over unmasked positions, then project each tower to the shared latent.
        text_latents = masked_mean(enc_text, text_mask, dim=1)
        speech_latents = masked_mean(enc_speech, voice_mask, dim=1)
        text_latents = self.to_text_latent(text_latents)
        speech_latents = self.to_speech_latent(speech_latents)
        text_latents, speech_latents = map(lambda t: F.normalize(t, p=2, dim=-1), (text_latents, speech_latents))
        temp = self.temperature.exp()
        if not return_loss:
            # Cosine similarity of each matched (text, speech) pair, temperature-scaled.
            sim = einsum('n d, n d -> n', text_latents, speech_latents) * temp
            return sim
        # Full similarity matrix; matched pairs lie on the diagonal.
        sim = einsum('i d, j d -> i j', text_latents, speech_latents) * temp
        labels = torch.arange(b, device=device)
        loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
        return loss
if __name__ == '__main__':
    # Smoke test. BUG FIX: CLVP.forward accepts only (text, speech_tokens,
    # return_loss); the previous demo passed extra length tensors as positional
    # arguments, which raised a TypeError.
    clip = CLVP(text_mask_percentage=.2, voice_mask_percentage=.2)
    clip(torch.randint(0, 256, (2, 120)),
         torch.randint(0, 8192, (2, 250)),
         return_loss=True)
    nonloss = clip(torch.randint(0, 256, (2, 120)),
                   torch.randint(0, 8192, (2, 250)),
                   return_loss=False)
print(nonloss.shape) | /ruth-tts-converter-0.0.2.tar.gz/ruth-tts-converter-0.0.2/src/ruth_tts_transformer/ruth_tts/models/clvp.py | 0.888263 | 0.32146 | clvp.py | pypi |
import functools
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
from ruth_tts_transformer.ruth_tts.models.xtransformers import ContinuousTransformerWrapper, RelativePositionBias
def zero_module(module):
    """Zero all of ``module``'s parameters in place and hand the module back."""
    with torch.no_grad():
        for param in module.parameters():
            param.zero_()
    return module
class GroupNorm32(nn.GroupNorm):
    """GroupNorm that always normalizes in fp32, then casts back to the input dtype."""

    def forward(self, x):
        normalized = super().forward(x.float())
        return normalized.type(x.dtype)
def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    # Pick a group count that scales down for narrow layers.
    if channels <= 16:
        groups = 8
    elif channels <= 64:
        groups = 16
    else:
        groups = 32
    # Shrink until the channel count divides evenly; refuse degenerate configs.
    while channels % groups != 0:
        groups //= 2
    assert groups > 2
    return GroupNorm32(groups, channels)
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv, mask=None, rel_pos=None):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :param mask: optional [N x T] tensor; zero positions get their attention weights zeroed (post-softmax).
        :param rel_pos: optional callable (e.g. RelativePositionBias) applied to the raw attention scores.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        # Fold heads into the batch dim, then split channels into q, k, v.
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        # ch**-0.25 applied to both q and k gives the usual ch**-0.5 overall scale.
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = torch.einsum(
            "bct,bcs->bts", q * scale, k * scale
        )  # More stable with f16 than dividing afterwards
        if rel_pos is not None:
            weight = rel_pos(weight.reshape(bs, self.n_heads, weight.shape[-2], weight.shape[-1])).reshape(bs * self.n_heads, weight.shape[-2], weight.shape[-1])
        # Softmax in fp32 for numerical stability, then cast back.
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        if mask is not None:
            # The proper way to do this is to mask before the softmax using -inf, but that doesn't work properly on CPUs.
            mask = mask.repeat(self.n_heads, 1).unsqueeze(1)
            weight = weight * mask
        a = torch.einsum("bts,bcs->bct", weight, v)
        return a.reshape(bs, -1, length)
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        do_checkpoint=True,
        relative_pos_embeddings=False,
    ):
        super().__init__()
        self.channels = channels
        self.do_checkpoint = do_checkpoint
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.norm = normalization(channels)
        self.qkv = nn.Conv1d(channels, channels * 3, 1)
        # split heads before split qkv
        self.attention = QKVAttentionLegacy(self.num_heads)
        # Zero-init the output projection so the block starts as an identity mapping.
        self.proj_out = zero_module(nn.Conv1d(channels, channels, 1))
        if relative_pos_embeddings:
            # NOTE(review): passes `heads=num_heads` rather than `self.num_heads`;
            # these differ when num_head_channels is set — confirm this is intended.
            self.relative_pos_embeddings = RelativePositionBias(scale=(channels // self.num_heads) ** .5, causal=False, heads=num_heads, num_buckets=32, max_distance=64)
        else:
            self.relative_pos_embeddings = None

    def forward(self, x, mask=None):
        b, c, *spatial = x.shape
        # Flatten any trailing spatial dims into a single sequence axis.
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv, mask, self.relative_pos_embeddings)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
"""
def __init__(self, channels, use_conv, out_channels=None, factor=4):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.factor = factor
if use_conv:
ksize = 5
pad = 2
self.conv = nn.Conv1d(self.channels, self.out_channels, ksize, padding=pad)
def forward(self, x):
assert x.shape[1] == self.channels
x = F.interpolate(x, scale_factor=self.factor, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    """

    def __init__(self, channels, use_conv, out_channels=None, factor=4, ksize=5, pad=2):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        if use_conv:
            # A strided convolution downsamples and may change the channel count.
            self.op = nn.Conv1d(
                self.channels, self.out_channels, ksize, stride=factor, padding=pad
            )
        else:
            # Average pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = nn.AvgPool1d(kernel_size=factor, stride=factor)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(nn.Module):
    """
    1-D residual block of (GroupNorm -> SiLU -> Conv) x2 with optional
    up/downsampling, following the diffusion-model UNet ResBlock design.
    """

    def __init__(
        self,
        channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        up=False,
        down=False,
        kernel_size=3,
    ):
        super().__init__()
        self.channels = channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        # NOTE(review): use_scale_shift_norm is stored but never applied in this block.
        self.use_scale_shift_norm = use_scale_shift_norm
        padding = 1 if kernel_size == 3 else 2
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding),
        )
        self.updown = up or down
        if up:
            self.h_upd = Upsample(channels, False)
            self.x_upd = Upsample(channels, False)
        elif down:
            self.h_upd = Downsample(channels, False)
            self.x_upd = Downsample(channels, False)
        else:
            self.h_upd = self.x_upd = nn.Identity()
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            # Zero-init the final conv so the block starts as an identity mapping.
            zero_module(
                nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding)
            ),
        )
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = nn.Conv1d(
                channels, self.out_channels, kernel_size, padding=padding
            )
        else:
            self.skip_connection = nn.Conv1d(channels, self.out_channels, 1)

    def forward(self, x):
        if self.updown:
            # Resample between the norm/activation and the conv so both the
            # residual branch and the skip path are resampled consistently.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        h = self.out_layers(h)
        return self.skip_connection(x) + h
class AudioMiniEncoder(nn.Module):
    """
    Compact convolutional + attention encoder mapping a (b, spec_dim, s)
    spectrogram to a single (b, embedding_dim) vector (the first time position
    after the attention stack).
    """

    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 base_channels=128,
                 depth=2,
                 resnet_blocks=2,
                 attn_blocks=4,
                 num_attn_heads=4,
                 dropout=0,
                 downsample_factor=2,
                 kernel_size=3):
        super().__init__()
        self.init = nn.Sequential(
            nn.Conv1d(spec_dim, base_channels, 3, padding=1)
        )
        ch = base_channels
        res = []
        # Each depth level: `resnet_blocks` ResBlocks, then a channel-doubling downsample.
        for l in range(depth):
            for r in range(resnet_blocks):
                res.append(ResBlock(ch, dropout, kernel_size=kernel_size))
            res.append(Downsample(ch, use_conv=True, out_channels=ch*2, factor=downsample_factor))
            ch *= 2
        self.res = nn.Sequential(*res)
        self.final = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            nn.Conv1d(ch, embedding_dim, 1)
        )
        attn = []
        for a in range(attn_blocks):
            attn.append(AttentionBlock(embedding_dim, num_attn_heads,))
        self.attn = nn.Sequential(*attn)
        self.dim = embedding_dim

    def forward(self, x):
        h = self.init(x)
        h = self.res(h)
        h = self.final(h)
        h = self.attn(h)
        # Summarize the whole sequence with the first time position.
        return h[:, :, 0]
class TorchMelSpectrogram(nn.Module):
    """
    Tacotron-style MEL spectrogram extractor with log dynamic-range compression
    and optional per-band normalization loaded from `mel_norm_file`.
    """

    def __init__(self, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, mel_fmin=0, mel_fmax=8000,
                 sampling_rate=22050, normalize=False, mel_norm_file='ruth-tts-files/data/mel_norms.pth'):
        super().__init__()
        # These are the default tacotron values for the MEL spectrogram.
        self.filter_length = filter_length
        self.hop_length = hop_length
        self.win_length = win_length
        self.n_mel_channels = n_mel_channels
        self.mel_fmin = mel_fmin
        self.mel_fmax = mel_fmax
        self.sampling_rate = sampling_rate
        self.mel_stft = torchaudio.transforms.MelSpectrogram(n_fft=self.filter_length, hop_length=self.hop_length,
                                                             win_length=self.win_length, power=2, normalized=normalize,
                                                             sample_rate=self.sampling_rate, f_min=self.mel_fmin,
                                                             f_max=self.mel_fmax, n_mels=self.n_mel_channels,
                                                             norm="slaney")
        self.mel_norm_file = mel_norm_file
        if self.mel_norm_file is not None:
            # Per-band normalization constants, loaded eagerly at construction time.
            self.mel_norms = torch.load(self.mel_norm_file)
        else:
            self.mel_norms = None

    def forward(self, inp):
        if len(inp.shape) == 3:  # Automatically squeeze out the channels dimension if it is present (assuming mono-audio)
            inp = inp.squeeze(1)
        assert len(inp.shape) == 2
        # Move the transform lazily to wherever the input lives.
        self.mel_stft = self.mel_stft.to(inp.device)
        mel = self.mel_stft(inp)
        # Perform dynamic range compression
        mel = torch.log(torch.clamp(mel, min=1e-5))
        if self.mel_norms is not None:
            self.mel_norms = self.mel_norms.to(mel.device)
            mel = mel / self.mel_norms.unsqueeze(0).unsqueeze(-1)
        return mel
class CheckpointedLayer(nn.Module):
    """
    Wraps a module. When forward() is called, passes kwargs that require_grad through torch.checkpoint() and bypasses
    checkpoint for all other args.
    """

    def __init__(self, wrap):
        super().__init__()
        self.wrap = wrap

    def forward(self, x, *args, **kwargs):
        # Kwargs get baked into a partial and thereby bypass checkpointing, so no
        # gradient-requiring tensor may arrive as a keyword argument.
        for k, v in kwargs.items():
            assert not (isinstance(v, torch.Tensor) and v.requires_grad)  # This would screw up checkpointing.
        partial = functools.partial(self.wrap, **kwargs)
        return torch.utils.checkpoint.checkpoint(partial, x, *args)
class CheckpointedXTransformerEncoder(nn.Module):
    """
    Wraps a ContinuousTransformerWrapper and applies CheckpointedLayer to each layer and permutes from channels-mid
    to channels-last that XTransformer expects.
    """

    def __init__(self, needs_permute=True, exit_permute=True, checkpoint=True, **xtransformer_kwargs):
        super().__init__()
        self.transformer = ContinuousTransformerWrapper(**xtransformer_kwargs)
        self.needs_permute = needs_permute
        self.exit_permute = exit_permute
        if not checkpoint:
            return
        # Re-wrap each (norm, block, residual) triple so the block itself is checkpointed.
        for i in range(len(self.transformer.attn_layers.layers)):
            n, b, r = self.transformer.attn_layers.layers[i]
            self.transformer.attn_layers.layers[i] = nn.ModuleList([n, CheckpointedLayer(b), r])

    def forward(self, x, **kwargs):
        # Permute (b, c, s) -> (b, s, c) on entry and back on exit when requested.
        if self.needs_permute:
            x = x.permute(0,2,1)
        h = self.transformer(x, **kwargs)
        if self.exit_permute:
            h = h.permute(0,2,1)
return h | /ruth-tts-converter-0.0.2.tar.gz/ruth-tts-converter-0.0.2/src/ruth_tts_transformer/ruth_tts/models/arch_util.py | 0.956497 | 0.597461 | arch_util.py | pypi |
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import GPT2Config, GPT2PreTrainedModel, LogitsProcessorList
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
from transformers.utils.model_parallel_utils import get_device_map, assert_device_map
from ruth_tts_transformer.ruth_tts.models.arch_util import AttentionBlock
from ruth_tts_transformer.ruth_tts.utils.typical_sampling import TypicalLogitsWarper
def null_position_embeddings(range, dim):
    """Stand-in positional embedding: all zeros, shaped (batch, seq, dim), on range's device."""
    batch, seq = range.shape[0], range.shape[1]
    return torch.zeros((batch, seq, dim), device=range.device)
class ResBlock(nn.Module):
    """
    Basic residual convolutional block that uses GroupNorm.
    """

    def __init__(self, chan):
        super().__init__()
        stack = [
            nn.Conv1d(chan, chan, kernel_size=3, padding=1),
            nn.GroupNorm(chan // 8, chan),
            nn.ReLU(),
            nn.Conv1d(chan, chan, kernel_size=3, padding=1),
            nn.GroupNorm(chan // 8, chan),
        ]
        self.net = nn.Sequential(*stack)

    def forward(self, x):
        residual = self.net(x) + x
        return F.relu(residual)
class GPT2InferenceModel(GPT2PreTrainedModel):
    """
    Inference-only wrapper around the shared GPT2Model: reuses the trained
    transformer, embeddings and head from UnifiedVoice, and supports HF
    `generate()` by splicing a cached (conditioning + text) embedding in front
    of the autoregressively generated MEL tokens.
    """

    def __init__(self, config, gpt, text_pos_emb, embeddings, norm, linear):
        super().__init__(config)
        self.transformer = gpt
        self.text_pos_embedding = text_pos_emb
        self.embeddings = embeddings
        self.lm_head = nn.Sequential(norm, linear)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.cached_mel_emb = None

    def parallelize(self, device_map=None):
        """Spread transformer layers across visible GPUs (HF model parallelism)."""
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True

    def deparallelize(self):
        """Move everything back to CPU and release cached GPU memory."""
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        torch.cuda.empty_cache()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def store_mel_emb(self, mel_emb):
        # Cache the (conditioning + text) embedding that prefixes every generated sequence.
        self.cached_mel_emb = mel_emb

    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        """Standard HF generation hook; trims inputs to the last token once a KV cache exists."""
        token_type_ids = kwargs.get("token_type_ids", None)
        # only last token for inputs_ids if past is defined in kwargs
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None
        return {
            "input_ids": input_ids,
            "past_key_values": past,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        Run one decoding step (or the initial prefix pass). `store_mel_emb` must
        have been called first; input_ids positions covering the cached prefix
        are placeholders replaced by the cached embedding.
        """
        assert self.cached_mel_emb is not None
        assert inputs_embeds is None  # Not supported by this inference model.
        assert labels is None  # Training not supported by this inference model.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Create embedding
        mel_len = self.cached_mel_emb.shape[1]
        if input_ids.shape[1] != 1:
            # Prefix pass: rebuild the full embedding = cached prefix + embedded text tokens.
            text_inputs = input_ids[:, mel_len:]
            text_emb = self.embeddings(text_inputs)
            text_emb = text_emb + self.text_pos_embedding(text_emb)
            if self.cached_mel_emb.shape[0] != text_emb.shape[0]:
                # Expand the cached prefix across num_return_sequences.
                mel_emb = self.cached_mel_emb.repeat_interleave(text_emb.shape[0]//self.cached_mel_emb.shape[0], 0)
            else:
                mel_emb = self.cached_mel_emb
            emb = torch.cat([mel_emb, text_emb], dim=1)
        else:
            # Incremental pass: embed only the newest token at its fixed position.
            emb = self.embeddings(input_ids)
            emb = emb + self.text_pos_embedding.get_fixed_embedding(attention_mask.shape[1]-mel_len, attention_mask.device)
        transformer_outputs = self.transformer(
            inputs_embeds=emb,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)
        lm_logits = self.lm_head(hidden_states)
        if not return_dict:
            return (lm_logits,) + transformer_outputs[1:]
        return CausalLMOutputWithCrossAttentions(
            loss=None,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )

    @staticmethod
    def _reorder_cache(past, beam_idx):
        """
        This function is used to re-order the :obj:`past_key_values` cache if
        :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
        called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past
        )
class ConditioningEncoder(nn.Module):
    """
    Encodes a (b, spec_dim, s) spectrogram into a single (b, embedding_dim)
    conditioning vector via a 1x1 conv and a stack of attention blocks.
    """

    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 attn_blocks=6,
                 num_attn_heads=4,
                 do_checkpointing=False,
                 mean=False):
        super().__init__()
        self.init = nn.Conv1d(spec_dim, embedding_dim, kernel_size=1)
        blocks = [AttentionBlock(embedding_dim, num_attn_heads) for _ in range(attn_blocks)]
        self.attn = nn.Sequential(*blocks)
        self.dim = embedding_dim
        self.do_checkpointing = do_checkpointing
        self.mean = mean

    def forward(self, x):
        h = self.attn(self.init(x))
        # Summarize either by averaging over time or by taking the first position.
        return h.mean(dim=2) if self.mean else h[:, :, 0]
class LearnedPositionEmbeddings(nn.Module):
    """Trainable absolute position embeddings, initialized GPT-2 style."""

    def __init__(self, seq_len, model_dim, init=.02):
        super().__init__()
        self.emb = nn.Embedding(seq_len, model_dim)
        # Initializing this way is standard for GPT-2
        self.emb.weight.data.normal_(mean=0.0, std=init)

    def forward(self, x):
        positions = torch.arange(0, x.shape[1], device=x.device)
        return self.emb(positions)

    def get_fixed_embedding(self, ind, dev):
        """Return the (1, 1, model_dim) embedding for a single position index."""
        index = torch.tensor([ind], device=dev)
        return self.emb(index).unsqueeze(0)
def build_hf_gpt_transformer(layers, model_dim, heads, max_mel_seq_len, max_text_seq_len, checkpointing):
    """
    GPT-2 implemented by the HuggingFace library.

    Returns (gpt, mel_pos_embedding, text_pos_embedding, None, None). The
    GPT2Model's built-in token and positional embeddings are removed so the
    caller supplies `inputs_embeds` and the learned position embeddings itself.
    """
    from transformers import GPT2Config, GPT2Model
    gpt_config = GPT2Config(vocab_size=256,  # Unused.
                            n_positions=max_mel_seq_len+max_text_seq_len,
                            n_ctx=max_mel_seq_len+max_text_seq_len,
                            n_embd=model_dim,
                            n_layer=layers,
                            n_head=heads,
                            gradient_checkpointing=checkpointing,
                            use_cache=not checkpointing)
    gpt = GPT2Model(gpt_config)
    # Override the built in positional embeddings
    del gpt.wpe
    gpt.wpe = functools.partial(null_position_embeddings, dim=model_dim)
    # Built-in token embeddings are unused.
    del gpt.wte
    return gpt, LearnedPositionEmbeddings(max_mel_seq_len, model_dim), LearnedPositionEmbeddings(max_text_seq_len, model_dim),\
        None, None
class MelEncoder(nn.Module):
    """
    Convolutional encoder mapping a (b, mel_channels, s) MEL onto
    (b, s/4, channels) embeddings; two stride-2 convs give the 4x reduction.
    """

    def __init__(self, channels, mel_channels=80, resblocks_per_reduction=2):
        super().__init__()
        self.channels = channels
        self.encoder = nn.Sequential(nn.Conv1d(mel_channels, channels//4, kernel_size=3, padding=1),
                                     nn.Sequential(*[ResBlock(channels//4) for _ in range(resblocks_per_reduction)]),
                                     nn.Conv1d(channels//4, channels//2, kernel_size=3, stride=2, padding=1),
                                     nn.GroupNorm(channels//16, channels//2),
                                     nn.ReLU(),
                                     nn.Sequential(*[ResBlock(channels//2) for _ in range(resblocks_per_reduction)]),
                                     nn.Conv1d(channels//2, channels, kernel_size=3, stride=2, padding=1),
                                     nn.GroupNorm(channels//8, channels),
                                     nn.ReLU(),
                                     nn.Sequential(*[ResBlock(channels) for _ in range(resblocks_per_reduction)]),
                                     )
        # Overall temporal reduction factor of the encoder.
        self.reduction = 4

    def forward(self, x):
        for e in self.encoder:
            x = e(x)
        # (b, c, s/4) -> (b, s/4, c) for consumption as transformer embeddings.
        return x.permute(0,2,1)
class UnifiedVoice(nn.Module):
    """
    Autoregressive TTS model: a GPT-2 stack trained jointly over text tokens and
    quantized MEL codes, conditioned on a latent derived from reference speech.
    """

    def __init__(self, layers=8, model_dim=512, heads=8, max_text_tokens=120, max_mel_tokens=250, max_conditioning_inputs=1,
                 mel_length_compression=1024, number_text_tokens=256,
                 start_text_token=None, number_mel_codes=8194, start_mel_token=8192,
                 stop_mel_token=8193, train_solo_embeddings=False, use_mel_codes_as_input=True,
                 checkpointing=True, types=1):
        """
        Args:
            layers: Number of layers in transformer stack.
            model_dim: Operating dimensions of the transformer
            heads: Number of transformer heads. Must be divisible by model_dim. Recommend model_dim//64
            max_text_tokens: Maximum number of text tokens that will be encountered by model.
            max_mel_tokens: Maximum number of MEL tokens that will be encountered by model.
            max_conditioning_inputs: Maximum number of conditioning inputs provided to the model. If (1), conditioning input can be of format (b,80,s), otherwise (b,n,80,s).
            mel_length_compression: The factor between <number_input_samples> and <mel_tokens>. Used to compute MEL code padding given wav input length.
            number_text_tokens:
            start_text_token:
            stop_text_token:
            number_mel_codes:
            start_mel_token:
            stop_mel_token:
            train_solo_embeddings:
            use_mel_codes_as_input:
            checkpointing:
        """
        super().__init__()
        self.number_text_tokens = number_text_tokens
        self.start_text_token = number_text_tokens * types if start_text_token is None else start_text_token
        self.stop_text_token = 0
        self.number_mel_codes = number_mel_codes
        self.start_mel_token = start_mel_token
        self.stop_mel_token = stop_mel_token
        self.layers = layers
        self.heads = heads
        self.max_mel_tokens = max_mel_tokens
        self.max_text_tokens = max_text_tokens
        self.model_dim = model_dim
        self.max_conditioning_inputs = max_conditioning_inputs
        self.mel_length_compression = mel_length_compression
        self.conditioning_encoder = ConditioningEncoder(80, model_dim, num_attn_heads=heads)
        self.text_embedding = nn.Embedding(self.number_text_tokens*types+1, model_dim)
        if use_mel_codes_as_input:
            self.mel_embedding = nn.Embedding(self.number_mel_codes, model_dim)
        else:
            self.mel_embedding = MelEncoder(model_dim, resblocks_per_reduction=1)
        self.gpt, self.mel_pos_embedding, self.text_pos_embedding, self.mel_layer_pos_embedding, self.text_layer_pos_embedding = \
            build_hf_gpt_transformer(layers, model_dim, heads, self.max_mel_tokens+2+self.max_conditioning_inputs, self.max_text_tokens+2, checkpointing)
        if train_solo_embeddings:
            self.mel_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * .02, requires_grad=True)
            self.text_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * .02, requires_grad=True)
        else:
            self.mel_solo_embedding = 0
            self.text_solo_embedding = 0
        self.final_norm = nn.LayerNorm(model_dim)
        self.text_head = nn.Linear(model_dim, self.number_text_tokens*types+1)
        self.mel_head = nn.Linear(model_dim, self.number_mel_codes)
        # Initialize the embeddings per the GPT-2 scheme
        embeddings = [self.text_embedding]
        if use_mel_codes_as_input:
            embeddings.append(self.mel_embedding)
        for module in embeddings:
            module.weight.data.normal_(mean=0.0, std=.02)

    def build_aligned_inputs_and_targets(self, input, start_token, stop_token):
        """Prepend start_token to form the model input; append stop_token to form the target."""
        inp = F.pad(input, (1,0), value=start_token)
        tar = F.pad(input, (0,1), value=stop_token)
        return inp, tar

    def set_mel_padding(self, mel_input_tokens, wav_lengths):
        """
        Given mel tokens that are derived from a padded audio clip and the actual lengths of each batch element in
        that audio clip, reformats the tokens with STOP_MEL_TOKEN in place of the zero padding. This is required
        preformatting to create a working TTS model.
        """
        # Set padding areas within MEL (currently it is coded with the MEL code for <zero>).
        mel_lengths = torch.div(wav_lengths, self.mel_length_compression, rounding_mode='trunc')
        for b in range(len(mel_lengths)):
            actual_end = mel_lengths[b] + 1  # Due to the convolutional nature of how these tokens are generated, it would be best if the model predicts a token past the actual last token.
            if actual_end < mel_input_tokens.shape[-1]:
                mel_input_tokens[b, actual_end:] = self.stop_mel_token
        return mel_input_tokens

    def get_logits(self, speech_conditioning_inputs, first_inputs, first_head, second_inputs=None, second_head=None, get_attns=False, return_latent=False):
        """
        Run the GPT stack over [conditioning | first | second] embeddings and
        project each modality's hidden states through its own head.
        """
        if second_inputs is not None:
            emb = torch.cat([speech_conditioning_inputs, first_inputs, second_inputs], dim=1)
        else:
            emb = torch.cat([speech_conditioning_inputs, first_inputs], dim=1)
        gpt_out = self.gpt(inputs_embeds=emb, return_dict=True, output_attentions=get_attns)
        if get_attns:
            return gpt_out.attentions
        enc = gpt_out.last_hidden_state[:, 1:]  # The first logit is tied to the speech_conditioning_input
        enc = self.final_norm(enc)
        if return_latent:
            return enc[:, speech_conditioning_inputs.shape[1]:speech_conditioning_inputs.shape[1]+first_inputs.shape[1]], enc[:, -second_inputs.shape[1]:]
        first_logits = enc[:, :first_inputs.shape[1]]
        first_logits = first_head(first_logits)
        # Heads emit (b, s, vocab); cross_entropy wants (b, vocab, s).
        first_logits = first_logits.permute(0,2,1)
        if second_inputs is not None:
            second_logits = enc[:, -second_inputs.shape[1]:]
            second_logits = second_head(second_logits)
            second_logits = second_logits.permute(0,2,1)
            return first_logits, second_logits
        else:
            return first_logits

    def get_conditioning(self, speech_conditioning_input):
        """Encode one ((b,80,s)) or several ((b,n,80,s)) conditioning MEL clips into a (b, model_dim) latent."""
        speech_conditioning_input = speech_conditioning_input.unsqueeze(1) if len(
            speech_conditioning_input.shape) == 3 else speech_conditioning_input
        conds = []
        for j in range(speech_conditioning_input.shape[1]):
            conds.append(self.conditioning_encoder(speech_conditioning_input[:, j]))
        conds = torch.stack(conds, dim=1)
        # Average over the n conditioning clips.
        conds = conds.mean(dim=1)
        return conds

    def forward(self, speech_conditioning_latent, text_inputs, text_lengths, mel_codes, wav_lengths, types=None, text_first=True, raw_mels=None, return_attentions=False,
                return_latent=False, clip_inputs=True):
        """
        Forward pass that uses both text and voice in either text conditioning mode or voice conditioning mode
        (actuated by `text_first`).
        speech_conditioning_input: MEL float tensor, (b,1024)
        text_inputs: long tensor, (b,t)
        text_lengths: long tensor, (b,)
        mel_inputs: long tensor, (b,m)
        wav_lengths: long tensor, (b,)
        raw_mels: MEL float tensor (b,80,s)
        If return_attentions is specified, only logits are returned.
        If return_latent is specified, loss & logits are not computed or returned. Only the predicted latents are returned.
        If clip_inputs is True, the inputs will be clipped to the smallest input size across each input modality.
        """
        # Types are expressed by expanding the text embedding space.
        if types is not None:
            text_inputs = text_inputs * (1+types).unsqueeze(-1)
        if clip_inputs:
            # This model will receive micro-batches with a ton of padding for both the text and MELs. Ameliorate this by
            # chopping the inputs by the maximum actual length.
            max_text_len = text_lengths.max()
            text_inputs = text_inputs[:, :max_text_len]
            max_mel_len = wav_lengths.max() // self.mel_length_compression
            mel_codes = mel_codes[:, :max_mel_len]
            if raw_mels is not None:
                raw_mels = raw_mels[:, :, :max_mel_len*4]
        mel_codes = self.set_mel_padding(mel_codes, wav_lengths)
        text_inputs = F.pad(text_inputs, (0,1), value=self.stop_text_token)
        mel_codes = F.pad(mel_codes, (0,1), value=self.stop_mel_token)
        conds = speech_conditioning_latent.unsqueeze(1)
        text_inputs, text_targets = self.build_aligned_inputs_and_targets(text_inputs, self.start_text_token, self.stop_text_token)
        text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)
        mel_codes, mel_targets = self.build_aligned_inputs_and_targets(mel_codes, self.start_mel_token, self.stop_mel_token)
        if raw_mels is not None:
            mel_inp = F.pad(raw_mels, (0, 8))
        else:
            mel_inp = mel_codes
        mel_emb = self.mel_embedding(mel_inp)
        mel_emb = mel_emb + self.mel_pos_embedding(mel_codes)
        if text_first:
            text_logits, mel_logits = self.get_logits(conds, text_emb, self.text_head, mel_emb, self.mel_head, get_attns=return_attentions, return_latent=return_latent)
            if return_latent:
                return mel_logits[:, :-2]  # Despite the name, these are not logits. Strip off the two tokens added by this forward pass.
        else:
            mel_logits, text_logits = self.get_logits(conds, mel_emb, self.mel_head, text_emb, self.text_head, get_attns=return_attentions, return_latent=return_latent)
            if return_latent:
                return text_logits[:, :-2]  # Despite the name, these are not logits. Strip off the two tokens added by this forward pass.
        if return_attentions:
            return mel_logits
        loss_text = F.cross_entropy(text_logits, text_targets.long())
        loss_mel = F.cross_entropy(mel_logits, mel_targets.long())
        return loss_text.mean(), loss_mel.mean(), mel_logits

    def inference_speech(self, speech_conditioning_latent, text_inputs, input_tokens=None, num_return_sequences=1,
                         max_generate_length=None, typical_sampling=False, typical_mass=.9, **hf_generate_kwargs):
        """Autoregressively sample MEL tokens for `text_inputs` via HF `generate()`; returns only the generated tokens."""
        seq_length = self.max_mel_tokens + self.max_text_tokens + 2
        if not hasattr(self, 'inference_model'):
            # TODO: Decouple gpt_config from this inference model.
            gpt_config = GPT2Config(vocab_size=self.max_mel_tokens,
                                    n_positions=seq_length,
                                    n_ctx=seq_length,
                                    n_embd=self.model_dim,
                                    n_layer=self.layers,
                                    n_head=self.heads,
                                    gradient_checkpointing=False,
                                    use_cache=True)
            self.inference_model = GPT2InferenceModel(gpt_config, self.gpt, self.mel_pos_embedding, self.mel_embedding, self.final_norm, self.mel_head)
            self.gpt.wte = self.mel_embedding
        text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)
        text_inputs, text_targets = self.build_aligned_inputs_and_targets(text_inputs, self.start_text_token, self.stop_text_token)
        text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)
        conds = speech_conditioning_latent.unsqueeze(1)
        emb = torch.cat([conds, text_emb], dim=1)
        self.inference_model.store_mel_emb(emb)
        # Placeholder ids covering the cached prefix; only the trailing start token matters.
        fake_inputs = torch.full((emb.shape[0], conds.shape[1] + emb.shape[1],), fill_value=1, dtype=torch.long,
                                 device=text_inputs.device)
        fake_inputs[:, -1] = self.start_mel_token
        trunc_index = fake_inputs.shape[1]
        if input_tokens is None:
            inputs = fake_inputs
        else:
            assert num_return_sequences % input_tokens.shape[0] == 0, "The number of return sequences must be divisible by the number of input sequences"
            fake_inputs = fake_inputs.repeat(num_return_sequences, 1)
            input_tokens = input_tokens.repeat(num_return_sequences // input_tokens.shape[0], 1)
            inputs = torch.cat([fake_inputs, input_tokens], dim=1)
        logits_processor = LogitsProcessorList([TypicalLogitsWarper(mass=typical_mass)]) if typical_sampling else LogitsProcessorList()
        max_length = trunc_index + self.max_mel_tokens - 1 if max_generate_length is None else trunc_index + max_generate_length
        gen = self.inference_model.generate(inputs, bos_token_id=self.start_mel_token, pad_token_id=self.stop_mel_token, eos_token_id=self.stop_mel_token,
                                            max_length=max_length, logits_processor=logits_processor,
                                            num_return_sequences=num_return_sequences, **hf_generate_kwargs)
        return gen[:, trunc_index:]
if __name__ == '__main__':
    # Smoke test. BUG FIX: forward() takes a conditioning *latent* of shape
    # (b, model_dim) — it immediately does `latent.unsqueeze(1)` — not raw MELs.
    # The previous demo passed (2, 3, 80, 800) raw MELs directly; derive the
    # latent via get_conditioning() first.
    gpt = UnifiedVoice(model_dim=256, heads=4, train_solo_embeddings=True, use_mel_codes_as_input=True, max_conditioning_inputs=4)
    cond_latent = gpt.get_conditioning(torch.randn(2, 3, 80, 800))
    l = gpt(cond_latent,
            torch.randint(high=120, size=(2, 120)),
            torch.tensor([32, 120]),
            torch.randint(high=8192, size=(2, 250)),
            torch.tensor([250 * 256, 195 * 256]))
gpt.text_forward(torch.randn(2,80,800), torch.randint(high=50, size=(2,80)), torch.tensor([32, 80])) | /ruth-tts-converter-0.0.2.tar.gz/ruth-tts-converter-0.0.2/src/ruth_tts_transformer/ruth_tts/models/autoregressive.py | 0.937667 | 0.354796 | autoregressive.py | pypi |
import os
from glob import glob
import librosa
import torch
import torchaudio
import numpy as np
from scipy.io.wavfile import read
from ruth_tts_transformer.ruth_tts.utils.stft import STFT
def load_wav_to_torch(full_path):
    """Load a wav file and return (float32 torch tensor normalized to [-1, 1], sampling rate).

    Integer PCM is divided by its dtype's full-scale value; float data is
    assumed to already be normalized.

    Raises:
        NotImplementedError: for unsupported sample dtypes (e.g. uint8).
    """
    sampling_rate, data = read(full_path)
    if data.dtype == np.int32:
        norm_fix = 2 ** 31
    elif data.dtype == np.int16:
        norm_fix = 2 ** 15
    elif data.dtype == np.float16 or data.dtype == np.float32:
        norm_fix = 1.
    else:
        # BUGFIX: was `raise NotImplemented(...)` — NotImplemented is a
        # non-exception singleton and raising it fails with a TypeError.
        raise NotImplementedError(f"Provided data dtype not supported: {data.dtype}")
    return (torch.FloatTensor(data.astype(np.float32)) / norm_fix, sampling_rate)
def load_audio(audiopath, sampling_rate):
    """Load a .wav or .mp3 file as a mono float tensor of shape (1, samples).

    The signal is resampled to `sampling_rate` if needed and clipped to
    [-1, 1]. Only .wav and .mp3 extensions are accepted.
    """
    if audiopath[-4:] == '.wav':
        audio, lsr = load_wav_to_torch(audiopath)
    elif audiopath[-4:] == '.mp3':
        # librosa returns float32 in [-1, 1] already resampled to `sr`.
        audio, lsr = librosa.load(audiopath, sr=sampling_rate)
        audio = torch.FloatTensor(audio)
    else:
        assert False, f"Unsupported audio format provided: {audiopath[-4:]}"
    # Remove any channel data.
    # Heuristic: an axis shorter than 5 is assumed to be the channel axis;
    # only the first channel is kept.
    if len(audio.shape) > 1:
        if audio.shape[0] < 5:
            audio = audio[0]
        else:
            assert audio.shape[1] < 5
            audio = audio[:, 0]
    if lsr != sampling_rate:
        audio = torchaudio.functional.resample(audio, lsr, sampling_rate)
    # Check some assumptions about audio range. This should be automatically fixed in load_wav_to_torch, but might not be in some edge cases, where we should squawk.
    # '2' is arbitrarily chosen since it seems like audio will often "overdrive" the [-1,1] bounds.
    # NOTE(review): `not torch.any(audio < 0)` flags signals with no negative
    # samples (likely un-normalized unsigned data), not values below -2 —
    # confirm this asymmetric check is intentional.
    if torch.any(audio > 2) or not torch.any(audio < 0):
        print(f"Error with {audiopath}. Max={audio.max()} min={audio.min()}")
    # In-place clip so downstream code always sees bounded samples.
    audio.clip_(-1, 1)
    return audio.unsqueeze(0)
# Empirical dynamic range of Tacotron-style log-mel spectrograms; used to map
# mels to and from the [-1, 1] interval.
TACOTRON_MEL_MAX = 2.3143386840820312
TACOTRON_MEL_MIN = -11.512925148010254
def denormalize_tacotron_mel(norm_mel):
    """Map a [-1, 1]-normalized mel back into the Tacotron log-mel range."""
    span = TACOTRON_MEL_MAX - TACOTRON_MEL_MIN
    return (norm_mel + 1) / 2 * span + TACOTRON_MEL_MIN
def normalize_tacotron_mel(mel):
    """Map a Tacotron log-mel into the [-1, 1] interval."""
    span = TACOTRON_MEL_MAX - TACOTRON_MEL_MIN
    return 2 * ((mel - TACOTRON_MEL_MIN) / span) - 1
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """Log-compress magnitudes: log(clamp(x, clip_val) * C).

    PARAMS
    ------
    C: compression factor
    clip_val: floor applied before the log to avoid log(0)
    """
    floored = torch.clamp(x, min=clip_val)
    return torch.log(floored * C)
def dynamic_range_decompression(x, C=1):
    """Invert dynamic_range_compression: exp(x) / C.

    PARAMS
    ------
    C: compression factor used to compress
    """
    expanded = torch.exp(x)
    return expanded / C
def get_voices(voices_dir='ruth-tts-files/voices'):
    """Discover available voices on disk.

    Each immediate subdirectory of `voices_dir` is a voice; its entries are
    the contained .wav/.mp3 clips plus any precomputed .pth latents.

    Args:
        voices_dir: root directory to scan. The default keeps the historical
            hard-coded location, so existing callers are unaffected.

    Returns:
        dict mapping voice name -> list of file paths.
    """
    voices = {}
    for sub in os.listdir(voices_dir):
        subj = os.path.join(voices_dir, sub)
        if os.path.isdir(subj):
            voices[sub] = (list(glob(f'{subj}/*.wav'))
                           + list(glob(f'{subj}/*.mp3'))
                           + list(glob(f'{subj}/*.pth')))
    return voices
def load_voice(voice):
    """Resolve a voice name to (conditioning_clips, conditioning_latent).

    'random' yields (None, None); a voice backed by a single .pth file yields
    a precomputed latent; otherwise every clip is loaded at 22.05 kHz.
    """
    if voice == 'random':
        return None, None
    paths = get_voices()[voice]
    if len(paths) == 1 and paths[0].endswith('.pth'):
        return None, torch.load(paths[0])
    conds = [load_audio(cond_path, 22050) for cond_path in paths]
    return conds, None
def load_voices(voices):
    """Load and combine several voices.

    Raw-audio voices are combined by concatenating their clips; latent voices
    by averaging their latents. The two kinds cannot be mixed.

    Returns:
        (clips, None) for raw-audio voices, (None, mean_latent) for latent
        voices, or (None, None) if any requested voice is 'random'.
    """
    latents = []
    clips = []
    for voice in voices:
        if voice == 'random':
            print("Cannot combine a random voice with a non-random voice. Just using a random voice.")
            return None, None
        clip, latent = load_voice(voice)
        if latent is None:
            assert len(latents) == 0, "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
            clips.extend(clip)
        elif clip is None:
            # BUGFIX: this branch used to test `voice is None` (never true for
            # a name string), so latent-backed voices were silently dropped;
            # it also asserted on len(voices), which is never 0 in this loop.
            assert len(clips) == 0, "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
            latents.append(latent)
    if len(latents) == 0:
        return clips, None
    latents = torch.stack(latents, dim=0)
    return None, latents.mean(dim=0)
class TacotronSTFT(torch.nn.Module):
    """Computes Tacotron-style log-mel spectrograms via an STFT front end."""
    def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
                 n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
                 mel_fmax=8000.0):
        super(TacotronSTFT, self).__init__()
        self.n_mel_channels = n_mel_channels
        self.sampling_rate = sampling_rate
        self.stft_fn = STFT(filter_length, hop_length, win_length)
        # Imported lazily so librosa is only required when this class is used.
        from librosa.filters import mel as librosa_mel_fn
        mel_basis = librosa_mel_fn(
            sr=sampling_rate, n_fft=filter_length, n_mels=n_mel_channels, fmin=mel_fmin, fmax=mel_fmax)
        mel_basis = torch.from_numpy(mel_basis).float()
        # Registered as a buffer so the filterbank follows .to(device)/.cuda().
        self.register_buffer('mel_basis', mel_basis)
    def spectral_normalize(self, magnitudes):
        # Log-compress linear magnitudes.
        output = dynamic_range_compression(magnitudes)
        return output
    def spectral_de_normalize(self, magnitudes):
        # Inverse of spectral_normalize.
        output = dynamic_range_decompression(magnitudes)
        return output
    def mel_spectrogram(self, y):
        """Computes mel-spectrograms from a batch of waves
        PARAMS
        ------
        y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
        RETURNS
        -------
        mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
        """
        # NOTE(review): despite the documented [-1, 1] range, inputs up to
        # +/-10 are tolerated here and then hard-clipped to [-1, 1].
        assert(torch.min(y.data) >= -10)
        assert(torch.max(y.data) <= 10)
        y = torch.clip(y, min=-1, max=1)
        magnitudes, phases = self.stft_fn.transform(y)
        magnitudes = magnitudes.data
        mel_output = torch.matmul(self.mel_basis, magnitudes)
        mel_output = self.spectral_normalize(mel_output)
        return mel_output
def wav_to_univnet_mel(wav, do_normalization=False):
    """Compute a 100-bin, 24 kHz mel spectrogram (univnet vocoder settings).

    Args:
        wav: waveform tensor of shape (B, T), values roughly in [-1, 1].
        do_normalization: if True, map the result into [-1, 1] using the
            Tacotron mel range.
    """
    # 1024-point STFT, hop 256, 100 mel channels, fmax 12 kHz.
    stft = TacotronSTFT(1024, 256, 1024, 100, 24000, 0, 12000)
    # NOTE(review): hard requirement on CUDA; this fails on CPU-only hosts.
    stft = stft.cuda()
    mel = stft.mel_spectrogram(wav)
    if do_normalization:
        mel = normalize_tacotron_mel(mel)
    return mel
import re
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2FeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Processor
from ruth_tts_transformer.ruth_tts.utils.audio import load_audio
def max_alignment(s1, s2, skip_character='~', record=None):
    """
    A clever function that aligns s1 to s2 as best it can. Wherever a character from s1 is not found in s2, a '~' is
    used to replace that character.

    Finally got to use my DP skills!

    Args:
        s1: string to align; the result always has the same length as s1.
        s2: reference string.
        skip_character: placeholder for unmatched characters of s1.
        record: internal DP memo keyed by (len(s1), len(s2)); leave as None.
    """
    # BUGFIX: `record` previously defaulted to a shared mutable dict, so memo
    # entries (keyed only by suffix lengths) leaked between unrelated
    # top-level calls and corrupted later alignments.
    if record is None:
        record = {}
    assert skip_character not in s1, f"Found the skip character {skip_character} in the provided string, {s1}"
    if len(s1) == 0:
        return ''
    if len(s2) == 0:
        return skip_character * len(s1)
    if s1 == s2:
        return s1
    if s1[0] == s2[0]:
        return s1[0] + max_alignment(s1[1:], s2[1:], skip_character, record)
    # Option 1: skip one character of s2.
    take_s1_key = (len(s1), len(s2) - 1)
    if take_s1_key in record:
        take_s1, take_s1_score = record[take_s1_key]
    else:
        take_s1 = max_alignment(s1, s2[1:], skip_character, record)
        take_s1_score = len(take_s1.replace(skip_character, ''))
        record[take_s1_key] = (take_s1, take_s1_score)
    # Option 2: give up on the first character of s1.
    take_s2_key = (len(s1) - 1, len(s2))
    if take_s2_key in record:
        take_s2, take_s2_score = record[take_s2_key]
    else:
        take_s2 = max_alignment(s1[1:], s2, skip_character, record)
        take_s2_score = len(take_s2.replace(skip_character, ''))
        record[take_s2_key] = (take_s2, take_s2_score)
    return take_s1 if take_s1_score > take_s2_score else skip_character + take_s2
class Wav2VecAlignment:
    """
    Uses wav2vec2 to perform audio<->text alignment.
    """
    def __init__(self):
        # CTC model fine-tuned on Tacotron symbols; kept on CPU until used.
        self.model = Wav2Vec2ForCTC.from_pretrained("jbetker/wav2vec2-large-robust-ft-libritts-voxpopuli").cpu()
        self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"facebook/wav2vec2-large-960h")
        self.tokenizer = Wav2Vec2CTCTokenizer.from_pretrained('jbetker/tacotron-symbols')
    def align(self, audio, expected_text, audio_sample_rate=24000):
        """Return, for each character of expected_text, its sample offset in
        `audio` (or an interpolated offset for unmatched characters).

        NOTE(review): the model is moved to CUDA for inference and back to
        CPU afterwards — this requires a CUDA device; confirm for CPU hosts.
        """
        orig_len = audio.shape[-1]
        with torch.no_grad():
            self.model = self.model.cuda()
            audio = audio.to('cuda')
            # wav2vec2 expects 16 kHz, mean/variance-normalized input.
            audio = torchaudio.functional.resample(audio, audio_sample_rate, 16000)
            clip_norm = (audio - audio.mean()) / torch.sqrt(audio.var() + 1e-7)
            logits = self.model(clip_norm).logits
            self.model = self.model.cpu()
        logits = logits[0]
        pred_string = self.tokenizer.decode(logits.argmax(-1).tolist())
        # Replace characters the model could not recognize with '~' so the
        # expectation matches what CTC actually emitted.
        fixed_expectation = max_alignment(expected_text.lower(), pred_string)
        # Samples of original audio per logit frame.
        w2v_compression = orig_len // logits.shape[0]
        expected_tokens = self.tokenizer.encode(fixed_expectation)
        expected_chars = list(fixed_expectation)
        if len(expected_tokens) == 1:
            return [0]  # The alignment is simple; there is only one token.
        expected_tokens.pop(0)  # The first token is a given.
        expected_chars.pop(0)
        alignments = [0]
        def pop_till_you_win():
            # Advance to the next real (non-'~') expected token, emitting -1
            # alignments for skipped characters; returns None when exhausted.
            if len(expected_tokens) == 0:
                return None
            popped = expected_tokens.pop(0)
            popped_char = expected_chars.pop(0)
            while popped_char == '~':
                alignments.append(-1)
                if len(expected_tokens) == 0:
                    return None
                popped = expected_tokens.pop(0)
                popped_char = expected_chars.pop(0)
            return popped
        next_expected_token = pop_till_you_win()
        # Greedy scan: whenever the argmax logit matches the next expected
        # token, record that frame's sample offset.
        for i, logit in enumerate(logits):
            top = logit.argmax()
            if next_expected_token == top:
                alignments.append(i * w2v_compression)
                if len(expected_tokens) > 0:
                    next_expected_token = pop_till_you_win()
                else:
                    break
        pop_till_you_win()
        if not (len(expected_tokens) == 0 and len(alignments) == len(expected_text)):
            torch.save([audio, expected_text], 'alignment_debug.pth')
            assert False, "Something went wrong with the alignment algorithm. I've dumped a file, 'alignment_debug.pth' to" \
                          "your current working directory. Please report this along with the file so it can get fixed."
        # Now fix up alignments. Anything with -1 should be interpolated.
        alignments.append(orig_len)  # This'll get removed but makes the algorithm below more readable.
        for i in range(len(alignments)):
            if alignments[i] == -1:
                for j in range(i+1, len(alignments)):
                    if alignments[j] != -1:
                        next_found_token = j
                        break
                # Linear interpolation between the surrounding real offsets.
                for j in range(i, next_found_token):
                    gap = alignments[next_found_token] - alignments[i-1]
                    alignments[j] = (j-i+1) * gap // (next_found_token-i+1) + alignments[i-1]
        return alignments[:-1]
    def redact(self, audio, expected_text, audio_sample_rate=24000):
        """Cut from `audio` every region whose transcript is wrapped in
        [brackets] in expected_text, using align() to find the boundaries."""
        if '[' not in expected_text:
            return audio
        splitted = expected_text.split('[')
        fully_split = [splitted[0]]
        for spl in splitted[1:]:
            assert ']' in spl, 'Every "[" character must be paired with a "]" with no nesting.'
            fully_split.extend(spl.split(']'))
        # At this point, fully_split is a list of strings, with every other string being something that should be redacted.
        non_redacted_intervals = []
        last_point = 0
        for i in range(len(fully_split)):
            if i % 2 == 0:
                end_interval = max(0, last_point + len(fully_split[i]) - 1)
                non_redacted_intervals.append((last_point, end_interval))
            last_point += len(fully_split[i])
        bare_text = ''.join(fully_split)
        alignments = self.align(audio, bare_text, audio_sample_rate)
        # Keep only the audio spans corresponding to non-redacted text.
        output_audio = []
        for nri in non_redacted_intervals:
            start, stop = nri
            output_audio.append(audio[:, alignments[start]:alignments[stop]])
        return torch.cat(output_audio, dim=-1)
import json
import re
import inflect
import requests
import torch
from tokenizers import Tokenizer
# Regular expression matching whitespace:
from unidecode import unidecode
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
# each pattern matches the abbreviation followed by a literal '.' at a word
# boundary, case-insensitively (e.g. 'Dr.' -> 'doctor').
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]
def expand_abbreviations(text):
    """Replace known abbreviations (e.g. 'mr.', 'dr.') with their full words."""
    for pattern, full_word in _abbreviations:
        text = pattern.sub(full_word, text)
    return text
_inflect = inflect.engine()
# Numbers containing thousands separators, e.g. '1,234'.
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
# Decimals, e.g. '3.14'.
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
# Currency amounts, e.g. '£1,000' and '$2.50'.
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
# Ordinals ('1st', '2nd', ...) and plain integers, in that match order.
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
    """Spell out a matched ordinal, e.g. '2nd' -> 'second'."""
    ordinal = m.group(0)
    return _inflect.number_to_words(ordinal)
def _expand_number(m):
    """Spell out a matched integer, reading 1001-2999 as years ('1984' -> 'nineteen eighty four')."""
    num = int(m.group(0))
    if not 1000 < num < 3000:
        return _inflect.number_to_words(num, andword='')
    if num == 2000:
        return 'two thousand'
    if 2000 < num < 2010:
        return 'two thousand ' + _inflect.number_to_words(num % 100)
    if num % 100 == 0:
        return _inflect.number_to_words(num // 100) + ' hundred'
    # Year style: read as two two-digit groups, '0' spoken as 'oh'.
    return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
def normalize_numbers(text):
    """Expand all numeric expressions (money, decimals, ordinals, integers) to words."""
    substitutions = (
        (_comma_number_re, _remove_commas),
        (_pounds_re, r'\1 pounds'),
        (_dollars_re, _expand_dollars),
        (_decimal_number_re, _expand_decimal_point),
        (_ordinal_re, _expand_ordinal),
        (_number_re, _expand_number),
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text
def expand_numbers(text):
    """Alias for normalize_numbers."""
    return normalize_numbers(text)
def lowercase(text):
    """Lowercase the whole string."""
    return text.lower()
def collapse_whitespace(text):
    """Collapse runs of whitespace into a single space."""
    return _whitespace_re.sub(' ', text)
def convert_to_ascii(text):
    """Transliterate non-ASCII characters to their closest ASCII equivalents."""
    return unidecode(text)
def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    for step in (lowercase, collapse_whitespace):
        text = step(text)
    return text
def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    for step in (convert_to_ascii, lowercase, collapse_whitespace):
        text = step(text)
    return text
def english_cleaners(text):
    '''Pipeline for English text, including number and abbreviation expansion.'''
    pipeline = (convert_to_ascii, lowercase, expand_numbers,
                expand_abbreviations, collapse_whitespace)
    for step in pipeline:
        text = step(text)
    # Finally drop double quotes entirely.
    return text.replace('"', '')
def lev_distance(s1, s2):
    """Levenshtein edit distance between s1 and s2 (single-row DP, O(min(len)) memory)."""
    if len(s1) > len(s2):
        # Keep the shorter string in s1 so the DP row stays small.
        s1, s2 = s2, s1
    row = list(range(len(s1) + 1))
    for j, ch2 in enumerate(s2):
        new_row = [j + 1]
        for i, ch1 in enumerate(s1):
            if ch1 == ch2:
                cost = row[i]
            else:
                cost = 1 + min(row[i], row[i + 1], new_row[-1])
            new_row.append(cost)
        row = new_row
    return row[-1]
class VoiceBpeTokenizer:
    """BPE tokenizer over cleaned English text, with '[SPACE]' as an explicit space token."""
    def __init__(self, vocab_file='ruth-tts-files/data/tokenizer.json'):
        # NOTE(review): if vocab_file is None, self.tokenizer is never set and
        # encode/decode will raise AttributeError — confirm that is intended.
        if vocab_file is not None:
            self.tokenizer = Tokenizer.from_file(vocab_file)
    def preprocess_text(self, txt):
        # Normalize (ASCII, lowercase, numbers/abbreviations expanded) before BPE.
        txt = english_cleaners(txt)
        return txt
    def encode(self, txt):
        """Return the list of BPE token ids for `txt`."""
        txt = self.preprocess_text(txt)
        txt = txt.replace(' ', '[SPACE]')
        return self.tokenizer.encode(txt).ids
    def decode(self, seq):
        """Convert token ids (list or tensor) back to text, stripping special tokens."""
        if isinstance(seq, torch.Tensor):
            seq = seq.cpu().numpy()
        txt = self.tokenizer.decode(seq, skip_special_tokens=False).replace(' ', '')
        txt = txt.replace('[SPACE]', ' ')
        txt = txt.replace('[STOP]', '')
        txt = txt.replace('[UNK]', '')
        return txt
import re
def split_and_recombine_text(text, desired_length=200, max_length=300):
    """Split text it into chunks of a desired length trying to keep sentences intact.

    Args:
        text: input text; whitespace is normalized and curly quotes converted.
        desired_length: preferred chunk size; a chunk is flushed at the next
            sentence boundary once it reaches this length.
        max_length: hard cap; reaching it forces a split (backtracking to the
            last sentence boundary or word boundary when possible).
    """
    # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii
    text = re.sub(r'\n\n+', '\n', text)
    text = re.sub(r'\s+', ' ', text)
    text = re.sub(r'[“”]', '"', text)
    rv = []
    in_quote = False
    current = ""
    split_pos = []
    pos = -1
    def seek(delta):
        # Move the cursor `delta` chars (negative = backwards), toggling
        # quote state as '"' characters are crossed; returns the character at
        # the new position and the one after it (or "" at end of text).
        nonlocal pos, in_quote, text
        is_neg = delta < 0
        for _ in range(abs(delta)):
            if is_neg:
                pos -= 1
            else:
                pos += 1
            if text[pos] == '"':
                in_quote = not in_quote
        return text[pos], text[pos+1] if pos < len(text)-1 else ""
    def commit():
        # Flush the accumulated chunk and reset split bookkeeping.
        nonlocal rv, current, split_pos
        rv.append(current)
        current = ""
        split_pos = []
    while pos < len(text) - 1:
        c, next_c = seek(1)
        current += c
        # do we need to force a split?
        if len(current) >= max_length:
            if len(split_pos) > 0 and len(current) > (desired_length / 2):
                # we have at least one sentence and we are over half the desired length, seek back to the last split
                d = pos - split_pos[-1]
                seek(-d)
                current = current[:-d]
            else:
                # no full sentences, seek back until we are not in the middle of a word and split there
                while c not in '!?.\n ' and pos > 0 and len(current) > desired_length:
                    c, _ = seek(-1)
                    current = current[:-1]
            commit()
        # check for sentence boundaries
        elif not in_quote and (c in '!?\n' or (c == '.' and next_c in '\n ')):
            split_pos.append(pos)
            if len(current) >= desired_length:
                commit()
    rv.append(current)
    # clean up
    rv = [s.strip() for s in rv]
    rv = [s for s in rv if len(s) > 0]
    return rv
if __name__ == '__main__':
import unittest
class Test(unittest.TestCase):
def test_split_and_recombine_text(self):
text = """
This is a sample sentence.
This is another sample sentence.
This is a longer sample sentence that should force a split inthemiddlebutinotinthislongword.
"Don't split my quote... please"
"""
self.assertEqual(split_and_recombine_text(text, desired_length=20, max_length=40),
['This is a sample sentence.',
'This is another sample sentence.',
'This is a longer sample sentence that',
'should force a split',
'inthemiddlebutinotinthislongword.',
'"Don\'t split my quote... please"'])
unittest.main() | /ruth-tts-converter-0.0.2.tar.gz/ruth-tts-converter-0.0.2/src/ruth_tts_transformer/ruth_tts/utils/text.py | 0.455199 | 0.376337 | text.py | pypi |
from .enums import Url
from .exceptions import ServerException
from typing import Union
from requests import Session
# More about API: http://api.rutracker.org/v1/docs/
class ApiProvider(object):
"""This class provides access to some official methods of the Rutracker API"""
    def __init__(self, session: Session):
        # Keep a reference to an (already authenticated) requests.Session so
        # all API calls share its cookies and headers.
        self.session = session
def _request(self, endpoint: str, params: dict) -> dict:
response = self.session.get(Url.API.value + endpoint, params=params)
json = response.json()
if "error" in json:
raise ServerException(json["error"]["text"])
return json
def get_peer_stats(self, val: Union[list, str], by: str = "topic_id") -> dict:
"""Get peer stats by topic id (topic_id) or torrent hash (hash)"""
if isinstance(val, list):
val = ",".join(map(str, val))
response = self._request("get_peer_stats", {"by": by, "val": val})
result = {}
for key, value in response["result"].items():
result[key] = {
"seeders": value[0],
"leechers": value[1],
"seeder_last_seen": value[2],
}
return result
def get_topic_id(self, val: Union[list, str]) -> dict:
"""Get topic id by torrent hash"""
if isinstance(val, list):
val = ",".join(map(str, val))
response = self._request("get_topic_id", {"by": "hash", "val": val})
return response["result"]
def get_tor_hash(self, val: Union[list, str]) -> dict:
"""Get torrent hash by topic id"""
if isinstance(val, list):
val = ",".join(map(str, val))
response = self._request("get_tor_hash", {"by": "topic_id", "val": val})
return response["result"]
def get_tor_topic_data(self, val: Union[list, str]) -> dict:
"""Get torrent topic data by topic id"""
if isinstance(val, list):
val = ",".join(map(str, val))
response = self._request("get_tor_topic_data", {"by": "topic_id", "val": val})
return response["result"] | /rutracker-api-0.22.92.tar.gz/rutracker-api-0.22.92/rutracker_api/api_provider.py | 0.83772 | 0.191177 | api_provider.py | pypi |
from datetime import datetime, timezone

from .enums import Url
from .utils import format_size, generate_magnet
class Torrent(object):
"""Stores data about the torrent"""
def __init__(
self,
author=None,
category=None,
downloads=None,
host=None,
leeches=None,
registered=None,
seeds=None,
size=None,
state=None,
title=None,
topic_id=None,
hash=None,
magnet=None,
):
self.author = author
self.category = category
self.downloads = downloads
self.leeches = leeches
self.registered = registered
self.seeds = seeds
self.size = size
self.state = state
self.title = title
self.topic_id = topic_id
self.url = f"{Url.HOST.value}/forum/viewtopic.php?t={topic_id}"
self.hash = hash
self.magnet = magnet
def formatted_size(self) -> str:
"""Returns the size formated as XXX KB/MB/GB/TB"""
return format_size(self.size)
def formatted_registered(self) -> str:
"""Returns the date formatted as YYYY-MM-DD"""
return datetime.utcfromtimestamp(self.registered).strftime("%Y-%m-%d")
def get_magnet(self, hash: str = None) -> str:
"""Returns the magnet link. Requires hash"""
if self.magnet:
return self.magnet
if hash:
self.hash = hash
if not self.hash:
raise Exception("No hash provided")
self.magnet = generate_magnet(
self.hash, Url.MAGNET_ANN.value, self.title, self.url
)
return self.magnet
def __str__(self):
return f"[{self.topic_id}] {self.title}"
def __repr__(self):
return f"<Torrent {self.topic_id}>"
def as_dict(self) -> dict:
return {
"author": self.author,
"category": self.category,
"downloads": self.downloads,
"leeches": self.leeches,
"registered": self.registered,
"seeds": self.seeds,
"size": self.size,
"state": self.state,
"title": self.title,
"topic_id": self.topic_id,
"url": self.url,
"hash": self.hash,
"magnet": self.magnet,
}
def __getitem__(self, key):
return self.__getattribute__(key)
def __iter__(self):
return iter(self.as_dict().items()) | /rutracker-api-0.22.92.tar.gz/rutracker-api-0.22.92/rutracker_api/torrent.py | 0.732018 | 0.166574 | torrent.py | pypi |
from __future__ import division
import math
import logging
import struct
log = logging.getLogger(__name__)
class Df5Decoder(object):
"""
Decodes data from RuuviTag with Data Format 5
Protocol specification:
https://github.com/ruuvi/ruuvi-sensor-protocols
"""
def _get_temperature(self, data):
"""Return temperature in celsius"""
if data[1] == -32768:
return None
return round(data[1] / 200, 2)
def _get_humidity(self, data):
"""Return humidity %"""
if data[2] == 65535:
return None
return round(data[2] / 400, 2)
def _get_pressure(self, data):
"""Return air pressure hPa"""
if data[3] == 0xFFFF:
return None
return round((data[3] + 50000) / 100, 2)
def _get_acceleration(self, data):
"""Return acceleration mG"""
if (data[4] == -32768 or data[5] == -32768 or data[6] == -32768):
return (None, None, None)
return data[4:7]
def _get_powerinfo(self, data):
"""Return battery voltage and tx power"""
battery_voltage = data[7] >> 5
tx_power = data[7] & 0x001F
return (battery_voltage, tx_power)
def _get_battery(self, data):
"""Return battery mV"""
battery_voltage = self._get_powerinfo(data)[0]
if battery_voltage == 0b11111111111:
return None
return battery_voltage + 1600
def _get_txpower(self, data):
"""Return transmit power"""
tx_power = self._get_powerinfo(data)[1]
if tx_power == 0b11111:
return None
return -40 + (tx_power * 2)
def _get_movementcounter(self, data):
return data[8]
def _get_measurementsequencenumber(self, data):
return data[9]
def _get_mac(self, data):
return ''.join('{:02x}'.format(x) for x in data[10:])
def decode_data(self, data):
"""
Decode sensor data.
Returns:
dict: Sensor values
"""
try:
byte_data = struct.unpack(
'>BhHHhhhHBH6B', bytearray.fromhex(data[:48])
)
acc_x, acc_y, acc_z = self._get_acceleration(byte_data)
return {
'data_format': 5,
'humidity': self._get_humidity(byte_data),
'temperature': self._get_temperature(byte_data),
'pressure': self._get_pressure(byte_data),
'acceleration': math.sqrt(
acc_x * acc_x + acc_y * acc_y + acc_z * acc_z
),
'acceleration_x': acc_x,
'acceleration_y': acc_y,
'acceleration_z': acc_z,
'tx_power': self._get_txpower(byte_data),
'battery': self._get_battery(byte_data),
'movement_counter': self._get_movementcounter(byte_data),
'measurement_sequence_number':
self._get_measurementsequencenumber(byte_data),
'mac': self._get_mac(byte_data)
}
except Exception:
log.exception('Value: %s not valid', data)
return None | /ruuvi_decoders-0.2.0.tar.gz/ruuvi_decoders-0.2.0/ruuvi_decoders/df5_decoder.py | 0.830009 | 0.541712 | df5_decoder.py | pypi |
from __future__ import division
import base64
import logging
log = logging.getLogger(__name__)
class UrlDecoder(object):
"""
Decodes data from RuuviTag url
Protocol specification:
https://github.com/ruuvi/ruuvi-sensor-protocols
Decoder operations are ported from:
https://github.com/ruuvi/sensor-protocol-for-eddystone-url/blob/master/index.html
0: uint8_t format; // (0x02 = realtime sensor readings)
1: uint8_t humidity; // one lsb is 0.5%
2-3: uint16_t temperature; // Signed 8.8 fixed-point notation.
4-5: uint16_t pressure; // (-50kPa)
6-7: uint16_t time; // seconds (now from reset)
The bytes for temperature, pressure and time are swaped during the encoding
"""
def _get_temperature(self, decoded):
"""Return temperature in celsius"""
temp = (decoded[2] & 127) + decoded[3] / 100
sign = (decoded[2] >> 7) & 1
if sign == 0:
return round(temp, 2)
return round(-1 * temp, 2)
def _get_humidity(self, decoded):
"""Return humidity %"""
return decoded[1] * 0.5
def _get_pressure(self, decoded):
"""Return air pressure hPa"""
pres = ((decoded[4] << 8) + decoded[5]) + 50000
return pres / 100
def decode_data(self, encoded):
"""
Decode sensor data.
Returns:
dict: Sensor values
"""
try:
identifier = None
data_format = 2
if len(encoded) > 8:
data_format = 4
identifier = encoded[8:]
encoded = encoded[:8]
decoded = bytearray(base64.b64decode(encoded, '-_'))
return {
'data_format': data_format,
'temperature': self._get_temperature(decoded),
'humidity': self._get_humidity(decoded),
'pressure': self._get_pressure(decoded),
'identifier': identifier
}
except:
log.exception('Encoded value: %s not valid', encoded)
return None | /ruuvi_decoders-0.2.0.tar.gz/ruuvi_decoders-0.2.0/ruuvi_decoders/url_decoder.py | 0.841858 | 0.322313 | url_decoder.py | pypi |
from typing import Dict, Tuple
from aiohttp.client import ClientSession
import aiohttp
from result import Ok, Err, Result
from ruuvi_decoders import get_decoder
from ruuvi_gateway_client.types import SensorData, SensorPayload, ParsedDatas, Payload
from ruuvi_gateway_client.parser import parse_session_cookie, parse_password
def _parse_sensor_payload(mac: str, payload: SensorPayload) -> Tuple[str, SensorData]:
    """Decode one tag's raw BLE advertisement into sensor values.

    Returns a (mac, data) pair; data is None when the payload cannot be
    decoded (not a Ruuvi advertisement or unknown data format).
    """
    raw = payload["data"]
    try:
        company_index = raw.index("FF9904")  # Ruuvi's BLE manufacturer id
    except ValueError:
        print("Ruuvi company id not found in data")
        return (mac, None)
    try:
        broadcast_data = raw[company_index + 6:]
        data_format = broadcast_data[0:2]
        decoded: SensorData = get_decoder(int(data_format)).decode_data(broadcast_data)
    except ValueError:
        print("Valid data format data not found in payload")
        return (mac, None)
    # BUGFIX: rssi used to be written into a dict that was then completely
    # overwritten by the decoder's result, so it never reached callers.
    # Attach it to the decoded reading instead.
    decoded["rssi"] = payload["rssi"]
    return (mac, decoded)
def _parse_received_data(payload: Payload) -> ParsedDatas:
    """Decode every tag in a gateway history payload into sensor data, keyed by MAC."""
    tags = payload["data"]["tags"]
    return dict(_parse_sensor_payload(mac, tag) for mac, tag in tags.items())
async def get_auth_info(session: ClientSession, ip: str, cookies: Dict[str, str] = {}) -> Result[str, None]:
    """Probe the gateway's /auth endpoint for its WWW-Authenticate challenge.

    Returns Ok(challenge header) when the gateway answers 401 (auth
    required), Err(None) otherwise. The mutable default `cookies={}` is safe
    here because it is only read, never mutated.
    """
    async with session.get(f'http://{ip}/auth', cookies=cookies) as response:
        if response.status == 401:
            auth_info = response.headers["WWW-Authenticate"]
            return Ok(auth_info)
        # BUGFIX: `Err()` without a value raises TypeError in the `result`
        # library; the declared error type is None, so pass it explicitly.
        return Err(None)
async def authorize_user(session: ClientSession, ip: str, cookies, username: str, password_encrypted: str) -> Result[int, int]:
    """POST credentials to the gateway's /auth endpoint.

    Returns Ok(200) on success, Err(status) on any other response.
    """
    import json  # stdlib; local so this fix is self-contained
    # BUGFIX: the payload was previously built by string concatenation, which
    # produces invalid JSON whenever the login or encrypted password contains
    # a '"' or backslash character.
    auth_payload = json.dumps({"login": username, "password": password_encrypted})
    async with session.post(f'http://{ip}/auth', data=auth_payload, cookies=cookies) as response:
        return Ok(response.status) if response.status == 200 else Err(response.status)
async def get_data(session: ClientSession, ip: str, cookies: Dict[str, str] = {}) -> Result[ParsedDatas, int]:
    """Fetch and decode the gateway's recent (last 5 s) tag history.

    Returns Ok(parsed readings keyed by MAC) or Err(HTTP-like status code).
    The mutable default `cookies={}` is safe: it is only read, never mutated.
    """
    try:
        async with session.get(f'http://{ip}/history?time=5', cookies=cookies) as response:
            if response.status == 200:
                data = await response.json()
                parsed = _parse_received_data(data)
                return Ok(parsed)
            else:
                return Err(response.status)
    except aiohttp.ClientConnectionError as e:
        # The gateway aborts the connection with a 302 when authentication is
        # required; surface that as Err(302) so the caller can log in.
        message = e.args[0]
        if hasattr(message, 'code') and message.code == 302:
            return Err(302)
        # Any other connection failure is mapped to a generic 500.
        return Err(500)
async def get_authenticate_cookies(session: ClientSession, ip: str, username: str, password: str) -> Result[Dict[str, str], str]:
    """Run the gateway's challenge/response login and return session cookies.

    Returns Ok(cookies) on success, or Err describing the failing step.
    """
    auth_info_result = await get_auth_info(session, ip)
    if not auth_info_result.is_ok():
        # BUGFIX: `Err()` without a value raises TypeError in the `result`
        # library; the declared error type here is str.
        return Err("could not fetch WWW-Authenticate challenge")
    cookies = parse_session_cookie(auth_info_result.value)
    password_encrypted = parse_password(
        auth_info_result.value, username, password)
    auth_result = await authorize_user(session, ip, cookies, username, password_encrypted)
    if not auth_result.is_ok():
        return Err(auth_result.value)
    return Ok(cookies)
async def fetch_data(ip: str, username: str, password: str) -> Result[ParsedDatas, str]:
async with aiohttp.ClientSession() as session:
get_result = await get_data(session, ip)
if get_result.is_ok():
return Ok(get_result.value)
if get_result.value != 302:
return Err(f'Fetch failed - {get_result.value}')
cookie_result = await get_authenticate_cookies(session, ip, username, password)
if not cookie_result.is_ok():
return Err(f'Authentication failed - {cookie_result.value}')
get_result = await get_data(session, ip, cookie_result.value)
if get_result.is_ok():
return Ok(get_result.value)
else:
return Err(f'Fetch failed after authentication - {get_result.value}') | /ruuvi_gateway_client-0.1.0-py3-none-any.whl/ruuvi_gateway_client/gateway.py | 0.579638 | 0.217545 | gateway.py | pypi |
import argparse
import logging
import json
from concurrent.futures import Future
import asyncio
from aiohttp import ClientSession, TCPConnector
from typing import Callable, Dict, List, Union
async def handle_queue(
    args: argparse.Namespace,
    queue,
    future: Future,
    verify_ssl=True,
    api_key: Union[str, None] = None,
):
    """Drain `queue`, POSTing each payload to args.dest until `future` stops running.

    Args:
        args: must provide `dest`, the destination URL.
        queue: thread-safe queue of JSON-serializable payload dicts.
        future: polling loop exits once future.running() is False.
        verify_ssl: TLS certificate verification for the destination.
        api_key: optional value for the X-API-KEY header.
    """
    async def send_post(session, update_data, headers: Dict[str, str]):
        # Fire a single POST; non-201 responses are logged but not retried.
        async with session.post(
            args.dest,
            data=json.dumps(update_data),
            headers=headers,
        ) as response:
            logging.debug("Server responded: %s", response.status)
            body = await response.text()
            if response.status != 201:
                logging.warning("%s: %s", response.status, body)
    headers = {"content-type": "application/json"}
    if api_key:
        headers["X-API-KEY"] = api_key
    # NOTE(review): `verify_ssl=` on TCPConnector is deprecated in newer
    # aiohttp releases in favor of `ssl=` — confirm the pinned version.
    connector = TCPConnector(verify_ssl=verify_ssl)
    async with ClientSession(connector=connector) as session:
        tasks: List[asyncio.Task] = []
        while future.running():
            if not queue.empty():
                data = queue.get()
                tasks.append(asyncio.create_task(send_post(session, data, headers)))
            else:
                # Idle poll keeps the event loop responsive without blocking
                # on the (synchronous) queue.
                await asyncio.sleep(0.1)
            # Prune finished tasks so the list does not grow unboundedly.
            tasks = [task for task in tasks if not task.done()]
        # Shutting down: cancel any still-pending posts.
        for task in tasks:
            task.cancel()
def format_data(data: dict) -> dict:
    """Convert a decoded RuuviTag reading into an integer-only payload.

    Fractional fields are scaled by 100 and truncated to int (precise enough
    for transport); counter-like fields are coerced to int as-is; the MAC is
    passed through verbatim.
    """
    scaled_fields = (
        "humidity",
        "temperature",
        "pressure",
        "acceleration",
    )
    passthrough_fields = (
        "battery",
        "measurement_sequence_number",
        "movement_counter",
        "tx_power",
        "acceleration_x",
        "acceleration_y",
        "acceleration_z",
    )
    formatted = {name: int(data[name] * 100) for name in scaled_fields}
    formatted.update((name, int(data[name])) for name in passthrough_fields)
    formatted["mac"] = data["mac"]
    return formatted
def run_get_datas_background(queue, sensor_reader: Callable):
    """Consume sensor readings via `sensor_reader`, pushing formatted payloads onto `queue`.

    Args:
        queue: queue receiving formatted dicts (thread-safe put()).
        sensor_reader: callable taking a per-reading callback; typically
            blocks while reading (e.g. RuuviTagSensor.get_data).
    """
    def handle_new_data(new_data):
        # new_data is a (mac, sensor_data) pair.
        sensor_data = new_data[1]
        formatted_data = format_data(sensor_data)
        # BUGFIX: previously logged the `format_data` function object instead
        # of the formatted payload.
        logging.debug("Formatted data: %s", formatted_data)
        queue.put(formatted_data)

    sensor_reader(handle_new_data)
import time
import random
from typing import Callable, List, Union
from ruuvitag_sensor.ruuvi import MacAndSensorData, RunFlag
class MockSensor:
    """Fake RuuviTag producing plausible, slowly drifting readings for tests/demos."""

    def __init__(self):
        # Generate random mac address
        self.mac = "".join(random.choice("0123456789ABCDEF") for _ in range(12)).lower()
        self.battery = random.uniform(2.0, 3.0)
        self.pressure = random.uniform(1000.0, 1010.0)
        self.measurement_sequence_number = random.randint(1000, 10000)
        self.temperature = random.uniform(-20.0, 25.0)
        self.acceleration = random.uniform(1000.0, 2050.0)
        self.acceleration_z = self.acceleration
        self.acceleration_y = random.randint(-10, 10)
        self.acceleration_x = random.randint(-10, 10)
        self.humidity = random.uniform(20.0, 80.0)
        self.tx_power = random.randint(0, 10)
        self.movement_counter = random.randint(0, 100)
        self.rssi = random.randint(-100, 0)

    def update_data(self):
        """Apply a small random drift, keeping values inside sane bounds."""
        # BUGFIX: min/max previously clamped the ±0.05 *delta* (a no-op)
        # instead of the resulting value, so temperature/humidity could drift
        # out of their intended [-25, 80] / [0, 90] ranges over time.
        self.temperature = max(-25.0, min(80.0, self.temperature + random.uniform(-0.05, 0.05)))
        self.humidity = max(0.0, min(90.0, self.humidity + random.uniform(-0.05, 0.05)))
        self.movement_counter += 1 if random.randint(0, 10) == 0 else 0
        self.pressure += random.uniform(-0.01, 0.01)
        self.measurement_sequence_number += 1

    def get_reading(self) -> Union[MacAndSensorData, None]:
        """Return a (mac, data_format_5_dict) pair, or None ~25% of the time
        to simulate a missed advertisement."""
        if random.randint(0, 3) == 0:
            self.measurement_sequence_number += 1
            return None
        self.update_data()
        return [
            self.mac,
            {
                "data_format": 5,
                "battery": self.battery,
                "pressure": self.pressure,
                "mac": self.mac,
                "measurement_sequence_number": self.measurement_sequence_number,
                "acceleration_z": self.acceleration_z,
                "acceleration": self.acceleration,
                "temperature": self.temperature,
                "acceleration_y": self.acceleration_y,
                "acceleration_x": self.acceleration_x,
                "humidity": self.humidity,
                "tx_power": self.tx_power,
                "movement_counter": self.movement_counter,
                "rssi": self.rssi,
            },
        ]
class MockSensorReader:
    """Drop-in stand-in for RuuviTagSensor.get_data serving synthetic readings."""

    def __init__(self, count: int = 3):
        # One MockSensor per simulated tag.
        self.sensors = [MockSensor() for _ in range(count)]

    def get_data(
        self,
        callback: Callable[[MacAndSensorData], None],
        macs: List[str] = [],
        run_flag: RunFlag = RunFlag(),
        bt_device: str = None,
    ):
        """Poll every mock sensor once per second, invoking callback per reading.

        Args:
            callback: called with each [mac, payload] reading produced.
            macs: accepted for signature compatibility; not filtered here.
            run_flag: loop continues while run_flag.running is True.
            bt_device: accepted for signature compatibility; unused.
        """
        # Fixed: honor run_flag like the real reader does -- the original
        # looped forever (`while True`) and the run_flag parameter was unused.
        while run_flag.running:
            for sensor in self.sensors:
                reading = sensor.get_reading()
                if reading:
                    callback(reading)
            time.sleep(1)
from __future__ import annotations
import math
import struct
class DataFormat5Decoder:
    """Decoder for RuuviTag data format 5 (RAWv2) binary payloads.

    The 24-byte big-endian layout is: format byte, temperature (int16,
    0.005 degC units), humidity (uint16, 0.0025 % units), pressure (uint16,
    Pa above 50000), acceleration x/y/z (int16 mG each), power info
    (uint16: 11-bit battery voltage above 1600 mV, 5-bit TX power),
    movement counter (uint8), measurement sequence number (uint16) and
    the 6-byte MAC address.
    """

    _STRUCT_FORMAT = ">BhHHhhhHBH6B"

    def __init__(self, raw_data: bytes) -> None:
        if len(raw_data) < 24:
            raise ValueError("Data must be at least 24 bytes long for data format 5")
        # Fixed: use unpack_from so payloads with trailing bytes are
        # accepted, matching the "at least 24 bytes" contract above
        # (plain unpack() raises struct.error unless len is exactly 24).
        self.data: tuple[int, ...] = struct.unpack_from(self._STRUCT_FORMAT, raw_data)

    @property
    def temperature_celsius(self) -> float | None:
        """Temperature in degC, or None for the 0x8000 'invalid' sentinel."""
        if self.data[1] == -32768:
            return None
        return round(self.data[1] / 200.0, 2)

    @property
    def humidity_percentage(self) -> float | None:
        """Relative humidity in %, or None for the 0xFFFF 'invalid' sentinel."""
        if self.data[2] == 65535:
            return None
        return round(self.data[2] / 400, 2)

    @property
    def pressure_hpa(self) -> float | None:
        """Atmospheric pressure in hPa, or None if invalid (0xFFFF)."""
        if self.data[3] == 0xFFFF:
            return None
        # Wire value is Pa with a -50000 offset; convert to hPa.
        return round((self.data[3] + 50000) / 100, 2)

    @property
    def acceleration_vector_mg(self) -> tuple[int, int, int] | tuple[None, None, None]:
        """(x, y, z) acceleration in mG; all-None if any axis is invalid."""
        ax = self.data[4]
        ay = self.data[5]
        az = self.data[6]
        if ax == -32768 or ay == -32768 or az == -32768:
            return (None, None, None)
        return (ax, ay, az)

    @property
    def acceleration_total_mg(self) -> float | None:
        """Euclidean magnitude of the acceleration vector in mG."""
        ax, ay, az = self.acceleration_vector_mg
        if ax is None or ay is None or az is None:
            return None
        return math.sqrt(ax * ax + ay * ay + az * az)

    @property
    def battery_voltage_mv(self) -> int | None:
        """Battery voltage in mV, or None if invalid (all ones)."""
        # Top 11 bits of the power-info word, offset by 1600 mV.
        voltage = self.data[7] >> 5
        if voltage == 0b11111111111:
            return None
        return voltage + 1600

    @property
    def tx_power_dbm(self) -> int | None:
        """TX power in dBm, or None if invalid (all ones)."""
        # Bottom 5 bits of the power-info word, 2 dBm steps from -40 dBm.
        tx_power = self.data[7] & 0x001F
        if tx_power == 0b11111:
            return None
        return -40 + (tx_power * 2)

    @property
    def movement_counter(self) -> int:
        """Movement-detection event count (wraps at 255)."""
        return self.data[8]

    @property
    def measurement_sequence_number(self) -> int:
        """Monotonic per-measurement counter (wraps at 65535)."""
        return self.data[9]

    @property
    def mac(self) -> str:
        """MAC address formatted as colon-separated uppercase hex."""
        return ":".join(f"{x:02X}" for x in self.data[10:])
import logging
import time
from multiprocessing import Manager
from multiprocessing.managers import ListProxy
from typing import AsyncGenerator, Callable, Dict, Generator, List, Optional
from warnings import warn
from ruuvitag_sensor.adapters import get_ble_adapter, is_async_adapter
from ruuvitag_sensor.data_formats import DataFormats
from ruuvitag_sensor.decoder import get_decoder, parse_mac
from ruuvitag_sensor.ruuvi_types import DataFormatAndRawSensorData, Mac, MacAndRawData, MacAndSensorData, SensorData
log = logging.getLogger(__name__)
ble = get_ble_adapter()
class RunFlag:
    """Mutable boolean flag used to ask a long-running polling loop to stop.

    Attributes:
        running (bool): while True the polled function keeps executing;
            set it to False to request a cooperative shutdown.
    """

    running: bool = True
class RuuviTagSensor:
    """
    RuuviTag communication functionality
    """

    @staticmethod
    def get_first_raw_data(mac: str, bt_device: str = "") -> DataFormatAndRawSensorData:
        """
        Get raw data for selected RuuviTag. This method is intended to be used only by
        RuuviTag-class.

        Args:
            mac (string): MAC address
            bt_device (string): Bluetooth device id

        Returns:
            tuple (int, string): Data Format type and raw Sensor data
        """
        raw = ble.get_first_data(mac, bt_device)
        return DataFormats.convert_data(raw)

    @staticmethod
    async def get_first_raw_data_async(mac: str, bt_device: str = "") -> DataFormatAndRawSensorData:
        """
        Get raw data for selected RuuviTag. This method is intended to be used only by
        RuuviTag-class.

        It doesn't have asynchronous implementation.

        Args:
            mac (string): MAC address
            bt_device (string): Bluetooth device id

        Returns:
            tuple (int, string): Data Format type and raw Sensor data
        """
        raw = await ble.get_first_data(mac, bt_device)
        return DataFormats.convert_data(raw)

    @staticmethod
    def find_ruuvitags(bt_device: str = "") -> Dict[Mac, SensorData]:
        """
        CLI helper function.
        Find all RuuviTags. Function will print the MAC and the state of the sensors when found.
        Function will execute as long as it is stopped. Stop execution with Ctrl+C.

        Returns:
            dict: MAC and state of found sensors
        """
        log.info("Finding RuuviTags. Stop with Ctrl+C.")
        data: Dict[str, SensorData] = {}
        for new_data in RuuviTagSensor._get_ruuvitag_data(bt_device=bt_device):
            mac, sensor_data = new_data
            # Only report each tag once: skip repeats and entries with no MAC.
            if not mac or mac in data:
                continue
            data[mac] = sensor_data
            log.info(mac)
            log.info(sensor_data)
        return data

    @staticmethod
    async def find_ruuvitags_async(bt_device: str = "") -> Dict[Mac, MacAndSensorData]:
        """
        CLI helper function.
        Find all RuuviTags. Function will print the MAC and the state of the sensors when found.
        Function will execute as long as it is stopped. Stop execution with Ctrl+C.

        Returns:
            dict: MAC and state of found sensors
        """
        if not is_async_adapter(ble):
            raise Exception("Only Bleak BLE communication is supported")
        log.info("Finding RuuviTags. Stop with Ctrl+C.")
        data: Dict[Mac, MacAndSensorData] = {}
        # Shared proxy list: _parse_data appends MACs of non-RuuviTag devices
        # so the adapter can skip them on later advertisements.
        mac_blacklist = Manager().list()
        data_iter = ble.get_data(mac_blacklist, bt_device)
        async for new_data in data_iter:
            if new_data[0] in data:
                continue
            parsed_data = RuuviTagSensor._parse_data(new_data, mac_blacklist)
            if parsed_data:
                data[new_data[0]] = parsed_data
                log.info(new_data[0])
                log.info(parsed_data)
        return data

    @staticmethod
    def get_data_for_sensors(
        macs: List[str] = [], search_duratio_sec: int = 5, bt_device: str = ""
    ) -> Dict[Mac, SensorData]:
        """
        Get latest data for sensors in the MAC address list.

        Args:
            macs (array): MAC addresses
            search_duratio_sec (int): Search duration in seconds. Default 5
            bt_device (string): Bluetooth device id

        Returns:
            dict: MAC and state of found sensors
        """
        # NOTE: the "duratio" spelling is part of the public API; renaming it
        # would break existing keyword-argument callers.
        log.info("Get latest data for sensors. Stop with Ctrl+C.")
        log.info("Stops automatically in %ss", search_duratio_sec)
        log.info("MACs: %s", macs)
        data: Dict[Mac, SensorData] = {}
        for new_data in RuuviTagSensor._get_ruuvitag_data(macs, search_duratio_sec, bt_device=bt_device):
            mac, sensor_data = new_data
            # Later readings overwrite earlier ones, so the dict holds the
            # latest state per tag.
            data[mac] = sensor_data
        return data

    @staticmethod
    async def get_data_async(macs: List[str] = [], bt_device: str = "") -> AsyncGenerator[MacAndSensorData, None]:
        """
        Async generator yielding (MAC, sensor data) for all RuuviTags, or only
        for tags in the MAC list. Requires the async (Bleak) BLE adapter.

        Args:
            macs (list): MAC addresses
            bt_device (string): Bluetooth device id

        Yields:
            tuple: MAC and state of RuuviTag sensor data
        """
        if not is_async_adapter(ble):
            raise Exception("Only Bleak BLE communication is supported")
        mac_blacklist = Manager().list()
        data_iter = ble.get_data(mac_blacklist, bt_device)
        async for ble_data in data_iter:
            # NOTE(review): _parse_data already applies the macs whitelist, so
            # the explicit check below is only needed when the advertised MAC
            # is present -- the ordering looks redundant; confirm intent.
            data = RuuviTagSensor._parse_data(ble_data, mac_blacklist, macs)
            # Check MAC whitelist if advertised MAC available
            if ble_data[0] and macs and not ble_data[0] in macs:
                log.debug("MAC not whitelisted: %s", ble_data[0])
                continue
            if data:
                yield data

    @staticmethod
    def get_data(
        callback: Callable[[MacAndSensorData], None],
        macs: List[str] = [],
        run_flag: RunFlag = RunFlag(),
        bt_device: str = "",
    ) -> None:
        """
        Get data for all ruuvitag sensors or sensors in the MAC's list.

        Args:
            callback (func): callback function to be called when new data is received
            macs (list): MAC addresses
            run_flag (object): RunFlag object. Function executes while run_flag.running
            bt_device (string): Bluetooth device id
        """
        log.info("Get latest data for sensors. Stop with Ctrl+C.")
        log.info("MACs: %s", macs)
        for new_data in RuuviTagSensor._get_ruuvitag_data(macs, None, run_flag, bt_device):
            callback(new_data)

    @staticmethod
    def get_datas(
        callback: Callable[[MacAndSensorData], None],
        macs: List[str] = [],
        run_flag: RunFlag = RunFlag(),
        bt_device: str = "",
    ) -> None:
        """
        DEPRECATED
        This method will be removed in a future version.
        Use get_data-method instead.
        """
        warn("This method will be removed in a future version, use get_data() instead", FutureWarning)
        return RuuviTagSensor.get_data(callback, macs, run_flag, bt_device)

    @staticmethod
    def _get_ruuvitag_data(
        macs: List[str] = [],
        search_duratio_sec: Optional[int] = None,
        run_flag: RunFlag = RunFlag(),
        bt_device: str = "",
    ) -> Generator[MacAndSensorData, None, None]:
        """
        Get data from BluetoothCommunication and handle data encoding.

        Args:
            macs (list): MAC addresses. Default empty list
            search_duratio_sec (int): Search duration in seconds. Default None
            run_flag (object): RunFlag object. Function executes while run_flag.running.
                Default new RunFlag
            bt_device (string): Bluetooth device id

        Yields:
            tuple: MAC and State of RuuviTag sensor data
        """
        mac_blacklist = Manager().list()
        start_time = time.time()
        data_iter = ble.get_data(mac_blacklist, bt_device)
        for ble_data in data_iter:
            # Check duration
            if search_duratio_sec and time.time() - start_time > search_duratio_sec:
                # close() lets the adapter generator run its cleanup code.
                data_iter.close()
                break
            # Check running flag
            if not run_flag.running:
                data_iter.close()
                break
            # Check MAC whitelist if advertised MAC available
            if ble_data[0] and macs and not ble_data[0] in macs:
                log.debug("MAC not whitelisted: %s", ble_data[0])
                continue
            data = RuuviTagSensor._parse_data(ble_data, mac_blacklist, macs)
            if data:
                yield data

    @staticmethod
    def _parse_data(
        ble_data: MacAndRawData, mac_blacklist: ListProxy, allowed_macs: List[str] = []
    ) -> Optional[MacAndSensorData]:
        """
        Convert one raw BLE advertisement into decoded sensor data.

        Args:
            ble_data (tuple): advertised MAC (may be empty) and raw payload
            mac_blacklist (ListProxy): shared list; non-RuuviTag MACs are appended
            allowed_macs (list): optional MAC whitelist

        Returns:
            tuple or None: (MAC, decoded sensor data), or None if the payload
            is not valid RuuviTag sensor data or the MAC is filtered out
        """
        (mac, payload) = ble_data
        (data_format, data) = DataFormats.convert_data(payload)
        # Check that encoded data is valid RuuviTag data and it is sensor data
        # If data is not valid RuuviTag data add MAC to blacklist if MAC is available
        if data is None:
            if mac:
                log.debug("Blacklisting MAC %s", mac)
                mac_blacklist.append(mac)
            return None
        if data_format is None:
            # Whatever we heard was from a Ruuvitag, but did not contain
            # any measurements. Ignore this.
            return None
        decoded = get_decoder(data_format).decode_data(data)
        if decoded is None:
            log.error("Decoded data is null. MAC: %s - Raw: %s", mac, payload)
            return None
        # If advertised MAC is missing, try to parse it from the payload
        mac_to_send = (
            mac
            if mac
            else parse_mac(data_format, decoded["mac"])
            if "mac" in decoded and decoded["mac"] is not None
            else ""
        )
        # Check whitelist using MAC from decoded data if advertised MAC is not available
        if allowed_macs and mac_to_send not in allowed_macs:
            log.debug("MAC not whitelisted: %s", mac_to_send)
            return None
        return (mac_to_send, decoded)
import time
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from multiprocessing import Manager
from multiprocessing.managers import DictProxy
from queue import Queue
from threading import Thread
from typing import List
from reactivex import Subject
from ruuvitag_sensor.ruuvi import RunFlag, RuuviTagSensor
def _run_get_data_background(macs: List[str], queue: Queue, shared_data: DictProxy, bt_device: str):
    """
    Background process function for RuuviTag Sensors

    Runs RuuviTagSensor.get_data in this (child) process, timestamping each
    reading and pushing it onto the shared queue for the parent process.

    Args:
        macs (list): MAC addresses to listen for (empty = all)
        queue (Queue): Manager queue the parent process consumes from
        shared_data (DictProxy): shared dict; "run_flag" False requests shutdown
        bt_device (string): Bluetooth device id
    """
    run_flag = RunFlag()

    def add_data(data):
        # Cooperative shutdown: the parent flips shared_data["run_flag"] to
        # False (see RuuviTagReactive.stop), which stops get_data's loop.
        if not shared_data["run_flag"]:
            run_flag.running = False
        # NOTE(review): utcnow() yields a naive (tz-less) ISO timestamp;
        # consumers must assume UTC -- confirm before changing the format.
        data[1]["time"] = datetime.utcnow().isoformat()
        queue.put(data)

    RuuviTagSensor.get_data(add_data, macs, run_flag, bt_device)
class RuuviTagReactive:
    """
    Reactive wrapper and background process for RuuviTagSensor
    get_data

    A child process runs the blocking BLE listener; a thread in this process
    drains its queue and pushes readings to every subscribed Subject.
    """

    @staticmethod
    def _data_update(subjects: List[Subject], queue: Queue, run_flag: RunFlag):
        """
        Get data from background process and notify all subscribed observers
        with the new data
        """
        # Polling loop: drain the queue, then sleep briefly, until stop().
        while run_flag.running:
            while not queue.empty():
                data = queue.get()
                # Skip subjects the caller has already disposed.
                for subject in [s for s in subjects if not s.is_disposed]:
                    subject.on_next(data)
            time.sleep(0.1)

    def __init__(self, macs: List[str] = [], bt_device: str = ""):
        """
        Start background process for get_data and async task for notifying
        all subscribed observers

        Args:
            macs (list): MAC addresses
            bt_device (string): Bluetooth device id
        """
        self._run_flag = RunFlag()
        # This list object is shared with the notifier thread, which re-reads
        # it on every dispatch -- so subjects added later are still notified.
        self._subjects: List[Subject] = []
        m = Manager()
        q = m.Queue()
        # Use Manager dict to share data between processes
        self._shared_data = m.dict()
        self._shared_data["run_flag"] = True
        # Start data updater
        notify_thread = Thread(target=RuuviTagReactive._data_update, args=(self._subjects, q, self._run_flag))
        notify_thread.start()
        # Start background process
        # NOTE(review): the executor is never shut down or kept as an
        # attribute; shutdown relies on the shared "run_flag" ending the
        # submitted task -- confirm no zombie worker remains after stop().
        executor = ProcessPoolExecutor(1)
        executor.submit(_run_get_data_background, macs, q, self._shared_data, bt_device)

    def get_subject(self) -> Subject:
        """
        Returns:
            subject : Reactive Extension Subject

        Raises:
            Exception: if stop() has already been called.
        """
        if not self._run_flag.running:
            raise Exception("RuuviTagReactive stopped")

        subject: Subject = Subject()
        self._subjects.append(subject)
        return subject

    def stop(self):
        """
        Stop get_data

        Stops the notifier thread, signals the child process to finish, and
        disposes every handed-out subject.
        """
        self._run_flag.running = False
        self._shared_data["run_flag"] = False

        for s in self._subjects:
            s.dispose()
import os
import logging
from ruv_dl.date_utils import parse_date
from ruv_dl.constants import DATE_FORMAT
logger = logging.getLogger(__name__)
class Episode:
    """Thin wrapper around the raw episode dict returned by the API.

    Exposes the episode id and a read/write ``number`` while keeping the
    underlying dict available for serialization via to_dict().
    """

    def __init__(self, data):
        # Falsy input (None, empty dict) is normalized to a fresh dict.
        self.data = data or {}
        self.id = self.data.get('id', None)

    @property
    def number(self):
        """The episode number, or None when not present in the data."""
        return self.data.get('number', None)

    @number.setter
    def number(self, value):
        self.data['number'] = value

    def to_dict(self):
        """Return the wrapped dict (the same object, not a copy)."""
        return self.data
class Entry:
    """A single downloadable item: source URL, timestamp and cache metadata.

    Identity is defined by the etag: two entries with equal etags compare
    equal and hash identically, which EntrySet relies on for de-duplication.
    """

    def __init__(self, fn, url, date, etag, episode=None):
        self.fn = fn
        self.url = url
        self.date = date
        self.etag = etag
        self.episode = Episode(episode)
        # Filled in later via set_target_path(); None until then.
        self.target_path = None

    def to_dict(self):
        """Serialize to a JSON-compatible dict (inverse of from_dict)."""
        return {
            'fn': self.fn,
            'url': self.url,
            'date': self.date.strftime(DATE_FORMAT),
            'etag': self.etag,
            'episode': self.episode.to_dict(),
        }

    @classmethod
    def from_dict(cls, data):
        """Rebuild an Entry from a dict produced by to_dict()."""
        return cls(
            fn=data['fn'],
            url=data['url'],
            date=parse_date(data['date']),
            etag=data['etag'],
            episode=data.get('episode'),
        )

    def get_target_basename(self, program, season):
        """Return the '<Title> - SxxEyy.mp4' file name for this entry."""
        return (
            f'{program["title"]} - '
            f'S{str(season).zfill(2)}E{str(self.episode.number).zfill(2)}.mp4'
        )

    @classmethod
    def get_season_folder(cls, destination, program, season):
        """Return the '<destination>/<Title>/Season <n>' directory path."""
        return os.path.join(destination, program['title'], f'Season {season}',)

    def set_target_path(self, path):
        # TODO: Get rid of this maybe
        self.target_path = path

    def exists_on_disk(self):
        """True if the entry's target file exists; requires set_target_path first."""
        if self.target_path is None:
            # Fixed: call to_dict() -- the original interpolated the bound
            # method object into the message instead of the serialized entry.
            raise RuntimeError(f'Missing target path for {self.to_dict()}')
        return os.path.exists(self.target_path)

    def __hash__(self):
        return hash(self.etag)

    def __eq__(self, other):
        # Fixed: compare etags directly instead of comparing hash values,
        # which could report equality on a hash collision.
        return isinstance(other, Entry) and self.etag == other.etag

    def __str__(self):
        return f'{self.fn} - {self.date}'
class EntrySet(set):
    """A set of entries that de-duplicates on equality, preferring the copy
    that carries a full episode record, and supports date-ordered access."""

    def add(self, item):
        duplicate = next((member for member in self if member == item), None)
        if duplicate is not None:
            # If we have the same item twice, keep whichever copy has a full
            # episode entry, if available.
            item = self._choose_best_item(item, duplicate)
            self.remove(duplicate)
        super().add(item)

    def sorted(self):
        """Return the members as a list ordered by their date."""
        return sorted(self, key=lambda e: e.date)

    @classmethod
    def find_target_number(cls, entries, i):
        '''
        Find a target number for entries[i] wrt entries
        '''
        for k, candidate in enumerate(entries):
            if candidate.episode and candidate.episode.id:
                # Offset the known episode number by the positional distance.
                target = candidate.episode.number + i - k
                if target < 1:
                    logger.warning(
                        'Found strange episode number. You probably want to '
                        'manually fix this entry: %s',
                        entries[i],
                    )
                    target += len(entries)
                logger.info(
                    'Found related episode in %d for %d: %s, '
                    'target number is %d',
                    k,
                    i,
                    entries[i],
                    target,
                )
                return target
        logger.warning(
            'Found no episode number for %s, assigning it 1 '
            '(number of entries: %d)',
            entries[i],
            len(entries),
        )
        return 1

    def _choose_best_item(self, item, member):
        """Prefer the candidate with a real episode id; fall back to item."""
        if item.episode and item.episode.id:
            return item
        if member.episode:
            return member
        return item

    def __getitem__(self, i):
        return self.sorted()[i]
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) for a sample; plain n for a population.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces = 50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2,sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Fixed: label the second subplot -- the original set axes[0]'s
        # ylabel a second time, leaving the pdf plot unlabeled.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances add for independent Gaussians, hence the root-sum-square.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
from decimal import Decimal
from .rv_api import RVapi
from .objetos.recarga import Recarga
class Transacao5(RVapi):
    """Venda de recarga via API RV (Transacao 5)."""

    def execute(
        self,
        compra: int,
        produto: str,
        ddd: str,
        fone: str,
        codigo_assinante: str = None,
        valor: Decimal = None,
        id_terminal: str = None,
        bit_boleto: int = None,
        usuario_local: str = None,
        mock=None
    ) -> Recarga:
        """
        Parametros
        :param compra: Codigo da compra no sistema do cliente, sequencial unico com maximo de 9 digitos
        :param produto: Codigo do produto consultado na Transacao1
        :param ddd: DDD se recarga de telefone
        :param fone: Telefone do cliente se recarga de telefone com 8 ou 9 digitos
        :param codigo_assinante: Codigo do assintante para venda de produtos TV
        :param valor: Valor da recarga para produtos que aceitem configuracao
        :param id_terminal: Codigo do terminal que esta realizando a venda
        :param bit_boleto: Informar se houver boletos em aberto
        :param usuario_local: Codigo ou Login do cliente local se houver para relatorio de compras
        :param mock: resposta XML pre-gravada usada em testes (pula o POST)
        :return: Recarga com os dados retornados pela API
        """
        if mock is not None:
            response = mock
        else:
            # NOTE(review): class is Transacao5 but the payload sends
            # codigo_transacao 3 -- confirm against the RV API spec.
            payload = {
                "codigo_transacao": 3,
                "loja_primaria": self.CREDENCIAIS['loja_primaria'],
                "nome_primario": self.CREDENCIAIS['nome_primario'],
                "senha_primaria": self.CREDENCIAIS['senha_primaria'],
                "versao": self.VERSAO,
                "compra": compra,
                "produto": produto,
                "ddd": ddd,
                "fone": fone,
                "codigoAssinante": codigo_assinante,
            }
            if valor is not None:
                payload['valor'] = valor
            if id_terminal is not None:
                # Fixed: assign id_terminal -- the original copy-pasted
                # `valor` here, silently sending the wrong field.
                payload['id_terminal'] = id_terminal
            if bit_boleto is not None:
                payload['bitBoleto'] = bit_boleto
            if usuario_local is not None:
                payload['usuario_local'] = usuario_local

            response = self.doPOST(payload)
        return Recarga().parse_dict(self.convert_xml_to_dict(response))
class ErroRV(Exception):
    """Base class for all RV API errors; carries the API's error message."""

    def __init__(self, message):
        super().__init__(message)


class FoneIncompletoInvalido(ErroRV):
    def __init__(self, *args):
        super().__init__("Fone Incompleto / Invalido [Codigo 1]")


class LimiteCreditoInsuficiente(ErroRV):
    def __init__(self, *args):
        super().__init__("Limite de Crédito Insuficiente [Codigo 2]")


class EstoqueInsuficiente(ErroRV):
    def __init__(self, *args):
        super().__init__("Estoque Insuficiente [Codigo 3]")


class TelefoneNaoAutorizado(ErroRV):
    def __init__(self, *args):
        super().__init__("Telefone não autorizado [Codigo 4]")


class SenhaInvalida(ErroRV):
    def __init__(self, *args):
        super().__init__("Senha Inválida [Codigo 5]")


class MaximoNumeroConexoesAtingida(ErroRV):
    def __init__(self, *args):
        super().__init__("Máximo número de conexões atingida [Codigo 6]")


class SistemaEmManutencao(ErroRV):
    def __init__(self, *args):
        super().__init__("Sistema em Manutenção [Codigo 7]")


class OperadoraProdutoNaoEncontrado(ErroRV):
    def __init__(self, *args):
        super().__init__("Operadora / Produto não encontrado [Codigo 8]")


class CodigoInvalido(ErroRV):
    def __init__(self, *args):
        super().__init__("Código inválido [Codigo 9]")


class ValorInvalido(ErroRV):
    def __init__(self, *args):
        super().__init__("Valor Inválido [Codigo 10]")


class Timeout(ErroRV):
    def __init__(self, *args):
        super().__init__("Timeout [Codigo 11]")


class CompraExpirada(ErroRV):
    def __init__(self, *args):
        super().__init__("Compra Expirada [Codigo 13]")


class CompraInexistente(ErroRV):
    def __init__(self, *args):
        super().__init__("Compra inexistente [Codigo 14]")


class UsuarioLojaNaoEncontrado(ErroRV):
    def __init__(self, *args):
        super().__init__("Usuario/Loja não encontrados [Codigo 15]")


class ParametrosInsuficientes(ErroRV):
    def __init__(self, *args):
        super().__init__("Parâmetros Insuficientes [Codigo 16]")


class CompraJaConfirmada(ErroRV):
    def __init__(self, *args):
        super().__init__("Compra já confirmada [Codigo 17]")


class BoletoNaoEncontrado(ErroRV):
    def __init__(self, *args):
        super().__init__("Boleto não Encontrado [Codigo 18]")


class ParametrosNaoEnviadosViaPOST(ErroRV):
    def __init__(self, *args):
        super().__init__("Parametros não enviados via POST [Codigo 19]")


class CodigoTransacaoNaoInformado(ErroRV):
    def __init__(self, *args):
        super().__init__("Codigo de Transacao não informado [Codigo 20]")


class VersaoNaoInformada(ErroRV):
    def __init__(self, *args):
        super().__init__("Versão não informada [Codigo 21]")


class UsuarioSemNivelDeAcesso(ErroRV):
    def __init__(self, *args):
        super().__init__("Usuário sem nível de acesso [Codigo 22]")


class CobrancaAindaNaoVisualizada(ErroRV):
    def __init__(self, *args):
        super().__init__("Cobrança ainda não visualizada [Codigo 23]")


class TransacaoNaoPermitida(ErroRV):
    def __init__(self, *args):
        super().__init__("Transação não permitida [Codigo 24]")
# Robotics, Vision & Control: 3rd edition in Python (2023)
[](https://github.com/petercorke/robotics-toolbox-python)
[](https://qcr.github.io)
[](https://opensource.org/licenses/MIT)
[](https://badge.fury.io/py/rvc3python)

[](https://pypistats.org/packages/rvc3python)
<table style="border:0px">
<tr style="border:0px">
<td style="border:0px">
<img src="https://github.com/petercorke/RVC3-python/raw/main/doc/frontcover.png" alt="Front cover 978-3-031-06468-5_5208" width="300">
</td>
<td style="border:0px">
Welcome to the online hub for the book:
<ul type="none">
<li><b>Robotics, Vision & Control</b>: fundamental algorithms in Python (3rd edition)
<li>Peter Corke, published by Springer-Nature 2023.</li>
<li><b>ISBN</b> 978-3-031-06468-5 (hardcopy), 978-3-031-06469-2 (eBook)</li>
<li><b>DOI</b> <a href="https://doi.org/10.1007/978-3-031-06469-2">10.1007/978-3-031-06469-2</a></li>
</ul>
<br><br>
<p>Report an issue with the book or its supporting code <a href="https://github.com/petercorke/RVC3-python/issues/new/choose">here</a>.</p>
<p>Known errata for the book can be viewed <a href="https://github.com/petercorke/RVC3-python/wiki/Errata">here</a>.</p>
</td>
</tr>
</table>
This book uses many examples based on the following open-source Python packages
<a href="https://github.com/petercorke/robotics-toolbox-python"><img alt="Robotics Toolbox for Python" src="https://github.com/petercorke/robotics-toolbox-python/raw/master/docs/figs/RobToolBox_RoundLogoB.png" width="130"></a>
<a href="https://github.com/petercorke/machinevision-toolbox-python"><img alt="Machine Vision Toolbox for Python" src="https://github.com/petercorke/machinevision-toolbox-python/raw/master/figs/VisionToolboxLogo_NoBackgnd@2x.png" width="150"></a>
<a href="https://github.com/petercorke/spatialmath-python"><img alt="Spatial Maths Toolbox for Python" src="https://github.com/petercorke/spatialmath-python/raw/master/docs/figs/CartesianSnakes_LogoW.png" width="130"></a>
<a href="https://github.com/petercorke/bdsim"><img alt="Block diagram simulation for Python" src="https://github.com/petercorke/bdsim/raw/master/figs/BDSimLogo_NoBackgnd@2x.png" width="250"></a>
**Robotics Toolbox for Python**, **Machine Vision Toolbox for Python**, **Spatial Maths Toolbox for Python**, **Block Diagram Simulation for Python**. These in turn have dependencies on other packages created by the author and
third parties.
## Installing the package
This package provides a simple one-step installation of *all* the required Toolboxes
```shell
pip install rvc3python
```
or
```shell
conda install rvc3python
```
There are a lot of dependencies and this might take a minute or so. You now have a very
powerful computing environment for robotics and computer vision.
### Python version
Given the rapid rate of language additions, particularly around type hinting, use at
least Python 3.8. Python 3.7 goes end of life in June 2023.
Not all package dependencies will work with the latest release of Python. In particular, check:
* [PyTorch](https://pypi.org/project/torch/) used for segmentation examples in Chapter 12
* [Open3D](https://pypi.org/project/open3d), used for point cloud examples in Chapter 14.
### Installing into a Conda environment
It's probably a good idea to create a virtual environment to keep this package
and its dependencies separated from your other Python code and projects. If you've
never used virtual environments before this might be a good time to start, and it
is really easy [using Conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html):
```shell
conda create -n RVC3 python=3.10
conda activate RVC3
pip install rvc3python
```
### Installing deep learning tools
Chapter 11 has some deep learning examples based on PyTorch. If you don't have
PyTorch installed you can use the `pytorch` install option
```shell
pip install rvc3python[pytorch]
```
or
```shell
conda install rvc3python[pytorch]
```
## Using the Toolboxes
The simplest way to get going is to use the command line tool
```shell
$ rvctool
____ _ _ _ __ ___ _ ___ ____ _ _ _____
| _ \ ___ | |__ ___ | |_(_) ___ ___ \ \ / (_)___(_) ___ _ __ ( _ ) / ___|___ _ __ | |_ _ __ ___ | | |___ /
| |_) / _ \| '_ \ / _ \| __| |/ __/ __| \ \ / /| / __| |/ _ \| '_ \ / _ \/\ | | / _ \| '_ \| __| '__/ _ \| | |_ \
| _ < (_) | |_) | (_) | |_| | (__\__ \_ \ V / | \__ \ | (_) | | | | | (_> < | |__| (_) | | | | |_| | | (_) | | ___) |
|_| \_\___/|_.__/ \___/ \__|_|\___|___( ) \_/ |_|___/_|\___/|_| |_| \___/\/ \____\___/|_| |_|\__|_| \___/|_| |____/
|/
for Python (RTB==1.1.0, MVTB==0.9.5, SG==1.1.7, SMTB==1.1.7, NumPy==1.24.2, SciPy==1.10.1, Matplotlib==3.7.1)
import math
import numpy as np
from scipy import linalg, optimize
import matplotlib.pyplot as plt
from spatialmath import *
from spatialmath.base import *
from spatialmath.base import sym
from spatialgeometry import *
from roboticstoolbox import *
from machinevisiontoolbox import *
import machinevisiontoolbox.base as mvb
# useful variables
from math import pi
puma = models.DH.Puma560()
panda = models.DH.Panda()
func/object? - show brief help
help(func/object) - show detailed help
func/object?? - show source code
Results of assignments will be displayed, use trailing ; to suppress
Python 3.10.9 | packaged by conda-forge | (main, Feb 2 2023, 20:24:27) [Clang 14.0.6 ]
Type 'copyright', 'credits' or 'license' for more information
IPython 8.11.0 -- An enhanced Interactive Python. Type '?' for help.
>>>
```
This provides an interactive Python
([IPython](https://ipython.readthedocs.io/en/stable)) session with all the Toolboxes and
supporting packages imported, and ready to go. It's a highly capable, convenient, and
"MATLAB-like" workbench environment for robotics and computer vision.
For example to load an ETS model of a Panda robot, solve a forward kinematics
and inverse kinematics problem, and an interactive graphical display is simply:
```python
>>> panda = models.ETS.Panda()
ERobot: Panda (by Franka Emika), 7 joints (RRRRRRR)
┌─────┬───────┬───────┬────────┬─────────────────────────────────────────────┐
│link │ link │ joint │ parent │ ETS: parent to link │
├─────┼───────┼───────┼────────┼─────────────────────────────────────────────┤
│ 0 │ link0 │ 0 │ BASE │ tz(0.333) ⊕ Rz(q0) │
│ 1 │ link1 │ 1 │ link0 │ Rx(-90°) ⊕ Rz(q1) │
│ 2 │ link2 │ 2 │ link1 │ Rx(90°) ⊕ tz(0.316) ⊕ Rz(q2) │
│ 3 │ link3 │ 3 │ link2 │ tx(0.0825) ⊕ Rx(90°) ⊕ Rz(q3) │
│ 4 │ link4 │ 4 │ link3 │ tx(-0.0825) ⊕ Rx(-90°) ⊕ tz(0.384) ⊕ Rz(q4) │
│ 5 │ link5 │ 5 │ link4 │ Rx(90°) ⊕ Rz(q5) │
│ 6 │ link6 │ 6 │ link5 │ tx(0.088) ⊕ Rx(90°) ⊕ tz(0.107) ⊕ Rz(q6) │
│ 7 │ @ee │ │ link6 │ tz(0.103) ⊕ Rz(-45°) │
└─────┴───────┴───────┴────────┴─────────────────────────────────────────────┘
┌─────┬─────┬────────┬─────┬───────┬─────┬───────┬──────┐
│name │ q0 │ q1 │ q2 │ q3 │ q4 │ q5 │ q6 │
├─────┼─────┼────────┼─────┼───────┼─────┼───────┼──────┤
│ qr │ 0° │ -17.2° │ 0° │ -126° │ 0° │ 115° │ 45° │
│ qz │ 0° │ 0° │ 0° │ 0° │ 0° │ 0° │ 0° │
└─────┴─────┴────────┴─────┴───────┴─────┴───────┴──────┘
>>> panda.fkine(panda.qz)
0.7071 0.7071 0 0.088
0.7071 -0.7071 0 0
0 0 -1 0.823
0 0 0 1
>>> panda.ikine_LM(SE3.Trans(0.4, 0.5, 0.2) * SE3.Ry(pi/2))
IKSolution(q=array([ -1.849, -2.576, -2.914, 1.22, -1.587, 2.056, -1.013]), success=True, iterations=13, searches=1, residual=3.3549072615799585e-10, reason='Success')
>>> panda.teach(panda.qz)
```

Computer vision is just as easy. For example, we can import an image, blur it
and display it alongside the original
```python
>>> mona = Image.Read("monalisa.png")
>>> Image.Hstack([mona, mona.smooth(sigma=5)]).disp()
```

or load two images of the same scene, compute SIFT features and display putative
matches
```python
>>> sf1 = Image.Read("eiffel-1.png", mono=True).SIFT()
>>> sf2 = Image.Read("eiffel-2.png", mono=True).SIFT()
>>> matches = sf1.match(sf2)
>>> matches.subset(100).plot("w")
```

`rvctool` is a wrapper around
[IPython](https://ipython.readthedocs.io/en/stable) where:
- robotics and vision functions and classes can be accessed without needing
package prefixes
- results are displayed by default like MATLAB does, and like MATLAB you need to
put a semicolon on the end of the line to prevent this
- the prompt is the standard Python REPL prompt `>>>` rather than the IPython
prompt, this can be overridden by a command-line switch
- allows cutting and pasting in lines from the book, and prompt characters are
ignored
The Robotics, Vision & Control book uses `rvctool` for all the included
examples.
`rvctool` imports all the above-mentioned packages using `import *` which is
not considered best Python practice. It is very convenient for interactive
experimentation, but in your own code you can handle the imports as you see
fit.
### Cutting and pasting
IPython is very forgiving when it comes to cutting and pasting in blocks of Python
code. It will strip off the `>>>` prompt character and ignore indentation. The normal
python REPL is not so forgiving. IPython also maintains a command history and
allows command editing.
### Simple scripting
You can write very simple scripts, for example `test.py` is
```python
T = puma.fkine(puma.qn)
sol = puma.ikine_LM(T)
sol.q
puma.plot(sol.q);
```
then
```shell
$ rvctool test.py
0 0 1 0.5963
0 1 0 -0.1501
-1 0 0 0.6575
0 0 0 1
IKSolution(q=array([7.235e-08, -0.8335, 0.09396, 3.142, 0.8312, -3.142]), success=True, iterations=15, searches=1, residual=1.406125546650288e-07, reason='Success')
array([7.235e-08, -0.8335, 0.09396, 3.142, 0.8312, -3.142])
PyPlot3D backend, t = 0.05, scene:
robot: Text(0.0, 0.0, 'Puma 560')
>>>
```
and you are dropped into an IPython session after the script has run.
## Using Jupyter and Colab
Graphics and animations are problematic in these environments, some things work
well, some don't. As much as possible I've tweaked the Jupyter notebooks to work
as best they can in these environments.
For local use the [Jupyter plugin for Visual Studio Code](https://marketplace.visualstudio.com/items?itemName=ms-toolsai.jupyter) is pretty decent. Colab suffers
from old versions of major packages (though they are getting better at keeping up to date)
and animations can suffer from slow update over the network.
## Other command line tools
Additional command line tools available (from the Robotics Toolbox) include:
- `eigdemo`, animation showing linear transformation of a rotating unit vector
which demonstrates eigenvalues and eigenvectors.
- `tripleangledemo`, Swift visualization that lets you experiment with various triple-angle sequences.
- `twistdemo`, Swift visualization that lets you experiment with 3D twists. The screw axis is the blue rod and you can
position and orient it using the sliders, and adjust its pitch. Then apply a rotation
about the screw using the bottom slider.
# Block diagram models
<a href="https://github.com/petercorke/bdsim"><img
src="https://github.com/petercorke/bdsim/raw/master/figs/BDSimLogo_NoBackgnd%402x.png"
alt="bdsim logo" width="300"></a>
Block diagram models are key to the pedagogy of the RVC3 book and 25 models are
included. To simulate these models we use the Python package
[bdsim](https://github.com/petercorke/bdsim) which can run models:
- written in Python using
[bdsim](https://github.com/petercorke/bdsim#getting-started) blocks and
wiring.
- created graphically using
[bdedit](https://github.com/petercorke/bdsim#bdedit-the-graphical-editing-tool)
and saved as a `.bd` (JSON format) file.
The models are included in the `RVC3` package when it is installed and `rvctool`
adds them to the module search path. This means you can invoke them from
`rvctool` by
```python
>>> %run -m vloop_test
```
If you want to directly access the folder containing the models, the command
line tool
```shell
bdsim_path
```
will display the full path to where they have been installed in the Python
package tree.
# Additional book resources
<img src="https://github.com/petercorke/RVC3-python/raw/main/doc/frontcover.png" alt="Front cover 978-3-031-06468-5_5208" width="100">
This GitHub repo provides additional resources for readers including:
- Jupyter notebooks containing all code lines from each chapter, see
the [`notebooks`](notebooks) folder
- The code to produce every Python/Matplotlib (2D) figure in the book, see the [`figures`](figures) folder
- 3D points clouds from chapter 14, and the code to create them, see
the [`pointclouds`](../pointclouds) folder.
- 3D figures from chapters 2-3, 7-9, and the code to create them, see the [`3dfigures`](../3dfigures) folder.
- All example scripts, see the [`examples`](examples) folder.
- To run the visual odometry example in Sect. 14.8.3 you need to download two image sequences, each over 100MB, [see the instructions here](https://github.com/petercorke/machinevision-toolbox-python/blob/master/mvtb-data/README.md#install-big-image-files).
To get that material you must clone the repo
```shell
git clone https://github.com/petercorke/RVC3-python.git
```
## vectorial sensor data
from collections import namedtuple
import numpy as np
from numpy.core.shape_base import block
from scipy.integrate import odeint
from spatialmath.base import unitvec
from spatialmath import UnitQuaternion
import matplotlib.pyplot as plt
def IMU():
    """Simulate a tumbling rigid body and the IMU readings it would produce.

    Integrates Euler's rotational dynamics for 20 s to obtain body rates,
    then synthesizes biased gyro, accelerometer and magnetometer readings
    plus the ground-truth attitude.

    Returns:
        (true, imu) pair of namedtuples, one row per timestep:
          true: t, dt, omega, orientation (UnitQuaternion trajectory), g, B
          imu:  t, dt, gyro, accel, magno -- biased sensor readings
    """
    # accelerometer
    # NOTE(review): .T on a 1-D array is a no-op; kept for documentation value only
    g0 = unitvec( [0, 0, 9.8] ).T
    gbias = 0.02 * np.r_[2, -2, 2].T  # bias 2% of norm
    # magnetometer, use N E U data in nT for Brisbane
    m0 = unitvec( np.r_[28067.5, -5439.4, 44800.5] * 1e-9).T
    mbias = 0.02 * np.r_[-1, -1, 2]  # bias 2% of norm
    # gyro
    # NOTE(review): factor is 0.05 but the comment said "10% of max" -- confirm intent
    wbias = 0.05 * np.r_[-1, 2, -1]

    ## simulation
    # parameters
    dt = 0.05  # sample/integration timestep [s]

    # make an asymmetric mass
    J = np.diag([2, 2, 2])  # NOTE(review): dead assignment, overwritten on the next line
    J = np.diag([2, 4, 3])
    J[0,1] = -1
    J[1,0] = -1
    J[0,2] = -2
    J[2,0] = -2
    #eig(J)

    # initial condition
    w0 = 0.2 * np.r_[1, 2, 2].T

    # Solve Euler's rotational dynamics to get omega
    # 1 row per timestep
    t = np.arange(0, 20, dt)
    omega = odeint( lambda w, t: -np.linalg.inv(J) @ np.cross(w, J @ w),
        w0, t)

    # Solve for simulated sensor readings and true attitude
    # 1 row per timestep
    am = np.zeros(omega.shape)
    mm = np.zeros(omega.shape)
    truth = UnitQuaternion()
    for k, w in enumerate(omega):
        iq = truth[k].inv()
        am[k,:] = iq * g0 + gbias  # sensor reading in body frame
        mm[k,:] = iq * m0 + mbias  # sensor reading
        # integrate the attitude forward by one step
        truth.append(truth[k] * UnitQuaternion.EulerVec(w * dt))
    del truth[-1]  # drop the extra appended sample so lengths match t

    # add bias to measured
    wm = omega + wbias

    imu = namedtuple('imu', 't dt gyro accel magno')
    true = namedtuple('true', 't dt omega orientation g B')
    return true(t, dt, omega, truth, g0, m0), imu(t, dt, wm, am, mm)
if __name__ == "__main__":

    def plot(t, y, title):
        """Plot the columns of y against t in a new, titled figure."""
        plt.figure()
        plt.plot(t, y)
        plt.grid(True)
        plt.title(title)

    # Fixed: this previously called an undefined tumble() and read attributes
    # (data.attitude_true, data.gyro on a single object) that do not exist on
    # the (true, imu) pair that IMU() actually returns.
    true_data, imu_data = IMU()
    print(true_data.orientation[100])
    print(true_data.orientation[100].rpy())
    plot(true_data.t, true_data.orientation.rpy(), 'attitude')
    plot(imu_data.t, imu_data.gyro, 'gyro')
    plot(imu_data.t, imu_data.accel, 'accel')
    plot(imu_data.t, imu_data.magno, 'magno')
    plt.show(block=True)
import numpy as np
from math import pi, sqrt, inf
# Parameter dictionary for a quadrotor dynamic model: environment, airframe,
# rotor geometry and derived aerodynamic constants (SI units throughout).
quadrotor = {}
quadrotor["nrotors"] = 4  # 4 rotors
quadrotor["g"] = 9.81  # g Gravity
quadrotor["rho"] = 1.184  # rho Density of air
quadrotor["muv"] = 1.5e-5  # muv Viscosity of air
# Airframe
quadrotor["M"] = 4  # M Mass
Ixx = 0.082
Iyy = 0.082
Izz = 0.149  # 0.160
quadrotor["J"] = np.diag([Ixx, Iyy, Izz])  # I Flyer rotational inertia matrix 3x3
quadrotor["h"] = -0.007  # h Height of rotors above CoG
quadrotor["d"] = 0.315  # d Length of flyer arms
# Rotor
quadrotor["nb"] = 2  # b Number of blades per rotor
quadrotor["r"] = 0.165  # r Rotor radius
quadrotor["c"] = 0.018  # c Blade chord
quadrotor["e"] = 0.0  # e Flapping hinge offset
quadrotor["Mb"] = 0.005  # Mb Rotor blade mass
quadrotor["Mc"] = 0.010  # Mc Estimated hub clamp mass
quadrotor["ec"] = 0.004  # ec Blade root clamp displacement
quadrotor["Ib"] = (
    quadrotor["Mb"] * (quadrotor["r"] - quadrotor["ec"]) ** 2 / 4
)  # Ib Rotor blade rotational inertia
quadrotor["Ic"] = (
    quadrotor["Mc"] * (quadrotor["ec"]) ** 2 / 4
)  # Ic Estimated root clamp inertia
quadrotor["mb"] = quadrotor["g"] * (
    quadrotor["Mc"] * quadrotor["ec"] / 2 + quadrotor["Mb"] * quadrotor["r"] / 2
)  # mb Static blade moment
quadrotor["Ir"] = quadrotor["nb"] * (
    quadrotor["Ib"] + quadrotor["Ic"]
)  # Ir Total rotor inertia
quadrotor["Ct"] = 0.0048  # Ct Non-dim. thrust coefficient
quadrotor["Cq"] = quadrotor["Ct"] * sqrt(
    quadrotor["Ct"] / 2
)  # Cq Non-dim. torque coefficient
quadrotor["sigma"] = (
    quadrotor["c"] * quadrotor["nb"] / (pi * quadrotor["r"])
)  # sigma Rotor solidity ratio
quadrotor["thetat"] = 6.8 * (pi / 180)  # thetat Blade tip angle
quadrotor["theta0"] = 14.6 * (pi / 180)  # theta0 Blade root angle
quadrotor["theta1"] = (
    quadrotor["thetat"] - quadrotor["theta0"]
)  # theta1 Blade twist angle
quadrotor["theta75"] = (
    quadrotor["theta0"] + 0.75 * quadrotor["theta1"]
)  # theta76 3/4 blade angle
# NOTE(review): with e == 0.0 above, this always takes the ZeroDivisionError branch
try:
    quadrotor["thetai"] = quadrotor["thetat"] * (
        quadrotor["r"] / quadrotor["e"]
    )  # thetai Blade ideal root approximation
except ZeroDivisionError:
    quadrotor["thetai"] = inf
quadrotor["a"] = 5.5  # a Lift slope gradient
# derived constants
quadrotor["A"] = pi * quadrotor["r"] ** 2  # A Rotor disc area
quadrotor["gamma"] = (
    quadrotor["rho"]
    * quadrotor["a"]
    * quadrotor["c"]
    * quadrotor["r"] ** 4
    / (quadrotor["Ib"] + quadrotor["Ic"])
)  # gamma Lock number
quadrotor["b"] = (
    quadrotor["Ct"] * quadrotor["rho"] * quadrotor["A"] * quadrotor["r"] ** 2
)  # T = b w^2
quadrotor["k"] = (
    quadrotor["Cq"] * quadrotor["rho"] * quadrotor["A"] * quadrotor["r"] ** 3
)  # Q = k w^2
quadrotor["verbose"] = False
# run with command line -a switch to show animation
import numpy as np
import math
import roboticstoolbox as rtb
import bdsim
# parameters for the path
look_ahead = 5  # pure-pursuit look-ahead distance
speed = 1  # path speed; NOTE(review): this name is rebound to a bdsim block further down
dt = 0.1  # trajectory sample time [s]
tacc = 1  # blend/acceleration time for trajectory segments
x0 = [2, 2, 0]  # initial vehicle configuration (x, y, theta)

# create the path through the waypoints
path = np.array([[10, 10], [10, 60], [80, 80], [50, 10]])
# NOTE(review): dt is passed literally as 0.1 here; keep it in sync with dt above
robot_traj = rtb.mstraj(path[1:, :], qdmax=speed, q0=path[0, :], dt=0.1, tacc=tacc).q
total_time = robot_traj.shape[0] * dt + look_ahead / speed
print(robot_traj.shape)

sim = bdsim.BDSim(animation=True)
bd = sim.blockdiagram()
def background_graphics(ax):
    """Draw the reference path onto the animation axes before the run."""
    ax.plot(path[:, 0], path[:, 1], "r", linewidth=3, alpha=0.7)
def pure_pursuit(cp, R=None, traj=None):
    """Pure-pursuit goal selection.

    Given the current position ``cp``, return the target point on ``traj``:
    the first path point at least ``R`` beyond the closest path point.
    Near the end of the path, when no such point exists, the final path
    point is the goal.

    :param cp: current (x, y) position
    :param R: look-ahead distance
    :param traj: path as an (N, 2) array of points
    :return: target point as a flat (2,) array
    """
    # find closest point on the path to current point
    d = np.linalg.norm(traj - cp, axis=1)  # rely on implicit expansion
    i = np.argmin(d)

    # find all points on the path at least R away
    (k,) = np.where(d[i + 1 :] >= R)  # indices relative to i + 1
    if len(k) == 0:
        # no such points, we must be near the end, goal is the end
        pstar = traj[-1, :]
    else:
        # first point beyond the look-ahead distance; k indexes d[i + 1:],
        # so offset by i + 1 into traj (previously traj[k + i], off by one,
        # which selected the point just *inside* the look-ahead circle)
        pstar = traj[k[0] + i + 1, :]
    return pstar.flatten()
# block-diagram elements (NOTE(review): rebinds the scalar `speed` defined above)
speed = bd.CONSTANT(speed, name="speed")
pos_error = bd.SUM("+-", name="err")
# d2goal = bd.FUNCTION(lambda d: math.sqrt(d[0]**2 + d[1]**2), name='d2goal')
h2goal = bd.FUNCTION(lambda d: math.atan2(d[1], d[0]), name="h2goal")
heading_error = bd.SUM("+-", mode="c", name="herr")
Kh = bd.GAIN(0.5, name="Kh")  # proportional steering gain
bike = bd.BICYCLE(x0=x0)
vplot = bd.VEHICLEPLOT(
    scale=[0, 80, 0, 80], size=0.7, shape="box", init=background_graphics
)  # , movie='rvc4_8.mp4')
sscope = bd.SCOPE(name="steer angle")
hscope = bd.SCOPE(name="heading angle")
# NOTE(review): the stop point [50, 10] duplicates path[-1]; keep them in sync
stop = bd.STOP(lambda x: np.linalg.norm(x - np.r_[50, 10]) < 0.1, name="close_enough")
pp = bd.FUNCTION(
    pure_pursuit, fkwargs={"R": look_ahead, "traj": robot_traj}, name="pure_pursuit"
)
xy = bd.INDEX([0, 1], name="xy")  # position part of the state
theta = bd.INDEX([2], name="theta")  # heading part of the state
# wire the diagram together
bd.connect(pp, pos_error[0])
bd.connect(pos_error, h2goal)
# bd.connect(d2goal, stop)
bd.connect(h2goal, heading_error[0])
bd.connect(theta, heading_error[1], hscope)
bd.connect(heading_error, Kh)
bd.connect(Kh, bike.gamma, sscope)
bd.connect(speed, bike.v)
bd.connect(xy, pp, stop, pos_error[1])
bd.connect(bike, xy, theta, vplot)
bd.compile()
if __name__ == "__main__":
    sim.report(bd)
    out = sim.run(bd, T=total_time)
# run with command line -a switch to show animation
from math import pi, sqrt, atan, atan2
import bdsim
sim = bdsim.BDSim(animation=True)
bd = sim.blockdiagram()

# parameters
xg = [5, 5, pi / 2]  # goal configuration (x, y, theta)
Krho = bd.GAIN(1, name="Krho")  # distance gain
Kalpha = bd.GAIN(5, name="Kalpha")  # bearing-to-goal gain
Kbeta = bd.GAIN(-2, name="Kbeta")  # goal-orientation gain
xg = [5, 5, pi / 2]  # NOTE(review): duplicate of the assignment above
x0 = [5, 2, 0]  # NOTE(review): dead value, overwritten on the next line
x0 = [5, 9, 0]  # initial configuration used for the run
# annotate the graphics
def background_graphics(ax):
    """Mark the goal (*) and start (o) positions on the animation axes."""
    ax.plot(*xg[:2], "*")
    ax.plot(*x0[:2], "o")
# convert x,y,theta state to polar form
def polar(x, dict):
    """Convert vehicle state (x, y, theta) to polar form for the controller.

    On the first call the persistent ``dict`` latches the driving direction:
    +1 to drive forward to the goal, -1 to reverse, chosen so the initial
    bearing error alpha lies within [-pi/2, pi/2].

    :param x: state [x, y, theta] relative to the goal
    :param dict: persistent state dictionary; the "direction" key is latched
        here on first use (the parameter name shadows the builtin ``dict``;
        kept for backward compatibility)
    :return: [direction, rho, alpha, beta], alpha clipped to [-pi/2, pi/2]
    """
    rho = sqrt(x[0] ** 2 + x[1] ** 2)  # distance to goal

    if "direction" not in dict:
        # direction not yet set, set it
        beta = -atan2(-x[1], -x[0])
        alpha = -x[2] - beta
        print("alpha", alpha)
        dict["direction"] = 1 if -pi / 2 <= alpha <= pi / 2 else -1
        print("set direction to ", dict["direction"])

    # bearing of the goal depends on whether we drive forward or in reverse
    if dict["direction"] == -1:
        beta = -atan2(x[1], x[0])
    else:
        beta = -atan2(-x[1], -x[0])
    alpha = -x[2] - beta

    # clip alpha to the valid range
    alpha = max(-pi / 2, min(pi / 2, alpha))

    return [
        dict["direction"],
        rho,
        alpha,
        beta,
    ]
# constants
goal0 = bd.CONSTANT([xg[0], xg[1], 0], name="goal_pos")
goalh = bd.CONSTANT(xg[2], name="goal_heading")

# stateful blocks
bike = bd.BICYCLE(x0=x0, vlim=2, slim=1.3, name="vehicle")

# functions
fabs = bd.FUNCTION(lambda x: abs(x), name="abs")
# NOTE(review): rebinds the polar() function defined above as a bdsim block
polar = bd.FUNCTION(
    polar,
    nout=4,
    persistent=True,
    name="polar",
    inames=("x",),
    # NOTE(review): the last label is missing a closing "$" (cf. the others)
    onames=("direction", r"$\rho$", r"$\alpha$", r"$\beta"),
)
stop = bd.STOP(lambda x: x < 0.01, name="close enough")
steer_rate = bd.FUNCTION(lambda u: atan(u), name="atan")

# arithmetic
vprod = bd.PROD("**", name="vprod")
# NOTE(review): variable is wprod but the block name is "aprod" -- confirm
wprod = bd.PROD("**/", name="aprod")
xerror = bd.SUM("+-")
heading_sum = bd.SUM("++")
gsum = bd.SUM("++")

# displays
vplot = bd.VEHICLEPLOT(
    scale=[0, 10],
    size=0.7,
    shape="box",
    path="b:",
    init=background_graphics,
)
# movie="rvc4_11.mp4",)
ascope = bd.SCOPE(name=r"$\alpha$")
bscope = bd.SCOPE(name=r"$\beta$")

# connections
bd.connect(bike, vplot)
bd.connect(bike, xerror[0])
bd.connect(goal0, xerror[1])
bd.connect(xerror, polar)
bd.connect(polar[1], Krho, stop)  # rho
bd.connect(Krho, vprod[1])
bd.connect(polar[2], Kalpha, ascope)  # alpha
bd.connect(Kalpha, gsum[0])
bd.connect(polar[3], heading_sum[0])  # beta
bd.connect(goalh, heading_sum[1])
bd.connect(heading_sum, Kbeta, bscope)
bd.connect(polar[0], vprod[0], wprod[1])
bd.connect(vprod, fabs, bike.v)
bd.connect(fabs, wprod[2])
bd.connect(wprod, steer_rate)
bd.connect(steer_rate, bike.gamma)
bd.connect(Kbeta, gsum[1])
bd.connect(gsum, wprod[0])
bd.compile()
if __name__ == "__main__":
    sim.report(bd)
    out = sim.run(bd, T=10)
import numpy as np
import bdsim
def SEA(obstacle_pos=0.8, block=False, graphics=False):
    """Simulate a series-elastic actuator under LQR state feedback.

    A motor (mass m1) drives a load (mass m2) through a spring with
    stiffness Ks; the load is stopped by a rigid obstacle at
    ``obstacle_pos``.

    :param obstacle_pos: position at which the load contacts the obstacle
    :param block: NOTE(review): currently unused in the body -- confirm intent
    :param graphics: enable bdsim graphics
    :return: results of sim.run, watching the torque limit and spring force
    """
    sim = bdsim.BDSim(name="SEA", graphics=graphics)
    bd = sim.blockdiagram()

    m1 = 0.5  # motor mass
    m2 = 1  # load mass
    # LQR state-feedback gain as a column vector
    LQR = np.c_[169.9563, 62.9010, -19.9563, 71.1092].T
    print(LQR)
    Ks = 5  # spring stiffness
    force_lim = 2  # actuator force saturation

    # define the blocks
    step = bd.STEP(1)
    inputgain = bd.GAIN(np.r_[1, 0, 1, 0])
    sum1 = bd.SUM("+-")
    lqr = bd.GAIN(LQR)
    limit = bd.CLIP(min=-force_lim, max=force_lim, name="torquelimit")
    motor_sum = bd.SUM("+-")
    motor_accel = bd.GAIN(1 / m1)
    motor_vel = bd.INTEGRATOR(x0=0)
    motor_pos = bd.INTEGRATOR(x0=0)
    spring_sum = bd.SUM("+-")
    spring_force = bd.GAIN(Ks)
    load_accel = bd.GAIN(1 / m2)
    load_vel = bd.INTEGRATOR(x0=0)
    load_pos = bd.INTEGRATOR(x0=0)
    # 0/1 gate: zeroes the load velocity once the obstacle is reached
    obstacle = bd.FUNCTION(lambda x: 0 if x >= obstacle_pos else 1)
    load_prod = bd.PROD("**")
    spring_scope = bd.SCOPE(name="spring scope")
    state_scope = bd.SCOPE(
        vector=4,
        labels=["$x_m$", r"$\dot{x}_m$", "$x_l$", r"$\dot{x}_l$"],
        name="state scope",
    )
    fig_scope = bd.SCOPE(nin=3, labels=["$x_l$", "$u$", "$F_s$"], name="figure scope")
    mux = bd.MUX(4)

    # connect the blocks
    # controller
    bd.connect(step, inputgain)
    bd.connect(inputgain, sum1[0])
    bd.connect(mux, sum1[1])
    bd.connect(sum1, lqr)
    bd.connect(lqr, limit)
    # motor block
    bd.connect(limit, motor_sum[0], fig_scope[1])
    bd.connect(motor_sum, motor_accel)
    bd.connect(motor_accel, motor_vel)
    bd.connect(motor_vel, motor_pos, mux[1])
    bd.connect(motor_pos, spring_sum[0], mux[0])
    # load block
    bd.connect(load_pos, spring_sum[1], obstacle, mux[2], fig_scope[0])
    bd.connect(load_accel, load_vel)
    bd.connect(load_vel, load_prod[0])
    bd.connect(obstacle, load_prod[1])
    bd.connect(load_prod, load_pos, mux[3])
    # spring block
    bd.connect(spring_sum, spring_force)
    bd.connect(spring_force, motor_sum[1], load_accel, spring_scope, fig_scope[2])
    bd.connect(mux, state_scope)

    bd.compile()  # check the diagram
    sim.report(bd)
    out = sim.run(bd, 5, dt=5e-3, watch=[limit, spring_force])
    # out = vloop.run(2, checkstep=1e-6) # simulate for 5s
    # # vloop.dotfile('bd1.dot') # output a graphviz dot file
    # # vloop.savefig('pdf') # save all figures as pdf
    return out
if __name__ == "__main__":
    # Run the demo interactively with graphics enabled.
    SEA(block=True, graphics=True)
import numpy as np
from scipy import linalg
import bdsim
from roboticstoolbox import models
import spatialmath.base as smb
from spatialmath import SE3
# equation numbers are with reference to:
# A Unified Approach for Motion and Force Control of Robot Manipulators: The
# Operational Space Formulation, Khatib, IEEE J.Robotics, RA-3/1, Feb 1987.
# http://khatib.stanford.edu/publications/pdfs/Khatib_1987_RA.pdf
# robot model and target end-effector pose
robot = models.DH.Puma560().nofriction()
T_E = SE3(0.6, -0.2, 0.8) * SE3.OA([0, 1, 0], [0, 0, -1])
sol = robot.ikine_a(T_E)

# compliance frame is EE frame, constant in this case
Sf = T_E.R

# compliance specification
# edit next 2 lines to change compliant motion axes
# 1 = freespace controlled motion
# 0 = constrained compliant motion
sigma_t = np.diag([1, 1, 0])
# position specification matrix, Sigma_f in (1)
sigma_r = np.diag([1, 1, 1])
# rotation specification matrix, Sigma_tau

# compute the generalized task specification matrices (3) and (4)
omega_p = linalg.block_diag(Sf.T @ sigma_t @ Sf, Sf.T @ sigma_r @ Sf)
one = np.eye(3)
omega_f = linalg.block_diag(Sf.T @ (one - sigma_t) @ Sf, Sf.T @ (one - sigma_r) @ Sf)

# setpoints: desired wrench and desired task-space pose
Fstar = np.r_[0, 0, -5, 0, 0, 0]
Xstar = np.r_[0.8, 0.2, 0.3, 0, np.pi / 2, 0]

# control parameters
Kvf = 20.0  # force-loop damping gain
Kf = 20.0  # force-loop proportional gain
Kp = 100.0  # position-loop proportional gain
Kv = 50.0  # position-loop velocity gain

# choose a representation that is singularity free for tool down configuration
rep = "rpy/xyz"

## create block diagram
sim = bdsim.BDSim(graphics=True)
bd = sim.blockdiagram(name="opspace")

# blocks
robot_x = bd.FDYN_X(robot, q0=sol.q, gravcomp=True, velcomp=True, representation=rep)
fstar = bd.CONSTANT(Fstar, name="f*")
xstar = bd.CONSTANT(Xstar, name="x*")
xdstar = bd.CONSTANT(np.zeros((6,)), name="xd*")
fprod = bd.PROD("**", matrix=True, name="fprod")
pprod = bd.PROD("**", matrix=True, name="pprod")
fsum = bd.SUM("+-", name="fsum")
# force/torque sensor
def ft_sensor_func(x):
    """Simulated force/torque sensor: a stiff virtual surface at z = 0.5.

    Returns a 6-vector wrench whose only nonzero component is the z-force,
    present while the end-effector position ``x`` has penetrated the
    surface (x[2] <= 0.5); the force is proportional to the penetration.
    """
    surface = 0.5
    stiffness = 100
    height = x[2]
    force_z = stiffness * (height - surface) if height <= surface else 0
    return np.r_[0, 0, force_z, 0, 0, 0]
# wrap the simulated sensor as a bdsim function block
ftsensor = bd.FUNCTION(ft_sensor_func, name="f/t sensor")

# x error
def x_error_func(x1, x2):
    """Task-space error x1 - x2 with the rotational part wrapped.

    The last three components are passed through smb.angdiff so that
    angular differences are wrapped onto a single revolution.
    """
    e = x1 - x2
    e[3:] = smb.angdiff(e[3:])
    return e
x_error = bd.FUNCTION(x_error_func, nin=2, name="xerror")
Mx = bd.INERTIA_X(robot, representation=rep)

# scopes
pos_scope = bd.SCOPE(
    vector=3, labels=["x", "y", "z"], styles=["r", "g", "b--"], name="position"
)
force_scope = bd.SCOPE(vector=6, name="force/torque")
wrench_scope = bd.SCOPE(
    vector=3, labels=["x", "y", "z"], styles=["r", "g", "b--"], name="command wrench"
)
xe_scope = bd.SCOPE(vector=6, name="x error")
fsum_scope = bd.SCOPE(vector=6, name="fsum scope")
fprod_scope = bd.SCOPE(vector=6, name="fprod scope")
sum1_scope = bd.SCOPE(vector=6, name="_sum1 scope")
x_scope = bd.SCOPE(vector=6, name="x scope")

## connect the blocks
# force control
fsum[0] = omega_f * (fstar + (fstar - ftsensor) * Kf)
fsum[1] = fprod
fprod[1] = omega_f * Kvf * robot_x.xd
# position control
pprod[1] = omega_p * (Kp * x_error + Kv * (xdstar - robot_x.xd))
x_error[0] = xstar
x_error[1] = robot_x.x
# the rest
robot_x.w = fsum + pprod
Mx[0] = robot_x.q
bd.connect(Mx, fprod[0], pprod[0])
ftsensor[0] = robot_x.x
x_scope[0] = robot_x.x
pos_scope[0] = robot_x.x >> bd.INDEX([0, 1, 2])
force_scope[0] = ftsensor
wrench_scope[0] = pprod >> bd.INDEX([0, 1, 2])
xe_scope[0] = x_error
fsum_scope[0] = fsum
fprod_scope[0] = fprod
# NOTE(review): taps an auto-named internal sum block -- confirm the name is stable
sum1_scope[0] = bd["_sum.1"]

bd.compile()  # check the diagram
sim.report(bd, sortby="type")
if __name__ == "__main__":
    out = sim.run(
        bd, 2, dt=5e-3, watch=[x_error, pprod, robot_x.x, robot_x.xd, robot_x.xdd]
    )
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        self.mean = 1.0 * sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n-1 for a sample, n for a population
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        sigma = sum((d - mean) ** 2 for d in self.data)
        self.stdev = math.sqrt(sigma / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(
            -0.5 * ((x - self.mean) / self.stdev) ** 2
        )

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        # calculate the x values to visualize
        x = []
        y = []
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)

        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Fixed: this previously re-labelled axes[0], leaving the lower
        # subplot without a y-axis label.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # variances of independent Gaussians add
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance.

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
from typing import List, Tuple
from rvid.networknumbers.client import ExampleIntsClient
from rvid.seq.basic import RIDMaker
from rvid.seq.common import RIDType, epoch_ms2rid, epoch_ms_now
class RIDMakerProxy(RIDMaker):
    """RIDMaker that obtains its millisecond-epoch IDs from a remote server.

    Tranches of pre-allocated IDs are fetched through ExampleIntsClient and
    cached locally; next() hands out the cached ID closest to "now".
    """

    def __init__(self) -> None:
        super().__init__()
        self.client = ExampleIntsClient(name="netseq-client")
        self.resolution = 3333  # TODO: Fetch value from server
        self.cached_ids: List[int] = list()
        self.last_fetch = 0  # highest ID in the most recent tranche
        self.drift = 0  # local clock minus server-allocated time
        self.top = epoch_ms_now()

    def next(self) -> str:
        """Return the next RID string, refreshing the cache when needed."""
        now = epoch_ms_now() - self.drift
        if now - self.last_fetch > self.resolution:
            # cached tranche is older than one resolution window
            self._update()
        elif not self.cached_ids:
            self._update()
        best_match, match_at_idx = self._search(now)
        # discard the match and everything before it so IDs are never reused
        del self.cached_ids[: match_at_idx + 1]
        return epoch_ms2rid(best_match)

    def _search(self, now: int) -> Tuple[int, int]:
        """
        Find the earliest acceptable ID among the ones we have stored. Since we prune on
        each search, this should usually be close to the start. We also don't want to
        waste usable ID:s.

        A binary search for the first value at the left edge of acceptability would perhaps
        be better.
        """
        # NOTE(review): offset advances by a growing stride (0, 2, 5, 9, ...),
        # so entries between probes are skipped -- confirm this is intended.
        step = 1
        offset = 0
        while (step + offset) < len(self.cached_ids):
            diff = abs(now - self.cached_ids[offset])
            # If we are within half of the resolution from "now", that is good enough
            if diff < self.resolution // 2:
                break
            step += 1
            offset += step
        offset = min(offset, len(self.cached_ids) - 1)
        return self.cached_ids[offset], offset

    def _update(self) -> None:
        """Fetch a fresh tranche and re-estimate local clock drift."""
        self.cached_ids = self.client.get()
        # NOTE(review): last_fetch stores the highest fetched ID (an epoch-ms
        # value), not the wall-clock time of the fetch -- confirm intent.
        self.last_fetch = max(self.cached_ids)
        self.drift = epoch_ms_now() - self.last_fetch

    def adjust_top_from_rid(self, rid: RIDType) -> None:
        raise NotImplementedError("Can't adjust top from client-side yet.")

    def reset(self) -> None:
        raise NotImplementedError("Can't reset top from client-side yet.")
from logging import getLogger
from typing import Callable, Dict, Sequence
from rvid.networknumbers.request import RequestForNumbers
from rvid.networknumbers.server import NumberRequestTCPHandler
from rvid.seq.common import epoch_ms_now
log = getLogger(__name__)
class TranchKeeper:
    """
    Allocates non-overlapping tranches of millisecond-epoch IDs to clients,
    round-robin across all currently known clients.

    TODO: Split out generic server functionality to the networknumbers module and inherit/mix
    that into this RID-specific class.
    """

    def __init__(self) -> None:
        # client key -> currently reserved tranche (emptied once handed out)
        self.tranches: Dict[str, Sequence[int]] = dict()
        # client key -> epoch-ms of that client's most recent request
        self.client_last_seen: Dict[str, int] = dict()
        self.resolution = 3333  # Milliseconds of tranche length
        self.top = -1  # Millisecond epoch

    def get_tranche_for(self, request: RequestForNumbers) -> Sequence[int]:
        """Return a fresh tranche of IDs for the requesting client."""
        client_id = request.key()
        self.client_last_seen[client_id] = epoch_ms_now()
        self._age_out_clients()

        # Check if client is new
        if client_id not in self.tranches:
            self.tranches[client_id] = []
            self._make_new_tranches()

        # Check if client has already asked for its currently allocated tranche
        if not self.tranches[client_id]:
            self._make_new_tranches()

        # Check if current tranches are too old. Fixed: skip tranches that
        # were already handed out and emptied -- indexing tranche[0] on an
        # empty list raised IndexError whenever another client had consumed
        # its tranche.
        for tranche in self.tranches.values():
            if tranche and epoch_ms_now() - tranche[0] > self.resolution // 2:
                self._make_new_tranches()
                break

        return_tranch = self.tranches[client_id]
        self.tranches[client_id] = []  # Make sure we don't return same tranche twice
        return return_tranch

    def _make_new_tranches(self) -> None:
        """Slice the next ID window round-robin across all known clients."""
        now = epoch_ms_now()
        client_count = len(self.tranches.keys())
        log.info("Making new tranches for %i clients at ms-epoch %i" % (client_count, now))
        # Set current top to now, if now is higher than top
        self.top = now if now > self.top else self.top + 1
        rid_range = range(self.top, self.top + self.resolution)
        for idx, client in enumerate(self.tranches.keys()):
            self.tranches[client] = rid_range[idx::client_count]
        # Set top to the highest awarded RID + 1
        self.top += self.resolution + 1

    def _age_out_clients(self) -> None:
        """Forget clients silent for more than ten resolution windows."""
        now = epoch_ms_now()
        to_forget = list()
        for client, last_seen in self.client_last_seen.items():
            if now - last_seen > self.resolution * 10:
                to_forget.append(client)
        for client in to_forget:
            log.info(f"Forgetting client {client} due to not having asked for anything in a while")
            del self.tranches[client]
            del self.client_last_seen[client]
TK = TranchKeeper()
class RIDHandler(NumberRequestTCPHandler):
    """
    The Handler class will get instantiated for each connection to the server, so
    any persistent state needs to live outside the instance.
    """

    def number_getter(self) -> Callable[[RequestForNumbers], Sequence[int]]:
        # Delegate allocation to the module-level TranchKeeper singleton.
        return TK.get_tranche_for

    def array_type(self) -> str:
        # Array typecode "L" (unsigned long) -- presumably the wire format
        # for the ID array; confirm against NumberRequestTCPHandler.
        return "L"
import re
import time
from typing import NewType
_allowed_letters = "A-HJ-NP-Y0-9*#"  # No India or Oscar
# Matches the canonical RID shape "XXX-XXX-XXX", case-insensitively.
RID_REGEXP = re.compile(
    "^([%s]{3})-([%s]{3})-([%s]{3})$" % ((_allowed_letters,) * 3), re.IGNORECASE
)
# Digit alphabet for base 35; "*" and "#" stand in for the easily-confused
# letters I and O.
BASE35_CHARS = "0123456789ABCDEFGH*JKLMN#PQRSTUVWXY"
RIDType = NewType("RIDType", str)  # String like ABC-DEF-GHI, see _rid_regexp.


def encode_base35(number: int) -> str:
    """Render a non-negative integer (at most 35**9 - 1) in base 35."""
    if not isinstance(number, int):
        raise TypeError("Number must be an integer")
    if number < 0:
        raise ValueError("Timestamp cannot be negative")
    if number > 78815638671874:
        raise ValueError("Numbers larger than 78815638671874 are not allowed")

    digits = []
    while number != 0:
        number, rem = divmod(number, 35)
        digits.append(BASE35_CHARS[rem])
    return "".join(reversed(digits)) or BASE35_CHARS[0]
def rid2epoch_ms(rid: RIDType) -> int:
    """
    Convert a RID to milliseconds since epoch, rejecting malformed input.
    """
    if RID_REGEXP.match(rid) is None:
        raise ValueError(f"Cannot convert invalid RID to epoch: '{rid}'")
    # * and # replace letters easy to confuse with digits. The dashes are just flair.
    digits = rid.replace("*", "I").replace("#", "O").replace("-", "")
    return int(digits, 35)
def epoch_ms2rid(epoch_ms: int) -> RIDType:
    """
    Convert milliseconds since epoch to RID
    (n.b time.time() gives you seconds, don't forget to multiply by 1000 before casting to int)
    """
    digits = encode_base35(epoch_ms).rjust(9, "0")
    return RIDType(f"{digits[0:3]}-{digits[3:6]}-{digits[6:9]}")
def epoch_ms_now() -> int:
    """
    Milliseconds since epoch.

    time.time() returns seconds since the epoch as a float whose accuracy
    varies between platforms; rounding to the nearest millisecond is good
    enough here because collisions are detected and handled downstream.
    """
    seconds_now = time.time()
    return round(seconds_now * 1000)
import os
import shutil
from dataclasses import dataclass, field
from logging import getLogger
from pathlib import Path
from typing import List, Optional, Pattern, Union
log = getLogger(__name__)
@dataclass
class ExternalResources:
    """
    Represent one or more external file that is referenced from a tex-file.

    The files may be directly next to the tex-file or in a sub-directory next
    to the tex-file. You cannot have a relative path above the tex-file in the
    filesystem hierarchy.

    The basedir attribute represents the name of the subdirectory where the
    files, which are listed in the "files" attribute, are to be copied.
    Use basedir value "." to place files next to the tex-file.
    """

    basedir: str
    files: List[Path] = field(default_factory=list)

    def __post_init__(self) -> None:
        # Reject parent-directory traversal ("..") anywhere in the path, but
        # allow "." itself, which the class docstring documents as meaning
        # "next to the tex-file". The previous startswith(".") test wrongly
        # rejected "." and missed traversal in the middle (e.g. "sub/../..").
        if self.basedir.startswith("..") or ".." in Path(self.basedir).parts:
            raise ValueError("Basedir for resources can't be above the tex-file itself")
        if self.basedir.startswith("/") or self.basedir.startswith("\\"):
            raise ValueError("Basedir must be a relative path")

    def clone_into_directory(self, target: Path) -> None:
        """Copy every listed file into ``target/basedir``, creating it as needed.

        Raises IOError if another ExternalResources object already placed a
        file with the same name there.
        """
        target_directory = os.path.join(target, self.basedir)
        os.makedirs(target_directory, exist_ok=True)
        for external_path in self.files:
            final_path = os.path.join(target_directory, os.path.basename(external_path))
            if os.path.isfile(final_path):
                raise IOError(f"File is already created by other ExternalResource: {final_path}")
            shutil.copy(external_path, final_path)
def make_external_resource_library(
    source_directory: Union[Path, str],
    relative_target_directory: str,
    file_include_filter: Optional[Pattern[str]] = None,
    file_exclude_filter: Optional[Pattern[str]] = None,
) -> ExternalResources:
    """
    Automatically generate an ExternalResources object from a directory.

    The `relative_target_directory` is the base of relative paths as used in the .tex file (e.g.
    "images" for the complete path "images/ape.png"). All *files* below the `source_directory`
    will be copied into the relative_target_directory. *Sub-directories* in the source directory
    will be ignored.

    You can optionally exclude/include files using compiled regexes; the include
    filter is applied first, then the exclude filter.

    Raises:
        IOError: if ``source_directory`` does not exist or is not a directory.
    """
    if not os.path.isdir(source_directory):
        raise IOError("Source directory must exist")
    library_path = os.path.abspath(source_directory)
    paths_in_library = []
    # Sort the listing so the resulting file order (and any later duplicate-
    # target error) is deterministic across platforms/filesystems.
    for dir_entry in sorted(os.listdir(library_path)):
        full_path = os.path.join(library_path, dir_entry)
        if os.path.isdir(full_path):
            continue  # sub-directories are ignored by design
        if file_include_filter and not file_include_filter.match(dir_entry):
            continue
        if file_exclude_filter and file_exclude_filter.match(dir_entry):
            continue
        paths_in_library.append(Path(full_path))
    if not paths_in_library:
        log.warning(
            f"Created external resource library without any files in it for: {source_directory}"
        )
    return ExternalResources(basedir=relative_target_directory, files=paths_in_library)
## `rvlib`
Anyone who has used [`Distributions.jl`](https://github.com/JuliaStats/Distributions.jl) will tell
you how nice the interface is relative to the "exotic" (the most polite word
we can think of) interface to distributions exposed by
[scipy.stats](http://docs.scipy.org/doc/scipy-0.17.1/reference/stats.html).
`Distributions.jl` also brings better performance, particularly when its
methods are used inside loops.
For these reasons we've put together `rvlib`, which mimics the
interface of [`Distributions.jl`](https://github.com/JuliaStats/Distributions.jl), while at the same
time attaining similar performance by exploiting [`numba`](http://numba.pydata.org/).
This package was inspired by Joshua Adelman's ([@synapticarbors](https://github.com/synapticarbors))
[blog post](https://www.continuum.io/blog/developer-blog/calling-c-libraries-numba-using-cffi) describing how
to call the Rmath C library from numba using [CFFI](http://cffi.readthedocs.io/), and utilizes his build script
to set up the CFFI interface.
### Objectives
* Follow the API of the `Distributions.jl` package as closely as possible
* Create a python package that has better performance than `scipy.stats`.
### Methodology
All the classes are marked for optimization using the `@jitclass` decorator. As a result, instances of different distributions can be called within user specific routines or passed as arguments in `nopython` mode using `numba`.
The evaluation and sampling methods are built on the `Rmath` C library -- also used by the `Distributions.jl` package.
### Distributions currently implemented
Univariate continuous:
* Normal
* Chisq
* Uniform
* T
* Log-normal
* F
* Beta
* Gamma
* Exponential
* Cauchy
* Logistic
* Weibull
Univariate discrete:
* Poisson
* Geometric
* Hypergeometric
* Binomial
* Negative Binomial
Multivariate continuous:
* check for updates on multivariate normal in `multivariate` branch
### Functionality
The following properties are shared by all the univariate distributions:
* `params`: tuple of the distribution's parameters
* `location`: the location of the distribution (if exists)
* `scale`: the scale of the distribution (if exists)
* `shape`: the shape of the distribution (if exists)
* `mean`: the mean of the distribution
* `median`: the median of the distribution
* `mode`: the mode of the distribution
* `var`: the variance of the distribution
* `std`: the standard deviation of the distribution
* `skewness`: the skewness of the distribution
* `kurtosis`: the kurtosis of the distribution
* `isplatykurtic`: boolean indicating if the excess kurtosis is less than zero
* `isleptokurtic`: boolean indicating if the excess kurtosis is greater than zero
* `ismesokurtic`: boolean indicating if kurtosis is equal to zero
* `entropy`: the entropy of the distribution
The following methods can be called for all univariate distributions:
* `mgf`: evaluate the moment generating function (if exists)
* `cf`: evaluate the characteristic function (if exists)
* `pdf`: evaluate the probability density function
* `logpdf`: evaluate the logarithm of the probability density function
* `loglikelihood`: evaluate the log-likelihood of the distribution with respect to all samples contained in array x
* `cdf`: evaluate the cumulative distribution function
* `ccdf`: evaluate the complementary cdf, i.e. (1 - cdf)
* `logcdf`: evaluate the logarithm of the cdf
* `logccdf`: evaluate the logarithm of the complementary cdf
* `quantile`: evaluate the quantile function at a critical value
* `cquantile`: evaluate the complementary quantile function
* `invlogcdf`: evaluate the inverse function of the logcdf
* `invlogccdf`: evaluate the inverse function of the logccdf
* `rand`: generate array of independent random draws
Seed setting
As the package is built around the `Rmath` library the seed for the random number generator has to be set using the `Rmath` `set_seed(x,y)` function. For example:
```python
import rvlib as rl
rl.set_seed(123, 456) # note that it requires two arguments
```
### Use and Performance
Preliminary comparison with the `scipy.stats` package.
```python
from rvlib import Normal
from scipy.stats import norm
import numpy as np
import timeit
N_dist = Normal(0,1) # rvlib version
N_scipy = norm(0,1) # scipy.stats version
x = np.linspace(0,100,100)
```
```python
In [1]: %timeit N_dist.pdf(x)
Out[1]: The slowest run took 8.85 times longer than the fastest. This could mean that an intermediate result is being cached.
100000 loops, best of 3: 9.69 µs per loop
In [2]: %timeit N_scipy.pdf(x)
Out[2]: 10000 loops, best of 3: 150 µs per loop
```
```python
In [3]: %timeit N_dist.cdf(x)
Out[3]: The slowest run took 20325.82 times longer than the fastest. This could mean that an intermediate result is being cached.
100000 loops, best of 3: 8.08 µs per loop
In [4]: %timeit N_scipy.cdf(x)
Out[4]:The slowest run took 190.64 times longer than the fastest. This could mean that an intermediate result is being cached.
10000 loops, best of 3: 126 µs per loop
```
```python
In [5]: %timeit N_dist.rand(1000)
Out[5]: The slowest run took 2166.80 times longer than the fastest. This could mean that an intermediate result is being cached.
10000 loops, best of 3: 85.8 µs per loop
In [6]: %timeit N_scipy.rvs(1000)
Out[6]: 10000 loops, best of 3: 119 µs per loop
```
# Contributors
* Daniel Csaba (daniel.csaba@nyu.edu)
* Spencer Lyon (spencer.lyon@stern.nyu.edu)
---
This is a fork of the [Rmath-julia](https://github.com/JuliaLang/Rmath-julia)
library, with Python support added.
The original readme of the Rmath-julia repository is included below.
---
## Rmath-julia
This is the Rmath library from R, which is used mainly by Julia's
[Distributions.jl](https://github.com/JuliaStats/Distributions.jl)
package.
The main difference here is that this library has been patched to use
the [DSFMT](http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/) RNG
in `src/runif.c`.
The Julia RNG is in sync with the one used by the Distributions.jl package:
````
julia> srand(1);
julia> [rand(), rand()]
2-element Array{Float64,1}:
0.236033
0.346517
julia> srand(1);
julia> using Distributions
julia> [rand(Uniform()), rand(Uniform())]
2-element Array{Float64,1}:
0.236033
0.346517
````
### Build instructions
Rmath-julia requires GNU Make (https://www.gnu.org/software/make). Just run
`make` to compile the library.
| /rvlib-0.0.6.tar.gz/rvlib-0.0.6/README.md | 0.476336 | 0.969179 | README.md | pypi |
.. image:: https://img.shields.io/pypi/v/rvmath.svg
:target: https://pypi.python.org/pypi/rvmath
:alt: Latest Version
.. image:: https://img.shields.io/pypi/l/rvmath.svg
:target: https://pypi.python.org/pypi/rvmath
:alt: License
.. image:: https://img.shields.io/pypi/pyversions/rvmath.svg
:target: https://pypi.python.org/pypi/rvmath
:alt: Python Versions
.. image:: https://github.com/hgrecco/rvmath/workflows/CI/badge.svg?branch=main
:target: https://github.com/hgrecco/rvmath/actions?query=workflow%3ACI
.. image:: https://github.com/hgrecco/rvmath/workflows/Lint/badge.svg?branch=main
:target: https://github.com/hgrecco/rvmath/actions?query=workflow%3ALint
.. image:: https://coveralls.io/repos/github/hgrecco/rvmath/badge.svg?branch=main
:target: https://coveralls.io/github/hgrecco/rvmath?branch=main
rvmath: math with random variables, the easy way
================================================
**rvmath** is a Python package to build and evaluate
mathematical expressions involving random variables.
Do you want to draw 10 values from a distribution resulting
from ``a * cos(b + c)`` where ``a ~ Poisson``, ``b ~ Uniform``,
and ``c ~ Normal``? No problem:
.. code-block:: python
>>> import rvmath as rvm
>>> z = rvm.poisson(mu=5) * np.cos(rvm.uniform() + rvm.norm())
>>> z.rvs(10)
It runs in Python 3.7+ depending on NumPy_ and SciPy_.
It is licensed under BSD.
It is extremely easy and natural to use:
.. code-block:: python
>>> import rvmath as rvm
>>> x = rvm.uniform()
>>> y = rvm.uniform()
>>> z = x - y
>>> z.rvs(3) #doctest: +SKIP
[ 0.56791289 -0.1547692 -0.73984907]
>>> z.rvs(3) #doctest: +SKIP
[-0.33095289 -0.08664128 0.09938225]
Briefly, ``x`` and ``y`` are random variables drawn from a uniform distribution.
``z`` is a random variable drawn from a distribution obtained by subtracting
two uniform distributions. ``z.rvs(3)`` draw 3 values from such distribution.
Behind the scenes, **rvmath** generate random variates of all random variables
and perform all necessary calculations.
**rvmath** builds upon `Scipy Stats`_ and therefore all continuous distributions
available there are also here, with the same name and arguments. ``rvs`` also follows
the same API, namely:
- **size**: int or tuple of ints, optional
Defining number of random variates (default is 1).
- **random_state**: None, int, RandomState, Generator, optional
If seed is None the RandomState singleton is used. If seed is an int,
a new RandomState instance is used, seeded with seed. If seed is already
a RandomState or Generator instance, then that object is used. Default is None.
An important feature is that random variables have an identity and therefore
the following code gives the expected result.
.. code-block:: python
>>> w = x - x
>>> w.rvs(3)
[0., 0., 0.]
You can also use NumPy functions.
.. code-block:: python
>>> c = np.cos(x)
>>> c.rvs(3)
Finally, you can convert the expression into a SciPy distribution:
.. code-block:: python
>>> distro = c.to_distro(name="my_distro")
to obtain an object with useful methods such as ``rvs``, ``pdf``, ``cdf`` and others.
Quick Installation
------------------
To install **rvmath**, simply (*soon*):
.. code-block:: bash
$ pip install rvmath
and then simply enjoy it!
Other functionality
-------------------
All **rvmath** objects have a few useful options and methods:
You can assign an id to the random variable:
.. code-block:: python
>>> x = rvm.uniform(rvid="x")
to simplify debugging. If ``rvid`` is not given, a random string
will be generated.
You can ask for dict containing all random variable names and their
underlying SciPy distribution objects.
.. code-block:: python
>>> x = rvm.uniform(rvid="x")
>>> y = rvm.norm(rvid="y")
>>> z = x + y
>>> dict(z.random_vars())
{'x': <scipy.stats._distn_infrastructure.rv_frozen at 0x7ff57f196220>,
'y': <scipy.stats._distn_infrastructure.rv_frozen at 0x7ff57e5a81f0>}
You can draw values from all random variables within an object.
.. code-block:: python
>>> realization = z.draw(3)
>>> print(realization)
{'x': array([0.75633395, 0.99657116, 0.26853511]),
'y': array([-1.23407414, 0.5261816 , 2.62764828])}
and finally you can evaluate the object for this particular realization.
.. code-block:: python
>>> z.eval(realization)
array([-0.47774019, 1.52275276, 2.89618339])
This is exactly what happens when ``rvs`` is called, but it can be particularly
useful for debugging, testing and evaluating subexpressions.
In certain cases you might want to predefine the size of certain random variable.
You can do it using the ``size`` parameter upon construction.
.. code-block:: python
>>> z = np.sum(rvm.uniform(size=(3, 3))) * rvm.norm()
Random variables without an explicit size parameters will be evaluated to the size
given when calling ``rvs``.
Together with this feature, you can build distributions that depend on others:
.. code-block:: python
>>> m = rvm.uniform(size=1)
>>> w = rvm.norm(loc=m)
You can combine distribution defined size with ``rvs`` provided size
.. code-block:: python
>>> m = rvm.uniform(size=(None, 3, None)).rvs(2)
>>> m.shape
(2, 3, 2)
``None`` values will be replaced by scalars provided to ``rvs``.
FAQ
---
1. **Which distributions are supported by rvmath?**
All continuous distributions available in Scipy, you can read the
list in the `SciPy Stats`_ docs.
2. **Do you plan to support discrete distributions?**
Yes, hopefully in the next version.
3. **I have built my own distribution subclassing ``rv_continuous``, can I use it with rvmath?**
Yes, just call ``rvm.wrap(distro, *args, **kwargs)``
----
**rvmath** is maintained by a community. See AUTHORS_ for a complete list.
To review an ordered list of notable changes for each version of a project,
see CHANGES_
.. _`NumPy`: http://www.numpy.org/
.. _`SciPy`: http://www.scipy.org/
.. _`SciPy Stats`: https://docs.scipy.org/doc/scipy/reference/stats.html
.. _`pytest`: https://docs.pytest.org/
.. _`AUTHORS`: https://github.com/hgrecco/rvmath/blob/master/AUTHORS
.. _`CHANGES`: https://github.com/hgrecco/rvmath/blob/master/CHANGES
| /rvmath-0.1.tar.gz/rvmath-0.1/README.rst | 0.944177 | 0.752013 | README.rst | pypi |
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
from base64 import b64encode
from requests import get
class ImageToASCII:
    """Convert an image to ASCII art.

    The source image can come from a local file, a URL, or an in-memory
    numpy array, and can be rendered either as plain text or as a colored
    PIL image in which each character keeps the color of the pixel it
    stands for.
    """

    def __init__(self, image_path, source='local', font_path=None, font_size=15, charset=list('#Wo- ')):
        """
        Args:
            image_path: path, URL, or array — interpreted per ``source``.
            source: one of 'local', 'url' or 'array'.
            font_path: optional TrueType font used for colored rendering.
            font_size: point size for the rendering font.
            charset: characters ordered from darkest to lightest.
        """
        if source == 'array':
            self.image = Image.fromarray(image_path)
        elif source == 'local':
            self.image = Image.open(image_path)
        elif source == 'url':
            self.image = Image.open(BytesIO(get(image_path).content))
        else:
            # BUG FIX: an unknown source used to silently leave ``self.image``
            # unset, surfacing later as a confusing AttributeError.
            raise ValueError(f"Unknown source {source!r}; expected 'local', 'url' or 'array'")
        # NOTE: the default charset list is evaluated once and shared between
        # instances; it is never mutated here, so this is safe.
        self.charset = charset
        if font_path:
            self.font = ImageFont.truetype(font_path, font_size)
        else:
            try:
                self.font = ImageFont.truetype(r'./Consolas.TTF', font_size)
            except OSError:
                # BUG FIX: was a bare ``except:`` that swallowed everything
                # (including KeyboardInterrupt).  ImageFont.truetype raises
                # OSError when the font file cannot be read; fall back to
                # PIL's built-in bitmap font in that case only.
                self.font = ImageFont.load_default()

    def generate_ascii_text(self, target_width=100, character_width=7, character_height=10, inverted=False):
        """Return the image rendered as a multi-line ASCII string."""
        charset = self.charset[::-1] if inverted else self.charset
        resized_image = self.resize_image(self.image, target_width, character_width, character_height)
        grayscale_image = self.convert_image_to_grayscale(resized_image)
        ascii_characters = self.convert_pixels_to_ascii(grayscale_image, charset)
        number_of_pixels = len(ascii_characters)
        # Break the flat character stream into rows of ``target_width``.
        ascii_text = '\n'.join(ascii_characters[i:i + target_width] for i in range(0, number_of_pixels, target_width))
        return ascii_text

    def generate_colored_ascii_image(self, target_width=100, character_width=7, character_height=10, inverted=True):
        """Return a PIL image with each ASCII character drawn in its pixel's color.

        When ``inverted`` the charset is reversed and the canvas is black,
        otherwise the canvas is white.
        """
        if inverted:
            charset = self.charset[::-1]
            color = (0, 0, 0)
        else:
            charset = self.charset
            color = (255, 255, 255)
        resized_image = self.resize_image(self.image, target_width, character_width, character_height)
        # (A grayscale ASCII string was previously computed here but never
        # used; the drawing loop below derives each character directly.)
        rgb_pixels = resized_image.load()  # assumes an RGB image — TODO confirm for RGBA/L sources
        colored_ascii_image = Image.new('RGB', (character_width * target_width, character_height * resized_image.height), color=color)
        drawer = ImageDraw.Draw(colored_ascii_image)
        charset_length = len(charset)
        for y in range(resized_image.height):
            for x in range(target_width):
                r, g, b = rgb_pixels[x, y]
                h = int((r + g + b) / 3)  # simple channel average as brightness
                drawer.text((x * character_width, y * character_height), charset[h * charset_length // 256], font=self.font, fill=(r, g, b))
        return colored_ascii_image

    def resize_image(self, image, target_width, character_width, character_height):
        """Resize to ``target_width`` columns, compensating for the aspect
        ratio of a character cell so image proportions are preserved."""
        width, height = image.size
        ratio = height / width
        new_height = int(target_width * ratio * character_width / character_height)
        return image.resize((target_width, new_height))

    def convert_image_to_grayscale(self, colored_image):
        """Return an 8-bit grayscale ('L' mode) copy of ``colored_image``."""
        return colored_image.convert('L')

    def convert_pixels_to_ascii(self, image, charset):
        """Map each grayscale pixel (0-255) to a character from ``charset``."""
        pixels = image.getdata()
        charset_length = len(charset)
        return ''.join([charset[pixel * charset_length // 256] for pixel in pixels])

    def save_text(self, text, filename='ascii_text.txt'):
        """Write ``text`` to ``filename``."""
        with open(filename, 'w') as text_file:
            text_file.write(text)

    def save_image(self, image, filename='colored_ascii_image.png'):
        """Save a PIL ``image`` to ``filename``."""
        image.save(filename)

    def convert_image_to_base64(self, image):
        """Return the PNG bytes of ``image`` encoded as base64."""
        temporary_file = BytesIO()
        image.save(temporary_file, format='PNG')
        return b64encode(temporary_file.getvalue())
import torch
import torch.nn as nn
import torch.nn.functional as F
class Similarity(nn.Module):
    """Siamese similarity head: encodes the two sides of each input pair with
    a *shared* encoder and scores their relatedness with a small MLP.

    The pairwise features follow Conneau et al. (2018): element-wise product
    and absolute difference of the two sentence embeddings.  The output is a
    log-probability distribution over ``config.num_classes`` classes trained
    against a soft target distribution with a KL-divergence loss.
    """

    def __init__(self, encoder, config):
        # encoder: shared sentence encoder; must expose ``config.hidden_size``
        #   and, when called, return a pair whose second item is the
        #   sentence-level hidden state (see forward()).
        # config: must provide ``hidden_similarity_size`` (MLP width) and
        #   ``num_classes`` (size of the target distribution).
        super(Similarity, self).__init__()
        self.config = config
        self.encoder = encoder
        self.hidden_similarity_size = config.hidden_similarity_size
        self.hidden_size = encoder.config.hidden_size
        self.num_classes = config.num_classes
        # Input width is 2 * hidden_size because forward() feeds the
        # concatenation [mult_dist ; abs_dist].
        self.wh = nn.Linear(2 * self.hidden_size, self.hidden_similarity_size)
        # self.wi = nn.Linear(self.hidden_similarity_size, self.hidden_similarity_size)  # added from Conneau, et al. (2018)
        # self.wii = nn.Linear(self.hidden_similarity_size,
        #                      self.hidden_similarity_size)  # added from Choi, et al. (2018)
        self.wp = nn.Linear(self.hidden_similarity_size, self.num_classes)
        # self.bn_mlp_input = nn.BatchNorm1d(num_features=4 * self.hidden_size)  # added from Choi, et al. (2018)
        # self.bn_mlp_output = nn.BatchNorm1d(num_features=self.hidden_similarity_size)  # added from Choi, et al. (2018)
        # self.dropout = nn.Dropout(0.2)  # added from Choi, et al. (2018)
        # KLDivLoss expects log-probabilities as input (hence the log_softmax
        # in forward()) and a target probability distribution.
        self.criterion = nn.KLDivLoss(reduction='batchmean')
        # Run on whatever device the encoder's parameters already live on.
        self.device = next(self.encoder.parameters()).device

    def forward(self, **kwargs):
        """Score a batch of input pairs.

        Expects keyword arguments whose names end in ``_A`` / ``_B`` (the two
        sides of each pair; the suffix is stripped before the dict is passed
        to the encoder) plus ``labels``, a target distribution over classes
        (presumably shape (batch, num_classes) — TODO confirm against caller).

        Returns:
            dict with ``logits`` (log-probabilities, shape (batch,
            num_classes)) and the scalar KL ``loss``.
        """
        inputs = {a: kwargs[a] for a in kwargs}
        targets = inputs['labels']
        # Encode each side with the *same* encoder; ``k[:-2]`` strips the
        # "_A"/"_B" suffix so the encoder sees its usual argument names.
        # Only tensor values are moved to the model device.
        _, lhidden = self.encoder({k[:-2]: v.to(self.device) if torch.is_tensor(v) else v for (k, v) in inputs.items() if k.endswith('_A')})
        _, rhidden = self.encoder({k[:-2]: v.to(self.device) if torch.is_tensor(v) else v for (k, v) in inputs.items() if k.endswith('_B')})
        # lhidden = F.normalize(lhidden, p=2, dim=1)
        # rhidden = F.normalize(rhidden, p=2, dim=1)
        # output = self.similarity(lhidden, rhidden)
        # Symmetric pairwise features: element-wise product and absolute
        # difference of the two sentence embeddings.
        mult_dist = torch.mul(lhidden, rhidden)
        abs_dist = torch.abs(torch.add(lhidden, -rhidden))
        vec_dist = torch.cat((mult_dist, abs_dist), 1)  # lvec, rvec added from Conneau, et al. (2018)
        # vec_dist = torch.cat((mult_dist, abs_dist, lhidden, rhidden), 1)  # lvec, rvec added from Conneau, et al. (20
        # mlp_input = self.bn_mlp_input(vec_dist)  # added from Choi, et al. (2018)
        # mlp_input = self.dropout(mlp_input)  # added from Choi, et al. (2018)
        # outputs = torch.relu(self.wh(mlp_input))  # added from Choi, et al. (2018)
        # outputs = torch.relu(self.wi(outputs))  # added from Choi, et al. (2018)
        # outputs = torch.relu(self.wii(outputs))  # added from Choi, et al. (2018)
        # mlp_output = self.bn_mlp_output(outputs)  # added from Choi, et al. (2018)
        # mlp_output = self.dropout(mlp_output)  # added from Choi, et al. (2018)
        # outputs = F.log_softmax(self.wp(mlp_output), dim=1)  # added from Choi, et al. (2018)
        # outputs = torch.relu(self.wh(vec_dist))  # added from Conneau, et al. (2018)
        outputs = torch.sigmoid(self.wh(vec_dist))  # added from Conneau, et al. (2018)
        # outputs = torch.relu(self.wi(outputs))  # added from Conneau, et al. (2018)
        outputs = F.log_softmax(self.wp(outputs), dim=1)  # added from Conneau, et al. (2018)
        # outputs = self.wp(outputs)
        loss = self.criterion(outputs, targets.to(self.device))
        return {'logits': outputs, 'loss': loss}
import nltk
from tqdm.auto import tqdm
import numpy as np
class GloveTokenizer:
    """Tokenizer backed by a GloVe embedding file.

    Ids 0 and 1 are reserved for the PAD and UNK tokens (their embedding
    rows are zero vectors); words from the embedding file are numbered
    from 2 in file order.
    """

    def __init__(self, glove_file_path, vocab_size=None):
        """
        Args:
            glove_file_path: path to a text GloVe file ("word v1 v2 ..." per line).
            vocab_size: maximum number of embedding rows to load, including
                the two reserved slots; None means no explicit limit
                (capped at 2,000,000 rows).
        """
        self.glove_file_path = glove_file_path
        vocab, self.embeddings_arr = self._read_embedding_file(glove_file_path, vocab_size)
        self.unk_token_id = 1
        self.unk_token = "UNK"
        self.pad_token_id = 0
        self.pad_token = "PAD"
        # Real words start at id 2; 0/1 are reserved for PAD/UNK.
        self.w2idx = {w: i for (i, w) in enumerate(vocab, 2)}
        self.idx2w = {i: w for (i, w) in enumerate(vocab, 2)}

    @property
    def vocab_size(self):
        """Number of real (non-reserved) words in the vocabulary."""
        return len(self.w2idx)

    @property
    def vocab(self):
        """Alphabetically sorted list of known words."""
        return sorted(self.w2idx.keys(), reverse=False)

    @staticmethod
    def _read_embedding_file(embeddings_f_path, vocab_size):
        """Load at most ``vocab_size`` embeddings; rows 0/1 stay zero for PAD/UNK."""
        # BUG FIX: ``min(None, 2000000)`` raised TypeError when vocab_size was
        # None (the constructor's default); treat None as "no explicit limit".
        num_lines = 2000000 if vocab_size is None else min(vocab_size, 2000000)
        with open(embeddings_f_path, 'rb') as f:
            for i in tqdm(range(0, num_lines - 2), total=(num_lines - 2), desc="load embedding file"):
                line = next(f)
                values = line.decode('utf-8').split()
                if i == 0:
                    # Infer the embedding width from the first row, then
                    # allocate the full (num_lines, width) matrix once.
                    embeddings_size = len(values[1:])
                    w_emb = np.zeros((num_lines, embeddings_size), dtype='float32')
                    w = []
                # Words may contain spaces: everything before the last
                # ``embeddings_size`` fields is the word itself.
                word_len = len(values) - embeddings_size
                w.append(' '.join(values[:word_len]))
                w_emb[i + 2] = values[word_len:]
        w_emb[0] = np.zeros_like(w_emb[1])  # PAD embedding (zero vector)
        w_emb[1] = np.zeros_like(w_emb[1])  # UNK embedding (zero vector)
        return w, w_emb

    @staticmethod
    def tokenize(text):
        """Split ``text`` into word tokens using NLTK."""
        return nltk.word_tokenize(text)

    def convert_ids_to_tokens(self, token_ids):
        """Map ids back to words; unknown ids become the UNK token."""
        return [self.idx2w.get(ids, self.unk_token) for ids in token_ids]

    def convert_tokens_to_ids(self, tokens):
        """Map words to ids; out-of-vocabulary words become the UNK id."""
        return [self.w2idx.get(t, self.unk_token_id) for t in tokens]

    @staticmethod
    def convert_tokens_to_string(tokens):
        """Join tokens back into a single space-separated string."""
        return " ".join(tokens)

    def convert_ids_to_embeddings(self, token_ids):
        """Stack the embedding vectors for ``token_ids`` into a 2-D array."""
        embeddings = [self.embeddings_arr[ids] for ids in token_ids]
        return np.vstack(embeddings)

    def pad_token_ids(self, token_ids, max_len):
        """Right-pad ``token_ids`` in place with the PAD id up to ``max_len``."""
        pad_len = max(max_len - len(token_ids), 0)
        token_ids.extend([self.pad_token_id] * pad_len)
        return token_ids

    def decode(self, token_ids):
        """Map ids back to words (same behavior as ``convert_ids_to_tokens``)."""
        return [self.idx2w.get(ti, self.unk_token) for ti in token_ids]

    def encode(self, text_or_tokens, max_length=-1, pad_to_max_length=False, return_embedding=False):
        """Convert text (or a pre-tokenized list) to token ids.

        BUG FIX: padding used to call the non-existent ``pad_tokens`` method
        (AttributeError); and the apparent intent — padding the *token*
        sequence with the literal "PAD" string — would have mapped the
        padding to the UNK id on conversion.  Padding is now applied to the
        id sequence with the proper PAD id (0), and only when
        ``max_length > 0`` as before.

        Args:
            text_or_tokens: raw string (tokenized with NLTK) or token list.
            max_length: truncate to this many tokens when positive.
            pad_to_max_length: right-pad ids to ``max_length`` (requires
                ``max_length > 0``).
            return_embedding: return stacked embedding vectors instead of ids.
        """
        if not isinstance(text_or_tokens, list):
            tokens = self.tokenize(text_or_tokens)
        else:
            tokens = text_or_tokens
        if len(tokens) == 0:
            tokens = [self.unk_token]  # avoid empty sequences downstream
        if max_length > 0:
            tokens = tokens[:max_length]
        token_ids = self.convert_tokens_to_ids(tokens)
        if max_length > 0 and pad_to_max_length:
            token_ids = self.pad_token_ids(token_ids, max_length)
        if return_embedding:
            return self.convert_ids_to_embeddings(token_ids)
        return token_ids
import re
def prepare_input_from_constituency_tree(constituency_tree):
cons_tree = ConsTree([])
tree = cons_tree.read_tree(constituency_tree[5:-1])
tree.close_unaries()
tree.left_markovize(dummy_annotation="")
const = cons_tree.linearize_parse_tree(str(tree))
clean_const = re.sub(r'\(([^ ]+) ', r'([\1] ', const)
tokens = []
for token in re.sub(r'\(([^ ]+) ', r'([\1] ', clean_const).split():
token = token.strip('()')
tokens.append(token)
n_original_tokens = len([t for t in tokens if not t.startswith('[') and not t.endswith(']')])
n_tokens = len(tokens)
clean_const = re.sub(r'\)', r' )', clean_const)
n_original_tokens_idx = 0
n_specific_tokens_idx = n_original_tokens
clean_const_idx = ""
vocab = [''] * n_tokens
for t in clean_const.split():
if t != ')':
if not t[1:].startswith('[') and not t.endswith(']'):
clean_const_idx += str(n_original_tokens_idx)
vocab[n_original_tokens_idx] = t
n_original_tokens_idx += 1
else:
clean_const_idx += '('
clean_const_idx += str(n_specific_tokens_idx)
vocab[n_specific_tokens_idx] = t[1:]
n_specific_tokens_idx += 1
else:
clean_const_idx += ')'
clean_const_idx += ' '
clean_const_idx = ''.join(clean_const_idx[:-1])
head_idx_ = [0] * n_tokens
head_idx_l = [0] * (n_tokens + 1)
head_idx_r = [0] * (n_tokens + 1)
regexp = re.compile(r'\((\d+) (\d+) (\d+) \)')
while regexp.search(clean_const_idx):
for (head_idx, child_1_idx, child_2_idx) in re.findall(regexp, clean_const_idx):
head_idx_[int(child_1_idx)] = int(head_idx) + 1
head_idx_[int(child_2_idx)] = int(head_idx) + 1
head_idx_r[int(child_1_idx) + 1] = 1
head_idx_l[int(child_2_idx) + 1] = 1
clean_const_idx = re.sub(r'\((\d+) \d+ \d+ \)', r'\1', clean_const_idx)
return ['[CLS]'] + vocab, [0] + head_idx_, head_idx_r, head_idx_l
class ConsTree(object):
"""
That's your phrase structure tree.
"""
def __init__(self, label, children=None):
self.label = label
self.children = [] if children is None else children
def copy(self):
"""
Performs a deep copy of this tree
"""
return ConsTree(self.label, [c.copy() for c in self.children])
@staticmethod
def linearize_parse_tree(parse_tree):
linearized_tree = re.sub(r'(:? ?\([^(]*\_ )([^)]*)\)', r' \2', parse_tree)
# linearized_tree = [x.rstrip() for x in re.split('([\(\)])', parse_tree) if x.rstrip()]
# linearized_tree = [x for x in linearized_tree[2:] if x != "("]
return linearized_tree
def is_leaf(self):
return self.children == []
def add_child(self, child_node):
self.children.append(child_node)
def arity(self):
return len(self.children)
def get_child(self, idx=0):
"""
@return the idx-th child of this node.
"""
return self.children[idx]
def __str__(self):
"""
Pretty prints the tree
"""
return self.label if self.is_leaf() else '(%s %s)' % (
self.label, ' '.join([str(child) for child in self.children]))
def tokens(self, labels=True):
"""
@param labels: returns a list of strings if true else returns
a list of ConsTree objects (leaves)
@return the list of words at the leaves of the tree
"""
if self.is_leaf():
return [self.label] if labels else [self]
else:
result = []
for child in self.children:
result.extend(child.tokens(labels))
return result
def pos_tags(self):
"""
@return the list of pos tags as ConsTree objects
"""
if self.arity() == 1 and self.get_child().is_leaf():
return [self]
else:
result = []
for child in self.children:
result.extend(child.pos_tags())
return result
def index_leaves(self):
"""
Adds an numeric index to each leaf node
"""
for idx, elt in enumerate(self.tokens(labels=False)):
elt.idx = idx
def triples(self):
"""
Extracts a list of evalb triples from the tree
(supposes leaves are indexed)
"""
subtriples = []
if self.is_leaf():
return [(self.idx, self.idx + 1, self.label)]
for child in self.children:
subtriples.extend(child.triples())
leftidx = min([idx for idx, jdx, label in subtriples])
rightidx = max([jdx for idx, jdx, label in subtriples])
subtriples.append((leftidx, rightidx, self.label))
return subtriples
def compare(self, other):
"""
Compares this tree to another and computes precision,recall,
fscore. Assumes self is the reference tree
@param other: the predicted tree
@return (precision,recall,fscore)
"""
print('***', str(self), str(other))
self.index_leaves()
other.index_leaves()
# filter out leaves
# ref_triples = set([(i,j,X) for i,j,X in self.triples() if j != i+1])
# pred_triples = set([(i,j,X) for i,j,X in other.triples() if j != i+1])
ref_triples = set(self.triples())
pred_triples = set(other.triples())
intersect = ref_triples.intersection(pred_triples)
isize = len(intersect)
P = isize / len(pred_triples)
R = isize / len(ref_triples)
F = (2 * P * R) / (P + R)
return (P, R, F)
def strip_tags(self):
"""
In place (destructive) removal of pos tags
"""
def gen_child(node):
if len(node.children) == 1 and node.children[0].is_leaf():
return node.children[0]
return node
self.children = [gen_child(child) for child in self.children]
for child in self.children:
child.strip_tags()
def normalize_OOV(self, lexicon, unk_token):
"""
Destructively replaces all leaves by the unk_token when the leaf label is not in
lexicon. Normalizes numbers
@param lexicon : a set of strings
@param unk_token: a string
@return a pointer to the tree root
"""
if self.is_leaf():
if self.label not in lexicon:
self.label = unk_token
for child in self.children:
child.normalize_OOV(lexicon, unk_token)
return self
def add_gold_tags(self, tag_sequence=None, idx=0):
"""
Adds gold tags to the tree on top of leaves(for evalb compatibility).
Destructive method.
"""
newchildren = []
for child in self.children:
if child.is_leaf():
label = tag_sequence[idx]
tag = ConsTree(label, children=[child])
newchildren.append(tag)
idx += 1
else:
newchildren.append(child)
idx = child.add_gold_tags(tag_sequence, idx)
self.children = newchildren
return idx
def add_dummy_root(self, root_label='TOP'):
"""
In place addition of a dummy root
"""
selfcopy = ConsTree(self.label, children=self.children)
self.children = [selfcopy]
self.label = root_label
def close_unaries(self, dummy_annotation='@'):
"""
In place (destructive) unary closure of unary branches
"""
if self.arity() == 1:
current = self
unary_labels = []
while current.arity() == 1 and not current.get_child().is_leaf():
unary_labels.append(current.label)
current = current.get_child()
unary_labels.append(current.label)
self.label = dummy_annotation.join(unary_labels)
self.children = current.children
for child in self.children:
child.close_unaries()
def expand_unaries(self, dummy_annotation='@'):
"""
In place (destructive) expansion of unary symbols.
"""
if dummy_annotation in self.label:
unary_chain = self.label.split(dummy_annotation)
self.label = unary_chain[0]
backup = self.children
current = self
for label in unary_chain[1:]:
c = ConsTree(label)
current.children = [c]
current = c
current.children = backup
for child in self.children:
child.expand_unaries()
def left_markovize(self, dummy_annotation=':'):
"""
In place (destructive) left markovization (order 0)
"""
if len(self.children) > 2:
left_sequence = self.children[:-1]
dummy_label = self.label if self.label[-1] == dummy_annotation else self.label + dummy_annotation
dummy_tree = ConsTree(dummy_label, left_sequence)
self.children = [dummy_tree, self.children[-1]]
for child in self.children:
child.left_markovize()
def right_markovize(self, dummy_annotation=':'):
"""
In place (destructive) right markovization (order 0)
"""
if len(self.children) > 2:
right_sequence = self.children[1:]
dummy_label = self.label if self.label[-1] == dummy_annotation else self.label + dummy_annotation
dummy_tree = ConsTree(dummy_label, right_sequence)
self.children = [self.children[0], dummy_tree]
for child in self.children:
child.right_markovize()
def unbinarize(self, dummy_annotation=':'):
"""
In place (destructive) unbinarization
"""
newchildren = []
for child in self.children:
if child.label[-1] == dummy_annotation:
child.unbinarize()
newchildren.extend(child.children)
else:
child.unbinarize()
newchildren.append(child)
self.children = newchildren
def collect_nonterminals(self):
"""
Returns the list of nonterminals found in a tree:
"""
if not self.is_leaf():
result = [self.label]
for child in self.children:
result.extend(child.collect_nonterminals())
return result
return []
@staticmethod
def read_tree(input_str):
"""
Reads a one line s-expression.
This is a non robust function to syntax errors
@param input_str: a s-expr string
@return a ConsTree object
"""
tokens = input_str.replace('(', ' ( ').replace(')', ' ) ').split()
stack = [ConsTree('dummy')]
for idx, tok in enumerate(tokens):
if tok == '(':
current = ConsTree(tokens[idx + 1])
stack[-1].add_child(current)
stack.append(current)
elif tok == ')':
stack.pop()
else:
if tokens[idx - 1] != '(':
stack[-1].add_child(ConsTree(tok))
assert (len(stack) == 1)
return stack[-1].get_child() | /rvnn-0.0.1.tar.gz/rvnn-0.0.1/pytree/data/constituency_tree.py | 0.630002 | 0.308542 | constituency_tree.py | pypi |
import warnings
import numpy as np
from matplotlib.cm import get_cmap
from matplotlib.colors import LinearSegmentedColormap
def gray_scale_to_color_ramp(gray_scale, colormap, min_colormap_cut=None, max_colormap_cut=None, alpha=False,
                             output_8bit=True):
    """
    Turns normalized gray scale np.array to rgba (np.array of 4 np.arrays r, g, b, a).
    Parameters
    ----------
    gray_scale : np.array (2D)
        Normalized gray_scale img as np.array (0-1)
    colormap : str
        Colormap from matplotlib (https://matplotlib.org/3.3.2/tutorials/colors/colormaps.html)
    min_colormap_cut : float
        What lower part of colormap to cut to select part of colormap.
        Valid values are between 0 and 1, if 0.2 it cuts off (deletes) 20% of lower colors in colormap.
        If None cut is not applied.
    max_colormap_cut : float
        What upper part of colormap to cut to select part of colormap.
        Valid values are between 0 and 1, if 0.8 it cuts off (deletes) 20% of upper colors in colormap.
        If None cut is not applied.
    alpha : bool
        If True outputs 4D array RGBA, if False outputs 3D array RGB
    output_8bit : bool
        If true output values will be int 0-255 instead of normalized values.
    Returns
    -------
    rgba_out : np.array (3D: red, green, blue) or, if alpha, (4D: red, green, blue, alpha)
        Band-first array; values are uint8 0-255 when output_8bit, otherwise floats 0-1.
    """
    cm = get_cmap(colormap)
    # Truncate colormap if only a sub-range of it should be used
    if min_colormap_cut is not None or max_colormap_cut is not None:
        if min_colormap_cut is None:
            min_colormap_cut = 0.0
        if max_colormap_cut is None:
            max_colormap_cut = 1.0
        if min_colormap_cut > 1 or min_colormap_cut < 0 or max_colormap_cut > 1 or max_colormap_cut < 0:
            raise Exception("rvt.blend_func.gray_scale_to_color_ramp: min_colormap_cut and max_colormap_cut must be"
                            " between 0 and 1!")
        if min_colormap_cut >= max_colormap_cut:
            # Fixed error message: this branch rejects min >= max, so min has
            # to be *smaller* than max (the original text said the opposite).
            raise Exception("rvt.blend_func.gray_scale_to_color_ramp: min_colormap_cut has to be smaller than"
                            " max_colormap_cut!")
        cm = truncate_colormap(cmap=cm, minval=min_colormap_cut, maxval=max_colormap_cut)
    # Map gray values through the colormap -> (rows, cols, 4), normalized floats
    rgba_mtpl_out = cm(gray_scale)
    if output_8bit:
        nan_mask = np.isnan(gray_scale)
        rgba_mtpl_out[nan_mask] = 0  # nodata pixels become 0 in every band
        rgba_mtpl_out = np.uint8(rgba_mtpl_out * 255)  # 0-1 scale to 0-255 and change type to uint8
    # Move array axes to band-first order, i.e. (rows, cols, bands) -> (bands, rows, cols)
    rgba_out = rgba_mtpl_out.transpose(2, 0, 1)
    # Discard the 4th band if alpha is not requested
    if not alpha:
        rgba_out = rgba_out[:3, ...]
    return rgba_out
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """Return a new colormap containing only the [minval, maxval] slice of *cmap*,
    sampled at *n* evenly spaced points."""
    samples = cmap(np.linspace(minval, maxval, n))
    name = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    return LinearSegmentedColormap.from_list(name, samples)
def normalize_lin(image, minimum, maximum):
    """Clamp *image* to [minimum, maximum] (in place) and stretch it to 0-1.

    Returns the stretched values as float32; the input array itself is only
    modified by the clamping step.
    """
    # linear cut off, done in place on the caller's array
    image[image > maximum] = maximum
    image[image < minimum] = minimum
    # stretch to the 0.0 - 1.0 interval
    stretched = (image - minimum) / (maximum - minimum)
    # guard against numeric spill-over outside 0-1
    stretched[stretched > 1] = 1
    stretched[stretched < 0] = 0
    return np.float32(stretched)
def lin_cutoff_calc_from_perc(image, minimum, maximum):
    """Compute min/max data values for a linear stretch from percent cut-offs.

    *minimum* and *maximum* are percentages (0-100) to cut from the lower and
    upper end of the distribution respectively.  Returns a dict with keys
    "min_lin" and "max_lin".
    """
    if not (0 <= minimum <= 100) or not (0 <= maximum <= 100):
        raise Exception("rvt.blend_func.lin_cutoff_calc_from_perc: minimum, maximum are percent and have to be in "
                        "range 0-100!")
    if minimum + maximum > 100:
        raise Exception("rvt.blend_func.lin_cutoff_calc_from_perc: if minimum + maximum > 100% then there are no"
                        " values left! You can't cutoff whole image!")
    min_lin, max_lin = np.nanpercentile(a=image, q=np.array([minimum, 100 - maximum]))
    if min_lin == max_lin:
        # degenerate cut-off: fall back to the full data range
        min_lin = np.nanmin(image)
        max_lin = np.nanmax(image)
    return {"min_lin": min_lin, "max_lin": max_lin}
def normalize_perc(image, minimum, maximum):
    """Normalize *image* to 0-1 using percent (0-100) cut-offs on both ends."""
    cutoffs = lin_cutoff_calc_from_perc(image, minimum, maximum)
    return normalize_lin(image, cutoffs["min_lin"], cutoffs["max_lin"])
def advanced_normalization(image, minimum, maximum, normalization):
    """Run normalization based on the selected normalization type.

    Parameters:
        image: raster array to normalize.
        minimum, maximum: data values ("value" mode) or percent cut-offs
            ("perc" mode).
        normalization: "value", "perc", or None (no-op).

    Raises:
        Exception: for invalid min/max combinations or an unknown type.
    """
    # Perform checks if correct values were given for value-based normalization
    if minimum == maximum and normalization == "value":
        raise Exception("rvt.blend_func.advanced_normalization: If normalization == value, min and max cannot be the"
                        " same!")
    if minimum > maximum and normalization == "value":
        raise Exception("rvt.blend_func.advanced_normalization: If normalization == value, max can't be smaller"
                        " than min!")
    # Check None first: calling normalization.lower() on None raised an
    # AttributeError before the original `elif normalization is None` branch
    # could ever be reached.
    if normalization is None:
        equ_image = image
    elif normalization.lower() == "value":
        equ_image = normalize_lin(image=image, minimum=minimum, maximum=maximum)
    elif normalization.lower() == "perc":
        equ_image = normalize_perc(image=image, minimum=minimum, maximum=maximum)
    else:
        raise Exception(f"rvt.blend_func.advanced_normalization: Unknown normalization type: {normalization}")
    return equ_image
def lum(img):
    """Return the luminosity of *img*.

    For a 3-band (band-first) array the 0.3/0.59/0.11 RGB weights are
    applied; single-band input is returned unchanged.
    """
    if len(img.shape) != 3:
        return img
    red, green, blue = img[0], img[1], img[2]
    return np.float32((0.3 * red) + (0.59 * green) + (0.11 * blue))
def matrix_eq_min_lt_zero(r: np.ndarray, idx_min_lt_zero, lum_c, min_c):
    """Rescale channel values at *idx_min_lt_zero* back towards the luminosity.

    Implements l + (c - l) * l / (l - min) at the selected indices, for
    colors whose channel minimum dropped below zero.  *r* is modified in
    place and returned.
    """
    sel = idx_min_lt_zero
    r[sel] = lum_c[sel] + (((r[sel] - lum_c[sel]) * lum_c[sel]) / (lum_c[sel] - min_c[sel]))
    return r
def matrix_eq_max_gt_one(r: np.ndarray, idx_max_c_gt_one, lum_c, max_c):
    """Compress channel values at *idx_max_c_gt_one* towards the luminosity.

    Implements l + (c - l) * (1 - l) / (max - l) at the selected indices, for
    colors whose channel maximum exceeded one.  *r* is modified in place and
    returned.
    """
    sel = idx_max_c_gt_one
    r[sel] = lum_c[sel] + (((r[sel] - lum_c[sel]) * (1.0 - lum_c[sel])) / (max_c[sel] - lum_c[sel]))
    return r
def channel_min(r: np.ndarray, g: np.ndarray, b: np.ndarray):
    """Return the elementwise minimum of the three channels.

    Starts from a float copy of *r* (inputs are left untouched) and lowers
    it wherever *g* or *b* is smaller.
    """
    min_c = r * 1.0
    for channel in (g, b):
        mask = np.where(channel < min_c)
        min_c[mask] = channel[mask]
    return min_c
def channel_max(r: np.ndarray, g: np.ndarray, b: np.ndarray):
    """Return the elementwise maximum of the three channels.

    Starts from a float copy of *r* (inputs are left untouched) and raises
    it wherever *g* or *b* is larger.
    """
    max_c = r * 1.0
    for channel in (g, b):
        mask = np.where(channel > max_c)
        max_c[mask] = channel[mask]
    return max_c
def clip_color(c, min_c=None, max_c=None):
    """Clip a band-first RGB image *c* so channel values fall back into 0-1.

    Channels are rescaled towards the pixel luminosity wherever the
    per-pixel channel minimum is below 0 or the maximum is above 1.
    min_c / max_c may be precomputed; each one that is None is derived from
    *c* itself.  Computing them independently also fixes a crash when only
    one of them was supplied (the other previously stayed None and broke
    the comparisons below).
    """
    lum_c = lum(c)
    r = np.float32(c[0])
    g = np.float32(c[1])
    b = np.float32(c[2])
    if min_c is None:
        min_c = channel_min(r, g, b)
    if max_c is None:
        max_c = channel_max(r, g, b)
    # pull channels up where the darkest channel went negative
    idx_min_lt_zero = np.where(min_c < 0)
    r = matrix_eq_min_lt_zero(r, idx_min_lt_zero, lum_c, min_c)
    g = matrix_eq_min_lt_zero(g, idx_min_lt_zero, lum_c, min_c)
    b = matrix_eq_min_lt_zero(b, idx_min_lt_zero, lum_c, min_c)
    # pull channels down where the brightest channel exceeded one
    idx_max_c_gt_one = np.where(max_c > 1)
    r = matrix_eq_max_gt_one(r, idx_max_c_gt_one, lum_c, max_c)
    g = matrix_eq_max_gt_one(g, idx_max_c_gt_one, lum_c, max_c)
    b = matrix_eq_max_gt_one(b, idx_max_c_gt_one, lum_c, max_c)
    c_out = np.zeros(c.shape)
    c_out[0, :, :] = r
    c_out[1, :, :] = g
    c_out[2, :, :] = b
    return c_out
def blend_normal(active, background):
    """Normal blend: the active layer fully replaces the background."""
    return active
def blend_screen(active, background):
    """Screen blend: inverted multiply of the inverted layers."""
    return 1 - (1 - active) * (1 - background)
def blend_multiply(active, background):
    """Multiply blend: elementwise product of the two layers."""
    return active * background
def blend_overlay(active, background):
    """Overlay blend (modifies *background* in place and returns it).

    Dark background pixels (<= 0.5) are multiplied with the active layer,
    bright pixels (> 0.5) are screened.
    """
    bright = np.where(background > 0.5)
    dark = np.where(background <= 0.5)
    background[bright] = (1 - (1 - 2 * (background[bright] - 0.5)) * (1 - active[bright]))
    background[dark] = (2 * background[dark]) * active[dark]
    return background
def blend_soft_light(active, background):
    """Soft-light blend (modifies *background* in place and returns it).

    For active < 0.5 the background is darkened with a quadratic term;
    otherwise it is lightened using a square-root falloff.
    (Removed the dead commented-out variant that used the active-layer
    threshold differently.)
    """
    idx1 = np.where(active < 0.5)
    idx2 = np.where(active >= 0.5)
    background[idx1] = 2 * background[idx1] * active[idx1] + background[idx1] ** 2 * (1.0 - 2 * active[idx1])
    background[idx2] = 2 * background[idx2] * (1.0 - active[idx2]) + np.sqrt(background[idx2]) * (
            2 * active[idx2] - 1.0)
    return background
def blend_luminosity(active, background, min_c=None, max_c=None):
    """Luminosity blend: keep the background colors, take the active luminosity.

    For a single-band background the active luminosity itself is returned;
    otherwise the luminosity difference is added to every band and the
    result is clipped back into gamut with clip_color.
    """
    lum_active = lum(active)
    lum_diff = lum_active - lum(background)
    if len(background.shape) < 3:
        return lum_active
    blended = np.zeros(background.shape)
    for band in range(3):
        blended[band, :, :] = background[band] + lum_diff
    return clip_color(blended, min_c, max_c)
def equation_blend(blend_mode, active, background):
    """Dispatch to the per-band blend equation named by *blend_mode*.

    Supported modes (case-insensitive): "screen", "multiply", "overlay",
    "soft_light".

    Raises:
        Exception: for an unknown blend mode.  The original implementation
        silently returned None here, which then crashed callers downstream.
    """
    dispatch = {
        "screen": blend_screen,
        "multiply": blend_multiply,
        "overlay": blend_overlay,
        "soft_light": blend_soft_light,
    }
    try:
        blend_function = dispatch[blend_mode.lower()]
    except KeyError:
        raise Exception(f"rvt.blend_func.equation_blend: Unknown blend mode: {blend_mode}")
    return blend_function(active, background)
def blend_multi_dim_images(blend_mode, active, background):
    """Apply a per-band blend equation to any mix of RGB / single-band images.

    RGB images are band-first (3, rows, cols).  When both images are RGB the
    bands are blended pairwise; when only one is RGB, the single-band image
    is blended against each of its bands.
    """
    active_is_rgb = len(active.shape) == 3
    background_is_rgb = len(background.shape) == 3
    if not active_is_rgb and not background_is_rgb:
        return equation_blend(blend_mode, active, background)
    template = background if background_is_rgb else active
    blended_image = np.zeros(template.shape)
    for band in range(3):
        active_band = active[band, :, :] if active_is_rgb else active
        background_band = background[band, :, :] if background_is_rgb else background
        blended_image[band, :, :] = equation_blend(blend_mode, active_band, background_band)
    return blended_image
def blend_images(blend_mode, active, background, min_c=None, max_c=None):
    """Blend two images with the named mode; anything unknown falls back to
    "normal" (active replaces background)."""
    mode = blend_mode.lower()
    if mode in ("multiply", "overlay", "screen", "soft_light"):
        return blend_multi_dim_images(blend_mode, active, background)
    if mode == "luminosity":
        return blend_luminosity(active, background, min_c, max_c)
    return blend_normal(active, background)
def render_images(active, background, opacity):
    """Composite *active* over *background* with the given opacity.

    Either image is rescaled to 0-1 first when its values fall outside that
    interval; RGB images are band-first (3, rows, cols).
    """
    # Both active and background images have to be between 0 and 1
    if np.nanmin(active) < 0 or np.nanmax(active) > 1:
        active = scale_0_to_1(active)
    if np.nanmin(background) < 0 or np.nanmax(background) > 1:
        background = scale_0_to_1(background)
    active_is_rgb = len(active.shape) == 3
    background_is_rgb = len(background.shape) == 3
    if not active_is_rgb and not background_is_rgb:
        # both single band
        return apply_opacity(active, background, opacity)
    # at least one image has 3 bands: composite band by band
    template = background if background_is_rgb else active
    render_image = np.zeros(template.shape)
    for band in range(3):
        active_band = active[band, :, :] if active_is_rgb else active
        background_band = background[band, :, :] if background_is_rgb else background
        render_image[band, :, :] = apply_opacity(active_band, background_band, opacity)
    return render_image
def scale_within_0_and_1(numeric_value):
    """Scale *numeric_value* into the 0-1 interval.

    NaNs are replaced (in place) with the array minimum before scaling.
    Values that look like 8-bit data (max in (1, 255], spread > 30, min > 0)
    are simply divided by 255 instead of min/max stretched.
    """
    if np.nanmin(numeric_value) >= 0 and np.nanmax(numeric_value) <= 1:
        return numeric_value
    numeric_value[np.isnan(numeric_value)] = np.nanmin(numeric_value)  # nan change to nanmin
    actual_min = np.nanmin(numeric_value)
    actual_max = np.nanmax(numeric_value)
    # Bug fix: `np.array(0, actual_min)` passed actual_min as a *dtype* and
    # raised a TypeError; the intent was to clamp the normalization bounds.
    norm_min_value = max(0.0, actual_min)
    norm_max_value = min(1.0, actual_max)
    # Do not scale values where max is between 1 and 255 if the max-min values
    # difference is at least 30 and min > 0 (8-bit-like data; the original
    # comment also mentions integer types, but no dtype check is performed).
    if 255 >= actual_max > 1:
        if actual_max - actual_min > 30 and actual_min > 0:
            scaled = numeric_value / 255
            return scaled
    scaled = (numeric_value - norm_min_value) / (norm_max_value - norm_min_value)
    # snap tiny negative values (rounding noise) to 0
    if np.nanmin(scaled) > -0.01:
        scaled[(0 > scaled) & (scaled > -0.01)] = 0
    return scaled
def scale_strict_0_to_1(numeric_value):
    """Linearly stretch *numeric_value* so its minimum maps to 0 and its
    maximum to 1.

    NaNs are replaced (in place) with 0 before scaling; input that already
    spans exactly 0..1 is returned unchanged.
    """
    if np.nanmin(numeric_value) == 0 and np.nanmax(numeric_value) == 1:
        return numeric_value
    numeric_value[np.isnan(numeric_value)] = 0  # nan change to 0
    min_value = np.nanmin(numeric_value)
    max_value = np.nanmax(numeric_value)
    scaled = (numeric_value - min_value) / (max_value - min_value)
    if np.nanmin(scaled) > -0.01:
        # Bug fix: the chained comparison `0 > scaled > -0.01` is invalid for
        # numpy arrays (ambiguous truth value, raises ValueError); use an
        # explicit boolean mask, mirroring scale_within_0_and_1.
        scaled[(0 > scaled) & (scaled > -0.01)] = 0
    return scaled
def scale_0_to_1(numeric_value):
    """Scale *numeric_value* to 0-1, picking a strategy from its value range."""
    value_min = np.nanmin(numeric_value)
    value_max = np.nanmax(numeric_value)
    if 1 >= value_max > 0.9 and value_min == 0:
        # already effectively spans 0-1
        return numeric_value
    if value_max - value_min > 0.3:
        return scale_within_0_and_1(numeric_value)
    return scale_strict_0_to_1(numeric_value)
def apply_opacity(active, background, opacity):
    """Alpha-composite *active* over *background*.

    *opacity* may be given as a fraction (0-1) or as percent (values > 1
    are divided by 100).
    """
    if opacity > 1:
        opacity /= 100
    return active * opacity + background * (1 - opacity)
def normalize_image(visualization, image, min_norm, max_norm, normalization):
    """Main normalization entry point for a visualization raster.

    Runs advanced normalization and applies per-visualization tweaks:
    slope-like visualizations are inverted (high values render dark) and
    out-of-range results are rescaled to 0-1.  Returns None when
    *visualization* is None.
    """
    if visualization is None:
        return None
    if normalization == "percent":
        normalization = "perc"
    norm_image = advanced_normalization(image=image, minimum=min_norm, maximum=max_norm, normalization=normalization)
    # Force the result into the 0-1 interval; warn only for visualizations
    # where values above 1 are unexpected (multi-directional hillshade is
    # rescaled silently).
    if np.nanmax(norm_image) > 1:
        norm_image = scale_0_to_1(norm_image)
        if not (visualization.lower() == "multiple directions hillshade" or visualization == "mhs"):
            warnings.warn("rvt.blend_func.normalize_image: unexpected values! max > 1")
    if np.nanmin(norm_image) < 0:
        norm_image = scale_0_to_1(norm_image)
        warnings.warn("rvt.blend_func.normalize_image: unexpected values! min < 0")
    # For slope-like visualizations invert the scale (high slopes will be black)
    if visualization.lower() == "slope gradient" or visualization.lower() == "openness - negative" or \
            visualization == "slp" or visualization == "neg_opns":
        norm_image = 1 - norm_image
    return norm_image
def cut_off_normalize(image, mode, cutoff_min=None, cutoff_max=None, bool_norm=True):
    """
    One band image cut-off or normalization or both.

    Image is a 2D np.ndarray of raster values, mode is "perc" or "value"
    (the unit of cutoff_min and cutoff_max), cutoff_min and cutoff_max are
    the minimum and maximum cut-off values.
    (e.g. percent min=2 and max=3 -> cut off the lower 2% and the upper 3% of values;
    e.g. value min=10 and max=60 -> cut off below 10 and above 60, image values will be 10-60)
    When bool_norm is True the cut image is also stretched to 0-1.
    """
    # Sanity checks only apply when both cut-offs were supplied explicitly.
    if cutoff_min is not None and cutoff_max is not None:
        if cutoff_min == cutoff_max and mode == "value":
            raise Exception("rvt.blend_func.cut_off_normalize: If normalization == value, min and max cannot be the"
                            " same!")
        if cutoff_min > cutoff_max and mode == "value":
            raise Exception("rvt.blend_func.cut_off_normalize: If normalization == value, max can't be smaller"
                            " than min!")
    cut_off_arr = image
    # Fill in defaults for missing cut-offs: the data extremes for "value"
    # mode, and 0% (i.e. cut nothing) for percent mode.
    if cutoff_min is None and mode.lower() == "value":
        cutoff_min = np.amin(image)
    if cutoff_max is None and mode.lower() == "value":
        cutoff_max = np.amax(image)
    if cutoff_min is None and (mode.lower() == "perc" or mode.lower() == "percent"):
        cutoff_min = 0
    if cutoff_max is None and (mode.lower() == "perc" or mode.lower() == "percent"):
        cutoff_max = 0
    if bool_norm:
        # Cut off and stretch to 0-1 in one step.
        if mode.lower() == "value":
            cut_off_arr = normalize_lin(cut_off_arr, cutoff_min, cutoff_max)
        elif mode.lower() == "perc" or mode.lower() == "percent":
            cut_off_arr = normalize_perc(cut_off_arr, cutoff_min, cutoff_max)
    else:
        # Cut off only: clamp values in place, keep the original value range.
        if mode.lower() == "value":
            cut_off_arr[cut_off_arr > cutoff_max] = cutoff_max
            cut_off_arr[cut_off_arr < cutoff_min] = cutoff_min
        elif mode.lower() == "perc" or mode.lower() == "percent":
            min_max_value_dict = lin_cutoff_calc_from_perc(cut_off_arr, cutoff_min, cutoff_max)
            min_value = min_max_value_dict["min_lin"]
            max_value = min_max_value_dict["max_lin"]
            cut_off_arr[cut_off_arr > max_value] = max_value
            cut_off_arr[cut_off_arr < min_value] = min_value
    return cut_off_arr | /rvt_py-2.2.1.tar.gz/rvt_py-2.2.1/rvt/blend_func.py | 0.900004 | 0.532972 | blend_func.py | pypi |
import requests
import pandas as pd
class rw_api_tools:
    """Helpers for querying the Resource Watch (RW) API for dataset metadata."""

    def __init__(self):
        """Constructor; holds a placeholder until data is fetched."""
        self.data = "None yet"

    @staticmethod
    def get_rw_datasets(provider=None):
        """Fetch all saved RW datasets and summarize them in a DataFrame.

        Declared as a staticmethod: the original definition had no ``self``
        parameter, so calling it on an instance passed the instance itself
        as ``provider``.

        Args:
            provider: optional provider name to filter the result on.

        Returns:
            pandas.DataFrame indexed by dataset id ("rw_id"), sorted by most
            recently updated, optionally filtered by provider; the string
            "Not a valid provider!" when the provider is unknown.
        """
        url = "https://api.resourcewatch.org/v1/dataset?sort=slug,-provider,userId&status=saved&includes=metadata,vocabulary,widget,layer"
        # very large page size so all datasets come back in one request
        payload = {"application": "rw", "page[size]": 100000000000}
        res = requests.request("GET", url, params=payload)
        data = res.json()["data"]
        datasets_on_api = {}
        for ix, dset in enumerate(data):
            atts = dset["attributes"]
            metadata = atts["metadata"]
            layers = atts["layer"]
            widgets = atts["widget"]
            tags = atts["vocabulary"]
            datasets_on_api[dset["id"]] = {
                "name": atts["name"],
                "slug": atts["slug"],
                "provider": atts["provider"],
                "date_updated": atts["updatedAt"],
                "num_layers": len(layers),
                "layers": layers,
                "num_widgets": len(widgets),
                "widgets": widgets,
                "num_metadata": len(metadata),
                "metadata": metadata,
                "num_tags": len(tags),
                "tags": tags,
                "table_name": atts["tableName"],
                "position": ix,
            }
        print("num datasets: ", len(data))
        current_datasets_on_api = pd.DataFrame(datasets_on_api).transpose()
        current_datasets_on_api.index.rename("rw_id", inplace=True)
        current_datasets_on_api.sort_values(by=["date_updated"], inplace=True, ascending=False)
        if provider is None:
            return current_datasets_on_api
        if provider in current_datasets_on_api["provider"].unique():
            match = current_datasets_on_api["provider"] == provider
            return current_datasets_on_api.loc[match]
        return "Not a valid provider!"
import os
from pathlib import Path
from dependence.function_write import *
from dependence.function_read import *
from dependence.control_folder_exist import *
__all__ = [
"file_rw",
]
def file_rw(file_path, data=None, mode='read', sep=',', file_extension='csv', parent_directory=None, serie=False,
            column_name=None, use_cols=None):
    """
    Read or write data to a file. Supports CSV, JSON, and Pickle formats.

    The actual I/O is delegated to the read_*/write_* helpers imported
    (via wildcard) from dependence.function_read / dependence.function_write.

    Parameters:
    ----------
    file_path : str
        File name (without extension); the full path is built below.
    data : pandas.DataFrame or pandas.Series or any Python object, optional
        The data to be written to the file, by default None (read mode).
    file_extension : str, optional
        The file format, either 'csv', 'json', or 'pkl', by default 'csv'.
    mode : str, optional
        Either 'read' or 'write', by default 'read'.
    parent_directory : str, optional
        Saves in a specific folder instead of "dataset/data_<ext>", by default None.
    serie : bool, optional
        When reading a csv, return a pandas Series instead of a DataFrame.
    use_cols : str or None, optional
        If "serie" is True, selects the single column to read. If None, all
        columns will be read.
    column_name : list or None, optional
        New column names for the DataFrame, by default None.

    Returns:
    -------
    pandas.DataFrame or pandas.Series or any Python object
        The data read from the file (read mode), or the helper's return
        value (write mode).
    """
    directory = "dataset"
    # Build the destination folder and the full file path.
    if parent_directory is None:
        folder_path = Path(f"{directory}/data_{file_extension}")
        file_root = f"{directory}/data_{file_extension}/{file_path}.{file_extension}"
    else:
        folder_path = Path(parent_directory)
        os.makedirs(parent_directory, exist_ok=True)
        file_root = f"{parent_directory}/{file_path}.{file_extension}"
    # Create the destination folder on demand.
    if not folder_path.exists():
        print("the path to the file does not exist, create the destination.")
        folder_path.mkdir(parents=True, exist_ok=True)
    # NOTE: rebinds the file_path parameter to the full Path object.
    file_path = Path(file_root)
    if mode == 'read':
        if file_extension == 'csv':
            if serie:
                # Single-column read returned as a Series (squeeze=True).
                usecols = [use_cols] if use_cols else None
                return read_csv(file_path, sep=sep, column_names=column_name, use_cols=usecols, squeeze=True)
            return read_csv(file_path, sep=sep, column_names=column_name)
        elif file_extension == 'json':
            return read_json(file_path)
        elif file_extension == 'pkl':
            return read_pickle(file_path)
        else:
            # NOTE(review): an unknown extension only prints and returns
            # None -- confirm callers expect that instead of an exception.
            print("error extension file")
    elif mode == 'write':
        if file_extension == 'csv':
            return write_csv(file_path, data=data, sep=sep, column_names=column_name)
        elif file_extension == 'json':
            return write_json(file_path, data=data)
        elif file_extension == 'pkl':
            return write_pickle(file_path, data=data)
        else:
            # NOTE(review): same silent fallthrough as in read mode.
            print("error extension file")
    else:
        raise ValueError("'mode' must be either 'read' or 'write'") | /rw_dataframe_data_io-0.1.4.tar.gz/rw_dataframe_data_io-0.1.4/utils/DataIO.py | 0.571049 | 0.276324 | DataIO.py | pypi |
# rw-dynamicworld-cd
A repository holding code and example notebooks for change detection methods and post-classification processing for the Dynamic World Land Cover product. Dynamic World is a joint initiative between the World Resources Institute, National Geographic Society, Google, and Impact Observatory. The Dynamic World Land Cover product is a 10-meter resolution, Sentinel-2 based land cover dataset that runs globally. Dynamic World classifies Sentinel-2 scenes and can be reduced to annual classifications.
Due to noise and classification errors, some changes in classifications from year to year may not represent actual change on the ground. Therefore many land cover products apply change detection or post-classification filters to improve the consistency of the land cover product through time or improve the classification accuracy on the training data. The Resource Watch team within the World Resources Institute has developed this repository to demonstrate five change detection and post-classification approaches that can be used in Dynamic World.
All Python modules are contained in the [wri_change_detection](https://github.com/wri/rw-dynamicworld-cd/tree/master/wri_change_detection) folder which will soon be installable via pip as well. Jupyter notebooks demonstrating methods for change detection and post-classification processing are contained in the [demo_notebooks](https://github.com/wri/rw-dynamicworld-cd/tree/master/demo_notebooks) folder.
The demo_notebooks folder contains 4 notebooks:
1. [MapBiomas_Spatial_Temporal_Filters.ipynb](https://github.com/wri/rw-dynamicworld-cd/blob/master/demo_notebooks/MapBiomas_Spatial_Temporal_Filters.ipynb): demonstrating the application of gap filling, spatial filters, temporal filters, frequency filters, and incidence filters following code from the [MapBiomas](https://github.com/mapbiomas-brazil) land cover product.
2. [NeighborhoodPrediction_LC_ChangeDetection_Example.ipynb](https://github.com/wri/rw-dynamicworld-cd/blob/master/demo_notebooks/NeighborhoodPrediction_LC_ChangeDetection_Example.ipynb): allows the user to train a model to predict whether change that occurs from year<sub>i</sub> to year<sub>i+1</sub> stays consistent in year<sub>i+2</sub> using properties of the neighboring pixels as predictor variables.
3. [SeasonalProbabilitiesPrediction_LC_ChangeDetection_Example.ipynb](https://github.com/wri/rw-dynamicworld-cd/blob/master/demo_notebooks/SeasonalProbabilitiesPrediction_LC_ChangeDetection_Example.ipynb): allows the user to train a model to predict whether change that occurs from year<sub>i</sub> to year<sub>i+1</sub> stays consistent in year<sub>i+2</sub> using seasonal class probabilities in year<sub>i</sub> and the difference in seasonal class probabilities from year<sub>i</sub> to year<sub>i+1</sub>.
4. [Probability_Filters.ipynb](https://github.com/wri/rw-dynamicworld-cd/blob/master/demo_notebooks/Probability_Filters.ipynb): allows users to apply probability filters as general rules in post-processing, such as "classify all pixels with built-area probability greater than 0.3 as built-area" or "replace all pixels with forest probability less than 0.2 with the neighboring class". This method was inspired by [Malinowski et al. 2020](https://www.mdpi.com/2072-4292/12/21/3523/htm).
The approach for each of the four methods is defined below.
## MapBiomas Spatial and Temporal Filters:
[MapBiomas](https://mapbiomas.org/en/about-us) is an initiative of the Greenhouse Gas Emissions Estimation System (SEEG) from the Climate Observatory and is produced by a collaborative network of co-creators made up of NGOs, universities and technology companies organized by biomes and cross-cutting themes. The MapBiomas land cover products are 30-meter resolution, Landsat based land cover products that cover Brazil and other regions in South America. You can learn more about the MapBiomas project at their [home page](https://mapbiomas.org/). You can read more of the methodology in the [Algorithm Theoretical Basis Document (ATBD) Page](https://mapbiomas.org/en/download-of-atbds) on their website, including the main ATBD and appendices for each biome and cross-cutting themes.
The ATBD goes on to describe five post-classification filters that are applied. For each filter, the Resource Watch team converted original code from MapBiomas from Javascript to Python when code was available, and otherwise coded the filters as close to the description. You can view the associated demo notebook [MapBiomas_Spatial_Temporal_Filters.ipynb](https://github.com/wri/rw-dynamicworld-cd/blob/master/demo_notebooks/MapBiomas_Spatial_Temporal_Filters.ipynb) and Python modules in [post_classification_filters.py](https://github.com/wri/rw-dynamicworld-cd/blob/master/wri_change_detection/post_classification_filters.py) to view how this method can be applied to the Dynamic World land cover product.
From Section 3.5 of the ATBD, MapBiomas defines post-classification filters,
"[due] to the pixel-based classification method and the long temporal series, a chain of post-classification filters was applied. The first post-classification action involves the application of temporal filters. Then, a spatial filter was applied followed by a gap fill filter. The application of these filters remove classification noise.
These post-classification procedures were implemented in the Google Earth Engine platform"
* 3.5.1. Gap fill
* The Gap fill filter was used to fill possible no-data values. In a long time series of severely cloud-affected regions, it is expected that no-data values may populate some of the resultant median composite pixels. In this filter, no-data values (“gaps”) are theoretically not allowed and are replaced by the temporally nearest valid classification. In this procedure, if no “future” valid position is available, then the no-data value is replaced by its previous valid class. Up to three prior years can be used to fill in persistent no-data positions. Therefore, gaps should only exist if a given pixel has been permanently classified as no-data throughout the entire temporal domain.
* 3.5.2. Spatial filter
* Spatial filter was applied to avoid unwanted modifications to the edges of the pixel groups (blobs), a spatial filter was built based on the “connectedPixelCount” function. Native to the GEE platform, this function locates connected components (neighbours) that share the same pixel value. Thus, only pixels that do not share connections to a predefined number of identical neighbours are considered isolated. In this filter, at least five connected pixels are needed to reach the minimum connection value. Consequently, the minimum mapping unit is directly affected by the spatial filter applied, and it was defined as 5 pixels (~0.5 ha).
* 3.5.3. Temporal filter
* The temporal filter uses sequential classifications in a three-to-five-years unidirectional moving window to identify temporally non-permitted transitions. Based on generic rules (GR), the temporal filter inspects the central position of three to five consecutive years, and if the extremities of the consecutive years are identical but the centre position is not, then the central pixels are reclassified to match its temporal neighbour class. For the three years based temporal filter, a single central position shall exist, for the four and five years filters, two and there central positions are respectively considered.
* Another generic temporal rule is applied to extremity of consecutive years. In this case, a three consecutive years window is used and if the classifications of the first and last years are different from its neighbours, this values are replaced by the classification of its matching neighbours.
* 3.5.4. Frequency filter
* This filter takes into consideration the occurrence frequency throughout the entire time series. Thus, all class occurrence with less than given percentage of temporal persistence (eg. 3 years or fewer out of 33) are filtered out. This mechanism contributes to reducing the temporal oscillation associated to a given class, decreasing the number of false positives and preserving consolidated trajectories. Each biome and cross-cutting themes may have constituted customized applications of frequency filters, see more details in their respective appendices.
* 3.5.5. Incident filter
* An incident filter were applied to remove pixels that changed too many times in the 34 years of time span. All pixels that changed more than eight times and is connected to less than 6 pixels was replaced by the MODE value of that given pixel position in the stack of years. This avoids changes in the border of the classes and helps to stabilize originally noise pixel trajectories. Each biome and cross-cutting themes may have constituted customized applications of incident filters, see more details in its respective appendices.
## Predicting Consistent Change Using Properties of the Neighborhood:
The goal of this method is to predict whether change that occurs in one year is "permanent" or "consistent", which can be defined by the user, using properties of the neighboring pixels as predictor variables in the model. One definition of consistent change is if change occurred from year<sub>i</sub> to year<sub>i+1</sub>, then did not change from year<sub>i+1</sub> to year<sub>i+2</sub>. This approach builds a binary classification model and gathers training data from the land cover classification product. The model uses properties of the neighboring pixels as predictor variables for example how many of the surrounding pixels transitioned, how many of the surrounding pixels are of each class, etc. Neighborhoods are defined using kernels, and there are many options for the kernel including shape and size. The output of the model is a probability ranging from 0 to 1 that the transition is consistent.
You can view the associated demo notebook [NeighborhoodPrediction_LC_ChangeDetection_Example.ipynb](https://github.com/wri/rw-dynamicworld-cd/blob/master/demo_notebooks/NeighborhoodPrediction_LC_ChangeDetection_Example.ipynb). The demo notebook uses Python modules for preprocessing in [preprocessing.py](https://github.com/wri/rw-dynamicworld-cd/blob/master/wri_change_detection/preprocessing.py) and for training a classifier in [gee_classifier.py](https://github.com/wri/rw-dynamicworld-cd/blob/master/wri_change_detection/gee_classifier.py).
## Predicting Consistent Change Using Seasonal Probabilities:
This approach is similar to the one above, except now the predictor variables are seasonal probabilities for each land cover class for year<sub>i</sub> and the difference in seasonal probabilities from year<sub>i</sub> to year<sub>i+1</sub>.
The goal of this method is to predict whether change that occurs in one year is "permanent" or "consistent", which can be defined by the user, using seasonal probabilities of the current year and the following year. One definition of consistent change is if change occurred from year<sub>i</sub> to year<sub>i+1</sub>, then did not change from year<sub>i+1</sub> to year<sub>i+2</sub>. This approach builds a binary classification model and gathers training data from the land cover classification product. The predictor variables can include the seasonal probabilities for year<sub>i</sub> and the difference in seasonal probabilities from year<sub>i</sub> to year<sub>i+1</sub>, but could also include the difference in seasonal probabilities from year<sub>i-1</sub> to year<sub>i</sub>. The user controls the definition of the seasons, including the number of seasons and date ranges of the seasons. The output of the model is a probability ranging from 0 to 1 that the transition is consistent.
You can view the associated demo notebook [SeasonalProbabilitiesPrediction_LC_ChangeDetection_Example.ipynb](https://github.com/wri/rw-dynamicworld-cd/blob/master/demo_notebooks/SeasonalProbabilitiesPrediction_LC_ChangeDetection_Example.ipynb). The demo notebook uses Python modules for preprocessing in [preprocessing.py](https://github.com/wri/rw-dynamicworld-cd/blob/master/wri_change_detection/preprocessing.py) and for training a classifier in [gee_classifier.py](https://github.com/wri/rw-dynamicworld-cd/blob/master/wri_change_detection/gee_classifier.py).
## Post-Classification Probability Filters:
In classifying land cover, pixels are assigned probabilities for each land cover class that the pixel belongs in that land cover class. Oftentimes the land cover class with the highest probability is chosen as the final classification. However some land cover products choose to apply rules to these classification probabilities in order to increase the final accuracy, such as the [10m Sentinel-2 Based European Land Cover map](http://s2glc.cbk.waw.pl/extension) created by [Malinowski et al. 2020](https://www.mdpi.com/2072-4292/12/21/3523/htm).
In this approach, there are two ways the probabilities can be filtered.
1. Greater Than Filter:
This defines a minimum probability for that land cover class to be chosen, regardless of whether there are other classes with higher probabilities. If the class probability is higher than the defined threshold, then the pixel is assigned that class.
For example, all pixels with the "built-area" class probability greater than 0.3 will be reclassed as the "built-area" class.
2. Less Than Filter:
This defines a maximum probability for the land cover class to be replaced by the mode of the neighboring pixels. If the central pixel's class probability is lower than the defined threshold and the central pixel was assigned that class, it will be replaced by the mode of the neighboring pixels.
For example, all pixels that were assigned "snow-cover" with a "snow-cover" class probability less than 0.5 will be reclassed as the mode of the neighboring pixels.
You can view the associated demo notebook [Probability_Filters.ipynb](https://github.com/wri/rw-dynamicworld-cd/blob/master/demo_notebooks/Probability_Filters.ipynb). The demo notebook uses Python modules for preprocessing in [preprocessing.py](https://github.com/wri/rw-dynamicworld-cd/blob/master/wri_change_detection/preprocessing.py) and [post_classification_filters.py](https://github.com/wri/rw-dynamicworld-cd/blob/master/wri_change_detection/post_classification_filters.py) to apply the post-classification probability filters.
| /rw-dynamicworld-cd-0.0.1.tar.gz/rw-dynamicworld-cd-0.0.1/README.md | 0.89112 | 0.992547 | README.md | pypi |
import os
import ee
import numpy as np
import pandas as pd
import random
import itertools
def pretty_print_confusion_matrix_binary(confusion_list):
    """
    Pretty-print a binary confusion matrix list as a labelled DataFrame.

    Args:
        confusion_list (List): a 2x2 list of confusion matrix values, e.g. from
            ee.ConfusionMatrix().getInfo(). Rows correspond to observed (actual)
            values and columns to predicted values.
    Returns:
        A pandas.DataFrame of the confusion matrix with named rows and columns
    """
    # BUG FIX: the original placed input row i into output *column* i
    # ('Predicted_False':confusion_list[:][0] is simply row 0), which
    # transposed the matrix and mislabelled the off-diagonal cells.
    # GEE error matrices have rows = observed and columns = predicted,
    # matching pretty_print_confusion_matrix_multiclass below.
    out_confusion_matrix = pd.DataFrame(confusion_list,
                                        index=['Observed_False', 'Observed_True'],
                                        columns=['Predicted_False', 'Predicted_True'])
    # Preserve the original '_' index label.
    out_confusion_matrix.index.name = '_'
    return out_confusion_matrix
def pretty_print_confusion_matrix_multiclass(gee_error_matrix, class_names):
    """
    Pretty-print a multiclass GEE confusion matrix as a labelled DataFrame.

    Args:
        gee_error_matrix (ee.ConfusionMatrix): server-side confusion matrix
        class_names (List of Strings): list of class names
    Returns:
        A pandas.DataFrame with rows and columns named after the classes, plus
        a 'Sum Observed' total column and a 'Sum Predicted' total row
    """
    # Pull the matrix values down from the server. Rows correspond to the
    # observed (actual) values and columns to the predicted values.
    matrix_values = gee_error_matrix.getInfo()
    labelled = pd.DataFrame(matrix_values, index=class_names, columns=class_names)
    # Row totals (per observed class), then column totals (per predicted class).
    labelled['Sum Observed'] = labelled.sum(axis=1)
    labelled.loc['Sum Predicted'] = labelled.sum()
    return labelled
def buildGridSearchList(parameters, classifier_name):
    """
    Build a list of classifiers to use in kFoldCrossValidation, testing
    multiple parameter combinations similar to scikit-learn's GridSearchCV.

    Args:
        parameters (Dictionary): dictionary mapping ee.Classifier parameter
            names to lists of candidate values
        classifier_name (String): name of the classifier as a string, the last
            part of ee.Classifier.classifier_name
    Returns:
        A list of dictionaries with keys 'Type', 'Params', and 'Classifier'
        for use in kFoldCrossValidation, or None if classifier_name is not
        recognized
    """
    # Expand the parameter grid into a list of kwargs dicts (cartesian product).
    param_keys, param_values = zip(*parameters.items())
    param_list = [dict(zip(param_keys, combo)) for combo in itertools.product(*param_values)]
    classifier_list = None
    if classifier_name == 'smileRandomForest':
        classifier_list = [{'Type': 'smileRandomForest', 'Params': str(x), 'Classifier': ee.Classifier.smileRandomForest(**x)} for x in param_list]
    elif classifier_name == 'smileNaiveBayes':
        classifier_list = [{'Type': 'smileNaiveBayes', 'Params': str(x), 'Classifier': ee.Classifier.smileNaiveBayes(**x)} for x in param_list]
    elif classifier_name == 'libsvm':
        classifier_list = [{'Type': 'libsvm', 'Params': str(x), 'Classifier': ee.Classifier.libsvm(**x)} for x in param_list]
    elif classifier_name == 'gmoMaxEnt':
        classifier_list = [{'Type': 'gmoMaxEnt', 'Params': str(x), 'Classifier': ee.Classifier.gmoMaxEnt(**x)} for x in param_list]
    else:
        # CONSISTENCY FIX: warn on an unknown classifier name instead of
        # failing silently, matching defineClassifier's behavior.
        print('Classifier not recognized')
    return classifier_list
def defineClassifier(parameters, classifier_name):
    """
    Load an ee.Classifier of the requested type with the given parameters.

    Args:
        parameters (Dictionary): dictionary of parameters for the ee.Classifier
        classifier_name (String): name of the classifier as a string, the last
            part of ee.Classifier.classifier_name
    Returns:
        An ee.Classifier object built with the inputted parameters, or None if
        the classifier name is not recognized
    """
    # Dispatch with early returns; only the matched branch touches ee.
    if classifier_name == 'smileRandomForest':
        return ee.Classifier.smileRandomForest(**parameters)
    if classifier_name == 'smileNaiveBayes':
        return ee.Classifier.smileNaiveBayes(**parameters)
    if classifier_name == 'libsvm':
        return ee.Classifier.libsvm(**parameters)
    if classifier_name == 'gmoMaxEnt':
        return ee.Classifier.gmoMaxEnt(**parameters)
    print('Classifier not recognized')
    return None
def kFoldCrossValidation(inputtedFeatureCollection, propertyToPredictAsString, predictors, listOfClassifiers, k, seed=200):
    """
    Run k-fold cross validation over a list of Earth Engine classifiers.

    Args:
        inputtedFeatureCollection (ee.FeatureCollection): sample points with the property of interest
        propertyToPredictAsString (String): the property to predict
        predictors (List of Strings): properties to use in training
        listOfClassifiers (List of dictionaries): classifiers created using buildGridSearchList
        k (Int): the number of folds
        seed (Int): seed used to shuffle the samples into folds
    Returns:
        An ee.FeatureCollection of cross validation results, one feature per classifier, with
        mean training and validation score, parameters, and classifier name
    Much of this code was taken from Devin Routh's [https://devinrouth.com/] work at the Crowther Lab at ETH Zurich [https://www.crowtherlab.com/]
    The code is released under Apache License Version 2.0 [http://www.apache.org/licenses/], and you can learn more about the license here [https://gitlab.ethz.ch/devinrouth/crowther_lab_nematodes/-/blob/master/LICENSE]
    The code was originally written in JavaScript and was converted to Python and adapted for our purposes by Kristine Lister.
    You can find the original code written by Devin in this Earth Engine toolbox: users/devinrouth/toolbox:KFoldCrossValConvolveGapFillEnsemble.js
    """
    # NOTE(review): the fold shuffle below uses ee's randomColumn(seed); this
    # numpy seed appears unused in the function — confirm before removing.
    np.random.seed(seed)
    # Preparation of the inputted feature collection of sample points.
    collLength = inputtedFeatureCollection.size()
    print('Number of Sample Points',collLength.getInfo())
    sampleSeq = ee.List.sequence(1, collLength)
    # Shuffle deterministically (seeded random column) and convert to a list.
    inputtedFCWithRand = inputtedFeatureCollection.randomColumn('Rand_Num', seed).sort('Rand_Num').toList(collLength)
    # Tag each shuffled feature with a 1-based 'Fold_ID' used for fold assignment below.
    preppedListOfFeats = sampleSeq.map(lambda numberToSet: ee.Feature(inputtedFCWithRand.get(ee.Number(numberToSet).subtract(1))).set('Fold_ID', ee.Number(numberToSet)))
    # ———————————————————————————————————————————————————————————————
    # This section divides the feature collection into the k folds.
    averageFoldSize = collLength.divide(k).floor()
    print('Average Fold Size',averageFoldSize.getInfo())
    # Samples left over after k equal folds; appended to the last fold below.
    remainingSampleSize = collLength.mod(k)
    def fold_function(fold):
        # Fold_IDs covered by fold f: [f*size + 1, (f+1)*size], inclusive.
        foldStart = ee.Number(fold).multiply(averageFoldSize).add(1)
        foldEnd = ee.Number(foldStart).add(averageFoldSize.subtract(1))
        foldNumbers = ee.List.sequence(foldStart, foldEnd)
        return ee.List(foldNumbers)
    foldSequenceWithoutRemainder = ee.List.sequence(0, k - 1).map(fold_function)
    # IDs of the leftover samples, starting right after the last fold's range.
    remainingFoldSequence = ee.List.sequence(ee.Number(ee.List(foldSequenceWithoutRemainder.get(foldSequenceWithoutRemainder.length().subtract(1))).get(averageFoldSize.subtract(1))).add(1),
                                             ee.Number(ee.List(foldSequenceWithoutRemainder.get(foldSequenceWithoutRemainder.length().subtract(1))).get(averageFoldSize.subtract(1))).add(ee.Number(remainingSampleSize)))
    # This is a list of lists describing which features will go into each fold.
    listsWithRemaindersAdded = foldSequenceWithoutRemainder.zip(remainingFoldSequence).map(lambda x: ee.List(x).flatten())
    finalFoldLists = listsWithRemaindersAdded.cat(foldSequenceWithoutRemainder.slice(listsWithRemaindersAdded.length()))
    mainFoldList = ee.List.sequence(0, k - 1)
    # Make a feature collection with a number of null features equal to the number of folds.
    # This is done to stay in a collection rather than moving to a list.
    foldFeatures = ee.FeatureCollection(mainFoldList.map(lambda foldNumber: ee.Feature(None).set({'Fold_Number': ee.Number(foldNumber)})))
    def assign_fold_number(feature):
        # Select the features whose Fold_ID is in this fold's ID list and
        # stamp them with the fold number.
        featureNumbersInFold = finalFoldLists.get(ee.Feature(feature).get('Fold_Number'))
        featuresWithFoldNumbers = ee.FeatureCollection(preppedListOfFeats).filter(ee.Filter.inList('Fold_ID', featureNumbersInFold)).map(lambda f: f.set('Fold_Number', ee.Feature(feature).get('Fold_Number')))
        return featuresWithFoldNumbers
    # Use the null FC to filter and assign a fold number to each feature, then flatten it back to a collection.
    featuresWithFoldAssignments = foldFeatures.map(assign_fold_number).flatten()
    # ———————————————————————————————————————————————————————————————
    # Train the data and retrieve the values at the sample points.
    def grid_search(classifier_object):
        # Evaluate one classifier configuration across all k folds.
        classifier = classifier_object.get('Classifier')
        def cross_val(foldFeature):
            # Hold out the current fold for validation; train on the rest.
            trainingFold = featuresWithFoldAssignments.filterMetadata('Fold_Number', 'not_equals', ee.Number(foldFeature.get('Fold_Number')))
            validationFold = featuresWithFoldAssignments.filterMetadata('Fold_Number', 'equals', ee.Number(foldFeature.get('Fold_Number')))
            trained_classifier = classifier.train(features=trainingFold, classProperty=propertyToPredictAsString, inputProperties=predictors)
            trainAccuracy = trained_classifier.confusionMatrix().accuracy()
            validation_points_predicted = validationFold.classify(trained_classifier)
            validationAccuracy = validation_points_predicted.errorMatrix(propertyToPredictAsString, 'classification').accuracy()
            foldFeature = foldFeature.set({'Training Score':trainAccuracy})
            foldFeature = foldFeature.set({'Validation Score':validationAccuracy})
            return foldFeature
        cross_val_results = foldFeatures.map(cross_val)
        # Average the per-fold scores for this classifier configuration.
        average_training_score = cross_val_results.aggregate_mean('Training Score')
        average_validation_score = cross_val_results.aggregate_mean('Validation Score')
        # Package the result as a feature (at a dummy point) so the overall
        # return value can be a FeatureCollection.
        classifier_feature = ee.Feature(ee.Geometry.Point([0,0])).set('Classifier Type',classifier_object.get('Type'))
        classifier_feature = classifier_feature.set('Params',classifier_object.get('Params'))
        classifier_feature = classifier_feature.set('CV Training Score',average_training_score)
        classifier_feature = classifier_feature.set('CV Validation Score',average_validation_score)
        return classifier_feature
    return ee.FeatureCollection([grid_search(x) for x in listOfClassifiers])
import os
import ee
import numpy as np
import pandas as pd
import random
import json
import calendar
import time
#Image bands must be ordered by increasing years
def getYearStackIC(image, band_names, band_indices=(-1, 0, 1)):
    """
    Restack a multi-band (one band per time period) image into an image
    collection where each image's bands are the neighbours selected by
    band_indices around each central band.

    If the requested neighbour bands cannot be selected for a given central
    band (e.g. there is no band before the first or after the last), that
    central band is skipped rather than returned.

    Args:
        image (ee.Image): image where each band represents the land cover
            classification for a time period (e.g. year)
        band_names (List of strings): band names in the image, ordered by
            increasing years
        band_indices (Sequence of integers): relative offsets to collect around
            each band; the default (-1, 0, 1) yields images with bands
            [previous year, current year, following year]
    Returns:
        An ee.ImageCollection with one image per usable central band, each
        carrying an 'OriginalBand' property naming that central band

    Example:
        band_names = [1986, 1987, 1988, 1989], band_indices = (-1, 0, 1)
        returns images for 1987 ([1986,1987,1988]) and 1988 ([1987,1988,1989]);
        1986 and 1989 are skipped because they lack a neighbour on one side.
    """
    out_image_list = []
    for i, band_name in enumerate(band_names):
        # Skip central bands whose neighbours would fall before the first band.
        if all(i + offset >= 0 for offset in band_indices):
            try:
                band_list = [band_names[i + offset] for offset in band_indices]
            except IndexError:
                # Neighbours fall past the last band; skip this central band.
                # (The original used a bare `except:` with a no-op body.)
                continue
            out_image = ee.Image.cat(image.select(band_list))
            out_image = out_image.set(ee.Dictionary({'OriginalBand': band_name}))
            out_image_list.append(out_image)
    return ee.ImageCollection(out_image_list)
#Functions for binary land cover change properties
def LC_OneChange(image):
    '''
    Flag pixels whose class changed from year i to year i+1:
        1 if state(i) != state(i+1)
        0 if state(i) == state(i+1)
    Compatible with outputs from getYearStackIC; the image must carry an
    "OriginalBand" property naming the central year.

    Args:
        image (ee.Image): an image with 2 bands, state(i) and state(i+1)
    Returns:
        An ee.Image as defined above
    '''
    bands = image.bandNames()
    year_i = image.select([bands.get(0)])
    year_i1 = image.select([bands.get(1)])
    original_band = image.get('OriginalBand')
    changed = year_i.neq(year_i1)
    return changed.rename([original_band]).set('OriginalBand', original_band)
def LC_NoChange(image):
    '''
    Flag pixels whose class did NOT change from year i to year i+1:
        1 if state(i) == state(i+1)
        0 if state(i) != state(i+1)
    (The original docstring described the opposite condition; the code uses eq.)
    Compatible with outputs from getYearStackIC; the image must carry an
    "OriginalBand" property naming the central year.

    Args:
        image (ee.Image): an image with 2 bands, state(i) and state(i+1)
    Returns:
        An ee.Image as defined above
    '''
    bands = image.bandNames()
    year_i = image.select([bands.get(0)])
    year_i1 = image.select([bands.get(1)])
    original_band = image.get('OriginalBand')
    unchanged = year_i.eq(year_i1)
    return unchanged.rename([original_band]).set('OriginalBand', original_band)
def LC_Reverse(image):
    '''
    Flag pixels where a change from i to i+1 reversed back to state i in i+2:
        1 if state(i) != state(i+1) and state(i) == state(i+2)
        0 otherwise
    Compatible with outputs from getYearStackIC; the image must carry an
    "OriginalBand" property naming the central year.

    Args:
        image (ee.Image): an image with 3 bands, state(i), state(i+1), and state(i+2)
    Returns:
        An ee.Image as defined above
    '''
    bands = image.bandNames()
    year_i = image.select([bands.get(0)])
    year_i1 = image.select([bands.get(1)])
    year_i2 = image.select([bands.get(2)])
    original_band = image.get('OriginalBand')
    # Changed away in i+1 AND came back to the original class in i+2.
    flagged = year_i.eq(year_i2).bitwise_and(year_i.neq(year_i1))
    return flagged.rename([original_band]).set('OriginalBand', original_band)
def LC_ChangeToAnother(image):
    '''
    Flag pixels that changed from i to i+1 and changed again to a third class
    by i+2:
        1 if state(i) != state(i+1) and state(i) != state(i+2) and state(i+1) != state(i+2)
        0 otherwise
    Compatible with outputs from getYearStackIC; the image must carry an
    "OriginalBand" property naming the central year.

    Args:
        image (ee.Image): an image with 3 bands, state(i), state(i+1), and state(i+2)
    Returns:
        An ee.Image as defined above
    '''
    bands = image.bandNames()
    year_i = image.select([bands.get(0)])
    year_i1 = image.select([bands.get(1)])
    year_i2 = image.select([bands.get(2)])
    original_band = image.get('OriginalBand')
    moved = year_i.neq(year_i1)
    moved_again = year_i1.neq(year_i2)
    ended_elsewhere = year_i.neq(year_i2)
    flagged = moved.bitwise_and(moved_again.bitwise_and(ended_elsewhere))
    return flagged.rename([original_band]).set('OriginalBand', original_band)
def LC_ConsistentChangeOneYear(image):
    '''
    Flag pixels where a change from i to i+1 persisted through i+2:
        1 if state(i) != state(i+1) and state(i+1) == state(i+2)
        0 otherwise
    Compatible with outputs from getYearStackIC; the image must carry an
    "OriginalBand" property naming the central year.

    Args:
        image (ee.Image): an image with 3 bands, state(i), state(i+1), and state(i+2)
    Returns:
        An ee.Image as defined above
    '''
    bands = image.bandNames()
    year_i = image.select([bands.get(0)])
    year_i1 = image.select([bands.get(1)])
    year_i2 = image.select([bands.get(2)])
    original_band = image.get('OriginalBand')
    moved = year_i.neq(year_i1)
    held = year_i1.eq(year_i2)
    flagged = moved.bitwise_and(held)
    return flagged.rename([original_band]).set('OriginalBand', original_band)
def LC_ConsistentChangeTwoYears(image):
    '''
    Flag pixels where a change from i to i+1 persisted through i+2 and i+3:
        1 if state(i) != state(i+1) and state(i+1) == state(i+2) and state(i+1) == state(i+3)
        0 otherwise
    Compatible with outputs from getYearStackIC; the image must carry an
    "OriginalBand" property naming the central year.

    Args:
        image (ee.Image): an image with 4 bands, state(i), state(i+1), state(i+2), and state(i+3)
    Returns:
        An ee.Image as defined above
    '''
    bands = image.bandNames()
    year_i = image.select([bands.get(0)])
    year_i1 = image.select([bands.get(1)])
    year_i2 = image.select([bands.get(2)])
    year_i3 = image.select([bands.get(3)])
    original_band = image.get('OriginalBand')
    moved = year_i.neq(year_i1)
    held = year_i1.eq(year_i2)
    held_again = year_i1.eq(year_i3)
    flagged = moved.bitwise_and(held.bitwise_and(held_again))
    return flagged.rename([original_band]).set('OriginalBand', original_band)
def LC_YearAfter(image):
    '''
    Return the land cover class of the following year, renamed to the central
    year's band name.
    Compatible with outputs from getYearStackIC; the image must carry an
    "OriginalBand" property naming the central year.

    Args:
        image (ee.Image): an image with 2 bands, state(i) and state(i+1)
    Returns:
        An ee.Image as defined above
    '''
    bands = image.bandNames()
    original_band = image.get('OriginalBand')
    following_year = image.select([bands.get(1)])
    return following_year.rename([original_band]).set('OriginalBand', original_band)
def getTemporalProbabilityDifference(probability_collection, date_1_start, date_1_end, date_2_start, date_2_end, reduce_method='median'):
    """
    Compute the difference in land cover probabilities reduced across two time
    periods: (second period reduced image) - (first period reduced image).

    Args:
        probability_collection (ee.ImageCollection): collection of classified
            land cover probability images; each image must have dates set so
            filterDate can select the two periods
        date_1_start (ee.Date): start date of the first period
        date_1_end (ee.Date): end date of the first period
        date_2_start (ee.Date): start date of the second period
        date_2_end (ee.Date): end date of the second period
        reduce_method (String): reduction method for each period:
            - 'mean': reduce with ee.Reducer.mean()
            - anything else (default 'median'): reduce with ee.Reducer.median()
    Returns:
        An ee.Image of probability differences (second - first), with
        'system:time_start'/'system:time_end' set to the second period's range
    """
    if reduce_method == 'mean':
        reducer = ee.Reducer.mean()
    else:
        reducer = ee.Reducer.median()
    # Rename so both reduced images keep the collection's original band names.
    bandNames = probability_collection.first().bandNames()
    first_image = probability_collection.filterDate(date_1_start, date_1_end).reduce(reducer).rename(bandNames)
    second_image = probability_collection.filterDate(date_2_start, date_2_end).reduce(reducer).rename(bandNames)
    output_image = second_image.subtract(first_image)
    output_image = output_image.set('system:time_start', ee.Date(date_2_start))
    output_image = output_image.set('system:time_end', ee.Date(date_2_end))
    # BUG FIX: the original returned a fresh `second_image.subtract(first_image)`
    # here, discarding the date properties it had just set on output_image.
    return output_image
def getSeasonalProbabilities(probability_collection, year, band_names, reduce_method='median', season_list = [['winter',-1,12,1,0,2,'end'],['spring',0,3,1,0,5,'end'],['summer',0,6,1,0,8,'end'],['fall',0,9,1,0,11,'end']], include_difference=True, year_difference=1, image_name='season_probs_{}'):
    """
    Convert daily/monthly/scene-level land cover probabilities to seasonal
    probabilities for `year`, optionally adding the difference between a later
    year's seasons and this year's seasons.

    Args:
        probability_collection (ee.ImageCollection): collection of classified
            land cover probability images; each image must have dates set so
            filterDate can select the season ranges
        year (Int): base year to calculate seasonal probabilities for
        band_names (List of Strings): band names to apply after reducing
        reduce_method (String): 'mean' uses ee.Reducer.mean(); any other value
            (default 'median') uses ee.Reducer.median()
        season_list (List): N x 7 season definitions:
            [name, start_year_position, start_month, start_day,
             end_year_position, end_month, end_day]
            *_year_position offsets the season boundary's year relative to
            `year` (e.g. -1 lets December of the previous year start winter).
            start_day/end_day may be 'end', meaning the last day of that month
            computed for the actual year (leap-year aware).
            Defaults to meteorological winter/spring/summer/fall.
        include_difference (Boolean): whether to also include, per season, the
            difference probabilities[year+year_difference] - probabilities[year].
            Defaults to True.
        year_difference (Int): which later year to difference against when
            include_difference is True. Defaults to 1.
        image_name (String): format string for the output image's system:index,
            formatted with `year`. Defaults to 'season_probs_{}'.
    Returns:
        An ee.Image stacking each season's probabilities (and, if requested,
        each season's difference bands), with system:index set from image_name
        and system:time_start/end set from the LAST season processed.
    """
    season_changes = []
    year = int(year)
    for season_definition in season_list:
        season_name = season_definition[0].lower()
        season_start_year_position = season_definition[1]
        season_start_month = season_definition[2]
        season_start_day = season_definition[3]
        season_end_year_position = season_definition[4]
        season_end_month = season_definition[5]
        season_end_day = season_definition[6]
        season_start_year_firstYear = year + season_start_year_position
        season_end_year_firstYear = year + season_end_year_position
        if include_difference:
            season_start_year_secondYear = year + season_start_year_position + year_difference
            season_end_year_secondYear = year + season_end_year_position + year_difference
        if season_start_day == 'end':
            season_firstYear_start_day = calendar.monthrange(season_start_year_firstYear, int(season_start_month))[1]
            if include_difference:
                # BUG FIX: the original passed season_end_year_firstYear here,
                # computing the month length for the wrong year (matters for
                # February in leap years).
                season_secondYear_start_day = calendar.monthrange(season_start_year_secondYear, int(season_start_month))[1]
        else:
            season_firstYear_start_day = season_start_day
            if include_difference:
                season_secondYear_start_day = season_start_day
        if season_end_day == 'end':
            season_firstYear_end_day = calendar.monthrange(season_end_year_firstYear, int(season_end_month))[1]
            if include_difference:
                # BUG FIX: the original passed season_start_year_secondYear here;
                # the end day must use the second year's END year.
                season_secondYear_end_day = calendar.monthrange(season_end_year_secondYear, int(season_end_month))[1]
        else:
            season_firstYear_end_day = season_end_day
            if include_difference:
                season_secondYear_end_day = season_end_day
        season_firstYear_start = '{}-{}-{}'.format(season_start_year_firstYear, season_start_month, season_firstYear_start_day)
        season_firstYear_end = '{}-{}-{}'.format(season_end_year_firstYear, season_end_month, season_firstYear_end_day)
        if include_difference:
            season_secondYear_start = '{}-{}-{}'.format(season_start_year_secondYear, season_start_month, season_secondYear_start_day)
            season_secondYear_end = '{}-{}-{}'.format(season_end_year_secondYear, season_end_month, season_secondYear_end_day)
        if reduce_method == 'mean':
            season_image = probability_collection.filterDate(season_firstYear_start, season_firstYear_end).reduce(ee.Reducer.mean()).rename(band_names)
            if include_difference:
                diff_image = getTemporalProbabilityDifference(probability_collection, season_firstYear_start,
                                                              season_firstYear_end, season_secondYear_start, season_secondYear_end, reduce_method='mean').rename(band_names)
        else:
            season_image = probability_collection.filterDate(season_firstYear_start, season_firstYear_end).reduce(ee.Reducer.median()).rename(band_names)
            if include_difference:
                diff_image = getTemporalProbabilityDifference(probability_collection, season_firstYear_start,
                                                              season_firstYear_end, season_secondYear_start, season_secondYear_end, reduce_method='median').rename(band_names)
        season_image = season_image.set('system:index', '{}_start'.format(season_name))
        season_changes.append(season_image)
        if include_difference:
            diff_image = diff_image.set('system:index', '{}_difference'.format(season_name))
            season_changes.append(diff_image)
    # Stack all per-season images into a single multi-band image.
    season_changes = ee.ImageCollection(season_changes)
    season_changes = season_changes.toBands()
    season_changes = season_changes.set('system:index', image_name.format(year))
    season_changes = season_changes.set('system:time_start', ee.Date(season_firstYear_start))
    season_changes = season_changes.set('system:time_end', ee.Date(season_firstYear_end))
    return season_changes
def convertDftoFC(feature_collection, property_names):
    """
    Convert an ee.FeatureCollection into a pandas.DataFrame.
    (NOTE: despite the name, this converts FC -> DataFrame; the name is kept
    for backward compatibility.)

    Args:
        feature_collection (ee.FeatureCollection): collection to pull values from
        property_names (List): property names to select from feature_collection
    Returns:
        A pandas.DataFrame with one column per requested property
    """
    out = pd.DataFrame()
    # One aggregate_array/getInfo round trip per property.
    for name in property_names:
        out[name] = feature_collection.aggregate_array(name).getInfo()
    return out
def convertPointsDfToFc(df, projection=None, lat_name='latitude', lon_name='longitude'):
    """
    Convert a DataFrame of point locations into an ee.FeatureCollection.

    Args:
        df (pandas.DataFrame): rows of point records
        projection (ee.Projection): projection of the points
        lat_name (String): column name containing the latitude value
        lon_name (String): column name containing the longitude value
    Returns:
        An ee.FeatureCollection with one feature per row, carrying all of the
        row's columns as properties
    """
    features = []
    for _, record in df.iterrows():
        point = ee.Geometry.Point([record[lon_name], record[lat_name]], projection)
        features.append(ee.Feature(point, record.to_dict()))
    return ee.FeatureCollection(features)
def convertSeriesToFeature(series, projection='EPSG:4326', lat_name='latitude', lon_name='longitude'):
    """
    Convert a Series containing a point location into an ee.Feature.

    Args:
        series (pd.Series): record holding the point's coordinates and properties
        projection (String): projection string.
            NOTE(review): this argument is accepted but deliberately NOT passed
            to ee.Geometry.Point (it was commented out in the original), so the
            point is built in the default projection — confirm before relying
            on it.
        lat_name (String): key containing the latitude value
        lon_name (String): key containing the longitude value
    Returns:
        An ee.Feature containing the Series' values as properties
    """
    # projection intentionally not applied; see NOTE above.
    point = ee.Geometry.Point([series[lon_name], series[lat_name]])
    return ee.Feature(point, series.to_dict())
def getStratifiedSampleBandPoints(image, region, bandName, **kwargs):
    """
    Perform stratified sampling of an image over a given region via
    ee.Image.stratifiedSample.

    Args:
        image (ee.Image): the image to sample
        region (ee.Geometry): the geometry over which to sample
        bandName (String): the band to stratify on
        **kwargs: additional ee.Image.stratifiedSample options; these override
            the defaults (numPoints=1000, classBand=bandName, region=region)
    Returns:
        An ee.FeatureCollection of sampled points
    """
    sample_args = {'numPoints': 1000, 'classBand': bandName, 'region': region}
    sample_args.update(kwargs)
    return image.stratifiedSample(**sample_args)
def getSampleBandPoints(image, region, **kwargs):
    """
    Perform random sampling of an image over a given region via
    ee.Image.sample.

    Args:
        image (ee.Image): the image to sample
        region (ee.Geometry): the geometry over which to sample
        **kwargs: additional ee.Image.sample options; these override the
            defaults (numPixels=1000, region=region)
    Returns:
        An ee.FeatureCollection of sampled points
    """
    sample_args = {'numPixels': 1000, 'region': region}
    sample_args.update(kwargs)
    return image.sample(**sample_args)
def squashScenesToAnnualProbability(probability_collection, years, start_date='{}-01-01', end_date='{}-12-31', method='median',image_name='{}'):
    """
    Reduce daily/monthly/scene-level land cover probability images to one
    annual probability image per year.

    Args:
        probability_collection (ee.ImageCollection): collection of classified
            images where each band is the predicted probability of a class;
            images must have dates set so filterDate can select them by year
        years (List or numpy.Array): years to reduce
        start_date (String): first day of each year as '{}-month-day', with {}
            replaced by the year; defaults to January 1st
        end_date (String): last day of each year as '{}-month-day'; defaults to
            December 31st
        method (String): 'mean' reduces with ee.Reducer.mean(); any other value
            (including the default 'median') reduces with ee.Reducer.median().
            NOTE(review): a 'mode' option is described elsewhere in this file
            but there is no mode branch here — 'mode' silently falls through to
            median; confirm before relying on it.
        image_name (String): format string for output image names; each image's
            system:index is image_name.format(year). Defaults to '{}'.
    Returns:
        An ee.ImageCollection with one annual class-probability image per year,
        each with system:time_start/end set to the year's date range
    """
    predicted_collection = []
    for year in years:
        year = int(year)  # tolerate numpy integers / string years
        year_probs = probability_collection.filterDate(start_date.format(year),end_date.format(year))
        # Reducers append a suffix to band names; capture the originals so the
        # reduced image can be renamed back.
        band_names = year_probs.first().bandNames()
        if method=='mean':
            year_probs = year_probs.reduce(ee.Reducer.mean()).rename(band_names)
        else:
            year_probs = year_probs.reduce(ee.Reducer.median()).rename(band_names)
        year_probs = year_probs.set('system:index',image_name.format(year))
        year_probs = year_probs.set('system:time_start',ee.Date(start_date.format(year)))
        year_probs = year_probs.set('system:time_end',ee.Date(end_date.format(year)))
        predicted_collection.append(year_probs)
    return ee.ImageCollection(predicted_collection)
def squashScenesToAnnualClassification(probability_collection, years, start_date='{}-01-01', end_date='{}-12-31', method='median',image_name='{}'):
    """
    Convert daily, monthly, or scene-by-scene land cover classification probability
    images into one single-band classification image per year.
    Args:
        probability_collection (ee.ImageCollection): collection of classified images where each band is the
            predicted probability of the pixel being in class x; images must be filterable by date
        years (List or numpy.Array): years to reduce over
        start_date (String): first day of the year to reduce over, format '{}-month-day' where {} is replaced
            by the year; defaults to January 1st
        end_date (String): last day of the year to reduce over, same format; defaults to December 31st
        method (String): reduction method:
            - 'mean': mean of the probability images, then ArgMax for the most probable class
            - 'median': median of the probability images, then ArgMax for the most probable class
            - 'mode': ArgMax of every probability image, then the mode of those classes
            Any other value falls back to 'median' (kept from the original behaviour).
        image_name (String): output images are named image_name.format(year); defaults to str(year)
    Returns:
        An ee.ImageCollection where each image has a single band "class" with the most likely class per year
    """
    def _argmax_class(probs_image):
        # 1-based index of the band with the highest value (indices start at 0,
        # hence the add(1)); shared by the mean/median/mode paths below.
        return probs_image.toArray().toFloat().arrayArgmax().arrayGet(0).add(1)
    predicted_collection = []
    for year in years:
        year = int(year)
        year_probs = probability_collection.filterDate(start_date.format(year), end_date.format(year))
        if method == 'mean':
            probs_max = _argmax_class(year_probs.reduce(ee.Reducer.mean()))
        elif method == 'mode':
            # per-scene ArgMax first, then the most frequent class over the year
            year_maxes = year_probs.map(_argmax_class)
            probs_max = year_maxes.reduce(ee.Reducer.mode())
        else:
            # 'median' and any unrecognised method
            probs_max = _argmax_class(year_probs.reduce(ee.Reducer.median()))
        probs_max = probs_max.rename('class')
        probs_max = probs_max.set('system:index', image_name.format(year))
        probs_max = probs_max.set('system:time_start', ee.Date(start_date.format(year)))
        probs_max = probs_max.set('system:time_end', ee.Date(end_date.format(year)))
        predicted_collection.append(probs_max)
    return ee.ImageCollection(predicted_collection)
def probabilityToClassification(image):
    """
    Collapse an image of per-class probabilities into a single classification band.
    Args:
        image (ee.Image): an Image where each band is a land cover class probability
    Returns:
        An ee.Image with one band holding the 1-based index of the band with the
        highest probability. For instance if the third band has the highest
        probability in pixel[i,j], the output value in pixel[i,j] is 3. Ties are
        resolved in favour of the first band.
    Example:
        Input bands Agriculture, Forest, Grassland, Water, Urban with values
        0.1, 0.6, 0.2, 0.5, 0.5 at pixel[i,j] yield output 2, as Forest is the
        second band.
    """
    # Stack the bands into an array image so ArgMax can run per pixel;
    # add 1 because array indices are zero-based.
    stacked = image.toArray().toFloat()
    best_class = stacked.arrayArgmax().arrayGet(0).add(1)
    # Carry the identifying properties of the source image over to the result.
    return (best_class
            .set('system:index', image.get('system:index'))
            .set('system:time_start', image.get('system:time_start'))
            .set('system:time_end', image.get('system:time_end')))
def convertClassificationsToBinaryImages(image, classes_dict):
    """
    Function to convert a single band image of land cover classifications to a multiband image of binary (0,1) bands where each band is the binary
    Args:
        image (ee.Image): an Image with one band that represents the land cover classification
        classes_dict (ee.Dictionary): a dictionary with keys for land cover class names and values for the land cover class values
    Returns:
        An ee.ImageCollection with each image corresponding to a band from the input image, and multiple bands for binary (0,1) variables representing
            if the pixel was in the band class
    Example:
        image is an ee.Image with bands '2016', '2017', '2018
        classes_dict is an ee.Dictionary({'Agriculture':1, 'Forest':2, 'Grassland':3, 'Water':4, 'Urban':5})
        If pixel[i,j] in band '2016' is 2, pixel[i,j] in the image '2016' will have the following band values
            'Agriculture': 0
            'Forest': 1
            'Grassland': 0
            'Water': 0
            'Urban': 0
    """
    # Maps one band of the input image (e.g. one year) to a multiband binary image.
    def lcYearToBinary(band):
        band_image = image.select([band])
        # Maps one class key to a single binary band: 1 where band_image equals
        # that class value, 0 elsewhere.
        def lcYearToBinaryNested(key):
            key = ee.String(key)
            class_image = ee.Image.constant(classes_dict.get(key))
            out_image = ee.Image(band_image.eq(class_image))
            return ee.Image(out_image)
        # One image per class, then flattened into one multiband image whose
        # bands are named after the class keys.
        band_collection = ee.ImageCollection(classes_dict.keys().map(lcYearToBinaryNested))
        band_collection = band_collection.toBands().rename(classes_dict.keys())
        band_collection = band_collection.set('system:index',band)
        band_collection = band_collection.set('system:time_start',image.get('system:time_start'))
        band_collection = band_collection.set('system:time_end',image.get('system:time_end'))
        return band_collection
    imageCollection = ee.ImageCollection(image.bandNames().map(lcYearToBinary))
    return imageCollection
import pandas
import matplotlib.pyplot as plt
import numpy as np
import argparse
import seaborn as sns
import os
import json
import math
from matplotlib.ticker import LogFormatterSciNotation
# Command-line interface and shared figure settings.
# NOTE(review): arguments are parsed at import time, so importing this module
# requires a compatible sys.argv.
parser = argparse.ArgumentParser()
parser.add_argument("csv", type=str, help="Data to plot", nargs="*")
parser.add_argument("--output", type=str, help="Write figures to specified folder")
args = parser.parse_args()
# Figures are written as vector PDFs with a common canvas size (inches).
figure_suffix = ".pdf"
fig_size_x_inches = 10
fig_size_y_inches = 5
def load_data(file):
    """Read a ';'-separated result CSV and augment it with virtual baselines.

    For each raw beta sweep present ("fixed" and its scalar variant
    "fixed_scalar"), virtual methods "individual_best_beta_*" and
    "globally_best_beta_*" are appended for every score column found.
    """
    frame = pandas.read_csv(file, delimiter=";")
    frame.method_params = frame.method_params.fillna(value="")
    # Only keep the score columns that actually exist in this CSV.
    lower_is_better = [s for s in ["voi", "arand"] if s in frame.columns]
    higher_is_better = [s for s in ["accuracy"] if s in frame.columns]
    if "fixed" in frame['method_name'].values:
        for score in lower_is_better:
            frame = add_virtual_opt_methods(frame, score, "min", "fixed", "")
        for score in higher_is_better:
            frame = add_virtual_opt_methods(frame, score, "max", "fixed", "")
    if "fixed_scalar" in frame['method_name'].values:
        for score in lower_is_better:
            frame = add_virtual_opt_methods(frame, score, "min", "fixed_scalar", "scalar_")
        for score in higher_is_better:
            frame = add_virtual_opt_methods(frame, score, "max", "fixed_scalar", "scalar_")
    return frame
def find_methods(data):
    """Return the distinct (method_name, method_params) pairs to plot.

    The raw beta sweeps ("fixed", "fixed_scalar") are excluded; only their
    virtual best-beta counterparts are of interest.
    """
    excluded = ("fixed", "fixed_scalar")
    methods = data[["method_name", "method_params"]].drop_duplicates().reset_index()
    keep = ~methods["method_name"].isin(excluded)
    return methods[keep]
def line_style(t):
    """Dashed lines for the scalar variants, solid lines for everything else."""
    return "--" if "scalar" in t.method_name else "-"
def color(t):
    """Line colour for a method, keyed on substrings of its name.

    Returns a matplotlib colour spec (RGB list or named colour), or None for
    method names not covered here (e.g. "loupas"); matplotlib treats
    color=None as "use the default colour cycle". The original fell off the
    end of the if-chain implicitly; the None return is now explicit.
    """
    name = t.method_name
    if "individual_best" in name:
        return [0.5, 0.5, 0.5]
    elif "globally_best" in name:
        return [0.0, 0.0, 0.0]
    elif "variable_gaussian" in name:
        return [0, 0.66, 1]
    elif "global_gaussian" in name:
        # the Bian variant gets a distinct hue from ours
        if "bian" in name or "ang" in name:
            return [1, 0, 0.3]
        else:
            return [0.33, 0, 1]
    elif "poisson" in name:
        return "green"
    elif "ttest" in name:
        return [1, 0.5, 0]
    return None
def method_params_to_json(p):
    """Parse a method_params cell into a dict; empty/non-string cells give None."""
    if not p or not isinstance(p, str):
        return None
    return json.loads(p)
def format_method(t):
    """Human-readable legend label for a method tuple.

    Unrecognised method names now fall back to the raw name; previously
    `base` was left unbound for them and this raised UnboundLocalError.
    """
    name = t.method_name
    if "individual_best" in name:
        base = "Grady [16] (best $\\beta$ per image)"
    elif "globally_best" in name:
        base = "Grady [16] (globally tuned $\\beta$)"
    elif "variable_gaussian" in name:
        base = "Ours (Gaussian, var. $\\sigma$)"
    elif "global_gaussian" in name:
        if "bian" in name or "ang" in name:
            base = "Bian [4] (Gaussian, const. $\\sigma$)"
        else:
            base = "Ours (Gaussian, const. $\\sigma$)"
    elif "poisson" in name:
        base = "Ours (Poisson)"
    elif "loupas" in name:
        base = "Ours (Loupas)"
    elif "ttest" in name:
        base = "Bian [5] (Gaussian, var. $\\sigma$)"
    else:
        base = name
    if "scalar" in name:
        # raw string: '\c' is not a valid escape (same bytes as before)
        base += r" $||\cdot||_2$"
    return base
def select_noise(data, noise):
    """Rows of *data* whose noise_name column equals *noise*."""
    mask = data["noise_name"] == noise
    return data.loc[mask]
def select_by_parameter(data, parameter, value):
    """Rows whose method_params either lack *parameter* or set it to *value*.

    Rows with empty/unparseable method_params are always kept.
    """
    def keep(cell):
        parsed = method_params_to_json(cell)
        if parsed is None:
            return True
        stored = parsed.get(parameter)
        return stored is None or stored == value
    return data[data['method_params'].map(keep)]
def select_method_by_name(data, name):
    """Rows of *data* whose method_name column equals *name*."""
    mask = data["method_name"] == name
    return data.loc[mask]
def select_method(data, method):
    """Rows matching both the name and the parameter string of *method*."""
    same_name = data["method_name"] == method.method_name
    same_params = data["method_params"] == method.method_params
    return data.loc[same_name & same_params]
def select_subset(data, key, allowed_values):
    """Rows of *data* whose *key* column takes one of *allowed_values*."""
    return data[data[key].isin(allowed_values)]
def show(figure_name):
    """Save the current figure to --output (if given), otherwise display it.

    Saved figures are named <figure_name><figure_suffix> and the current
    axes are cleared afterwards so the next plot starts fresh.
    """
    if not args.output:
        plt.show()
        return
    target = os.path.join(args.output, figure_name + figure_suffix)
    plt.savefig(target, bbox_inches='tight', pad_inches=0)
    plt.cla()
def optimal_betas(data, key, opt="max"):
    """Per (seed, num_labels, noise_name, noise_param) group, return the row
    where *key* is optimal.

    Args:
        data: results frame (typically the "fixed" beta sweep).
        key: score column to optimise.
        opt: "max" or "min".

    Raises:
        ValueError: for any other *opt*. (The original executed
        ``raise f"..."``, which raises a plain string and is itself a
        TypeError at runtime; exceptions must derive from BaseException.)
    """
    beta_agg = data.groupby(["seed", "num_labels", "noise_name", "noise_param"])[key]
    if opt == "max":
        return data.loc[beta_agg.idxmax()]
    elif opt == "min":
        return data.loc[beta_agg.idxmin()]
    raise ValueError(f"Invalid optimization method: {opt}")
def best_beta_map(data, keys, opt="max"):
    """Histogram weights of the per-configuration optimal betas.

    Returns a dict {beta: weight}. For every (seed, num_labels, noise_name,
    noise_param) configuration and every score in *keys*, all betas that tie
    the optimal score share one unit of weight (1/#ties each).
    """
    data = select_method_by_name(data, "fixed")
    betas = {}
    # NOTE(review): `data` is already restricted to "fixed" above; this second
    # selection is redundant but harmless.
    fixed = select_method_by_name(data, "fixed")
    for key in keys:
        opt_betas = optimal_betas(fixed, key, opt)
        # Join back on the score value to recover *all* rows tying the optimum
        # (idxmax/idxmin keep only one row per group).
        # NOTE(review): this relies on exact float equality of the score.
        all_opt_betas = opt_betas.merge(data, on=["seed", "num_labels", "noise_name", "noise_param", "method_name", key], how="inner")
        all_opt_beta_agg = all_opt_betas.groupby(["seed", "num_labels", "noise_name", "noise_param"])
        # weight: 1/#ties per configuration; betas: the tying beta values
        all_opt_beta_agg = all_opt_beta_agg['method_params_y'].agg(
            weight=lambda col: 1.0/len(col),
            betas=lambda col: list(map(lambda v: json.loads(v)['beta'], iter(col)))
        )
        # NOTE(review): reset_index() is not in-place; this result is discarded.
        all_opt_beta_agg.reset_index()
        for _, row in all_opt_beta_agg.iterrows():
            for beta in row.betas:
                if beta not in betas:
                    betas[beta] = 0
                betas[beta] += row['weight']
    return betas
def plot_optimal_beta(data, keys):
    """Plot a log-binned, weighted histogram of the per-configuration optimal betas."""
    betas = best_beta_map(data, keys)
    ax = plt.gca()
    plt.gcf().set_size_inches(fig_size_x_inches, fig_size_y_inches)
    data = select_method_by_name(data, "fixed")
    # every beta value that was swept, to derive the histogram range
    all_betas = list(map(lambda v: json.loads(v)['beta'], iter(data['method_params'])))
    ax.set_xscale('log')
    min_beta = min(all_betas)
    max_beta = max(all_betas)
    # 100 log-spaced bins; clamp the lower edge away from beta == 0
    logbins = np.logspace(np.log10(max(0.00000001, min_beta)),np.log10(max_beta),100)
    ax.hist(betas.keys(), bins=logbins, weights=betas.values())
    ax.set_title("Optimal beta histogram")
    show("best_beta")
def best_beta_by_avg(data, keys, opt="max"):
    """Beta whose scores, averaged over all rows and summed across *keys*,
    are optimal.

    Fixes vs. the original:
      * works on a copy, so the caller's frame no longer gains a 'beta' column,
      * averages only the score columns — ``groupby().mean()`` over the string
        columns (method_params, noise_name) raises on pandas >= 2,
      * raises ValueError instead of ``raise f"..."`` (a plain string, which
        is a TypeError at runtime).
    """
    data = data.copy()
    data['beta'] = data.apply(lambda row: json.loads(row["method_params"])["beta"], axis=1)
    beta_mean = data.groupby(["beta"])[list(keys)].mean()
    x = beta_mean.index
    y = np.zeros(x.size)
    for key in keys:
        y += beta_mean[key]
    if opt == "max":
        best_i = np.argmax(y)
    elif opt == "min":
        best_i = np.argmin(y)
    else:
        raise ValueError(f"Invalid optimization method: {opt}")
    return x[best_i]
def best_beta_by_best_count(data, keys):
    """Beta that most often ties the per-configuration optimum (tie-weighted).

    Bug fix: the original called ``best_beta_map(a, keys)`` with an undefined
    name ``a``, which raised NameError on every call; it now uses the *data*
    argument.
    """
    best_beta_counts = best_beta_map(data, keys)
    return max(best_beta_counts, key=best_beta_counts.get)
def avg_score_per_beta(data, key):
    """Plot the mean *key* score of the fixed-beta sweep as a function of beta.

    Fixes vs. the original: works on a copy (the caller's frame no longer
    gains a 'beta' column) and averages only the score column —
    ``groupby().mean()`` over the string columns raises on pandas >= 2.
    """
    data = select_method_by_name(data, "fixed").copy()
    data['beta'] = data.apply(lambda row: json.loads(row["method_params"])["beta"], axis=1)
    # mean score per distinct beta value (index: beta, values: mean score)
    mean_per_beta = data.groupby(["beta"])[key].mean()
    ax = plt.gca()
    plt.gcf().set_size_inches(fig_size_x_inches, fig_size_y_inches)
    ax.set_xscale('log')
    ax.plot(mean_per_beta.index, mean_per_beta)
    ax.set_title(f"Average {key} per beta")
    show("best_beta_" + str(key))
def box_plot_multi(data, methods, keys):
    """One shared-y figure with a per-method box plot for every score in *keys*."""
    fig, axes = plt.subplots(ncols=len(keys), sharey=True)
    fig.set_size_inches(fig_size_x_inches, fig_size_y_inches)
    fig.subplots_adjust(wspace=0)
    methodnames = [format_method(method) for method in methods.itertuples()]
    for ax, key in zip(axes, keys):
        # one score distribution per method, in legend order
        scores = []
        for method in methods.itertuples():
            m = select_method(data, method)
            s = m[key]
            scores.append(s.to_numpy())
        ax.set(xticklabels=methodnames, xlabel=key)
        ax.tick_params(axis='x', rotation=90)
        ax.boxplot(scores, labels=methodnames)
    show("boxplot")
def box_plot_single(data, methods, key):
    """Box plot of the *key* score distribution for each method."""
    methodnames = [format_method(method) for method in methods.itertuples()]
    ax = plt.gca()
    plt.gcf().set_size_inches(fig_size_x_inches, fig_size_y_inches)
    scores = []
    for method in methods.itertuples():
        m = select_method(data, method)
        dice_scores = m[key]
        scores.append(dice_scores.to_numpy())
    ax.set(xticklabels=methodnames)
    ax.tick_params(axis='x', rotation=90)
    ax.boxplot(scores, labels=methodnames)
    ax.set_title(key)
    show("boxplot_"+key)
def plot_key_over_n(data, methods, key, suffix):
    """Plot the mean *key* score against the number of seed refinements (log y)."""
    ax = plt.gca()
    plt.gcf().set_size_inches(fig_size_x_inches, fig_size_y_inches)
    x = sorted(data["num_labels"].unique().tolist())
    grouped = data.groupby(["method_name", "method_params", "num_labels"])
    means = grouped.mean().reset_index()
    stddevs = grouped.std().reset_index()
    for method in methods.itertuples():
        m = select_method(means, method)
        s = select_method(stddevs, method)
        y = m[key]
        yerr = s[key]
        #ax.errorbar(x, y, yerr=yerr, label=method, fmt='-', capsize=4, elinewidth=0)
        ax.plot(x, y, label=format_method(method), color=color(method), linestyle=line_style(method))
        #ax.fill_between(x, y-yerr, y+yerr, alpha=0.1)
    # NOTE(review): this first title is immediately overwritten below.
    ax.set_title(key)
    ax.legend()
    plt.xlabel("# Seed refinements")
    plt.ylabel(f"Mean {key} score")
    ax.set_title(f"Mean {key} score for # of seed refinements")
    plt.yscale('log')
    # also label minor log ticks so narrow score ranges stay readable
    ax.yaxis.set_minor_formatter(LogFormatterSciNotation(labelOnlyBase=False, minor_thresholds=(2, 0.4)))
    show("mean_over_number_of_refinements_"+key+"_"+suffix)
def plot_median_n_until_below(data, methods, key, suffix, max_val=None, legendpos=None):
    """Plot, per method, the median number of seed refinements needed before
    the *key* score first drops below a threshold, swept from max_val down to 0.

    If max_val is None, it is taken as the largest per-method mean score among
    the plotted methods.
    """
    min_val = 0
    if max_val is None:
        g = data
        g = select_subset(g, "method_name", methods["method_name"])
        # mean over repetitions per refinement count, then over refinement counts
        g = g.groupby(["method_name", "method_params", "num_labels"]).mean()
        g = g.groupby(["method_name", "method_params"]).mean()
        max_val = g.max()[key]
    if legendpos is None:
        legendpos = 'best'
    num_datasets = len(data["seed"].unique())
    median_index = num_datasets//2
    xs = []
    ys = {}
    for method in methods.itertuples():
        ys[method] = []
    d = data
    val_range = np.linspace(max_val, min_val, 100)
    for v in val_range:
        xs.append(v)
        # thresholds decrease monotonically, so rows failing a larger
        # threshold also fail every smaller one — re-filtering the already
        # filtered frame is valid and keeps it shrinking.
        d = d[d[key] < v]
        grouped = d.groupby(["method_name", "method_params", "seed"])
        mins = grouped.min().reset_index()
        for method in methods.itertuples():
            m = select_method(mins, method)
            first_below = m["num_labels"].to_numpy()
            # only record a median while more than half the datasets still
            # reach a score below the threshold
            if len(first_below) > median_index:
                median = sorted(first_below)[median_index]
                ys[method].append(median)
    ax = plt.gca()
    plt.gcf().set_size_inches(fig_size_x_inches, fig_size_y_inches)
    for method in methods.itertuples():
        y = ys[method]
        # curves may end early once a method stops reaching the threshold
        x = xs[:len(y)]
        ax.plot(x, y, label=format_method(method), color=color(method), linestyle=line_style(method))
    plt.xscale('log')
    ax.set_xlim([min(val_range), max(val_range)])
    ax.legend(loc=legendpos)
    plt.xlabel(f"{key} score")
    plt.ylabel("# Seed refinements required")
    ax.set_title(f"Median # of seed refinements until better {key} score")
    show("score_over_median_n_until_below_"+key+"_"+suffix)
def boxplot_n_until_below(data, methods, key, threshold):
    """Box plot, per method, of the refinement count at which the *key* score
    first drops below *threshold* (one sample per seed)."""
    d = data[data[key] < threshold]
    grouped = d.groupby(["method_name", "method_params", "seed"])
    mins = grouped.min().reset_index()
    # NOTE(review): leftover debug output.
    print(mins)
    methodnames = [format_method(method) for method in methods.itertuples()]
    ax = plt.gca()
    plt.gcf().set_size_inches(fig_size_x_inches, fig_size_y_inches)
    scores = []
    for method in methods.itertuples():
        m = select_method(mins, method)
        first_below = m["num_labels"]
        scores.append(first_below.to_numpy())
    ax.set(xticklabels=methodnames)
    ax.tick_params(axis='x', rotation=90)
    ax.boxplot(scores, labels=methodnames)
    ax.set_title(f"num additional until {key} < {threshold}")
    show("boxplot_below_"+key+str(threshold))
def param_plot_methods_mean_std(data, methods, key, xlabel, title, suffix, sizex=None, sizey=None, legendpos=None, xticklabels=None, invertx=False):
    """Plot mean ± std of the *key* score against the noise parameter,
    one curve per method.

    Args:
        data/methods/key: results frame, method tuples and score column.
        xlabel, title, suffix: axis label, figure title and filename suffix.
        sizex, sizey: figure size in inches (defaults to the module-wide size).
        legendpos: matplotlib legend location, defaults to 'best'.
        xticklabels: optional replacement tick labels (e.g. ratio strings).
        invertx: flip the x axis (e.g. for decreasing photon counts).
    """
    if sizex is None:
        sizex = fig_size_x_inches
    if sizey is None:
        sizey = fig_size_y_inches
    if legendpos is None:
        legendpos = 'best'
    fig = plt.gcf()
    fig.set_size_inches(sizex, sizey)
    params = sorted(data["noise_param"].unique().tolist())
    grouped = data.groupby(["method_name", "noise_param", "method_params"])
    means = grouped.mean().reset_index()
    stddevs = grouped.std().reset_index()
    ax = plt.gca()
    for method in methods.itertuples():
        m = select_method(means, method)
        std = select_method(stddevs, method)
        x = params
        y = m[key]
        yerr = std[key]
        #ax.errorbar(x, y, yerr=yerr, label=method, fmt='-', capsize=4, elinewidth=0)
        ax.plot(x, y, label=format_method(method), color=color(method), linestyle=line_style(method))
        # shaded ± one standard deviation band
        ax.fill_between(x, y-yerr, y+yerr, alpha=0.1, color=color(method))
    if xticklabels is not None:
        ax.set_xticklabels(xticklabels)
    if invertx:
        ax.invert_xaxis()
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(key)
    ax.legend(loc=legendpos)
    show("noise_vs_" + str(key) + "_" + suffix)
def param_plot_methods_median_min_max(data, methods, keys):
    """Plot the median of each score in *keys* against the noise parameter,
    with a shaded min/max envelope, one subplot per score (shared y axis)."""
    fig, axes = plt.subplots(ncols=len(keys), sharey=True)
    # plt.subplots returns a bare Axes (not an array) for a single column
    if len(keys) == 1:
        axes = [axes]
    fig.set_size_inches(fig_size_x_inches, fig_size_y_inches)
    fig.subplots_adjust(wspace=0)
    params = sorted(data["noise_param"].unique().tolist())
    grouped = data.groupby(["method_name", "noise_param", "method_params"])
    medians = grouped.median().reset_index()
    mins = grouped.min().reset_index()
    maxs = grouped.max().reset_index()
    for ax, key in zip(axes, keys):
        for method in methods.itertuples():
            med = select_method(medians, method)
            min_e = select_method(mins, method)[key].to_numpy()
            max_e = select_method(maxs, method)[key].to_numpy()
            x = params
            y = med[key]
            #ax.errorbar(x, y, yerr=yerr, label=method, fmt='-', capsize=4, elinewidth=0)
            ax.plot(x, y, label=format_method(method))
            # shaded min/max envelope
            ax.fill_between(x, min_e, max_e, alpha=0.1)
        ax.legend()
    show("noise_vs_" + str(keys))
def add_virtual_opt_methods(all_data, s, opt, method_name, name_suffix):
    """Append two virtual baselines derived from the *method_name* beta sweep:

      * ``individual_best_beta_<suffix><s>``: the best beta chosen per
        configuration,
      * ``globally_best_beta_<suffix><s>``: a single beta tuned on the
        num_labels == 0 runs.

    Fixes vs. the original: uses ``pandas.concat`` (``DataFrame.append`` was
    removed in pandas 2.0) and copies the selected slices before relabelling
    them, so the source rows are not written through a view.
    """
    fixed = all_data.loc[(all_data["method_name"] == method_name)]
    best_betas = optimal_betas(fixed, s, opt).copy()
    best_betas["method_name"] = "individual_best_beta_" + name_suffix + s
    best_betas["method_params"] = "{}"
    all_data = pandas.concat([all_data, best_betas])
    first_data = fixed[fixed["num_labels"] == 0]
    globally_best_beta = best_beta_by_avg(first_data, [s], opt)
    print(f"Selected best beta {globally_best_beta} for {s}")
    globally_best_beta_scores = select_by_parameter(fixed, "beta", globally_best_beta).copy()
    globally_best_beta_scores["method_name"] = "globally_best_beta_" + name_suffix + s
    all_data = pandas.concat([all_data, globally_best_beta_scores])
    return all_data
def without_methods(methods, to_remove):
    """A copy of *methods* without the rows whose method_name is in *to_remove*."""
    keep = ~methods["method_name"].isin(to_remove)
    return methods[keep].copy()
def find_with_best(data, key, noise_name, noise_param_min, noise_param_max, seed, num_labels, modifier=None):
    """Fixed-beta rows that tie the per-image optimum for one configuration.

    Filters *data* to one (num_labels, noise, seed) configuration, reads the
    optimal *key* value from the corresponding virtual best-beta method, and
    returns the raw fixed-sweep rows achieving exactly that score. *modifier*
    (e.g. "scalar") selects the scalar variants of both methods.
    """
    mask = ((data["num_labels"] == num_labels)
            & (data["noise_name"] == noise_name)
            & (data["noise_param"] >= noise_param_min)
            & (data["noise_param"] <= noise_param_max)
            & (data["seed"] == seed))
    subset = data[mask]
    prefix = "individual_best_beta_"
    fixed_name = "fixed"
    if modifier is not None:
        prefix += modifier + "_"
        fixed_name += "_" + modifier
    best = subset[subset["method_name"] == prefix + key]
    print(best[key])
    best_key_val = float(best[key])
    fixed = subset[subset["method_name"] == fixed_name]
    return fixed[fixed[key] == best_key_val]
def spiral_data_plots():
    """Figures for the synthetic spiral dataset: noise level vs. accuracy."""
    all_data = load_data("spiral/result.csv")
    seed = 4
    # Print the fixed-beta rows that reach the per-image optimum for a few
    # selected configurations.
    print(find_with_best(all_data, "accuracy", "poisson", 5, 5, seed, 0))
    print(find_with_best(all_data, "accuracy", "loupas", 0.18, 0.19, seed, 0))
    print(find_with_best(all_data, "accuracy", "gaussian2d", 0.3, 0.3, seed, 0))
    poisson_data = select_noise(all_data, "poisson")
    poisson_methods = find_methods(poisson_data)
    poisson_methods = without_methods(poisson_methods, ["global_gaussian"])
    sizex = 7
    sizey = 5
    # Poisson x axis is labelled with lambda0/lambda1 photon-count ratios and inverted.
    param_plot_methods_mean_std(poisson_data, poisson_methods, "accuracy", "$\\lambda_0/\\lambda_1$", "Poisson Noise", "poisson", sizex, sizey, 'lower left', [f'{int(math.pow(2, i))}/{int(math.pow(2, i+1))}' for i in range(2, 9)], True)
    loupas_data = select_noise(all_data, "loupas")
    loupas_methods = find_methods(loupas_data)
    loupas_methods = without_methods(loupas_methods, ["global_gaussian"])
    param_plot_methods_mean_std(loupas_data, loupas_methods, "accuracy", "$\\sigma$", "Loupas Noise", "loupas", sizex, sizey)
    gaussian2d_data = select_noise(all_data, "gaussian2d")
    gaussian2d_methods = find_methods(gaussian2d_data)
    param_plot_methods_mean_std(gaussian2d_data, gaussian2d_methods, "accuracy", "$\\sigma$", "2D Gaussian Noise", "gaussian2d", sizex, sizey)
def larvae_data():
    """Figures for the FIM larvae dataset: voi/arand vs. seed refinements."""
    all_data = load_data("fim/result.csv")
    methods = find_methods(all_data)
    methods = without_methods(methods, ["global_gaussian"])
    # keep only the virtual baselines matching the score that is plotted
    voi_methods = without_methods(methods, ["individual_best_beta_arand", "globally_best_beta_arand", "individual_best_beta_scalar_arand", "globally_best_beta_scalar_arand"])
    arand_methods = without_methods(methods, ["individual_best_beta_voi", "globally_best_beta_voi", "individual_best_beta_scalar_voi", "globally_best_beta_scalar_voi"])
    seed = 3
    print(find_with_best(all_data, "voi", "fim", 0, 0, seed, 0))
    plot_key_over_n(all_data, voi_methods, "voi", "fim")
    plot_median_n_until_below(all_data, voi_methods, "voi", "fim", legendpos="upper center")
    plot_key_over_n(all_data, arand_methods, "arand", "fim")
    plot_median_n_until_below(all_data, arand_methods, "arand", "fim", legendpos="upper center")
def mri_data():
    """Figures for the fastMRI dataset: voi/arand vs. seed refinements."""
    all_data = load_data("fastmri/result.csv")
    methods = find_methods(all_data)
    methods = without_methods(methods, ["variable_gaussian_scalar"])
    # keep only the virtual baselines matching the score that is plotted
    voi_methods = without_methods(methods, ["individual_best_beta_arand", "globally_best_beta_arand", "individual_best_beta_scalar_arand", "globally_best_beta_scalar_arand", ])
    arand_methods = without_methods(methods, ["individual_best_beta_voi", "globally_best_beta_voi", "individual_best_beta_scalar_voi", "globally_best_beta_scalar_voi"])
    seed = 90
    num_labels = 10
    print(find_with_best(all_data, "voi", "mri", 0, 0, seed, num_labels))
    print(find_with_best(all_data, "voi", "mri", 0, 0, seed, num_labels, "scalar"))
    plot_key_over_n(all_data, voi_methods, "voi", "mri")
    plot_median_n_until_below(all_data, voi_methods, "voi", "mri")
    plot_key_over_n(all_data, arand_methods, "arand", "mri")
    plot_median_n_until_below(all_data, arand_methods, "arand", "mri")
# Generate all figures for the three experiment result sets.
larvae_data()
mri_data()
spiral_data_plots()
from __future__ import absolute_import
from .generic import *
class ScipyStorable(Storable):
    """Storable specialisation for scipy types.

    Sets ``deprivatize`` — presumably so underscore-prefixed attributes are
    considered during (de)serialization; the flag is consumed by the
    Storable machinery in ``.generic`` (TODO confirm).
    """
    def __init__(self, python_type, key=None, handlers=None):
        # None sentinel instead of the original mutable default `handlers=[]`,
        # which was a single list object shared across all calls.
        Storable.__init__(self, python_type, key, [] if handlers is None else handlers)
        self.deprivatize = True
class ScipySpatialStorable(ScipyStorable):
    # NOTE(review): on Python 3 this property falls through and returns None
    # implicitly — presumably "no preferred version"; confirm against the
    # Storable base class in .generic.
    @property
    def default_version(self):
        """Oldest handler version (Python 2 only); None on Python 3."""
        if six.PY2:
            return min([ h.version for h in self.handlers ])
try:
from scipy.sparse import bsr_matrix, coo_matrix, csc_matrix, csr_matrix, \
dia_matrix, dok_matrix, lil_matrix
except ImportError:
sparse_storables = []
else:
# scipy.sparse storable instances mostly for Python2
bsr_exposes = ['shape', 'data', 'indices', 'indptr']
def mk_bsr(shape, data, indices, indptr):
return bsr_matrix((data, indices, indptr), shape=shape)
bsr_handler = handler(mk_bsr, bsr_exposes)
coo_exposes = ['shape', 'data', 'row', 'col']
def mk_coo(shape, data, row, col):
return coo_matrix((data, (row, col)), shape=shape)
coo_handler = handler(mk_coo, coo_exposes)
csc_exposes = ['shape', 'data', 'indices', 'indptr']
def mk_csc(shape, data, indices, indptr):
return csc_matrix((data, indices, indptr), shape=shape)
csc_handler = handler(mk_csc, csc_exposes)
csr_exposes = ['shape', 'data', 'indices', 'indptr']
def mk_csr(shape, data, indices, indptr):
if any(s < 0 for s in shape):
warnings.warn("corrupted shape: {}".format(shape))
warnings.warn("data corruption is known to happen on newly created files and a known fix consists in restarting the Python interpreter session")
return None
if indptr[0] != 0:
warnings.warn("corrupted first pointer (should be 0): {}".format(indptr[0]))
warnings.warn("data corruption is known to happen on newly created files and a known fix consists in restarting the Python interpreter session")
return None
return csr_matrix((data, indices, indptr), shape=shape)
csr_handler = handler(mk_csr, csr_exposes)
dia_exposes = ['shape', 'data', 'offsets']
def mk_dia(shape, data, offsets):
return dia_matrix((data, offsets), shape=shape)
dia_handler = handler(mk_dia, dia_exposes)
# previously
def dok_recommend(*args, **kwargs):
raise TypeErrorWithAlternative('dok_matrix', 'coo_matrix')
dok_handler = StorableHandler(poke=dok_recommend, peek=dok_recommend)
# now
def dok_poke(service, matname, mat, *args, **kwargs):
coo_handler.poke(service, matname, mat.tocoo(), *args, **kwargs)
def dok_peek(*args, **kwargs):
return coo_handler.peek(*args, **kwargs).todok()
dok_handler = StorableHandler(poke=dok_poke, peek=dok_peek)
# previously
def lil_recommend(*args, **kwargs):
raise TypeErrorWithAlternative('lil_matrix', ('csr_matrix', 'csc_matrix'))
lil_handler = StorableHandler(poke=lil_recommend, peek=lil_recommend)
# now
def lil_poke(service, matname, mat, *args, **kwargs):
csr_handler.poke(service, matname, mat.tocsr(), *args, **kwargs)
def lil_peek(*args, **kwargs):
return csr_handler.peek(*args, **kwargs).tolil()
lil_handler = StorableHandler(poke=lil_poke, peek=lil_peek)
sparse_storables = [ScipyStorable(bsr_matrix, handlers=bsr_handler), \
ScipyStorable(coo_matrix, handlers=coo_handler), \
ScipyStorable(csc_matrix, handlers=csc_handler), \
ScipyStorable(csr_matrix, handlers=csr_handler), \
ScipyStorable(dia_matrix, handlers=dia_handler), \
ScipyStorable(dok_matrix, handlers=dok_handler), \
ScipyStorable(lil_matrix, handlers=lil_handler)]
# Storables for scipy.spatial qhull types (Delaunay, ConvexHull, Voronoi).
spatial_storables = []
try:
    import scipy.spatial
except ImportError:
    pass
else:
    # scipy.sparse storable instances for Python2.
    # Python3 can autoserialize ConvexHull and may actually do a better job
    Delaunay_exposes = ['points', 'simplices', 'neighbors', 'equations', 'paraboloid_scale', 'paraboloid_shift', 'transform', 'vertex_to_simplex', 'convex_hull', 'coplanar', 'vertex_neighbor_vertices']
    ConvexHull_exposes = ['points', 'vertices', 'simplices', 'neighbors', 'equations', 'coplanar', 'area', 'volume']
    Voronoi_exposes = ['points', 'vertices', 'ridge_points', 'ridge_vertices', 'regions', 'point_region']
    Delaunay_v1_exposes = [ '_points', 'coplanar', 'equations', 'good', 'max_bound', 'min_bound', 'ndim', 'neighbors', 'npoints', 'nsimplex', 'paraboloid_scale', 'paraboloid_shift', 'simplices', 'vertices' ]
    ConvexHull_v1_exposes = [ '_points', '_vertices', 'area', 'coplanar', 'equations', 'max_bound', 'min_bound', 'ndim', 'neighbors', 'npoints', 'nsimplex', 'simplices', 'volume' ]
    Voronoi_v1_exposes = [ '_points', 'max_bound', 'min_bound', 'ndim', 'npoints', 'point_region', 'regions', 'ridge_points', 'ridge_vertices', 'vertices' ]
    # (type name, v0 exposed attributes, v1 exposed attributes, attributes
    # compared to check a rebuild from `points` reproduced the object)
    _scipy_spatial_types = [
        ('Delaunay', Delaunay_exposes, Delaunay_v1_exposes, ('vertices', 'simplices')),
        ('ConvexHull', ConvexHull_exposes, ConvexHull_v1_exposes, ('vertices', 'equations')),
        ('Voronoi', Voronoi_exposes, Voronoi_v1_exposes, ('regions', 'point_region'))]
    def scipy_spatial_storable(name, exposes, v1_exposes, check):
        """Build a ScipySpatialStorable for one qhull type.

        Deserialization recomputes the object from its `points` and verifies
        the attributes in *check* against the stored values; on mismatch a
        method-free namedtuple fallback carrying the stored attributes is
        returned instead.
        """
        _fallback = namedtuple(name, exposes)
        try:
            _type = getattr(scipy.spatial, name)
        except AttributeError:
            # deprecated location
            _type = getattr(scipy.spatial.qhull, name)
        def _init(_exposes):
            # Returns the peek constructor for one exposes-list version.
            def __init(*args):
                #print(args) # debug
                struct = _type(args[0])
                check_attrs = list(check) # copy
                ok = True
                while ok and check_attrs:
                    attr = check_attrs.pop()
                    try:
                        i = _exposes.index(attr)
                    except ValueError:
                        # try the underscore-toggled spelling of the attribute
                        if attr[0] == '_':
                            attr = attr[1:]
                        else:
                            attr = '_'+attr
                        i = _exposes.index(attr)
                    try:
                        arg = getattr(struct, attr)
                        if isinstance(args[i], list):
                            ok = arg == args[i]
                        else:
                            ok = numpy.all(numpy.isclose(arg, args[i]))
                    except (SystemExit, KeyboardInterrupt):
                        raise
                    except:
                        #print(attr, arg, args[i]) # debug
                        raise # debug
                        #ok = False
                if not ok:
                    warn("object of type '{}' could not be properly regenerated from the `points` argument only; using method-free fallback".format(name), RuntimeWarning)
                    struct = _fallback(*args)
                return struct
            return __init
        handlers = [handler(_init(exposes), exposes, version=(0,))] # Py2
        if six.PY3:
            # on Py3 an auto-generated storable supplies the newer handler
            spatial_peek = lambda _, exposes: default_peek(_type, exposes, excess_attributes=check)
            auto = default_storable(_type, peek=spatial_peek)
            assert not auto.handlers[1:]
            assert handlers[0].version[0] < auto.handlers[0].version[0]
            handlers.append(auto.handlers[0])
        elif six.PY2 and v1_exposes:
            handlers.append(handler(_init(v1_exposes), v1_exposes, version=(1,)))
        return ScipySpatialStorable(_type,
                key='Python.scipy.spatial._qhull.' + _type.__name__,
                handlers=handlers)
    spatial_storables += \
        [ scipy_spatial_storable(*_specs) for _specs in _scipy_spatial_types ]
import os
import glob
import logging
from typing import List
import numpy as np
import matplotlib.pyplot as plt
__all__ = ('write_bp_to_disk', 'write_it_to_disk', 'plot_bp')
logger = logging.getLogger(__name__)
def write_bp_to_disk(result_dir: str,
                     filename: str, bplist: List[float]) -> None:
    """Appends one row of blocking probabilities to a text file

    Args:
        result_dir: directory to write files to (created, including missing
            parents, if it does not exist)
        filename: name of the file to be written
        bplist: list of blocking probability values, as percentages, to be
            dumped to file as one whitespace-separated row
    """
    if not os.path.isdir(result_dir):
        logger.info('Creating result dir in %s', result_dir)
        # makedirs(exist_ok=True) is race-safe and also creates missing
        # parents, unlike the previous os.mkdir
        os.makedirs(result_dir, exist_ok=True)
    filepath = os.path.join(result_dir, filename)
    logger.info('Writing blocking probability results to file "%s"', filepath)
    with open(filepath, 'a') as f:
        for bp in bplist:
            f.write(' %7.3f' % bp)
        f.write('\n')
def write_it_to_disk(result_dir: str,
                     filename: str, itlist: List[float]) -> None:
    """Appends profiling time information to a text file

    Args:
        result_dir: directory to write files to
        filename: name of the file to be written
        itlist: list of times, in seconds, to be dumped to file
    """
    if not os.path.isdir(result_dir):
        logger.info('Creating result dir in %s' % result_dir)
        os.mkdir(result_dir)
    target = os.path.join(result_dir, filename)
    logger.info('Writing simulation profiling times to file "%s"' % target)
    # one formatted field per time value, appended without a trailing newline
    with open(target, 'a') as out:
        out.write(''.join(' %7.7f' % t for t in itlist))
def plot_bp(result_dir: str) -> None:
    """Reads blocking probabilities from files and plots overlapping graph

    Args:
        result_dir: directory that stores the *.bp files to be read
    """
    filelist = []
    data = None  # last file's data; stays None when no .bp files exist
    for f in glob.glob(os.path.join(result_dir, '*.bp')):
        filelist.append(os.path.basename(f))
        data = np.loadtxt(f)
        # 1-D data: a single simulation row; 2-D: one row per simulation
        if data.ndim == 1:
            max_load = data.shape[0] + 1
            plt.plot(np.arange(1, max_load), data, '--')
        else:
            max_load = data.shape[1] + 1
            plt.plot(np.arange(1, max_load), data.mean(axis=0), '--')
        plt.xlim(0.5, max_load - 0.5)
        if data.ndim == 1 or data.shape[0] < 10:
            logger.warning('Remember you should simulate at least 10 times '
                           '(found only %d in %s)' % (data.shape[0], f))
    # guard: the original raised NameError on `data`/`max_load` here when the
    # directory contained no .bp files
    if data is None:
        logger.warning('No .bp files found in "%s"; nothing to plot' % result_dir)
        return
    plt.grid()
    plt.ylabel('Blocking probability (%)', fontsize=18)
    plt.xlabel('Load (Erlangs)', fontsize=18)
    plt.title('Average mean blocking probability', fontsize=20)
    plt.legend(filelist)
    plt.show(block=True)
from typing import Callable, Union
from ..net import Lightpath, Network
from .routing import dijkstra, yen
from .wlassignment import vertex_coloring, first_fit, random_fit
from .ga import GeneticAlgorithm
__all__ = (
'dijkstra_vertex_coloring',
'dijkstra_first_fit',
'yen_vertex_coloring',
'yen_first_fit',
'genetic_algorithm',
)
# genetic algorithm object (global)
# FIXME this looks bad. perhaps this whole script should be a class
ga: Union[GeneticAlgorithm, None] = None
def dijkstra_vertex_coloring(net: Network, k: int) -> Union[Lightpath, None]:
    """Dijkstra routing combined with vertex-coloring wavelength assignment

    Args:
        net: Network topology instance
        k: number of alternate paths (ignored)

    Returns:
        Lightpath: route plus wavelength index on success, ``None`` otherwise

    """
    path = dijkstra(net.a, net.s, net.d)
    color = vertex_coloring(net, Lightpath(path, None))
    # reject when no color was found or it exceeds the channel range
    if color is None or color >= net.nchannels:
        return None
    return Lightpath(path, color)
def dijkstra_first_fit(net: Network, k: int) -> Union[Lightpath, None]:
    """Dijkstra routing combined with first-fit wavelength assignment

    Args:
        net: Network topology instance
        k: number of alternate paths (ignored)

    Returns:
        Lightpath: route plus wavelength index on success, ``None`` otherwise

    """
    path = dijkstra(net.a, net.s, net.d)
    w = first_fit(net, path)
    # reject when no wavelength was found or it exceeds the channel range
    if w is None or w >= net.nchannels:
        return None
    return Lightpath(path, w)
def dijkstra_random_fit(net: Network, k: int) -> Union[Lightpath, None]:
    """Dijkstra routing combined with random-fit wavelength assignment

    Args:
        net: Network topology instance
        k: number of alternate paths (ignored)

    Returns:
        Lightpath: route plus wavelength index on success, ``None`` otherwise

    """
    path = dijkstra(net.a, net.s, net.d)
    w = random_fit(net, path)
    # reject when no wavelength was found or it exceeds the channel range
    if w is None or w >= net.nchannels:
        return None
    return Lightpath(path, w)
def yen_vertex_coloring(net: Network, k: int) -> Union[Lightpath, None]:
    """Yen and vertex coloring combination as RWA algorithm

    Tries each of the k-shortest candidate routes in order and returns the
    first one for which vertex coloring yields a usable wavelength.

    Args:
        net: Network topology instance
        k: number of alternate candidate paths computed by Yen's algorithm

    Returns:
        Lightpath: if successful, returns both route and wavelength index as a
            lightpath, otherwise ``None``

    """
    routes = yen(net.a, net.s, net.d, k)
    for route in routes:
        wavelength = vertex_coloring(net, Lightpath(route, None))
        # a valid wavelength must exist and fit within the channel range
        if wavelength is not None and wavelength < net.nchannels:
            return Lightpath(route, wavelength)
    return None
def yen_first_fit(net: Network, k: int) -> Union[Lightpath, None]:
    """Yen and first-fit combination as RWA algorithm

    Tries each of the k-shortest candidate routes in order and returns the
    first one for which first-fit yields a usable wavelength.

    Args:
        net: Network topology instance
        k: number of alternate candidate paths computed by Yen's algorithm

    Returns:
        Lightpath: if successful, returns both route and wavelength index as a
            lightpath, otherwise ``None``

    """
    routes = yen(net.a, net.s, net.d, k)
    for route in routes:
        wavelength = first_fit(net, route)
        # a valid wavelength must exist and fit within the channel range
        if wavelength is not None and wavelength < net.nchannels:
            return Lightpath(route, wavelength)
    return None
def yen_random_fit(net: Network, k: int) -> Union[Lightpath, None]:
    """Yen and random-fit combination as RWA algorithm

    Tries each of the k-shortest candidate routes in order and returns the
    first one for which random-fit yields a usable wavelength.

    Args:
        net: Network topology instance
        k: number of alternate candidate paths computed by Yen's algorithm

    Returns:
        Lightpath: if successful, returns both route and wavelength index as a
            lightpath, otherwise ``None``

    """
    routes = yen(net.a, net.s, net.d, k)
    for route in routes:
        wavelength = random_fit(net, route)
        # a valid wavelength must exist and fit within the channel range
        if wavelength is not None and wavelength < net.nchannels:
            return Lightpath(route, wavelength)
    return None
def genetic_algorithm_callback(net: Network, k: int) -> Union[Lightpath, None]:
    """Callback function to perform RWA via genetic algorithm

    Requires the module-level `ga` instance to have been initialised by a
    previous call to `genetic_algorithm()`; raises AttributeError on the
    ``None`` default otherwise.

    Args:
        net: Network topology instance
        k: number of alternate paths (forwarded to the GA runner, which
            currently ignores it)

    Returns:
        Lightpath: if successful, returns both route and wavelength index as a
            lightpath, otherwise ``None``

    """
    route, wavelength = ga.run(net, k)
    # a valid wavelength must exist and fit within the channel range
    if wavelength is not None and wavelength < net.nchannels:
        return Lightpath(route, wavelength)
    return None
def genetic_algorithm(pop_size: int, num_gen: int,
                      cross_rate: float, mut_rate: float) -> Callable:
    """Configure the genetic algorithm and return its RWA callback

    Acts like a constructor: it stores a `GeneticAlgorithm` instance in the
    module-level `ga` variable so that the returned callback can reuse it.
    The instance is built only once here, while the callback may be invoked
    many times during simulation, namely one time per number of arriving
    calls times number of loads in Erlangs (calls * loads).

    Note:
        Maybe this entire script should be a class and the `ga` instance
        could be an attribute.

    Args:
        pop_size: number of chromosomes in the population
        num_gen: number of generations towards evolve
        cross_rate: percentage of individuals to perform crossover
        mut_rate: percentage of individuals to undergo mutation

    Returns:
        callable: a callback function that calls the `GeneticAlgorithm`
            runner class, which finally and properly performs the RWA
            procedure

    """
    global ga
    ga = GeneticAlgorithm(pop_size, num_gen, cross_rate, mut_rate)
    return genetic_algorithm_callback
import logging
from typing import List, Tuple, Union
from .pop import Population
from .env import evaluate, select, cross, mutate
from ...net import Network
__all__ = (
'GeneticAlgorithm',
)
logger = logging.getLogger(__name__)
class GeneticAlgorithm(object):
    """Genetic algorithm

    Chromosomes are encoded as routes and fitness is based on a general
    objective function (GOF)'s labels to each wavelength index supported.
    Chromosome creation and mutation procedures are based on depth-first
    search (DFS) operation, crossover is based on the one-point strategy,
    and selection takes place under a k=3-size tournament rule.

    Attributes:
        _population_size: number of individuals that comprise a population
        _num_generations: number of generations a population has to evolve
        _crossover_rate: percentage of individuals to undergo crossover
        _mutation_rate: percentage of individuals to undergo mutation
        _best_fits: collection of best fitness values across generations
    """

    def __init__(self, pop_size: int, num_gen: int,
                 cross_rate: float, mut_rate: float) -> None:
        """Constructor

        Args:
            pop_size: size of the population
            num_gen: number of evolution generations
            cross_rate: crossover rate
            mut_rate: mutation rate
        """
        self._population_size: int = pop_size
        self._num_generations: int = num_gen
        self._crossover_rate: float = cross_rate
        self._mutation_rate: float = mut_rate
        self._best_fits: List[int] = []

    @property
    def bestfit(self) -> List[int]:
        """A list of the best fitness values across all generations"""
        return self._best_fits

    # FIXME this ain't seem right: the setter appends rather than assigns,
    # so the property is effectively a per-generation accumulator
    @bestfit.setter
    def bestfit(self, value: int) -> None:
        self._best_fits.append(value)

    def run(self, net: Network, k: int) -> Tuple[List[int], Union[int, None]]:
        """Run the main genetic algorithm's evolution pipeline

        Args:
            net: Network instance object
            k: number of alternative paths (ignored)

        Returns:
            :obj:`tuple`: route as a list of router indices and wavelength
                index upon RWA success (wavelength is ``None`` on failure)
        """
        # generates initial population with random but valid chromosomes;
        # gives up growing it after 300 consecutive failed attempts
        population = Population()
        trial = 0
        logger.debug('Creating population')
        while len(population) < self._population_size and trial < 300:  # FIXME
            allels = set(range(net.nnodes))  # router indices
            chromosome = population.make_chromosome(net.a, net.s, net.d,
                                                    allels, net.nnodes)
            if chromosome is not None:
                population.add_chromosome(chromosome)
                trial = 0
            else:
                trial += 1
        logger.debug('Initiating GA main loop')
        # one extra iteration so the final offspring is evaluated and sorted
        for generation in range(self._num_generations + 1):
            # perform evaluation (fitness calculation)
            logger.debug('Gen %d: fitness evaluation' % generation)
            for chromosome in population.individuals:
                chromosome.fit = evaluate(net, chromosome)
            # sort in-place by fitness considering λ avail. and route length
            logger.debug('Gen %d: sort by fitness' % generation)
            self.bestfit = population.sort()  # FIXME appends via the setter
            # avoid ugly evaluation and sort after loop
            if generation == self._num_generations:
                break
            # perform selection
            logger.debug('Gen %d: applying selection operator' % generation)
            mating_pool = select(population.copy(), self._population_size)
            # perform crossover over the lucky ones selected to the mating pool
            logger.debug('Gen %d: applying crossover operator' % generation)
            offspring = cross(mating_pool, self._population_size,
                              self._crossover_rate)
            # perform mutation over offspring, overwriting original population
            logger.debug('Gen %d: applying mutation operator' % generation)
            population = mutate(offspring, self._population_size,
                                self._mutation_rate, net)
        route = population.best.genes
        # pick the first wavelength whose GOF label equals 1, i.e. the first
        # λ reported free along the whole route; None when no label is 1
        try:
            wavelength = population.best.fit.labels.tolist().index(1)
        except ValueError:
            wavelength = None
        return route, wavelength
from __future__ import annotations
import copy
import logging
from typing import List, Set, Union
import numpy as np
from .chromo import Chromosome
__all__ = (
'Population',
)
logger = logging.getLogger(__name__)
class Population(object):
    """Class to store a collection of Chromosome objects

    Population is also responsible for sorting chromosomes by their fitness
    values, always keeping track of the best one.
    """

    def __init__(self):
        # individuals are kept in a plain list; `sort()` orders it in place
        self._individuals: List[Chromosome] = []

    # Random-walk (DFS-like) route construction:
    # 1: Start from source node
    # 2: Randomly choose, with equal probability, one of the nodes
    #    that is surely connected to the current node to be the next in path
    # 3: If the chosen node hasn't been visited before, mark it as the
    #    next in the path (gene). Otherwise find another node
    # 4: Do this until the destination node is found
    def make_chromosome(self, mat: np.ndarray,
                        s: int, d: int, allels: Set[int],
                        max_size: int) -> Union[None, Chromosome]:
        """Creates a single Chromosome via DFS-like procedure

        Args:
            mat: Network's adjacency (neighbourhood) matrix — callers pass
                ``net.a``; entries are used only as truth values
            s: source node of the connection
            d: destination node of the connection
            allels: values the chromosome's genes are allowed to assume, which
                basically comprises router indices
            max_size: value to prevent chromosomes from being too long

        Returns:
            Chromosome: returns an individual if the random procedure is
                successful, ``None`` after too many failed restarts
        """
        trial = 0
        reset = 0
        rcurr = s  # step 1
        allels = list(allels)
        # consuming from `allels` guarantees each node is visited at most once
        genes = [allels.pop(allels.index(rcurr))]
        while rcurr != d:  # step 4
            rnext = np.random.choice(allels)  # step 2
            if mat[rcurr][rnext]:  # ensure neighbourhood
                rcurr = rnext  # step 3
                genes.append(allels.pop(allels.index(rcurr)))
                trial = 0
            else:
                trial += 1
                if trial > 50:  # chances per gene to find a valid path
                    # dead end: rewind the walk back to the source and retry
                    while genes[-1] != s:
                        allels.append(genes.pop())
                    trial = 0
                    reset += 1
                    if reset == 5:
                        # too many full restarts: give up on this chromosome
                        return None
        if len(genes) > max_size:
            return None
        return Chromosome(genes)

    def add_chromosome(self, chromosome: Chromosome) -> None:
        """Adds a Chromosome into the population

        Args:
            chromosome: an individual encoded as a Chromosome instance
        """
        self._individuals.append(chromosome)

    def remove_chromosome_by_id(self, _id: int) -> None:
        """Removes a Chromosome from the population

        Args:
            _id: unique index identifying a particular individual
        """
        # ids are unique (itertools.count), so stop at the first match
        for i, chromosome in enumerate(self._individuals):
            if chromosome.id == _id:
                del self._individuals[i]
                break

    @property
    def individuals(self) -> List[Chromosome]:
        """The population as a sequence of Chromosomes"""
        return self._individuals

    @property
    def best(self) -> Chromosome:
        """The fittest chromosome (requires sorting)"""
        return self._individuals[0]

    def copy(self) -> Population:
        """Deep copy of the Population's own instance"""
        return copy.deepcopy(self)

    # https://stackoverflow.com/questions/403421/how-to-sort-a-list-of-objects-based-on-an-attribute-of-the-objects
    def sort(self) -> int:
        """Sorts the population following some criteria

        First orders by λ availability (least congested first), then an
        insertion-sort pass promotes shorter routes among individuals that
        have at least one λ available.

        Returns:
            :obj:`int`: number of fit individuals, i.e., with at least one λ
                available on each link
        """
        # sort according to λ availability .:. least congested paths first
        self.individuals.sort(key=lambda x: x.fit.lambdas, reverse=True)
        # sort according to number of hops .:. shortest paths first
        # (insertion sort applied only to individuals with λ available)
        for j in range(1, len(self.individuals)):
            chrom = self.individuals[j]
            i = j - 1
            if chrom.fit.lambdas:
                while i >= 0 and self.individuals[i].fit.hops > chrom.fit.hops:
                    self.individuals[i + 1] = self.individuals[i]
                    i -= 1
                self.individuals[i + 1] = chrom
        # FIXME this wasn't supposed to be here, yet...
        # compute the number of individuals with at least one λ available
        fit = 0
        for individual in self.individuals:
            if individual.fit.lambdas:
                fit += 1
        return fit

    def __len__(self) -> int:
        return len(self._individuals)
import logging
from itertools import count
from typing import List
import numpy as np
__all__ = (
'Fitness',
'Chromosome'
)
logger = logging.getLogger(__name__)
np.set_printoptions(precision=2)
class Fitness(object):
    """Container for a chromosome's fitness information

    Bundles the three quantities used to rank an individual: the general
    objective function (GOF) labels `L`, the count of wavelengths available
    along the route, and the route length in hops.

    Args:
        labels: general objective function (GOF)'s label `L`
        lambdas: number of wavelengths available on a single link
        hops: number of hops in the route
    """

    def __init__(self, labels: np.ndarray, lambdas: int, hops: int) -> None:
        self._labels = labels
        self._lambdas = lambdas
        self._hops = hops

    @property
    def labels(self) -> np.ndarray:
        """The labels `L` produced by the general objective function (GOF)"""
        return self._labels

    @labels.setter
    def labels(self, value: np.ndarray) -> None:
        self._labels = value

    @property
    def lambdas(self) -> int:
        """The total number of λ available on a single link"""
        return self._lambdas

    @lambdas.setter
    def lambdas(self, value: int) -> None:
        self._lambdas = value

    @property
    def hops(self) -> int:
        """The number of hops comprising a route"""
        return self._hops

    @hops.setter
    def hops(self, value: int) -> None:
        self._hops = value

    def __str__(self) -> str:
        return f'{self.labels} {self.lambdas} {self.hops}'
class Chromosome(object):
    """An individual of the GA population: a route plus its fitness

    Args:
        genes: sequence of router indices comprising a route
        fitness: a Fitness object comprising GOF labels, number of λ
            available, and number of hops
    """

    # class-level counter handing out a unique id per instance
    _ids = count(0)

    def __init__(self, genes: List[int],
                 fitness: Fitness = None) -> None:
        self._id: int = next(self._ids)
        self._genes: List[int] = genes
        self._fitness: Fitness = fitness

    @property
    def id(self) -> int:
        """A unique identifier to the Chromosome object"""
        return self._id

    @property
    def genes(self) -> List[int]:
        """The route encoded as chromosome's genes"""
        return self._genes

    @property
    def fit(self) -> Fitness:
        """The Fitness object"""
        return self._fitness

    @fit.setter
    def fit(self, value: Fitness) -> None:
        self._fitness = value

    def __len__(self) -> int:
        return len(self._genes)

    def __str__(self) -> str:
        return f'{self.genes} ({self.fit})'
import logging
import numpy as np
from .utils import gof
from .chromo import Chromosome, Fitness
from .pop import Population
from ...net import Network
__all__ = (
'evaluate',
'select',
'cross',
'mutate',
)
logger = logging.getLogger(__name__)
def evaluate(net: Network, chromosome: Chromosome) -> Fitness:
    """Compute a chromosome's fitness from the current network state

    Args:
        net: Network instance object
        chromosome: Chromosome object

    Returns:
        Fitness: Fitness object storing GOF labels, number of λ available
            per link, and number of hops in the route

    """
    gof_labels = gof(net.n, net.nchannels, chromosome.genes)
    # a label of exactly 1.0 marks a wavelength free along the route
    free_lambdas = np.count_nonzero(gof_labels == 1.0)
    return Fitness(gof_labels, free_lambdas, len(chromosome))
def select(population: Population,
           pop_size: int, tourn_size: int = 3) -> Population:
    """Tournament selection strategy

    First we choose a random candidate from population. Then, under trials,
    we choose another candidate and compare the two fitnesses. The winner
    becomes the top candidate; loser is eliminated.

    Args:
        population: Population instance object after evaluation
        pop_size: number of individuals in the mating pool pre-crossover
        tourn_size: number of pairwise elimination rounds per tournament

    Returns:
        Population: set of parents ready to mate under crossover operation
    """
    parents = Population()
    while len(parents) < pop_size:
        # candidates[0] always holds the current champion
        candidates = [np.random.choice(population.individuals)]
        for trial in range(tourn_size):
            # draw a challenger (sampling with replacement, so the same
            # chromosome may appear more than once) and keep the one with
            # more wavelengths available
            candidates.append(np.random.choice(population.individuals))
            if candidates[0].fit.lambdas >= candidates[1].fit.lambdas:
                candidates.remove(candidates[1])
            else:
                candidates.remove(candidates[0])
        parents.add_chromosome(candidates[0])
    return parents
def cross(parents: Population, pop_size: int, tc: float) -> Population:
    """One-point crossover strategy

    Args:
        parents: set of chromosomes ready to mate
        pop_size: number of individuals in the offspring after crossover
        tc: crossover rate, which defines the percentage of the selected
            individuals to undergo crossover

    Returns:
        Population: set of children in offspring to undergo mutation operation
    """
    children = Population()
    while len(children) < pop_size:
        # choose parents and make sure they are different ones
        # TODO parents.pop(np.random.randint(len(parents))) ?
        # NOTE(review): np.random.choice may pick the same chromosome twice,
        # in which case both removals hit the same id — confirm intended
        dad = np.random.choice(parents.individuals)
        mom = np.random.choice(parents.individuals)
        parents.remove_chromosome_by_id(dad.id)
        parents.remove_chromosome_by_id(mom.id)
        dad = dad.genes
        mom = mom.genes
        if tc > np.random.random():
            # common nodes between father and mother, excluding s and d
            ridx = []
            for gene in dad[1:len(dad) - 1]:
                if gene in mom[1:len(mom) - 1]:
                    ridx.append([dad.index(gene), mom.index(gene)])
            # randomly choose a common node index to be the crossover point
            if len(ridx):
                rcommon = ridx.pop(np.random.choice(len(ridx)))
                # splice dad's prefix with mom's suffix (and vice-versa)
                son = dad[:rcommon[0]] + mom[rcommon[1]:]
                daughter = mom[:rcommon[1]] + dad[rcommon[0]:]
            else:
                # no common inner node: children are clones of the parents
                son = dad
                daughter = mom
        else:
            # no crossover this round: children are clones of the parents
            son = dad
            daughter = mom
        children.add_chromosome(Chromosome(son))
        children.add_chromosome(Chromosome(daughter))
    return children
def mutate(children: Population, pop_size: int,
           tm: float, net: Network) -> Population:
    """Custom mutation procedure based on DFS-like path creation

    Args:
        children: Chromosome offspring after crossover
        pop_size: number of individuals to compose the new population
        tm: mutation rate, which defines the percentage of individuals to
            undergo mutation
        net: Network instance

    Returns:
        Population: set of chromosomes to compose the new population
    """
    population = Population()
    while len(population) < pop_size:
        normal_chrom = np.random.choice(children.individuals)
        # DO NOT perform mutation if:
        # route has only one link which directly connects source to target
        # FIXME how the hell does it happen? It shouldn't ITFP.
        if len(normal_chrom) == 2:
            population.add_chromosome(normal_chrom)
            continue
        children.remove_chromosome_by_id(normal_chrom.id)
        trans_genes = list(normal_chrom.genes)
        # BUGFIX: the comparison was inverted (`tm < random()`), which made
        # individuals mutate with probability 1 - tm; `tm > random()` matches
        # the crossover operator's `tc > random()` convention and the
        # documented meaning of the mutation rate
        if tm > np.random.random():
            # choose a random mutation point, excluding the first and the last
            geneid = np.random.randint(1, len(normal_chrom) - 1)
            # extract or pop() source and target nodes from chromosome
            start_router = trans_genes.pop(geneid)
            end_router = trans_genes.pop()
            # remove all genes after mutation point
            # FIXME no way this is right
            for gene in range(geneid, len(trans_genes)):
                trans_genes.pop()
            # alphabet: vertices that are not in genes before mutation point
            allels = {start_router, end_router}
            for node in range(net.nnodes):
                if node not in trans_genes:
                    allels.add(node)
            # create a new route R from mutation point to target node
            route = population.make_chromosome(net.a, start_router, end_router,
                                               allels, net.nnodes)
            # check if new route/path is valid
            if route is not None:
                trans_genes += route.genes
            else:
                # fall back to the unmutated route
                trans_genes = list(normal_chrom.genes)
        population.add_chromosome(Chromosome(trans_genes))
    return population
from itertools import count
from typing import Union
import numpy as np
import networkx as nx
# FIXME https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
from ...net import Network, Lightpath
def vertex_coloring(net: Network, lightpath: Lightpath) -> Union[int, None]:
    """Vertex coloring algorithm

    Builds an interference graph H where each active lightpath is a vertex
    and two vertices are adjacent when their routes share a link (in either
    direction), then greedily picks the smallest color (wavelength index)
    not used by the new lightpath's neighbours.

    Args:
        net: Network object
        lightpath: the lightpath we are trying to allocate a λ to

    Returns:
        :obj:`int`: upon wavelength assignment success, return the wavelength
            index to be used on the lightpath
    """
    net.t.add_lightpath(lightpath)  # this is temporary
    # NOTE `nconns` gotta be at least one for this to work. The current
    # route is assumed to be already in the graph when vertex coloring
    # strategies take place, because the route we are trying to find a λ
    # to must be already accounted for as part of the "group" of routes.
    H = np.zeros((net.t.nconns, net.t.nconns), dtype=np.uint16)
    if net.t.nconns > 1:
        # cross compare paths over indices i and j
        for i in range(net.t.nconns):
            for j in range(i + 1, net.t.nconns):
                r1 = net.t.lightpaths[i].r
                r2 = net.t.lightpaths[j].r
                # cross compare routers over indices m and n: mark an edge
                # when both lightpaths traverse the same link either way
                for m in range(1, len(r1)):
                    for n in range(1, len(r2)):
                        if (r1[m - 1] == r2[n - 1] and r1[m] == r2[n]) or \
                                (r1[m] == r2[n - 1] and r1[m - 1] == r2[n]):
                            H[i][j] = 1
                            H[j][i] = 1
    # map each already-colored lightpath (vertex) to its wavelength
    colors = {}
    for i in range(net.t.nconns):
        wavelength = net.t.lightpaths[i].w
        if wavelength is not None:
            colors[i] = wavelength
    net.t.remove_lightpath_by_id(lightpath.id)  # I told you it was temporary
    # The following is like NetworkX's greedy color procedure
    # NOTE(review): nx.from_numpy_matrix was removed in NetworkX 3.0; newer
    # versions require nx.from_numpy_array — confirm the pinned dependency
    G = nx.from_numpy_matrix(H, create_using=nx.Graph())
    if len(G):
        # the vertex being colored is the last one added (our lightpath)
        u = H.shape[0] - 1
        neighbour_colors = {colors[v] for v in G[u] if v in colors}
        # smallest non-negative integer not used by any neighbour
        for color in count():
            if color not in neighbour_colors:
                break
        # assign the node the newly found color
        return color
    else:
        return None  # NOTE I think this is never called
from typing import Dict, List, Tuple
from collections import OrderedDict
from . import Network
class RedeNacionalPesquisa(Network):
    """Rede (Brasileira) Nacional de Pesquisa (Rede Ipê / RNP)

    28-node Brazilian national research network topology.  All connection
    requests use the fixed source DF (index 3) and destination PE (11).
    """

    def __init__(self, ch_n):
        # ch_n: number of wavelength channels per link
        self._name = 'rnp'
        self._fullname = u'Rede Nacional de Pesquisas (Rede Ipê)'
        self._s = 3  # DF
        self._d = 11  # PE
        super().__init__(ch_n,
                         len(self.get_nodes_2D_pos()),
                         len(self.get_edges()))

    def get_edges(self) -> List[Tuple[int, int]]:
        """Get the network links as pairs of node indices"""
        return [
            (0, 1),
            (1, 3), (1, 4),
            (2, 4),
            (3, 4), (3, 7), (3, 17), (3, 19), (3, 25),
            (4, 6), (4, 12),
            (5, 25),
            (6, 7),
            (7, 8), (7, 11), (7, 18), (7, 19),
            (8, 9),
            (9, 10),
            (10, 11),
            (11, 12), (11, 13), (11, 15),
            (13, 14),
            (14, 15),
            (15, 16), (15, 19),
            (16, 17),
            (17, 18),
            (18, 19), (18, 20), (18, 22),
            (20, 21),
            (21, 22),
            (22, 23),
            (23, 24),
            (24, 25), (24, 26),
            (26, 27)
        ]

    def get_nodes_2D_pos(self) -> Dict[str, Tuple[float, float]]:
        """Get position of the nodes on the bidimensional Cartesian plan"""
        return OrderedDict([
            ('RR', (5.00, 3.25)),    # 0
            ('AM', (5.50, 3.75)),    # 1
            ('AP', (8.25, 3.75)),    # 2
            ('DF', (4.00, 5.00)),    # 3
            ('PA', (9.00, 3.00)),    # 4
            ('TO', (3.00, 3.00)),    # 5
            ('MA', (9.00, 4.00)),    # 6
            ('CE', (9.50, 5.00)),    # 7
            ('RN', (10.50, 5.00)),   # 8
            ('PB1', (10.50, 3.00)),  # 9
            ('PB2', (10.50, 1.00)),  # 10
            ('PE', (9.50, 1.00)),    # 11
            ('PI', (9.00, 2.00)),    # 12
            ('AL', (8.00, 2.00)),    # 13
            ('SE', (7.00, 2.00)),    # 14
            ('BA', (6.00, 2.00)),    # 15
            ('ES', (6.00, 1.00)),    # 16
            ('RJ', (4.00, 1.00)),    # 17
            ('SP', (2.00, 1.00)),    # 18
            ('MG', (6.00, 5.50)),    # 19
            ('SC', (1.00, 1.00)),    # 20
            ('RS', (1.00, 2.00)),    # 21
            ('PR', (2.00, 2.00)),    # 22
            ('MS', (2.00, 4.00)),    # 23
            ('MT', (2.00, 5.00)),    # 24
            ('GO', (3.00, 5.00)),    # 25
            ('RO', (1.00, 5.00)),    # 26
            ('AC', (1.00, 4.00))     # 27
        ])
__author__ = 'Cassio Batista'
import logging
from itertools import count
from operator import itemgetter
from typing import Iterable, List, Tuple
import numpy as np
import matplotlib.pyplot as plt
__all__ = (
'Lightpath',
'AdjacencyMatrix',
'WavelengthAvailabilityMatrix',
'TrafficMatrix',
'Network',
)
logger = logging.getLogger(__name__)
class Lightpath(object):
    """Emulates a lightpath composed by a route and a wavelength channel

    Lightpath is pretty much a regular path, but must also specify a
    wavelength index, since WDM optical networks span multiple wavelength
    channels over a single fiber link on the topology.

    A Lightpath object also stores a holding time parameter, which is set
    along the simulation to specify how long the connection may be alive and
    running on network links, and therefore taking up space in the traffic
    matrix, before it finally terminates and resources are deallocated.

    Args:
        route: a list of nodes encoded as integer indices
        wavelength: a single number representing the wavelength channel index
            (may be None for not-yet-assigned lightpaths)
    """
    # https://stackoverflow.com/questions/8628123/counting-instances-of-a-class
    _ids = count(0)

    def __init__(self, route: List[int], wavelength: int):
        self._id: int = next(self._ids)
        self._route: List[int] = route
        self._wavelength: int = wavelength
        self._holding_time: float = 0.0

    @property
    def id(self) -> int:
        """A unique identifier to the Lightpath object"""
        return self._id

    @property
    def r(self) -> List[int]:
        """The path as a sequence of router indices"""
        return self._route

    @property
    def links(self) -> Iterable[Tuple[int, int]]:
        """Network links as a sequence of consecutive node pairs

        For a route [a, b, c] this yields (a, b) and (b, c), covering every
        hop.  BUGFIX: the previous implementation consumed the iterator two
        elements at a time, yielding disjoint pairs such as (a, b), (c, d)
        and silently dropping the links in between (and the last node of
        odd-length routes).
        """
        # pairwise iteration: route[i] paired with route[i + 1]
        yield from zip(self._route, self._route[1:])

    @property
    def w(self) -> int:
        """The wavelength channel index"""
        return self._wavelength

    @property
    def holding_time(self) -> float:
        """Time that the lightpath remains occupying net resources"""
        return self._holding_time

    @holding_time.setter
    def holding_time(self, time: float) -> None:
        self._holding_time = time

    def __len__(self):
        return len(self.r)

    def __str__(self):
        # %s instead of %d so lightpaths with wavelength=None (e.g. the
        # temporary ones built for vertex coloring) can still be printed
        return '%s %s' % (self._route, self._wavelength)
class AdjacencyMatrix(np.ndarray):
    """Boolean 2D matrix that stores network neighbourhood info

    The adjacency matrix is basically a binary, bidimensional matrix that
    informs whether two nodes in a network physical topology are neighbours,
    i.e., share a link connection. This class is a subclass of a NumPy array.

    Args:
        num_nodes: number of nodes in the network, which define a square
            matrix's dimensions
    """

    def __new__(cls, num_nodes: int):
        arr = np.zeros((num_nodes, num_nodes))
        # np.bool (a deprecated alias for the builtin) was removed in
        # NumPy 1.24; np.bool_ is the proper scalar type
        obj = np.asarray(arr, dtype=np.bool_).view(cls)
        return obj

    def __array_finalize__(self, obj):
        # called on view casting and slicing; nothing extra to propagate
        if obj is None:
            return
class WavelengthAvailabilityMatrix(np.ndarray):
    """Boolean 3D matrix that stores network wavelength availability info

    The wavelength availability matrix is a tridimensional, binary matrix
    that stores information on whether a particular wavelength λ is available
    on an optical link (i, j). This class is a subclass of a NumPy array.

    Args:
        num_nodes: number of nodes in the network, which defines two of the
            matrix's dimensions
        num_ch: number of wavelength channels on each link, defining the shape
            of the third dimension of the matrix
    """

    def __new__(cls, num_nodes: int, num_ch: int):
        arr = np.zeros((num_nodes, num_nodes, num_ch))
        # np.bool (a deprecated alias for the builtin) was removed in
        # NumPy 1.24; np.bool_ is the proper scalar type
        obj = np.asarray(arr, dtype=np.bool_).view(cls)
        return obj

    def __array_finalize__(self, obj):
        # called on view casting and slicing; nothing extra to propagate
        if obj is None:
            return
class TrafficMatrix(np.ndarray):
    """3D matrix that stores per-channel traffic (holding time) info

    Despite mirroring the shape of the availability matrix, entries here are
    float32 holding times rather than booleans.  The matrix also carries the
    list of currently running lightpaths and a per-channel usage counter.

    Args:
        num_nodes: number of nodes in the network, which defines two of the
            matrix's dimensions
        num_ch: number of wavelength channels on each link, defining the shape
            of the third dimension of the matrix
    """

    def __new__(cls, num_nodes: int, num_ch: int):
        arr = np.zeros((num_nodes, num_nodes, num_ch))
        obj = np.asarray(arr, dtype=np.float32).view(cls)
        # set extra parameters
        # per-channel usage counter — not updated anywhere in this chunk;
        # presumably incremented by the simulator (TODO confirm)
        obj._usage: np.ndarray = np.zeros(num_ch, dtype=np.uint16)
        # connections currently active in the network
        obj._lightpaths: List[Lightpath] = []
        return obj

    def __array_finalize__(self, obj):
        # called on view casting and slicing: propagate the extra attributes
        # (they may come back as None for views created from plain arrays)
        if obj is None:
            return
        self._usage = getattr(obj, "_usage", None)
        self._lightpaths = getattr(obj, "_lightpaths", None)

    @property
    def lightpaths(self) -> List[Lightpath]:
        """The list of connections (lightpaths) currently running"""
        return self._lightpaths

    @property
    def nconns(self) -> int:
        """The number of connections (lightpaths) currently running"""
        return len(self._lightpaths)

    def add_lightpath(self, lightpath: Lightpath) -> None:
        """Add a lightpath to the list of lightpaths

        Args:
            lightpath: a Lightpath instance
        """
        self._lightpaths.append(lightpath)

    # FIXME this seems silly, but...
    # https://stackoverflow.com/questions/9140857/oop-python-removing-class-instance-from-a-list/9140906
    def remove_lightpath_by_id(self, _id: int) -> None:
        """Remove a lightpath from the list of currently running connections

        Args:
            _id: the unique identifier of a lightpath
        """
        # ids are unique (itertools.count), so stop at the first match
        for i, lightpath in enumerate(self.lightpaths):
            if lightpath.id == _id:
                del self._lightpaths[i]
                break
class Network(object):
    """Network base class

    Holds network properties such as adjacency, wavelength-availability and
    traffic graph matrices, fixed source and destination nodes for all
    connections, and the number of λ channels per link.

    Subclasses must override `get_edges()` and `get_nodes_2D_pos()`, and are
    expected to set `_name`, `_s` (source) and `_d` (destination) before
    invoking this constructor (see e.g. `RedeNacionalPesquisa`).

    Args:
        num_channels: number of wavelength channels per link
        num_nodes: number of router nodes in the network
        num_links: number of links (edges) in the network
    """

    def __init__(self,
                 num_channels: int, num_nodes: int, num_links: int) -> None:
        self._num_channels = num_channels
        self._num_nodes = num_nodes
        self._num_links = num_links
        self._n = WavelengthAvailabilityMatrix(self._num_nodes,
                                               self._num_channels)
        self._a = AdjacencyMatrix(self._num_nodes)
        self._t = TrafficMatrix(self._num_nodes, self._num_channels)
        # fill in wavelength availability matrix: each λ on each link starts
        # randomly as either free (1) or busy (0); symmetric in (i, j)
        for (i, j) in self.get_edges():
            for w in range(self._num_channels):
                availability = np.random.choice((0, 1))
                self._n[i][j][w] = availability
                self._n[j][i][w] = self._n[i][j][w]
        # fill in adjacency matrix (symmetric: links are bidirectional)
        for (i, j) in self.get_edges():
            neigh = 1
            self._a[i][j] = neigh
            self._a[j][i] = self._a[i][j]
        # fill in traffic matrix: busy channels (n == 0) get a zero holding
        # time, free ones a random time in [0, 1)
        # FIXME when updating the traffic matrix via holding time parameter,
        # these random time attributions may seem not the very smart ones,
        # since decreasing values by until_next leads T to be uneven and
        # unbalanced
        for (i, j) in self.get_edges():
            for w in range(self._num_channels):
                random_time = self._n[i][j][w] * np.random.rand()
                self._t[i][j][w] = random_time
                self._t[j][i][w] = self._t[i][j][w]

    # Children are responsible for overriding this method
    def get_edges(self):
        raise NotImplementedError

    # Children are responsible for overriding this method
    def get_nodes_2D_pos(self):
        raise NotImplementedError

    @property
    def n(self) -> np.ndarray:
        """The wavelength availability matrix graph"""
        return self._n

    @property
    def a(self) -> np.ndarray:
        """The adjacency matrix graph"""
        return self._a

    @property
    def t(self) -> np.ndarray:
        """The traffic matrix"""
        return self._t

    @property
    def s(self) -> int:
        """The source node (attribute `_s` is set by the subclass)"""
        return self._s

    @property
    def d(self) -> int:
        """The destination node (attribute `_d` is set by the subclass)"""
        return self._d

    @property
    def name(self) -> str:
        """The short name tag identifier of the network topology"""
        return self._name

    @property
    def nchannels(self) -> int:
        """The number of wavelength channels per fiber link"""
        return self._num_channels

    @property
    def nnodes(self) -> int:
        """The number of router nodes (vertices) in the network"""
        return self._num_nodes

    @property
    def nlinks(self) -> int:
        """The number of links (edges) in the network"""
        return self._num_links

    def plot_topology(self, bestroute: List[int] = None) -> None:
        """Plots the physical topology in a 2D Cartesian plan

        Args:
            bestroute: a route encoded as a list of router indices to be
                highlighted in red over some network edges
        """
        fig, ax = plt.subplots()
        ax.grid()
        # define vertices or nodes as points in 2D cartesian plan
        # define links or edges as node index ordered pairs in cartesian plan
        links = self.get_edges()
        nodes = self.get_nodes_2D_pos()
        node_coords = list(nodes.values())  # get only 2D coordinates
        # draw edges before vertices
        for (i, j) in links:
            x = (node_coords[i][0], node_coords[j][0])
            y = (node_coords[i][1], node_coords[j][1])
            ax.plot(x, y, 'k', lw=2)
        # highlight in red the shortest path with wavelength(s) available
        # a.k.a. 'best route'
        if bestroute is not None:
            for i in range(len(bestroute) - 1):
                rcurr, rnext = bestroute[i], bestroute[i + 1]
                x = (node_coords[rcurr][0], node_coords[rnext][0])
                y = (node_coords[rcurr][1], node_coords[rnext][1])
                ax.plot(x, y, 'r', lw=3)
        # draw vertices (white circles) with their labels centred inside
        for label, (i, j) in nodes.items():
            ax.plot(i, j, 'wo', ms=25, mec='k')
            ax.annotate(label, xy=(i, j), ha='center', va='center')
        # https://stackoverflow.com/questions/13145368/find-the-maximum-value-in-a-list-of-tuples-in-python
        xlim = np.ceil(max(node_coords, key=itemgetter(0))[0]) + 2
        ylim = np.ceil(max(node_coords, key=itemgetter(1))[1]) + 2
        if self.name == 'nsf':
            xlim -= 1  # FIXME gambiarra, hehe. NSF needs redrawing
        # adjust values over both x and y axis
        ax.set_xticks(np.arange(xlim))
        ax.set_yticks(np.arange(ylim))
        # finally, show the plotted graph
plt.show(block=True) | /rwa_wdm-0.2.3.tar.gz/rwa_wdm-0.2.3/rwa_wdm/net/net.py | 0.900825 | 0.617686 | net.py | pypi |
from typing import Dict, List, Tuple
from collections import OrderedDict
from . import Network
class Italian(Network):
    """Italian network topology (21 nodes, 36 links)."""

    def __init__(self, ch_n):
        self._name = 'italian'
        self._fullname = u'Italian'
        self._s = 0  # FIXME
        self._d = 12
        super().__init__(ch_n,
                         len(self.get_nodes_2D_pos()),
                         len(self.get_edges()))

    def get_edges(self) -> List[Tuple[int, int]]:
        """Get edges as a list of tuples of pairs of nodes"""
        return [
            (0, 1), (0, 2),
            (1, 2), (1, 3), (1, 4),
            (2, 7), (2, 8), (2, 9),
            (3, 4), (3, 5),
            (4, 6), (4, 7),
            (5, 6),
            (6, 7),
            (7, 9), (7, 10),
            (8, 9), (8, 12),
            (9, 11), (9, 12),
            (10, 13),
            (11, 12), (11, 13),
            (12, 14), (12, 20),
            (13, 14), (13, 15),
            (14, 15), (14, 16), (14, 18), (14, 19),
            (15, 16),
            (16, 17),
            (17, 18),
            (18, 19),
            (19, 20)
        ]

    def get_nodes_2D_pos(self) -> Dict[str, Tuple[float, float]]:
        """Get position of the nodes on the bidimensional Cartesian plan"""
        # BUG FIX: every entry previously used the same key 'x', so the
        # OrderedDict collapsed to a single node and the node count passed
        # to Network.__init__ was 1 instead of 21. Use distinct labels,
        # following the numbering convention of the other topologies.
        return OrderedDict([
            ('0', (0.70, 6.50)),   # 0
            ('1', (1.80, 7.00)),   # 1
            ('2', (1.80, 6.00)),   # 2
            ('3', (3.00, 7.70)),   # 3
            ('4', (2.70, 6.80)),   # 4
            ('5', (4.00, 6.70)),   # 5
            ('6', (3.30, 6.30)),   # 6
            ('7', (2.90, 5.70)),   # 7
            ('8', (2.00, 5.00)),   # 8
            ('9', (2.90, 5.00)),   # 9
            ('10', (3.80, 5.20)),  # 10
            ('11', (3.20, 4.50)),  # 11
            ('12', (2.50, 3.50)),  # 12
            ('13', (3.90, 4.00)),  # 13
            ('14', (3.70, 2.50)),  # 14
            ('15', (4.90, 3.00)),  # 15
            ('16', (4.50, 2.00)),  # 16
            ('17', (4.70, 1.00)),  # 17
            ('18', (3.80, 0.50)),  # 18
            ('19', (2.70, 0.60)),  # 19
            ('20', (1.20, 1.50))   # 20
        ])
from typing import Dict, List, Tuple
from collections import OrderedDict
from . import Network
class AdvancedResearchProjectsAgency(Network):
    """U.S. Advanced Research Projects Agency (ARPANET)"""

    def __init__(self, ch_n):
        self._name = 'arpa'
        self._fullname = u'Advanced Research Projects Agency'
        self._s = 0  # FIXME
        self._d = 12
        super().__init__(ch_n,
                         len(self.get_nodes_2D_pos()),
                         len(self.get_edges()))

    def get_edges(self) -> List[Tuple[int, int]]:
        """Get edges as a list of tuples of pairs of nodes"""
        # adjacency kept as "node -> higher-numbered neighbours" and then
        # flattened, preserving the original pair ordering exactly
        neighbours = {
            0: (1, 2, 19),
            1: (2, 3),
            2: (4,),
            3: (4, 5),
            4: (6,),
            5: (6, 7),
            6: (9,),
            7: (8, 9, 10),
            8: (9, 19),
            9: (15,),
            10: (11, 12),
            11: (12,),
            12: (13,),
            13: (14, 16),
            14: (15,),
            15: (17, 18),
            16: (17, 19),
            17: (18,),
        }
        return [(u, v) for u, vs in neighbours.items() for v in vs]

    def get_nodes_2D_pos(self) -> Dict[str, Tuple[float, float]]:
        """Get position of the nodes on the bidimensional Cartesian plan"""
        coords = [
            (1.80, 5.70), (2.80, 5.00), (3.40, 6.30), (3.40, 5.50),
            (4.50, 5.60), (4.70, 4.60), (5.30, 4.80), (3.60, 4.40),
            (2.20, 4.00), (4.80, 3.50), (2.40, 2.60), (2.50, 1.50),
            (1.40, 2.30), (1.80, 3.20), (3.70, 2.70), (5.20, 2.50),
            (0.80, 3.90), (1.20, 0.50), (3.60, 0.80), (0.80, 5.50),
        ]
        # node labels are the stringified indices, in index order
        return OrderedDict((str(i), xy) for i, xy in enumerate(coords))
<p align="center">
<img width="350px" src="docs/img/rware.png" align="center" alt="Multi-Robot Warehouse (RWARE)" />
<p align="center">A multi-agent reinforcement learning environment</p>
</p>
[](https://GitHub.com/Naereen/StrapDown.js/graphs/commit-activity)
[](https://github.com/Naereen/StrapDown.js/blob/master/LICENSE)
<h1>Table of Contents</h1>
- [Environment Description](#environment-description)
- [What does it look like?](#what-does-it-look-like)
- [Action Space](#action-space)
- [Observation Space](#observation-space)
- [Dynamics: Collisions](#dynamics-collisions)
- [Rewards](#rewards)
- [Environment Parameters](#environment-parameters)
- [Naming Scheme](#naming-scheme)
- [Custom layout](#custom-layout)
- [Installation](#installation)
- [Getting Started](#getting-started)
- [Please Cite](#please-cite)
# Environment Description
The multi-robot warehouse (RWARE) environment simulates a warehouse with robots moving and delivering requested goods. The simulator is inspired by real-world applications, in which robots pick-up shelves and deliver them to a workstation. Humans access the content of a shelf, and then robots can return them to empty shelf locations.
The environment is configurable: it allows for different sizes (difficulty), number of agents, communication capabilities, and reward settings (cooperative/individual). Of course, the parameters used in each experiment must be clearly reported to allow for fair comparisons between algorithms.
## What does it look like?
Below is an illustration of a small (10x20) warehouse with four trained agents. Agents have been trained with the SEAC algorithm [[2](#please-cite)]. This visualisation can be achieved using the `env.render()` function as described later.
<p align="center">
<img width="450px" src="docs/img/rware.gif" align="center" alt="Multi-Robot Warehouse (RWARE) illustration" />
</p>
## Action Space
In this simulation, robots have the following discrete action space:
A={ Turn Left, Turn Right, Forward, Load/Unload Shelf }
The first three actions allow each robot only to rotate and move forward. Loading/Unloading only works when an agent is beneath a shelf on one of the predesignated locations.
## Observation Space
The observation of an agent is partially observable and consists of a 3x3 (configurable) square centred on the agent. Inside this limited grid, all entities are observable:
- The location, the rotation and whether the agent is carrying a shelf.
- The location and rotation of other robots.
- Shelves and whether they are currently in the request queue.
## Dynamics: Collisions
The dynamics of the environment are also of particular interest. Like a real, 3-dimensional warehouse, the robots can move beneath the shelves. Of course, when the robots are loaded, they must use the corridors, avoiding any standing shelves.
Any collisions are resolved in a way that allows for maximum mobility. When two or more agents attempt to move to the same location, we prioritise the one that also blocks others. Otherwise, the selection is done arbitrarily. The visuals below demonstrate the resolution of various collisions.
Example 1 | Example 2 | Example 3
:-------------------------:|:-------------------------:|:-------------------------:
 |  | 
## Rewards
At any given time, a set number of shelves R is requested. When a requested shelf is brought to a goal location, another shelf is uniformly sampled and added to the current requests. Agents are rewarded for successfully delivering a requested shelf to a goal location, with a reward of 1. A significant challenge in these environments is for agents to deliver requested shelves while also finding an empty location to return the previously delivered shelf. Having multiple steps between deliveries leads to a very sparse reward signal.
# Environment Parameters
The multi-robot warehouse task is parameterised by:
- The size of the warehouse which is preset to either tiny (10x11), small (10x20), medium (16x20), or large (16x29).
- The number of agents N.
- The number of requested shelves R. By default R=N, but easy and hard variations of the environment use R = 2N and R = N/2, respectively.
Note that R directly affects the difficulty of the environment. A small R, especially on a larger grid, dramatically affects the sparsity of the reward and thus exploration: randomly bringing the correct shelf becomes increasingly improbable.
## Naming Scheme
While RWARE allows fine tuning of multiple parameters when using the Warehouse class, it also registers multiple default environments with Gym for simplicity.
The registered names look like `rware-tiny-2ag-v1` and might seem cryptic in the beginning, but it is not actually complicated. Every name always starts with rware. Next, the map size is appended as -tiny, -small, -medium, or -large. The number of robots in the map is selected as Xag with X being a number larger than one (e.g. -4ag for 4 agents). A difficulty modifier is optionally appended in the form of -easy or -hard, making requested shelves twice or half the number of agents (see section Rewards). Finally -v1 is the version as required by OpenAI Gym. At the time of writing all environments are v1, but we will increase it during changes or bugfixes.
A few examples:
```python
env = gym.make("rware-tiny-2ag-v1")
env = gym.make("rware-small-4ag-v1")
env = gym.make("rware-medium-6ag-hard-v1")
```
Of course, more settings are available, but have to be changed during environment creation. For example:
```python
env = gym.make("rware-tiny-2ag-v1", sensor_range=3, request_queue_size=6)
```
## Custom layout
You can design a custom warehouse layout with the following:
```python
layout = """
........
...x....
..x.x...
.x...x..
..x.x...
...x....
.g....g.
"""
env = gym.make("rware:rware-tiny-2ag-v1", layout=layout)
```
This will transform "x"s to shelves and "g"s to goal locations with a result like the one below:
<p align="center">
<img width="300px" src="docs/img/rware_round.png" align="center" alt="Multi-Robot Warehouse (RWARE) illustration" />
</p>
A detailed explanation of all parameters can be found [here](https://github.com/semitable/robotic-warehouse/blob/4307b1fe3afa26de4ca4003fd04ab1319879832a/robotic_warehouse/warehouse.py#L132)
# Installation
Assuming you have Python3 (preferably on a virtual environment: venv or Anaconda) installed, you can use PyPI:
```sh
pip install rware
```
If you prefer to have the code available and be able to edit it, you can use Git to download and install it:
```sh
git clone git@github.com:uoe-agents/robotic-warehouse.git
cd robotic-warehouse
pip install -e .
```
# Getting Started
RWARE was designed to be compatible with Open AI's Gym framework.
Creating the environment is done exactly as one would create a Gym environment:
```python
import gym
import rware
env = gym.make("rware-tiny-2ag-v1")
```
You can even bypass the `import` statement with Gym, and directly use:
```python
import gym
env = gym.make("rware:rware-tiny-2ag-v1")
```
The `rware:` in the beginning of the environment name tells Gym to import the respective package.
The number of agents, the observation space, and the action space are accessed using:
```python
env.n_agents # 2
env.action_space # Tuple(Discrete(5), Discrete(5))
env.observation_space # Tuple(Box(XX,), Box(XX,))
```
The returned spaces are from the Gym library (`gym.spaces`) Each element of the tuple corresponds to an agent, meaning that `len(env.action_space) == env.n_agents` and `len(env.observation_space) == env.n_agents` are always true.
The reset and step functions again are identical to Gym:
```python
obs = env.reset() # a tuple of observations
actions = env.action_space.sample() # the action space can be sampled
print(actions) # (1, 0)
n_obs, reward, done, info = env.step(actions)
print(done) # [False, False]
print(reward) # [0.0, 0.0]
```
which leaves us with the only difference from Gym: the rewards and the done flag are lists, and each element corresponds to the respective agent.
Finally, the environment can be rendered for debugging purposes:
```python
env.render()
```
and should be closed before terminating:
```python
env.close()
```
# Please Cite
If you use this environment, consider citing
1. A comparative evaluation of MARL algorithms that includes this environment
```
@inproceedings{papoudakis2021benchmarking,
title={Benchmarking Multi-Agent Deep Reinforcement Learning Algorithms in Cooperative Tasks},
author={Georgios Papoudakis and Filippos Christianos and Lukas Schäfer and Stefano V. Albrecht},
booktitle = {Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks (NeurIPS)},
year={2021},
url = {http://arxiv.org/abs/2006.07869},
openreview = {https://openreview.net/forum?id=cIrPX-Sn5n},
code = {https://github.com/uoe-agents/epymarl},
}
```
2. A method that achieves state-of-the-art performance in the robotic warehouse task
```
@inproceedings{christianos2020shared,
author = {Christianos, Filippos and Sch\"{a}fer, Lukas and Albrecht, Stefano},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
pages = {10707--10717},
publisher = {Curran Associates, Inc.},
title = {Shared Experience Actor-Critic for Multi-Agent Reinforcement Learning},
url = {https://proceedings.neurips.cc/paper/2020/file/7967cc8e3ab559e68cc944c44b1cf3e8-Paper.pdf},
volume = {33},
year = {2020}
}
```
| /rware-1.0.3.tar.gz/rware-1.0.3/README.md | 0.755637 | 0.971293 | README.md | pypi |
import math
from typing import Dict, Optional
from rweb_datatable.html import Node
from rweb_datatable.models import Table, TableContext, SortColumn, Dataset, Column, Pagination, PaginationPage
from rweb_datatable.utils import url, make_table_section_id
def get_table_context_from_args(table: Table, args: dict, extra_args: Optional[dict] = None) -> TableContext:
    """
    Build a TableContext from a set of incoming request arguments.

    :param table: the table definition
    :param args: the incoming arguments, typically from a request object
    :param extra_args: optional extra formatting arguments carried along
    :return: a TableContext object
    """
    return TableContext(
        page=get_page_number(args=args),
        per_page=get_per_page(args=args),
        sort=get_sort_columns(args=args, table=table),
        # all table nav links (paging/sorting/search) swap this element
        hx_target=f"#{make_table_section_id(table)}",
        args=dict(args),
        is_download=args.get("download", False),
        search=args.get("search", None),
        extra_args=extra_args or {},
    )
def get_page_number(args: dict) -> int:
    """
    Extract a 1-based page number from request args.

    Falls back to page 1 when the value is missing, not parseable as an
    integer, or less than 1.

    :param args: the incoming arguments, typically from a request object
    :return: the validated page number (always >= 1)
    """
    try:
        page = int(args.get("page", 1))
    except (TypeError, ValueError):
        # TypeError covers non-numeric objects such as an explicit None value,
        # which the previous ValueError-only handler let crash
        return 1
    return page if page >= 1 else 1
def get_per_page(args: dict) -> int:
    """
    Extract the per-page row count from request args.

    The value is clamped to the range [10, 200]; a missing or unparseable
    value falls back to the default of 20.

    :param args: the incoming arguments, typically from a request object
    :return: the validated per-page count (10 <= n <= 200, default 20)
    """
    try:
        per_page = int(args.get("per_page", 20))
    except (TypeError, ValueError):
        # TypeError covers non-numeric objects such as an explicit None value,
        # which the previous ValueError-only handler let crash
        return 20
    return min(max(per_page, 10), 200)
def get_sort_columns(table: Table, args: dict) -> Dict[str, SortColumn]:
    """
    Given a dict of args (typically from a request.args object)
    extract the sort_by1+sort_dir1, sort_by2+sort_dir2, ... values until a
    pair of values is missing or invalid.

    sort_byX must be a key/ID of a column in table.columns (case sensitive)
    sort_dirX must be one of asc, desc, none

    :param table: the table definition whose columns may be sorted
    :param args: the incoming arguments, typically from a request object
    :return: mapping of column id -> SortColumn, in sort priority order
    """
    i = 1
    sorted_cols = set()
    sort = {}
    while True:
        sort_by = args.get(f"sort_by{i}")
        sort_dir = args.get(f"sort_dir{i}")
        # stop at the first missing or malformed pair; explicit checks replace
        # the previous broad `except Exception: break`, which also swallowed
        # genuine bugs raised inside the loop body
        if not isinstance(sort_by, str) or not isinstance(sort_dir, str):
            break
        sort_dir = sort_dir.lower()
        if (
            sort_by not in table.columns
            or sort_dir not in ("asc", "desc", "none")
            or sort_by in sorted_cols
        ):
            break
        # clicking on a column heading multiple times sorts that column by
        # asc->desc->none->asc etc...
        if sort_dir == "none":
            break
        sorted_cols.add(sort_by)
        sort[sort_by] = SortColumn(sort_by=sort_by, sort_dir=sort_dir, sort_order=i)
        i += 1
    return sort
def make_pagination_data(table: Table, data: Dataset, context: TableContext) -> Pagination:
    """
    Make a data structure that describes all the data needed to render both the "position" and the "pagination"
    sections of a table:

    :param table: the table definition
    :param data: the current page of rows plus the total row count
    :param context: the request context (page, per_page, sort, search, ...)
    :return: a fully-populated Pagination object
    """
    # total number of pages, rounded up so a partial final page counts
    max_page = math.ceil(data.total_rows / context.per_page)
    # base query-string parameters shared by every pagination link
    base_params = context.args.copy()
    base_params.update(dict(per_page=context.per_page))
    if context.search:
        base_params["search"] = context.search
    if context.sort:
        # NOTE(review): only the first sort column is propagated into the
        # links; secondary sort columns are dropped — confirm this is intended
        sort_cols = list(context.sort.values())
        base_params.update(dict(sort_by1=sort_cols[0].sort_by, sort_dir1=sort_cols[0].sort_dir))
    # remove the page param from the base_params as it is set in each url explicitly below
    try:
        del base_params["page"]
    except KeyError:
        pass
    if context.page > 1:
        first_url = url(context.path, page=1, **base_params)
        prev_url = url(context.path, page=context.page - 1, **base_params)
    else:
        # already on the first page: no backwards navigation
        first_url = prev_url = None
    pages = get_pagination_pages(current_page=context.page, max_page=max_page, delta=2)
    page_links = [
        PaginationPage(
            page_number=page, url=url(context.path, page=page, **base_params), is_current=page == context.page,
        )
        for page in pages
    ]
    if context.page < max_page:
        next_url = url(context.path, page=context.page + 1, **base_params)
        last_url = url(context.path, page=max_page, **base_params)
    else:
        # already on the last page: no forwards navigation
        next_url = last_url = None
    # 1-based row window shown on this page (e.g. "Showing 21 to 40 of 95")
    first_row = 1 + ((context.page - 1) * context.per_page)
    last_row = first_row + len(data.rows) - 1
    return Pagination(
        first_url=first_url,
        prev_url=prev_url,
        next_url=next_url,
        last_url=last_url,
        page_links=page_links,
        first_row=first_row,
        last_row=last_row,
        total_rows=data.total_rows,
        total_pages=max_page,
    )
def get_pagination_pages(current_page: int, max_page: int, delta: int = 2):
    """Return the range of page numbers to show around the current page,
    extending up to ``delta`` pages either side, clamped to [1, max_page]."""
    lo = current_page - delta
    if lo < 1:
        lo = 1
    hi = current_page + delta
    if hi > max_page:
        hi = max_page
    return range(lo, hi + 1)
from dataclasses import dataclass, field
from typing import Union, Callable, Any, Optional, Dict, List
StringOrCallable = Union[None, str, Callable[..., str]]
@dataclass
class Column:
    """Definition of a single table column: identity, heading and renderers."""

    # unique key of the column; also the row-dict key values are read from
    id: str
    # human-readable column heading
    title: str
    # whether the column heading offers sort links
    is_sortable: bool = field(default=True)
    # optional custom renderers for the header / body / footer cells;
    # None means the rendering module's defaults are used
    render_header: StringOrCallable = field(default=None)
    render_body: StringOrCallable = field(default=None)
    render_footer: StringOrCallable = field(default=None)
    # free-form configuration passed to the corresponding renderer
    # (e.g. {"attributes": {...}} for extra HTML attributes)
    render_header_config: Any = field(default_factory=dict)
    render_body_config: Any = field(default_factory=dict)
    render_footer_config: Any = field(default_factory=dict)
@dataclass
class Table:
    """
    Definition of a table
    """

    # unique identifier, used to build DOM ids for the table and its wrapper
    id: str
    # optional table caption (literal string or callable producing one)
    caption: Optional[StringOrCallable] = field(default=None)
    # ordered mapping of column id -> Column definition
    columns: Dict[str, Column] = field(default_factory=dict)
    # extra HTML attributes for the <table>, <thead> and <tbody> elements
    table_attributes: Dict[str, str] = field(default_factory=dict)
    thead_attributes: Dict[str, str] = field(default_factory=dict)
    tbody_attributes: Dict[str, str] = field(default_factory=dict)
    # extra CSS classes appended to the table element's class list
    extra_classes: str = field(default='')
@dataclass
class PaginationPage:
    """A single numbered link within the pagination control."""

    # 1-based page number shown to the user
    page_number: int
    # href for navigating to that page
    url: str
    # True when this is the page currently being displayed
    is_current: bool
@dataclass
class Pagination:
    """
    Describe the pagination structure which will be passed to the pagination renderer
    """

    # urls for the First/Prev/Next/Last controls; None disables the control
    first_url: Optional[str]
    prev_url: Optional[str]
    next_url: Optional[str]
    last_url: Optional[str]
    # list of the page numbers and links to show in the central section of the pagination
    page_links: List[PaginationPage]
    # start row being shown (i.e. Showing X to Y of Z ... this is X)
    first_row: int
    # slice end row being shown (i.e. Showing X to Y of Z ... this is Y)
    last_row: int
    # (i.e. Showing X to Y of Z ... this is Z)
    total_rows: int
    # total number of pages available
    total_pages: int
@dataclass
class Dataset:
    """One page of table data plus the overall row count."""

    # the rows for the current page, one dict per row keyed by column id
    rows: List[dict]
    # total number of rows across all pages (drives pagination)
    total_rows: int
    # optional totals row, rendered in the table footer when present
    totals: Optional[dict] = field(default=None)
@dataclass
class SortColumn:
    """A single requested sort: which column, which direction, what priority."""

    # column id being sorted
    sort_by: str
    # sort direction: "asc" or "desc"
    sort_dir: str
    # 1-based priority when sorting by multiple columns
    sort_order: int = field(default=1)
@dataclass
class TableContext:
    """Per-request rendering state for a table: paging, sorting and search."""

    # the CSS selector that acts as the htmlx target for all table nav links
    # such as pagination, sorting, filtering etc...
    hx_target: str
    # 1-based current page number
    page: int = field(default=1)
    # number of rows shown per page
    per_page: int = field(default=20)
    # mapping of column id -> SortColumn in priority order
    # BUG FIX: default_factory was `list`, contradicting the declared Dict
    # type and the dict produced by get_sort_columns()
    sort: Dict[str, SortColumn] = field(default_factory=dict)
    # the url path to the page?
    path: str = field(default="")
    args: dict = field(default_factory=dict)
    is_download: bool = field(default=False)
    search: Optional[str] = field(default=None)
    search_box_attributes: dict = field(default_factory=dict)
    # additional args used for formatting
    extra_args: dict = field(default_factory=dict)
from copy import copy
from typing import Union, Callable, Optional
from rweb_datatable.html import Node
from rweb_datatable.models import Column, Table, Dataset, TableContext, Pagination
from rweb_datatable.utils import url, make_table_section_id
def render_table_section(
    data: Dataset, table: Table, context: TableContext, pagination: Optional[Pagination] = None, actions: bool = True, search_button: bool = True, download_button: bool = True
) -> Node:
    """Render a complete table <section>: optional action bar, the table
    itself, and (when supplied) the pagination controls.

    :param data: the rows to display plus the total row count
    :param table: the table definition
    :param context: the per-request context (page, sort, search, ...)
    :param pagination: pre-computed pagination data; None means no pager
    :param actions: include the action bar (download/search) above the table
    :param search_button: show the search submit button in the action bar
    :param download_button: show the CSV download link in the action bar
    :return: the root Node of the rendered section
    """
    section = Node("section", attributes={"id": make_table_section_id(table)})
    if actions:
        section += make_actions(data=data, table=table, context=context, search_button=search_button, download_button=download_button)
    table_data = Node("div", attributes={"id": f"table-data-{table.id}"})
    table_wrapper = Node("div", attributes={"class": "rweb-datatable-wrapper"})
    table_data += table_wrapper
    table_wrapper += make_table(data=data, table=table, context=context)
    if pagination:
        table_data += make_pagination(context=context, pagination=pagination)
    section += table_data
    return section
def make_actions(data: Dataset, table: Table, context: TableContext, search_button: bool = True, download_button: bool = True) -> Node:
    """
    Table section that includes filters, download button, column selector eventually

    :param data: the current dataset (not used directly here)
    :param table: the table definition
    :param context: the per-request context; supplies the search term and urls
    :param search_button: render a submit button next to the search input
    :param download_button: render the CSV download link
    :return: a flexbox <div> containing the action controls
    """
    div = Node("div", attributes={"class": "d-flex justify-content-between align-items-center mb-3"})
    if download_button:
        # the download link re-issues the current request with download=csv
        download_url = url(context.path, download="csv", **context.args)
        div.node("div", Node("a", "Download", attributes={"href": download_url, "class": "btn btn-primary "}))
    # search form: submits via htmx on submit or after a 500ms typing pause
    form = div.node(
        "form",
        attributes={
            "method": "GET",
            "class": "input-group ml-auto",
            "style": "max-width: 15rem",
            "hx-get": url(context.path, args=context.args, exclude=["search"]),
            "hx-target": context.hx_target,
            "hx-select": context.hx_target,
            "hx-trigger": "submit,keyup delay:500ms from:#search",
            **context.search_box_attributes,
        },
    )
    form.node(
        "input",
        attributes={
            "id": "search",
            "type": "search",
            "name": "search",
            "class": "form-control",
            "aria-label": "Search",
            "placeholder": "Search...",
            "value": context.search or "",
            # hx-preserve keeps the input across htmx partial swaps
            "hx-preserve": "true",
        },
    )
    if search_button:
        form.node(
            "button",
            """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-search" viewBox="0 0 16 16">
            <path d="M11.742 10.344a6.5 6.5 0 1 0-1.397 1.398h-.001c.03.04.062.078.098.115l3.85 3.85a1 1 0 0 0 1.415-1.414l-3.85-3.85a1.007 1.007 0 0 0-.115-.1zM12 6.5a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0z"/>
            </svg>""",
            attributes={"class": "input-group-text btn btn-primary", "type": "submit"},
        )
    return div
def make_table(data: Dataset, table: Table, context: TableContext) -> Node:
    """Build the <table> element with head, body and (optional) footer."""
    css_classes = "table table-striped table-hover table-sm " + table.extra_classes
    table_node = Node("table", attributes={"id": f"table-{table.id}", "class": css_classes})
    table_node += make_thead(table=table, data=data, context=context)
    table_node += make_tbody(table=table, data=data, context=context)
    # a footer is only emitted when a totals row is present
    if data.totals:
        table_node += make_tfoot(table=table, data=data, context=context)
    return table_node
def make_thead(table: Table, data: Dataset, context: TableContext) -> Node:
    """Build the <thead> with one header cell per column.

    A column may supply its own header renderer via render_header;
    otherwise the default make_th (with sort links) is used.
    """
    thead = Node("thead", attributes={"class": "table-dark"})
    tr = thead.node("tr")
    for col_id, col in table.columns.items():
        th_factory = col.render_header or make_th
        tr += th_factory(table=table, data=data, context=context, column=col)
    return thead
def make_th(table: Table, data: Dataset, context: TableContext, column: Column) -> Node:
    """Default header-cell renderer: the column title, wrapped in a sort
    link (with an arrow showing the current direction) when sortable.

    Clicking a sortable heading cycles asc -> desc -> unsorted.
    """
    th = Node("th", attributes={"scope": "col", **column.render_header_config.get("attributes", {})})
    args = copy(context.args)
    # non-sortable columns get the title appended directly to the <th>
    target = th
    sort_symbol = ""
    if column.is_sortable:
        # direction currently applied to this column, if any
        current_dir = context.sort[column.id].sort_dir if column.id in context.sort else None
        if current_dir == "asc":
            sort_dir = "desc"
            sort_symbol = "↑"
        elif current_dir == "desc":
            # the next click removes the sort entirely
            sort_dir = None
            sort_symbol = "↓"
        else:
            sort_dir = "asc"
        if sort_dir:
            sort_args = dict(sort_dir1=sort_dir, sort_by1=column.id)
        else:
            # strip existing sort params so the link clears the sort
            sort_args = {}
            if "sort_dir1" in args:
                del args["sort_dir1"]
            if "sort_by1" in args:
                del args["sort_by1"]
        sort_url = url(path=context.path, args=args, search=context.search, **sort_args)
        a = Node(
            "a",
            attributes={
                "href": sort_url,
                "data-hx-get": sort_url,
                "data-hx-target": context.hx_target,
                "data-hx-select": context.hx_target,
            },
        )
        th += a
        # title and arrow go inside the link for sortable columns
        target = a
    target += column.title.format(**context.args, **context.extra_args)
    target += " "
    target += Node(
        "span",
        sort_symbol,
    )
    return th
def make_tbody(table: Table, data: Dataset, context: TableContext) -> Node:
    """Build the <tbody>: one <tr> per row, or a single placeholder row
    spanning all columns when the dataset is empty."""
    tbody = Node("tbody")
    if data.rows:
        for row in data.rows:
            tr = tbody.node("tr")
            for col_id, col in table.columns.items():
                # each cell value is looked up by column id and rendered
                # through the column's body renderer (str by default)
                value = row.get(col_id)
                rendered_value = render_cell(context=context, row=row, value=value, renderer=col.render_body)
                tr.node("td", rendered_value, attributes=col.render_body_config.get("attributes"))
    else:
        tr = tbody.node("tr")
        tr.node("td", "There is no data to display", attributes={"colspan": len(table.columns)})
    return tbody
def make_tfoot(table: Table, data: Dataset, context: TableContext) -> Optional[Node]:
    """Build the <tfoot> totals row, or return None when there are no totals."""
    if data.totals is None:
        return
    row = data.totals
    tfoot = Node("tfoot", attributes={"class": "table-dark"})
    tr = tfoot.node("tr")
    for col_id, col in table.columns.items():
        # totals cells go through the column's footer renderer
        value = row.get(col_id)
        rendered_value = render_cell(context=context, row=row, value=value, renderer=col.render_footer)
        tr.node("th", rendered_value, attributes=col.render_footer_config.get("attributes"))
    return tfoot
def render_cell(context: TableContext, row, value: str, renderer: Union[None, str, Callable]):
    """Render a single cell value with the given renderer.

    A renderer may accept either (value, row) or just (value); the two-arg
    form is tried first. When no renderer is supplied, str() is used.

    :param context: the table context (used only for error reporting)
    :param row: the full row dict the value came from
    :param value: the raw cell value
    :param renderer: None, or a callable taking (value, row) or (value)
    :return: the rendered cell content
    :raises ValueError: when the renderer itself fails
    """
    if not renderer:
        renderer = str
    try:
        try:
            return renderer(value, row)
        except TypeError:
            # renderer only accepts a single argument; retry with (value).
            # Narrowed from `except Exception`, which retried on *any*
            # failure and thereby masked genuine errors raised inside
            # two-argument renderers behind a misleading arity TypeError.
            return renderer(value)
    except Exception as e:
        raise ValueError(f"Could not render table cell context={context}, value={value}, renderer={renderer}") from e
def make_pagination(context: TableContext, pagination: Pagination):
    """Render the pagination nav plus the "Showing X - Y of Z" position text.

    :param context: supplies the htmx target selector for the page links
    :param pagination: pre-computed urls, page links and row-window numbers
    :return: a flexbox <div> wrapping the position text and the <nav>
    """
    target = context.hx_target
    p = Node("nav", attributes={"aria-label": "Paginate Table"})
    ul = p.node("ul", attributes={"class": "pagination"})
    # First/Prev render disabled when their urls are None (first page)
    ul += make_pagination_li(text="First", pagination_url=pagination.first_url, target=target, aria_label="Goto First Page")
    ul += make_pagination_li(text="Prev", pagination_url=pagination.prev_url, target=target, aria_label="Goto Previous Page")
    for page_link in pagination.page_links:
        ul += make_pagination_li(
            text=str(page_link.page_number),
            pagination_url=page_link.url,
            target=target,
            aria_label=f"Goto Page {page_link.page_number}",
            is_current=page_link.is_current,
        )
    # Next/Last render disabled when their urls are None (last page)
    ul += make_pagination_li(text="Next", pagination_url=pagination.next_url, target=target, aria_label="Goto Next Page")
    ul += make_pagination_li(text="Last", pagination_url=pagination.last_url, target=target, aria_label="Goto Last Page")
    current_position = Node(
        "span",
        f"Showing {pagination.first_row} - {pagination.last_row} of {pagination.total_rows} entries",
        attributes={"class": "pagination-page"},
    )
    div = Node("div", current_position, p, attributes={"class": "d-flex justify-content-between align-items-center"})
    return div
def make_pagination_li(
    text: str, pagination_url: str, target: str, aria_label: str, is_current: bool = False, is_disabled: bool = False
) -> Node:
    """Build one <li> entry of the pagination list.

    :param text: the link label ("First", "3", "Next", ...)
    :param pagination_url: destination url; None renders the item disabled
    :param target: htmx target/select CSS selector for partial page swaps
    :param aria_label: accessible label for the link
    :param is_current: mark this item as the active page
    :param is_disabled: force-disable the item regardless of url
    :return: the <li> Node containing an <a> page link
    """
    li_attributes = {"class": "page-item"}
    a_attributes = {"class": "page-link"}
    if is_current:
        li_attributes["class"] += " active"
        a_attributes["aria-current"] = "page"
    if is_disabled or pagination_url is None:
        li_attributes["class"] += " disabled"
        # NOTE(review): these values are Python int/bool, so they render
        # however Node stringifies them (-1 / True) — confirm that is the
        # intended HTML output
        a_attributes["tabindex"] = -1
        a_attributes["aria-disabled"] = True
    else:
        a_attributes.update(
            {
                "href": pagination_url,
                "data-hx-get": pagination_url,
                "data-hx-target": target,
                "data-hx-select": target,
                "aria-label": aria_label,
            }
        )
    li = Node("li", attributes=li_attributes)
    li += Node(
        "a",
        text,
        attributes=a_attributes,
    )
    return li
import abc
import builtins
import datetime
import enum
import typing
import jsii
import jsii.compat
import publication
from ._jsii import *
import aws_cdk.aws_ec2
import aws_cdk.aws_iam
import aws_cdk.aws_lambda
import aws_cdk.aws_logs
import aws_cdk.aws_sqs
import aws_cdk.core
class GolangFunction(aws_cdk.aws_lambda.Function, metaclass=jsii.JSIIMeta, jsii_type="aws-lambda-golang.GolangFunction"):
    """A Golang Lambda function compiled with ``go build``.

    jsii-generated binding; the actual construct logic lives in the
    JavaScript package ``aws-lambda-golang``.

    stability
    :stability: experimental
    """
    def __init__(self, scope: aws_cdk.core.Construct, id: str, *, build_cmd: typing.Optional[str]=None, build_dir: typing.Optional[str]=None, entry: typing.Optional[str]=None, extra_env: typing.Any=None, handler: typing.Optional[str]=None, allow_all_outbound: typing.Optional[bool]=None, current_version_options: typing.Optional[aws_cdk.aws_lambda.VersionOptions]=None, dead_letter_queue: typing.Optional[aws_cdk.aws_sqs.IQueue]=None, dead_letter_queue_enabled: typing.Optional[bool]=None, description: typing.Optional[str]=None, environment: typing.Optional[typing.Mapping[str, str]]=None, events: typing.Optional[typing.List[aws_cdk.aws_lambda.IEventSource]]=None, function_name: typing.Optional[str]=None, initial_policy: typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]=None, layers: typing.Optional[typing.List[aws_cdk.aws_lambda.ILayerVersion]]=None, log_retention: typing.Optional[aws_cdk.aws_logs.RetentionDays]=None, log_retention_role: typing.Optional[aws_cdk.aws_iam.IRole]=None, memory_size: typing.Optional[jsii.Number]=None, reserved_concurrent_executions: typing.Optional[jsii.Number]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None, security_group: typing.Optional[aws_cdk.aws_ec2.ISecurityGroup]=None, security_groups: typing.Optional[typing.List[aws_cdk.aws_ec2.ISecurityGroup]]=None, timeout: typing.Optional[aws_cdk.core.Duration]=None, tracing: typing.Optional[aws_cdk.aws_lambda.Tracing]=None, vpc: typing.Optional[aws_cdk.aws_ec2.IVpc]=None, vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection]=None, max_event_age: typing.Optional[aws_cdk.core.Duration]=None, on_failure: typing.Optional[aws_cdk.aws_lambda.IDestination]=None, on_success: typing.Optional[aws_cdk.aws_lambda.IDestination]=None, retry_attempts: typing.Optional[jsii.Number]=None) -> None:
        """
        :param scope: -
        :param id: -
        :param build_cmd: The build command. Default: - ``go build -ldflags="-s -w"``
        :param build_dir: The build directory. Default: - ``.build`` in the entry file directory
        :param entry: Path to the entry Golang source file. Default: - Derived from the name of the defining file and the construct's id. If the ``GolangFunction`` is defined in ``stack.ts`` with ``my-handler`` as id (``new GolangFunction(this, 'my-handler')``), the construct will look at ``stack/my-handler/main.go``
        :param extra_env: Additional environment variables. Default: - ``{ GOOS: 'linux' }``
        :param handler: The name of the exported handler in the entry file. Default: main
        :param allow_all_outbound: Whether to allow the Lambda to send all network traffic. If set to false, you must individually add traffic rules to allow the Lambda to connect to network targets. Default: true
        :param current_version_options: Options for the ``lambda.Version`` resource automatically created by the ``fn.currentVersion`` method. Default: - default options as described in ``VersionOptions``
        :param dead_letter_queue: The SQS queue to use if DLQ is enabled. Default: - SQS queue with 14 day retention period if ``deadLetterQueueEnabled`` is ``true``
        :param dead_letter_queue_enabled: Enabled DLQ. If ``deadLetterQueue`` is undefined, an SQS queue with default options will be defined for your Function. Default: - false unless ``deadLetterQueue`` is set, which implies DLQ is enabled.
        :param description: A description of the function. Default: - No description.
        :param environment: Key-value pairs that Lambda caches and makes available for your Lambda functions. Use environment variables to apply configuration changes, such as test and production environment configurations, without changing your Lambda function source code. Default: - No environment variables.
        :param events: Event sources for this function. You can also add event sources using ``addEventSource``. Default: - No event sources.
        :param function_name: A name for the function. Default: - AWS CloudFormation generates a unique physical ID and uses that ID for the function's name. For more information, see Name Type.
        :param initial_policy: Initial policy statements to add to the created Lambda Role. You can call ``addToRolePolicy`` to the created lambda to add statements post creation. Default: - No policy statements are added to the created Lambda role.
        :param layers: A list of layers to add to the function's execution environment. You can configure your Lambda function to pull in additional code during initialization in the form of layers. Layers are packages of libraries or other dependencies that can be used by multiple functions. Default: - No layers.
        :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE
        :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.
        :param memory_size: The amount of memory, in MB, that is allocated to your Lambda function. Lambda uses this value to proportionally allocate the amount of CPU power. For more information, see Resource Model in the AWS Lambda Developer Guide. Default: 128
        :param reserved_concurrent_executions: The maximum of concurrent executions you want to reserve for the function. Default: - No specific limit - account limit.
        :param role: Lambda execution role. This is the role that will be assumed by the function upon execution. It controls the permissions that the function will have. The Role must be assumable by the 'lambda.amazonaws.com' service principal. The default Role automatically has permissions granted for Lambda execution. If you provide a Role, you must add the relevant AWS managed policies yourself. The relevant managed policies are "service-role/AWSLambdaBasicExecutionRole" and "service-role/AWSLambdaVPCAccessExecutionRole". Default: - A unique role will be generated for this lambda function. Both supplied and generated roles can always be changed by calling ``addToRolePolicy``.
        :param security_group: What security group to associate with the Lambda's network interfaces. This property is being deprecated, consider using securityGroups instead. Only used if 'vpc' is supplied. Use securityGroups property instead. Function constructor will throw an error if both are specified. Default: - If the function is placed within a VPC and a security group is not specified, either by this or securityGroups prop, a dedicated security group will be created for this function.
        :param security_groups: The list of security groups to associate with the Lambda's network interfaces. Only used if 'vpc' is supplied. Default: - If the function is placed within a VPC and a security group is not specified, either by this or securityGroup prop, a dedicated security group will be created for this function.
        :param timeout: The function execution time (in seconds) after which Lambda terminates the function. Because the execution time affects cost, set this value based on the function's expected execution time. Default: Duration.seconds(3)
        :param tracing: Enable AWS X-Ray Tracing for Lambda Function. Default: Tracing.Disabled
        :param vpc: VPC network to place Lambda network interfaces. Specify this if the Lambda function needs to access resources in a VPC. Default: - Function is not placed within a VPC.
        :param vpc_subnets: Where to place the network interfaces within the VPC. Only used if 'vpc' is supplied. Note: internet access for Lambdas requires a NAT gateway, so picking Public subnets is not allowed. Default: - the Vpc default strategy if not specified
        :param max_event_age: The maximum age of a request that Lambda sends to a function for processing. Minimum: 60 seconds Maximum: 6 hours Default: Duration.hours(6)
        :param on_failure: The destination for failed invocations. Default: - no destination
        :param on_success: The destination for successful invocations. Default: - no destination
        :param retry_attempts: The maximum number of times to retry when the function returns an error. Minimum: 0 Maximum: 2 Default: 2

        stability
        :stability: experimental
        """
        # Pack every keyword option into the jsii struct and hand control to the
        # underlying JavaScript construct via the jsii kernel.
        props = GolangFunctionProps(build_cmd=build_cmd, build_dir=build_dir, entry=entry, extra_env=extra_env, handler=handler, allow_all_outbound=allow_all_outbound, current_version_options=current_version_options, dead_letter_queue=dead_letter_queue, dead_letter_queue_enabled=dead_letter_queue_enabled, description=description, environment=environment, events=events, function_name=function_name, initial_policy=initial_policy, layers=layers, log_retention=log_retention, log_retention_role=log_retention_role, memory_size=memory_size, reserved_concurrent_executions=reserved_concurrent_executions, role=role, security_group=security_group, security_groups=security_groups, timeout=timeout, tracing=tracing, vpc=vpc, vpc_subnets=vpc_subnets, max_event_age=max_event_age, on_failure=on_failure, on_success=on_success, retry_attempts=retry_attempts)

        jsii.create(GolangFunction, self, [scope, id, props])
@jsii.data_type(jsii_type="aws-lambda-golang.GolangFunctionProps", jsii_struct_bases=[aws_cdk.aws_lambda.FunctionOptions], name_mapping={'max_event_age': 'maxEventAge', 'on_failure': 'onFailure', 'on_success': 'onSuccess', 'retry_attempts': 'retryAttempts', 'allow_all_outbound': 'allowAllOutbound', 'current_version_options': 'currentVersionOptions', 'dead_letter_queue': 'deadLetterQueue', 'dead_letter_queue_enabled': 'deadLetterQueueEnabled', 'description': 'description', 'environment': 'environment', 'events': 'events', 'function_name': 'functionName', 'initial_policy': 'initialPolicy', 'layers': 'layers', 'log_retention': 'logRetention', 'log_retention_role': 'logRetentionRole', 'memory_size': 'memorySize', 'reserved_concurrent_executions': 'reservedConcurrentExecutions', 'role': 'role', 'security_group': 'securityGroup', 'security_groups': 'securityGroups', 'timeout': 'timeout', 'tracing': 'tracing', 'vpc': 'vpc', 'vpc_subnets': 'vpcSubnets', 'build_cmd': 'buildCmd', 'build_dir': 'buildDir', 'entry': 'entry', 'extra_env': 'extraEnv', 'handler': 'handler'})
class GolangFunctionProps(aws_cdk.aws_lambda.FunctionOptions):
def __init__(self, *, max_event_age: typing.Optional[aws_cdk.core.Duration]=None, on_failure: typing.Optional[aws_cdk.aws_lambda.IDestination]=None, on_success: typing.Optional[aws_cdk.aws_lambda.IDestination]=None, retry_attempts: typing.Optional[jsii.Number]=None, allow_all_outbound: typing.Optional[bool]=None, current_version_options: typing.Optional[aws_cdk.aws_lambda.VersionOptions]=None, dead_letter_queue: typing.Optional[aws_cdk.aws_sqs.IQueue]=None, dead_letter_queue_enabled: typing.Optional[bool]=None, description: typing.Optional[str]=None, environment: typing.Optional[typing.Mapping[str, str]]=None, events: typing.Optional[typing.List[aws_cdk.aws_lambda.IEventSource]]=None, function_name: typing.Optional[str]=None, initial_policy: typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]=None, layers: typing.Optional[typing.List[aws_cdk.aws_lambda.ILayerVersion]]=None, log_retention: typing.Optional[aws_cdk.aws_logs.RetentionDays]=None, log_retention_role: typing.Optional[aws_cdk.aws_iam.IRole]=None, memory_size: typing.Optional[jsii.Number]=None, reserved_concurrent_executions: typing.Optional[jsii.Number]=None, role: typing.Optional[aws_cdk.aws_iam.IRole]=None, security_group: typing.Optional[aws_cdk.aws_ec2.ISecurityGroup]=None, security_groups: typing.Optional[typing.List[aws_cdk.aws_ec2.ISecurityGroup]]=None, timeout: typing.Optional[aws_cdk.core.Duration]=None, tracing: typing.Optional[aws_cdk.aws_lambda.Tracing]=None, vpc: typing.Optional[aws_cdk.aws_ec2.IVpc]=None, vpc_subnets: typing.Optional[aws_cdk.aws_ec2.SubnetSelection]=None, build_cmd: typing.Optional[str]=None, build_dir: typing.Optional[str]=None, entry: typing.Optional[str]=None, extra_env: typing.Any=None, handler: typing.Optional[str]=None) -> None:
"""Properties for a GolangFunction.
:param max_event_age: The maximum age of a request that Lambda sends to a function for processing. Minimum: 60 seconds Maximum: 6 hours Default: Duration.hours(6)
:param on_failure: The destination for failed invocations. Default: - no destination
:param on_success: The destination for successful invocations. Default: - no destination
:param retry_attempts: The maximum number of times to retry when the function returns an error. Minimum: 0 Maximum: 2 Default: 2
:param allow_all_outbound: Whether to allow the Lambda to send all network traffic. If set to false, you must individually add traffic rules to allow the Lambda to connect to network targets. Default: true
:param current_version_options: Options for the ``lambda.Version`` resource automatically created by the ``fn.currentVersion`` method. Default: - default options as described in ``VersionOptions``
:param dead_letter_queue: The SQS queue to use if DLQ is enabled. Default: - SQS queue with 14 day retention period if ``deadLetterQueueEnabled`` is ``true``
:param dead_letter_queue_enabled: Enabled DLQ. If ``deadLetterQueue`` is undefined, an SQS queue with default options will be defined for your Function. Default: - false unless ``deadLetterQueue`` is set, which implies DLQ is enabled.
:param description: A description of the function. Default: - No description.
:param environment: Key-value pairs that Lambda caches and makes available for your Lambda functions. Use environment variables to apply configuration changes, such as test and production environment configurations, without changing your Lambda function source code. Default: - No environment variables.
:param events: Event sources for this function. You can also add event sources using ``addEventSource``. Default: - No event sources.
:param function_name: A name for the function. Default: - AWS CloudFormation generates a unique physical ID and uses that ID for the function's name. For more information, see Name Type.
:param initial_policy: Initial policy statements to add to the created Lambda Role. You can call ``addToRolePolicy`` to the created lambda to add statements post creation. Default: - No policy statements are added to the created Lambda role.
:param layers: A list of layers to add to the function's execution environment. You can configure your Lambda function to pull in additional code during initialization in the form of layers. Layers are packages of libraries or other dependencies that can be used by mulitple functions. Default: - No layers.
:param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE
:param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.
:param memory_size: The amount of memory, in MB, that is allocated to your Lambda function. Lambda uses this value to proportionally allocate the amount of CPU power. For more information, see Resource Model in the AWS Lambda Developer Guide. Default: 128
:param reserved_concurrent_executions: The maximum of concurrent executions you want to reserve for the function. Default: - No specific limit - account limit.
:param role: Lambda execution role. This is the role that will be assumed by the function upon execution. It controls the permissions that the function will have. The Role must be assumable by the 'lambda.amazonaws.com' service principal. The default Role automatically has permissions granted for Lambda execution. If you provide a Role, you must add the relevant AWS managed policies yourself. The relevant managed policies are "service-role/AWSLambdaBasicExecutionRole" and "service-role/AWSLambdaVPCAccessExecutionRole". Default: - A unique role will be generated for this lambda function. Both supplied and generated roles can always be changed by calling ``addToRolePolicy``.
:param security_group: What security group to associate with the Lambda's network interfaces. This property is being deprecated, consider using securityGroups instead. Only used if 'vpc' is supplied. Use securityGroups property instead. Function constructor will throw an error if both are specified. Default: - If the function is placed within a VPC and a security group is not specified, either by this or securityGroups prop, a dedicated security group will be created for this function.
:param security_groups: The list of security groups to associate with the Lambda's network interfaces. Only used if 'vpc' is supplied. Default: - If the function is placed within a VPC and a security group is not specified, either by this or securityGroup prop, a dedicated security group will be created for this function.
:param timeout: The function execution time (in seconds) after which Lambda terminates the function. Because the execution time affects cost, set this value based on the function's expected execution time. Default: Duration.seconds(3)
:param tracing: Enable AWS X-Ray Tracing for Lambda Function. Default: Tracing.Disabled
:param vpc: VPC network to place Lambda network interfaces. Specify this if the Lambda function needs to access resources in a VPC. Default: - Function is not placed within a VPC.
:param vpc_subnets: Where to place the network interfaces within the VPC. Only used if 'vpc' is supplied. Note: internet access for Lambdas requires a NAT gateway, so picking Public subnets is not allowed. Default: - the Vpc default strategy if not specified
:param build_cmd: The build command. Default: - ``go build -ldflags="-s -w"``
:param build_dir: The build directory. Default: - ``.build`` in the entry file directory
:param entry: Path to the entry Golang source file. Default: - Derived from the name of the defining file and the construct's id. If the ``GolangFunction`` is defined in ``stack.ts`` with ``my-handler`` as id (``new GolangFunction(this, 'my-handler')``), the construct will look at ``stack/my-handler/main.go``
:param extra_env: Additional environment variables. Default: - ``{ GOOS: 'linux' }``
:param handler: The name of the exported handler in the entry file. Default: main
stability
:stability: experimental
"""
if isinstance(current_version_options, dict): current_version_options = aws_cdk.aws_lambda.VersionOptions(**current_version_options)
if isinstance(vpc_subnets, dict): vpc_subnets = aws_cdk.aws_ec2.SubnetSelection(**vpc_subnets)
self._values = {
}
if max_event_age is not None: self._values["max_event_age"] = max_event_age
if on_failure is not None: self._values["on_failure"] = on_failure
if on_success is not None: self._values["on_success"] = on_success
if retry_attempts is not None: self._values["retry_attempts"] = retry_attempts
if allow_all_outbound is not None: self._values["allow_all_outbound"] = allow_all_outbound
if current_version_options is not None: self._values["current_version_options"] = current_version_options
if dead_letter_queue is not None: self._values["dead_letter_queue"] = dead_letter_queue
if dead_letter_queue_enabled is not None: self._values["dead_letter_queue_enabled"] = dead_letter_queue_enabled
if description is not None: self._values["description"] = description
if environment is not None: self._values["environment"] = environment
if events is not None: self._values["events"] = events
if function_name is not None: self._values["function_name"] = function_name
if initial_policy is not None: self._values["initial_policy"] = initial_policy
if layers is not None: self._values["layers"] = layers
if log_retention is not None: self._values["log_retention"] = log_retention
if log_retention_role is not None: self._values["log_retention_role"] = log_retention_role
if memory_size is not None: self._values["memory_size"] = memory_size
if reserved_concurrent_executions is not None: self._values["reserved_concurrent_executions"] = reserved_concurrent_executions
if role is not None: self._values["role"] = role
if security_group is not None: self._values["security_group"] = security_group
if security_groups is not None: self._values["security_groups"] = security_groups
if timeout is not None: self._values["timeout"] = timeout
if tracing is not None: self._values["tracing"] = tracing
if vpc is not None: self._values["vpc"] = vpc
if vpc_subnets is not None: self._values["vpc_subnets"] = vpc_subnets
if build_cmd is not None: self._values["build_cmd"] = build_cmd
if build_dir is not None: self._values["build_dir"] = build_dir
if entry is not None: self._values["entry"] = entry
if extra_env is not None: self._values["extra_env"] = extra_env
if handler is not None: self._values["handler"] = handler
@builtins.property
def max_event_age(self) -> typing.Optional[aws_cdk.core.Duration]:
"""The maximum age of a request that Lambda sends to a function for processing.
Minimum: 60 seconds
Maximum: 6 hours
default
:default: Duration.hours(6)
"""
return self._values.get('max_event_age')
@builtins.property
def on_failure(self) -> typing.Optional[aws_cdk.aws_lambda.IDestination]:
"""The destination for failed invocations.
default
:default: - no destination
"""
return self._values.get('on_failure')
@builtins.property
def on_success(self) -> typing.Optional[aws_cdk.aws_lambda.IDestination]:
"""The destination for successful invocations.
default
:default: - no destination
"""
return self._values.get('on_success')
@builtins.property
def retry_attempts(self) -> typing.Optional[jsii.Number]:
"""The maximum number of times to retry when the function returns an error.
Minimum: 0
Maximum: 2
default
:default: 2
"""
return self._values.get('retry_attempts')
@builtins.property
def allow_all_outbound(self) -> typing.Optional[bool]:
"""Whether to allow the Lambda to send all network traffic.
If set to false, you must individually add traffic rules to allow the
Lambda to connect to network targets.
default
:default: true
"""
return self._values.get('allow_all_outbound')
@builtins.property
def current_version_options(self) -> typing.Optional[aws_cdk.aws_lambda.VersionOptions]:
"""Options for the ``lambda.Version`` resource automatically created by the ``fn.currentVersion`` method.
default
:default: - default options as described in ``VersionOptions``
"""
return self._values.get('current_version_options')
@builtins.property
def dead_letter_queue(self) -> typing.Optional[aws_cdk.aws_sqs.IQueue]:
"""The SQS queue to use if DLQ is enabled.
default
:default: - SQS queue with 14 day retention period if ``deadLetterQueueEnabled`` is ``true``
"""
return self._values.get('dead_letter_queue')
@builtins.property
def dead_letter_queue_enabled(self) -> typing.Optional[bool]:
"""Enabled DLQ.
If ``deadLetterQueue`` is undefined,
an SQS queue with default options will be defined for your Function.
default
:default: - false unless ``deadLetterQueue`` is set, which implies DLQ is enabled.
"""
return self._values.get('dead_letter_queue_enabled')
@builtins.property
def description(self) -> typing.Optional[str]:
"""A description of the function.
default
:default: - No description.
"""
return self._values.get('description')
@builtins.property
def environment(self) -> typing.Optional[typing.Mapping[str, str]]:
"""Key-value pairs that Lambda caches and makes available for your Lambda functions.
Use environment variables to apply configuration changes, such
as test and production environment configurations, without changing your
Lambda function source code.
default
:default: - No environment variables.
"""
return self._values.get('environment')
@builtins.property
def events(self) -> typing.Optional[typing.List[aws_cdk.aws_lambda.IEventSource]]:
"""Event sources for this function.
You can also add event sources using ``addEventSource``.
default
:default: - No event sources.
"""
return self._values.get('events')
@builtins.property
def function_name(self) -> typing.Optional[str]:
"""A name for the function.
default
:default:
- AWS CloudFormation generates a unique physical ID and uses that
ID for the function's name. For more information, see Name Type.
"""
return self._values.get('function_name')
@builtins.property
def initial_policy(self) -> typing.Optional[typing.List[aws_cdk.aws_iam.PolicyStatement]]:
"""Initial policy statements to add to the created Lambda Role.
You can call ``addToRolePolicy`` to the created lambda to add statements post creation.
default
:default: - No policy statements are added to the created Lambda role.
"""
return self._values.get('initial_policy')
@builtins.property
def layers(self) -> typing.Optional[typing.List[aws_cdk.aws_lambda.ILayerVersion]]:
"""A list of layers to add to the function's execution environment.
You can configure your Lambda function to pull in
additional code during initialization in the form of layers. Layers are packages of libraries or other dependencies
that can be used by mulitple functions.
default
:default: - No layers.
"""
return self._values.get('layers')
@builtins.property
def log_retention(self) -> typing.Optional[aws_cdk.aws_logs.RetentionDays]:
"""The number of days log events are kept in CloudWatch Logs.
When updating
this property, unsetting it doesn't remove the log retention policy. To
remove the retention policy, set the value to ``INFINITE``.
default
:default: logs.RetentionDays.INFINITE
"""
return self._values.get('log_retention')
@builtins.property
def log_retention_role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
"""The IAM role for the Lambda function associated with the custom resource that sets the retention policy.
default
:default: - A new role is created.
"""
return self._values.get('log_retention_role')
@builtins.property
def memory_size(self) -> typing.Optional[jsii.Number]:
"""The amount of memory, in MB, that is allocated to your Lambda function.
Lambda uses this value to proportionally allocate the amount of CPU
power. For more information, see Resource Model in the AWS Lambda
Developer Guide.
default
:default: 128
"""
return self._values.get('memory_size')
@builtins.property
def reserved_concurrent_executions(self) -> typing.Optional[jsii.Number]:
"""The maximum of concurrent executions you want to reserve for the function.
default
:default: - No specific limit - account limit.
see
:see: https://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html
"""
return self._values.get('reserved_concurrent_executions')
@builtins.property
def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:
"""Lambda execution role.
This is the role that will be assumed by the function upon execution.
It controls the permissions that the function will have. The Role must
be assumable by the 'lambda.amazonaws.com' service principal.
The default Role automatically has permissions granted for Lambda execution. If you
provide a Role, you must add the relevant AWS managed policies yourself.
The relevant managed policies are "service-role/AWSLambdaBasicExecutionRole" and
"service-role/AWSLambdaVPCAccessExecutionRole".
default
:default:
- A unique role will be generated for this lambda function.
Both supplied and generated roles can always be changed by calling ``addToRolePolicy``.
"""
return self._values.get('role')
@builtins.property
def security_group(self) -> typing.Optional[aws_cdk.aws_ec2.ISecurityGroup]:
"""What security group to associate with the Lambda's network interfaces. This property is being deprecated, consider using securityGroups instead.
Only used if 'vpc' is supplied.
Use securityGroups property instead.
Function constructor will throw an error if both are specified.
default
:default:
- If the function is placed within a VPC and a security group is
not specified, either by this or securityGroups prop, a dedicated security
group will be created for this function.
deprecated
:deprecated: - This property is deprecated, use securityGroups instead
stability
:stability: deprecated
"""
return self._values.get('security_group')
@builtins.property
def security_groups(self) -> typing.Optional[typing.List[aws_cdk.aws_ec2.ISecurityGroup]]:
"""The list of security groups to associate with the Lambda's network interfaces.
Only used if 'vpc' is supplied.
default
:default:
- If the function is placed within a VPC and a security group is
not specified, either by this or securityGroup prop, a dedicated security
group will be created for this function.
"""
return self._values.get('security_groups')
@builtins.property
def timeout(self) -> typing.Optional[aws_cdk.core.Duration]:
"""The function execution time (in seconds) after which Lambda terminates the function.
Because the execution time affects cost, set this value
based on the function's expected execution time.
default
:default: Duration.seconds(3)
"""
return self._values.get('timeout')
@builtins.property
def tracing(self) -> typing.Optional[aws_cdk.aws_lambda.Tracing]:
"""Enable AWS X-Ray Tracing for Lambda Function.
default
:default: Tracing.Disabled
"""
return self._values.get('tracing')
@builtins.property
def vpc(self) -> typing.Optional[aws_cdk.aws_ec2.IVpc]:
"""VPC network to place Lambda network interfaces.
Specify this if the Lambda function needs to access resources in a VPC.
default
:default: - Function is not placed within a VPC.
"""
return self._values.get('vpc')
@builtins.property
def vpc_subnets(self) -> typing.Optional[aws_cdk.aws_ec2.SubnetSelection]:
"""Where to place the network interfaces within the VPC.
Only used if 'vpc' is supplied. Note: internet access for Lambdas
requires a NAT gateway, so picking Public subnets is not allowed.
default
:default: - the Vpc default strategy if not specified
"""
return self._values.get('vpc_subnets')
@builtins.property
def build_cmd(self) -> typing.Optional[str]:
"""The build command.
default
:default: - ``go build -ldflags="-s -w"``
stability
:stability: experimental
"""
return self._values.get('build_cmd')
@builtins.property
def build_dir(self) -> typing.Optional[str]:
"""The build directory.
default
:default: - ``.build`` in the entry file directory
stability
:stability: experimental
"""
return self._values.get('build_dir')
@builtins.property
def entry(self) -> typing.Optional[str]:
"""Path to the entry Golang source file.
default
:default:
- Derived from the name of the defining file and the construct's id.
If the ``GolangFunction`` is defined in ``stack.ts`` with ``my-handler`` as id
(``new GolangFunction(this, 'my-handler')``), the construct will look at ``stack/my-handler/main.go``
stability
:stability: experimental
"""
return self._values.get('entry')
@builtins.property
def extra_env(self) -> typing.Any:
"""Additional environment variables.
default
:default: - ``{ GOOS: 'linux' }``
stability
:stability: experimental
"""
return self._values.get('extra_env')
@builtins.property
def handler(self) -> typing.Optional[str]:
"""The name of the exported handler in the entry file.
default
:default: main
stability
:stability: experimental
"""
return self._values.get('handler')
def __eq__(self, rhs) -> bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs) -> bool:
return not (rhs == self)
def __repr__(self) -> str:
    """Debug representation listing every captured property as ``key=repr(value)``."""
    rendered = ', '.join(f'{key}={value!r}' for key, value in self._values.items())
    return f'GolangFunctionProps({rendered})'
# Public API of this generated module.
__all__ = [
    "GolangFunction",
    "GolangFunctionProps",
]

# NOTE(review): jsii runtime hook — presumably finalizes/publishes the type
# references for the generated classes above; its exact behavior is not
# visible from this file. (Removed dataset-metadata residue that had been
# appended to this line and made it invalid Python.)
publication.publish()
# rwkv.cpp
This is a port of [BlinkDL/RWKV-LM](https://github.com/BlinkDL/RWKV-LM) to [ggerganov/ggml](https://github.com/ggerganov/ggml).
Besides the usual **FP32**, it supports **FP16** and **quantized INT4** inference on CPU. This project is **CPU only**.
RWKV is a novel large language model architecture, [with the largest model in the family having 14B parameters](https://huggingface.co/BlinkDL/rwkv-4-pile-14b). In contrast to Transformer with `O(n^2)` attention, RWKV requires only the state from the previous step to calculate logits. This makes RWKV very CPU-friendly on large context lengths.
This project provides [a C library rwkv.h](rwkv.h) and [a convenient Python wrapper](rwkv%2Frwkv_cpp_model.py) for it.
**TODO (contributions welcome!)**:
1. Optimize AVX2 implementation of `Q4_1_O` matmul — currently, it is as slow as `FP32`
2. Measure latency and perplexity of different model sizes (169M to 14B) and data types (`FP32`, `FP16`, `Q4_0`, `Q4_1`, `Q4_1_O`)
3. Test on Linux (including Colab) and MacOS
4. Make required memory calculation more robust (see [#4](https://github.com/saharNooby/rwkv.cpp/issues/4))
## How to use
### 1. Clone the repo
**Requirements**: [git](https://gitforwindows.org/).
```commandline
git clone https://github.com/saharNooby/rwkv.cpp.git
cd rwkv.cpp
```
### 2. Get the rwkv.cpp library
#### Option 2.1. Download a pre-compiled library
##### Windows / Linux / MacOS
Check out [Releases](https://github.com/saharNooby/rwkv.cpp/releases), download appropriate ZIP for your OS and CPU, extract `rwkv` library file into the repository directory.
On Windows: to check whether your CPU supports AVX2 or AVX-512, [use CPU-Z](https://www.cpuid.com/softwares/cpu-z.html).
#### Option 2.2. Build the library yourself
##### Windows
**Requirements**: [CMake](https://cmake.org/download/) or [CMake from anaconda](https://anaconda.org/conda-forge/cmake), MSVC compiler.
```commandline
cmake -DBUILD_SHARED_LIBS=ON .
cmake --build . --config Release
```
If everything went OK, `bin\Release\rwkv.dll` file should appear.
##### Linux / MacOS
**Requirements**: CMake (Linux: `sudo apt install cmake`, MacOS: `brew install cmake`, anaconda: [cmake package](https://anaconda.org/conda-forge/cmake)).
```commandline
cmake -DBUILD_SHARED_LIBS=ON .
cmake --build . --config Release
```
**Anaconda & M1 users**: please verify that `CMAKE_SYSTEM_PROCESSOR: arm64` after running `cmake -DBUILD_SHARED_LIBS=ON .` — if it detects `x86_64`, edit the `CMakeLists.txt` file under the `# Compile flags` to add `set(CMAKE_SYSTEM_PROCESSOR "arm64")`.
If everything went OK, `librwkv.so` (Linux) or `librwkv.dylib` (MacOS) file should appear in the base repo folder.
### 3. Download an RWKV model from [Hugging Face](https://huggingface.co/BlinkDL) like [this one](https://huggingface.co/BlinkDL/rwkv-4-pile-169m/blob/main/RWKV-4-Pile-169M-20220807-8023.pth) and convert it into `ggml` format
**Requirements**: Python 3.x with [PyTorch](https://pytorch.org/get-started/locally/).
```commandline
# Windows
python rwkv\convert_pytorch_to_ggml.py C:\RWKV-4-Pile-169M-20220807-8023.pth C:\rwkv.cpp-169M.bin float16
# Linux / MacOS
python rwkv/convert_pytorch_to_ggml.py ~/Downloads/RWKV-4-Pile-169M-20220807-8023.pth ~/Downloads/rwkv.cpp-169M.bin float16
```
#### 3.1. Optionally, quantize the model
To convert the model into INT4 quantized format, run:
```commandline
# Windows
python rwkv\quantize.py C:\rwkv.cpp-169M.bin C:\rwkv.cpp-169M-Q4_1_O.bin 4
# Linux / MacOS
python rwkv/quantize.py ~/Downloads/rwkv.cpp-169M.bin ~/Downloads/rwkv.cpp-169M-Q4_1_O.bin 4
```
Formats available:
- `4`: `Q4_1_O`, best quality, very slow (as `FP32`).
- `3`: `Q4_1`, poor quality, very fast (as `FP16`).
- `2`: `Q4_0`, worst quality, breaks larger models, moderately fast (between `FP16` and `FP32`).
### 4. Run the model
**Requirements**: Python 3.x with [PyTorch](https://pytorch.org/get-started/locally/) and [tokenizers](https://pypi.org/project/tokenizers/).
**Note**: to run with the full (non-quantized) weights, pass the path of the non-quantized model file instead.
To generate some text, run:
```commandline
# Windows
python rwkv\generate_completions.py C:\rwkv.cpp-169M-Q4_1_O.bin
# Linux / MacOS
python rwkv/generate_completions.py ~/Downloads/rwkv.cpp-169M-Q4_1_O.bin
```
To chat with a bot, run:
```commandline
# Windows
python rwkv\chat_with_bot.py C:\rwkv.cpp-169M-Q4_1_O.bin
# Linux / MacOS
python rwkv/chat_with_bot.py ~/Downloads/rwkv.cpp-169M-Q4_1_O.bin
```
Edit [generate_completions.py](rwkv%2Fgenerate_completions.py) or [chat_with_bot.py](rwkv%2Fchat_with_bot.py) to change prompts and sampling settings.
---
Example of using `rwkv.cpp` in your custom Python script:
```python
import rwkv_cpp_model
import rwkv_cpp_shared_library
# Change to model paths used above (quantized or full weights)
model_path = r'C:\rwkv.cpp-169M.bin'
model = rwkv_cpp_model.RWKVModel(
rwkv_cpp_shared_library.load_rwkv_shared_library(),
model_path
)
logits, state = None, None
for token in [1, 2, 3]:
logits, state = model.eval(token, state)
print(f'Output logits: {logits}')
# Don't forget to free the memory after you've done working with the model
model.free()
```
| /rwkv_cpp_python-0.0.1.tar.gz/rwkv_cpp_python-0.0.1/README.md | 0.414188 | 0.940134 | README.md | pypi |
"""Generate text completions from an RWKV model given a fixed prompt."""

import argparse
import os
import pathlib
import time

import sampling
import tokenizers

import rwkv_cpp_model
import rwkv_cpp_shared_library

# ======================================== Script settings ========================================

# Prompt fed to the model once; each completion continues this text.
prompt: str = """# rwkv.cpp
This is a port of [BlinkDL/RWKV-LM](https://github.com/BlinkDL/RWKV-LM) to [ggerganov/ggml](https://github.com/ggerganov/ggml).
Besides usual **FP32**, it supports **FP16** and **quantized INT4** inference on CPU. This project is **CPU only**."""

# How many completions to generate.
generation_count: int = 3
# Token count per single completion.
tokens_per_generation: int = 100

# Sampling settings.
temperature: float = 0.8
top_p: float = 0.5

# =================================================================================================

parser = argparse.ArgumentParser(description='Generate completions from RWKV model based on a prompt')
parser.add_argument('model_path', help='Path to RWKV model in ggml format')
args = parser.parse_args()

assert prompt != '', 'Prompt must not be empty'

print('Loading 20B tokenizer')
# The tokenizer file is expected to sit next to this script.
tokenizer_path = pathlib.Path(os.path.abspath(__file__)).parent / '20B_tokenizer.json'
tokenizer = tokenizers.Tokenizer.from_file(str(tokenizer_path))

library = rwkv_cpp_shared_library.load_rwkv_shared_library()
print(f'System info: {library.rwkv_get_system_info_string()}')

print('Loading RWKV model')
model = rwkv_cpp_model.RWKVModel(library, args.model_path)

prompt_tokens = tokenizer.encode(prompt).ids
prompt_token_count = len(prompt_tokens)
print(f'{prompt_token_count} tokens in prompt')

# Feed the prompt once; the resulting logits/state are cloned per generation
# so the expensive prompt pass is not repeated.
init_logits, init_state = None, None

for token in prompt_tokens:
    # Passing the buffers back in lets eval() reuse them in place.
    init_logits, init_state = model.eval(token, init_state, init_state, init_logits)

for generation_index in range(generation_count):
    print(f'\n--- Generation {generation_index} ---\n')
    print(prompt, end='[')
    start = time.time()

    # Clone so each generation starts from the pristine post-prompt state.
    logits, state = init_logits.clone(), init_state.clone()

    for i in range(tokens_per_generation):
        token = sampling.sample_logits(logits, temperature, top_p)
        print(tokenizer.decode([token]), end='')
        logits, state = model.eval(token, state, state, logits)

    delay = time.time() - start
    print(']\n\nTook %.3f sec, %d ms per token' % (delay, delay / tokens_per_generation * 1000))
"""Terminal chat loop: user lines are appended to a running RWKV context and
the bot's reply is sampled until it emits a newline."""

import os
import sys
import argparse
import pathlib

import sampling
import tokenizers

import rwkv_cpp_model
import rwkv_cpp_shared_library

# ======================================== Script settings ========================================

# Copied from https://github.com/ggerganov/llama.cpp/blob/6e7801d08d81c931a5427bae46f00763e993f54a/prompts/chat-with-bob.txt
prompt: str = """Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.
User: Hello, Bob.
Bob: Hello. How may I help you today?
User: Please tell me the largest city in Europe.
Bob: Sure. The largest city in Europe is Moscow, the capital of Russia."""

# No trailing space here!
bot_message_prefix: str = 'Bob:'
user_message_prefix: str = 'User:'

max_tokens_per_generation: int = 100

# Sampling settings.
temperature: float = 0.8
top_p: float = 0.5

# =================================================================================================

parser = argparse.ArgumentParser(description='Provide terminal-based chat interface for RWKV model')
parser.add_argument('model_path', help='Path to RWKV model in ggml format')
args = parser.parse_args()

assert prompt != '', 'Prompt must not be empty'

print('Loading 20B tokenizer')
tokenizer_path = pathlib.Path(os.path.abspath(__file__)).parent / '20B_tokenizer.json'
tokenizer = tokenizers.Tokenizer.from_file(str(tokenizer_path))

library = rwkv_cpp_shared_library.load_rwkv_shared_library()
print(f'System info: {library.rwkv_get_system_info_string()}')

print('Loading RWKV model')
model = rwkv_cpp_model.RWKVModel(library, args.model_path)

prompt_tokens = tokenizer.encode(prompt).ids
prompt_token_count = len(prompt_tokens)
print(f'Processing {prompt_token_count} prompt tokens, may take a while')

logits, state = None, None
for token in prompt_tokens:
    # Reusing logits/state as the output buffers lets eval() update them in place.
    logits, state = model.eval(token, state, state, logits)

print('\nChat initialized! Write something and press Enter.')

while True:
    # Read user input
    print('> ', end='')
    user_input = sys.stdin.readline()

    if user_input == '':
        # readline() returns an empty string only on EOF (Ctrl+D / exhausted
        # pipe): exit cleanly instead of busy-looping forever.
        break

    # Process the input.
    # NOTE(review): user_input still carries its trailing newline, so the
    # encoded text contains a blank line before the bot prefix — presumably
    # intended; confirm before changing.
    new_tokens = tokenizer.encode('\n' + user_message_prefix + ' ' + user_input + '\n' + bot_message_prefix).ids

    for token in new_tokens:
        logits, state = model.eval(token, state, state, logits)

    # Generate and print bot response
    print(bot_message_prefix, end='')

    decoded = ''

    for i in range(max_tokens_per_generation):
        token = sampling.sample_logits(logits, temperature, top_p)
        decoded = tokenizer.decode([token])
        print(decoded, end='', flush=True)

        # A newline marks the end of the bot's turn.
        if '\n' in decoded:
            break

        logits, state = model.eval(token, state, state, logits)

    # Keep the terminal tidy if generation hit the token cap without a newline.
    if '\n' not in decoded:
        print()
"""Measure perplexity and per-token latency of an RWKV model on a text file."""

import os
import time
import pathlib
import argparse
import tokenizers
import torch
import rwkv_cpp_model
import rwkv_cpp_shared_library
from typing import List

def parse_args():
    """Parse command-line arguments.

    ``ignore_first_n_tokens`` is declared ``nargs='?'`` so the documented
    default of 1024 actually applies when the argument is omitted (the
    original set ``default=`` on a required positional, which argparse
    ignores).
    """
    parser = argparse.ArgumentParser(description='Measure perplexity and per-token latency of an RWKV model on a given text file')
    parser.add_argument('model_path', help='Path to model checkpoint file')
    parser.add_argument('text_path', help='Path to text file in UTF-8 encoding')
    parser.add_argument('ignore_first_n_tokens', help='How many tokens should be skipped before loss is measured', nargs='?', type=int, default=1024)
    return parser.parse_args()

args = parse_args()

# ---

print('Loading 20B tokenizer')
tokenizer_path: pathlib.Path = pathlib.Path(os.path.abspath(__file__)).parent / '20B_tokenizer.json'
tokenizer: tokenizers.Tokenizer = tokenizers.Tokenizer.from_file(str(tokenizer_path))

print('Loading text')
# read_text closes the file handle (the original leaked an open file object).
text: str = pathlib.Path(args.text_path).read_text(encoding='utf-8')

tokens: List[int] = tokenizer.encode(text).ids
token_count: int = len(tokens)
print(f'{token_count} tokens in the text')

assert token_count - args.ignore_first_n_tokens > 1, 'Need at least 2 tokens for evaluation'

# ---

def format_loss(loss: torch.Tensor) -> str:
    """Render a 1-D loss tensor as comma-separated values with 3 decimals."""
    return str(['%.3f' % (loss[i].item(),) for i in range(len(loss))]).replace('\'', '')[1:-1]

def format_loss_with_perplexity(loss: torch.Tensor) -> str:
    """Render the loss values together with the perplexity exp(loss[0])."""
    return f'loss [{format_loss(loss)}], perplexity {"%.3f" % (torch.exp(loss[0]).item(),)}'

# ---

model: rwkv_cpp_model.RWKVModel = rwkv_cpp_model.RWKVModel(
    rwkv_cpp_shared_library.load_rwkv_shared_library(),
    args.model_path
)

logits, state = None, None

loss_sum: torch.Tensor = torch.tensor([0.0])
loss_count: int = 0

start: float = time.time()

# Each step predicts the NEXT token, so the last token has no target.
run_count: int = token_count - 1

for i in range(run_count):
    token: int = tokens[i]
    target: int = tokens[i + 1]

    logits, state = model.eval(token, state, state, logits)

    # Skip the warm-up region before accumulating loss.
    if args.ignore_first_n_tokens == 0 or i + 1 >= args.ignore_first_n_tokens:
        losses = torch.tensor([
            torch.nn.functional.cross_entropy(logits, torch.tensor(target, dtype=torch.long), reduction='none').item()
        ])
        loss_sum += losses
        loss_count += 1

    # Progress report every 10 tokens.
    if i % 10 == 0:
        duration: float = time.time() - start
        duration_per_token: float = duration / (i + 1)
        runs_remaining: int = run_count - i - 1
        duration_remaining: int = int(runs_remaining * duration_per_token)

        print(f'Token #{i}/{token_count}, '
              f'{int(100.0 * i / token_count)}%, '
              f'ETA {duration_remaining // 60} m {duration_remaining % 60} s', end='')

        if loss_count > 0:
            # The running average is only defined once a loss was accumulated.
            print(f', averages so far: {format_loss_with_perplexity(loss_sum / loss_count)}')
        else:
            print()

print()
print(f'Average latency: {int((time.time() - start) * 1000 / run_count)} ms per token')
print()
print(f'Model: {os.path.basename(args.model_path)}, '
      f'data: {os.path.basename(args.text_path)} with {token_count} tokens, '
      f'skipped {args.ignore_first_n_tokens} tokens, '
      f'averages: {format_loss_with_perplexity(loss_sum / loss_count)}')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.