python_code stringlengths 0 229k |
|---|
import argparse
import logging
import os
import unittest
from interactive_asr.utils import setup_asr, transcribe_file
class ASRTest(unittest.TestCase):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
arguments_dict = {
"path": "/scratch/jamarshon/downloads/model.pt",
"... |
#!/usr/bin/env python3
"""This is the preprocessing script for HuBERT model training.
The script includes:
- File list creation
- MFCC/HuBERT feature extraction
- KMeans clustering model training
- Pseudo-label generation
"""
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from ... |
from .common_utils import create_tsv
from .feature_utils import dump_features
from .kmeans import learn_kmeans, get_km_label
__all__ = [
"create_tsv",
"dump_features",
"learn_kmeans",
"get_km_label",
]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
import logging
from pathlib import Path
from typing import (
Tuple,
)
import jobli... |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
import logging
from pathlib import Path
from typing import (
Tuple,
Union,
)
i... |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
"""
Data pre-processing: create tsv files for training (and valiation).
"""
import logg... |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions... |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions... |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions... |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions... |
"""
Text-to-speech pipeline using Tacotron2.
"""
from functools import partial
import argparse
import os
import random
import sys
import torch
import torchaudio
import numpy as np
from torchaudio.models import Tacotron2
from torchaudio.models import tacotron2 as pretrained_tacotron2
from utils import prepare_input_s... |
# *****************************************************************************
# Copyright (c) 2017 Keith Ito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including... |
# *****************************************************************************
# Copyright (c) 2017 Keith Ito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including... |
from . import utils, vad
__all__ = ['utils', 'vad']
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Run infe... |
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
im... |
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Followin... |
#!/usr/bin/env python3
"""Launch souce separation training.
This script runs training in Distributed Data Parallel (DDP) framework and has two major
operation modes. This behavior depends on if `--worker-id` argument is given or not.
1. (`--worker-id` is not given) Launchs worker subprocesses that performs the actual... |
#!/usr/bin/env python3
# pyre-strict
from pathlib import Path
from argparse import ArgumentParser
from typing import (
Any,
Callable,
Dict,
Mapping,
List,
Optional,
Tuple,
TypedDict,
Union,
)
import torch
import torchaudio
from pytorch_lightning import LightningModule, Trainer
from... |
from argparse import ArgumentParser
from pathlib import Path
from lightning_train import _get_model, _get_dataloader, sisdri_metric
import mir_eval
import torch
def _eval(model, data_loader, device):
results = torch.zeros(4)
with torch.no_grad():
for _, batch in enumerate(data_loader):
mi... |
import math
from typing import Optional
from itertools import permutations
import torch
def sdr(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8
) -> torch.Tensor:
"""Computes source-to-distortion ratio.
1. scale the... |
from . import (
dataset,
dist_utils,
metrics,
)
__all__ = ['dataset', 'dist_utils', 'metrics']
|
import os
import csv
import types
import logging
import torch
import torch.distributed as dist
def _info_on_master(self, *args, **kwargs):
    """Forward to ``self.info`` only on the rank-0 (master) process.

    Intended to be bound onto a ``logging.Logger`` so that distributed
    workers do not emit duplicate log lines.
    """
    # Non-zero ranks return early and stay silent.
    if dist.get_rank() != 0:
        return
    self.info(*args, **kwargs)
def getLogger(name):
"""Get logging.Logger module with additional ``info_on_master`` method."""
logger =... |
from . import utils, wsj0mix
__all__ = ['utils', 'wsj0mix']
|
from typing import List
from functools import partial
from collections import namedtuple
from torchaudio.datasets import LibriMix
import torch
from . import wsj0mix
Batch = namedtuple("Batch", ["mix", "src", "mask"])
def get_dataset(dataset_type, root_dir, num_speakers, sample_rate, task=None, librimix_tr_split=No... |
from pathlib import Path
from typing import Union, Tuple, List
import torch
from torch.utils.data import Dataset
import torchaudio
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class WSJ0Mix(Dataset):
"""Create a Dataset for wsj0-mix.
Args:
root (str or Path): Path to the directory whe... |
from . import (
train,
trainer
)
__all__ = ['train', 'trainer']
|
#!/usr/bin/env python3
"""Train Conv-TasNet"""
import time
import pathlib
import argparse
import torch
import torchaudio
import torchaudio.models
import conv_tasnet
from utils import dist_utils
from utils.dataset import utils as dataset_utils
_LG = dist_utils.getLogger(__name__)
def _parse_args(args):
parser =... |
import time
from typing import Tuple
from collections import namedtuple
import torch
import torch.distributed as dist
from utils import dist_utils, metrics
_LG = dist_utils.getLogger(__name__)
Metric = namedtuple("SNR", ["si_snri", "sdri"])
Metric.__str__ = (
lambda self: f"SI-SNRi: {self.si_snri:10.3e}, SDRi: ... |
import torch
class Normalize(torch.nn.Module):
    """Standardize the input along its last dimension.

    Subtracts the per-row mean and divides by the per-row standard
    deviation, both computed over the final axis with ``keepdim=True``
    so the result broadcasts back to the input shape.
    """

    def forward(self, tensor):
        mean = tensor.mean(-1, keepdim=True)
        std = tensor.std(-1, keepdim=True)
        return (tensor - mean) / std
class UnsqueezeFirst(torch.nn.Module):
    """Insert a size-1 leading dimension into the input tensor.

    Equivalent to ``tensor.unsqueeze(0)``; commonly used to add a
    batch/channel axis in a transform pipeline.
    """

    def forward(self, tensor):
        # Indexing with None prepends a singleton dimension.
        return tensor[None]
|
import torch
from torchaudio.datasets import LIBRISPEECH
class MapMemoryCache(torch.utils.data.Dataset):
"""
Wrap a dataset so that, whenever a new item is returned, it is saved to memory.
"""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
... |
import json
import logging
import os
import shutil
from collections import defaultdict
import torch
class MetricLogger(defaultdict):
def __init__(self, name, print_freq=1, disable=False):
super().__init__(lambda: 0.0)
self.disable = disable
self.print_freq = print_freq
self._iter ... |
from torch import topk
class GreedyDecoder:
def __call__(self, outputs):
"""Greedy Decoder. Returns highest probability of class labels for each timestep
Args:
outputs (torch.Tensor): shape (input length, batch size, number of classes (including blank))
Returns:
t... |
import collections
import itertools
class LanguageModel:
def __init__(self, labels, char_blank, char_space):
self.char_space = char_space
self.char_blank = char_blank
labels = list(labels)
self.length = len(labels)
enumerated = list(enumerate(labels))
flipped = [(... |
import argparse
import logging
import os
import string
from datetime import datetime
from time import time
import torch
import torchaudio
from torch.optim import SGD, Adadelta, Adam, AdamW
from torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau
from torch.utils.data import DataLoader
from torchaudio.data... |
# -*- coding: utf-8 -*-
"""
Audio Resampling
================
Here, we will walk through resampling audio waveforms using ``torchaudio``.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torc... |
"""
Speech Recognition with Wav2Vec2
================================
**Author**: `Moto Hira <moto@fb.com>`__
This tutorial shows how to perform speech recognition using using
pre-trained models from wav2vec 2.0
[`paper <https://arxiv.org/abs/2006.11477>`__].
"""
###################################################... |
# -*- coding: utf-8 -*-
"""
Audio I/O
=========
``torchaudio`` integrates ``libsox`` and provides a rich set of audio I/O.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio boto3
import torch
import torchaudio
print(torch.__version__)
pri... |
"""
Forced Alignment with Wav2Vec2
==============================
**Author** `Moto Hira <moto@fb.com>`__
This tutorial shows how to align transcript to speech with
``torchaudio``, using CTC segmentation algorithm described in
`CTC-Segmentation of Large Corpora for German End-to-end Speech
Recognition <https://arxiv.o... |
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(tor... |
"""
Text-to-Speech with Tacotron2
=============================
**Author** `Yao-Yuan Yang <https://github.com/yangarbiter>`__,
`Moto Hira <moto@fb.com>`__
"""
######################################################################
# Overview
# --------
#
# This tutorial shows how to build text-to-speech pipeline, usi... |
# -*- coding: utf-8 -*-
"""
Audio Feature Extractions
=========================
``torchaudio`` implements feature extractions commonly used in the audio
domain. They are available in ``torchaudio.functional`` and
``torchaudio.transforms``.
``functional`` implements features as standalone functions.
They are stateless... |
"""
MVDR with torchaudio
====================
**Author** `Zhaoheng Ni <zni@fb.com>`__
"""
######################################################################
# Overview
# --------
#
# This is a tutorial on how to apply MVDR beamforming by using `torchaudio <https://github.com/pytorch/audio>`__.
#
# Steps
#
# - Id... |
# -*- coding: utf-8 -*-
"""
Audio Datasets
==============
``torchaudio`` provides easy access to common, publicly accessible
datasets. Please refer to the official documentation for the list of
available datasets.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# ... |
# -*- coding: utf-8 -*-
"""
Audio Data Augmentation
=======================
``torchaudio`` provides a variety of ways to augment audio data.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio
import torch
import torchaudio
import torchaudio... |
import random
import torch
from torch.utils.data.dataset import random_split
from torchaudio.datasets import LJSPEECH, LIBRITTS
from torchaudio.transforms import MuLawEncoding
from processing import bits_to_normalized_waveform, normalized_waveform_to_bits
class MapMemoryCache(torch.utils.data.Dataset):
r"""Wrap... |
# *****************************************************************************
# Copyright (c) 2019 fatchord (https://github.com/fatchord)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software w... |
import logging
import os
import shutil
from collections import defaultdict, deque
import torch
class MetricLogger:
r"""Logger for model metrics
"""
def __init__(self, group, print_freq=1):
self.print_freq = print_freq
self._iter = 0
self.data = defaultdict(lambda: deque(maxlen=se... |
import argparse
import torch
import torchaudio
from torchaudio.transforms import MelSpectrogram
from torchaudio.models import wavernn
from torchaudio.models.wavernn import _MODEL_CONFIG_AND_URLS
from torchaudio.datasets import LJSPEECH
from wavernn_inference_wrapper import WaveRNNInferenceWrapper
from processing impo... |
import math
import torch
from torch import nn as nn
from torch.nn import functional as F
class LongCrossEntropyLoss(nn.Module):
r""" CrossEntropy loss
"""
def __init__(self):
super(LongCrossEntropyLoss, self).__init__()
def forward(self, output, target):
output = output.transpose(1,... |
import torch
import torch.nn as nn
class NormalizeDB(nn.Module):
r"""Normalize the spectrogram with a minimum db value
"""
def __init__(self, min_level_db, normalization):
super().__init__()
self.min_level_db = min_level_db
self.normalization = normalization
def forward(self,... |
import argparse
import logging
import os
from collections import defaultdict
from datetime import datetime
from time import time
from typing import List
import torch
import torchaudio
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchaudio.datasets.utils import bg_iterator
from torchaudio.... |
"""
This script finds the merger responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'. If there exists no PR associated with the commit or the PR is properly labeled,
this script is a no-op.
Note: we ping the merger only, not the reviewers, as the reviewers can ... |
# -*- coding: utf-8 -*-
import math
import warnings
from typing import Callable, Optional
import torch
from torch import Tensor
from torchaudio import functional as F
from .functional.functional import (
_get_sinc_resample_kernel,
_apply_sinc_resample_kernel,
)
__all__ = [
'Spectrogram',
'InverseSpe... |
# To use this file, the dependency (https://github.com/vesis84/kaldi-io-for-python)
# needs to be installed. This is a light wrapper around kaldi_io that returns
# torch.Tensors.
from typing import Any, Callable, Iterable, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mo... |
from torchaudio import _extension # noqa: F401
from torchaudio import (
compliance,
datasets,
functional,
models,
pipelines,
kaldi_io,
utils,
sox_effects,
transforms,
)
from torchaudio.backend import (
list_audio_backends,
get_audio_backend,
set_audio_backend,
)
try:
... |
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
def _init_extension():
if not _mod_utils.is_module_available('torchaudio._torchaudio'):
warnings.warn('torchaudio C++ extension is not available.')
return
... |
from torch.hub import load_state_dict_from_url, download_url_to_file
__all__ = [
"load_state_dict_from_url",
"download_url_to_file",
]
|
import warnings
import importlib.util
from typing import Optional
from functools import wraps
import torch
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. ... |
import os
from typing import Tuple, Optional, Union
from pathlib import Path
import torchaudio
from torch.utils.data import Dataset
from torch import Tensor
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
FOLDER_IN_ARCHIVE = "SpeechCommands"
URL = "speech_commands_v0.02"
HASH_DIVIDER ... |
from pathlib import Path
from typing import Dict, Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
from torchaudio.datasets.utils import (
download_url,
extract_archive,
validate_file,
)
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zi... |
import os
import re
from pathlib import Path
from typing import Iterable, Tuple, Union, List
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url
_CHECKSUMS = {
"http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b":
"825f4ebd9183f2417df9f067a9cabe86",
"htt... |
import os
from pathlib import Path
from typing import Tuple, Optional, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
# The following lists prefixed with `filtered_` provide a filtered split
# that:... |
import os
import csv
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "aew"
FOLDER_IN_ARCHIVE = "ARCTIC"
_CHECKSUMS = {
"http://festvox.org... |
import csv
import os
from pathlib import Path
from typing import List, Dict, Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
def load_commonvoice_item(line: List[str],
header: List[str],
path: str,
... |
from .commonvoice import COMMONVOICE
from .librispeech import LIBRISPEECH
from .speechcommands import SPEECHCOMMANDS
from .vctk import VCTK_092
from .dr_vctk import DR_VCTK
from .gtzan import GTZAN
from .yesno import YESNO
from .ljspeech import LJSPEECH
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from... |
import os
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriTTS"
_CHECKSUMS = {
"http://www.open... |
import os
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "TEDLIUM_release1",
... |
import hashlib
import logging
import os
import tarfile
import urllib
import urllib.request
import zipfile
from typing import Any, Iterable, List, Optional
from torch.utils.model_zoo import tqdm
def stream_url(url: str,
start_byte: Optional[int] = None,
block_size: int = 32 * 1024,
... |
import os
import csv
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torchaudio.datasets.utils import download_url, extract_archive
from torch import Tensor
from torch.utils.data import Dataset
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "... |
import os
from pathlib import Path
from typing import List, Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",... |
import os
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriSpeech"
_CHECKSUMS = {
"http://www.o... |
from pathlib import Path
from typing import Union, Tuple, List
import torch
from torch.utils.data import Dataset
import torchaudio
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the LibriMix dataset.
Args:
root (str or Path): The path to the directory... |
import os
from typing import Tuple
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip"
_CHECKSUMS = {
"https://datash... |
from ._wav2vec2.impl import (
Wav2Vec2Bundle,
Wav2Vec2ASRBundle,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_... |
def _get_en_labels():
return (
'|',
'E',
'T',
'A',
'O',
'N',
'I',
'H',
'S',
'R',
'D',
'L',
'U',
'M',
'W',
'C',
'F',
'G',
'Y',
'P',
'B',
'V',... |
from dataclasses import dataclass
from typing import Dict, Tuple, Any
import torch
from torchaudio._internal import load_state_dict_from_url
from torchaudio.models import wav2vec2_model, Wav2Vec2Model
from . import utils
__all__ = []
@dataclass
class Wav2Vec2Bundle:
"""torchaudio.pipelines.Wav2Vec2Bundle()
... |
from abc import ABC, abstractmethod
from typing import Union, List, Tuple, Optional
from torch import Tensor
from torchaudio.models import Tacotron2
class _TextProcessor(ABC):
@property
@abstractmethod
def tokens(self):
"""The tokens that the each value in the processed tensor represent.
... |
from .interface import Tacotron2TTSBundle
from .impl import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
)
__all__ = [
'Tacotron2TTSBundle',
'TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH',
'TACOTRON2_GRI... |
import os
import logging
import torch
from torchaudio._internal import (
download_url_to_file,
module_utils as _mod_utils,
)
def _get_chars():
return (
'_',
'-',
'!',
"'",
'(',
')',
',',
'.',
':',
';',
'?',
'... |
from dataclasses import dataclass
import re
from typing import Union, Optional, Dict, Any, Tuple, List
import torch
from torch import Tensor
from torchaudio._internal import load_state_dict_from_url
from torchaudio.models import Tacotron2, WaveRNN
from torchaudio.functional import mu_law_decoding
from torchaudio.tran... |
from . import (
sox_utils,
)
from torchaudio._internal import module_utils as _mod_utils
# When the sox extension is present, lower libsox's verbosity to level 1
# at import time (presumably errors-only — confirm against sox docs) so
# routine sox chatter does not pollute stderr.
if _mod_utils.is_sox_available():
    sox_utils.set_verbosity(1)
|
from typing import List, Dict
import torch
from torchaudio._internal import module_utils as _mod_utils
@_mod_utils.requires_sox()
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): seed value. valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
t... |
# flake8: noqa
from . import utils
from .utils import (
list_audio_backends,
get_audio_backend,
set_audio_backend,
)
utils._init_audio_backend()
|
import os
from typing import Tuple, Optional
import torch
from torchaudio._internal import (
module_utils as _mod_utils,
)
import torchaudio
from .common import AudioMetaData
@_mod_utils.requires_sox()
def info(
filepath: str,
format: Optional[str] = None,
) -> AudioMetaData:
"""Get signal i... |
class AudioMetaData:
"""Return type of ``torchaudio.info`` function.
This class is used by :ref:`"sox_io" backend<sox_io_backend>` and
:ref:`"soundfile" backend with the new interface<soundfile_backend>`.
:ivar int sample_rate: Sample rate
:ivar int num_frames: The number of frames
:ivar int n... |
"""Defines utilities for switching audio backends"""
import warnings
from typing import Optional, List
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import (
no_backend,
sox_io_backend,
soundfile_backend,
)
__all__ = [
'list_audio_backends',
'get_audio_backen... |
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
from torch import Tensor
def load(filepath: Union[str, Path],
out: Optional[Tensor] = None,
normalization: Union[bool, float, Callable] = True,
channels_first: bool = True,
num_frames: int = 0,
o... |
"""The new soundfile backend which will become default in 0.8.0 onward"""
from typing import Tuple, Optional
import warnings
import torch
from torchaudio._internal import module_utils as _mod_utils
from .common import AudioMetaData
if _mod_utils.is_soundfile_available():
import soundfile
# Mapping from soundfil... |
from torch import Tensor
from torch import nn
__all__ = [
"Wav2Letter",
]
class Wav2Letter(nn.Module):
r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech
Recognition System* [:footcite:`collobert2016wav2letter`].
:math:`\text{padding} = \frac{\text{ceil}(\text{ke... |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions... |
"""Implements Conv-TasNet with building blocks of it.
Based on https://github.com/naplab/Conv-TasNet/tree/e66d82a8f956a69749ec8a4ae382217faa097c5c
"""
from typing import Tuple, Optional
import torch
class ConvBlock(torch.nn.Module):
"""1D Convolutional block.
Args:
io_channels (int): The number of... |
from .wav2letter import Wav2Letter
from .wavernn import WaveRNN
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .tacotron2 import Tacotron2
from .wav2vec2 import (
Wav2Vec2Model,
wav2vec2_model,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hub... |
import torch
__all__ = ["DeepSpeech"]
class FullyConnected(torch.nn.Module):
"""
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
"""
def __init__(self,
n_feature: int,
n_hidden: int,
dropout: float,
... |
from typing import List, Tuple, Optional
import math
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
__all__ = [
"ResBlock",
"MelResNet",
"Stretch2d",
"UpsampleNetwork",
"WaveRNN",
]
class ResBlock(nn.Module):
r"""ResNet block based on *Efficient Ne... |
from .model import (
Wav2Vec2Model,
wav2vec2_model,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hubert_large,
hubert_xlarge,
)
from . import utils
__all__ = [
'Wav2Vec2Model',
'wav2vec2_model',
'wav2vec2_base',
'wav2vec2_large',
'wav2vec2_large_... |
from typing import Optional, Tuple, List
import torch
from torch import Tensor
from torch.nn import Module
from . import components
class Wav2Vec2Model(Module):
"""torchaudio.models.Wav2Vec2Model(feature_extractor: torch.nn.Module, encoder: torch.nn.Module, aux: Optional[torch.nn.Module] = None)
Encoder mo... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.