| id | prompt | docstring |
|---|---|---|
181,021 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def create_seat_dict(detail, save_dict):
""" 构建座位层级信息 """
floor = detail.get('fn')
row = detail.get('x')
col = detail.get('y')
sid = detail.get('sid')
if floor not in save_dict:
save_dict.update({floor: {}})
if row not in save_dict.get(floor):
save_dict[floor].update({row: {}})
if col not in save_dict.get(floor).get(row):
save_dict[floor][row].update({col: sid})
The provided code snippet includes necessary dependencies for implementing the `format_valuable_seatid` function. Write a Python function `def format_valuable_seatid(all_seats_info, valuable_seats_info, price_id)` to solve the following problem:
Format seatid-related information
Here is the function:
def format_valuable_seatid(all_seats_info, valuable_seats_info, price_id):
""" 格式化 seatid 相关信息 """
sid2coordinate = {}
coordinate2sid = {}
for detail in all_seats_info.get('seats'):
create_seat_dict(detail, coordinate2sid)
sid2coordinate.update({
detail.get('sid'): {
'sid': detail.get('sid'),
'plid': detail.get('plid'),
'fn': detail.get('fn'),
'x': detail.get('x'),
'y': detail.get('y')
}})
if 'noseat' in valuable_seats_info:
# remove seats that are not available
noseat_data = valuable_seats_info.get('noseat')
for line in noseat_data:
sid = line.get('sid')
floor = sid2coordinate.get(sid).get('fn')
row = sid2coordinate.get(sid).get('x')
col = sid2coordinate.get(sid).get('y')
coordinate2sid[floor][row].pop(col)
# keep only seats whose price level matches the requested price_id
for detail in sid2coordinate.values():
if detail.get('plid') != price_id:
floor = detail.get('fn')
row = detail.get('x')
col = detail.get('y')
coordinate2sid[floor][row].pop(col, None)
return coordinate2sid
else:
valuable_sid = {}
seat_data = valuable_seats_info.get('seat')
for line in seat_data:
sid = line.get('sid')
detail = sid2coordinate.get(sid)
if detail.get('plid') == price_id:
create_seat_dict(detail, valuable_sid)
return valuable_sid | Format seatid-related information |
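A minimal usage sketch for the two helpers above; every seat record and the price-level id `'P1'` below are invented, but the field names (`sid`, `plid`, `fn`, `x`, `y`) follow the snippet:
```python
# Hypothetical seat data -- all values are made up for illustration.
all_seats_info = {
    'seats': [
        {'sid': 'A1', 'plid': 'P1', 'fn': 1, 'x': 1, 'y': 1},
        {'sid': 'A2', 'plid': 'P1', 'fn': 1, 'x': 1, 'y': 2},
        {'sid': 'B1', 'plid': 'P2', 'fn': 1, 'x': 2, 'y': 1},
    ]
}
# All three seats are still on sale, so they appear under the 'seat' key.
valuable_seats_info = {'seat': [{'sid': 'A1'}, {'sid': 'A2'}, {'sid': 'B1'}]}

seats = format_valuable_seatid(all_seats_info, valuable_seats_info, price_id='P1')
# Only seats with plid == 'P1' survive, nested as floor -> row -> col -> sid:
# {1: {1: {1: 'A1', 2: 'A2'}}}
```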
181,022 | import re
import os
import json
import execjs
import pickle
import platform
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
The provided code snippet includes necessary dependencies for implementing the `pick_seat` function. Write a Python function `def pick_seat(valuable_seat, stand_id, buy_nums)` to solve the following problem:
A simple implementation of seat selection
Here is the function:
def pick_seat(valuable_seat, stand_id, buy_nums):
""" 简单实现选取座位信息 """
selected_seats = []
for floor, floor_info in valuable_seat.items():
for row, row_info in floor_info.items():
for col, sid in row_info.items():
selected_seats.append({'seatId': sid, 'standId': stand_id})
if len(selected_seats) == buy_nums:
return selected_seats | A simple implementation of seat selection |
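Continuing the invented example above, `pick_seat` simply walks the nested dict until it has collected `buy_nums` seats (the stand id `'S1'` is a placeholder):
```python
valuable_seat = {1: {1: {1: 'A1', 2: 'A2'}}}
picked = pick_seat(valuable_seat, stand_id='S1', buy_nums=2)
# -> [{'seatId': 'A1', 'standId': 'S1'}, {'seatId': 'A2', 'standId': 'S1'}]
```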
181,023 | from .iterators import create_source_iterator, CheckpointableIterator, SelectManyIterator, PrefetchIterator, BufferedShuffleIterator, BlockwiseShuffleIterator, MapIterator
from typing import List, Union, Iterable, Iterator, Callable, Any, Optional, Dict
import collections.abc, copy, multiprocessing, os, sys
from random import Random
def bump_seed(seed: Optional[int], step = 1):
"""
Helper to bump a random seed if not None.
"""
return None if seed is None else seed + 1
class CheckpointableIterator(collections.abc.Iterator):
"""
Abstract base class that defines the interface for checkpointing.
The interface (getstate, setstate) is inspired by Python's random package.
"""
def __iter__(self) -> 'CheckpointableIterator':
return self
def getstate(self) -> Dict:
"""
Get checkpoint of current state of iterator
In a pipeline of iterators, this function __recursively__ calls itself on the preceding iterator
and includes the gathered information in the returned checkpoint.
Thereby, to obtain a checkpoint of the state of an entire pipeline of iterators
you only have to call this function on the __last__ iterator in the pipeline.
A checkpoint is represented as a `dict`,
but the caller should treat a checkpoint as an opaque object
and not make any assumptions about the existence or meaning of the `dict` entries.
"""
pass
def setstate(self, checkpoint: Optional[Dict]):
"""
Set state of iterator to given checkpoint
In a pipeline of iterators, this function __recursively__ calls itself on the preceding iterator.
Thereby, to set the state of an entire pipeline of iterators to a given checkpoint
you only have to call this function on the __last__ iterator in the pipeline.
Args:
checkpoint: Checkpoint that should be used to reset the state of the iterator (or pipeline).
If this is __None__, the state of the iterator (or pipeline) is reset to the initial
state immediately after construction.
"""
pass
def __getstate__(self) -> Dict: # implementation of pickle Protocol
return self.getstate()
def __setstate__(self, checkpoint: Optional[Dict]):
self.setstate(checkpoint)
def __next__(self):
pass
def close(self):
"""
Close all PrefetchIterators in this pipeline
PrefetchIterators have internal resources that need to be properly managed by calling close() manually.
Failure to do so can lead to dangling processes and threads, or the PrefetchIterator hanging on finalization.
Note that it is not correct to rely on the garbage collector to destroy PrefetchIterators
as CPython does not assure that the finalizer (__del__) of a PrefetchIterator will be called.
This function, which is implemented for every CheckpointableIterator, recursively traverses all preceding
iterators and closes all PrefetchIterators in the pipeline.
For pipelines that do not contain PrefetchIterators this function has no effect.
"""
pass
def create_source_iterator(source_items: List, train: bool=True, seed: Optional[int]=None, shuffle: bool=True, num_instances: int=1, instance_rank: int=0) -> CheckpointableIterator:
if not train and shuffle:
raise ValueError('shuffling is not supported when train=False')
if train:
return InfinitePermutationSourceIterator(source_items, seed=seed, shuffle=shuffle, num_instances=num_instances, instance_rank=instance_rank)
else:
return ChunkedSourceIterator(source_items, num_instances=num_instances, instance_rank=instance_rank)
class SelectManyIterator(CheckpointableIterator):
"""
Projects each element of a source sequence to a sequence and flattens the resulting sequences into one sequence.
"""
def __init__(self, source_iterator: CheckpointableIterator, collection_selector: Optional[Callable[[Any], Iterator]]=None):
"""
Args:
source_iterator: iterator over the items to pass to collection_selector()
collection_selector: user callback that maps an item into an Iterable, whose items will be yielded.
The returned Iterator is used only once. Hence, it is also allowed to
return self-iterables, such as iterators and generator expressions.
If None is given, no callback is applied.
"""
if not isinstance(source_iterator, CheckpointableIterator):
raise ValueError('source_iterator has to be a CheckpointableIterator')
self._source_iterator = source_iterator # type: CheckpointableIterator
self._collection_selector = collection_selector # type: Optional[Callable[[Any], Iterator]]
self.setstate(None)
def getstate(self) -> Dict:
return {'source_state': self._source_state,
'flattened_items_yielded': self._flattened_items_yielded}
def setstate(self, checkpoint: Optional[Dict]):
self._source_state = checkpoint['source_state'] if checkpoint else None
self._flattened_items_yielded = checkpoint['flattened_items_yielded'] if checkpoint else 0
self._source_iterator.setstate(self._source_state)
def _generate():
skip_to_checkpoint = self._flattened_items_yielded
# main loop over source_items
for source_item in self._source_iterator:
if self._collection_selector is not None:
data = iter(self._collection_selector(source_item))
else:
data = iter(source_item)
self._flattened_items_yielded = 0
if skip_to_checkpoint:
#print("Skipping to index", skip_to_checkpoint, file=sys.stderr)
self._flattened_items_yielded += _advance_iterator(data, skip_to_checkpoint)
skip_to_checkpoint = 0
# main loop over lines
for item in data:
self._flattened_items_yielded += 1
yield item
self._source_state = self._source_iterator.getstate()
self._iterator = _generate()
def __next__(self):
return next(self._iterator)
def close(self):
self._source_iterator.close()
class BufferedShuffleIterator(CheckpointableIterator):
"""
Shuffles given iterable using a limited buffer.
"""
def __init__(self, source_iterator: CheckpointableIterator, buffer_size: int, seed: int=0):
"""
Args:
source_iterator: checkpointable iterator or restartable iterable over input items to shuffle
buffer_size: size of the buffer in number of items used for shuffling
seed: random seed used for shuffling (or None)
"""
if not isinstance(source_iterator, CheckpointableIterator):
raise ValueError('source_iterator has to be a CheckpointableIterator')
self._source_iterator = source_iterator
self._buffer_size = buffer_size
self._seed = seed
self.setstate(None)
def getstate(self) -> Dict:
return {'source_state': self._source_iterator.getstate(),
'buffer': copy.deepcopy(self._buffer), # create deepcopy so that iterator cannot modify checkpoint after it was taken
'random_state': self._random.getstate()}
def setstate(self, checkpoint: Optional[Dict]):
if checkpoint:
self._source_iterator.setstate(checkpoint['source_state'])
self._buffer = copy.deepcopy(checkpoint['buffer']) # create deepcopy so that iterator cannot modify checkpoint
self._random.setstate(checkpoint['random_state'])
# @TODO: Can we add a comment how the flush part is handled?
else:
self._source_iterator.setstate(None)
self._buffer = [None for _ in range(self._buffer_size)]
self._random = Random(self._seed) # type: Random
self._iterator = self._generate()
def _generate(self) -> Iterator:
# shuffle data with a buffer:
# this is similar to what the Fisher-Yates shuffle does,
# but modified to run with a constant-size buffer
# see https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
# this was inspired by an algorithm implemented in Kaldi
# see https://kaldi-asr.org/doc/nnet-shuffle-egs_8cc.html
for item in self._source_iterator:
index = self._random.randrange(0, len(self._buffer))
result = None
if self._buffer[index] is not None:
result = self._buffer[index]
self._buffer[index] = item
# only yield value once buffer is updated to allow for correct checkpointing!
if result is not None:
yield result
# flush buffer
while self._buffer:
item = self._buffer.pop()
if item is not None:
yield item
def __next__(self):
return next(self._iterator)
def close(self):
self._source_iterator.close()
class MapIterator(CheckpointableIterator):
"""
Applies given transform to each data item
"""
def __init__(self, source_iterator: CheckpointableIterator, transform: Callable[[str],Any]):
"""
Args:
source_iterator: checkpointable iterator
transform: function to be applied to each data item
"""
if not isinstance(source_iterator, CheckpointableIterator):
raise ValueError('source_iterator has to be a CheckpointableIterator')
self._source_iterator = source_iterator
self._transform = transform
def getstate(self) -> Dict:
return self._source_iterator.getstate()
def setstate(self, checkpoint: Optional[Dict]):
self._source_iterator.setstate(checkpoint)
def __next__(self):
return self._transform(next(self._source_iterator))
def close(self):
self._source_iterator.close()
def BlockwiseShuffleIterator(source_iterator: CheckpointableIterator, block_size: int, seed: int=0):
"""
Shuffles a sequence of items by grouping consecutive items in blocks of fixed size, shuffling
each block, and yielding the shuffled items of all blocks as a flat sequence.
E.g. [1, 2, 3, 4, 5, 6, 7, 8] with block_size = 3 may yield [3, 1, 2, 4, 6, 5, 8, 7].
Args:
source_iterator: checkpointable iterator or restartable iterable over input items to shuffle
block_size: size of the buffer in number of items used for shuffling
seed: random seed used for shuffling (or None)
"""
# This is implemented as a pipeline:
# - group N consecutive items together
# - shuffle them
# - flatten the result
blocks = FixedBatchIterator(source_iterator, batch_size=block_size)
def shuffle_block_fn(random: Random, block: List):
random.shuffle(block)
return block
shuffled_blocks = SamplingRandomMapIterator(blocks, transform=shuffle_block_fn, seed=seed)
samples = SelectManyIterator(shuffled_blocks, collection_selector=lambda shuffled_block: iter(shuffled_block))
return samples
def PrefetchIterator(source_iterator: CheckpointableIterator, buffer_size: int, buffer_in_main_process:bool=False, log_empty_buffer_warning: bool=False):
"""
An iterator prefetching data into a buffer on a separate process.
Args:
source_iterator: checkpointable iterator to recur over
buffer_size: number of items to prefetch; this is the maximum number of items held in the prefetch queue
buffer_in_main_process: use experimental version of PrefetchBuffer that has buffer in main process instead of prefetch process
log_empty_buffer_warning: log warning message if prefetch buffer is empty, only supported if buffer_in_main_process=True
"""
if not isinstance(source_iterator, CheckpointableIterator):
raise ValueError('source_iterator has to be a CheckpointableIterator')
if buffer_size <= 0:
raise ValueError('buffer_size must be positive')
if multiprocessing.get_start_method() != 'fork':
print('WARNING: \
PrefetchIterator is only supported on operating systems that use fork to create new processes.\
This excludes Windows.\
A dummy iterator is inserted instead of a PrefetchIterator.\
This also means that checkpoints of this iterator pipeline cannot be ported to a system that uses fork.')
return source_iterator
else:
if buffer_in_main_process:
return _ForkPrefetchIteratorExperimental(source_iterator, buffer_size, log_empty_buffer_warning)
else:
return _ForkPrefetchIterator(source_iterator, buffer_size)
The provided code snippet includes necessary dependencies for implementing the `chunked_dataset_iterator` function. Write a Python function `def chunked_dataset_iterator(chunk_refs: List, read_chunk_fn: Callable[[Any], Iterator], buffer_size: int, train: bool=True, seed: Optional[int]=None, shuffle: bool=True, use_windowed: bool=False, transform: Callable[[Any],Any]=None, prefetch: bool=False, num_instances: int=1, instance_rank: int=0) -> CheckpointableIterator` to solve the following problem:
Dataset reading data from gzipped chunks. If train=True, the chunks are assigned to instances in strides and the data is infinitely repeated in permutations. Otherwise, the chunks are split among the instances in consecutive blocks and the data is not repeated. This way, when using this dataset for inference on multiple GPUs, to order the outputs in a way that corresponds to the original order of the data items in the dataset, one simply has to collect the lists of outputs from each GPU and then concatenate these lists in order of increasing rank. When using MPI, this can be achieved by a gather-operation to get a list of lists of outputs, one list per GPU, followed by flattening the lists back into a single list. Args: chunk_refs: references (such as path names) to chunk files read_chunk_fn: function(chunk_ref) -> Iterator to read a chunk's content into an iterator over its items, e.g. read a file and split into text lines train: see above shuffle: if true, the data is shuffled. If train is False then shuffle must be False as well. buffer_size: size of the buffer in number of samples / data items used for shuffling (default: 2**20) transform: transform to be applied to each data item (transform(Any) -> Any) prefetch: if True, insert a prefetch iterator with buffer_size seed: random seed (or None) num_instances: number of instances of this dataset. Meant for use with multi-process data loading, e.g., in distributed training. instance_rank: rank of this instance of the dataset. Meant for use with multi-process data loading, e.g., in distributed training. use_windowed: temporary option to switch back to the WindowedShuffleIterator (default False). Will go away once shown that we don't need it anymore.
Here is the function:
def chunked_dataset_iterator(chunk_refs: List, read_chunk_fn: Callable[[Any], Iterator], buffer_size: int,
train: bool=True,
seed: Optional[int]=None, shuffle: bool=True, use_windowed: bool=False,
transform: Callable[[Any],Any]=None,
prefetch: bool=False,
num_instances: int=1, instance_rank: int=0) -> CheckpointableIterator:
"""
Dataset reading data from gzipped chunks.
If train=True, the chunks are assigned to instances in strides and the data is infinitely repeated in permutations.
Otherwise, the chunks are split among the instances in consecutive blocks and the data is not repeated.
This way, when using this dataset for inference on multiple GPUs, to order the outputs in a way that corresponds
to the original order of the data items in the dataset, one simply has to collect the lists of outputs from each GPU
and then concatenate these lists in order of increasing rank.
When using MPI, this can be achieved by a gather-operation to get a list of lists of outputs, one list per GPU,
followed by flattening the lists back into a single list.
Args:
chunk_refs: references (such as path names) to chunk files
read_chunk_fn: function(chunk_ref) -> Iterator to read a chunk's content into an iterator over its items, e.g. read a file and split into text lines
train: see above
shuffle: if true, the data is shuffled. If train is False then shuffle must be False as well.
buffer_size: size of the buffer in number of samples / data items used for shuffling (default: 2**20)
transform: transform to be applied to each data item (transform(Any) -> Any)
prefetch: if True, insert a prefetch iterator with buffer_size
seed: random seed (or None)
num_instances: number of instances of this dataset. Meant for use with multi-process data loading, e.g., in distributed training.
instance_rank: rank of this instance of the dataset. Meant for use with multi-process data loading, e.g., in distributed training.
use_windowed: temporary option to switch back to the WindowedShuffleIterator (default False). Will go away once shown that we don't need it anymore.
"""
if not train and shuffle:
raise ValueError('shuffling is not supported when train=False')
# set up the chunk reader
chunks = create_source_iterator(chunk_refs, train=train, seed=seed, shuffle=shuffle, num_instances=num_instances, instance_rank=instance_rank)
# set up the item reader
samples = SelectManyIterator(source_iterator=chunks, collection_selector=read_chunk_fn) # type: CheckpointableIterator
# wrap the I/O operation in a prefetch iterator
if prefetch:
samples = PrefetchIterator(samples, buffer_size)
# set up the item randomizer
if shuffle:
if use_windowed:
samples = BufferedShuffleIterator(samples, buffer_size, bump_seed(seed, 1))
else:
samples = BlockwiseShuffleIterator(samples, buffer_size, bump_seed(seed, 1))
# apply transform, if given
if transform is not None:
samples = MapIterator(samples, transform)
# this is what we are serving out
return samples | Dataset reading data from gzipped chunks. If train=True, the chunks are assigned to instances in strides and the data is infinitely repeated in permutations. Otherwise, the chunks are split among the instances in consecutive blocks and the data is not repeated. This way, when using this dataset for inference on multiple GPUs, to order the outputs in a way that corresponds to the original order of the data items in the dataset, one simply has to collect the lists of outputs from each GPU and then concatenate these lists in order of increasing rank. When using MPI, this can be achieved by a gather-operation to get a list of lists of outputs, one list per GPU, followed by flattening the lists back into a single list. Args: chunk_refs: references (such as path names) to chunk files read_chunk_fn: function(chunk_ref) -> Iterator to read a chunk's content into an iterator over its items, e.g. read a file and split into text lines train: see above shuffle: if true, the data is shuffled. If train is False then shuffle must be False as well. buffer_size: size of the buffer in number of samples / data items used for shuffling (default: 2**20) transform: transform to be applied to each data item (transform(Any) -> Any) prefetch: if True, insert a prefetch iterator with buffer_size seed: random seed (or None) num_instances: number of instances of this dataset. Meant for use with multi-process data loading, e.g., in distributed training. instance_rank: rank of this instance of the dataset. Meant for use with multi-process data loading, e.g., in distributed training. use_windowed: temporary option to switch back to the WindowedShuffleIterator (default False). Will go away once shown that we don't need it anymore. |
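A rough usage sketch, assuming the infinibatch-style iterators referenced above (InfinitePermutationSourceIterator and friends) are importable; the toy in-memory chunks stand in for gzipped files:
```python
# Toy "chunks": in real use these would be paths to gzipped files and
# read_chunk_fn would open and decode them.
chunk_refs = [['a', 'b', 'c'], ['d', 'e', 'f']]

it = chunked_dataset_iterator(
    chunk_refs=chunk_refs,
    read_chunk_fn=lambda chunk: iter(chunk),  # "read" a chunk into its items
    buffer_size=4,
    train=True, seed=1, shuffle=True,
)
first_items = [next(it) for _ in range(10)]   # train=True repeats the data indefinitely
state = it.getstate()                         # opaque checkpoint dict
it.setstate(state)                            # later: rewind to exactly this position
```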
181,024 | from abc import abstractmethod
import collections
import copy
import gzip
from itertools import cycle, islice
import logging
import math
import multiprocessing
import os
import queue
from random import Random
import threading
import time
from typing import Any, Callable, Dict, Generator, Iterable, Iterator, List, Optional, Tuple, Union, cast
The provided code snippet includes necessary dependencies for implementing the `_advance_iterator` function. Write a Python function `def _advance_iterator(iterator: Iterator, n: int)` to solve the following problem:
Little helper to advance an iterator by n items
Here is the function:
def _advance_iterator(iterator: Iterator, n: int):
""" Little helper to advance an iterator by n items """
for i in range(n):
try:
next(iterator)
except StopIteration:
raise RuntimeError('Trying to advance iterator by {} but iterator raised StopIteration exception on call to next with index {}.'.format(n, i))
return n | Little helper to advance an iterator by n items |
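A small illustration of `_advance_iterator`: it consumes `n` items and returns `n`, raising if the iterator runs out early:
```python
it = iter(range(10))
skipped = _advance_iterator(it, 3)   # consumes 0, 1, 2 and returns 3
next(it)                             # -> 3
_advance_iterator(iter([1]), 5)      # raises RuntimeError (StopIteration after the first item)
```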
181,025 | from abc import abstractmethod
import collections
import copy
import gzip
from itertools import cycle, islice
import logging
import math
import multiprocessing
import os
import queue
from random import Random
import threading
import time
from typing import Any, Callable, Dict, Generator, Iterable, Iterator, List, Optional, Tuple, Union, cast
class CheckpointableIterator(collections.abc.Iterator):
"""
Abstract base class that defines the interface for checkpointing.
The interface (getstate, setstate) is inspired by Python's random package.
"""
def __iter__(self) -> 'CheckpointableIterator':
return self
def getstate(self) -> Dict:
"""
Get checkpoint of current state of iterator
In a pipeline of iterators, this function __recursively__ calls itself on the preceding iterator
and includes the gathered information in the returned checkpoint.
Thereby, to obtain a checkpoint of the state of an entire pipeline of iterators
you only have to call this function on the __last__ iterator in the pipeline.
A checkpoint is represented as a `dict`,
but the caller should treat a checkpoint as an opaque object
and not make any assumptions about the existence or meaning of the `dict` entries.
"""
pass
def setstate(self, checkpoint: Optional[Dict]):
"""
Set state of iterator to given checkpoint
In a pipeline of iterators, this function __recursively__ calls itself on the preceding iterator.
Thereby, to set the state of an entire pipeline of iterators to a given checkpoint
you only have to call this function on the __last__ iterator in the pipeline.
Args:
checkpoint: Checkpoint that should be used to reset the state of the iterator (or pipeline).
If this is __None__, the state of the iterator (or pipeline) is reset to the initial
state immediately after construction.
"""
pass
def __getstate__(self) -> Dict: # implementation of pickle Protocol
return self.getstate()
def __setstate__(self, checkpoint: Optional[Dict]):
self.setstate(checkpoint)
def __next__(self):
pass
def close(self):
"""
Close all PrefetchIterators in this pipeline
PrefetchIterators have internal resources that need to be properly managed by calling close() manually.
Failure to do so can lead to dangling processes and threads, or the PrefetchIterator hanging on finalization.
Note that it is not correct to rely on the garbage collector to destroy PrefetchIterators
as CPython does not assure that the finalizer (__del__) of a PrefetchIterator will be called.
This function, which is implemented for every CheckpointableIterator, recursively traverses all preceding
iterators and closes all PrefetchIterators in the pipeline.
For pipelines that do not contain PrefetchIterators this function has no effect.
"""
pass
class SelectManyIterator(CheckpointableIterator):
"""
Projects each element of a source sequence to a sequence and flattens the resulting sequences into one sequence.
"""
def __init__(self, source_iterator: CheckpointableIterator, collection_selector: Optional[Callable[[Any], Iterator]]=None):
"""
Args:
source_iterator: iterator over the items to pass to collection_selector()
collection_selector: user callback that maps an item into an Iterable, whose items will be yielded.
The returned Iterator is used only once. Hence, it is also allowed to
return self-iterables, such as iterators and generator expressions.
If None is given, no callback is applied.
"""
if not isinstance(source_iterator, CheckpointableIterator):
raise ValueError('source_iterator has to be a CheckpointableIterator')
self._source_iterator = source_iterator # type: CheckpointableIterator
self._collection_selector = collection_selector # type: Optional[Callable[[Any], Iterator]]
self.setstate(None)
def getstate(self) -> Dict:
return {'source_state': self._source_state,
'flattened_items_yielded': self._flattened_items_yielded}
def setstate(self, checkpoint: Optional[Dict]):
self._source_state = checkpoint['source_state'] if checkpoint else None
self._flattened_items_yielded = checkpoint['flattened_items_yielded'] if checkpoint else 0
self._source_iterator.setstate(self._source_state)
def _generate():
skip_to_checkpoint = self._flattened_items_yielded
# main loop over source_items
for source_item in self._source_iterator:
if self._collection_selector is not None:
data = iter(self._collection_selector(source_item))
else:
data = iter(source_item)
self._flattened_items_yielded = 0
if skip_to_checkpoint:
#print("Skipping to index", skip_to_checkpoint, file=sys.stderr)
self._flattened_items_yielded += _advance_iterator(data, skip_to_checkpoint)
skip_to_checkpoint = 0
# main loop over lines
for item in data:
self._flattened_items_yielded += 1
yield item
self._source_state = self._source_iterator.getstate()
self._iterator = _generate()
def __next__(self):
return next(self._iterator)
def close(self):
self._source_iterator.close()
class MapIterator(CheckpointableIterator):
"""
Applies given transform to each data item
"""
def __init__(self, source_iterator: CheckpointableIterator, transform: Callable[[str],Any]):
"""
Args:
source_iterator: checkpointable iterator
transform: function to be applied to each data item
"""
if not isinstance(source_iterator, CheckpointableIterator):
raise ValueError('source_iterator has to be a CheckpointableIterator')
self._source_iterator = source_iterator
self._transform = transform
def getstate(self) -> Dict:
return self._source_iterator.getstate()
def setstate(self, checkpoint: Optional[Dict]):
self._source_iterator.setstate(checkpoint)
def __next__(self):
return self._transform(next(self._source_iterator))
def close(self):
self._source_iterator.close()
class FixedBatchIterator(CheckpointableIterator):
"""
Batches N consecutive items into a single item that is a list of these items.
E.g. [1, 2, 3, 4, 5, 6, 7, 8] with batch_size = 3 will yield
[[1, 2, 3], [4, 5, 6], [7, 8]]
"""
def __init__(self, source_iterator: CheckpointableIterator, batch_size: int):
"""
Args:
source_iterator: checkpointable input iterators
batch_size: number of items per batch
"""
if not isinstance(source_iterator, CheckpointableIterator):
raise ValueError('source_iterator has to be a CheckpointableIterator')
if batch_size <= 0:
raise ValueError('batch_size has to be positive')
self._source_iterator = source_iterator # type: CheckpointableIterator
self._batch_size = batch_size # type: int
self.setstate(None)
def getstate(self) -> Dict:
return {'source_state': self._source_iterator.getstate()} # state for first item in next batch
def setstate(self, checkpoint: Optional[Dict]):
self._source_state = checkpoint['source_state'] if checkpoint else None
self._source_iterator.setstate(self._source_state)
self._iterator = self._generate()
def _generate(self) -> Iterator:
while True:
batch = list(islice(self._source_iterator, self._batch_size))
if not batch:
break
yield batch
def __next__(self):
return next(self._iterator)
def close(self):
self._source_iterator.close()
The provided code snippet includes necessary dependencies for implementing the `ParallelMapIterator` function. Write a Python function `def ParallelMapIterator(source_iterator: CheckpointableIterator, transform: Callable[[str],Any], num_processes: int, num_items_per_process: int) -> CheckpointableIterator` to solve the following problem:
Applies given transform to each data item Behaves the same as MapIterator, but applies transform in parallel using multiple processes in a parallel map operation. Warning: The transform function has to be pickleable because it is sent across process boundaries. To achieve this, transform should be a top-level function. Args: source_iterator: checkpointable iterator transform: function to be applied to each data item, has to be pickleable, see above num_processes: number of processes to use for parallel map num_items_per_process: number of data items each process operates on
Here is the function:
def ParallelMapIterator(source_iterator: CheckpointableIterator, transform: Callable[[str],Any], num_processes: int, num_items_per_process: int) -> CheckpointableIterator:
"""
Applies given transform to each data item
Behaves the same as MapIterator, but applies transform in parallel using multiple processes in a parallel map operation.
Warning:
The transform function has to be pickleable because it is sent across process boundaries.
To achieve this, transform should be a top-level function.
Args:
source_iterator: checkpointable iterator
transform: function to be applied to each data item, has to be pickleable, see above
num_processes: number of processes to use for parallel map
num_items_per_process: number of data items each process operates on
"""
# divide stream of data items into batches
batched_samples = FixedBatchIterator(source_iterator, num_processes * num_items_per_process)
# create process pool and capture it in closure that performs parallel map
p = multiprocessing.Pool(num_processes)
def parallel_map_transform(buffer):
return p.map(transform, buffer)
# apply transform in parallel to data items in a batch
batched_transformed_samples = MapIterator(batched_samples, parallel_map_transform)
# unpack batches to go back to stream of (now transformed) data items
transformed_samples = SelectManyIterator(batched_transformed_samples)
return transformed_samples | Applies given transform to each data item Behaves the same as MapIterator, but applies transform in parallel using multiple processes in a parallel map operation. Warning: The transform function has to be pickleable because it is sent across process boundaries. To achieve this, transform should be a top-level function. Args: source_iterator: checkpointable iterator transform: function to be applied to each data item, has to be pickleable, see above num_processes: number of processes to use for parallel map num_items_per_process: number of data items each process operates on |
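A sketch of how ParallelMapIterator might be driven, assuming a CheckpointableIterator source such as the `create_source_iterator` shown earlier is available; the transform must be a module-level (picklable) function:
```python
def to_upper(s: str) -> str:          # must live at module level so it can be pickled
    return s.upper()

source = create_source_iterator(['a', 'b', 'c', 'd'], train=False, shuffle=False)
mapped = ParallelMapIterator(source, transform=to_upper,
                             num_processes=2, num_items_per_process=2)
# list(mapped) -> ['A', 'B', 'C', 'D']  (order preserved; work done in a process pool)
```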
181,026 | import json
import os
import random
import numpy as np
import torch
from PIL import Image
from accelerate import Accelerator
from omegaconf import OmegaConf
from torch.nn.utils.rnn import pad_sequence
from torchmetrics.image.fid import FrechetInceptionDistance
from torchvision.transforms import functional as F
from tqdm import tqdm
from app_model import AppModel
from app_utils import randomize_seed_fn
from fairseq import options
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
def collate_fn(batch):
src_tokens = [x[0] for x in batch]
img_gpt_input_mask = [x[1] for x in batch]
negative_tokens = batch[0][2].unsqueeze(0)
src_tokens = pad_sequence(src_tokens, batch_first=True, padding_value=1)
img_gpt_input_mask = pad_sequence(img_gpt_input_mask, batch_first=True, padding_value=0)
return src_tokens, img_gpt_input_mask, negative_tokens | null |
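A small illustration of the collate_fn above with invented tensors: src_tokens are padded with 1, the image mask with 0, and the negative tokens are taken from the first batch element only:
```python
batch = [
    (torch.tensor([5, 6, 7]), torch.tensor([1, 0, 0]), torch.tensor([9, 9])),
    (torch.tensor([5, 6]),    torch.tensor([1, 0]),    torch.tensor([9, 9])),
]
src_tokens, img_gpt_input_mask, negative_tokens = collate_fn(batch)
# src_tokens          -> tensor([[5, 6, 7], [5, 6, 1]])
# img_gpt_input_mask  -> tensor([[1, 0, 0], [1, 0, 0]])
# negative_tokens     -> tensor([[9, 9]])
```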
181,027 | import requests
import os
import multiprocessing as mp
from io import BytesIO
import numpy as np
import PIL
from PIL import Image
import pickle
import sys
The provided code snippet includes necessary dependencies for implementing the `grab` function. Write a Python function `def grab(line)` to solve the following problem:
Download a single image from the TSV.
Here is the function:
def grab(line):
"""
Download a single image from the TSV.
"""
uid, split, line = line
try:
caption, url = line.split("\t")[:2]
except:
print("Parse error")
return
if os.path.exists(ROOT+"/%s/%d/%d.jpg"%(split,uid%1000,uid)):
print("Finished", uid)
return uid, caption, url
# Let's not crash if anything weird happens
try:
dat = requests.get(url, timeout=20)
if dat.status_code != 200:
print("404 file", url)
return
# Try to parse this as an Image file, we'll fail out if not
im = Image.open(BytesIO(dat.content))
im.thumbnail((512, 512), PIL.Image.BICUBIC)
if min(*im.size) < max(*im.size)/3:
print("Too small", url)
return
im.save(ROOT+"/%s/%d/%d.jpg"%(split,uid%1000,uid))
# Another try/catch just because sometimes saving and re-loading
# the image is different than loading it once.
try:
o = Image.open(ROOT+"/%s/%d/%d.jpg"%(split,uid%1000,uid))
o = np.array(o)
print("Success", o.shape, uid, url)
return uid, caption, url
except:
print("Failed", uid, url)
except Exception as e:
print("Unknown error", e)
pass | Download a single image from the TSV. |
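The snippet relies on a global `ROOT` output directory and on rows shaped as `(uid, split, raw_tsv_line)`, neither of which is defined above. A hypothetical driver might look like this (the directory, caption, and URL are invented, and this is only a sketch of how `grab` could be fanned out over a pool):
```python
ROOT = "./images"                                   # assumed global used by grab()
os.makedirs(ROOT + "/train/0", exist_ok=True)       # bucket for uid % 1000 == 0

rows = [(0, "train", "a red bicycle\thttps://example.com/bike.jpg")]
with mp.Pool(4) as pool:
    results = [r for r in pool.map(grab, rows) if r is not None]
# each successful result is a (uid, caption, url) tuple
```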
181,033 | import gzip
import html
import os
from functools import lru_cache
from typing import Union, List
import ftfy
import regex as re
import torch
_tokenizer = SimpleTokenizer()
The provided code snippet includes necessary dependencies for implementing the `tokenize` function. Write a Python function `def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor` to solve the following problem:
Returns the tokenized representation of given input string(s) Parameters ---------- texts : Union[str, List[str]] An input string or a list of input strings to tokenize context_length : int The context length to use; all CLIP models use 77 as the context length Returns ------- A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
Here is the function:
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<start_of_text>"]
eot_token = _tokenizer.encoder["<end_of_text>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
tokens = tokens[:context_length] # Truncate
tokens[-1] = eot_token
result[i, :len(tokens)] = torch.tensor(tokens)
return result | Returns the tokenized representation of given input string(s) Parameters ---------- texts : Union[str, List[str]] An input string or a list of input strings to tokenize context_length : int The context length to use; all CLIP models use 77 as the context length Returns ------- A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] |
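Assuming the module-level `_tokenizer = SimpleTokenizer()` shown above is available (as in the CLIP/open_clip tokenizer module), usage looks like:
```python
tokens = tokenize(["a photo of a cat", "a photo of a dog"])
tokens.shape   # -> torch.Size([2, 77]); each row is <start_of_text> ... <end_of_text> plus zero padding
```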
181,034 | from itertools import repeat
import collections.abc
from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
The provided code snippet includes necessary dependencies for implementing the `freeze_batch_norm_2d` function. Write a Python function `def freeze_batch_norm_2d(module, module_match={}, name='')` to solve the following problem:
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and returned. Otherwise, the module is walked recursively and submodules are converted in place. Args: module (torch.nn.Module): Any PyTorch module. module_match (dict): Dictionary of full module names to freeze (all if empty) name (str): Full module name (prefix) Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
Here is the function:
def freeze_batch_norm_2d(module, module_match={}, name=''):
"""
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
returned. Otherwise, the module is walked recursively and submodules are converted in place.
Args:
module (torch.nn.Module): Any PyTorch module.
module_match (dict): Dictionary of full module names to freeze (all if empty)
name (str): Full module name (prefix)
Returns:
torch.nn.Module: Resulting module
Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
"""
res = module
is_match = True
if module_match:
is_match = name in module_match
if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)):
res = FrozenBatchNorm2d(module.num_features)
res.num_features = module.num_features
res.affine = module.affine
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for child_name, child in module.named_children():
full_child_name = '.'.join([name, child_name]) if name else child_name
new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
if new_child is not child:
res.add_module(child_name, new_child)
return res | Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and returned. Otherwise, the module is walked recursively and submodules are converted in place. Args: module (torch.nn.Module): Any PyTorch module. module_match (dict): Dictionary of full module names to freeze (all if empty) name (str): Full module name (prefix) Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 |
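A brief usage sketch, assuming torchvision is installed: freeze every BatchNorm2d in a ResNet, or only the modules named in `module_match`:
```python
import torchvision

model = freeze_batch_norm_2d(torchvision.models.resnet18())        # freeze all BN layers
partly = freeze_batch_norm_2d(torchvision.models.resnet18(),
                              module_match={'bn1': True})          # freeze only the stem BN
```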
181,035 | from itertools import repeat
import collections.abc
from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse | null |
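`_ntuple` is the usual timm-style helper for normalizing scalar-or-tuple arguments; for example, a `to_2tuple = _ntuple(2)` alias behaves like this:
```python
to_2tuple = _ntuple(2)
to_2tuple(3)        # -> (3, 3)
to_2tuple((4, 5))   # -> (4, 5)  (iterables are passed through unchanged)
```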
181,036 | import hashlib
import os
import urllib
import warnings
from tqdm import tqdm
_PRETRAINED = {
"RN50": _RN50,
"RN50-quickgelu": _RN50_quickgelu,
"RN101": _RN101,
"RN101-quickgelu": _RN101_quickgelu,
"RN50x4": _RN50x4,
"RN50x16": _RN50x16,
"RN50x64": _RN50x64,
"ViT-B-32": _VITB32,
"ViT-B-32-quickgelu": _VITB32_quickgelu,
"ViT-B-16": _VITB16,
"ViT-B-16-plus-240": _VITB16_PLUS_240,
"ViT-L-14": _VITL14,
"ViT-L-14-336": _VITL14_336,
}
The provided code snippet includes necessary dependencies for implementing the `list_pretrained` function. Write a Python function `def list_pretrained(as_str: bool = False)` to solve the following problem:
returns list of pretrained models Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
Here is the function:
def list_pretrained(as_str: bool = False):
""" returns list of pretrained models
Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
"""
return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()] | returns list of pretrained models Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True |
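Usage is straightforward; the exact tags depend on the `_RN50`, `_VITB32`, ... dicts, which are not shown here, so the output below is only indicative:
```python
list_pretrained()[:2]              # e.g. [('RN50', 'openai'), ('RN50', 'yfcc15m')]
list_pretrained(as_str=True)[:2]   # e.g. ['RN50:openai', 'RN50:yfcc15m']
```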
181,037 | import hashlib
import os
import urllib
import warnings
from tqdm import tqdm
_PRETRAINED = {
"RN50": _RN50,
"RN50-quickgelu": _RN50_quickgelu,
"RN101": _RN101,
"RN101-quickgelu": _RN101_quickgelu,
"RN50x4": _RN50x4,
"RN50x16": _RN50x16,
"RN50x64": _RN50x64,
"ViT-B-32": _VITB32,
"ViT-B-32-quickgelu": _VITB32_quickgelu,
"ViT-B-16": _VITB16,
"ViT-B-16-plus-240": _VITB16_PLUS_240,
"ViT-L-14": _VITL14,
"ViT-L-14-336": _VITL14_336,
}
The provided code snippet includes necessary dependencies for implementing the `list_pretrained_model_tags` function. Write a Python function `def list_pretrained_model_tags(model: str)` to solve the following problem:
return all pretrain tags for the specified model architecture
Here is the function:
def list_pretrained_model_tags(model: str):
""" return all pretrain tags for the specified model architecture """
tags = []
if model in _PRETRAINED:
tags.extend(_PRETRAINED[model].keys())
return tags | return all pretrain tags for the specified model architecture |
181,038 | from collections import OrderedDict
from dataclasses import dataclass
import logging
import math
from typing import Tuple, Union, Callable, Optional
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint
from .timm_model import TimmModel
from .utils import freeze_batch_norm_2d, to_2tuple
from argparse import Namespace
from torchscale.component.multihead_attention import MultiheadAttention
def trace_model(model, batch_size=256, device=torch.device('cpu')):
model.eval()
image_size = model.visual.image_size
example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
model = torch.jit.trace_module(
model,
inputs=dict(
forward=(example_images, example_text),
encode_text=(example_text,),
encode_image=(example_images,)
))
model.visual.image_size = image_size
return model | null |
181,039 | import torch
import torch.nn as nn
from torch.nn import functional as F
try:
import torch.distributed.nn
from torch import distributed as dist
has_distributed = True
except ImportError:
has_distributed = False
def gather_features(
image_features,
text_features,
local_loss=False,
gather_with_grad=False,
rank=0,
world_size=1,
use_horovod=False
):
assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.'
if use_horovod:
assert hvd is not None, 'Please install horovod'
if gather_with_grad:
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
else:
with torch.no_grad():
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
else:
# We gather tensors from all gpus
if gather_with_grad:
all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
else:
gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
return all_image_features, all_text_features | null |
181,040 | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Optional, Tuple
import torch
from .model import CLIP, convert_weights_to_fp16, resize_pos_embed
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
def create_model(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
):
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
if pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(model_name, device=device, jit=jit)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if model_name in _MODEL_CONFIGS:
logging.info(f'Loading {model_name} model config.')
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
model = CLIP(**model_cfg)
if pretrained:
checkpoint_path = ''
url = get_pretrained_url(model_name, pretrained)
if url:
checkpoint_path = download_pretrained(url)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
logging.warning(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
raise RuntimeError(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
model.to(device=device)
if precision == "fp16":
assert device.type != 'cpu'
convert_weights_to_fp16(model)
if jit:
model = torch.jit.script(model)
return model
def image_transform(
image_size: int,
is_train: bool,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
resize_longest_max: bool = False,
fill_color: int = 0,
):
mean = mean or (0.48145466, 0.4578275, 0.40821073) # OpenAI dataset mean
std = std or (0.26862954, 0.26130258, 0.27577711) # OpenAI dataset std
if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
# for square size, pass size as int so that Resize() uses aspect preserving shortest edge
image_size = image_size[0]
normalize = Normalize(mean=mean, std=std)
if is_train:
return Compose([
RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
_convert_to_rgb,
ToTensor(),
normalize,
])
else:
if resize_longest_max:
transforms = [
ResizeMaxSize(image_size, fill=fill_color)
]
else:
transforms = [
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
]
transforms.extend([
_convert_to_rgb,
ToTensor(),
normalize,
])
return Compose(transforms)
def create_model_and_transforms(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
):
model = create_model(
model_name, pretrained, precision, device, jit,
force_quick_gelu=force_quick_gelu,
pretrained_image=pretrained_image)
preprocess_train = image_transform(model.visual.image_size, is_train=True, mean=mean, std=std)
preprocess_val = image_transform(model.visual.image_size, is_train=False, mean=mean, std=std)
return model, preprocess_train, preprocess_val | null |
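A typical call of `create_model_and_transforms`, assuming the 'ViT-B-32' config and the OpenAI weights are available to this package:
```python
model, preprocess_train, preprocess_val = create_model_and_transforms(
    'ViT-B-32',
    pretrained='openai',
    precision='fp32',
    device=torch.device('cpu'),
)
# preprocess_val is the Compose() returned by image_transform(is_train=False)
```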
181,041 | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Optional, Tuple
import torch
from .model import CLIP, convert_weights_to_fp16, resize_pos_embed
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs()
The provided code snippet includes necessary dependencies for implementing the `add_model_config` function. Write a Python function `def add_model_config(path)` to solve the following problem:
add model config path or file and update registry
Here is the function:
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs() | add model config path or file and update registry |
181,042 | import json
import logging
import math
import os
import time
from contextlib import suppress
import numpy as np
import torch
import torch.nn.functional as F
try:
import wandb
except ImportError:
wandb = None
from open_clip import ClipLoss
from .distributed import is_master
from .zero_shot import zero_shot_eval
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def unwrap_model(model):
if hasattr(model, 'module'):
return model.module
else:
return model
def is_master(args, local=False):
return is_local_master(args) if local else is_global_master(args)
def train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, tb_writer=None):
device = torch.device(args.device)
autocast = torch.cuda.amp.autocast if args.precision == 'amp' else suppress
model.train()
loss = ClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod)
data['train'].set_epoch(epoch) # set epoch in process safe manner via sampler or shared_epoch
dataloader = data['train'].dataloader
num_batches_per_epoch = dataloader.num_batches
sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))
loss_m = AverageMeter()
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
end = time.time()
for i, batch in enumerate(dataloader):
step = num_batches_per_epoch * epoch + i
scheduler(step)
images, texts = batch
images = images.to(device=device, non_blocking=True)
texts = texts.to(device=device, non_blocking=True)
data_time_m.update(time.time() - end)
optimizer.zero_grad()
with autocast():
image_features, text_features, logit_scale = model(images, texts)
total_loss = loss(image_features, text_features, logit_scale)
if scaler is not None:
scaler.scale(total_loss).backward()
if args.horovod:
optimizer.synchronize()
scaler.unscale_(optimizer)
with optimizer.skip_synchronize():
scaler.step(optimizer)
else:
scaler.step(optimizer)
scaler.update()
else:
total_loss.backward()
optimizer.step()
# Note: we clamp to 4.6052 = ln(100), as in the original paper.
with torch.no_grad():
unwrap_model(model).logit_scale.clamp_(0, math.log(100))
batch_time_m.update(time.time() - end)
end = time.time()
batch_count = i + 1
if is_master(args) and (i % 100 == 0 or batch_count == num_batches_per_epoch):
batch_size = len(images)
num_samples = batch_count * batch_size * args.world_size
samples_per_epoch = dataloader.num_samples
percent_complete = 100.0 * batch_count / num_batches_per_epoch
# NOTE loss is coarsely sampled, just master node and per log update
loss_m.update(total_loss.item(), batch_size)
logit_scale_scalar = logit_scale.item()
logging.info(
f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) "
f"Data (t): {data_time_m.avg:.3f} "
f"Batch (t): {batch_time_m.avg:.3f}, {args.batch_size*args.world_size / batch_time_m.val:#g}/s "
f"LR: {optimizer.param_groups[0]['lr']:5f} "
f"Logit Scale: {logit_scale_scalar:.3f}"
)
# Save train loss / etc. Using non avg meter values as loggers have their own smoothing
log_data = {
"loss": loss_m.val,
"data_time": data_time_m.val,
"batch_time": batch_time_m.val,
"samples_per_scond": args.batch_size*args.world_size / batch_time_m.val,
"scale": logit_scale_scalar,
"lr": optimizer.param_groups[0]["lr"]
}
for name, val in log_data.items():
name = "train/" + name
if tb_writer is not None:
tb_writer.add_scalar(name, val, step)
if args.wandb:
assert wandb is not None, 'Please install wandb.'
wandb.log({name: val, 'step': step})
# resetting batch / data time meters per log window
batch_time_m.reset()
data_time_m.reset()
# end for | null |
181,043 | import json
import logging
import math
import os
import time
from contextlib import suppress
import numpy as np
import torch
import torch.nn.functional as F
try:
import wandb
except ImportError:
wandb = None
from open_clip import ClipLoss
from .distributed import is_master
from .zero_shot import zero_shot_eval
def get_metrics(image_features, text_features, logit_scale):
metrics = {}
logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu()
logits_per_text = logits_per_image.t().detach().cpu()
logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
ground_truth = torch.arange(len(text_features)).view(-1, 1)
for name, logit in logits.items():
ranking = torch.argsort(logit, descending=True)
preds = torch.where(ranking == ground_truth)[1]
preds = preds.detach().cpu().numpy()
metrics[f"{name}_mean_rank"] = preds.mean() + 1
metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
for k in [1, 5, 10]:
metrics[f"{name}_R@{k}"] = np.mean(preds < k)
return metrics
def is_master(args, local=False):
return is_local_master(args) if local else is_global_master(args)
def zero_shot_eval(model, data, epoch, args):
if 'imagenet-val' not in data and 'imagenet-v2' not in data:
return {}
if args.zeroshot_frequency == 0:
return {}
if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
return {}
logging.info('Starting zero-shot imagenet.')
logging.info('Building zero-shot classifier')
classifier = zero_shot_classifier(model, imagenet_classnames, openai_imagenet_template, args)
logging.info('Using classifier')
results = {}
if 'imagenet-val' in data:
top1, top5 = run(model, classifier, data['imagenet-val'].dataloader, args)
results['imagenet-zeroshot-val-top1'] = top1
results['imagenet-zeroshot-val-top5'] = top5
if 'imagenet-v2' in data:
top1, top5 = run(model, classifier, data['imagenet-v2'].dataloader, args)
results['imagenetv2-zeroshot-val-top1'] = top1
results['imagenetv2-zeroshot-val-top5'] = top5
logging.info('Finished zero-shot imagenet.')
return results
def evaluate(model, data, epoch, args, tb_writer=None):
metrics = {}
if not is_master(args):
return metrics
device = torch.device(args.device)
model.eval()
zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
metrics.update(zero_shot_metrics)
autocast = torch.cuda.amp.autocast if args.precision == 'amp' else suppress
if 'val' in data and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)):
dataloader = data['val'].dataloader
num_samples = 0
samples_per_val = dataloader.num_samples
# FIXME this does not scale past small eval datasets
# all_image_features @ all_text_features will blow up memory and compute very quickly
cumulative_loss = 0.0
all_image_features, all_text_features = [], []
with torch.no_grad():
for i, batch in enumerate(dataloader):
images, texts = batch
images = images.to(device=device, non_blocking=True)
texts = texts.to(device=device, non_blocking=True)
with autocast():
image_features, text_features, logit_scale = model(images, texts)
# features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly
# however, system RAM is easily exceeded and compute time becomes problematic
all_image_features.append(image_features.cpu())
all_text_features.append(text_features.cpu())
logit_scale = logit_scale.mean()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
batch_size = images.shape[0]
labels = torch.arange(batch_size, device=device).long()
total_loss = (
F.cross_entropy(logits_per_image, labels) +
F.cross_entropy(logits_per_text, labels)
) / 2
cumulative_loss += total_loss * batch_size
num_samples += batch_size
if is_master(args) and (i % 100) == 0:
logging.info(
f"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]\t"
f"Loss: {cumulative_loss / num_samples:.6f}\t")
val_metrics = get_metrics(
image_features=torch.cat(all_image_features),
text_features=torch.cat(all_text_features),
logit_scale=logit_scale.cpu(),
)
loss = cumulative_loss / num_samples
metrics.update(
{**val_metrics, "val_loss": loss.item(), "epoch": epoch, "num_samples": num_samples}
)
if not metrics:
return metrics
logging.info(
f"Eval Epoch: {epoch} "
+ "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
)
if args.save_logs:
for name, val in metrics.items():
if tb_writer is not None:
tb_writer.add_scalar(f"val/{name}", val, epoch)
with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f:
f.write(json.dumps(metrics))
f.write("\n")
if args.wandb:
assert wandb is not None, 'Please install wandb.'
for name, val in metrics.items():
wandb.log({f"val/{name}": val, 'epoch': epoch})
return metrics | null |
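The retrieval metrics above rank every image against every caption and vice versa. A small smoke test, assuming the get_metrics defined above is in scope: with identical, L2-normalized image and text features each pair ranks itself first, so mean rank is 1 and R@1 is 1.0 (barring exact ties).

import torch

feats = torch.nn.functional.normalize(torch.randn(8, 16), dim=-1)
metrics = get_metrics(image_features=feats, text_features=feats,
                      logit_scale=torch.tensor(100.0))
# Each pair matches itself, so mean rank ~1 and R@1 ~1.0.
print(metrics["image_to_text_R@1"], metrics["image_to_text_mean_rank"])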
181,044 | import argparse
def get_default_params(model_name):
# Params from paper (https://arxiv.org/pdf/2103.00020.pdf)
model_name = model_name.lower()
if "vit" in model_name:
return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6}
else:
return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--train-data",
type=str,
default=None,
help="Path to csv filewith training data",
)
parser.add_argument(
"--val-data",
type=str,
default=None,
help="Path to csv file with validation data",
)
parser.add_argument(
"--train-num-samples",
type=int,
default=None,
help="Number of samples in dataset. Required for webdataset if not available in info file.",
)
parser.add_argument(
"--val-num-samples",
type=int,
default=None,
help="Number of samples in dataset. Useful for webdataset if not available in info file.",
)
parser.add_argument(
"--dataset-type",
choices=["webdataset", "csv", "auto"],
default="auto",
help="Which type of dataset to process."
)
parser.add_argument(
"--dataset-resampled",
default=False,
action="store_true",
help="Whether to use sampling with replacement for webdataset shard selection."
)
parser.add_argument(
"--csv-separator",
type=str,
default="\t",
help="For csv-like datasets, which separator to use."
)
parser.add_argument(
"--csv-img-key",
type=str,
default="filepath",
help="For csv-like datasets, the name of the key for the image paths."
)
parser.add_argument(
"--csv-caption-key",
type=str,
default="title",
help="For csv-like datasets, the name of the key for the captions."
)
parser.add_argument(
"--imagenet-val",
type=str,
default=None,
help="Path to imagenet val set for conducting zero shot evaluation.",
)
parser.add_argument(
"--imagenet-v2",
type=str,
default=None,
help="Path to imagenet v2 for conducting zero shot evaluation.",
)
parser.add_argument(
"--logs",
type=str,
default="./logs/",
help="Where to store tensorboard logs. Use None to avoid storing logs.",
)
parser.add_argument(
"--log-local",
action="store_true",
default=False,
help="log files on local master, otherwise global master only.",
)
parser.add_argument(
"--name",
type=str,
default=None,
help="Optional identifier for the experiment when storing logs. Otherwise use current time.",
)
parser.add_argument(
"--workers", type=int, default=1, help="Number of dataloader workers per GPU."
)
parser.add_argument(
"--batch-size", type=int, default=64, help="Batch size per GPU."
)
parser.add_argument(
"--epochs", type=int, default=32, help="Number of epochs to train for."
)
parser.add_argument("--lr", type=float, default=None, help="Learning rate.")
parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.")
parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.")
parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.")
parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.")
parser.add_argument(
"--warmup", type=int, default=10000, help="Number of steps to warmup for."
)
parser.add_argument(
"--use-bn-sync",
default=False,
action="store_true",
help="Whether to use batch norm sync.")
parser.add_argument(
"--skip-scheduler",
action="store_true",
default=False,
help="Use this flag to skip the learning rate decay.",
)
parser.add_argument(
"--save-frequency", type=int, default=1, help="How often to save checkpoints."
)
parser.add_argument(
"--save-most-recent",
action="store_true",
default=False,
help="Always save the most recent model trained to epoch_latest.pt.",
)
parser.add_argument(
"--zeroshot-frequency", type=int, default=2, help="How often to run zero shot."
)
parser.add_argument(
"--val-frequency", type=int, default=1, help="How often to run evaluation with val data."
)
parser.add_argument(
"--resume",
default=None,
type=str,
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--precision",
choices=["amp", "fp16", "fp32"],
default="amp",
help="Floating point precision."
)
parser.add_argument(
"--model",
type=str,
default="RN50",
help="Name of the vision backbone to use.",
)
parser.add_argument(
"--pretrained",
default='',
type=str,
help="Use a pretrained CLIP model weights with the specified tag or file path.",
)
parser.add_argument(
"--pretrained-image",
default=False,
action='store_true',
help="Load imagenet pretrained weights for image tower backbone if available.",
)
parser.add_argument(
"--lock-image",
default=False,
action='store_true',
help="Lock full image tower by disabling gradients.",
)
parser.add_argument(
"--lock-image-unlocked-groups",
type=int,
default=0,
help="Leave last n image tower layer groups unlocked.",
)
parser.add_argument(
"--lock-image-freeze-bn-stats",
default=False,
action='store_true',
help="Freeze BatchNorm running stats in image tower for any locked layers.",
)
parser.add_argument(
"--grad-checkpointing",
default=False,
action='store_true',
help="Enable gradient checkpointing.",
)
parser.add_argument(
"--local-loss",
default=False,
action="store_true",
help="calculate loss w/ local features @ global (instead of realizing full global @ global matrix)"
)
parser.add_argument(
"--gather-with-grad",
default=False,
action="store_true",
help="enable full distributed gradient for feature gather"
)
parser.add_argument(
"--force-quick-gelu",
default=False,
action='store_true',
help="Force use of QuickGELU activation for non-OpenAI transformer models.",
)
parser.add_argument(
"--torchscript",
default=False,
action='store_true',
help="torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'",
)
parser.add_argument(
"--trace",
default=False,
action='store_true',
help="torch.jit.trace the model for inference / eval only",
)
# arguments for distributed training
parser.add_argument(
"--dist-url",
default="env://",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--report-to",
default='',
type=str,
help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']"
)
parser.add_argument(
"--wandb-notes",
default='',
type=str,
help="Notes if logging with wandb"
)
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="If true, more information is logged."
)
parser.add_argument(
"--copy-codebase",
default=False,
action="store_true",
help="If true, we copy the entire base on the log diretory, and execute from there."
)
parser.add_argument(
"--horovod",
default=False,
action="store_true",
help="Use horovod for distributed training."
)
parser.add_argument(
"--ddp-static-graph",
default=False,
action='store_true',
help="Enable static graph optimization for DDP in PyTorch >= 1.11.",
)
parser.add_argument(
"--no-set-device-rank",
default=False,
action="store_true",
help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc)."
)
parser.add_argument(
"--seed", type=int, default=0, help="Default random seed."
)
args = parser.parse_args()
# If some params are not passed, we use the default values based on model name.
default_params = get_default_params(args.model)
for name, val in default_params.items():
if getattr(args, name) is None:
setattr(args, name, val)
return args | null |
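A quick check of the model-dependent optimizer defaults filled in above (illustrative only): ViT backbones get beta2 = 0.98 and the larger eps from the CLIP paper, while other backbones keep the conventional Adam values.

print(get_default_params("ViT-B-32"))
# {'lr': 0.0005, 'beta1': 0.9, 'beta2': 0.98, 'eps': 1e-06}
print(get_default_params("RN50"))
# {'lr': 0.0005, 'beta1': 0.9, 'beta2': 0.999, 'eps': 1e-08}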
181,045 | import logging
import os
import random
from datetime import datetime
import numpy as np
import torch
from torch import optim
from torch.cuda.amp import GradScaler
try:
import torch.utils.tensorboard as tensorboard
except ImportError:
tensorboard = None
from open_clip import create_model_and_transforms, trace_model
from training.data import get_data
from training.distributed import is_master, init_distributed_device, world_info_from_env
from training.logger import setup_logging
from training.params import parse_args
from training.scheduler import cosine_lr
from training.train import train_one_epoch, evaluate
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank) | null |
181,046 | import logging
import os
import random
from datetime import datetime
import numpy as np
import torch
from torch import optim
from torch.cuda.amp import GradScaler
from open_clip import create_model_and_transforms, trace_model
from training.data import get_data
from training.distributed import is_master, init_distributed_device, world_info_from_env
from training.logger import setup_logging
from training.params import parse_args
from training.scheduler import cosine_lr
from training.train import train_one_epoch, evaluate
def copy_codebase(args):
from shutil import copytree, ignore_patterns
new_code_path = os.path.join(args.logs, args.name, "code")
if os.path.exists(new_code_path):
print(
f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment."
)
return -1
print(f"Copying codebase to {new_code_path}")
current_code_path = os.path.realpath(__file__)
for _ in range(3):
current_code_path = os.path.dirname(current_code_path)
copytree(current_code_path, new_code_path, ignore=ignore_patterns('log', 'logs', 'wandb'))
print("Done copying code.")
return 1 | null |
181,047 | import ast
import json
import logging
import math
import os
import random
import sys
import time
from dataclasses import dataclass
from multiprocessing import Value
import braceexpand
import numpy as np
import pandas as pd
import torch
import torchvision.datasets as datasets
import webdataset as wds
from PIL import Image
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
from open_clip import tokenize
def count_samples(dataloader):
os.environ["WDS_EPOCH"] = "0"
n_elements, n_batches = 0, 0
for images, texts in dataloader:
n_batches += 1
n_elements += len(images)
assert len(images) == len(texts)
return n_elements, n_batches | null |
181,048 | import ast
import json
import logging
import math
import os
import random
import sys
import time
from dataclasses import dataclass
from multiprocessing import Value
import braceexpand
import numpy as np
import pandas as pd
import torch
import torchvision.datasets as datasets
import webdataset as wds
from PIL import Image
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
from open_clip import tokenize
The provided code snippet includes necessary dependencies for implementing the `pytorch_worker_seed` function. Write a Python function `def pytorch_worker_seed()` to solve the following problem:
get dataloader worker seed from pytorch
Here is the function:
def pytorch_worker_seed():
"""get dataloader worker seed from pytorch"""
worker_info = get_worker_info()
if worker_info is not None:
# favour the seed already created for pytorch dataloader workers if it exists
return worker_info.seed
# fallback to wds rank based seed
return wds.utils.pytorch_worker_seed() | get dataloader worker seed from pytorch |
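For context (a sketch, not part of the module): get_worker_info() only returns an object inside a DataLoader worker process, where .seed is the per-worker seed (base_seed + worker_id); in the main process it is None, which is why the function above falls back to the webdataset helper.

from torch.utils.data import get_worker_info

# In the main process there is no worker context, so the fallback path runs.
assert get_worker_info() is None
# Inside a DataLoader worker, get_worker_info().seed == base_seed + worker_id,
# which is what pytorch_worker_seed() above prefers to return.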
181,049 | import ast
import json
import logging
import math
import os
import random
import sys
import time
from dataclasses import dataclass
from multiprocessing import Value
import braceexpand
import numpy as np
import pandas as pd
import torch
import torchvision.datasets as datasets
import webdataset as wds
from PIL import Image
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
from open_clip import tokenize
def get_imagenet(args, preprocess_fns, split):
assert split in ["train", "val", "v2"]
is_train = split == "train"
preprocess_train, preprocess_val = preprocess_fns
if split == "v2":
from imagenetv2_pytorch import ImageNetV2Dataset
dataset = ImageNetV2Dataset(location=args.imagenet_v2, transform=preprocess_val)
else:
if is_train:
data_path = args.imagenet_train
preprocess_fn = preprocess_train
else:
data_path = args.imagenet_val
preprocess_fn = preprocess_val
assert data_path
dataset = datasets.ImageFolder(data_path, transform=preprocess_fn)
if is_train:
idxs = np.zeros(len(dataset.targets))
target_array = np.array(dataset.targets)
k = 50
for c in range(1000):
m = target_array == c
n = len(idxs[m])
arr = np.zeros(n)
arr[:k] = 1
np.random.shuffle(arr)
idxs[m] = arr
idxs = idxs.astype('int')
sampler = SubsetRandomSampler(np.where(idxs)[0])
else:
sampler = None
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.workers,
sampler=sampler,
)
return DataInfo(dataloader=dataloader, sampler=sampler)
def get_dataset_fn(data_path, dataset_type):
if dataset_type == "webdataset":
return get_wds_dataset
elif dataset_type == "csv":
return get_csv_dataset
elif dataset_type == "auto":
ext = data_path.split('.')[-1]
if ext in ['csv', 'tsv']:
return get_csv_dataset
elif ext in ['tar']:
return get_wds_dataset
else:
raise ValueError(
f"Tried to figure out dataset type, but failed for extention {ext}.")
else:
raise ValueError(f"Unsupported dataset type: {dataset_type}")
def get_data(args, preprocess_fns, epoch=0):
preprocess_train, preprocess_val = preprocess_fns
data = {}
if args.train_data:
data["train"] = get_dataset_fn(args.train_data, args.dataset_type)(
args, preprocess_train, is_train=True, epoch=epoch)
if args.val_data:
data["val"] = get_dataset_fn(args.val_data, args.dataset_type)(
args, preprocess_val, is_train=False)
if args.imagenet_val is not None:
data["imagenet-val"] = get_imagenet(args, preprocess_fns, "val")
if args.imagenet_v2 is not None:
data["imagenet-v2"] = get_imagenet(args, preprocess_fns, "v2")
return data | null |
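The train branch of get_imagenet above keeps at most k = 50 images per class by shuffling a 0/1 mask within each class. A toy, standalone version of that masking idea:

import numpy as np

targets = np.array([0, 0, 0, 1, 1, 2, 2, 2, 2])
k = 2
keep = np.zeros(len(targets), dtype=int)
for c in np.unique(targets):
    mask = targets == c
    arr = np.zeros(mask.sum(), dtype=int)
    arr[:k] = 1             # mark k examples of this class ...
    np.random.shuffle(arr)  # ... at random positions
    keep[mask] = arr
print(np.where(keep)[0])    # indices of the retained subset, at most k per class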
181,050 | import os
import torch
def is_using_horovod():
# NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set
# Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required...
ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]
pmi_vars = ["PMI_RANK", "PMI_SIZE"]
if all([var in os.environ for var in ompi_vars]) or all([var in os.environ for var in pmi_vars]):
return True
else:
return False | null |
181,051 | import os
import torch
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def is_using_distributed():
if 'WORLD_SIZE' in os.environ:
return int(os.environ['WORLD_SIZE']) > 1
if 'SLURM_NTASKS' in os.environ:
return int(os.environ['SLURM_NTASKS']) > 1
return False
def world_info_from_env():
local_rank = 0
for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
if v in os.environ:
local_rank = int(os.environ[v])
break
global_rank = 0
for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'):
if v in os.environ:
global_rank = int(os.environ[v])
break
world_size = 1
for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'):
if v in os.environ:
world_size = int(os.environ[v])
break
return local_rank, global_rank, world_size
def init_distributed_device(args):
# Distributed training = training on more than one GPU.
# Works in both single and multi-node scenarios.
args.distributed = False
args.world_size = 1
args.rank = 0 # global rank
args.local_rank = 0
if args.horovod:
assert hvd is not None, "Horovod is not installed"
hvd.init()
args.local_rank = int(hvd.local_rank())
args.rank = hvd.rank()
args.world_size = hvd.size()
args.distributed = True
os.environ['LOCAL_RANK'] = str(args.local_rank)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
elif is_using_distributed():
if 'SLURM_PROCID' in os.environ:
# DDP via SLURM
args.local_rank, args.rank, args.world_size = world_info_from_env()
# SLURM var -> torch.distributed vars in case needed
os.environ['LOCAL_RANK'] = str(args.local_rank)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
else:
# DDP via torchrun, torch.distributed.launch
args.local_rank, _, _ = world_info_from_env()
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url)
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
args.distributed = True
if torch.cuda.is_available():
if args.distributed and not args.no_set_device_rank:
device = 'cuda:%d' % args.local_rank
else:
device = 'cuda:0'
torch.cuda.set_device(device)
else:
device = 'cpu'
args.device = device
device = torch.device(device)
return device | null |
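A small illustration of the environment-variable fallback in world_info_from_env above, assuming that function is in scope; SLURM and Open MPI variables are read the same way, in the order listed in the function.

import os

# Simulate a torchrun-style environment and read it back.
os.environ.update({"LOCAL_RANK": "1", "RANK": "5", "WORLD_SIZE": "8"})
print(world_info_from_env())  # (1, 5, 8)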
181,065 | from typing import Union
from fairseq.data.dictionary import Dictionary
from .decoder_config import DecoderConfig, FlashlightDecoderConfig
from .base_decoder import BaseDecoder
class Dictionary:
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
*, # begin keyword-only arguments
bos="<s>",
pad="<pad>",
eos="</s>",
unk="<unk>",
extra_special_symbols=None,
):
self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.bos_index = self.add_symbol(bos)
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def get_count(self, idx):
return self.count[idx]
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def __contains__(self, sym):
return sym in self.indices
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def string(
self,
tensor,
bpe_symbol=None,
escape_unk=False,
extra_symbols_to_ignore=None,
unk_string=None,
include_eos=False,
separator=" ",
):
"""Helper for converting a tensor of token indices to a string.
Can optionally remove BPE symbols or escape <unk> words.
"""
if torch.is_tensor(tensor) and tensor.dim() == 2:
return "\n".join(
self.string(
t,
bpe_symbol,
escape_unk,
extra_symbols_to_ignore,
include_eos=include_eos,
)
for t in tensor
)
extra_symbols_to_ignore = set(extra_symbols_to_ignore or [])
if not include_eos:
extra_symbols_to_ignore.add(self.eos())
def token_string(i):
if i == self.unk():
if unk_string is not None:
return unk_string
else:
return self.unk_string(escape_unk)
else:
return self[i]
if hasattr(self, "bos_index"):
extra_symbols_to_ignore.add(self.bos())
sent = separator.join(
token_string(i)
for i in tensor
if utils.item(i) not in extra_symbols_to_ignore
)
return data_utils.post_process(sent, bpe_symbol)
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return "<{}>".format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1, overwrite=False):
"""Adds a word to the dictionary"""
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict):
"""Updates counts from new dictionary."""
for word in new_dict.symbols:
idx2 = new_dict.indices[word]
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[: self.nspecial]
new_count = self.count[: self.nspecial]
c = Counter(
dict(
sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
)
)
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
self.pad_to_multiple_(padding_factor)
def pad_to_multiple_(self, padding_factor):
"""Pad Dictionary size to be a multiple of *padding_factor*."""
if padding_factor > 1:
i = 0
while len(self) % padding_factor != 0:
symbol = "madeupword{:04d}".format(i)
self.add_symbol(symbol, n=0)
i += 1
def bos(self):
"""Helper to get index of beginning-of-sentence symbol"""
return self.bos_index
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
def load(cls, f):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls()
d.add_from_file(f)
return d
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(
"Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f)
)
return
lines = f.readlines()
indices_start_line = self._load_meta(lines)
for line in lines[indices_start_line:]:
try:
line, field = line.rstrip().rsplit(" ", 1)
if field == "#fairseq:overwrite":
overwrite = True
line, field = line.rsplit(" ", 1)
else:
overwrite = False
count = int(field)
word = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(word)
)
self.add_symbol(word, n=count, overwrite=overwrite)
except ValueError:
raise ValueError(
f"Incorrect dictionary format, expected '<token> <cnt> [flags]': \"{line}\""
)
def _save(self, f, kv_iterator):
if isinstance(f, str):
PathManager.mkdirs(os.path.dirname(f))
with PathManager.open(f, "w", encoding="utf-8") as fd:
return self.save(fd)
for k, v in kv_iterator:
print("{} {}".format(k, v), file=f)
def _get_meta(self):
return [], []
def _load_meta(self, lines):
return 0
def save(self, f):
"""Stores dictionary into a text file"""
ex_keys, ex_vals = self._get_meta()
self._save(
f,
zip(
ex_keys + self.symbols[self.nspecial :],
ex_vals + self.count[self.nspecial :],
),
)
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
def encode_line(
self,
line,
line_tokenizer=tokenize_line,
add_if_not_exist=True,
consumer=None,
append_eos=True,
reverse_order=False,
) -> torch.IntTensor:
words = line_tokenizer(line)
if reverse_order:
words = list(reversed(words))
nwords = len(words)
ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
for i, word in enumerate(words):
if add_if_not_exist:
idx = self.add_symbol(word)
else:
idx = self.index(word)
if consumer is not None:
consumer(word, idx)
ids[i] = idx
if append_eos:
ids[nwords] = self.eos_index
return ids
def _add_file_to_dictionary_single_worker(
filename,
tokenize,
eos_word,
start_offset,
end_offset,
):
counter = Counter()
with Chunker(filename, start_offset, end_offset) as line_iterator:
for line in line_iterator:
for word in tokenize(line):
counter.update([word])
counter.update([eos_word])
return counter
def add_file_to_dictionary(filename, dict, tokenize, num_workers):
def merge_result(counter):
for w, c in sorted(counter.items()):
dict.add_symbol(w, c)
local_file = PathManager.get_local_path(filename)
offsets = find_offsets(local_file, num_workers)
if num_workers > 1:
chunks = zip(offsets, offsets[1:])
pool = Pool(processes=num_workers)
results = []
for (start_offset, end_offset) in chunks:
results.append(
pool.apply_async(
Dictionary._add_file_to_dictionary_single_worker,
(
local_file,
tokenize,
dict.eos_word,
start_offset,
end_offset,
),
)
)
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
merge_result(
Dictionary._add_file_to_dictionary_single_worker(
local_file, tokenize, dict.eos_word, offsets[0], offsets[1]
)
)
class DecoderConfig(FairseqDataclass):
type: DECODER_CHOICES = field(
default="viterbi",
metadata={"help": "The type of decoder to use"},
)
class FlashlightDecoderConfig(FairseqDataclass):
nbest: int = field(
default=1,
metadata={"help": "Number of decodings to return"},
)
unitlm: bool = field(
default=False,
metadata={"help": "If set, use unit language model"},
)
lmpath: str = field(
default=MISSING,
metadata={"help": "Language model for KenLM decoder"},
)
lexicon: Optional[str] = field(
default=None,
metadata={"help": "Lexicon for Flashlight decoder"},
)
beam: int = field(
default=50,
metadata={"help": "Number of beams to use for decoding"},
)
beamthreshold: float = field(
default=50.0,
metadata={"help": "Threshold for beam search decoding"},
)
beamsizetoken: Optional[int] = field(
default=None, metadata={"help": "Beam size to use"}
)
wordscore: float = field(
default=-1,
metadata={"help": "Word score for KenLM decoder"},
)
unkweight: float = field(
default=-math.inf,
metadata={"help": "Unknown weight for KenLM decoder"},
)
silweight: float = field(
default=0,
metadata={"help": "Silence weight for KenLM decoder"},
)
lmweight: float = field(
default=2,
metadata={"help": "Weight for LM while interpolating score"},
)
class BaseDecoder:
def __init__(self, tgt_dict: Dictionary) -> None:
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
if "<sep>" in tgt_dict.indices:
self.silence = tgt_dict.index("<sep>")
elif "|" in tgt_dict.indices:
self.silence = tgt_dict.index("|")
else:
self.silence = tgt_dict.eos()
def generate(
self, models: List[FairseqModel], sample: Dict[str, Any], **unused
) -> List[List[Dict[str, torch.LongTensor]]]:
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(
self,
models: List[FairseqModel],
encoder_input: Dict[str, Any],
) -> torch.FloatTensor:
model = models[0]
encoder_out = model(**encoder_input)
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out)
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs: torch.IntTensor) -> torch.LongTensor:
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank, idxs)
return torch.LongTensor(list(idxs))
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
raise NotImplementedError
class ViterbiDecoder(BaseDecoder):
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
def get_pred(e):
toks = e.argmax(dim=-1).unique_consecutive()
return toks[toks != self.blank]
return [[{"tokens": get_pred(x), "score": 0}] for x in emissions]
class KenLMDecoder(BaseDecoder):
def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None:
super().__init__(tgt_dict)
self.nbest = cfg.nbest
self.unitlm = cfg.unitlm
if cfg.lexicon:
self.lexicon = load_words(cfg.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for word, spellings in self.lexicon.items():
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{word} {spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
word_score=cfg.wordscore,
unk_score=cfg.unkweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unitlm,
)
else:
assert self.unitlm, "Lexicon-free decoding requires unit LM"
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def get_timesteps(self, token_idxs: List[int]) -> List[int]:
"""Returns frame numbers corresponding to every non-blank token.
Parameters
----------
token_idxs : List[int]
IDs of decoded tokens.
Returns
-------
List[int]
Frame numbers corresponding to every non-blank token.
"""
timesteps = []
for i, token_idx in enumerate(token_idxs):
if token_idx == self.blank:
continue
if i == 0 or token_idx != token_idxs[i-1]:
timesteps.append(i)
return timesteps
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"timesteps": self.get_timesteps(result.tokens),
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
class FairseqLMDecoder(BaseDecoder):
def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None:
super().__init__(tgt_dict)
self.nbest = cfg.nbest
self.unitlm = cfg.unitlm
self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(cfg.lmpath, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
if not OmegaConf.is_dict(lm_args):
lm_args = OmegaConf.create(lm_args)
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(cfg.lmpath)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unitlm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
word_score=cfg.wordscore,
unk_score=cfg.unkweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unitlm,
)
else:
assert self.unitlm, "Lexicon-free decoding requires unit LM"
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
B, T, N = emissions.size()
hypos = []
def make_hypo(result: DecodeResult) -> Dict[str, Any]:
hypo = {
"tokens": self.get_tokens(result.tokens),
"score": result.score,
}
if self.lexicon:
hypo["words"] = [
self.idx_to_wrd[x] if self.unitlm else self.word_dict[x]
for x in result.words
if x >= 0
]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
def Decoder(
cfg: Union[DecoderConfig, FlashlightDecoderConfig], tgt_dict: Dictionary
) -> BaseDecoder:
if cfg.type == "viterbi":
from .viterbi_decoder import ViterbiDecoder
return ViterbiDecoder(tgt_dict)
if cfg.type == "kenlm":
from .flashlight_decoder import KenLMDecoder
return KenLMDecoder(cfg, tgt_dict)
if cfg.type == "fairseqlm":
from .flashlight_decoder import FairseqLMDecoder
return FairseqLMDecoder(cfg, tgt_dict)
raise NotImplementedError(f"Invalid decoder name: {cfg.name}") | null |
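The Viterbi path in BaseDecoder.get_tokens / ViterbiDecoder above is a plain CTC-style collapse: take the argmax per frame, merge consecutive repeats, drop the blank. A toy, standalone check of that collapse:

import torch

blank = 0
frame_ids = torch.tensor([0, 3, 3, 0, 4, 4, 4, 0, 3])  # per-frame argmax ids
collapsed = frame_ids.unique_consecutive()              # merge repeated frames
collapsed = collapsed[collapsed != blank]                # drop the blank symbol
print(collapsed.tolist())  # [3, 4, 3]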
181,067 | import torch
def calc_mean_invstddev(feature):
if len(feature.size()) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = feature.mean(0)
var = feature.var(0)
# avoid division by ~zero
eps = 1e-8
if (var < eps).any():
return mean, 1.0 / (torch.sqrt(var) + eps)
return mean, 1.0 / torch.sqrt(var)
def apply_mv_norm(features):
# If there are fewer than 2 spectrogram frames, the variance cannot be computed (is NaN)
# and normalization is not possible, so return the item as it is
if features.size(0) < 2:
return features
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res | null |
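A quick sanity check of apply_mv_norm above (assuming at least two frames): after normalization each feature dimension has roughly zero mean and unit variance.

import torch

feats = torch.randn(100, 40) * 3.0 + 1.5
normed = apply_mv_norm(feats)
print(normed.mean(0).abs().max().item())         # close to 0
print((normed.var(0) - 1.0).abs().max().item())  # close to 0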
181,075 | import ast
import logging
import math
import os
import sys
import editdistance
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == "wordpiece":
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == "letter":
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "silence":
import re
sentence = sentence.replace("<SIL>", "")
sentence = re.sub(' +', ' ', sentence).strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol in {"subword_nmt", "@@ ", "@@"}:
if symbol == "subword_nmt":
symbol = "@@ "
sentence = (sentence + " ").replace(symbol, "").rstrip()
elif symbol == "none":
pass
elif symbol is not None:
raise NotImplementedError(f"Unknown post_process option: {symbol}")
return sentence
def process_predictions(
args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(hyp_pieces, speaker, id),
file=res_files["hypo.units"],
)
print(
"{} ({}-{})".format(hyp_words, speaker, id),
file=res_files["hypo.words"],
)
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(tgt_pieces, speaker, id),
file=res_files["ref.units"],
)
print(
"{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
)
if not args.quiet:
logger.info("HYPO:" + hyp_words)
logger.info("TARGET:" + tgt_words)
logger.info("___________________")
hyp_words = hyp_words.split()
tgt_words = tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words) | null |
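A short illustration of the "letter" post-processing convention handled above, and the word-level edit distance it feeds into (uses the post_process defined above and the editdistance package):

import editdistance

hyp = post_process("h e l l o | w o r l d |", "letter")
print(hyp)  # "hello world"  (spaces removed, "|" becomes a word boundary)
ref = "hello word"
print(editdistance.eval(hyp.split(), ref.split()))  # 1 word error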
181,113 | import argparse
import random
import sys
from itertools import chain
import numpy as np
from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu
def corpus_bleu(sys_stream, ref_streams):
def pairwise(sents):
def intra_ref(refs):
print("ref pairwise BLEU: %.2f" % pairwise(refs))
refs = list(zip(*refs))
m = len(refs)
concat_h = []
concat_rest = [[] for j in range(m - 1)]
for i, h in enumerate(refs):
rest = refs[:i] + refs[i + 1 :]
concat_h.append(h)
for j in range(m - 1):
concat_rest[j].extend(rest[j])
concat_h = list(chain.from_iterable(concat_h))
bleu = corpus_bleu(concat_h, concat_rest)
print("multi-reference BLEU (leave-one-out): %.2f" % bleu) | null |
181,119 | import os
from contextlib import redirect_stdout
from fairseq import options
from fairseq_cli import generate
from examples.noisychannel import rerank_options, rerank_utils
def score_bw(args):
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
score_bw(args) | null |
181,129 | from typing import Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
TransformerModel,
TransformerEncoder,
TransformerDecoder,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
tiny_architecture
)
from torch import Tensor
def base_monotonic_architecture(args):
def transformer_iwslt_de_en(args):
def transformer_monotonic_iwslt_de_en(args):
transformer_iwslt_de_en(args)
base_monotonic_architecture(args) | null |
181,130 | from typing import Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
TransformerModel,
TransformerEncoder,
TransformerDecoder,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
tiny_architecture
)
from torch import Tensor
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
def transformer_monotonic_vaswani_wmt_en_de_big(args):
transformer_vaswani_wmt_en_de_big(args) | null |
181,132 | from typing import Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
TransformerModel,
TransformerEncoder,
TransformerDecoder,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
tiny_architecture
)
from torch import Tensor
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
def transformer_unidirectional_iwslt_de_en(args):
transformer_iwslt_de_en(args) | null |
181,133 | from typing import Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
TransformerModel,
TransformerEncoder,
TransformerDecoder,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
tiny_architecture
)
from torch import Tensor
def base_monotonic_architecture(args):
base_architecture(args)
args.encoder_unidirectional = getattr(args, "encoder_unidirectional", False)
"transformer_monotonic", "transformer_monotonic_iwslt_de_en"
def tiny_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64)
args.encoder_layers = getattr(args, "encoder_layers", 2)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
return base_architecture(args)
def monotonic_tiny_architecture(args):
tiny_architecture(args)
base_monotonic_architecture(args) | null |
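All of the architecture functions above share one pattern: getattr(args, name, default) fills a hyper-parameter only when the user has not already set it, so explicit overrides always win. A minimal illustration of that pattern on a bare Namespace (not a real fairseq config):

from argparse import Namespace

args = Namespace(encoder_embed_dim=128)  # user-provided override
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)  # kept: 128
args.encoder_layers = getattr(args, "encoder_layers", 2)         # filled in: 2
print(args.encoder_embed_dim, args.encoder_layers)  # 128 2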
181,135 | from typing import Optional
import torch
from torch import Tensor
from examples.simultaneous_translation.utils.functions import (
exclusive_cumprod,
prob_check,
moving_sum,
)
def prob_check(tensor, eps=1e-10):
assert not torch.isnan(tensor).any(), (
"Nan in a probability tensor."
)
# Add the eps here to prevent errors introduced by precision
assert tensor.le(1.0 + eps).all() and tensor.ge(0.0 - eps).all(), (
"Incorrect values in a probability tensor"
", 0.0 <= tensor <= 1.0"
)
def moving_sum(x, start_idx: int, end_idx: int):
"""
From MONOTONIC CHUNKWISE ATTENTION
https://arxiv.org/pdf/1712.05382.pdf
Equation (18)
x = [x_1, x_2, ..., x_N]
MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m
for n in {1, 2, 3, ..., N}
x : src_len, batch_size
start_idx : start idx
end_idx : end idx
Example
src_len = 5
batch_size = 3
x =
[[ 0, 5, 10],
[ 1, 6, 11],
[ 2, 7, 12],
[ 3, 8, 13],
[ 4, 9, 14]]
MovingSum(x, 3, 1) =
[[ 0, 5, 10],
[ 1, 11, 21],
[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39]]
MovingSum(x, 1, 3) =
[[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39],
[ 7, 17, 27],
[ 4, 9, 14]]
"""
# TODO: Make dimension configurable
assert start_idx > 0 and end_idx > 0
batch_size, tgt_len, src_len = x.size()
x = x.view(-1, src_len).unsqueeze(1)
# batch_size, 1, src_len
moving_sum_weight = torch.ones([1, 1, end_idx + start_idx - 1]).type_as(x)
moving_sum = torch.nn.functional.conv1d(
x, moving_sum_weight, padding=start_idx + end_idx - 1
).squeeze(1)
moving_sum = moving_sum[:, end_idx:-start_idx]
assert src_len == moving_sum.size(1)
assert batch_size * tgt_len == moving_sum.size(0)
moving_sum = moving_sum.view(batch_size, tgt_len, src_len)
return moving_sum
The provided code snippet includes necessary dependencies for implementing the `expected_soft_attention` function. Write a Python function `def expected_soft_attention( alpha: Tensor, soft_energy: Tensor, padding_mask: Optional[Tensor] = None, chunk_size: Optional[int] = None, eps: float = 1e-10 )` to solve the following problem:
Function to compute expected soft attention for monotonic infinite lookback attention from expected alignment and soft energy. Reference: Monotonic Chunkwise Attention https://arxiv.org/abs/1712.05382 Monotonic Infinite Lookback Attention for Simultaneous Machine Translation https://arxiv.org/abs/1906.05218 alpha: bsz, tgt_len, src_len soft_energy: bsz, tgt_len, src_len padding_mask: bsz, src_len left_padding: bool
Here is the function:
def expected_soft_attention(
alpha: Tensor,
soft_energy: Tensor,
padding_mask: Optional[Tensor] = None,
chunk_size: Optional[int] = None,
eps: float = 1e-10
):
"""
Function to compute expected soft attention for
monotonic infinite lookback attention from
expected alignment and soft energy.
Reference:
Monotonic Chunkwise Attention
https://arxiv.org/abs/1712.05382
Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
https://arxiv.org/abs/1906.05218
alpha: bsz, tgt_len, src_len
soft_energy: bsz, tgt_len, src_len
padding_mask: bsz, src_len
left_padding: bool
"""
if padding_mask is not None:
alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0)
soft_energy = soft_energy.masked_fill(
padding_mask.unsqueeze(1), -float("inf")
)
prob_check(alpha)
dtype = alpha.dtype
alpha = alpha.float()
soft_energy = soft_energy.float()
soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0]
exp_soft_energy = torch.exp(soft_energy) + eps
if chunk_size is not None:
# Chunkwise
beta = (
exp_soft_energy
* moving_sum(
alpha / (eps + moving_sum(exp_soft_energy, chunk_size, 1)),
1, chunk_size
)
)
else:
# Infinite lookback
# Notice that infinite lookback is a special case of chunkwise
# where chunksize = inf
inner_items = alpha / (eps + torch.cumsum(exp_soft_energy, dim=2))
beta = (
exp_soft_energy
* torch.cumsum(inner_items.flip(dims=[2]), dim=2)
.flip(dims=[2])
)
if padding_mask is not None:
beta = beta.masked_fill(
padding_mask.unsqueeze(1).to(torch.bool), 0.0)
# Mix precision to prevent overflow for fp16
beta = beta.type(dtype)
beta = beta.clamp(0, 1)
prob_check(beta)
return beta | Function to compute expected soft attention for monotonic infinite lookback attention from expected alignment and soft energy. Reference: Monotonic Chunkwise Attention https://arxiv.org/abs/1712.05382 Monotonic Infinite Lookback Attention for Simultaneous Machine Translation https://arxiv.org/abs/1906.05218 alpha: bsz, tgt_len, src_len soft_energy: bsz, tgt_len, src_len padding_mask: bsz, src_len left_padding: bool |
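A tiny smoke test for expected_soft_attention above (infinite-lookback branch), assuming the function and its helpers are in scope: with a hard alignment alpha and uniform soft energy, beta spreads the mass uniformly over the source prefix ending at the aligned position, and each row sums to one.

import torch

bsz, tgt_len, src_len = 1, 2, 4
alpha = torch.zeros(bsz, tgt_len, src_len)
alpha[0, 0, 1] = 1.0  # step 0 stops at source position 1
alpha[0, 1, 3] = 1.0  # step 1 stops at source position 3
soft_energy = torch.zeros(bsz, tgt_len, src_len)  # uniform energies
beta = expected_soft_attention(alpha, soft_energy)
print(beta[0])       # [[0.5, 0.5, 0, 0], [0.25, 0.25, 0.25, 0.25]] (approx.)
print(beta.sum(-1))  # each row sums to ~1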
181,137 | import torch
def safe_cumprod(tensor, dim: int, eps: float = 1e-10):
"""
An implementation of cumprod to prevent precision issue.
cumprod(x)
= [x1, x1x2, x1x2x3, ....]
= [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...]
= exp(cumsum(log(x)))
"""
if (tensor + eps < 0).any().item():
raise RuntimeError(
"Safe cumprod can only take non-negative tensors as input."
"Consider use torch.cumprod if you want to calculate negative values."
)
log_tensor = torch.log(tensor + eps)
cumsum_log_tensor = torch.cumsum(log_tensor, dim)
exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)
return exp_cumsum_log_tensor
The provided code snippet includes necessary dependencies for implementing the `exclusive_cumprod` function. Write a Python function `def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10)` to solve the following problem:
Implementing exclusive cumprod. There is cumprod in pytorch, however there is no exclusive mode. cumprod(x) = [x1, x1x2, x1x2x3, ..., prod_{i=1}^n x_i] exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]
Here is the function:
def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):
"""
Implementing exclusive cumprod.
There is cumprod in pytorch, however there is no exclusive mode.
cumprod(x) = [x1, x1x2, x1x2x3, ..., prod_{i=1}^n x_i]
exclusive means
cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]
"""
tensor_size = list(tensor.size())
tensor_size[dim] = 1
return_tensor = safe_cumprod(
torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim),
dim=dim,
eps=eps,
)
if dim == 0:
return return_tensor[:-1]
elif dim == 1:
return return_tensor[:, :-1]
elif dim == 2:
return return_tensor[:, :, :-1]
else:
raise RuntimeError(
"Cumprod on dimension 3 and more is not implemented"
) | Implementing exclusive cumprod. There is cumprod in pytorch, however there is no exclusive mode. cumprod(x) = [x1, x1x2, x1x2x3, ..., prod_{i=1}^n x_i] exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i] |
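A numeric check of exclusive_cumprod above against torch.cumprod (values agree up to the eps used inside safe_cumprod): the exclusive version is the plain cumulative product shifted right by one, starting at 1.

import torch

x = torch.tensor([[0.5, 0.2, 0.8]])
print(torch.cumprod(x, dim=1))      # [[0.5000, 0.1000, 0.0800]]
print(exclusive_cumprod(x, dim=1))  # [[1.0000, 0.5000, 0.1000]] (approx.)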
181,138 | from typing import Optional, Dict
from torch import Tensor
import torch
def waitk_p_choose(
tgt_len: int,
src_len: int,
bsz: int,
waitk_lagging: int,
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None
):
max_src_len = src_len
if incremental_state is not None:
# Retrieve target length from incremental states
# For inference the length of query is always 1
max_tgt_len = incremental_state["steps"]["tgt"]
assert max_tgt_len is not None
max_tgt_len = int(max_tgt_len)
else:
max_tgt_len = tgt_len
if max_src_len < waitk_lagging:
if incremental_state is not None:
max_tgt_len = 1
return torch.zeros(
bsz, max_tgt_len, max_src_len
)
# Assuming the p_choose looks like this for wait k=3
# src_len = 6, max_tgt_len = 5
# [0, 0, 1, 0, 0, 0, 0]
# [0, 0, 0, 1, 0, 0, 0]
# [0, 0, 0, 0, 1, 0, 0]
# [0, 0, 0, 0, 0, 1, 0]
# [0, 0, 0, 0, 0, 0, 1]
# linearize the p_choose matrix:
# [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0...]
# The indices of linearized matrix that equals 1 is
# 2 + 6 * 0
# 3 + 6 * 1
# ...
# n + src_len * n + k - 1 = n * (src_len + 1) + k - 1
# n from 0 to max_tgt_len - 1
#
# First, generate the indices (activate_indices_offset: bsz, max_tgt_len)
# Second, scatter a zeros tensor (bsz, max_tgt_len * src_len)
# with activate_indices_offset
# Third, resize the tensor to (bsz, max_tgt_len, src_len)
activate_indices_offset = (
(
torch.arange(max_tgt_len) * (max_src_len + 1)
+ waitk_lagging - 1
)
.unsqueeze(0)
.expand(bsz, max_tgt_len)
.long()
)
if key_padding_mask is not None:
if key_padding_mask[:, 0].any():
# Left padding
activate_indices_offset += (
key_padding_mask.sum(dim=1, keepdim=True)
)
# Need to clamp the indices that are too large
activate_indices_offset = (
activate_indices_offset
.clamp(
0,
min(
[
max_tgt_len,
max_src_len - waitk_lagging + 1
]
) * max_src_len - 1
)
)
p_choose = torch.zeros(bsz, max_tgt_len * max_src_len)
p_choose = p_choose.scatter(
1,
activate_indices_offset,
1.0
).view(bsz, max_tgt_len, max_src_len)
if key_padding_mask is not None:
p_choose = p_choose.to(key_padding_mask)
p_choose = p_choose.masked_fill(key_padding_mask.unsqueeze(1), 0)
if incremental_state is not None:
p_choose = p_choose[:, -1:]
return p_choose.float() | null |
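A concrete run of waitk_p_choose above for the wait-k = 3 setting sketched in its comments (bsz = 1, tgt_len = 5, src_len = 6). Note that target steps whose read position would fall past the last source token are clamped, so they produce an all-zero row rather than re-reading the final position.

import torch

p = waitk_p_choose(tgt_len=5, src_len=6, bsz=1, waitk_lagging=3)
print(p[0].long())
# tensor([[0, 0, 1, 0, 0, 0],
#         [0, 0, 0, 1, 0, 0],
#         [0, 0, 0, 0, 1, 0],
#         [0, 0, 0, 0, 0, 1],
#         [0, 0, 0, 0, 0, 0]])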
181,139 | from typing import Optional, Dict
from torch import Tensor
import torch
The provided code snippet includes necessary dependencies for implementing the `learnable_p_choose` function. Write a Python function `def learnable_p_choose( energy, noise_mean: float = 0.0, noise_var: float = 0.0, training: bool = True )` to solve the following problem:
Calculating step wise prob for reading and writing 1 to read, 0 to write energy: bsz, tgt_len, src_len
Here is the function:
def learnable_p_choose(
energy,
noise_mean: float = 0.0,
noise_var: float = 0.0,
training: bool = True
):
"""
Calculating step wise prob for reading and writing
1 to read, 0 to write
energy: bsz, tgt_len, src_len
"""
noise = 0
if training:
# add noise here to encourage discreteness
noise = (
torch.normal(noise_mean, noise_var, energy.size())
.type_as(energy)
.to(energy.device)
)
p_choose = torch.sigmoid(energy + noise)
# p_choose: bsz * self.num_heads, tgt_len, src_len
return p_choose | Calculating step wise prob for reading and writing 1 to read, 0 to write energy: bsz, tgt_len, src_len |
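Behaviour sketch for learnable_p_choose above: in eval mode (training=False) the result is just sigmoid(energy); during training the added Gaussian noise pushes the probabilities toward 0 or 1 while keeping them valid.

import torch

energy = torch.tensor([[[-4.0, 0.0, 4.0]]])
print(learnable_p_choose(energy, training=False))
# tensor([[[0.0180, 0.5000, 0.9820]]])
torch.manual_seed(0)
print(learnable_p_choose(energy, noise_mean=0.0, noise_var=2.0, training=True))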
181,140 | import logging
import os
import sys
import tqdm
from npy_append_array import NpyAppendArray
def get_shard_range(tot, nshard, rank):
def get_path_iterator(tsv, nshard, rank):
with open(tsv, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
start, end = get_shard_range(len(lines), nshard, rank)
lines = lines[start:end]
def iterate():
for line in lines:
subpath, nsample = line.split("\t")
yield f"{root}/{subpath}", int(nsample)
return iterate, len(lines) | null |
181,149 | import logging
from typing import Dict, List, Optional
from pathlib import Path
import torch.nn as nn
from torch import Tensor
from fairseq import checkpoint_utils
from fairseq.models import register_model, register_model_architecture
from fairseq.utils import safe_hasattr
from fairseq.models.speech_to_text.s2t_transformer import (
S2TTransformerModel,
S2TTransformerEncoder,
TransformerDecoderScriptable
)
from fairseq.models.speech_to_text.s2t_transformer import base_architecture as s2t_base_architecture
from ..modules.attn_head_selector import AttnHeadSelector
from ..modules.head_selection_transformer_layer import HeadSelectionTransformerEncoderLayer
from .head_selection_transformer import HeadSelectionTransformerDecoder
def base_architecture(args):
def base_architecture(args):
def head_selection_s2t_transformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
base_architecture(args) | null |
181,156 | import ast
import logging
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import soundfile as sf
import sys
import torch
import torchaudio
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.logging import progress_bar
from fairseq.tasks.text_to_speech import plot_tts_output
from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset
class TextToSpeechDataset(SpeechToTextDataset):
def __init__(
self,
split: str,
is_train_split: bool,
cfg: S2TDataConfig,
audio_paths: List[str],
n_frames: List[int],
src_texts: Optional[List[str]] = None,
tgt_texts: Optional[List[str]] = None,
speakers: Optional[List[str]] = None,
src_langs: Optional[List[str]] = None,
tgt_langs: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
tgt_dict: Optional[Dictionary] = None,
pre_tokenizer=None,
bpe_tokenizer=None,
n_frames_per_step=1,
speaker_to_id=None,
durations: Optional[List[List[int]]] = None,
pitches: Optional[List[str]] = None,
energies: Optional[List[str]] = None
):
def __getitem__(self, index: int) -> TextToSpeechDatasetItem:
def collater(self, samples: List[TextToSpeechDatasetItem]) -> Dict[str, Any]:
def postprocess_results(
dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target
):
def to_np(x):
return None if x is None else x.detach().cpu().numpy()
sample_ids = [dataset.ids[i] for i in sample["id"].tolist()]
texts = sample["src_texts"] if "src_texts" in sample else [""] * len(hypos)
attns = [to_np(hypo["attn"]) for hypo in hypos]
eos_probs = [to_np(hypo.get("eos_prob", None)) for hypo in hypos]
feat_preds = [to_np(hypo["feature"]) for hypo in hypos]
wave_preds = [to_np(resample_fn(h["waveform"])) for h in hypos]
if dump_target:
feat_targs = [to_np(hypo["targ_feature"]) for hypo in hypos]
wave_targs = [to_np(resample_fn(h["targ_waveform"])) for h in hypos]
else:
feat_targs = [None for _ in hypos]
wave_targs = [None for _ in hypos]
return zip(sample_ids, texts, attns, eos_probs, feat_preds, wave_preds,
feat_targs, wave_targs) | null |
181,157 | import ast
import logging
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import soundfile as sf
import sys
import torch
import torchaudio
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.logging import progress_bar
from fairseq.tasks.text_to_speech import plot_tts_output
from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset
def plot_tts_output(
data_2d, title_2d, data_1d, title_1d, figsize=(24, 4),
v_min=DEFAULT_V_MIN, v_max=3, ret_np=False, suptitle=""
):
try:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
raise ImportError("Please install Matplotlib: pip install matplotlib")
data_2d = [
x.detach().cpu().float().numpy()
if isinstance(x, torch.Tensor) else x for x in data_2d
]
fig, axes = plt.subplots(1, len(data_2d) + 1, figsize=figsize)
if suptitle:
fig.suptitle(suptitle[:400]) # capped at 400 chars
axes = [axes] if len(data_2d) == 0 else axes
for ax, x, name in zip(axes, data_2d, title_2d):
ax.set_title(name)
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
im = ax.imshow(
x, origin="lower", aspect="auto", vmin=max(x.min(), v_min),
vmax=min(x.max(), v_max)
)
fig.colorbar(im, cax=cax, orientation='vertical')
if isinstance(data_1d, torch.Tensor):
data_1d = data_1d.detach().cpu().numpy()
axes[-1].plot(data_1d)
axes[-1].set_title(title_1d)
plt.tight_layout()
if ret_np:
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close(fig)
return data
def dump_result(
is_na_model,
args,
vocoder,
sample_id,
text,
attn,
eos_prob,
feat_pred,
wave_pred,
feat_targ,
wave_targ,
):
sample_rate = args.output_sample_rate
out_root = Path(args.results_path)
if args.dump_features:
feat_dir = out_root / "feat"
feat_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_dir / f"{sample_id}.npy", feat_pred)
if args.dump_target:
feat_tgt_dir = out_root / "feat_tgt"
feat_tgt_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_tgt_dir / f"{sample_id}.npy", feat_targ)
if args.dump_attentions:
attn_dir = out_root / "attn"
attn_dir.mkdir(exist_ok=True, parents=True)
np.save(attn_dir / f"{sample_id}.npy", attn.numpy())
if args.dump_eos_probs and not is_na_model:
eos_dir = out_root / "eos"
eos_dir.mkdir(exist_ok=True, parents=True)
np.save(eos_dir / f"{sample_id}.npy", eos_prob)
if args.dump_plots:
images = [feat_pred.T] if is_na_model else [feat_pred.T, attn]
names = ["output"] if is_na_model else ["output", "alignment"]
if feat_targ is not None:
images = [feat_targ.T] + images
names = [f"target (idx={sample_id})"] + names
if is_na_model:
plot_tts_output(images, names, attn, "alignment", suptitle=text)
else:
plot_tts_output(images, names, eos_prob, "eos prob", suptitle=text)
plot_dir = out_root / "plot"
plot_dir.mkdir(exist_ok=True, parents=True)
plt.savefig(plot_dir / f"{sample_id}.png")
plt.close()
if args.dump_waveforms:
ext = args.audio_format
if wave_pred is not None:
wav_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}"
wav_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_dir / f"{sample_id}.{ext}", wave_pred, sample_rate)
if args.dump_target and wave_targ is not None:
wav_tgt_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}_tgt"
wav_tgt_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_tgt_dir / f"{sample_id}.{ext}", wave_targ, sample_rate) | null |
181,162 | import numpy as np
import os.path as op
import torchaudio
import tqdm
from tabulate import tabulate
from examples.speech_synthesis.utils import (
gross_pitch_error, voicing_decision_error, f0_frame_error
)
from examples.speech_synthesis.evaluation.eval_sp import load_eval_spec
def eval_f0_error(samples, distortion_fn):
def f0_frame_error(true_t, true_f, est_t, est_f):
def eval_f0_frame_error(samples):
return eval_f0_error(samples, f0_frame_error) | null |
181,168 | import csv
import numpy as np
import os.path as op
import torch
import tqdm
from tabulate import tabulate
import torchaudio
from examples.speech_synthesis.utils import batch_mel_spectral_distortion
from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion
def eval_distortion(samples, distortion_fn, device="cuda"):
nmiss = 0
results = []
for sample in tqdm.tqdm(samples):
if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]):
nmiss += 1
results.append(None)
continue
# assume single channel
yref, sr = torchaudio.load(sample["ref"])
ysyn, _sr = torchaudio.load(sample["syn"])
yref, ysyn = yref[0].to(device), ysyn[0].to(device)
assert sr == _sr, f"{sr} != {_sr}"
distortion, extra = distortion_fn([yref], [ysyn], sr, None)[0]
_, _, _, _, _, pathmap = extra
nins = torch.sum(pathmap.sum(dim=1) - 1) # extra frames in syn
ndel = torch.sum(pathmap.sum(dim=0) - 1) # missing frames from syn
results.append(
(distortion.item(), # path distortion
pathmap.size(0), # yref num frames
pathmap.size(1), # ysyn num frames
pathmap.sum().item(), # path length
nins.item(), # insertion
ndel.item(), # deletion
)
)
return results
def batch_mel_cepstral_distortion(
y1, y2, sr, normalize_type="path", mfcc_fn=None
):
"""
https://arxiv.org/pdf/2011.03568.pdf
The root mean squared error computed on 13-dimensional MFCC using DTW for
alignment. MFCC features are computed from an 80-channel log-mel
spectrogram using a 50ms Hann window and hop of 12.5ms.
y1: list of waveforms
y2: list of waveforms
sr: sampling rate
"""
try:
import torchaudio
except ImportError:
raise ImportError("Please install torchaudio: pip install torchaudio")
if mfcc_fn is None or mfcc_fn.sample_rate != sr:
melkwargs = {
"n_fft": int(0.05 * sr), "win_length": int(0.05 * sr),
"hop_length": int(0.0125 * sr), "f_min": 20,
"n_mels": 80, "window_fn": torch.hann_window
}
mfcc_fn = torchaudio.transforms.MFCC(
sr, n_mfcc=13, log_mels=True, melkwargs=melkwargs
).to(y1[0].device)
return batch_compute_distortion(
y1, y2, sr, lambda y: mfcc_fn(y).transpose(-1, -2), compute_rms_dist,
normalize_type
)
def eval_mel_cepstral_distortion(samples, device="cuda"):
return eval_distortion(samples, batch_mel_cepstral_distortion, device) | null |
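Editorial aside: a tiny numeric sketch of the insertion/deletion counting in eval_distortion above, using a hypothetical 3x4 DTW path map (rows = reference frames, columns = synthesized frames).
import torch

pathmap = torch.tensor([[1, 1, 0, 0],
                        [0, 0, 1, 0],
                        [0, 0, 0, 1]])
nins = torch.sum(pathmap.sum(dim=1) - 1)  # reference frame 0 maps to 2 syn frames -> 1 insertion
ndel = torch.sum(pathmap.sum(dim=0) - 1)  # every syn frame is covered exactly once -> 0 deletions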
181,172 | import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from collections import Counter, defaultdict
import pandas as pd
import torchaudio
from tqdm import tqdm
from fairseq.data.audio.audio_utils import convert_waveform
from examples.speech_to_text.data_utils import (
create_zip,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_tsv_to_dicts,
save_df_to_tsv
)
from examples.speech_synthesis.data_utils import (
extract_logmel_spectrogram, extract_pitch, extract_energy, get_global_cmvn,
ipa_phonemize, get_mfa_alignment, get_unit_alignment,
get_feature_value_min_max
)
def convert_waveform(
waveform: Union[np.ndarray, torch.Tensor], sample_rate: int,
normalize_volume: bool = False, to_mono: bool = False,
to_sample_rate: Optional[int] = None
) -> Tuple[Union[np.ndarray, torch.Tensor], int]:
"""convert a waveform:
- to a target sample rate
- from multi-channel to mono channel
- volume normalization
Args:
waveform (numpy.ndarray or torch.Tensor): 2D original waveform
(channels x length)
sample_rate (int): original sample rate
normalize_volume (bool): perform volume normalization
to_mono (bool): convert to mono channel if having multiple channels
to_sample_rate (Optional[int]): target sample rate
Returns:
waveform (numpy.ndarray): converted 2D waveform (channels x length)
sample_rate (float): target sample rate
"""
try:
import torchaudio.sox_effects as ta_sox
except ImportError:
raise ImportError("Please install torchaudio: pip install torchaudio")
effects = []
if normalize_volume:
effects.append(["gain", "-n"])
if to_sample_rate is not None and to_sample_rate != sample_rate:
effects.append(["rate", f"{to_sample_rate}"])
if to_mono and waveform.shape[0] > 1:
effects.append(["channels", "1"])
if len(effects) > 0:
is_np_input = isinstance(waveform, np.ndarray)
_waveform = torch.from_numpy(waveform) if is_np_input else waveform
converted, converted_sample_rate = ta_sox.apply_effects_tensor(
_waveform, sample_rate, effects
)
if is_np_input:
converted = converted.numpy()
return converted, converted_sample_rate
return waveform, sample_rate
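Editorial aside: an illustrative usage sketch of the helper above, resampling a 1-second stereo 44.1 kHz waveform to 16 kHz mono (requires torchaudio with sox effects; values are synthetic).
import torch

waveform = torch.randn(2, 44_100)  # 2 channels x 1 s at 44.1 kHz
converted, sr = convert_waveform(
    waveform, 44_100, to_mono=True, to_sample_rate=16_000
)
# converted.shape == (1, 16000) and sr == 16000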
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
# Train SentencePiece Model
arguments = [
f"--input={input_path.as_posix()}",
f"--model_prefix={output_path_prefix.as_posix()}",
f"--model_type={model_type}",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
f"--num_threads={cpu_count()}",
f"--unk_id={UNK_TOKEN_ID}",
f"--bos_id={BOS_TOKEN_ID}",
f"--eos_id={EOS_TOKEN_ID}",
f"--pad_id={PAD_TOKEN_ID}",
]
if special_symbols is not None:
_special_symbols = ",".join(special_symbols)
arguments.append(f"--user_defined_symbols={_special_symbols}")
sp.SentencePieceTrainer.Train(" ".join(arguments))
# Export fairseq dictionary
spm = sp.SentencePieceProcessor()
spm.Load(output_path_prefix.as_posix() + ".model")
vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
assert (
vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
)
vocab = {
i: s
for i, s in vocab.items()
if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
}
with open(output_path_prefix.as_posix() + ".txt", "w") as f_out:
for _, s in sorted(vocab.items(), key=lambda x: x[0]):
f_out.write(f"{s} 1\n")
def create_zip(data_root: Path, zip_path: Path):
paths = list(data_root.glob("*.npy"))
paths.extend(data_root.glob("*.flac"))
with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f:
for path in tqdm(paths):
f.write(path, arcname=path.name)
def get_zip_manifest(
zip_path: Path, zip_root: Optional[Path] = None, is_audio=False
):
_zip_path = Path.joinpath(zip_root or Path(""), zip_path)
with zipfile.ZipFile(_zip_path, mode="r") as f:
info = f.infolist()
paths, lengths = {}, {}
for i in tqdm(info):
utt_id = Path(i.filename).stem
offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size
paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}"
with open(_zip_path, "rb") as f:
f.seek(offset)
byte_data = f.read(file_size)
assert len(byte_data) > 1
if is_audio:
assert is_sf_audio_data(byte_data), i
else:
assert is_npy_data(byte_data), i
byte_data_fp = io.BytesIO(byte_data)
if is_audio:
lengths[utt_id] = sf.info(byte_data_fp).frames
else:
lengths[utt_id] = np.load(byte_data_fp).shape[0]
return paths, lengths
def gen_config_yaml(
manifest_root: Path,
spm_filename: Optional[str] = None,
vocab_name: Optional[str] = None,
yaml_filename: str = "config.yaml",
specaugment_policy: Optional[str] = "lb",
prepend_tgt_lang_tag: bool = False,
sampling_alpha: Optional[float] = None,
input_channels: Optional[int] = 1,
input_feat_per_channel: Optional[int] = 80,
audio_root: str = "",
cmvn_type: str = "utterance",
gcmvn_path: Optional[Path] = None,
extra=None
):
manifest_root = manifest_root.absolute()
writer = S2TDataConfigWriter(manifest_root / yaml_filename)
assert spm_filename is not None or vocab_name is not None
vocab_name = spm_filename.replace(".model", ".txt") if vocab_name is None \
else vocab_name
writer.set_vocab_filename(vocab_name)
if input_channels is not None:
writer.set_input_channels(input_channels)
if input_feat_per_channel is not None:
writer.set_input_feat_per_channel(input_feat_per_channel)
specaugment_setters = {
"lb": writer.set_specaugment_lb_policy,
"ld": writer.set_specaugment_ld_policy,
"sm": writer.set_specaugment_sm_policy,
"ss": writer.set_specaugment_ss_policy,
}
specaugment_setter = specaugment_setters.get(specaugment_policy, None)
if specaugment_setter is not None:
specaugment_setter()
if spm_filename is not None:
writer.set_bpe_tokenizer(
{
"bpe": "sentencepiece",
"sentencepiece_model": (manifest_root / spm_filename).as_posix(),
}
)
if prepend_tgt_lang_tag:
writer.set_prepend_tgt_lang_tag(True)
if sampling_alpha is not None:
writer.set_sampling_alpha(sampling_alpha)
if cmvn_type not in ["global", "utterance"]:
raise NotImplementedError
if specaugment_policy is not None:
writer.set_feature_transforms(
"_train", [f"{cmvn_type}_cmvn", "specaugment"]
)
writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"])
if cmvn_type == "global":
if gcmvn_path is None:
raise ValueError("Please provide path of global cmvn file.")
else:
writer.set_global_cmvn(gcmvn_path.as_posix())
if len(audio_root) > 0:
writer.set_audio_root(audio_root)
if extra is not None:
writer.set_extra(extra)
writer.flush()
def save_df_to_tsv(dataframe, path: Union[str, Path]):
_path = path if isinstance(path, str) else path.as_posix()
dataframe.to_csv(
_path,
sep="\t",
header=True,
index=False,
encoding="utf-8",
escapechar="\\",
quoting=csv.QUOTE_NONE,
)
def load_tsv_to_dicts(path: Union[str, Path]) -> List[dict]:
with open(path, "r") as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
rows = [dict(e) for e in reader]
return rows
def extract_logmel_spectrogram(
waveform: torch.Tensor, sample_rate: int,
output_path: Optional[Path] = None, win_length: int = 1024,
hop_length: int = 256, n_fft: int = 1024,
win_fn: callable = torch.hann_window, n_mels: int = 80,
f_min: float = 0., f_max: float = 8000, eps: float = 1e-5,
overwrite: bool = False, target_length: Optional[int] = None
):
if output_path is not None and output_path.is_file() and not overwrite:
return
spectrogram_transform = TTSSpectrogram(
n_fft=n_fft, win_length=win_length, hop_length=hop_length,
window_fn=win_fn
)
mel_scale_transform = TTSMelScale(
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
n_stft=n_fft // 2 + 1
)
spectrogram = spectrogram_transform(waveform)
mel_spec = mel_scale_transform(spectrogram)
logmel_spec = torch.clamp(mel_spec, min=eps).log()
assert len(logmel_spec.shape) == 3 and logmel_spec.shape[0] == 1
logmel_spec = logmel_spec.squeeze().t() # D x T -> T x D
if target_length is not None:
logmel_spec = trim_or_pad_to_target_length(logmel_spec, target_length)
if output_path is not None:
np.save(output_path.as_posix(), logmel_spec)
else:
return logmel_spec
def extract_pitch(
waveform: torch.Tensor, sample_rate: int,
output_path: Optional[Path] = None, hop_length: int = 256,
log_scale: bool = True, phoneme_durations: Optional[List[int]] = None
):
if output_path is not None and output_path.is_file():
return
try:
import pyworld
except ImportError:
raise ImportError("Please install PyWORLD: pip install pyworld")
_waveform = waveform.squeeze(0).double().numpy()
pitch, t = pyworld.dio(
_waveform, sample_rate, frame_period=hop_length / sample_rate * 1000
)
pitch = pyworld.stonemask(_waveform, pitch, t, sample_rate)
if phoneme_durations is not None:
pitch = trim_or_pad_to_target_length(pitch, sum(phoneme_durations))
try:
from scipy.interpolate import interp1d
except ImportError:
raise ImportError("Please install SciPy: pip install scipy")
nonzero_ids = np.where(pitch != 0)[0]
if len(nonzero_ids) == 0:
print((f"{output_path} has all empty values in the pitch contour"))
return
elif len(nonzero_ids) == 1:
print((f"{output_path} has only one non-zero values in the pitch contour"))
return
else:
interp_fn = interp1d(
nonzero_ids,
pitch[nonzero_ids],
fill_value=(pitch[nonzero_ids[0]], pitch[nonzero_ids[-1]]),
bounds_error=False,
)
pitch = interp_fn(np.arange(0, len(pitch)))
d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations]))
pitch = np.array(
[
np.mean(pitch[d_cumsum[i-1]: d_cumsum[i]])
for i in range(1, len(d_cumsum))
]
)
assert len(pitch) == len(phoneme_durations)
if log_scale:
pitch = np.log(pitch + 1)
if output_path is not None:
np.save(output_path.as_posix(), pitch)
else:
return pitch
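Editorial aside: a tiny NumPy/SciPy sketch of the unvoiced-frame interpolation used above — zero (unvoiced) pitch values are filled by linear interpolation between the surrounding voiced frames, with the endpoints clamped to the first/last voiced values.
import numpy as np
from scipy.interpolate import interp1d

pitch = np.array([0.0, 110.0, 0.0, 0.0, 220.0, 0.0])
nonzero_ids = np.where(pitch != 0)[0]  # array([1, 4])
interp_fn = interp1d(
    nonzero_ids, pitch[nonzero_ids],
    fill_value=(pitch[nonzero_ids[0]], pitch[nonzero_ids[-1]]),
    bounds_error=False,
)
pitch = interp_fn(np.arange(len(pitch)))
# -> [110., 110., ~146.7, ~183.3, 220., 220.]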
def extract_energy(
waveform: torch.Tensor, output_path: Optional[Path] = None,
hop_length: int = 256, n_fft: int = 1024, log_scale: bool = True,
phoneme_durations: Optional[List[int]] = None
):
if output_path is not None and output_path.is_file():
return
assert len(waveform.shape) == 2 and waveform.shape[0] == 1
waveform = waveform.view(1, 1, waveform.shape[1])
waveform = F.pad(
waveform.unsqueeze(1), [n_fft // 2, n_fft // 2, 0, 0],
mode="reflect"
)
waveform = waveform.squeeze(1)
fourier_basis = np.fft.fft(np.eye(n_fft))
cutoff = int((n_fft / 2 + 1))
fourier_basis = np.vstack(
[np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])]
)
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
forward_transform = F.conv1d(
waveform, forward_basis, stride=hop_length, padding=0
)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
energy = torch.norm(magnitude, dim=1).squeeze(0).numpy()
if phoneme_durations is not None:
energy = trim_or_pad_to_target_length(energy, sum(phoneme_durations))
d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations]))
energy = np.array(
[
np.mean(energy[d_cumsum[i - 1]: d_cumsum[i]])
for i in range(1, len(d_cumsum))
]
)
assert len(energy) == len(phoneme_durations)
if log_scale:
energy = np.log(energy + 1)
if output_path is not None:
np.save(output_path.as_posix(), energy)
else:
return energy
def get_global_cmvn(feature_root: Path, output_path: Optional[Path] = None):
mean_x, mean_x2, n_frames = None, None, 0
feature_paths = feature_root.glob("*.npy")
for p in tqdm(feature_paths):
with open(p, 'rb') as f:
frames = np.load(f).squeeze()
n_frames += frames.shape[0]
cur_mean_x = frames.sum(axis=0)
if mean_x is None:
mean_x = cur_mean_x
else:
mean_x += cur_mean_x
cur_mean_x2 = (frames ** 2).sum(axis=0)
if mean_x2 is None:
mean_x2 = cur_mean_x2
else:
mean_x2 += cur_mean_x2
mean_x /= n_frames
mean_x2 /= n_frames
var_x = mean_x2 - mean_x ** 2
std_x = np.sqrt(np.maximum(var_x, 1e-10))
if output_path is not None:
with open(output_path, 'wb') as f:
np.savez(f, mean=mean_x, std=std_x)
else:
return {"mean": mean_x, "std": std_x}
def ipa_phonemize(text, lang="en-us", use_g2p=False):
if use_g2p:
assert lang == "en-us", "g2pE phonemizer only works for en-us"
try:
from g2p_en import G2p
g2p = G2p()
return " ".join("|" if p == " " else p for p in g2p(text))
except ImportError:
raise ImportError(
"Please install phonemizer: pip install g2p_en"
)
else:
try:
from phonemizer import phonemize
from phonemizer.separator import Separator
return phonemize(
text, backend='espeak', language=lang,
separator=Separator(word="| ", phone=" ")
)
except ImportError:
raise ImportError(
"Please install phonemizer: pip install phonemizer"
)
def get_mfa_alignment(
textgrid_zip_path: str, sample_ids: List[str], sample_rate: int,
hop_length: int
) -> Dict[str, ForceAlignmentInfo]:
return {
i: get_mfa_alignment_by_sample_id(
textgrid_zip_path, i, sample_rate, hop_length
) for i in tqdm(sample_ids)
}
def get_unit_alignment(
id_to_unit_tsv_path: str, sample_ids: List[str]
) -> Dict[str, ForceAlignmentInfo]:
id_to_units = {
e["id"]: e["units"] for e in load_tsv_to_dicts(id_to_unit_tsv_path)
}
id_to_units = {i: id_to_units[i].split() for i in sample_ids}
id_to_units_collapsed = {
i: [uu for uu, _ in groupby(u)] for i, u in id_to_units.items()
}
id_to_durations = {
i: [len(list(g)) for _, g in groupby(u)] for i, u in id_to_units.items()
}
return {
i: ForceAlignmentInfo(
tokens=id_to_units_collapsed[i], frame_durations=id_to_durations[i],
start_sec=None, end_sec=None
)
for i in sample_ids
}
def get_feature_value_min_max(feature_paths: List[str]):
v_min, v_max = 1e-8, -1e-8
for p in tqdm(feature_paths):
_path, slice_ptr = parse_path(p)
assert len(slice_ptr) == 2
byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
assert is_npy_data(byte_data)
path_or_fp = io.BytesIO(byte_data)
features = np.load(path_or_fp).squeeze()
v_min = min(v_min, features.min().item())
v_max = max(v_max, features.max().item())
return v_min, v_max
def process(args):
assert "train" in args.splits
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
print("Fetching data...")
audio_manifest_root = Path(args.audio_manifest_root).absolute()
samples = []
for s in args.splits:
for e in load_tsv_to_dicts(audio_manifest_root / f"{s}.audio.tsv"):
e["split"] = s
samples.append(e)
sample_ids = [s["id"] for s in samples]
# Get alignment info
id_to_alignment = None
if args.textgrid_zip is not None:
assert args.id_to_units_tsv is None
id_to_alignment = get_mfa_alignment(
args.textgrid_zip, sample_ids, args.sample_rate, args.hop_length
)
elif args.id_to_units_tsv is not None:
# assume identical hop length on the unit sequence
id_to_alignment = get_unit_alignment(args.id_to_units_tsv, sample_ids)
# Extract features and pack features into ZIP
feature_name = "logmelspec80"
zip_path = out_root / f"{feature_name}.zip"
pitch_zip_path = out_root / "pitch.zip"
energy_zip_path = out_root / "energy.zip"
gcmvn_npz_path = out_root / "gcmvn_stats.npz"
if zip_path.exists() and gcmvn_npz_path.exists():
print(f"{zip_path} and {gcmvn_npz_path} exist.")
else:
feature_root = out_root / feature_name
feature_root.mkdir(exist_ok=True)
pitch_root = out_root / "pitch"
energy_root = out_root / "energy"
if args.add_fastspeech_targets:
pitch_root.mkdir(exist_ok=True)
energy_root.mkdir(exist_ok=True)
print("Extracting Mel spectrogram features...")
for sample in tqdm(samples):
waveform, sample_rate = torchaudio.load(sample["audio"])
waveform, sample_rate = convert_waveform(
waveform, sample_rate, normalize_volume=args.normalize_volume,
to_sample_rate=args.sample_rate
)
sample_id = sample["id"]
target_length = None
if id_to_alignment is not None:
a = id_to_alignment[sample_id]
target_length = sum(a.frame_durations)
if a.start_sec is not None and a.end_sec is not None:
start_frame = int(a.start_sec * sample_rate)
end_frame = int(a.end_sec * sample_rate)
waveform = waveform[:, start_frame: end_frame]
extract_logmel_spectrogram(
waveform, sample_rate, feature_root / f"{sample_id}.npy",
win_length=args.win_length, hop_length=args.hop_length,
n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min,
f_max=args.f_max, target_length=target_length
)
if args.add_fastspeech_targets:
assert id_to_alignment is not None
extract_pitch(
waveform, sample_rate, pitch_root / f"{sample_id}.npy",
hop_length=args.hop_length, log_scale=True,
phoneme_durations=id_to_alignment[sample_id].frame_durations
)
extract_energy(
waveform, energy_root / f"{sample_id}.npy",
hop_length=args.hop_length, n_fft=args.n_fft,
log_scale=True,
phoneme_durations=id_to_alignment[sample_id].frame_durations
)
print("ZIPing features...")
create_zip(feature_root, zip_path)
get_global_cmvn(feature_root, gcmvn_npz_path)
shutil.rmtree(feature_root)
if args.add_fastspeech_targets:
create_zip(pitch_root, pitch_zip_path)
shutil.rmtree(pitch_root)
create_zip(energy_root, energy_zip_path)
shutil.rmtree(energy_root)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
pitch_paths, pitch_lengths, energy_paths, energy_lengths = [None] * 4
if args.add_fastspeech_targets:
pitch_paths, pitch_lengths = get_zip_manifest(pitch_zip_path)
energy_paths, energy_lengths = get_zip_manifest(energy_zip_path)
# Generate TSV manifest
print("Generating manifest...")
id_to_cer = None
if args.cer_threshold is not None:
assert Path(args.cer_tsv_path).is_file()
id_to_cer = {
x["id"]: x["uer"] for x in load_tsv_to_dicts(args.cer_tsv_path)
}
manifest_by_split = {split: defaultdict(list) for split in args.splits}
for sample in tqdm(samples):
sample_id, split = sample["id"], sample["split"]
if args.snr_threshold is not None and "snr" in sample \
and sample["snr"] < args.snr_threshold:
continue
if args.cer_threshold is not None \
                and float(id_to_cer[sample_id]) > args.cer_threshold:
continue
normalized_utt = sample["tgt_text"]
if id_to_alignment is not None:
normalized_utt = " ".join(id_to_alignment[sample_id].tokens)
elif args.ipa_vocab:
normalized_utt = ipa_phonemize(
normalized_utt, lang=args.lang, use_g2p=args.use_g2p
)
manifest_by_split[split]["id"].append(sample_id)
manifest_by_split[split]["audio"].append(audio_paths[sample_id])
manifest_by_split[split]["n_frames"].append(audio_lengths[sample_id])
manifest_by_split[split]["tgt_text"].append(normalized_utt)
manifest_by_split[split]["speaker"].append(sample["speaker"])
manifest_by_split[split]["src_text"].append(sample["src_text"])
if args.add_fastspeech_targets:
assert id_to_alignment is not None
duration = " ".join(
str(d) for d in id_to_alignment[sample_id].frame_durations
)
manifest_by_split[split]["duration"].append(duration)
manifest_by_split[split]["pitch"].append(pitch_paths[sample_id])
manifest_by_split[split]["energy"].append(energy_paths[sample_id])
for split in args.splits:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
out_root / f"{split}.tsv"
)
# Generate vocab
vocab_name, spm_filename = None, None
if id_to_alignment is not None or args.ipa_vocab:
vocab = Counter()
for t in manifest_by_split["train"]["tgt_text"]:
vocab.update(t.split(" "))
vocab_name = "vocab.txt"
with open(out_root / vocab_name, "w") as f:
for s, c in vocab.most_common():
f.write(f"{s} {c}\n")
else:
spm_filename_prefix = "spm_char"
spm_filename = f"{spm_filename_prefix}.model"
with NamedTemporaryFile(mode="w") as f:
for t in manifest_by_split["train"]["tgt_text"]:
f.write(t + "\n")
f.flush() # needed to ensure gen_vocab sees dumped text
gen_vocab(Path(f.name), out_root / spm_filename_prefix, "char")
# Generate speaker list
speakers = sorted({sample["speaker"] for sample in samples})
speakers_path = out_root / "speakers.txt"
with open(speakers_path, "w") as f:
for speaker in speakers:
f.write(f"{speaker}\n")
# Generate config YAML
win_len_t = args.win_length / args.sample_rate
hop_len_t = args.hop_length / args.sample_rate
extra = {
"sample_rate": args.sample_rate,
"features": {
"type": "spectrogram+melscale+log",
"eps": 1e-5, "n_mels": args.n_mels, "n_fft": args.n_fft,
"window_fn": "hann", "win_length": args.win_length,
"hop_length": args.hop_length, "sample_rate": args.sample_rate,
"win_len_t": win_len_t, "hop_len_t": hop_len_t,
"f_min": args.f_min, "f_max": args.f_max,
"n_stft": args.n_fft // 2 + 1
}
}
if len(speakers) > 1:
extra["speaker_set_filename"] = "speakers.txt"
if args.add_fastspeech_targets:
pitch_min, pitch_max = get_feature_value_min_max(
[(out_root / n).as_posix() for n in pitch_paths.values()]
)
energy_min, energy_max = get_feature_value_min_max(
[(out_root / n).as_posix() for n in energy_paths.values()]
)
extra["features"]["pitch_min"] = pitch_min
extra["features"]["pitch_max"] = pitch_max
extra["features"]["energy_min"] = energy_min
extra["features"]["energy_max"] = energy_max
gen_config_yaml(
out_root, spm_filename=spm_filename, vocab_name=vocab_name,
audio_root=out_root.as_posix(), input_channels=None,
input_feat_per_channel=None, specaugment_policy=None,
cmvn_type="global", gcmvn_path=gcmvn_npz_path, extra=extra
) | null |
181,187 | import argparse
import logging
from pathlib import Path
from collections import defaultdict
from typing import List, Dict, Tuple
import pandas as pd
import numpy as np
import torchaudio
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_df_from_tsv, save_df_to_tsv
SPLITS = ["train", "dev", "test"]
def get_top_n(
root: Path, n_speakers: int = 10, min_n_tokens: int = 5
) -> pd.DataFrame:
def get_splits(
df, train_split_ratio=0.99, speaker_in_all_splits=False, rand_seed=0
) -> Tuple[Dict[str, str], List[str]]:
def convert_to_wav(root: Path, filenames: List[str], target_sr=16_000):
def save_df_to_tsv(dataframe, path: Union[str, Path]):
def process(args):
data_root = Path(args.data_root).absolute() / args.lang
# Generate TSV manifest
print("Generating manifest...")
df_top_n = get_top_n(data_root)
id_to_split, speakers = get_splits(df_top_n)
if args.convert_to_wav:
convert_to_wav(data_root, df_top_n["path"].tolist())
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
for sample in tqdm(df_top_n.to_dict(orient="index").values()):
sample_id = sample["id"]
split = id_to_split[sample_id]
manifest_by_split[split]["id"].append(sample_id)
if args.convert_to_wav:
audio_path = data_root / "wav" / f"{sample_id}.wav"
else:
audio_path = data_root / "clips" / f"{sample_id}.mp3"
manifest_by_split[split]["audio"].append(audio_path.as_posix())
manifest_by_split[split]["n_frames"].append(sample["n_frames"])
manifest_by_split[split]["tgt_text"].append(sample["sentence"])
manifest_by_split[split]["speaker"].append(sample["client_id"])
manifest_by_split[split]["src_text"].append(sample["sentence"])
output_root = Path(args.output_manifest_root).absolute()
output_root.mkdir(parents=True, exist_ok=True)
for split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
output_root / f"{split}.audio.tsv"
) | null |
181,194 | import copy
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
register_model,
register_model_architecture,
FairseqEncoder,
)
from fairseq.models.speech_to_text import Wav2VecEncoderWithAdaptor
from fairseq.models.speech_to_text.xm_transformer import (
set_default_adaptor_args,
set_default_w2v_encoder_args,
need_finetuning
)
from fairseq.models.transformer import TransformerEncoder, TransformerDecoder
from fairseq.models.wav2vec import TransformerSentenceEncoderLayer
from fairseq.utils import safe_hasattr
from .s2t_dualinputtransformer import (
DualInputS2TTransformerModel,
TransformerMultiInputDecoder,
DualInputEncoder,
)
def set_default_w2v_encoder_args(args):
args.no_pretrained_weights = getattr(args, "no_pretrained_weights", False)
args.dropout_input = getattr(args, "dropout_input", 0)
args.final_dropout = getattr(args, "final_dropout", 0)
args.apply_mask = getattr(args, "apply_mask", False)
args.dropout = getattr(args, "dropout", 0)
args.attention_dropout = getattr(args, "attention_dropout", 0)
args.activation_dropout = getattr(args, "activation_dropout", 0)
args.mask_length = getattr(args, "mask_length", 10)
args.mask_prob = getattr(args, "mask_prob", 0.5)
args.mask_selection = getattr(args, "mask_selection", "static")
args.mask_other = getattr(args, "mask_other", 0)
args.no_mask_overlap = getattr(args, "no_mask_overlap", False)
args.mask_channel_length = getattr(args, "mask_channel_length", 10)
args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5)
args.mask_channel_before = getattr(args, "mask_channel_before", False)
args.mask_channel_selection = getattr(args, "mask_channel_selection",
"static")
args.mask_channel_other = getattr(args, "mask_channel_other", 0)
args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap",
False)
args.freeze_finetune_updates = getattr(args, "freeze_finetune_updates", 0)
args.feature_grad_mult = 0.1
args.layerdrop = getattr(args, "layerdrop", 0.0)
args.normalize = getattr(args, "normalize", False)
def set_default_adaptor_args(args):
args.adaptor_n_layers = getattr(args, "adaptor_n_layers", 3)
args.adaptor_kernel_size = getattr(args, "adaptor_kernel_size", 3)
args.adaptor_stride = getattr(args, "adaptor_stride", 2)
args.adaptor_layernorm = getattr(args, "adaptor_layernorm", False)
def dualinputxmtransformer_base(args):
# wav2vec encoder
set_default_w2v_encoder_args(args)
set_default_adaptor_args(args)
# mbart model
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(
args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim
)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * 1024)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.mbart_attention_dropout = getattr(args, "mbart_attention_dropout", 0.0)
args.mbart_activation_dropout = getattr(args, "mbart_activation_dropout", 0.0)
args.mbart_dropout = getattr(args, "mbart_dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) | null |
181,228 | import numpy as np
import nltk
from misc.bleu_utils import sentence_bleu
import warnings
def auto_bleu(sentence, weights, mean_mode='arithmetic'):
def get_auto_bleu3_geometric(utterances):
weights = (1./3, 1./3, 1./3)
return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances] | null |
181,229 | import numpy as np
import nltk
from misc.bleu_utils import sentence_bleu
import warnings
def auto_bleu(sentence, weights, mean_mode='arithmetic'):
if len(sentence) <= 1:
return 0
N = len(weights)
bleu_n = np.zeros([N])
for n in range(N):
targ_ngrams = list(nltk.ngrams(sentence, n+1))
for p in range(len(targ_ngrams)):
left = sentence[:p]
right = sentence[(p+n+1):]
rest_ngrams = list(nltk.ngrams(left, n+1)) + \
list(nltk.ngrams(right, n+1))
# compute the nb of matching ngrams
bleu_n[n] += targ_ngrams[p] in rest_ngrams
bleu_n[n] /= len(targ_ngrams) # average them to get a proportion
weights = np.array(weights)
if mean_mode == 'arithmetic':
return (bleu_n * weights).sum()
elif mean_mode == 'geometric':
return (bleu_n ** weights).prod()
else:
        raise ValueError(f'Unknown aggregation mode {mean_mode}')
def get_auto_bleu3_arithmetic(utterances):
weights = (1./3, 1./3, 1./3)
return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances] | null |
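Editorial aside: a small worked example of auto-BLEU restricted to unigrams. Each position's n-gram is checked against the n-grams of the rest of the sentence, so repeated units raise the score.
sentence = ["a", "b", "a", "c"]
# position 0 ("a"): another "a" exists elsewhere -> match
# position 1 ("b"): no other "b"                 -> no match
# position 2 ("a"): another "a" exists elsewhere -> match
# position 3 ("c"): no other "c"                 -> no match
# bleu_1 = 2 / 4 = 0.5, hence auto_bleu(sentence, weights=(1.0,)) == 0.5
# (identical under both 'arithmetic' and 'geometric' mean modes)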
181,249 | import gc
import os
import random
import shutil
import numpy as np
import torch
import tqdm
from examples.textless_nlp.gslm.speech2unit.pretrained.cpc_feature_reader import (
CpcFeatureReader,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.hubert_feature_reader import (
HubertFeatureReader,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.logmel_feature_reader import (
LogMelFeatureReader,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.w2v2_feature_reader import (
Wav2VecFeatureReader,
)
def get_features(
feature_type, checkpoint_path, layer, manifest_path, sample_pct, flatten
):
def get_and_dump_features(
feature_type,
checkpoint_path,
layer,
manifest_path,
sample_pct,
flatten,
out_features_path,
):
# Feature extraction
features_batch = get_features(
feature_type=feature_type,
checkpoint_path=checkpoint_path,
layer=layer,
manifest_path=manifest_path,
sample_pct=sample_pct,
flatten=flatten,
)
# Save features
out_dir_path = os.path.dirname(out_features_path)
os.makedirs(out_dir_path, exist_ok=True)
shutil.copyfile(
manifest_path,
os.path.join(out_dir_path, os.path.basename(manifest_path)),
)
np.save(out_features_path, features_batch)
return features_batch | null |
181,274 | import re
_alt_re = re.compile(r'\([0-9]+\)')
def _get_pronunciation(s):
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict | null |
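Editorial aside: the _alt_re pattern strips the numbered alternate-pronunciation markers that CMUdict attaches to words with several pronunciations, e.g.:
import re

_alt_re = re.compile(r'\([0-9]+\)')
re.sub(_alt_re, '', "READ(2)")  # -> 'READ'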
181,288 | import logging
from typing import Any, Dict, List, Optional
from torch import Tensor
import torch
import torch.nn as nn
from fairseq.models import (
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
base_architecture,
Embedding,
TransformerModel,
TransformerEncoder,
TransformerDecoder,
)
from fairseq.modules import (
TransformerDecoderLayer,
)
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
def base_laser_transformer_architecture(args):
base_architecture(args)
args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0) | null |
181,289 | from fairseq.models import register_model, register_model_architecture
from fairseq.models.multilingual_transformer import MultilingualTransformerModel
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
base_architecture,
)
from fairseq.utils import safe_hasattr
from .latent_transformer import LatentTransformerDecoder, LatentTransformerEncoder
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
def latent_multilingual_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 24)
args.share_encoders = getattr(args, "share_encoders", True)
args.share_decoders = getattr(args, "share_decoders", True)
args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", True)
args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", True)
base_architecture(args) | null |
181,293 | from collections import namedtuple
import logging
from multiprocessing import Pool
import sys
import os
import random
import numpy as np
import sacrebleu
import torch
from fairseq import checkpoint_utils, options, utils
pool_init_variables = {}
def get_best_hyps(mt_scores, md_scores, hypos, fw_weight, lenpen, beam):
def eval_metric(args, hypos, ref):
def score_target_hypo(args, fw_weight, lp):
mt_scores = pool_init_variables["mt_scores"]
model_scores = pool_init_variables["model_scores"]
hyp = pool_init_variables["hyp"]
ref = pool_init_variables["ref"]
best_hypos, _ = get_best_hyps(
mt_scores, model_scores, hyp, fw_weight, lp, args.beam
)
rerank_eval = None
if ref:
rerank_eval = eval_metric(args, best_hypos, ref)
print(f"fw_weight {fw_weight}, lenpen {lp}, eval {rerank_eval}")
return rerank_eval | null |
181,298 | import argparse
from multiprocessing import Pool
from pathlib import Path
import sacrebleu
import sentencepiece as spm
def get_bleu(in_sent, target_sent):
def get_ter(in_sent, target_sent):
def process(source_sent, target_sent, hypo_sent, metric):
source_bpe = " ".join(sp.EncodeAsPieces(source_sent))
hypo_bpe = [" ".join(sp.EncodeAsPieces(h)) for h in hypo_sent]
if metric == "bleu":
score_str = [get_bleu(h, target_sent) for h in hypo_sent]
else: # ter
score_str = [get_ter(h, target_sent) for h in hypo_sent]
return source_bpe, hypo_bpe, score_str | null |
181,313 | import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import numpy as np
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
cal_gcmvn_stats,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform, convert_waveform
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class MUSTC(Dataset):
"""
Create a Dataset for MuST-C. Each item is a tuple of the form:
waveform, sample_rate, source utterance, target utterance, speaker_id,
utterance_id
"""
SPLITS = ["train", "dev", "tst-COMMON", "tst-HE"]
LANGUAGES = ["de", "es", "fr", "it", "nl", "pt", "ro", "ru"]
def __init__(self, root: str, lang: str, split: str) -> None:
assert split in self.SPLITS and lang in self.LANGUAGES
_root = Path(root) / f"en-{lang}" / "data" / split
wav_root, txt_root = _root / "wav", _root / "txt"
assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
# Load audio segments
try:
import yaml
except ImportError:
print("Please install PyYAML to load the MuST-C YAML files")
with open(txt_root / f"{split}.yaml") as f:
segments = yaml.load(f, Loader=yaml.BaseLoader)
# Load source and target utterances
for _lang in ["en", lang]:
with open(txt_root / f"{split}.{_lang}") as f:
utterances = [r.strip() for r in f]
assert len(segments) == len(utterances)
for i, u in enumerate(utterances):
segments[i][_lang] = u
# Gather info
self.data = []
for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
wav_path = wav_root / wav_filename
sample_rate = sf.info(wav_path.as_posix()).samplerate
seg_group = sorted(_seg_group, key=lambda x: x["offset"])
for i, segment in enumerate(seg_group):
offset = int(float(segment["offset"]) * sample_rate)
n_frames = int(float(segment["duration"]) * sample_rate)
_id = f"{wav_path.stem}_{i}"
self.data.append(
(
wav_path.as_posix(),
offset,
n_frames,
sample_rate,
segment["en"],
segment[lang],
segment["speaker_id"],
_id,
)
)
def __getitem__(
self, n: int
) -> Tuple[torch.Tensor, int, str, str, str, str]:
wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, \
utt_id = self.data[n]
waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
waveform = torch.from_numpy(waveform)
return waveform, sr, src_utt, tgt_utt, spk_id, utt_id
def __len__(self) -> int:
return len(self.data)
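Editorial aside: a hypothetical usage sketch of the wrapper above, assuming the MuST-C en-de release has been unpacked under ./data/mustc (the path is illustrative only).
dataset = MUSTC("./data/mustc", lang="de", split="dev")
waveform, sample_rate, src_utt, tgt_utt, spk_id, utt_id = dataset[0]
print(len(dataset), utt_id, src_utt, "->", tgt_utt)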
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
# Train SentencePiece Model
arguments = [
f"--input={input_path.as_posix()}",
f"--model_prefix={output_path_prefix.as_posix()}",
f"--model_type={model_type}",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
f"--num_threads={cpu_count()}",
f"--unk_id={UNK_TOKEN_ID}",
f"--bos_id={BOS_TOKEN_ID}",
f"--eos_id={EOS_TOKEN_ID}",
f"--pad_id={PAD_TOKEN_ID}",
]
if special_symbols is not None:
_special_symbols = ",".join(special_symbols)
arguments.append(f"--user_defined_symbols={_special_symbols}")
sp.SentencePieceTrainer.Train(" ".join(arguments))
# Export fairseq dictionary
spm = sp.SentencePieceProcessor()
spm.Load(output_path_prefix.as_posix() + ".model")
vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
assert (
vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
)
vocab = {
i: s
for i, s in vocab.items()
if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
}
with open(output_path_prefix.as_posix() + ".txt", "w") as f_out:
for _, s in sorted(vocab.items(), key=lambda x: x[0]):
f_out.write(f"{s} 1\n")
def extract_fbank_features(
waveform: torch.FloatTensor,
sample_rate: int,
output_path: Optional[Path] = None,
n_mel_bins: int = 80,
overwrite: bool = False,
):
if output_path is not None and output_path.is_file() and not overwrite:
return
_waveform, _ = convert_waveform(waveform, sample_rate, to_mono=True)
# Kaldi compliance: 16-bit signed integers
_waveform = _waveform * (2 ** 15)
_waveform = _waveform[0].numpy()
features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins)
if features is None:
features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins)
if features is None:
raise ImportError(
"Please install pyKaldi or torchaudio to enable fbank feature extraction"
)
if output_path is not None:
np.save(output_path.as_posix(), features)
return features
def create_zip(data_root: Path, zip_path: Path):
paths = list(data_root.glob("*.npy"))
paths.extend(data_root.glob("*.flac"))
with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f:
for path in tqdm(paths):
f.write(path, arcname=path.name)
def get_zip_manifest(
zip_path: Path, zip_root: Optional[Path] = None, is_audio=False
):
_zip_path = Path.joinpath(zip_root or Path(""), zip_path)
with zipfile.ZipFile(_zip_path, mode="r") as f:
info = f.infolist()
paths, lengths = {}, {}
for i in tqdm(info):
utt_id = Path(i.filename).stem
offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size
paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}"
with open(_zip_path, "rb") as f:
f.seek(offset)
byte_data = f.read(file_size)
assert len(byte_data) > 1
if is_audio:
assert is_sf_audio_data(byte_data), i
else:
assert is_npy_data(byte_data), i
byte_data_fp = io.BytesIO(byte_data)
if is_audio:
lengths[utt_id] = sf.info(byte_data_fp).frames
else:
lengths[utt_id] = np.load(byte_data_fp).shape[0]
return paths, lengths
def gen_config_yaml(
manifest_root: Path,
spm_filename: Optional[str] = None,
vocab_name: Optional[str] = None,
yaml_filename: str = "config.yaml",
specaugment_policy: Optional[str] = "lb",
prepend_tgt_lang_tag: bool = False,
sampling_alpha: Optional[float] = None,
input_channels: Optional[int] = 1,
input_feat_per_channel: Optional[int] = 80,
audio_root: str = "",
cmvn_type: str = "utterance",
gcmvn_path: Optional[Path] = None,
extra=None
):
manifest_root = manifest_root.absolute()
writer = S2TDataConfigWriter(manifest_root / yaml_filename)
assert spm_filename is not None or vocab_name is not None
vocab_name = spm_filename.replace(".model", ".txt") if vocab_name is None \
else vocab_name
writer.set_vocab_filename(vocab_name)
if input_channels is not None:
writer.set_input_channels(input_channels)
if input_feat_per_channel is not None:
writer.set_input_feat_per_channel(input_feat_per_channel)
specaugment_setters = {
"lb": writer.set_specaugment_lb_policy,
"ld": writer.set_specaugment_ld_policy,
"sm": writer.set_specaugment_sm_policy,
"ss": writer.set_specaugment_ss_policy,
}
specaugment_setter = specaugment_setters.get(specaugment_policy, None)
if specaugment_setter is not None:
specaugment_setter()
if spm_filename is not None:
writer.set_bpe_tokenizer(
{
"bpe": "sentencepiece",
"sentencepiece_model": (manifest_root / spm_filename).as_posix(),
}
)
if prepend_tgt_lang_tag:
writer.set_prepend_tgt_lang_tag(True)
if sampling_alpha is not None:
writer.set_sampling_alpha(sampling_alpha)
if cmvn_type not in ["global", "utterance"]:
raise NotImplementedError
if specaugment_policy is not None:
writer.set_feature_transforms(
"_train", [f"{cmvn_type}_cmvn", "specaugment"]
)
writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"])
if cmvn_type == "global":
if gcmvn_path is None:
raise ValueError("Please provide path of global cmvn file.")
else:
writer.set_global_cmvn(gcmvn_path.as_posix())
if len(audio_root) > 0:
writer.set_audio_root(audio_root)
if extra is not None:
writer.set_extra(extra)
writer.flush()
def save_df_to_tsv(dataframe, path: Union[str, Path]):
_path = path if isinstance(path, str) else path.as_posix()
dataframe.to_csv(
_path,
sep="\t",
header=True,
index=False,
encoding="utf-8",
escapechar="\\",
quoting=csv.QUOTE_NONE,
)
def filter_manifest_df(
df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000
):
filters = {
"no speech": df["audio"] == "",
f"short speech (<{min_n_frames} frames)": df["n_frames"] < min_n_frames,
"empty sentence": df["tgt_text"] == "",
}
if is_train_split:
filters[f"long speech (>{max_n_frames} frames)"] = df["n_frames"] > max_n_frames
if extra_filters is not None:
filters.update(extra_filters)
invalid = reduce(lambda x, y: x | y, filters.values())
valid = ~invalid
print(
"| "
+ ", ".join(f"{n}: {f.sum()}" for n, f in filters.items())
+ f", total {invalid.sum()} filtered, {valid.sum()} remained."
)
return df[valid]
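# Illustrative sketch (assumption, not original code): filter_manifest_df on a tiny
# in-memory manifest; the row with an empty "audio" field is reported and dropped.
def _filter_manifest_demo():
    demo = pd.DataFrame({
        "audio": ["fbank80.zip:0:10", ""],  # second row has no speech
        "n_frames": [100, 200],
        "tgt_text": ["hello world", "ok"],
    })
    return filter_manifest_df(demo, is_train_split=False)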
def cal_gcmvn_stats(features_list):
features = np.concatenate(features_list)
square_sums = (features ** 2).sum(axis=0)
mean = features.mean(axis=0)
features = np.subtract(features, mean)
var = square_sums / features.shape[0] - mean ** 2
std = np.sqrt(np.maximum(var, 1e-8))
return {"mean": mean.astype("float32"), "std": std.astype("float32")}
def convert_waveform(
waveform: Union[np.ndarray, torch.Tensor], sample_rate: int,
normalize_volume: bool = False, to_mono: bool = False,
to_sample_rate: Optional[int] = None
) -> Tuple[Union[np.ndarray, torch.Tensor], int]:
"""convert a waveform:
- to a target sample rate
- from multi-channel to mono channel
- volume normalization
Args:
waveform (numpy.ndarray or torch.Tensor): 2D original waveform
(channels x length)
sample_rate (int): original sample rate
normalize_volume (bool): perform volume normalization
to_mono (bool): convert to mono channel if having multiple channels
to_sample_rate (Optional[int]): target sample rate
Returns:
        waveform (numpy.ndarray or torch.Tensor): converted 2D waveform (channels x length)
        sample_rate (int): target sample rate
"""
try:
import torchaudio.sox_effects as ta_sox
except ImportError:
raise ImportError("Please install torchaudio: pip install torchaudio")
effects = []
if normalize_volume:
effects.append(["gain", "-n"])
if to_sample_rate is not None and to_sample_rate != sample_rate:
effects.append(["rate", f"{to_sample_rate}"])
if to_mono and waveform.shape[0] > 1:
effects.append(["channels", "1"])
if len(effects) > 0:
is_np_input = isinstance(waveform, np.ndarray)
_waveform = torch.from_numpy(waveform) if is_np_input else waveform
converted, converted_sample_rate = ta_sox.apply_effects_tensor(
_waveform, sample_rate, effects
)
if is_np_input:
converted = converted.numpy()
return converted, converted_sample_rate
return waveform, sample_rate
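# Illustrative sketch (assumption, not original code): downmix a synthetic stereo
# signal and resample 44.1 kHz -> 16 kHz with convert_waveform; requires torchaudio.
def _convert_waveform_demo():
    rng = np.random.default_rng(0)
    stereo = (0.1 * rng.standard_normal((2, 44_100))).astype(np.float32)  # (channels, length)
    mono_16k, sr = convert_waveform(stereo, 44_100, to_mono=True, to_sample_rate=16_000)
    assert mono_16k.shape[0] == 1 and sr == 16_000
    return mono_16k, sr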
def process(args):
root = Path(args.data_root).absolute()
for lang in MUSTC.LANGUAGES:
cur_root = root / f"en-{lang}"
if not cur_root.is_dir():
print(f"{cur_root.as_posix()} does not exist. Skipped.")
continue
# Extract features
audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80")
audio_root.mkdir(exist_ok=True)
for split in MUSTC.SPLITS:
print(f"Fetching split {split}...")
dataset = MUSTC(root.as_posix(), lang, split)
if args.use_audio_input:
print("Converting audios...")
for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
tgt_sample_rate = 16_000
_wavform, _ = convert_waveform(
waveform, sample_rate, to_mono=True,
to_sample_rate=tgt_sample_rate
)
sf.write(
(audio_root / f"{utt_id}.flac").as_posix(),
_wavform.T.numpy(), tgt_sample_rate
)
else:
print("Extracting log mel filter bank features...")
gcmvn_feature_list = []
if split == 'train' and args.cmvn_type == "global":
print("And estimating cepstral mean and variance stats...")
for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
features = extract_fbank_features(
waveform, sample_rate, audio_root / f"{utt_id}.npy"
)
if split == 'train' and args.cmvn_type == "global":
if len(gcmvn_feature_list) < args.gcmvn_max_num:
gcmvn_feature_list.append(features)
if split == 'train' and args.cmvn_type == "global":
# Estimate and save cmv
stats = cal_gcmvn_stats(gcmvn_feature_list)
with open(cur_root / "gcmvn.npz", "wb") as f:
np.savez(f, mean=stats["mean"], std=stats["std"])
# Pack features into ZIP
zip_path = cur_root / f"{audio_root.name}.zip"
print("ZIPing audios/features...")
create_zip(audio_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(
zip_path,
is_audio=args.use_audio_input,
)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in MUSTC.SPLITS:
is_train_split = split.startswith("train")
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = MUSTC(args.data_root, lang, split)
for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(audio_paths[utt_id])
manifest["n_frames"].append(audio_lengths[utt_id])
manifest["tgt_text"].append(
src_utt if args.task == "asr" else tgt_utt
)
manifest["speaker"].append(speaker_id)
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
if args.use_audio_input:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy=None,
extra={"use_audio_input": True}
)
else:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="lb",
cmvn_type=args.cmvn_type,
gcmvn_path=(
cur_root / "gcmvn.npz" if args.cmvn_type == "global"
else None
),
)
# Clean up
shutil.rmtree(audio_root) | null |
181,314 | import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import numpy as np
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
cal_gcmvn_stats,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform, convert_waveform
class MUSTC(Dataset):
def __init__(self, root: str, lang: str, split: str) -> None:
def __getitem__(
self, n: int
) -> Tuple[torch.Tensor, int, str, str, str, str]:
def __len__(self) -> int:
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
def gen_config_yaml(
manifest_root: Path,
spm_filename: Optional[str] = None,
vocab_name: Optional[str] = None,
yaml_filename: str = "config.yaml",
specaugment_policy: Optional[str] = "lb",
prepend_tgt_lang_tag: bool = False,
sampling_alpha: Optional[float] = None,
input_channels: Optional[int] = 1,
input_feat_per_channel: Optional[int] = 80,
audio_root: str = "",
cmvn_type: str = "utterance",
gcmvn_path: Optional[Path] = None,
extra=None
):
def load_df_from_tsv(path: Union[str, Path]) -> pd.DataFrame:
def process_joint(args):
cur_root = Path(args.data_root)
assert all(
(cur_root / f"en-{lang}").is_dir() for lang in MUSTC.LANGUAGES
), "do not have downloaded data available for all 8 languages"
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for lang in MUSTC.LANGUAGES:
tsv_path = cur_root / f"en-{lang}" / f"train_{args.task}.tsv"
df = load_df_from_tsv(tsv_path)
for t in df["tgt_text"]:
f.write(t + "\n")
special_symbols = None
if args.task == 'st':
special_symbols = [f'<lang:{lang}>' for lang in MUSTC.LANGUAGES]
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
special_symbols=special_symbols
)
# Generate config YAML
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="ld",
prepend_tgt_lang_tag=(args.task == "st"),
)
# Make symbolic links to manifests
for lang in MUSTC.LANGUAGES:
for split in MUSTC.SPLITS:
src_path = cur_root / f"en-{lang}" / f"{split}_{args.task}.tsv"
desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv"
if not desc_path.is_symlink():
os.symlink(src_path, desc_path) | null |
181,315 | import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform, convert_waveform
MANIFEST_COLUMNS = [
"id", "audio", "n_frames", "tgt_text", "speaker", "tgt_lang"
]
class mTEDx(Dataset):
"""
Create a Dataset for Multilingual TEDx.
Each item is a tuple of the form: waveform, sample_rate, source utterance,
    target utterance, speaker_id, target language, utterance_id
"""
SPLITS = ["train", "valid", "test"]
LANGPAIRS = ["es-es", "fr-fr", "pt-pt", "it-it", "ru-ru", "el-el", "ar-ar",
"de-de", "es-en", "es-fr", "es-pt", "es-it", "fr-en", "fr-es",
"fr-pt", "pt-en", "pt-es", "it-en", "it-es", "ru-en", "el-en"]
def __init__(self, root: str, lang: str, split: str) -> None:
assert split in self.SPLITS and lang in self.LANGPAIRS
_root = Path(root) / f"{lang}" / "data" / split
wav_root, txt_root = _root / "wav", _root / "txt"
assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
# Load audio segments
try:
import yaml
except ImportError:
print(
"Please install PyYAML to load the Multilingual TEDx YAML files"
)
with open(txt_root / f"{split}.yaml") as f:
segments = yaml.load(f, Loader=yaml.BaseLoader)
# Load source and target utterances
src, tgt = lang.split("-")
for _lang in [src, tgt]:
with open(txt_root / f"{split}.{_lang}") as f:
utterances = [r.strip() for r in f]
assert len(segments) == len(utterances)
for i, u in enumerate(utterances):
segments[i][_lang] = u
# Gather info
self.data = []
for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
wav_filename = wav_filename.replace(".wav", ".flac")
wav_path = wav_root / wav_filename
sample_rate = sf.info(wav_path.as_posix()).samplerate
seg_group = sorted(_seg_group, key=lambda x: float(x["offset"]))
for i, segment in enumerate(seg_group):
offset = int(float(segment["offset"]) * sample_rate)
n_frames = int(float(segment["duration"]) * sample_rate)
_id = f"{wav_path.stem}_{i}"
self.data.append(
(
wav_path.as_posix(),
offset,
n_frames,
sample_rate,
segment[src],
segment[tgt],
segment["speaker_id"],
tgt,
_id,
)
)
def __getitem__(
self, n: int
) -> Tuple[torch.Tensor, int, str, str, str, str, str]:
wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, tgt_lang, \
utt_id = self.data[n]
waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
waveform = torch.from_numpy(waveform)
return waveform, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id
def __len__(self) -> int:
return len(self.data)
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
# Train SentencePiece Model
arguments = [
f"--input={input_path.as_posix()}",
f"--model_prefix={output_path_prefix.as_posix()}",
f"--model_type={model_type}",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
f"--num_threads={cpu_count()}",
f"--unk_id={UNK_TOKEN_ID}",
f"--bos_id={BOS_TOKEN_ID}",
f"--eos_id={EOS_TOKEN_ID}",
f"--pad_id={PAD_TOKEN_ID}",
]
if special_symbols is not None:
_special_symbols = ",".join(special_symbols)
arguments.append(f"--user_defined_symbols={_special_symbols}")
sp.SentencePieceTrainer.Train(" ".join(arguments))
# Export fairseq dictionary
spm = sp.SentencePieceProcessor()
spm.Load(output_path_prefix.as_posix() + ".model")
vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
assert (
vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
)
vocab = {
i: s
for i, s in vocab.items()
if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
}
with open(output_path_prefix.as_posix() + ".txt", "w") as f_out:
for _, s in sorted(vocab.items(), key=lambda x: x[0]):
f_out.write(f"{s} 1\n")
def extract_fbank_features(
waveform: torch.FloatTensor,
sample_rate: int,
output_path: Optional[Path] = None,
n_mel_bins: int = 80,
overwrite: bool = False,
):
if output_path is not None and output_path.is_file() and not overwrite:
return
_waveform, _ = convert_waveform(waveform, sample_rate, to_mono=True)
# Kaldi compliance: 16-bit signed integers
_waveform = _waveform * (2 ** 15)
_waveform = _waveform[0].numpy()
features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins)
if features is None:
features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins)
if features is None:
raise ImportError(
"Please install pyKaldi or torchaudio to enable fbank feature extraction"
)
if output_path is not None:
np.save(output_path.as_posix(), features)
return features
def create_zip(data_root: Path, zip_path: Path):
paths = list(data_root.glob("*.npy"))
paths.extend(data_root.glob("*.flac"))
with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f:
for path in tqdm(paths):
f.write(path, arcname=path.name)
def get_zip_manifest(
zip_path: Path, zip_root: Optional[Path] = None, is_audio=False
):
_zip_path = Path.joinpath(zip_root or Path(""), zip_path)
with zipfile.ZipFile(_zip_path, mode="r") as f:
info = f.infolist()
paths, lengths = {}, {}
for i in tqdm(info):
utt_id = Path(i.filename).stem
offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size
paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}"
with open(_zip_path, "rb") as f:
f.seek(offset)
byte_data = f.read(file_size)
assert len(byte_data) > 1
if is_audio:
assert is_sf_audio_data(byte_data), i
else:
assert is_npy_data(byte_data), i
byte_data_fp = io.BytesIO(byte_data)
if is_audio:
lengths[utt_id] = sf.info(byte_data_fp).frames
else:
lengths[utt_id] = np.load(byte_data_fp).shape[0]
return paths, lengths
def gen_config_yaml(
manifest_root: Path,
spm_filename: Optional[str] = None,
vocab_name: Optional[str] = None,
yaml_filename: str = "config.yaml",
specaugment_policy: Optional[str] = "lb",
prepend_tgt_lang_tag: bool = False,
sampling_alpha: Optional[float] = None,
input_channels: Optional[int] = 1,
input_feat_per_channel: Optional[int] = 80,
audio_root: str = "",
cmvn_type: str = "utterance",
gcmvn_path: Optional[Path] = None,
extra=None
):
manifest_root = manifest_root.absolute()
writer = S2TDataConfigWriter(manifest_root / yaml_filename)
assert spm_filename is not None or vocab_name is not None
vocab_name = spm_filename.replace(".model", ".txt") if vocab_name is None \
else vocab_name
writer.set_vocab_filename(vocab_name)
if input_channels is not None:
writer.set_input_channels(input_channels)
if input_feat_per_channel is not None:
writer.set_input_feat_per_channel(input_feat_per_channel)
specaugment_setters = {
"lb": writer.set_specaugment_lb_policy,
"ld": writer.set_specaugment_ld_policy,
"sm": writer.set_specaugment_sm_policy,
"ss": writer.set_specaugment_ss_policy,
}
specaugment_setter = specaugment_setters.get(specaugment_policy, None)
if specaugment_setter is not None:
specaugment_setter()
if spm_filename is not None:
writer.set_bpe_tokenizer(
{
"bpe": "sentencepiece",
"sentencepiece_model": (manifest_root / spm_filename).as_posix(),
}
)
if prepend_tgt_lang_tag:
writer.set_prepend_tgt_lang_tag(True)
if sampling_alpha is not None:
writer.set_sampling_alpha(sampling_alpha)
if cmvn_type not in ["global", "utterance"]:
raise NotImplementedError
if specaugment_policy is not None:
writer.set_feature_transforms(
"_train", [f"{cmvn_type}_cmvn", "specaugment"]
)
writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"])
if cmvn_type == "global":
if gcmvn_path is None:
raise ValueError("Please provide path of global cmvn file.")
else:
writer.set_global_cmvn(gcmvn_path.as_posix())
if len(audio_root) > 0:
writer.set_audio_root(audio_root)
if extra is not None:
writer.set_extra(extra)
writer.flush()
def save_df_to_tsv(dataframe, path: Union[str, Path]):
_path = path if isinstance(path, str) else path.as_posix()
dataframe.to_csv(
_path,
sep="\t",
header=True,
index=False,
encoding="utf-8",
escapechar="\\",
quoting=csv.QUOTE_NONE,
)
def filter_manifest_df(
df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000
):
filters = {
"no speech": df["audio"] == "",
f"short speech (<{min_n_frames} frames)": df["n_frames"] < min_n_frames,
"empty sentence": df["tgt_text"] == "",
}
if is_train_split:
filters[f"long speech (>{max_n_frames} frames)"] = df["n_frames"] > max_n_frames
if extra_filters is not None:
filters.update(extra_filters)
invalid = reduce(lambda x, y: x | y, filters.values())
valid = ~invalid
print(
"| "
+ ", ".join(f"{n}: {f.sum()}" for n, f in filters.items())
+ f", total {invalid.sum()} filtered, {valid.sum()} remained."
)
return df[valid]
def convert_waveform(
waveform: Union[np.ndarray, torch.Tensor], sample_rate: int,
normalize_volume: bool = False, to_mono: bool = False,
to_sample_rate: Optional[int] = None
) -> Tuple[Union[np.ndarray, torch.Tensor], int]:
"""convert a waveform:
- to a target sample rate
- from multi-channel to mono channel
- volume normalization
Args:
waveform (numpy.ndarray or torch.Tensor): 2D original waveform
(channels x length)
sample_rate (int): original sample rate
normalize_volume (bool): perform volume normalization
to_mono (bool): convert to mono channel if having multiple channels
to_sample_rate (Optional[int]): target sample rate
Returns:
        waveform (numpy.ndarray or torch.Tensor): converted 2D waveform (channels x length)
        sample_rate (int): target sample rate
"""
try:
import torchaudio.sox_effects as ta_sox
except ImportError:
raise ImportError("Please install torchaudio: pip install torchaudio")
effects = []
if normalize_volume:
effects.append(["gain", "-n"])
if to_sample_rate is not None and to_sample_rate != sample_rate:
effects.append(["rate", f"{to_sample_rate}"])
if to_mono and waveform.shape[0] > 1:
effects.append(["channels", "1"])
if len(effects) > 0:
is_np_input = isinstance(waveform, np.ndarray)
_waveform = torch.from_numpy(waveform) if is_np_input else waveform
converted, converted_sample_rate = ta_sox.apply_effects_tensor(
_waveform, sample_rate, effects
)
if is_np_input:
converted = converted.numpy()
return converted, converted_sample_rate
return waveform, sample_rate
def process(args):
root = Path(args.data_root).absolute()
for lang in mTEDx.LANGPAIRS:
cur_root = root / f"{lang}"
if not cur_root.is_dir():
print(f"{cur_root.as_posix()} does not exist. Skipped.")
continue
# Extract features
audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80")
audio_root.mkdir(exist_ok=True)
for split in mTEDx.SPLITS:
print(f"Fetching split {split}...")
dataset = mTEDx(root.as_posix(), lang, split)
if args.use_audio_input:
print("Converting audios...")
                for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
tgt_sample_rate = 16_000
_wavform, _ = convert_waveform(
waveform, sample_rate, to_mono=True,
to_sample_rate=tgt_sample_rate
)
sf.write(
(audio_root / f"{utt_id}.flac").as_posix(),
                        _wavform.T.numpy(), tgt_sample_rate
)
else:
print("Extracting log mel filter bank features...")
for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
extract_fbank_features(
waveform, sample_rate, audio_root / f"{utt_id}.npy"
)
# Pack features into ZIP
zip_path = cur_root / f"{audio_root.name}.zip"
print("ZIPing audios/features...")
create_zip(audio_root, zip_path)
print("Fetching ZIP manifest...")
        audio_paths, audio_lengths = get_zip_manifest(
            zip_path, is_audio=args.use_audio_input
        )
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in mTEDx.SPLITS:
is_train_split = split.startswith("train")
manifest = {c: [] for c in MANIFEST_COLUMNS}
ds = mTEDx(args.data_root, lang, split)
for _, _, src_utt, tgt_utt, spk_id, tgt_lang, utt_id in tqdm(ds):
manifest["id"].append(utt_id)
manifest["audio"].append(audio_paths[utt_id])
manifest["n_frames"].append(audio_lengths[utt_id])
manifest["tgt_text"].append(
src_utt if args.task == "asr" else tgt_utt
)
manifest["speaker"].append(spk_id)
manifest["tgt_lang"].append(tgt_lang)
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
if args.use_audio_input:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy=None,
extra={"use_audio_input": True}
)
else:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="lb",
)
# Clean up
shutil.rmtree(audio_root) | null |
181,323 | import argparse
import logging
import os
from pathlib import Path
import shutil
import torchaudio
import soundfile as sf
from tqdm import tqdm
import pandas as pd
from examples.speech_synthesis.data_utils import extract_logmel_spectrogram
from examples.speech_to_speech.preprocessing.data_utils import gen_config_yaml
from examples.speech_to_text.data_utils import create_zip, get_zip_manifest, save_df_to_tsv
from fairseq.data.audio.audio_utils import convert_waveform
MANIFEST_COLUMNS = ["id", "src_audio", "src_n_frames", "tgt_audio", "tgt_n_frames"]
def prepare_target_data(args, tgt_audios):
def gen_config_yaml(
manifest_root: Path,
yaml_filename: str = "config.yaml",
specaugment_policy: Optional[str] = "lb",
feature_transform: Optional[List[str]] = None,
input_channels: Optional[int] = 1,
input_feat_per_channel: Optional[int] = 80,
audio_root: str = "",
vocoder_type: Optional[str] = None,
vocoder_checkpoint: Optional[str] = None,
vocoder_cfg: Optional[str] = None,
extra=None,
):
def get_zip_manifest(
zip_path: Path, zip_root: Optional[Path] = None, is_audio=False
):
def save_df_to_tsv(dataframe, path: Union[str, Path]):
def process(args):
os.makedirs(args.output_root, exist_ok=True)
manifest = {}
tgt_audios = []
for split in args.data_split:
print(f"Processing {split}...")
manifest[split] = {c: [] for c in MANIFEST_COLUMNS}
missing_tgt_audios = []
src_audios = list(args.source_dir.glob(f"{split}/*.wav"))
for src_audio in tqdm(src_audios):
sample_id = src_audio.stem
tgt_audio = args.target_dir / split / f"{sample_id}.wav"
if not tgt_audio.is_file():
missing_tgt_audios.append(sample_id)
continue
tgt_audios.append(tgt_audio)
src_n_frames = sf.info(src_audio.as_posix()).frames
manifest[split]["id"].append(sample_id)
manifest[split]["src_audio"].append(src_audio.as_posix())
manifest[split]["src_n_frames"].append(
src_n_frames // 160
) # estimation of 10-ms frame for 16kHz audio
print(f"Processed {len(manifest[split]['id'])} samples")
if len(missing_tgt_audios) > 0:
print(
f"{len(missing_tgt_audios)} with missing target data (first 3 examples: {', '.join(missing_tgt_audios[:3])})"
)
# Extract features and pack features into ZIP
zip_path = prepare_target_data(args, tgt_audios)
print("Fetching ZIP manifest...")
tgt_audio_paths, tgt_audio_lengths = get_zip_manifest(zip_path)
print("Generating manifest...")
for split in args.data_split:
print(f"Processing {split}...")
for sample_id in tqdm(manifest[split]["id"]):
manifest[split]["tgt_audio"].append(tgt_audio_paths[sample_id])
manifest[split]["tgt_n_frames"].append(tgt_audio_lengths[sample_id])
out_manifest = args.output_root / f"{split}.tsv"
print(f"Writing manifest to {out_manifest}...")
save_df_to_tsv(pd.DataFrame.from_dict(manifest[split]), out_manifest)
# Generate config YAML
win_len_t = args.win_length / args.sample_rate
hop_len_t = args.hop_length / args.sample_rate
extra = {
"features": {
"type": "spectrogram+melscale+log",
"sample_rate": args.sample_rate,
"eps": 1e-5, "n_mels": args.n_mels, "n_fft": args.n_fft,
"window_fn": "hann", "win_length": args.win_length,
"hop_length": args.hop_length,
"win_len_t": win_len_t, "hop_len_t": hop_len_t,
"f_min": args.f_min, "f_max": args.f_max,
"n_stft": args.n_fft // 2 + 1
}
}
gen_config_yaml(
args.output_root,
audio_root=args.output_root.as_posix(),
specaugment_policy="lb",
feature_transform=["utterance_cmvn", "delta_deltas"],
extra=extra,
) | null |
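# Worked example (illustrative numbers, not from any real run): the frame timing
# derived above for win_length=1024, hop_length=256 at sample_rate=22050.
_example_win_len_t = 1024 / 22050  # ≈ 0.0464 s analysis window
_example_hop_len_t = 256 / 22050   # ≈ 0.0116 s hop between frames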
181,325 | import copy
import torch
import logging
from argparse import Namespace
import yaml
from fairseq import options
from examples.speech_to_speech.benchmarking.core import (
Processing,
SpeechGeneration,
Cascaded2StageS2ST,
Cascaded3StageS2ST,
S2UT,
)
from examples.speech_to_speech.benchmarking.data_utils import (
load_dataset_npy,
load_dataset_raw_to_waveforms,
)
def make_parser():
class Processing(BenchmarkingBase):
def __init__(self, args):
def setUp(self, cfg):
def encode_source(self, src):
def decode_target(self, hypos):
def forward(self, sample):
class SpeechGeneration(BenchmarkingBase):
def __init__(self, args):
def setUp(self, args):
def processTextInput(self, text):
def forward(self, sample):
class S2UT(BenchmarkingBase):
def __init__(self, s2u_args, vocoder_args=None):
def forward(self, sample):
def generate_s2u_outputs(self, dataset):
def compute_metrics(self, metric_type, dataset, repeat=None):
def benchmark_run_time(self, dataset, repeat):
def count_flops(self, dataset, repeat):
def max_memory(self, dataset, repeat):
class Cascaded2StageS2ST(BenchmarkingBase):
def __init__(self, s2t_args, tts_args):
def forward(self, sample):
def generate_s2t_outputs(self, dataset):
def generate_tts_inputs(self, dataset):
def compute_metrics(self, metric_type, dataset, repeat=None):
def benchmark_run_time(self, dataset, repeat):
def count_flops(self, dataset, repeat):
def max_memory(self, dataset, repeat):
class Cascaded3StageS2ST(Cascaded2StageS2ST):
def __init__(self, s2t_args, tts_args, mt_args):
def forward(self, sample):
def generate_mt_inputs(self, dataset):
def generate_mt_outputs(self, dataset):
def compute_metrics(self, metric_type, dataset, repeat=None):
def load_dataset_npy(file_name, dataset_size=None):
def load_dataset_raw_to_waveforms(
file_name,
dataset_size=None,
need_waveform=True,
sample_rate=16000,
read_using_soundfile=False,
):
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
with open(
args.config,
"r",
) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
dict_args = vars(args)
dict_args.update(config["general"])
args = Namespace(**dict_args)
i = 1
stage_args = []
while i <= 3:
var = f"stage{i}"
tmp_args = copy.deepcopy(dict_args)
if var in config:
tmp_args.update(config[var])
stage_args.append(Namespace(**tmp_args))
i += 1
else:
break
if args.model_type == "S2S" or args.model_type == "TTS":
model = SpeechGeneration(stage_args[0])
elif args.model_type == "S2UT":
model = S2UT(stage_args[0], stage_args[1] if len(stage_args) > 1 else None)
elif args.model_type == "MT" or args.model_type == "S2T":
model = Processing(stage_args[0])
elif args.model_type == "2StageS2ST":
model = Cascaded2StageS2ST(stage_args[0], stage_args[1])
elif args.model_type == "3StageS2ST":
model = Cascaded3StageS2ST(stage_args[0], stage_args[2], stage_args[1])
else:
raise Exception(f"Currently unsupported model type {args.model_type}")
print(f"Evaluating on dataset - {args.dataset_path}\n")
if args.dataset_type == "npy":
dataset = load_dataset_npy(args.dataset_path, dataset_size=args.dataset_size)
elif args.dataset_type == "raw":
dataset = load_dataset_raw_to_waveforms(
args.dataset_path,
dataset_size=args.dataset_size,
read_using_soundfile=args.read_using_sf,
)
else:
raise Exception(f"Invalid dataset type {args.dataset_type}")
model.warm_up(sample=dataset[0], repeat=2)
run_time, memory, flops = model.gather_all_metrics(dataset, repeat=1)
print(f"run_time = {run_time}sec \tmemory = {memory}MiB \tflops = {flops}")
if args.dump_speech_waveforms_dir:
model.dump_final_speech_output(
dataset,
args.dump_speech_waveforms_dir,
lambda x: x,
args.target_sr,
prefix=args.dump_waveform_file_prefix,
) | null |
181,329 | import logging
import torch
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.roberta import (
init_bert_params,
roberta_base_architecture,
roberta_large_architecture,
RobertaEncoder,
RobertaModel,
)
from fairseq.utils import safe_hasattr
from ..modules.linformer_sentence_encoder import LinformerTransformerEncoder
def base_architecture(args):
def linformer_roberta_large_architecture(args):
roberta_large_architecture(args)
base_architecture(args) | null |
181,330 | import argparse
import glob
import numpy as np
def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False):
target_ids = [tid for tid in target_embs]
source_mat = np.stack(source_embs.values(), axis=0)
normalized_source_mat = source_mat / np.linalg.norm(
source_mat, axis=1, keepdims=True
)
target_mat = np.stack(target_embs.values(), axis=0)
normalized_target_mat = target_mat / np.linalg.norm(
target_mat, axis=1, keepdims=True
)
sim_mat = normalized_source_mat.dot(normalized_target_mat.T)
if return_sim_mat:
return sim_mat
neighbors_map = {}
for i, sentence_id in enumerate(source_embs):
idx = np.argsort(sim_mat[i, :])[::-1][:k]
neighbors_map[sentence_id] = [target_ids[tid] for tid in idx]
return neighbors_map
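# Illustrative sketch (made-up 3-dimensional embeddings): each source sentence
# should retrieve itself as its own top-1 neighbour under cosine similarity.
def _compute_dist_demo():
    src = {"s1": np.array([1.0, 0.0, 0.0]), "s2": np.array([0.0, 1.0, 0.0])}
    tgt = {"s1": np.array([0.9, 0.1, 0.0]), "s2": np.array([0.1, 0.9, 0.0])}
    neighbors = compute_dist(src, tgt, k=1)
    assert neighbors == {"s1": ["s1"], "s2": ["s2"]}
    return neighbors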
def load_embeddings(directory, LANGS):
sentence_embeddings = {}
sentence_texts = {}
for lang in LANGS:
sentence_embeddings[lang] = {}
sentence_texts[lang] = {}
lang_dir = f"{directory}/{lang}"
embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*")
for embed_file in embedding_files:
shard_id = embed_file.split(".")[-1]
embeddings = np.fromfile(embed_file, dtype=np.float32)
num_rows = embeddings.shape[0] // DIM
embeddings = embeddings.reshape((num_rows, DIM))
with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file:
for idx, line in enumerate(sentence_file):
sentence_id, sentence = line.strip().split("\t")
sentence_texts[lang][sentence_id] = sentence
sentence_embeddings[lang][sentence_id] = embeddings[idx, :]
return sentence_embeddings, sentence_texts
def compute_accuracy(directory, LANGS):
sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS)
top_1_accuracy = {}
top1_str = " ".join(LANGS) + "\n"
for source_lang in LANGS:
top_1_accuracy[source_lang] = {}
top1_str += f"{source_lang} "
for target_lang in LANGS:
top1 = 0
top5 = 0
neighbors_map = compute_dist(
sentence_embeddings[source_lang], sentence_embeddings[target_lang]
)
for sentence_id, neighbors in neighbors_map.items():
if sentence_id == neighbors[0]:
top1 += 1
if sentence_id in neighbors[:5]:
top5 += 1
n = len(sentence_embeddings[target_lang])
top1_str += f"{top1/n} "
top1_str += "\n"
print(top1_str)
print(top1_str, file=open(f"{directory}/accuracy", "w")) | null |
181,331 | import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
from fairseq.utils import safe_hasattr
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min([m.max_decoder_positions() for m in self.models if hasattr(m, "max_decoder_positions")] + [sys.maxsize])
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, "decoder"):
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
def get_avg_pool(
models, sample, prefix_tokens, src_dict, remove_bpe, has_langtok=False
):
model = EnsembleModel(models)
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.cpu().numpy().astype(np.float32)
encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.cpu().numpy().astype(
np.float32
)
encoder_mask = np.expand_dims(encoder_mask.T, axis=2)
if has_langtok:
encoder_mask = encoder_mask[1:, :, :]
        np_encoder_outs = np_encoder_outs[1:, :, :]
masked_encoder_outs = encoder_mask * np_encoder_outs
avg_pool = (masked_encoder_outs / encoder_mask.sum(axis=0)).sum(axis=0)
return avg_pool | null |
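# Illustrative sketch (synthetic numbers, not original code): the length-masked
# mean over time performed at the end of get_avg_pool, on a (T, B, C) array.
def _masked_avg_pool_demo():
    outs = np.arange(12, dtype=np.float32).reshape(3, 2, 2)    # (T=3, B=2, C=2)
    mask = np.array([[1, 1, 0], [1, 1, 1]], dtype=np.float32)  # (B, T), 1 = real frame
    mask = np.expand_dims(mask.T, axis=2)                      # (T, B, 1)
    return ((mask * outs) / mask.sum(axis=0)).sum(axis=0)      # (B, C) pooled vectors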
181,345 | import argparse
import os
import os.path as op
from collections import namedtuple
from multiprocessing import cpu_count
from typing import List, Optional
import sentencepiece as sp
from fairseq.data.encoders.byte_bpe import ByteBPE
from fairseq.data.encoders.byte_utils import byte_encode
from fairseq.data.encoders.bytes import Bytes
from fairseq.data.encoders.characters import Characters
from fairseq.data.encoders.moses_tokenizer import MosesTokenizer
from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE
SPLITS = ["train", "valid", "test"]
def _convert_xml(in_path: str, out_path: str):
def _convert_train(in_path: str, out_path: str):
def _get_bytes(in_path: str, out_path: str):
def _get_chars(in_path: str, out_path: str):
def pretokenize(in_path: str, out_path: str, src: str, tgt: str):
def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str):
def _get_bpe(in_path: str, model_prefix: str, vocab_size: int):
def _apply_bbpe(model_path: str, in_path: str, out_path: str):
def _apply_bpe(model_path: str, in_path: str, out_path: str):
def _concat_files(in_paths: List[str], out_path: str):
def preprocess_iwslt17(
root: str,
src: str,
tgt: str,
bpe_size: Optional[int],
need_chars: bool,
bbpe_size: Optional[int],
need_bytes: bool,
):
# extract bitext
in_root = op.join(root, f"{src}-{tgt}")
for lang in [src, tgt]:
_convert_train(
op.join(in_root, f"train.tags.{src}-{tgt}.{lang}"),
op.join(root, f"train.{lang}"),
)
_convert_xml(
op.join(in_root, f"IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml"),
op.join(root, f"valid.{lang}"),
)
_convert_xml(
op.join(in_root, f"IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml"),
op.join(root, f"test.{lang}"),
)
# pre-tokenize
for lang in [src, tgt]:
for split in SPLITS:
pretokenize(
op.join(root, f"{split}.{lang}"),
op.join(root, f"{split}.moses.{lang}"),
src,
tgt,
)
# tokenize with BPE vocabulary
if bpe_size is not None:
# learn vocabulary
concated_train_path = op.join(root, "train.all")
_concat_files(
[op.join(root, "train.moses.fr"), op.join(root, "train.moses.en")],
concated_train_path,
)
bpe_model_prefix = op.join(root, f"spm_bpe{bpe_size}")
_get_bpe(concated_train_path, bpe_model_prefix, bpe_size)
os.remove(concated_train_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bpe(
bpe_model_prefix + ".model",
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bpe{bpe_size}.{lang}"),
)
# tokenize with bytes vocabulary
if need_bytes:
for lang in [src, tgt]:
for split in SPLITS:
_get_bytes(
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bytes.{lang}"),
)
# tokenize with characters vocabulary
if need_chars:
for lang in [src, tgt]:
for split in SPLITS:
_get_chars(
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.chars.{lang}"),
)
# tokenize with byte-level BPE vocabulary
if bbpe_size is not None:
# learn vocabulary
bchar_path = op.join(root, "train.bchar")
_convert_to_bchar(op.join(root, "train.moses"), src, tgt, bchar_path)
bbpe_model_prefix = op.join(root, f"spm_bbpe{bbpe_size}")
_get_bpe(bchar_path, bbpe_model_prefix, bbpe_size)
os.remove(bchar_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bbpe(
bbpe_model_prefix + ".model",
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bbpe{bbpe_size}.{lang}"),
) | null |
181,350 | import ast
from collections import namedtuple
from dataclasses import dataclass, field
from enum import Enum, auto
import hydra
from hydra.core.config_store import ConfigStore
import logging
import math
import os
from omegaconf import OmegaConf
from typing import Optional
import sys
import editdistance
import torch
from hydra.core.hydra_config import HydraConfig
from fairseq import checkpoint_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import FairseqDataclass, FairseqConfig
from fairseq.logging.meters import StopwatchMeter
from omegaconf import open_dict
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoderConfig
logger = logging.getLogger(__name__)
class DecoderType(Enum):
class UnsupGenerateConfig(FairseqDataclass):
def get_dataset_itr(cfg, task):
def process_predictions(
cfg: UnsupGenerateConfig,
hypos,
tgt_dict,
target_tokens,
res_files,
):
def prepare_result_files(cfg: UnsupGenerateConfig):
GenResult = namedtuple(
"GenResult",
[
"count",
"errs_t",
"gen_timer",
"lengths_hyp_unit_t",
"lengths_hyp_t",
"lengths_t",
"lm_score_t",
"num_feats",
"num_sentences",
"num_symbols",
"vt_err_t",
"vt_length_t",
],
)
def gen_hypos(generator, models, num_feats, sample, task, use_cuda):
class StopwatchMeter(Meter):
def __init__(self, round: Optional[int] = None):
def start(self):
def stop(self, n=1, prehook=None):
def reset(self):
def state_dict(self):
def load_state_dict(self, state_dict):
def avg(self):
def elapsed_time(self):
def smoothed_value(self) -> float:
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
def decode(self, emissions):
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
def get_timesteps(self, token_idxs: List[int]) -> List[int]:
def decode(self, emissions):
class W2lFairseqLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
def decode(self, emissions):
def idx_to_word(idx):
def make_hypo(result):
class KaldiDecoder(object):
def __init__(
self,
cfg: KaldiDecoderConfig,
beam: int,
nbest: int = 1,
):
def generate(self, models, sample, **unused):
def get_emissions(self, models, encoder_input):
def decode_one(self, logits, padding):
def decode(self, emissions, padding):
def generate(cfg: UnsupGenerateConfig, models, saved_cfg, use_cuda):
task = tasks.setup_task(cfg.fairseq.task)
saved_cfg.task.labels = cfg.fairseq.task.labels
task.load_dataset(cfg.fairseq.dataset.gen_subset, task_cfg=saved_cfg.task)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
cfg.fairseq.task.data,
cfg.fairseq.dataset.gen_subset,
len(task.dataset(cfg.fairseq.dataset.gen_subset)),
)
)
# Load dataset (possibly sharded)
itr = get_dataset_itr(cfg, task)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(cfg: UnsupGenerateConfig):
w2l_decoder = cfg.w2l_decoder
if w2l_decoder == DecoderType.VITERBI:
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KENLM:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.FAIRSEQ:
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KALDI:
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoder
assert cfg.kaldi_decoder_config is not None
return KaldiDecoder(
cfg.kaldi_decoder_config,
cfg.beam,
)
else:
raise NotImplementedError(
"only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment but found "
+ str(w2l_decoder)
)
generator = build_generator(cfg)
kenlm = None
fairseq_lm = None
if cfg.lm_model is not None:
import kenlm
kenlm = kenlm.Model(cfg.lm_model)
num_sentences = 0
if cfg.results_path is not None and not os.path.exists(cfg.results_path):
os.makedirs(cfg.results_path)
res_files = prepare_result_files(cfg)
errs_t = 0
lengths_hyp_t = 0
lengths_hyp_unit_t = 0
lengths_t = 0
count = 0
num_feats = 0
all_hyp_pieces = []
all_hyp_words = []
num_symbols = (
len([s for s in tgt_dict.symbols if not s.startswith("madeup")])
- tgt_dict.nspecial
)
targets = None
if cfg.targets is not None:
tgt_path = os.path.join(
cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset + "." + cfg.targets
)
if os.path.exists(tgt_path):
with open(tgt_path, "r") as f:
targets = f.read().splitlines()
viterbi_transcript = None
if cfg.viterbi_transcript is not None and len(cfg.viterbi_transcript) > 0:
logger.info(f"loading viterbi transcript from {cfg.viterbi_transcript}")
with open(cfg.viterbi_transcript, "r") as vf:
viterbi_transcript = vf.readlines()
viterbi_transcript = [v.rstrip().split() for v in viterbi_transcript]
gen_timer.start()
start = 0
end = len(itr)
hypo_futures = None
if cfg.w2l_decoder == DecoderType.KALDI:
logger.info("Extracting features")
hypo_futures = []
samples = []
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if "net_input" not in sample or i < start or i >= end:
continue
if "padding_mask" not in sample["net_input"]:
sample["net_input"]["padding_mask"] = None
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
hypo_futures.append(hypos)
samples.append(sample)
itr = list(zip(hypo_futures, samples))
start = 0
end = len(itr)
logger.info("Finished extracting features")
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if i < start or i >= end:
continue
if hypo_futures is not None:
hypos, sample = sample
hypos = [h.result() for h in hypos]
else:
if "net_input" not in sample:
continue
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
for i, sample_id in enumerate(sample["id"].tolist()):
if targets is not None:
target_tokens = targets[sample_id]
elif "target" in sample or "target_label" in sample:
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
else:
target_tokens = None
# Process top predictions
(
errs,
length_hyp,
length,
hyp_pieces,
hyp_words,
) = process_predictions(
cfg,
hypos[i],
tgt_dict,
target_tokens,
res_files,
)
errs_t += errs
lengths_hyp_t += length_hyp
lengths_hyp_unit_t += (
len(hyp_pieces) if len(hyp_pieces) > 0 else len(hyp_words)
)
lengths_t += length
count += 1
all_hyp_pieces.append(hyp_pieces)
all_hyp_words.append(hyp_words)
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
lm_score_sum = 0
if kenlm is not None:
if cfg.unit_lm:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_pieces)
else:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_words)
elif fairseq_lm is not None:
lm_score_sum = sum(fairseq_lm.score([h.split() for h in all_hyp_words])[0])
vt_err_t = 0
vt_length_t = 0
if viterbi_transcript is not None:
unit_hyps = []
if cfg.targets is not None and cfg.lexicon is not None:
lex = {}
with open(cfg.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
lex[items[0]] = items[1:]
for h in all_hyp_pieces:
hyp_ws = []
for w in h.split():
assert w in lex, w
hyp_ws.extend(lex[w])
unit_hyps.append(hyp_ws)
else:
unit_hyps.extend([h.split() for h in all_hyp_words])
vt_err_t = sum(
editdistance.eval(vt, h) for vt, h in zip(viterbi_transcript, unit_hyps)
)
vt_length_t = sum(len(h) for h in viterbi_transcript)
if res_files is not None:
for r in res_files.values():
r.close()
gen_timer.stop(lengths_hyp_t)
return GenResult(
count,
errs_t,
gen_timer,
lengths_hyp_unit_t,
lengths_hyp_t,
lengths_t,
lm_score_sum,
num_feats,
num_sentences,
num_symbols,
vt_err_t,
vt_length_t,
) | null |
181,366 | import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import sys
import faiss
import torch.nn.functional as F
from wav2vec_cluster_faiss import parse_faiss_specs, Wav2VecFeatureReader
class Wav2VecFeatureReader(object):
def __init__(self, cp_file, layer):
def read_audio(self, fname):
def get_feats(self, loc):
def get_iterator(args):
label_path = osp.join(args.data, f"{args.split}.{args.labels}")
if osp.exists(label_path):
lp = open(label_path, "r")
else:
lp = None
with open(osp.join(args.data, f"{args.split}.tsv"), "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [line.rstrip() for line in lines if len(line) > 0]
if lp is not None:
lbls = [line.rstrip() for line in lp]
else:
lbls = [None] * len(files)
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname, lbl in zip(files, lbls):
file = osp.join(root, fname.split("\t")[0])
feats = reader.get_feats(file)
yield feats.data, fname, lbl
return iterate, num, root | null |
181,394 | import logging
import torch
import torch.nn.functional as F
try:
    # assumed missing from this extract: the GPU branch of cross_entropy below
    # relies on the fused kernel shipped with NVIDIA apex
    from apex.contrib import xentropy
except ImportError:
    xentropy = None
logger = logging.getLogger(__name__)
def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction="mean"):
def cross_entropy(logits, target, ignore_index=-100, reduction="mean"):
if logits.device == torch.device("cpu"):
return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
else:
if not getattr(cross_entropy, "_has_logged_once", False):
logger.info("using fused cross entropy")
cross_entropy._has_logged_once = True
half_to_float = logits.dtype == torch.half
losses = xentropy.SoftmaxCrossEntropyLoss.apply(
logits,
target,
0.0,
ignore_index,
half_to_float,
)
if reduction == "sum":
return losses.sum()
elif reduction == "mean":
if ignore_index >= 0:
return losses.sum() / target.ne(ignore_index).sum()
else:
return losses.mean()
elif reduction == "none":
return losses
else:
raise NotImplementedError | null |
181,395 | import logging
import torch
import torch.nn.functional as F
def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction="mean"):
def cross_entropy(logits, target, ignore_index=-100, reduction="mean"):
return _cross_entropy_pytorch(logits, target, ignore_index, reduction) | null |
181,411 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from inspect import isfunction
from operator import mul
from functools import reduce, wraps
from aml.multimodal_video.utils.einops.lib import rearrange, repeat
from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange
from fairseq.modules.local_attention import LocalAttention
def is_empty(t):
return t.nelement() == 0
def ema_inplace(moving_avg, new, decay):
if is_empty(moving_avg):
moving_avg.data.copy_(new)
return
moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) | null |
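# Illustrative sketch (arbitrary values): one EMA step; every entry becomes
# 0.0 * 0.99 + 1.0 * (1 - 0.99) = 0.01.
def _ema_inplace_demo():
    moving_avg = torch.zeros(3)
    ema_inplace(moving_avg, torch.ones(3), decay=0.99)
    return moving_avg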
181,413 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from inspect import isfunction
from operator import mul
from functools import reduce, wraps
from aml.multimodal_video.utils.einops.lib import rearrange, repeat
from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange
from fairseq.modules.local_attention import LocalAttention
def rotate_every_two(x):
def apply_rotary_pos_emb(q, k, sinu_pos):
sinu_pos = rearrange(sinu_pos, "() n (j d) -> n j d", j=2)
sin, cos = sinu_pos.unbind(dim=-2)
sin, cos = map(lambda t: repeat(t, "b n -> b (n j)", j=2), (sin, cos))
q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
return q, k | null |
181,429 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.data import data_utils
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from fairseq.ds_trainer import DeepSpeedTrainer
logger = logging.getLogger(__name__)
def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):
from fairseq import meters
# only one worker should attempt to create the required dir
if trainer.data_parallel_rank == 0:
os.makedirs(cfg.save_dir, exist_ok=True)
prev_best = getattr(save_checkpoint, "best", val_loss)
if val_loss is not None:
best_function = max if cfg.maximize_best_checkpoint_metric else min
save_checkpoint.best = best_function(val_loss, prev_best)
if cfg.no_save:
return
trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if hasattr(save_checkpoint, "best"):
extra_state.update({"best": save_checkpoint.best})
if getattr(epoch_itr, "sharded_checkpoint", False):
local_state_dict = extra_state["train_iterator"]
all_state_dicts = distributed_utils.all_gather_list(
local_state_dict,
max_size=getattr(trainer.cfg.common, "all_gather_list_size", 16384),
group=trainer.data_parallel_process_group,
)
extra_state["train_iterator"] = all_state_dicts
if not trainer.should_save_checkpoint_on_current_rank:
if trainer.always_call_state_dict_during_save_checkpoint:
trainer.state_dict()
return
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates")
def is_better(a, b):
return a >= b if cfg.maximize_best_checkpoint_metric else a <= b
suffix = trainer.checkpoint_suffix
checkpoint_conds = collections.OrderedDict()
if isinstance(trainer, DeepSpeedTrainer):
checkpoint_conds["checkpoints"] = (
not end_of_epoch
and cfg.save_interval_updates > 0
and updates % cfg.save_interval_updates == 0
)
else:
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0
)
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not end_of_epoch
and cfg.save_interval_updates > 0
and updates % cfg.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
not hasattr(save_checkpoint, "best")
or is_better(val_loss, save_checkpoint.best)
)
if val_loss is not None and cfg.keep_best_checkpoints > 0:
worst_best = getattr(save_checkpoint, "best", None)
chkpts = checkpoint_paths(
cfg.save_dir,
pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
cfg.best_checkpoint_metric, suffix
),
)
if len(chkpts) > 0:
p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0]
worst_best = float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), ""))
# add random digits to resolve ties
with data_utils.numpy_seed(epoch, updates, val_loss):
rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints)
checkpoint_conds[
"checkpoint.best_{}_{:.3f}{}{}.pt".format(
cfg.best_checkpoint_metric,
val_loss,
rand_sfx,
suffix
)
] = worst_best is None or is_better(val_loss, worst_best)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not cfg.no_last_checkpoints
checkpoints = [
os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(checkpoints[0], extra_state)
if not isinstance(trainer, DeepSpeedTrainer):
for cp in checkpoints[1:]:
if cfg.write_checkpoints_asynchronously:
# TODO[ioPath]: Need to implement a delayed asynchronous
# file copying/moving feature.
logger.warning(
f"ioPath is not copying {checkpoints[0]} to {cp} "
"since async write mode is on."
)
else:
assert PathManager.copy(
checkpoints[0], cp, overwrite=True
), f"Failed to copy {checkpoints[0]} to {cp}"
write_timer.stop()
logger.info(
"Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
if not end_of_epoch and cfg.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
if cfg.keep_interval_updates_pattern == -1:
checkpoints = checkpoint_paths(
cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix)
)
else:
checkpoints = checkpoint_paths(
cfg.save_dir,
pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix),
keep_match=True,
)
checkpoints = [
x[0]
for x in checkpoints
if x[1] % cfg.keep_interval_updates_pattern != 0
]
for old_chk in checkpoints[cfg.keep_interval_updates:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
elif PathManager.exists(old_chk):
PathManager.rm(old_chk)
if cfg.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix)
)
for old_chk in checkpoints[cfg.keep_last_epochs:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
elif PathManager.exists(old_chk):
PathManager.rm(old_chk)
if cfg.keep_best_checkpoints > 0:
# only keep the best N checkpoints according to validation metric
checkpoints = checkpoint_paths(
cfg.save_dir,
pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
cfg.best_checkpoint_metric, suffix
),
)
if not cfg.maximize_best_checkpoint_metric:
checkpoints = checkpoints[::-1]
for old_chk in checkpoints[cfg.keep_best_checkpoints:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
elif PathManager.exists(old_chk):
PathManager.rm(old_chk)
class CheckpointConfig(FairseqDataclass):
save_dir: str = field(
default="checkpoints", metadata={"help": "path to save checkpoints"}
)
restore_file: str = field(
default="checkpoint_last.pt",
metadata={
"help": "filename from which to load checkpoint "
"(default: <save-dir>/checkpoint_last.pt"
},
)
finetune_from_model: Optional[str] = field(
default=None,
metadata={
"help": "finetune from a pretrained model; note that meters and lr scheduler will be reset"
},
)
reset_dataloader: bool = field(
default=False,
metadata={
"help": "if set, does not reload dataloader state from the checkpoint"
},
)
reset_lr_scheduler: bool = field(
default=False,
metadata={
"help": "if set, does not load lr scheduler state from the checkpoint"
},
)
reset_meters: bool = field(
default=False,
metadata={"help": "if set, does not load meters from the checkpoint"},
)
reset_optimizer: bool = field(
default=False,
metadata={"help": "if set, does not load optimizer state from the checkpoint"},
)
optimizer_overrides: str = field(
default="{}",
metadata={
"help": "a dictionary used to override optimizer args when loading a checkpoint"
},
)
save_interval: int = field(
default=1, metadata={"help": "save a checkpoint every N epochs"}
)
save_interval_updates: int = field(
default=0, metadata={"help": "save a checkpoint (and validate) every N updates"}
)
keep_interval_updates: int = field(
default=-1,
metadata={
"help": "keep the last N checkpoints saved with --save-interval-updates"
},
)
keep_interval_updates_pattern: int = field(
default=-1,
metadata={
"help": "when used with --keep-interval-updates, skips deleting "
"any checkpoints with update X where "
"X %% keep_interval_updates_pattern == 0"
},
)
keep_last_epochs: int = field(
default=-1, metadata={"help": "keep last N epoch checkpoints"}
)
keep_best_checkpoints: int = field(
default=-1, metadata={"help": "keep best N checkpoints based on scores"}
)
no_save: bool = field(
default=False, metadata={"help": "don't save models or checkpoints"}
)
no_epoch_checkpoints: bool = field(
default=False, metadata={"help": "only store last and best checkpoints"}
)
no_last_checkpoints: bool = field(
default=False, metadata={"help": "don't store last checkpoints"}
)
no_save_optimizer_state: bool = field(
default=False,
metadata={"help": "don't save optimizer-state as part of checkpoint"},
)
best_checkpoint_metric: str = field(
default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'}
)
maximize_best_checkpoint_metric: bool = field(
default=False,
metadata={
"help": 'select the largest metric value for saving "best" checkpoints'
},
)
patience: int = field(
default=-1,
metadata={
"help": (
"early stop training if valid performance doesn't "
"improve for N consecutive validation runs; note "
"that this is influenced by --validate-interval"
)
},
)
checkpoint_suffix: str = field(
default="", metadata={"help": "suffix to add to the checkpoint file name"}
)
checkpoint_shard_count: int = field(
default=1,
metadata={
"help": "Number of shards containing the checkpoint - "
"if the checkpoint is over 300GB, it is preferable "
"to split it into shards to prevent OOM on CPU while loading "
"the checkpoint"
},
)
load_checkpoint_on_all_dp_ranks: bool = field(
default=False,
metadata={
"help": "load checkpoints on all data parallel devices "
"(default: only load on rank 0 and broadcast to other devices)"
},
)
write_checkpoints_asynchronously: bool = field(
default=False,
metadata={
"help": (
"Write checkpoints asynchronously in a separate "
"thread. NOTE: This feature is currently being tested."
),
"argparse_alias": "--save-async",
},
)
model_parallel_size: int = II("common.model_parallel_size")
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
The provided code snippet includes necessary dependencies for implementing the `load_checkpoint` function. Write a Python function `def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args)` to solve the following problem:
Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``.
Here is the function:
def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):
"""
Load a checkpoint and restore the training iterator.
*passthrough_args* will be passed through to
``trainer.get_train_iterator``.
"""
reset_optimizer = cfg.reset_optimizer
reset_lr_scheduler = cfg.reset_lr_scheduler
optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)
reset_meters = cfg.reset_meters
reset_dataloader = cfg.reset_dataloader
if cfg.finetune_from_model is not None and (
reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader
):
raise ValueError(
"--finetune-from-model can not be set together with either --reset-optimizer"
" or reset_lr_scheduler or reset_meters or reset_dataloader"
)
suffix = trainer.checkpoint_suffix
if isinstance(trainer, DeepSpeedTrainer):
checkpoint_path = os.path.join(cfg.save_dir, "checkpoints/")
else:
if (
cfg.restore_file == "checkpoint_last.pt"
): # default value of restore_file is 'checkpoint_last.pt'
checkpoint_path = os.path.join(
cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
)
first_launch = not PathManager.exists(checkpoint_path)
if cfg.finetune_from_model is not None and first_launch:
# if there is no last checkpoint to restore, start the finetune from pretrained model
# else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.
if PathManager.exists(cfg.finetune_from_model):
checkpoint_path = cfg.finetune_from_model
reset_optimizer = True
reset_lr_scheduler = True
reset_meters = True
reset_dataloader = True
logger.info(
f"loading pretrained model from {checkpoint_path}: "
"optimizer, lr scheduler, meters, dataloader will be reset"
)
else:
raise ValueError(
f"--funetune-from-model {cfg.finetune_from_model} does not exist"
)
elif suffix is not None:
checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt")
else:
checkpoint_path = cfg.restore_file
if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model:
raise ValueError(
"--finetune-from-model and --restore-file (non-default value) "
"can not be specified together: " + str(cfg)
)
extra_state = trainer.load_checkpoint(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
reset_meters=reset_meters,
)
if (
extra_state is not None
and "best" in extra_state
and not reset_optimizer
and not reset_meters
):
save_checkpoint.best = extra_state["best"]
if extra_state is not None and not reset_dataloader:
# restore iterator from checkpoint
itr_state = extra_state["train_iterator"]
epoch_itr = trainer.get_train_iterator(
epoch=itr_state.get("epoch", 1), load_dataset=True, **passthrough_args
)
epoch_itr.load_state_dict(itr_state)
else:
epoch_itr = trainer.get_train_iterator(
epoch=1, load_dataset=True, **passthrough_args
)
trainer.lr_step(epoch_itr.epoch)
return extra_state, epoch_itr | Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. |
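A minimal usage sketch of `load_checkpoint` above; `cfg` and `trainer` stand for an already-built CheckpointConfig and trainer instance and are assumptions, not part of the source:
# Restore optimizer/lr-scheduler/meter state and the training iterator from
# <save_dir>/checkpoint_last.pt, or start from epoch 1 if no checkpoint exists.
extra_state, epoch_itr = load_checkpoint(cfg, trainer)
print("starting at epoch", epoch_itr.epoch)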
181,430 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.data import data_utils
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from fairseq.ds_trainer import DeepSpeedTrainer
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
def load_checkpoint_to_cpu_(path, arg_overrides=None, load_on_all_ranks=False):
local_path = PathManager.get_local_path(path)
if local_path != path and PathManager.path_requires_pathmanager(path):
try:
os.remove(local_path)
except FileNotFoundError:
pass
if load_on_all_ranks:
torch.distributed.barrier()
local_path = PathManager.get_local_path(path)
with open(local_path, "rb") as f:
state = torch.load(f, map_location=torch.device("cpu"))
return state | null |
181,431 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.data import data_utils
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from fairseq.ds_trainer import DeepSpeedTrainer
def load_model_ensemble_and_task(
filenames,
arg_overrides: Optional[Dict[str, Any]] = None,
task=None,
strict=True,
suffix="",
num_shards=1,
state=None,
):
assert state is None or len(filenames) == 1
from fairseq import tasks
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble = []
cfg = None
for filename in filenames:
orig_filename = filename
model_shard_state = {"shard_weights": [], "shard_metadata": []}
assert num_shards > 0
st = time.time()
for shard_idx in range(num_shards):
filename = get_maybe_sharded_checkpoint_filename(
orig_filename, suffix, shard_idx, num_shards
)
if not PathManager.exists(filename):
raise IOError("Model file not found: {}".format(filename))
if state is None:
state = load_checkpoint_to_cpu(filename, arg_overrides)
if "args" in state and state["args"] is not None:
cfg = convert_namespace_to_omegaconf(state["args"])
elif "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
else:
raise RuntimeError(
f"Neither args nor cfg exist in state keys = {state.keys()}"
)
if task is None:
task = tasks.setup_task(cfg.task)
if "task_state" in state:
task.load_state_dict(state["task_state"])
if "fsdp_metadata" in state and num_shards > 1:
model_shard_state["shard_weights"].append(state["model"])
model_shard_state["shard_metadata"].append(state["fsdp_metadata"])
# check FSDP import before the code goes too far
if not has_FSDP:
raise ImportError(
"Cannot find FullyShardedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
if shard_idx == num_shards - 1:
consolidated_model_state = FSDP.consolidate_shard_weights(
shard_weights=model_shard_state["shard_weights"],
shard_metadata=model_shard_state["shard_metadata"],
)
model = task.build_model(cfg.model)
if (
"optimizer_history" in state
and len(state["optimizer_history"]) > 0
and "num_updates" in state["optimizer_history"][-1]
):
model.set_num_updates(
state["optimizer_history"][-1]["num_updates"]
)
model.load_state_dict(
consolidated_model_state, strict=strict, model_cfg=cfg.model
)
else:
# model parallel checkpoint or unsharded checkpoint
model = task.build_model(cfg.model)
if (
"optimizer_history" in state
and len(state["optimizer_history"]) > 0
and "num_updates" in state["optimizer_history"][-1]
):
model.set_num_updates(
state["optimizer_history"][-1]["num_updates"]
)
model.load_state_dict(
state["model"], strict=strict, model_cfg=cfg.model
)
# reset state so it gets loaded for the next model in ensemble
state = None
if shard_idx % 10 == 0 and shard_idx > 0:
elapsed = time.time() - st
logger.info(
f"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx + 1):.2f}s/shard"
)
# build model for ensemble
ensemble.append(model)
return ensemble, cfg, task
The provided code snippet includes necessary dependencies for implementing the `load_model_ensemble` function. Write a Python function `def load_model_ensemble( filenames, arg_overrides: Optional[Dict[str, Any]] = None, task=None, strict=True, suffix="", num_shards=1, state=None, )` to solve the following problem:
Loads an ensemble of models. Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading
Here is the function:
def load_model_ensemble(
filenames,
arg_overrides: Optional[Dict[str, Any]] = None,
task=None,
strict=True,
suffix="",
num_shards=1,
state=None,
):
"""Loads an ensemble of models.
Args:
filenames (List[str]): checkpoint files to load
arg_overrides (Dict[str,Any], optional): override model args that
were used during model training
task (fairseq.tasks.FairseqTask, optional): task to use for loading
"""
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble, args, _task = load_model_ensemble_and_task(
filenames,
arg_overrides,
task,
strict,
suffix,
num_shards,
state,
)
return ensemble, args | Loads an ensemble of models. Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading |
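A hedged usage sketch of `load_model_ensemble`; the checkpoint filenames and the data-path override are placeholders, not values taken from the source:
# Load two checkpoints trained on the same task as an ensemble for inference.
models_list, saved_cfg = load_model_ensemble(
    ["/path/to/checkpoint1.pt", "/path/to/checkpoint2.pt"],
    arg_overrides={"data": "/path/to/data-bin"},
)
for m in models_list:
    m.eval()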
181,432 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.data import data_utils
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from fairseq.ds_trainer import DeepSpeedTrainer
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `prune_state_dict` function. Write a Python function `def prune_state_dict(state_dict, model_cfg: Optional[DictConfig])` to solve the following problem:
Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly.
Here is the function:
def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):
"""Prune the given state_dict if desired for LayerDrop
(https://arxiv.org/abs/1909.11556).
Training with LayerDrop allows models to be robust to pruning at inference
time. This function prunes state_dict to allow smaller models to be loaded
from a larger model and re-maps the existing state_dict for this to occur.
It's called by functions that load models from checkpoints and does not
need to be called directly.
"""
arch = None
if model_cfg is not None:
arch = (
model_cfg._name
if isinstance(model_cfg, DictConfig)
else getattr(model_cfg, "arch", None)
)
if not model_cfg or arch is None or arch == "ptt_transformer":
    # args should not be None, but don't crash if it is.
return state_dict
encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None)
decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None)
if not encoder_layers_to_keep and not decoder_layers_to_keep:
return state_dict
# apply pruning
logger.info(
"Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
)
def create_pruning_pass(layers_to_keep, layer_name):
keep_layers = sorted(
int(layer_string) for layer_string in layers_to_keep.split(",")
)
mapping_dict = {}
for i in range(len(keep_layers)):
mapping_dict[str(keep_layers[i])] = str(i)
regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
return {"substitution_regex": regex, "mapping_dict": mapping_dict}
pruning_passes = []
if encoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
if decoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))
new_state_dict = {}
for layer_name in state_dict.keys():
match = re.search(r"\.layers\.(\d+)\.", layer_name)
# if layer has no number in it, it is a supporting layer, such as an
# embedding
if not match:
new_state_dict[layer_name] = state_dict[layer_name]
continue
# otherwise, layer should be pruned.
original_layer_number = match.group(1)
# figure out which mapping dict to replace from
for pruning_pass in pruning_passes:
if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
"substitution_regex"
].search(layer_name):
new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
substitution_match = pruning_pass["substitution_regex"].search(
layer_name
)
new_state_key = (
layer_name[: substitution_match.start(1)]
+ new_layer_number
+ layer_name[substitution_match.end(1):]
)
new_state_dict[new_state_key] = state_dict[layer_name]
# Since layers are now pruned, *_layers_to_keep are no longer needed.
# This is more of "It would make it work fix" rather than a proper fix.
if isinstance(model_cfg, DictConfig):
context = open_dict(model_cfg)
else:
context = contextlib.ExitStack()
with context:
if hasattr(model_cfg, "encoder_layers_to_keep"):
model_cfg.encoder_layers_to_keep = None
if hasattr(model_cfg, "decoder_layers_to_keep"):
model_cfg.decoder_layers_to_keep = None
return new_state_dict | Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly. |
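A small self-contained sketch of the layer re-mapping `prune_state_dict` performs; the Namespace-based config and the toy state dict are assumptions for illustration only:
import torch
from argparse import Namespace

toy_cfg = Namespace(arch="transformer", encoder_layers_to_keep="0,2", decoder_layers_to_keep=None)
toy_state = {
    "encoder.layers.0.fc1.weight": torch.zeros(1),
    "encoder.layers.1.fc1.weight": torch.zeros(1),  # dropped: layer 1 is not kept
    "encoder.layers.2.fc1.weight": torch.zeros(1),  # kept and renumbered to layer 1
    "encoder.embed_tokens.weight": torch.zeros(1),  # no layer number, always kept
}
pruned = prune_state_dict(toy_state, toy_cfg)
assert sorted(pruned) == [
    "encoder.embed_tokens.weight",
    "encoder.layers.0.fc1.weight",
    "encoder.layers.1.fc1.weight",
]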
181,433 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.data import data_utils
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from fairseq.ds_trainer import DeepSpeedTrainer
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
"""Loads a checkpoint to CPU (with upgrading for backward compatibility).
If doing single-GPU training or if the checkpoint is only being loaded by at
most one process on each node (current default behavior is for only rank 0
to read the checkpoint from disk), load_on_all_ranks should be False to
avoid errors from torch.distributed not having been initialized or
torch.distributed.barrier() hanging.
If all processes on each node may be loading the checkpoint
simultaneously, load_on_all_ranks should be set to True to avoid I/O
conflicts.
There's currently no support for > 1 but < all processes loading the
checkpoint on each node.
"""
local_path = PathManager.get_local_path(path)
# The locally cached file returned by get_local_path() may be stale for
# remote files that are periodically updated/overwritten (ex:
# checkpoint_last.pt) - so we remove the local copy, sync across processes
# (if needed), and then download a fresh copy.
if local_path != path and PathManager.path_requires_pathmanager(path):
try:
os.remove(local_path)
except FileNotFoundError:
# With potentially multiple processes removing the same file, the
# file being missing is benign (missing_ok isn't available until
# Python 3.8).
pass
if load_on_all_ranks:
torch.distributed.barrier()
local_path = PathManager.get_local_path(path)
with open(local_path, "rb") as f:
state = torch.load(f, map_location=torch.device("cpu"))
if "args" in state and state["args"] is not None and arg_overrides is not None:
args = state["args"]
for arg_name, arg_val in arg_overrides.items():
setattr(args, arg_name, arg_val)
if "cfg" in state and state["cfg"] is not None:
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
from omegaconf import _utils
old_primitive = _utils.is_primitive_type
_utils.is_primitive_type = lambda _: True
state["cfg"] = OmegaConf.create(state["cfg"])
_utils.is_primitive_type = old_primitive
OmegaConf.set_struct(state["cfg"], True)
if arg_overrides is not None:
overwrite_args_by_name(state["cfg"], arg_overrides)
state = _upgrade_state_dict(state)
return state
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
The provided code snippet includes necessary dependencies for implementing the `load_pretrained_component_from_model` function. Write a Python function `def load_pretrained_component_from_model( component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str )` to solve the following problem:
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file.
Here is the function:
def load_pretrained_component_from_model(
component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
):
"""
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
provided `component` object. If state_dict fails to load, there may be a
mismatch in the architecture of the corresponding `component` found in the
`checkpoint` file.
"""
if not PathManager.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = load_checkpoint_to_cpu(checkpoint)
if isinstance(component, FairseqEncoder):
component_type = "encoder"
elif isinstance(component, FairseqDecoder):
component_type = "decoder"
else:
raise ValueError(
"component to load must be either a FairseqEncoder or "
"FairseqDecoder. Loading other component types are not supported."
)
component_state_dict = OrderedDict()
for key in state["model"].keys():
if key.startswith(component_type):
# encoder.input_layers.0.0.weight --> input_layers.0.0.weight
component_subkey = key[len(component_type) + 1:]
component_state_dict[component_subkey] = state["model"][key]
component.load_state_dict(component_state_dict, strict=True)
return component | Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file. |
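A hedged sketch of warm-starting an encoder with the helper above; `model` and the checkpoint path are assumptions:
# Copies every "encoder.*" entry of the checkpoint into the freshly built encoder.
model.encoder = load_pretrained_component_from_model(
    model.encoder, "/path/to/pretrained_checkpoint.pt"
)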
181,434 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.data import data_utils
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from fairseq.ds_trainer import DeepSpeedTrainer
logger = logging.getLogger(__name__)
def verify_checkpoint_directory(save_dir: str, rank: int) -> None:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
temp_file_path = os.path.join(save_dir, f"dummy-{rank}")
try:
with open(temp_file_path, "w"):
pass
except OSError as e:
logger.warning(
"Unable to access checkpoint save directory: {}".format(save_dir)
)
raise e
else:
os.remove(temp_file_path) | null |
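A one-line sketch of the helper above; the directory name is illustrative:
verify_checkpoint_directory("checkpoints", rank=0)  # raises OSError if the directory is not writable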
181,435 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.data import data_utils
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from fairseq.ds_trainer import DeepSpeedTrainer
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
The provided code snippet includes necessary dependencies for implementing the `load_ema_from_checkpoint` function. Write a Python function `def load_ema_from_checkpoint(fpath)` to solve the following problem:
Loads exponential moving averaged (EMA) checkpoint from input and returns a model with ema weights. Args: fpath: A string path of checkpoint to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors.
Here is the function:
def load_ema_from_checkpoint(fpath):
"""Loads exponential moving averaged (EMA) checkpoint from input and
returns a model with ema weights.
Args:
fpath: A string path of checkpoint to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
new_state = None
with PathManager.open(fpath, 'rb') as f:
new_state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
),
)
# EMA model is stored in a separate "extra state"
model_params = new_state['extra_state']['ema']
for key in list(model_params.keys()):
p = model_params[key]
if isinstance(p, torch.HalfTensor):
p = p.float()
if key not in params_dict:
params_dict[key] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
raise ValueError("Key {} is repeated in EMA model params.".format(key))
if len(params_dict) == 0:
raise ValueError(
f"Input checkpoint path '{fpath}' does not contain "
"ema model weights, is this model trained with EMA?"
)
new_state['model'] = params_dict
return new_state | Loads exponential moving averaged (EMA) checkpoint from input and returns a model with ema weights. Args: fpath: A string path of checkpoint to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. |
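A hedged usage sketch of `load_ema_from_checkpoint`; the checkpoint path and `model` are assumptions:
# Swap a model's weights for the EMA weights stored in the checkpoint's extra state.
state = load_ema_from_checkpoint("/path/to/checkpoint_with_ema.pt")
model.load_state_dict(state["model"], strict=True)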
181,455 | import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional, TYPE_CHECKING
import torch
import torch.nn.functional as F
from torch import Tensor
import collections
import sys
sys.modules["fairseq.distributed_utils"] = distributed_utils
sys.modules["fairseq.meters"] = meters
sys.modules["fairseq.metrics"] = metrics
sys.modules["fairseq.progress_bar"] = progress_bar
def import_tasks(tasks_dir, namespace):
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group(
"Additional command-line arguments"
)
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
import_tasks(tasks_dir, "fairseq.tasks")
def import_models(models_dir, namespace):
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group("Named architectures")
group_archs.add_argument(
"--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name]
)
group_args = parser.add_argument_group(
"Additional command-line arguments"
)
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + "_parser"] = parser
import_models(models_dir, "fairseq.models")
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path) and not os.path.isfile(
os.path.dirname(module_path)
):
fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
fairseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
raise FileNotFoundError(module_path)
# ensure that user modules are only imported once
import_user_module.memo = getattr(import_user_module, "memo", set())
if module_path not in import_user_module.memo:
import_user_module.memo.add(module_path)
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
tasks_path = os.path.join(module_path, "tasks")
if os.path.exists(tasks_path):
from fairseq.tasks import import_tasks
import_tasks(tasks_path, f"{module_name}.tasks")
models_path = os.path.join(module_path, "models")
if os.path.exists(models_path):
from fairseq.models import import_models
import_models(models_path, f"{module_name}.models")
else:
raise ImportError(
"Failed to import --user-dir={} because the corresponding module name "
"({}) is not globally unique. Please rename the directory to "
"something unique and try again.".format(module_path, module_name)
) | null |
181,458 | import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional, TYPE_CHECKING
import torch
import torch.nn.functional as F
from torch import Tensor
import collections
def safe_round(number, ndigits):
if hasattr(number, "__round__"):
return round(number, ndigits)
elif torch is not None and torch.is_tensor(number) and number.numel() == 1:
return safe_round(number.item(), ndigits)
elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"):
return safe_round(number.item(), ndigits)
else:
return number
def get_perplexity(loss, round=2, base=2):
from fairseq.logging.meters import safe_round
if loss is None:
return 0.0
try:
return safe_round(base ** loss, round)
except OverflowError:
return float("inf") | null |
181,459 | import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional, TYPE_CHECKING
import torch
import torch.nn.functional as F
from torch import Tensor
import collections
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def relu_squared(x: torch.Tensor):
return F.relu(x).pow(2)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
The provided code snippet includes necessary dependencies for implementing the `get_activation_fn` function. Write a Python function `def get_activation_fn(activation: str) -> Callable` to solve the following problem:
Returns the activation function corresponding to `activation`
Here is the function:
def get_activation_fn(activation: str) -> Callable:
"""Returns the activation function corresponding to `activation`"""
from fairseq.modules import gelu, gelu_accurate
if activation == "relu":
return F.relu
elif activation == "relu_squared":
return relu_squared
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
deprecation_warning(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
elif activation == "swish":
return torch.nn.SiLU
else:
raise RuntimeError("--activation-fn {} not supported".format(activation)) | Returns the activation function corresponding to `activation` |
181,471 | import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional, TYPE_CHECKING
import torch
import torch.nn.functional as F
from torch import Tensor
import collections
def is_xla_tensor(tensor):
return torch.is_tensor(tensor) and tensor.device.type == "xla"
def index_put(tensor, indices, value):
if is_xla_tensor(tensor):
for _ in range(indices.dim(), tensor.dim()):
indices = indices.unsqueeze(-1)
if indices.size(-1) < tensor.size(-1):
indices = indices.expand_as(tensor)
tensor = torch.mul(tensor, ~indices) + torch.mul(value, indices)
else:
tensor[indices] = value
return tensor | null |
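A small sketch of `index_put` on an ordinary CPU tensor, where it reduces to plain masked assignment:
t = torch.zeros(4)
mask = torch.tensor([True, False, True, False])
t = index_put(t, mask, 1.0)  # tensor([1., 0., 1., 0.])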
181,477 | import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional, TYPE_CHECKING
import torch
import torch.nn.functional as F
from torch import Tensor
import collections
import sys
sys.modules["fairseq.distributed_utils"] = distributed_utils
sys.modules["fairseq.meters"] = meters
sys.modules["fairseq.metrics"] = metrics
sys.modules["fairseq.progress_bar"] = progress_bar
def reset_logging():
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
root.addHandler(handler) | null |
181,503 | import contextlib
import logging
import os
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Any, Dict, List
import torch
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
from fairseq.utils import safe_hasattr
from omegaconf import OmegaConf
def _catalog_shared_params(module, memo=None, prefix=""):
if memo is None:
first_call = True
memo = {}
else:
first_call = False
for name, param in module._parameters.items():
param_prefix = prefix + ("." if prefix else "") + name
if param not in memo:
memo[param] = []
memo[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
_catalog_shared_params(m, memo, submodule_prefix)
if first_call:
return [x for x in memo.values() if len(x) > 1] | null |
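An illustrative sketch of `_catalog_shared_params` on a toy module with tied weights; the module itself is an assumption:
import torch.nn as nn

tied = nn.Module()
tied.emb = nn.Embedding(10, 4)
tied.proj = nn.Linear(4, 10, bias=False)
tied.proj.weight = tied.emb.weight  # tie the two parameters
print(_catalog_shared_params(tied))  # [['emb.weight', 'proj.weight']]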
181,504 | import contextlib
import logging
import os
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Any, Dict, List
import torch
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
from fairseq.utils import safe_hasattr
from omegaconf import OmegaConf
def _get_module_by_path(module, path):
path = path.split(".")
for name in path:
module = getattr(module, name)
return module | null |
181,505 | import contextlib
import logging
import os
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Any, Dict, List
import torch
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
from fairseq.utils import safe_hasattr
from omegaconf import OmegaConf
def _set_module_by_path(module, path, value):
path = path.split(".")
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value) | null |
181,511 | import os
import sys
import torch
import time
import logging
import deepspeed
import json
import subprocess
from typing import Any, Dict, List
from itertools import chain
from argparse import Namespace
import torch.distributed as dist
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.distributed import utils as distributed_utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.logging import meters, metrics
from fairseq.optim import lr_scheduler
from fairseq.optim.dynamic_loss_scaler import DynamicLossScaler
from fairseq.trainer import Trainer
from fairseq.file_io import PathManager
from omegaconf import OmegaConf
def get_config(config, full_name, fairseq_value):
_config = config
for name in full_name.split(":"):
if name in _config:
_config = _config[name]
else:
_config = fairseq_value
break
assert _config == fairseq_value, f"deepspeed config: {full_name} does not align with fairseq value: {fairseq_value}"
return _config | null |
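A small sketch of `get_config`, which walks a colon-separated path through a (DeepSpeed-style) config dict and asserts the value matches the fairseq-side setting; the dict is illustrative:
ds_config = {"optimizer": {"params": {"lr": 0.0005}}}
lr = get_config(ds_config, "optimizer:params:lr", fairseq_value=0.0005)  # -> 0.0005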
181,512 | import os
import sys
import torch
import time
import logging
import deepspeed
import json
import subprocess
from typing import Any, Dict, List
from itertools import chain
from argparse import Namespace
import torch.distributed as dist
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.distributed import utils as distributed_utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.logging import meters, metrics
from fairseq.optim import lr_scheduler
from fairseq.optim.dynamic_loss_scaler import DynamicLossScaler
from fairseq.trainer import Trainer
from fairseq.file_io import PathManager
from omegaconf import OmegaConf
def _catalog_shared_params(module, memo=None, prefix=""):
if memo is None:
first_call = True
memo = {}
else:
first_call = False
for name, param in module._parameters.items():
param_prefix = prefix + ("." if prefix else "") + name
if param not in memo:
memo[param] = []
memo[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
_catalog_shared_params(m, memo, submodule_prefix)
if first_call:
return [x for x in memo.values() if len(x) > 1] | null |
181,513 | import os
import sys
import torch
import time
import logging
import deepspeed
import json
import subprocess
from typing import Any, Dict, List
from itertools import chain
from argparse import Namespace
import torch.distributed as dist
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.distributed import utils as distributed_utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.logging import meters, metrics
from fairseq.optim import lr_scheduler
from fairseq.optim.dynamic_loss_scaler import DynamicLossScaler
from fairseq.trainer import Trainer
from fairseq.file_io import PathManager
from omegaconf import OmegaConf
def _get_module_by_path(module, path):
path = path.split(".")
for name in path:
module = getattr(module, name)
return module | null |
181,514 | import os
import sys
import torch
import time
import logging
import deepspeed
import json
import subprocess
from typing import Any, Dict, List
from itertools import chain
from argparse import Namespace
import torch.distributed as dist
from fairseq import checkpoint_utils, models, optim, utils
from fairseq.distributed import utils as distributed_utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.logging import meters, metrics
from fairseq.optim import lr_scheduler
from fairseq.optim.dynamic_loss_scaler import DynamicLossScaler
from fairseq.trainer import Trainer
from fairseq.file_io import PathManager
from omegaconf import OmegaConf
def _set_module_by_path(module, path, value):
path = path.split(".")
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value) | null |
181,519 | import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
from fairseq.file_io import PathManager
from fairseq import utils
import os
import bisect
from torch.utils.data.dataloader import default_collate
logger = logging.getLogger(__name__)
class ConcatDataset(FairseqDataset):
    @staticmethod
    def cumsum(sequence, sample_ratios):
r, s = [], 0
for e, ratio in zip(sequence, sample_ratios):
curr_len = int(ratio * len(e))
r.append(curr_len + s)
s += curr_len
return r
def __init__(self, datasets, sample_ratios=1):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, "datasets should not be an empty iterable"
self.datasets = list(datasets)
if isinstance(sample_ratios, int):
sample_ratios = [sample_ratios] * len(self.datasets)
self.sample_ratios = sample_ratios
self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
self.real_sizes = [len(d) for d in self.datasets]
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx][sample_idx]
def _get_dataset_and_sample_index(self, idx: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
sample_idx = sample_idx % self.real_sizes[dataset_idx]
return dataset_idx, sample_idx
def collater(self, samples, **extra_args):
# For now only supports datasets with same underlying collater implementations
if hasattr(self.datasets[0], "collater"):
return self.datasets[0].collater(samples, **extra_args)
else:
return default_collate(samples, **extra_args)
def size(self, idx: int):
"""
Return an example's size as a float or tuple.
"""
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx].size(sample_idx)
def num_tokens(self, index: int):
return np.max(self.size(index))
def attr(self, attr: str, index: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
return getattr(self.datasets[dataset_idx], attr, None)
    @property
    def sizes(self):
_dataset_sizes = []
for ds, sr in zip(self.datasets, self.sample_ratios):
if isinstance(ds.sizes, np.ndarray):
_dataset_sizes.append(np.tile(ds.sizes, sr))
else:
# Only support underlying dataset with single size array.
assert isinstance(ds.sizes, list)
_dataset_sizes.append(np.tile(ds.sizes[0], sr))
return np.concatenate(_dataset_sizes)
def supports_prefetch(self):
return all(d.supports_prefetch for d in self.datasets)
def ordered_indices(self):
"""
Returns indices sorted by length. So less padding is needed.
"""
if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1:
# special handling for concatenating lang_pair_datasets
indices = np.arange(len(self))
sizes = self.sizes
tgt_sizes = (
sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
)
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(src_sizes[indices], kind="mergesort")]
else:
return np.argsort(self.sizes)
def prefetch(self, indices):
frm = 0
for to, ds in zip(self.cumulative_sizes, self.datasets):
real_size = len(ds)
if getattr(ds, "supports_prefetch", False):
ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to])
frm = to
def can_reuse_epoch_itr_across_epochs(self):
return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch)
The provided code snippet includes necessary dependencies for implementing the `load_indexed_dataset` function. Write a Python function `def load_indexed_dataset( path, dictionary=None, dataset_impl=None, combine=False, default="cached" )` to solve the following problem:
A helper function for loading indexed datasets. Args: path (str): path to indexed dataset (e.g., 'data-bin/train') dictionary (~fairseq.data.Dictionary): data dictionary dataset_impl (str, optional): which dataset implementation to use. If not provided, it will be inferred automatically. For legacy indexed data we use the 'cached' implementation by default. combine (bool, optional): automatically load and combine multiple datasets. For example, if *path* is 'data-bin/train', then we will combine 'data-bin/train', 'data-bin/train1', ... and return a single ConcatDataset instance.
Here is the function:
def load_indexed_dataset(
path, dictionary=None, dataset_impl=None, combine=False, default="cached"
):
"""A helper function for loading indexed datasets.
Args:
path (str): path to indexed dataset (e.g., 'data-bin/train')
dictionary (~fairseq.data.Dictionary): data dictionary
dataset_impl (str, optional): which dataset implementation to use. If
not provided, it will be inferred automatically. For legacy indexed
data we use the 'cached' implementation by default.
combine (bool, optional): automatically load and combine multiple
datasets. For example, if *path* is 'data-bin/train', then we will
combine 'data-bin/train', 'data-bin/train1', ... and return a
single ConcatDataset instance.
"""
import fairseq.data.indexed_dataset as indexed_dataset
from fairseq.data.concat_dataset import ConcatDataset
datasets = []
for k in itertools.count():
path_k = path + (str(k) if k > 0 else "")
try:
path_k = indexed_dataset.get_indexed_dataset_to_local(path_k)
except Exception as e:
if "StorageException: [404] Path not found" in str(e):
logger.warning(f"path_k: {e} not found")
else:
raise e
dataset_impl_k = dataset_impl
if dataset_impl_k is None:
dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k)
dataset = indexed_dataset.make_dataset(
path_k,
impl=dataset_impl_k or default,
fix_lua_indexing=True,
dictionary=dictionary,
)
if dataset is None:
break
logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k))
datasets.append(dataset)
if not combine:
break
if len(datasets) == 0:
return None
elif len(datasets) == 1:
return datasets[0]
else:
return ConcatDataset(datasets) | A helper function for loading indexed datasets. Args: path (str): path to indexed dataset (e.g., 'data-bin/train') dictionary (~fairseq.data.Dictionary): data dictionary dataset_impl (str, optional): which dataset implementation to use. If not provided, it will be inferred automatically. For legacy indexed data we use the 'cached' implementation by default. combine (bool, optional): automatically load and combine multiple datasets. For example, if *path* is 'data-bin/train', then we will combine 'data-bin/train', 'data-bin/train1', ... and return a single ConcatDataset instance. |
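A usage sketch for load_indexed_dataset; the "data-bin" paths and dictionary file are placeholders that would have to exist on disk.
from fairseq.data import Dictionary, data_utils

# expects data-bin/train.idx / .bin (plus train1, train2, ... when combine=True)
dictionary = Dictionary.load("data-bin/dict.txt")
dataset = data_utils.load_indexed_dataset(
    "data-bin/train",
    dictionary=dictionary,
    dataset_impl=None,  # inferred from the index file's magic bytes
    combine=True,       # concatenate train, train1, ... into one ConcatDataset
)
if dataset is not None:
    print(len(dataset), dataset[0])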
181,527 | import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
from fairseq.file_io import PathManager
from fairseq import utils
import os
def lengths_to_padding_mask(lens):
def lengths_to_mask(lens):
return ~lengths_to_padding_mask(lens) | null |
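Only the signature of lengths_to_padding_mask appears above; the sketch below shows the usual implementation (True at padded positions), offered as an assumption rather than as the record's own body.
import torch

def lengths_to_padding_mask(lens: torch.LongTensor) -> torch.BoolTensor:
    bsz, max_lens = lens.size(0), torch.max(lens).item()
    # a position index >= the sequence length marks a padded slot
    mask = torch.arange(max_lens).to(lens.device).view(1, max_lens)
    mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens)
    return mask

# lengths_to_mask(torch.tensor([2, 3])) -> [[True, True, False], [True, True, True]]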
181,534 | from pathlib import Path
from typing import BinaryIO, Optional, Tuple, Union, List
import mmap
import numpy as np
import torch
import torch.nn.functional as F
def mmap_read(path: str, offset: int, length: int) -> bytes:
def read_from_stored_zip(zip_path: str, offset: int, length: int) -> bytes:
return mmap_read(zip_path, offset, length) | null |
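mmap_read is shown above only as a stub; a plausible mmap-backed body is sketched below as an assumption, not necessarily the original implementation.
def mmap_read(path: str, offset: int, length: int) -> bytes:
    # memory-map the file read-only and slice out the requested byte window
    with open(path, "rb") as f:
        with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_o:
            data = mmap_o[offset : offset + length]
    return data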
181,543 | import csv
import io
import logging
import re
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional
from dataclasses import dataclass
import numpy as np
import torch
from fairseq.data import (
ConcatDataset,
Dictionary,
FairseqDataset,
ResamplingDataset,
data_utils as fairseq_data_utils,
)
from fairseq.data.audio.audio_utils import (
get_fbank,
get_waveform,
read_from_stored_zip,
is_npy_data,
is_sf_audio_data,
parse_path,
FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS,
)
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform
from fairseq.data.audio.data_cfg import S2TDataConfig
def get_features_from_npy_or_audio(path):
ext = Path(path).suffix
if ext not in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS:
raise ValueError(f'Unsupported file format for "{path}"')
return np.load(path) if ext == ".npy" else get_fbank(path)
def get_features_or_waveform_from_stored_zip(
path,
byte_offset,
byte_size,
need_waveform=False,
use_sample_rate=None,
):
assert path.endswith(".zip")
data = read_from_stored_zip(path, byte_offset, byte_size)
f = io.BytesIO(data)
if is_npy_data(data):
features_or_waveform = np.load(f)
elif is_sf_audio_data(data):
features_or_waveform = (
get_waveform(f, always_2d=False, output_sample_rate=use_sample_rate)[0]
if need_waveform
else get_fbank(f)
)
else:
raise ValueError(f'Unknown file format for "{path}"')
return features_or_waveform
def get_waveform(
path_or_fp: Union[str, BinaryIO], normalization: bool = True,
mono: bool = True, frames: int = -1, start: int = 0,
always_2d: bool = True, output_sample_rate: Optional[int] = None,
normalize_volume: bool = False
) -> Tuple[np.ndarray, int]:
"""Get the waveform and sample rate of a 16-bit WAV/FLAC/OGG Vorbis audio.
Args:
path_or_fp (str or BinaryIO): the path or file-like object
normalization (bool): normalize values to [-1, 1] (Default: True)
mono (bool): convert multi-channel audio to mono-channel one
frames (int): the number of frames to read. (-1 for reading all)
start (int): Where to start reading. A negative value counts from the end.
always_2d (bool): always return 2D array even for mono-channel audios
output_sample_rate (Optional[int]): output sample rate
normalize_volume (bool): normalize volume
Returns:
waveform (numpy.ndarray): 1D or 2D waveform (channels x length)
sample_rate (float): sample rate
"""
if isinstance(path_or_fp, str):
ext = Path(path_or_fp).suffix
if ext not in SF_AUDIO_FILE_EXTENSIONS:
raise ValueError(f"Unsupported audio format: {ext}")
try:
import soundfile as sf
except ImportError:
raise ImportError("Please install soundfile: pip install soundfile")
waveform, sample_rate = sf.read(
path_or_fp, dtype="float32", always_2d=True, frames=frames, start=start
)
waveform = waveform.T # T x C -> C x T
waveform, sample_rate = convert_waveform(
waveform, sample_rate, normalize_volume=normalize_volume, to_mono=mono,
to_sample_rate=output_sample_rate
)
if not normalization:
waveform *= 2 ** 15 # denormalized to 16-bit signed integers
if not always_2d:
waveform = waveform.squeeze(axis=0)
return waveform, sample_rate
def parse_path(path: str) -> Tuple[str, List[int]]:
"""Parse data path which is either a path to
1. a .npy/.wav/.flac/.ogg file
2. a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]"
Args:
path (str): the data path to parse
Returns:
file_path (str): the file path
slice_ptr (list of int): empty in case 1;
byte offset and length for the slice in case 2
"""
if Path(path).suffix in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS:
_path, slice_ptr = path, []
else:
_path, *slice_ptr = path.split(":")
if not Path(_path).is_file():
raise FileNotFoundError(f"File not found: {_path}")
assert len(slice_ptr) in {0, 2}, f"Invalid path: {path}"
slice_ptr = [int(i) for i in slice_ptr]
return _path, slice_ptr
The provided code snippet includes necessary dependencies for implementing the `get_features_or_waveform` function. Write a Python function `def get_features_or_waveform(path: str, need_waveform=False, use_sample_rate=None)` to solve the following problem:
Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. use_sample_rate (int): change sample rate for the input wave file Returns: features_or_waveform (numpy.ndarray): speech features or waveform.
Here is the function:
def get_features_or_waveform(path: str, need_waveform=False, use_sample_rate=None):
"""Get speech features from .npy file or waveform from .wav/.flac file.
The file may be inside an uncompressed ZIP file and is accessed via byte
offset and length.
Args:
path (str): File path in the format of "<.npy/.wav/.flac path>" or
"<zip path>:<byte offset>:<byte length>".
need_waveform (bool): return waveform instead of features.
use_sample_rate (int): change sample rate for the input wave file
Returns:
features_or_waveform (numpy.ndarray): speech features or waveform.
"""
_path, slice_ptr = parse_path(path)
if len(slice_ptr) == 0:
if need_waveform:
return get_waveform(
_path, always_2d=False, output_sample_rate=use_sample_rate
)[0]
return get_features_from_npy_or_audio(_path)
elif len(slice_ptr) == 2:
features_or_waveform = get_features_or_waveform_from_stored_zip(
_path,
slice_ptr[0],
slice_ptr[1],
need_waveform=need_waveform,
use_sample_rate=use_sample_rate,
)
else:
raise ValueError(f"Invalid path: {path}")
return features_or_waveform | Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. use_sample_rate (int): change sample rate for the input wave file Returns: features_or_waveform (numpy.ndarray): speech features or waveform. |
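A usage sketch of the two path formats accepted by get_features_or_waveform; "utt1.npy" and "clips.zip" are placeholder files.
# plain feature/audio file: parse_path returns an empty slice pointer and the
# .npy (or audio) file is loaded directly
feats = get_features_or_waveform("utt1.npy")

# stored-ZIP slice "path:offset:length": 16000 bytes starting at offset 2048
# are read out of clips.zip and decoded as a waveform resampled to 16 kHz
wave = get_features_or_waveform(
    "clips.zip:2048:16000", need_waveform=True, use_sample_rate=16000
)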
181,568 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from fairseq.data.huffman import HuffmanMMapIndexedDataset, HuffmanMMapIndex
from . import FairseqDataset
from typing import Union
DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta", "huffman"])
def get_available_dataset_impl():
return list(map(str, DATASET_IMPL_CHOICES)) | null |
181,569 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from fairseq.data.huffman import HuffmanMMapIndexedDataset, HuffmanMMapIndex
from . import FairseqDataset
from typing import Union
def index_file_path(prefix_path):
return prefix_path + ".idx"
class IndexedDataset(FairseqDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b"TNTIDX\x00\x00"
def __init__(self, path, fix_lua_indexing=False):
super().__init__()
self.path = path
self.fix_lua_indexing = fix_lua_indexing
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = f.read(8)
assert struct.unpack("<Q", version) == (1,)
code, self.element_size = struct.unpack("<QQ", f.read(16))
self.dtype = _code_to_dtype[code]
self._len, self.s = struct.unpack("<QQ", f.read(16))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
def read_data(self, path):
self.data_file = open(data_file_path(path), "rb", buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError("index out of range")
def __del__(self):
if self.data_file:
self.data_file.close()
def __getitem__(self, i) -> torch.Tensor:
if not self.data_file:
self.read_data(self.path)
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
def exists(path):
return PathManager.exists(index_file_path(path)) and PathManager.exists(
data_file_path(path)
)
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedRawTextDataset(FairseqDataset):
"""Takes a text file as input and binarizes it in memory at instantiation.
Original lines are also kept in memory"""
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.tokens_list = []
self.lines = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.read_data(path, dictionary)
self.size = len(self.tokens_list)
def read_data(self, path, dictionary):
with open(path, "r", encoding="utf-8") as f:
for line in f:
self.lines.append(line.strip("\n"))
tokens = dictionary.encode_line(
line,
add_if_not_exist=False,
append_eos=self.append_eos,
reverse_order=self.reverse_order,
).long()
self.tokens_list.append(tokens)
self.sizes.append(len(tokens))
self.sizes = np.array(self.sizes)
def check_index(self, i):
if i < 0 or i >= self.size:
raise IndexError("index out of range")
def __getitem__(self, i):
self.check_index(i)
return self.tokens_list[i]
def get_original_text(self, i):
self.check_index(i)
return self.lines[i]
def __del__(self):
pass
def __len__(self):
return self.size
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
def exists(path):
return PathManager.exists(path)
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index:
_HDR_MAGIC = b"MMIDIDX\x00\x00"
def writer(cls, path, dtype):
class _Writer:
def __enter__(self):
self._file = open(path, "wb")
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack("<Q", 1))
self._file.write(struct.pack("<B", _dtype_header_code(dtype)))
return self
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack("<Q", len(sizes)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order="C"))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order="C"))
del pointers
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path):
with open(path, "rb") as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = struct.unpack("<Q", stream.read(8))
assert (1,) == version
(dtype_code,) = struct.unpack("<B", stream.read(1))
self._dtype = _code_to_dtype[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack("<Q", stream.read(8))[0]
offset = stream.tell()
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(
self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
)
self._pointers = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes,
)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
def dtype(self):
return self._dtype
def sizes(self):
return self._sizes
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path):
self._path = path
self._index = self.Index(index_file_path(self._path))
_warmup_mmap_file(data_file_path(self._path))
self._bin_buffer_mmap = np.memmap(
data_file_path(self._path), mode="r", order="C"
)
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
def __getitem__(self, i):
ptr, size = self._index[i]
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
if self._index.dtype != np.int64:
np_array = np_array.astype(np.int64)
return torch.from_numpy(np_array)
def sizes(self):
return self._index.sizes
def supports_prefetch(self):
return False
def exists(path):
return PathManager.exists(index_file_path(path)) and PathManager.exists(
data_file_path(path)
)
class FastaDataset(torch.utils.data.Dataset):
"""
For loading protein sequence datasets in the common FASTA data format
"""
def __init__(self, path: str, cache_indices=False):
self.fn = fasta_file_path(path)
self.threadlocal = threading.local()
self.cache = Path(f"{path}.fasta.idx.npy")
if cache_indices:
if self.cache.exists():
self.offsets, self.sizes = np.load(self.cache)
else:
self.offsets, self.sizes = self._build_index(path)
np.save(self.cache, np.stack([self.offsets, self.sizes]))
else:
self.offsets, self.sizes = self._build_index(path)
def _get_file(self):
if not hasattr(self.threadlocal, "f"):
self.threadlocal.f = open(self.fn, "r")
return self.threadlocal.f
def __getitem__(self, idx):
f = self._get_file()
f.seek(self.offsets[idx])
desc = f.readline().strip()
line = f.readline()
seq = ""
while line != "" and line[0] != ">":
seq += line.strip()
line = f.readline()
return desc, seq
def __len__(self):
return self.offsets.size
def _build_index(self, path: str):
# Use grep and awk to get 100M/s on local SSD.
# Should process your enormous 100G fasta in ~10 min single core...
path = fasta_file_path(path)
bytes_offsets = subprocess.check_output(
f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
"| grep --byte-offset '^>' -o | cut -d: -f1",
shell=True,
)
fasta_lengths = subprocess.check_output(
f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
"| awk '/^>/ {print \"\";next;} { printf(\"%s\",$0);}' | tail -n+2 | awk '{print length($1)}'",
shell=True,
)
bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=" ")
sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=" ")
return bytes_np, sizes_np
def __setstate__(self, state):
self.__dict__ = state
self.threadlocal = threading.local()
def __getstate__(self):
d = {}
for i, v in self.__dict__.items():
if i != "threadlocal":
d[i] = v
return d
def __del__(self):
if hasattr(self.threadlocal, "f"):
self.threadlocal.f.close()
del self.threadlocal.f
def exists(path):
return os.path.exists(fasta_file_path(path))
def infer_dataset_impl(path):
if IndexedRawTextDataset.exists(path):
return "raw"
elif IndexedDataset.exists(path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return "cached"
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return "mmap"
elif magic == HuffmanMMapIndex._HDR_MAGIC[:8]:
return "huffman"
else:
return None
elif FastaDataset.exists(path):
return "fasta"
else:
return None | null |
181,571 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from fairseq.data.huffman import HuffmanMMapIndexedDataset, HuffmanMMapIndex
from . import FairseqDataset
from typing import Union
class IndexedDataset(FairseqDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b"TNTIDX\x00\x00"
def __init__(self, path, fix_lua_indexing=False):
super().__init__()
self.path = path
self.fix_lua_indexing = fix_lua_indexing
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = f.read(8)
assert struct.unpack("<Q", version) == (1,)
code, self.element_size = struct.unpack("<QQ", f.read(16))
self.dtype = _code_to_dtype[code]
self._len, self.s = struct.unpack("<QQ", f.read(16))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
def read_data(self, path):
self.data_file = open(data_file_path(path), "rb", buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError("index out of range")
def __del__(self):
if self.data_file:
self.data_file.close()
def __getitem__(self, i) -> torch.Tensor:
if not self.data_file:
self.read_data(self.path)
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
def exists(path):
return PathManager.exists(index_file_path(path)) and PathManager.exists(
data_file_path(path)
)
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path, fix_lua_indexing=False):
super().__init__(path, fix_lua_indexing=fix_lua_indexing)
self.cache = None
self.cache_index = {}
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx : ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx : ptx + a.size])
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
class IndexedRawTextDataset(FairseqDataset):
"""Takes a text file as input and binarizes it in memory at instantiation.
Original lines are also kept in memory"""
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.tokens_list = []
self.lines = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.read_data(path, dictionary)
self.size = len(self.tokens_list)
def read_data(self, path, dictionary):
with open(path, "r", encoding="utf-8") as f:
for line in f:
self.lines.append(line.strip("\n"))
tokens = dictionary.encode_line(
line,
add_if_not_exist=False,
append_eos=self.append_eos,
reverse_order=self.reverse_order,
).long()
self.tokens_list.append(tokens)
self.sizes.append(len(tokens))
self.sizes = np.array(self.sizes)
def check_index(self, i):
if i < 0 or i >= self.size:
raise IndexError("index out of range")
def __getitem__(self, i):
self.check_index(i)
return self.tokens_list[i]
def get_original_text(self, i):
self.check_index(i)
return self.lines[i]
def __del__(self):
pass
def __len__(self):
return self.size
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
def exists(path):
return PathManager.exists(path)
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index:
_HDR_MAGIC = b"MMIDIDX\x00\x00"
def writer(cls, path, dtype):
class _Writer:
def __enter__(self):
self._file = open(path, "wb")
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack("<Q", 1))
self._file.write(struct.pack("<B", _dtype_header_code(dtype)))
return self
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack("<Q", len(sizes)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order="C"))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order="C"))
del pointers
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path):
with open(path, "rb") as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = struct.unpack("<Q", stream.read(8))
assert (1,) == version
(dtype_code,) = struct.unpack("<B", stream.read(1))
self._dtype = _code_to_dtype[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack("<Q", stream.read(8))[0]
offset = stream.tell()
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(
self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
)
self._pointers = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes,
)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
def dtype(self):
return self._dtype
def sizes(self):
return self._sizes
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path):
self._path = path
self._index = self.Index(index_file_path(self._path))
_warmup_mmap_file(data_file_path(self._path))
self._bin_buffer_mmap = np.memmap(
data_file_path(self._path), mode="r", order="C"
)
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
def __getitem__(self, i):
ptr, size = self._index[i]
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
if self._index.dtype != np.int64:
np_array = np_array.astype(np.int64)
return torch.from_numpy(np_array)
def sizes(self):
return self._index.sizes
def supports_prefetch(self):
return False
def exists(path):
return PathManager.exists(index_file_path(path)) and PathManager.exists(
data_file_path(path)
)
class FastaDataset(torch.utils.data.Dataset):
"""
For loading protein sequence datasets in the common FASTA data format
"""
def __init__(self, path: str, cache_indices=False):
self.fn = fasta_file_path(path)
self.threadlocal = threading.local()
self.cache = Path(f"{path}.fasta.idx.npy")
if cache_indices:
if self.cache.exists():
self.offsets, self.sizes = np.load(self.cache)
else:
self.offsets, self.sizes = self._build_index(path)
np.save(self.cache, np.stack([self.offsets, self.sizes]))
else:
self.offsets, self.sizes = self._build_index(path)
def _get_file(self):
if not hasattr(self.threadlocal, "f"):
self.threadlocal.f = open(self.fn, "r")
return self.threadlocal.f
def __getitem__(self, idx):
f = self._get_file()
f.seek(self.offsets[idx])
desc = f.readline().strip()
line = f.readline()
seq = ""
while line != "" and line[0] != ">":
seq += line.strip()
line = f.readline()
return desc, seq
def __len__(self):
return self.offsets.size
def _build_index(self, path: str):
# Use grep and awk to get 100M/s on local SSD.
# Should process your enormous 100G fasta in ~10 min single core...
path = fasta_file_path(path)
bytes_offsets = subprocess.check_output(
f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
"| grep --byte-offset '^>' -o | cut -d: -f1",
shell=True,
)
fasta_lengths = subprocess.check_output(
f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
"| awk '/^>/ {print \"\";next;} { printf(\"%s\",$0);}' | tail -n+2 | awk '{print length($1)}'",
shell=True,
)
bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=" ")
sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=" ")
return bytes_np, sizes_np
def __setstate__(self, state):
self.__dict__ = state
self.threadlocal = threading.local()
def __getstate__(self):
d = {}
for i, v in self.__dict__.items():
if i != "threadlocal":
d[i] = v
return d
def __del__(self):
if hasattr(self.threadlocal, "f"):
self.threadlocal.f.close()
del self.threadlocal.f
def exists(path):
return os.path.exists(fasta_file_path(path))
class EncodedFastaDataset(FastaDataset):
"""
The FastaDataset returns raw sequences - this allows us to return
indices with a dictionary instead.
"""
def __init__(self, path, dictionary):
super().__init__(path, cache_indices=True)
self.dictionary = dictionary
def __getitem__(self, idx):
desc, seq = super().__getitem__(idx)
return self.dictionary.encode_line(seq, line_tokenizer=list).long()
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
if impl == "raw" and IndexedRawTextDataset.exists(path):
assert dictionary is not None
return IndexedRawTextDataset(path, dictionary)
elif impl == "lazy" and IndexedDataset.exists(path):
return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == "cached" and IndexedDataset.exists(path):
return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == "mmap" and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path)
elif impl == "fasta" and FastaDataset.exists(path):
from fairseq.data.fasta_dataset import EncodedFastaDataset
return EncodedFastaDataset(path, dictionary)
elif impl == "huffman" and HuffmanMMapIndexedDataset.exists(path):
return HuffmanMMapIndexedDataset(path)
return None | null |
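A usage sketch combining infer_dataset_impl and make_dataset; the "data-bin/train" prefix is a placeholder pointing at an existing .idx/.bin pair.
impl = infer_dataset_impl("data-bin/train")  # "cached", "mmap", "huffman", ... from the magic bytes
dataset = make_dataset(
    "data-bin/train",
    impl=impl or "cached",
    fix_lua_indexing=True,
    dictionary=None,  # only required by the "raw" and "fasta" implementations
)
if dataset is not None:
    print(len(dataset), dataset[0])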
181,577 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from fairseq.data.huffman import HuffmanMMapIndexedDataset, HuffmanMMapIndex
from . import FairseqDataset
from typing import Union
def index_file_path(prefix_path):
return prefix_path + ".idx"
def data_file_path(prefix_path):
return prefix_path + ".bin"
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
def get_indexed_dataset_to_local(path) -> str:
local_index_path = PathManager.get_local_path(index_file_path(path))
local_data_path = PathManager.get_local_path(data_file_path(path))
assert local_index_path.endswith(".idx") and local_data_path.endswith(".bin"), (
"PathManager.get_local_path does not return files with expected patterns: "
f"{local_index_path} and {local_data_path}"
)
    local_path = local_data_path[:-4]  # stripping suffix ".bin"
    assert local_path == local_index_path[:-4]  # stripping suffix ".idx"
return local_path | null |
181,578 | from argparse import Namespace
from typing import Union
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent
from hydra.core.config_store import ConfigStore
from omegaconf import DictConfig
REGISTRIES = {}
def merge_with_parent(dc: FairseqDataclass, cfg: DictConfig, remove_missing=True):
if remove_missing:
if is_dataclass(dc):
target_keys = set(dc.__dataclass_fields__.keys())
else:
target_keys = set(dc.keys())
with open_dict(cfg):
for k in list(cfg.keys()):
if k not in target_keys:
del cfg[k]
merged_cfg = OmegaConf.merge(dc, cfg)
merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"]
OmegaConf.set_struct(merged_cfg, True)
return merged_cfg
def setup_registry(registry_name: str, base_class=None, default=None, required=False):
assert registry_name.startswith("--")
registry_name = registry_name[2:].replace("-", "_")
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
DATACLASS_REGISTRY = {}
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
"registry": REGISTRY,
"default": default,
"dataclass_registry": DATACLASS_REGISTRY,
}
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
if choice and choice in DATACLASS_REGISTRY:
dc = DATACLASS_REGISTRY[choice]
cfg = merge_with_parent(dc(), cfg)
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice].from_namespace(cfg)
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
def register_x(name, dataclass=None):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError(
"Cannot register duplicate {} ({})".format(registry_name, name)
)
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
"Cannot register {} with duplicate class name ({})".format(
registry_name, cls.__name__
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError(
"{} must extend {}".format(cls.__name__, base_class.__name__)
)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if cls.__dataclass is not None:
DATACLASS_REGISTRY[name] = cls.__dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group=registry_name, node=node, provider="fairseq")
REGISTRY[name] = cls
return cls
return register_x_cls
return build_x, register_x, REGISTRY, DATACLASS_REGISTRY | null |
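A sketch of setup_registry in use; the "--tokenizer" registry name and the WhitespaceTokenizer class are hypothetical and not part of the record.
build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = setup_registry(
    "--tokenizer", default=None
)

@register_tokenizer("whitespace")
class WhitespaceTokenizer:
    def __init__(self, cfg):
        self.cfg = cfg
    def encode(self, line):
        return line.split()

# build_x resolves the choice name to the registered class and instantiates it
tok = build_tokenizer("whitespace")
print(tok.encode("hello world"))  # ['hello', 'world']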
181,598 | import argparse
from pathlib import Path
from typing import Callable, List, Optional, Union
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
EMAConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list
def get_parser(desc, default_task="translation"):
def add_dataset_args(parser, train=False, gen=False):
def add_distributed_training_args(parser, default_world_size=None):
def add_eval_lm_args(parser):
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser | null |