Dataset schema (column, dtype, observed range):

    repo             stringlengths    1 – 99
    file             stringlengths    13 – 215
    code             stringlengths    12 – 59.2M
    file_length      int64            12 – 59.2M
    avg_line_length  float64          3.82 – 1.48M
    max_line_length  int64            12 – 2.51M
    extension_type   stringclasses    1 value
repo: pytorch-CycleGAN-and-pix2pix
file: pytorch-CycleGAN-and-pix2pix-master/util/image_pool.py
import random
import torch


class ImagePool():
    """This class implements an image buffer that stores previously generated images.

    This buffer enables us to update discriminators using a history of generated images
    rather than the ones produced by the latest generators.
    """

    def __init__(self, pool_size):
        """Initialize the ImagePool class

        Parameters:
            pool_size (int) -- the size of image buffer; if pool_size=0, no buffer will be created
        """
        self.pool_size = pool_size
        if self.pool_size > 0:  # create an empty pool
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return an image from the pool.

        Parameters:
            images: the latest generated images from the generator

        Returns images from the buffer.

        With 50% probability, the buffer returns the input images.
        With 50% probability, the buffer returns images previously stored in the buffer,
        and inserts the current images into the buffer.
        """
        if self.pool_size == 0:  # if the buffer size is 0, do nothing
            return images
        return_images = []
        for image in images:
            image = torch.unsqueeze(image.data, 0)
            if self.num_imgs < self.pool_size:  # if the buffer is not full; keep inserting current images to the buffer
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:  # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
                    random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
                    tmp = self.images[random_id].clone()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:  # by another 50% chance, the buffer will return the current image
                    return_images.append(image)
        return_images = torch.cat(return_images, 0)  # collect all the images and return
        return return_images
file_length: 2,226 | avg_line_length: 39.490909 | max_line_length: 140 | extension_type: py
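A minimal usage sketch for ImagePool, as a discriminator update might use it (the pool size, batch shape, and variable names here are illustrative assumptions, not taken from the file):

import torch
# from util.image_pool import ImagePool  # import path within the repo

fake_pool = ImagePool(pool_size=50)

# pretend generator output: batch of 1, 3x256x256, values in [-1, 1] (assumed shape)
fake_B = torch.tanh(torch.randn(1, 3, 256, 256))

# the discriminator sees a mix of fresh fakes and fakes replayed from the buffer
fake_B_for_D = fake_pool.query(fake_B)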
repo: pytorch-CycleGAN-and-pix2pix
file: pytorch-CycleGAN-and-pix2pix-master/util/util.py
"""This module contains simple helper functions """ from __future__ import print_function import torch import numpy as np from PIL import Image import os def tensor2im(input_image, imtype=np.uint8): """"Converts a Tensor array into a numpy image array. Parameters: input_image (tensor) -- the input image tensor array imtype (type) -- the desired type of the converted numpy array """ if not isinstance(input_image, np.ndarray): if isinstance(input_image, torch.Tensor): # get the data from a variable image_tensor = input_image.data else: return input_image image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array if image_numpy.shape[0] == 1: # grayscale to RGB image_numpy = np.tile(image_numpy, (3, 1, 1)) image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling else: # if it is a numpy array, do nothing image_numpy = input_image return image_numpy.astype(imtype) def diagnose_network(net, name='network'): """Calculate and print the mean of average absolute(gradients) Parameters: net (torch network) -- Torch network name (str) -- the name of the network """ mean = 0.0 count = 0 for param in net.parameters(): if param.grad is not None: mean += torch.mean(torch.abs(param.grad.data)) count += 1 if count > 0: mean = mean / count print(name) print(mean) def save_image(image_numpy, image_path, aspect_ratio=1.0): """Save a numpy image to the disk Parameters: image_numpy (numpy array) -- input numpy array image_path (str) -- the path of the image """ image_pil = Image.fromarray(image_numpy) h, w, _ = image_numpy.shape if aspect_ratio > 1.0: image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC) if aspect_ratio < 1.0: image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC) image_pil.save(image_path) def print_numpy(x, val=True, shp=False): """Print the mean, min, max, median, std, and size of a numpy array Parameters: val (bool) -- if print the values of the numpy array shp (bool) -- if print the shape of the numpy array """ x = x.astype(np.float64) if shp: print('shape,', x.shape) if val: x = x.flatten() print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) def mkdirs(paths): """create empty directories if they don't exist Parameters: paths (str list) -- a list of directory paths """ if isinstance(paths, list) and not isinstance(paths, str): for path in paths: mkdir(path) else: mkdir(paths) def mkdir(path): """create a single empty directory if it didn't exist Parameters: path (str) -- a single directory path """ if not os.path.exists(path): os.makedirs(path)
file_length: 3,175 | avg_line_length: 29.538462 | max_line_length: 119 | extension_type: py
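A small round-trip sketch using tensor2im and save_image (the tensor shape and output path are illustrative assumptions):

import torch

fake = torch.tanh(torch.randn(1, 3, 256, 256))  # generator-style output in [-1, 1]
im = tensor2im(fake)                 # uint8 H x W x 3 array scaled to [0, 255]
save_image(im, '/tmp/fake.png')      # hypothetical output path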
repo: pytorch-CycleGAN-and-pix2pix
file: pytorch-CycleGAN-and-pix2pix-master/scripts/edges/batch_hed.py
# HED batch processing script; modified from https://github.com/s9xie/hed/blob/master/examples/hed/HED-tutorial.ipynb
# Step 1: download the hed repo: https://github.com/s9xie/hed
# Step 2: download the models and prototxt, and put them under {caffe_root}/examples/hed/
# Step 3: put this script under {caffe_root}/examples/hed/
# Step 4: run the following script:
#   python batch_hed.py --images_dir=/data/to/path/photos/ --hed_mat_dir=/data/to/path/hed_mat_files/
# The code sometimes crashes after the computation is done. The error looks like
# "Check failed: ... driver shutting down". You can just kill the job.
# Large images can cause GPU out-of-memory errors, so resize the images before running this script.
# Step 5: run the MATLAB post-processing script "PostprocessHED.m"
import caffe
import numpy as np
from PIL import Image
import os
import argparse
import sys
import scipy.io as sio


def parse_args():
    parser = argparse.ArgumentParser(description='batch processing: photos->edges')
    parser.add_argument('--caffe_root', dest='caffe_root', help='caffe root', default='../../', type=str)
    parser.add_argument('--caffemodel', dest='caffemodel', help='caffemodel', default='./hed_pretrained_bsds.caffemodel', type=str)
    parser.add_argument('--prototxt', dest='prototxt', help='caffe prototxt file', default='./deploy.prototxt', type=str)
    parser.add_argument('--images_dir', dest='images_dir', help='directory to store input photos', type=str)
    parser.add_argument('--hed_mat_dir', dest='hed_mat_dir', help='directory to store output hed edges in mat file', type=str)
    parser.add_argument('--border', dest='border', help='padding border', type=int, default=128)
    parser.add_argument('--gpu_id', dest='gpu_id', help='gpu id', type=int, default=1)
    args = parser.parse_args()
    return args


args = parse_args()
for arg in vars(args):
    print('[%s] =' % arg, getattr(args, arg))

# Make sure that caffe is on the python path:
caffe_root = args.caffe_root  # this file is expected to be in {caffe_root}/examples/hed/
sys.path.insert(0, caffe_root + 'python')

if not os.path.exists(args.hed_mat_dir):
    print('create output directory %s' % args.hed_mat_dir)
    os.makedirs(args.hed_mat_dir)

imgList = os.listdir(args.images_dir)
nImgs = len(imgList)
print('#images = %d' % nImgs)

caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)

# load net
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
# pad border
border = args.border

for i in range(nImgs):
    if i % 500 == 0:
        print('processing image %d/%d' % (i, nImgs))
    im = Image.open(os.path.join(args.images_dir, imgList[i]))

    in_ = np.array(im, dtype=np.float32)
    in_ = np.pad(in_, ((border, border), (border, border), (0, 0)), 'reflect')

    in_ = in_[:, :, 0:3]
    in_ = in_[:, :, ::-1]
    in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
    in_ = in_.transpose((2, 0, 1))

    # remove the following two lines if testing with cpu

    # shape for input (data blob is N x C x H x W), set data
    net.blobs['data'].reshape(1, *in_.shape)
    net.blobs['data'].data[...] = in_
    # run net and take argmax for prediction
    net.forward()
    fuse = net.blobs['sigmoid-fuse'].data[0][0, :, :]
    # get rid of the border
    fuse = fuse[(border + 35):(-border + 35), (border + 35):(-border + 35)]
    # save hed file to the disk
    name, ext = os.path.splitext(imgList[i])
    sio.savemat(os.path.join(args.hed_mat_dir, name + '.mat'), {'edge_predict': fuse})
file_length: 3,521 | avg_line_length: 41.95122 | max_line_length: 141 | extension_type: py
repo: pytorch-CycleGAN-and-pix2pix
file: pytorch-CycleGAN-and-pix2pix-master/scripts/eval_cityscapes/evaluate.py
import os
import caffe
import argparse
import numpy as np
import scipy.misc
from PIL import Image
from util import segrun, fast_hist, get_scores
from cityscapes import cityscapes

parser = argparse.ArgumentParser()
parser.add_argument("--cityscapes_dir", type=str, required=True, help="Path to the original cityscapes dataset")
parser.add_argument("--result_dir", type=str, required=True, help="Path to the generated images to be evaluated")
parser.add_argument("--output_dir", type=str, required=True, help="Where to save the evaluation results")
parser.add_argument("--caffemodel_dir", type=str, default='./scripts/eval_cityscapes/caffemodel/', help="Where the FCN-8s caffemodel is stored")
parser.add_argument("--gpu_id", type=int, default=0, help="Which gpu id to use")
parser.add_argument("--split", type=str, default='val', help="Data split to be evaluated")
parser.add_argument("--save_output_images", type=int, default=0, help="Whether to save the FCN output images")
args = parser.parse_args()


def main():
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)
    if args.save_output_images > 0:
        output_image_dir = args.output_dir + 'image_outputs/'
        if not os.path.isdir(output_image_dir):
            os.makedirs(output_image_dir)
    CS = cityscapes(args.cityscapes_dir)
    n_cl = len(CS.classes)
    label_frames = CS.list_label_frames(args.split)
    caffe.set_device(args.gpu_id)
    caffe.set_mode_gpu()
    net = caffe.Net(args.caffemodel_dir + '/deploy.prototxt',
                    args.caffemodel_dir + 'fcn-8s-cityscapes.caffemodel',
                    caffe.TEST)

    hist_perframe = np.zeros((n_cl, n_cl))
    for i, idx in enumerate(label_frames):
        if i % 10 == 0:
            print('Evaluating: %d/%d' % (i, len(label_frames)))
        city = idx.split('_')[0]  # idx is city_shot_frame
        label = CS.load_label(args.split, city, idx)
        im_file = args.result_dir + '/' + idx + '_leftImg8bit.png'
        im = np.array(Image.open(im_file))
        im = scipy.misc.imresize(im, (label.shape[1], label.shape[2]))
        # im = np.array(Image.fromarray(im).resize((label.shape[1], label.shape[2])))
        # Note: scipy.misc.imresize is deprecated, but we still use it for reproducibility.
        out = segrun(net, CS.preprocess(im))
        hist_perframe += fast_hist(label.flatten(), out.flatten(), n_cl)
        if args.save_output_images > 0:
            label_im = CS.palette(label)
            pred_im = CS.palette(out)
            scipy.misc.imsave(output_image_dir + '/' + str(i) + '_pred.jpg', pred_im)
            scipy.misc.imsave(output_image_dir + '/' + str(i) + '_gt.jpg', label_im)
            scipy.misc.imsave(output_image_dir + '/' + str(i) + '_input.jpg', im)

    mean_pixel_acc, mean_class_acc, mean_class_iou, per_class_acc, per_class_iou = get_scores(hist_perframe)
    with open(args.output_dir + '/evaluation_results.txt', 'w') as f:
        f.write('Mean pixel accuracy: %f\n' % mean_pixel_acc)
        f.write('Mean class accuracy: %f\n' % mean_class_acc)
        f.write('Mean class IoU: %f\n' % mean_class_iou)
        f.write('************ Per class numbers below ************\n')
        for i, cl in enumerate(CS.classes):
            while len(cl) < 15:
                cl = cl + ' '
            f.write('%s: acc = %f, iou = %f\n' % (cl, per_class_acc[i], per_class_iou[i]))


main()
file_length: 3,403 | avg_line_length: 47.628571 | max_line_length: 170 | extension_type: py
repo: pytorch-CycleGAN-and-pix2pix
file: pytorch-CycleGAN-and-pix2pix-master/data/colorization_dataset.py
import os
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from skimage import color  # requires scikit-image
from PIL import Image
import numpy as np
import torchvision.transforms as transforms


class ColorizationDataset(BaseDataset):
    """This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space.

    This dataset is required by pix2pix-based colorization model ('--model colorization')
    """
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        By default, the number of channels for input image is 1 (L) and
        the number of channels for output image is 2 (ab). The direction is from A to B
        """
        parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB')
        return parser

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir = os.path.join(opt.dataroot, opt.phase)
        self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
        assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB')
        self.transform = get_transform(self.opt, convert=False)

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) - - the L channel of an image
            B (tensor) - - the ab channels of the same image
            A_paths (str) - - image paths
            B_paths (str) - - image paths (same as A_paths)
        """
        path = self.AB_paths[index]
        im = Image.open(path).convert('RGB')
        im = self.transform(im)
        im = np.array(im)
        lab = color.rgb2lab(im).astype(np.float32)
        lab_t = transforms.ToTensor()(lab)
        A = lab_t[[0], ...] / 50.0 - 1.0
        B = lab_t[[1, 2], ...] / 110.0
        return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.AB_paths)
file_length: 2,717 | avg_line_length: 38.391304 | max_line_length: 141 | extension_type: py
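The dataset maps L from [0, 100] to [-1, 1] and ab from roughly [-110, 110] to [-1, 1]; a minimal sketch of inverting that normalization back to RGB (the function name is ours, not the repo's):

import numpy as np
from skimage import color

def lab_tensors_to_rgb(A, B):
    """Invert ColorizationDataset's normalization; A is 1xHxW, B is 2xHxW."""
    L = (A.numpy() + 1.0) * 50.0   # back to [0, 100]
    ab = B.numpy() * 110.0         # back to [-110, 110]
    lab = np.concatenate([L, ab], axis=0).transpose(1, 2, 0)
    return color.lab2rgb(lab.astype(np.float64))  # H x W x 3 float RGB in [0, 1]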
repo: pytorch-CycleGAN-and-pix2pix
file: pytorch-CycleGAN-and-pix2pix-master/data/base_dataset.py
"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets. It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses. """ import random import numpy as np import torch.utils.data as data from PIL import Image import torchvision.transforms as transforms from abc import ABC, abstractmethod class BaseDataset(data.Dataset, ABC): """This class is an abstract base class (ABC) for datasets. To create a subclass, you need to implement the following four functions: -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). -- <__len__>: return the size of dataset. -- <__getitem__>: get a data point. -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options. """ def __init__(self, opt): """Initialize the class; save the options in the class Parameters: opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions """ self.opt = opt self.root = opt.dataroot @staticmethod def modify_commandline_options(parser, is_train): """Add new dataset-specific options, and rewrite default values for existing options. Parameters: parser -- original option parser is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. Returns: the modified parser. """ return parser @abstractmethod def __len__(self): """Return the total number of images in the dataset.""" return 0 @abstractmethod def __getitem__(self, index): """Return a data point and its metadata information. Parameters: index - - a random integer for data indexing Returns: a dictionary of data with their names. It ususally contains the data itself and its metadata information. 
""" pass def get_params(opt, size): w, h = size new_h = h new_w = w if opt.preprocess == 'resize_and_crop': new_h = new_w = opt.load_size elif opt.preprocess == 'scale_width_and_crop': new_w = opt.load_size new_h = opt.load_size * h // w x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) flip = random.random() > 0.5 return {'crop_pos': (x, y), 'flip': flip} def get_transform(opt, params=None, grayscale=False, method=transforms.InterpolationMode.BICUBIC, convert=True): transform_list = [] if grayscale: transform_list.append(transforms.Grayscale(1)) if 'resize' in opt.preprocess: osize = [opt.load_size, opt.load_size] transform_list.append(transforms.Resize(osize, method)) elif 'scale_width' in opt.preprocess: transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))) if 'crop' in opt.preprocess: if params is None: transform_list.append(transforms.RandomCrop(opt.crop_size)) else: transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) if opt.preprocess == 'none': transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) if not opt.no_flip: if params is None: transform_list.append(transforms.RandomHorizontalFlip()) elif params['flip']: transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) if convert: transform_list += [transforms.ToTensor()] if grayscale: transform_list += [transforms.Normalize((0.5,), (0.5,))] else: transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] return transforms.Compose(transform_list) def __transforms2pil_resize(method): mapper = {transforms.InterpolationMode.BILINEAR: Image.BILINEAR, transforms.InterpolationMode.BICUBIC: Image.BICUBIC, transforms.InterpolationMode.NEAREST: Image.NEAREST, transforms.InterpolationMode.LANCZOS: Image.LANCZOS,} return mapper[method] def __make_power_2(img, base, method=transforms.InterpolationMode.BICUBIC): method = __transforms2pil_resize(method) ow, oh = img.size h = int(round(oh / base) * base) w = int(round(ow / base) * base) if h == oh and w == ow: return img __print_size_warning(ow, oh, w, h) return img.resize((w, h), method) def __scale_width(img, target_size, crop_size, method=transforms.InterpolationMode.BICUBIC): method = __transforms2pil_resize(method) ow, oh = img.size if ow == target_size and oh >= crop_size: return img w = target_size h = int(max(target_size * oh / ow, crop_size)) return img.resize((w, h), method) def __crop(img, pos, size): ow, oh = img.size x1, y1 = pos tw = th = size if (ow > tw or oh > th): return img.crop((x1, y1, x1 + tw, y1 + th)) return img def __flip(img, flip): if flip: return img.transpose(Image.FLIP_LEFT_RIGHT) return img def __print_size_warning(ow, oh, w, h): """Print warning information about image size(only print once)""" if not hasattr(__print_size_warning, 'has_printed'): print("The image size needs to be a multiple of 4. " "The loaded image size was (%d, %d), so it was adjusted to " "(%d, %d). This adjustment will be done to all images " "whose sizes are not multiples of 4" % (ow, oh, w, h)) __print_size_warning.has_printed = True
file_length: 5,895 | avg_line_length: 34.095238 | max_line_length: 141 | extension_type: py
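A minimal sketch of building a deterministic, repeatable transform with get_params/get_transform, e.g. to apply identical crops and flips to paired images (the SimpleNamespace opt is a hand-rolled stand-in carrying only the fields these functions read):

from types import SimpleNamespace
from PIL import Image
# from data.base_dataset import get_params, get_transform  # import path within the repo

opt = SimpleNamespace(preprocess='resize_and_crop', load_size=286,
                      crop_size=256, no_flip=False)

img = Image.new('RGB', (512, 384))        # placeholder image
params = get_params(opt, img.size)        # fixed crop position and flip decision
transform = get_transform(opt, params=params)
tensor = transform(img)                   # 3 x 256 x 256 tensor in [-1, 1]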
repo: pytorch-CycleGAN-and-pix2pix
file: pytorch-CycleGAN-and-pix2pix-master/data/image_folder.py
"""A modified image folder class We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) so that this class can load images from both current directory and its subdirectories. """ import torch.utils.data as data from PIL import Image import os IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif', '.TIF', '.tiff', '.TIFF', ] def is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def make_dataset(dir, max_dataset_size=float("inf")): images = [] assert os.path.isdir(dir), '%s is not a valid directory' % dir for root, _, fnames in sorted(os.walk(dir)): for fname in fnames: if is_image_file(fname): path = os.path.join(root, fname) images.append(path) return images[:min(max_dataset_size, len(images))] def default_loader(path): return Image.open(path).convert('RGB') class ImageFolder(data.Dataset): def __init__(self, root, transform=None, return_paths=False, loader=default_loader): imgs = make_dataset(root) if len(imgs) == 0: raise(RuntimeError("Found 0 images in: " + root + "\n" "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) self.root = root self.imgs = imgs self.transform = transform self.return_paths = return_paths self.loader = loader def __getitem__(self, index): path = self.imgs[index] img = self.loader(path) if self.transform is not None: img = self.transform(img) if self.return_paths: return img, path else: return img def __len__(self): return len(self.imgs)
file_length: 1,885 | avg_line_length: 27.575758 | max_line_length: 122 | extension_type: py
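A short usage sketch for the recursive ImageFolder (the directory path is a placeholder):

import torchvision.transforms as transforms

dataset = ImageFolder('/tmp/photos',              # hypothetical image directory
                      transform=transforms.ToTensor(),
                      return_paths=True)
img, path = dataset[0]   # files are discovered recursively under the root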
repo: pytorch-CycleGAN-and-pix2pix
file: pytorch-CycleGAN-and-pix2pix-master/data/__init__.py
"""This package includes all the modules related to data loading and preprocessing To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset. You need to implement four functions: -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). -- <__len__>: return the size of dataset. -- <__getitem__>: get a data point from data loader. -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options. Now you can use the dataset class by specifying flag '--dataset_mode dummy'. See our template dataset class 'template_dataset.py' for more details. """ import importlib import torch.utils.data from data.base_dataset import BaseDataset def find_dataset_using_name(dataset_name): """Import the module "data/[dataset_name]_dataset.py". In the file, the class called DatasetNameDataset() will be instantiated. It has to be a subclass of BaseDataset, and it is case-insensitive. """ dataset_filename = "data." + dataset_name + "_dataset" datasetlib = importlib.import_module(dataset_filename) dataset = None target_dataset_name = dataset_name.replace('_', '') + 'dataset' for name, cls in datasetlib.__dict__.items(): if name.lower() == target_dataset_name.lower() \ and issubclass(cls, BaseDataset): dataset = cls if dataset is None: raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name)) return dataset def get_option_setter(dataset_name): """Return the static method <modify_commandline_options> of the dataset class.""" dataset_class = find_dataset_using_name(dataset_name) return dataset_class.modify_commandline_options def create_dataset(opt): """Create a dataset given the option. This function wraps the class CustomDatasetDataLoader. This is the main interface between this package and 'train.py'/'test.py' Example: >>> from data import create_dataset >>> dataset = create_dataset(opt) """ data_loader = CustomDatasetDataLoader(opt) dataset = data_loader.load_data() return dataset class CustomDatasetDataLoader(): """Wrapper class of Dataset class that performs multi-threaded data loading""" def __init__(self, opt): """Initialize this class Step 1: create a dataset instance given the name [dataset_mode] Step 2: create a multi-threaded data loader. """ self.opt = opt dataset_class = find_dataset_using_name(opt.dataset_mode) self.dataset = dataset_class(opt) print("dataset [%s] was created" % type(self.dataset).__name__) self.dataloader = torch.utils.data.DataLoader( self.dataset, batch_size=opt.batch_size, shuffle=not opt.serial_batches, num_workers=int(opt.num_threads)) def load_data(self): return self def __len__(self): """Return the number of data in the dataset""" return min(len(self.dataset), self.opt.max_dataset_size) def __iter__(self): """Return a batch of data""" for i, data in enumerate(self.dataloader): if i * self.opt.batch_size >= self.opt.max_dataset_size: break yield data
file_length: 3,554 | avg_line_length: 36.819149 | max_line_length: 176 | extension_type: py
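A minimal iteration sketch over the loader returned by create_dataset (opt is assumed to come from the repo's options parser, which is not part of this file):

from data import create_dataset

# opt is assumed to be parsed elsewhere, e.g. by the repo's TrainOptions
dataset = create_dataset(opt)
print('number of images = %d' % len(dataset))

for i, batch in enumerate(dataset):   # CustomDatasetDataLoader is directly iterable
    print(batch['A'].shape)           # available keys depend on --dataset_mode
    if i >= 2:
        break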
repo: sign-topic
file: sign-topic-main/setup.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import subprocess
import sys

from setuptools import Extension, find_packages, setup

if sys.version_info < (3, 6):
    sys.exit("Sorry, Python >= 3.6 is required for fairseq.")


def write_version_py():
    with open(os.path.join("fairseq", "version.txt")) as f:
        version = f.read().strip()

    # append latest commit hash to version string
    try:
        sha = (
            subprocess.check_output(["git", "rev-parse", "HEAD"])
            .decode("ascii")
            .strip()
        )
        version += "+" + sha[:7]
    except Exception:
        pass

    # write version info to fairseq/version.py
    with open(os.path.join("fairseq", "version.py"), "w") as f:
        f.write('__version__ = "{}"\n'.format(version))
    return version


version = write_version_py()


with open("README.md") as f:
    readme = f.read()


if sys.platform == "darwin":
    extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
    extra_compile_args = ["-std=c++11", "-O3"]


class NumpyExtension(Extension):
    """Source: https://stackoverflow.com/a/54128391"""

    def __init__(self, *args, **kwargs):
        self.__include_dirs = []
        super().__init__(*args, **kwargs)

    @property
    def include_dirs(self):
        import numpy

        return self.__include_dirs + [numpy.get_include()]

    @include_dirs.setter
    def include_dirs(self, dirs):
        self.__include_dirs = dirs


extensions = [
    Extension(
        "fairseq.libbleu",
        sources=[
            "fairseq/clib/libbleu/libbleu.cpp",
            "fairseq/clib/libbleu/module.cpp",
        ],
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.data_utils_fast",
        sources=["fairseq/data/data_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.token_block_utils_fast",
        sources=["fairseq/data/token_block_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
]


cmdclass = {}


try:
    # torch is not available when generating docs
    from torch.utils import cpp_extension

    extensions.extend(
        [
            cpp_extension.CppExtension(
                "fairseq.libbase",
                sources=[
                    "fairseq/clib/libbase/balanced_assignment.cpp",
                ],
            )
        ]
    )

    extensions.extend(
        [
            cpp_extension.CppExtension(
                "fairseq.libnat",
                sources=[
                    "fairseq/clib/libnat/edit_dist.cpp",
                ],
            ),
            cpp_extension.CppExtension(
                "alignment_train_cpu_binding",
                sources=[
                    "examples/operators/alignment_train_cpu.cpp",
                ],
            ),
        ]
    )
    if "CUDA_HOME" in os.environ:
        extensions.extend(
            [
                cpp_extension.CppExtension(
                    "fairseq.libnat_cuda",
                    sources=[
                        "fairseq/clib/libnat_cuda/edit_dist.cu",
                        "fairseq/clib/libnat_cuda/binding.cpp",
                    ],
                ),
                cpp_extension.CppExtension(
                    "fairseq.ngram_repeat_block_cuda",
                    sources=[
                        "fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
                        "fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
                    ],
                ),
                cpp_extension.CppExtension(
                    "alignment_train_cuda_binding",
                    sources=[
                        "examples/operators/alignment_train_kernel.cu",
                        "examples/operators/alignment_train_cuda.cpp",
                    ],
                ),
            ]
        )
    cmdclass["build_ext"] = cpp_extension.BuildExtension

except ImportError:
    pass


if "READTHEDOCS" in os.environ:
    # don't build extensions when generating docs
    extensions = []
    if "build_ext" in cmdclass:
        del cmdclass["build_ext"]

    # use CPU build of PyTorch
    dependency_links = [
        "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
    ]
else:
    dependency_links = []


if "clean" in sys.argv[1:]:
    # Source: https://bit.ly/2NLVsgE
    print("deleting Cython files...")
    import subprocess

    subprocess.run(
        ["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
        shell=True,
    )


extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
    extra_packages.append("fairseq.model_parallel.megatron.mpu")


def do_setup(package_data):
    setup(
        name="fairseq",
        version=version,
        description="Facebook AI Research Sequence-to-Sequence Toolkit",
        url="https://github.com/pytorch/fairseq",
        classifiers=[
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: MIT License",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
        ],
        long_description=readme,
        long_description_content_type="text/markdown",
        setup_requires=[
            "cython",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "setuptools>=18.0",
        ],
        install_requires=[
            "cffi",
            "cython",
            'dataclasses; python_version<"3.7"',
            "hydra-core>=1.0.7,<1.1",
            "omegaconf<2.1",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "regex",
            "sacrebleu>=1.4.12",
            "torch",
            "tqdm",
            "bitarray",
            "torchaudio>=0.8.0",
            "sentencepiece",
            "fvcore",
        ],
        dependency_links=dependency_links,
        packages=find_packages(
            exclude=[
                "examples",
                "examples.*",
                "scripts",
                "scripts.*",
                "tests",
                "tests.*",
            ]
        )
        + extra_packages,
        package_data=package_data,
        ext_modules=extensions,
        test_suite="tests",
        entry_points={
            "console_scripts": [
                "fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
                "fairseq-generate = fairseq_cli.generate:cli_main",
                "fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
                "fairseq-interactive = fairseq_cli.interactive:cli_main",
                "fairseq-preprocess = fairseq_cli.preprocess:cli_main",
                "fairseq-score = fairseq_cli.score:cli_main",
                "fairseq-train = fairseq_cli.train:cli_main",
                "fairseq-validate = fairseq_cli.validate:cli_main",
            ],
        },
        cmdclass=cmdclass,
        zip_safe=False,
    )


def get_files(path, relative_to="fairseq"):
    all_files = []
    for root, _dirs, files in os.walk(path, followlinks=True):
        root = os.path.relpath(root, relative_to)
        for file in files:
            if file.endswith(".pyc"):
                continue
            all_files.append(os.path.join(root, file))
    return all_files


if __name__ == "__main__":
    try:
        # symlink examples into fairseq package so package_data accepts them
        fairseq_examples = os.path.join("fairseq", "examples")
        if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
            os.symlink(os.path.join("..", "examples"), fairseq_examples)

        package_data = {
            "fairseq": (
                get_files(fairseq_examples)
                + get_files(os.path.join("fairseq", "config"))
            )
        }
        do_setup(package_data)
    finally:
        if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
            os.unlink(fairseq_examples)
file_length: 8,427 | avg_line_length: 28.263889 | max_line_length: 92 | extension_type: py
repo: sign-topic
file: sign-topic-main/hubconf.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

import functools
import importlib


dependencies = [
    "dataclasses",
    "hydra",
    "numpy",
    "omegaconf",
    "regex",
    "requests",
    "torch",
]


# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
    try:
        importlib.import_module(dep)
    except ImportError:
        # Hack: the hydra package is provided under the "hydra-core" name in
        # pypi. We don't want the user mistakenly calling `pip install hydra`
        # since that will install an unrelated package.
        if dep == "hydra":
            dep = "hydra-core"
        missing_deps.append(dep)
if len(missing_deps) > 0:
    raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))


# only do fairseq imports after checking for dependencies
from fairseq.hub_utils import (  # noqa; noqa
    BPEHubInterface as bpe,
    TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY  # noqa


# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
    import fairseq.data.token_block_utils_fast  # noqa
except ImportError:
    try:
        import cython  # noqa
        import os

        from setuptools import sandbox

        sandbox.run_setup(
            os.path.join(os.path.dirname(__file__), "setup.py"),
            ["build_ext", "--inplace"],
        )
    except ImportError:
        print(
            "Unable to build Cython components. Please make sure Cython is "
            "installed if the torch.hub model you are loading depends on it."
        )


# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
    for model_name in _cls.hub_models().keys():
        globals()[model_name] = functools.partial(
            _cls.from_pretrained,
            model_name,
        )
file_length: 2,099 | avg_line_length: 27.378378 | max_line_length: 82 | extension_type: py
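A hedged sketch of what this hubconf enables through torch.hub (the model name below is one of fairseq's published hub models and is only illustrative; anything exposed via hub_models() works the same way):

import torch

# downloads and builds the model via the from_pretrained partials defined above
model = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de')
model.eval()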
repo: sign-topic
file: sign-topic-main/examples/truncated_bptt/transformer_xl_model.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional

import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
    FairseqIncrementalDecoder,
    FairseqLanguageModel,
    register_model,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from omegaconf import II


logger = logging.getLogger(__name__)


@dataclass
class TransformerXLConfig(FairseqDataclass):
    # defaults come from the original Transformer-XL code
    cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
    d_model: int = 500
    n_head: int = 10
    d_head: int = 50
    d_inner: int = 1000
    div_val: int = 1
    n_layer: int = 12
    mem_len: int = 0
    clamp_len: int = -1
    same_length: bool = False
    dropout: float = 0.0
    dropatt: float = 0.0
    checkpoint_activations: bool = False
    offload_activations: bool = False
    max_target_positions: int = II("task.max_target_positions")


@register_model("transformer_xl", dataclass=TransformerXLConfig)
class TransformerXLLanguageModel(FairseqLanguageModel):
    @classmethod
    def build_model(cls, cfg: TransformerXLConfig, task):
        return cls(TransformerXLDecoder(cfg, task))


class TransformerXLDecoder(FairseqIncrementalDecoder):
    def __init__(self, cfg, task):
        try:
            from transformers.models.transfo_xl import (
                TransfoXLConfig,
                TransfoXLLMHeadModel,
            )
        except ImportError:
            from transformers.configuration_transfo_xl import TransfoXLConfig
            from transformers.modeling_transfo_xl import TransfoXLLMHeadModel

        super().__init__(task.target_dictionary)
        self.cfg = cfg

        # remove any cutoffs larger than the vocab size
        cutoffs = [
            cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
        ]

        config = TransfoXLConfig(
            vocab_size=len(task.target_dictionary),
            cutoffs=cutoffs,
            d_model=cfg.d_model,
            d_embed=cfg.d_model,
            n_head=cfg.n_head,
            d_head=cfg.d_head,
            d_inner=cfg.d_inner,
            div_val=cfg.div_val,
            n_layer=cfg.n_layer,
            mem_len=cfg.mem_len,
            clamp_len=cfg.clamp_len,
            same_length=cfg.same_length,
            dropout=cfg.dropout,
            dropatt=cfg.dropatt,
        )
        logger.info(config)
        self.model = TransfoXLLMHeadModel(config)

        # Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax``
        # which adds ``None`` values to an ``nn.ParameterList``, which is not
        # supported in PyTorch. Instead we can replace this with an
        # ``nn.ModuleList``, which does support ``None`` values.
        try:
            if all(p is None for p in self.model.crit.out_projs._parameters.values()):
                self.model.crit.out_projs = torch.nn.ModuleList(
                    [None] * len(self.model.crit.out_projs._parameters)
                )
        except Exception:
            pass

        if cfg.checkpoint_activations or cfg.offload_activations:
            for i in range(len(self.model.transformer.layers)):
                self.model.transformer.layers[i] = checkpoint_wrapper(
                    self.model.transformer.layers[i],
                    offload_to_cpu=cfg.offload_activations,
                )
                # TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])

        self._mems = None

    def forward(
        self,
        src_tokens,
        src_lengths=None,  # unused
        incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
        encoder_out=None,
    ):
        if incremental_state is not None:  # used during inference
            mems = self.get_incremental_state(incremental_state, "mems")
            src_tokens = src_tokens[:, -1:]  # only keep the most recent token
        else:
            mems = self._mems

        output = self.model(
            input_ids=src_tokens,
            mems=mems,
            return_dict=False,
        )

        if len(output) >= 2:
            if incremental_state is not None:
                self.set_incremental_state(incremental_state, "mems", output[1])
            else:
                self._mems = output[1]

        return (output[0],)

    def max_positions(self):
        return self.cfg.max_target_positions

    def reorder_incremental_state(
        self,
        incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
        new_order: torch.Tensor,
    ):
        """Reorder incremental state.

        This will be called when the order of the input has changed from the
        previous time step. A typical use case is beam search, where the input
        order changes between time steps based on the selection of beams.
        """
        mems = self.get_incremental_state(incremental_state, "mems")
        if mems is not None:
            new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
            self.set_incremental_state(incremental_state, "mems", new_mems)
file_length: 5,324 | avg_line_length: 33.134615 | max_line_length: 86 | extension_type: py
repo: sign-topic
file: sign-topic-main/examples/truncated_bptt/truncated_bptt_lm_task.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import os
from dataclasses import dataclass, field
from typing import List, Optional, Tuple

import torch
from fairseq import utils
from fairseq.data import (
    Dictionary,
    TokenBlockDataset,
    data_utils,
    iterators,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed import utils as dist_utils
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II


logger = logging.getLogger(__name__)


@dataclass
class TruncatedBPTTLMConfig(FairseqDataclass):
    data: str = field(default="???", metadata={"help": "path to data directory"})
    tokens_per_sample: int = field(
        default=1024,
        metadata={"help": "max number of tokens per sequence"},
    )
    batch_size: int = II("dataset.batch_size")
    # Some models use *max_target_positions* to know how many positional
    # embeddings to learn. We use II(...) to make it default to
    # *tokens_per_sample*, but in principle there could be more positional
    # embeddings than tokens in a single batch. This may also be irrelevant for
    # custom model implementations.
    max_target_positions: int = II("task.tokens_per_sample")
    # these will be populated automatically if not provided
    data_parallel_rank: Optional[int] = None
    data_parallel_size: Optional[int] = None


@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig)
class TruncatedBPTTLMTask(FairseqTask):
    def __init__(self, cfg: TruncatedBPTTLMConfig):
        super().__init__(cfg)

        if cfg.data_parallel_rank is None or cfg.data_parallel_size is None:
            if torch.distributed.is_initialized():
                cfg.data_parallel_rank = dist_utils.get_data_parallel_rank()
                cfg.data_parallel_size = dist_utils.get_data_parallel_world_size()
            else:
                cfg.data_parallel_rank = 0
                cfg.data_parallel_size = 1

        # load the dictionary
        paths = utils.split_paths(cfg.data)
        assert len(paths) > 0
        self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
        logger.info("dictionary: {} types".format(len(self.dictionary)))

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split (e.g., train, valid, test)"""

        # support sharded datasets
        paths = utils.split_paths(self.cfg.data)
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]
        split_path = os.path.join(data_path, split)

        # each element of *data* will be a tensorized line from the original
        # text dataset, similar to ``open(split_path).readlines()``
        data = data_utils.load_indexed_dataset(
            split_path, self.dictionary, combine=combine
        )
        if data is None:
            raise FileNotFoundError(
                "Dataset not found: {} ({})".format(split, split_path)
            )

        # this is similar to ``data.view(-1).split(tokens_per_sample)``
        data = TokenBlockDataset(
            data,
            data.sizes,
            block_size=self.cfg.tokens_per_sample,
            pad=None,  # unused
            eos=None,  # unused
            break_mode="none",
        )

        self.datasets[split] = TruncatedBPTTDataset(
            data=data,
            bsz_per_shard=self.cfg.batch_size,
            shard_id=self.cfg.data_parallel_rank,
            num_shards=self.cfg.data_parallel_size,
        )

    def dataset(self, split):
        return self.datasets[split]

    def get_batch_iterator(
        self,
        dataset,
        num_workers=0,
        epoch=1,
        data_buffer_size=0,
        skip_remainder_batch=False,
        **kwargs
    ):
        return iterators.EpochBatchIterator(
            dataset=dataset,
            collate_fn=self._collate_fn,
            num_workers=num_workers,
            epoch=epoch,
            buffer_size=data_buffer_size,
            # we don't use the batching functionality from EpochBatchIterator;
            # instead every item in *dataset* is a whole batch
            batch_sampler=[[i] for i in range(len(dataset))],
            disable_shuffling=True,
            skip_remainder_batch=skip_remainder_batch,
        )

    def _collate_fn(self, items: List[List[torch.Tensor]]):
        # we don't use fairseq's batching functionality, so we expect a single
        # Tensor of type List[torch.Tensor]
        assert len(items) == 1

        # item will have shape B x T (the last batch may have length < T)
        id, item = items[0]
        item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad())
        B, T = item.size()

        # shift item one position over and append a padding token for the target
        target = torch.nn.functional.pad(
            item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad()
        )

        # fairseq expects batches to have the following structure
        return {
            "id": torch.tensor([id] * item.size(0)),
            "net_input": {"src_tokens": item},
            "target": target,
            "nsentences": item.size(0),
            "ntokens": item.numel(),
        }

    def build_dataset_for_inference(
        self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
    ) -> torch.utils.data.Dataset:
        eos = self.source_dictionary.eos()
        dataset = TokenBlockDataset(
            src_tokens,
            src_lengths,
            block_size=None,  # ignored for "eos" break mode
            pad=self.source_dictionary.pad(),
            eos=eos,
            break_mode="eos",
        )

        class Dataset(torch.utils.data.Dataset):
            def __getitem__(self, i):
                item = dataset[i]
                if item[-1] == eos:
                    # remove eos to support generating with a prefix
                    item = item[:-1]
                return (i, [item])

            def __len__(self):
                return len(dataset)

        return Dataset()

    def inference_step(
        self, generator, models, sample, prefix_tokens=None, constraints=None
    ):
        with torch.no_grad():
            if constraints is not None:
                raise NotImplementedError

            # SequenceGenerator doesn't use *src_tokens* directly, we need to
            # pass the *prefix_tokens* argument instead.
            if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
                prefix_tokens = sample["net_input"]["src_tokens"]

            # begin generation with the end-of-sentence token
            bos_token = self.source_dictionary.eos()

            return generator.generate(
                models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
            )

    def eval_lm_dataloader(
        self,
        dataset,
        max_tokens: Optional[int] = 36000,
        batch_size: Optional[int] = None,
        max_positions: Optional[int] = None,
        num_shards: int = 1,
        shard_id: int = 0,
        num_workers: int = 1,
        data_buffer_size: int = 10,
        context_window: int = 0,
    ):
        if context_window > 0:
            raise NotImplementedError(
                "Transformer-XL doesn't need --context-window, try "
                "--model-overrides '{\"mem_len\":42}' instead "
            )
        return self.get_batch_iterator(
            dataset=dataset,
            max_tokens=max_tokens,
            max_sentences=batch_size,
            max_positions=max_positions,
            ignore_invalid_inputs=True,
            num_shards=num_shards,
            shard_id=shard_id,
            num_workers=num_workers,
            data_buffer_size=data_buffer_size,
        ).next_epoch_itr(shuffle=False)

    @property
    def source_dictionary(self):
        return self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary


class TruncatedBPTTDataset(torch.utils.data.Dataset):
    def __init__(
        self,
        data: List[torch.Tensor],  # ordered list of items
        bsz_per_shard,  # number of items processed per GPUs per forward
        shard_id,  # current GPU ID
        num_shards,  # number of GPUs
    ):
        super().__init__()
        self.data = data

        def batchify(data, bsz):
            # Work out how cleanly we can divide the dataset into bsz parts.
            nbatch = data.size(0) // bsz
            # Trim off any extra elements that wouldn't cleanly fit (remainders).
            data = data.narrow(0, 0, nbatch * bsz)
            # Evenly divide the data across the bsz batches.
            data = data.view(bsz, -1).contiguous()
            return data

        # total number of sequences processed by all GPUs in each forward pass
        global_batch_size = bsz_per_shard * num_shards

        """
        With a 16 item dataset, bsz_per_shard=2 and num_shards=3,
        *indices* might look like:

            indices = [[0, 1],
                       [2, 3],
                       [4, 5],
                       [6, 7],
                       [8, 9],
                       [10, 11]]

        The size of the TruncatedBPTTDataset instance will be 2,
        and shard 1 will see items:

            [(0, [data[4], data[6]]),
             (1, [data[5], data[7]])]
        """
        indices = batchify(torch.arange(len(data)), global_batch_size)
        assert indices.size(0) == global_batch_size

        self.my_indices = indices[
            shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard
        ]
        assert self.my_indices.size(0) == bsz_per_shard

    def __len__(self):
        return self.my_indices.size(1)

    def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]:
        return (i, [self.data[idx] for idx in self.my_indices[:, i]])
file_length: 9,995 | avg_line_length: 33.951049 | max_line_length: 86 | extension_type: py
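A standalone check of the batchify logic mirrored from TruncatedBPTTDataset.__init__, reproducing the docstring's 16-item example (names are local to this sketch):

import torch

def batchify(data, bsz):
    nbatch = data.size(0) // bsz             # how many full columns fit
    data = data.narrow(0, 0, nbatch * bsz)   # trim the remainder
    return data.view(bsz, -1).contiguous()

indices = batchify(torch.arange(16), bsz=6)  # global batch size 2 * 3
print(indices.tolist())
# [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]] -- items 12-15 are trimmed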
repo: sign-topic
file: sign-topic-main/examples/linformer/linformer_src/modules/multihead_linear_attention.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import Dict, Optional, Tuple

import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter


@with_incremental_state
class MultiheadLinearAttention(nn.Module):
    """Multi-headed linformer attention.

    Projects the key and values down to the compressed dimension, before computing self-attention.

    See "Linformer: Self-Attention with Linear Complexity" for more details.
    """

    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        q_noise=0.0,
        qn_block_size=8,
        compressed=1,
        max_seq_len=256,
        shared_kv_compressed=0,
        shared_compress_layer=None,
        freeze_compress=0,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5

        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention

        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and "
            "value to be of the same size"
        )

        self.k_proj = quant_noise(
            nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
        )
        self.v_proj = quant_noise(
            nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
        )
        self.q_proj = quant_noise(
            nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
        )

        # used for compress sequence to subsequence
        if shared_compress_layer is None:
            self.compress_seq_len = max_seq_len // compressed
            self.compress_k = nn.Linear(max_seq_len, self.compress_seq_len, bias=False)
            if shared_kv_compressed == 0:
                self.compress_v = nn.Linear(
                    max_seq_len, self.compress_seq_len, bias=False
                )
            self.layerwise_sharing = False
        else:
            self.compress_k = shared_compress_layer
            if shared_kv_compressed == 0:
                self.compress_v = shared_compress_layer
            self.layerwise_sharing = True
        self.shared_kv_compressed = shared_kv_compressed

        self.out_proj = quant_noise(
            nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
        )

        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self.reset_parameters()

        if freeze_compress == 1:
            self.compress_k.weight.requires_grad = False
            if shared_kv_compressed == 0:
                self.compress_v.weight.requires_grad = False

        self.onnx_trace = False

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def reset_parameters(self):
        if self.qkv_same_dim:
            # Empirically observed the convergence to be much better with
            # the scaled initialization
            nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
            if not self.layerwise_sharing:  # otherwise, we already initialize the parameters
                nn.init.xavier_uniform_(self.compress_k.weight, gain=1 / math.sqrt(2))
                if self.shared_kv_compressed == 0:
                    nn.init.xavier_uniform_(
                        self.compress_v.weight, gain=1 / math.sqrt(2)
                    )
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)
            if not self.layerwise_sharing:  # otherwise, we already initialize the parameters
                nn.init.xavier_uniform_(self.compress_k.weight)
                if self.shared_kv_compressed == 0:
                    nn.init.xavier_uniform_(self.compress_v.weight)

        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def forward(
        self,
        query,
        key: Optional[Tensor],
        value: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        need_weights: bool = True,
        static_kv: bool = False,
        attn_mask: Optional[Tensor] = None,
        before_softmax: bool = False,
        need_head_weights: bool = False,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time x Batch x Channel

        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True

        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]

        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and "prev_key" in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None

        if self.self_attention:
            q = self.q_proj(query)

            k_input = query.permute(1, 2, 0).contiguous()  # B * C * T
            k_input = (
                F.linear(k_input, self.compress_k.weight[:, 0:tgt_len])
                .permute(2, 0, 1)
                .contiguous()
            )
            k = self.k_proj(k_input)

            v_input = query.permute(1, 2, 0).contiguous()  # B * C * T
            if self.shared_kv_compressed == 0:
                v_input = (
                    F.linear(v_input, self.compress_v.weight[:, 0:tgt_len])
                    .permute(2, 0, 1)
                    .contiguous()
                )
            if self.shared_kv_compressed == 1:  # use shared kv compressed linear layer
                v_input = (
                    F.linear(v_input, self.compress_k.weight[:, 0:tgt_len])
                    .permute(2, 0, 1)
                    .contiguous()
                )
            v = self.v_proj(v_input)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.q_proj(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)
        else:
            assert key is not None and value is not None
            q = self.q_proj(query)
            k = self.k_proj(key)
            v = self.v_proj(value)
        q *= self.scaling

        if self.bias_k is not None:
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
                    ],
                    dim=1,
                )

        q = (
            q.contiguous()
            .view(tgt_len, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )
        if k is not None:
            k = (
                k.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )

        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if "prev_key" in saved_state:
                _prev_key = saved_state["prev_key"]
                assert _prev_key is not None
                prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    assert k is not None
                    k = torch.cat([prev_key, k], dim=1)
            if "prev_value" in saved_state:
                _prev_value = saved_state["prev_value"]
                assert _prev_value is not None
                prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    assert v is not None
                    v = torch.cat([prev_value, v], dim=1)
            prev_key_padding_mask: Optional[Tensor] = None
            if "prev_key_padding_mask" in saved_state:
                prev_key_padding_mask = saved_state["prev_key_padding_mask"]
            assert k is not None and v is not None
            key_padding_mask = MultiheadLinearAttention._append_prev_key_padding_mask(
                key_padding_mask=key_padding_mask,
                prev_key_padding_mask=prev_key_padding_mask,
                batch_size=bsz,
                src_len=k.size(1),
                static_kv=static_kv,
            )

            saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_key_padding_mask"] = key_padding_mask
            # In this branch incremental_state is never None
            assert incremental_state is not None
            incremental_state = self._set_input_buffer(incremental_state, saved_state)
        assert k is not None
        src_len = k.size(1)

        if self.add_zero_attn:
            assert v is not None
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = MultiheadLinearAttention.apply_sparse_mask(
            attn_weights, tgt_len, src_len, bsz
        )

        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            if self.onnx_trace:
                attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
            attn_weights += attn_mask

        if before_softmax:
            return attn_weights, v

        attn_weights_float = utils.softmax(
            attn_weights, dim=-1, onnx_trace=self.onnx_trace
        )
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(
            attn_weights,
            p=self.dropout,
            training=self.training,
        )
        assert v is not None
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        if self.onnx_trace and attn.size(1) == 1:
            # when ONNX tracing a single decoder step (sequence length == 1)
            # the transpose is a no-op copy before view, thus unnecessary
            attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        attn_weights: Optional[Tensor] = None
        if need_weights:
            attn_weights = attn_weights_float.view(
                bsz, self.num_heads, tgt_len, src_len
            ).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)

        return attn, attn_weights

    @staticmethod
    def _append_prev_key_padding_mask(
        key_padding_mask: Optional[Tensor],
        prev_key_padding_mask: Optional[Tensor],
        batch_size: int,
        src_len: int,
        static_kv: bool,
    ) -> Optional[Tensor]:
        # saved key padding masks have shape (bsz, seq_len)
        if prev_key_padding_mask is not None and static_kv:
            new_key_padding_mask = prev_key_padding_mask
        elif prev_key_padding_mask is not None and key_padding_mask is not None:
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
            )
        # During incremental decoding, as the padding token enters and
        # leaves the frame, there will be a time when prev or current
        # is None
        elif prev_key_padding_mask is not None:
            filler = torch.zeros(
                (batch_size, src_len - prev_key_padding_mask.size(1)),
                device=prev_key_padding_mask.device,
            )
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), filler.float()], dim=1
            )
        elif key_padding_mask is not None:
            filler = torch.zeros(
                (batch_size, src_len - key_padding_mask.size(1)),
                device=key_padding_mask.device,
            )
            new_key_padding_mask = torch.cat(
                [filler.float(), key_padding_mask.float()], dim=1
            )
        else:
            new_key_padding_mask = prev_key_padding_mask
        return new_key_padding_mask

    @torch.jit.export
    def reorder_incremental_state(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        new_order: Tensor,
    ):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer_k = input_buffer[k]
                if input_buffer_k is not None:
                    if self.encoder_decoder_attention and input_buffer_k.size(
                        0
                    ) == new_order.size(0):
                        break
                    input_buffer[k] = input_buffer_k.index_select(0, new_order)
            incremental_state = self._set_input_buffer(incremental_state, input_buffer)
        return incremental_state

    def _get_input_buffer(
        self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
    ) -> Dict[str, Optional[Tensor]]:
        result = self.get_incremental_state(incremental_state, "attn_state")
        if result is not None:
            return result
        else:
            empty_result: Dict[str, Optional[Tensor]] = {}
            return empty_result

    def _set_input_buffer(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        buffer: Dict[str, Optional[Tensor]],
    ):
        return self.set_incremental_state(incremental_state, "attn_state", buffer)

    def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
        return attn_weights

    def upgrade_state_dict_named(self, state_dict, name):
        prefix = name + "." if name != "" else ""
        items_to_add = {}
        keys_to_remove = []
        for k in state_dict.keys():
            if k.endswith(prefix + "in_proj_weight"):
                # in_proj_weight used to be q + k + v with same dimensions
                dim = int(state_dict[k].shape[0] / 3)
                items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
                items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
                items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]

                keys_to_remove.append(k)

                k_bias = prefix + "in_proj_bias"
                if k_bias in state_dict.keys():
                    dim = int(state_dict[k].shape[0] / 3)
                    items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
                    items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
                        dim : 2 * dim
                    ]
                    items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]

                    keys_to_remove.append(prefix + "in_proj_bias")

        for k in keys_to_remove:
            del state_dict[k]

        for key, value in items_to_add.items():
            state_dict[key] = value
19,151
38.73444
98
py
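A minimal standalone sketch of the idea behind the MultiheadLinearAttention module above: Linformer projects the sequence axis of K (and V) down with a learned linear map before attention, so the attention map is n x (n/ratio) instead of n x n. All names and sizes below are illustrative placeholders, not the fairseq API.

import torch
import torch.nn as nn

# Illustrative sizes; the learned n -> n/ratio projection is the Linformer trick.
bsz, seq_len, embed_dim, ratio = 2, 64, 32, 4
x = torch.randn(bsz, seq_len, embed_dim)

compress_k = nn.Linear(seq_len, seq_len // ratio)   # acts on the time axis
q = x
k = compress_k(x.transpose(1, 2)).transpose(1, 2)   # (2, 16, 32)
attn = torch.softmax(q @ k.transpose(1, 2) / embed_dim ** 0.5, dim=-1)
print(attn.shape)                                   # torch.Size([2, 64, 16])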
sign-topic
sign-topic-main/examples/linformer/linformer_src/modules/linformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import torch.nn as nn
from fairseq.models.transformer import TransformerEncoder

from .linformer_sentence_encoder_layer import LinformerTransformerEncoderLayer


class LinformerTransformerEncoder(TransformerEncoder):
    """
    Implementation for a Bi-directional Linformer based Sentence Encoder used
    in BERT/XLM style pre-trained models.

    This first computes the token embedding using the token embedding matrix,
    position embeddings (if specified) and segment embeddings
    (if specified). After applying the specified number of
    LinformerEncoderLayers, it outputs all the internal states of the
    encoder as well as the final representation associated with the first
    token (usually CLS token).

    Input:
        - tokens: B x T matrix representing sentences
        - segment_labels: B x T matrix representing segment label for tokens

    Output:
        - a tuple of the following:
            - a list of internal model states used to compute the
              predictions where each tensor has shape T x B x C
            - sentence representation associated with first input token in
              format B x C.
    """

    def __init__(self, args, dictionary, embed_tokens):
        self.compress_layer = None
        super().__init__(args, dictionary, embed_tokens)

    def build_encoder_layer(self, args):
        if self.args.shared_layer_kv_compressed == 1 and self.compress_layer is None:
            compress_layer = nn.Linear(
                self.args.max_positions,
                self.args.max_positions // self.args.compressed,
            )
            # initialize parameters for compressed layer
            nn.init.xavier_uniform_(compress_layer.weight, gain=1 / math.sqrt(2))
            if self.args.freeze_compress == 1:
                compress_layer.weight.requires_grad = False
            self.compress_layer = compress_layer

        return LinformerTransformerEncoderLayer(args, self.compress_layer)
2,151
38.127273
85
py
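A hedged sketch (placeholder sizes) of the lazy sharing in build_encoder_layer above: the first call creates a single max_positions -> max_positions/ratio projection, and every subsequent layer reuses the same module object.

import math

import torch.nn as nn

# Illustrative sizes; the point is that one shared module is built once.
max_positions, ratio, num_layers = 512, 4, 6
compress_layer = None
per_layer = []
for _ in range(num_layers):
    if compress_layer is None:
        compress_layer = nn.Linear(max_positions, max_positions // ratio)
        nn.init.xavier_uniform_(compress_layer.weight, gain=1 / math.sqrt(2))
    per_layer.append(compress_layer)
assert all(layer is per_layer[0] for layer in per_layer)  # one shared module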
sign-topic
sign-topic-main/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from fairseq.modules import TransformerEncoderLayer from .multihead_linear_attention import MultiheadLinearAttention class LinformerTransformerEncoderLayer(TransformerEncoderLayer): """ Implements a Linformer Encoder Layer used in BERT/XLM style pre-trained models. """ def __init__(self, args, shared_compress_layer): # wrap in a list so it's not automatically registered by PyTorch self.shared_compress_layer = [shared_compress_layer] super().__init__(args) self.register_buffer("version", torch.tensor(2)) def build_self_attention(self, embed_dim, args): return MultiheadLinearAttention( embed_dim, args.encoder_attention_heads, dropout=args.dropout, self_attention=True, q_noise=args.quant_noise_pq, qn_block_size=args.quant_noise_pq_block_size, compressed=args.compressed, max_seq_len=args.max_positions, shared_kv_compressed=args.shared_kv_compressed, shared_compress_layer=self.shared_compress_layer[0], freeze_compress=args.freeze_compress, ) def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + "." if name != "" else "" # some old checkpoints had weight sharing implemented incorrectly # (note: this was correct in the original paper code) if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2: state_dict[f"{prefix}version"] = torch.tensor(1) # check compression layer sharing if f"{prefix}shared_compress_layer.weight" in state_dict: # reinitialize block without sharing compression layer to match # old behavior self.shared_compress_layer = [ torch.nn.Linear( self.shared_compress_layer[0].weight.size(1), self.shared_compress_layer[0].weight.size(0), ) ] self.self_attn = self.build_self_attention(self.embed_dim, self.args) # delete shared_compress_layer, since it's already copied to # self_attn.compress_k.weight del state_dict[f"{prefix}shared_compress_layer.weight"] if f"{prefix}shared_compress_layer.bias" in state_dict: del state_dict[f"{prefix}shared_compress_layer.bias"]
2,743
40.575758
85
py
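A standalone demo of the list-wrapping trick used above: assigning an nn.Module attribute would register the shared compression weights on every layer (duplicating them in each state dict), while a plain list hides the module from nn.Module's bookkeeping. Class and attribute names are illustrative.

import torch.nn as nn

class Layer(nn.Module):
    def __init__(self, shared):
        super().__init__()
        self.shared = [shared]        # invisible to named_parameters()
        self.own = nn.Linear(4, 4)

layer = Layer(nn.Linear(512, 128))
print([name for name, _ in layer.named_parameters()])  # only own.weight, own.bias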
sign-topic
sign-topic-main/examples/linformer/linformer_src/models/linformer_roberta.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Linformer: Self-Attention with Linear Complexity """ import logging import torch from fairseq import utils from fairseq.models import register_model, register_model_architecture from fairseq.models.roberta import ( init_bert_params, roberta_base_architecture, roberta_large_architecture, RobertaEncoder, RobertaModel, ) from fairseq.utils import safe_hasattr from ..modules.linformer_sentence_encoder import LinformerTransformerEncoder logger = logging.getLogger(__name__) @register_model("linformer_roberta") class LinformerModel(RobertaModel): @staticmethod def add_args(parser): RobertaModel.add_args(parser) # add args for Linformer parser.add_argument( "--compressed", type=int, help="compressed ratio of sequence length" ) parser.add_argument( "--shared-kv-compressed", type=int, help="share compressed matrix between k and v, in each layer", ) parser.add_argument( "--shared-layer-kv-compressed", type=int, help="share compressed matrix between k and v and across all layers", ) parser.add_argument( "--freeze-compress", type=int, help="freeze the parameters in compressed layer", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present base_architecture(args) if not safe_hasattr(args, "max_positions"): args.max_positions = args.tokens_per_sample encoder = LinformerEncoder(args, task.source_dictionary) return cls(args, encoder) class LinformerEncoder(RobertaEncoder): """Linformer encoder.""" def __init__(self, args, dictionary): super().__init__(args, dictionary) self.register_buffer("version", torch.tensor(2)) def build_encoder(self, args, dictionary, embed_tokens): encoder = LinformerTransformerEncoder(args, dictionary, embed_tokens) encoder.apply(init_bert_params) return encoder def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) prefix = name + "." if name != "" else "" # some old checkpoints had weight sharing implemented incorrectly # (note: this was correct in the original paper code) if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2: state_dict[f"{prefix}version"] = torch.tensor(1) # check if input embeddings and output embeddings were tied if not torch.allclose( state_dict[f"{prefix}sentence_encoder.embed_tokens.weight"], state_dict[f"{prefix}lm_head.weight"], ): # they weren't tied, re-init the LM head without weight sharing self.lm_head = self.build_lm_head( embed_dim=self.args.encoder_embed_dim, output_dim=len(self.dictionary), activation_fn=self.args.activation_fn, weight=None, # don't share weights ) @register_model_architecture("linformer_roberta", "linformer_roberta") def base_architecture(args): args.compressed = getattr(args, "compressed", 4) args.shared_kv_compressed = getattr(args, "shared_kv_compressed", 0) args.shared_layer_kv_compressed = getattr(args, "shared_layer_kv_compressed", 0) args.freeze_compress = getattr(args, "freeze_compress", 0) roberta_base_architecture(args) @register_model_architecture("linformer_roberta", "linformer_roberta_base") def linformer_roberta_base_architecture(args): base_architecture(args) @register_model_architecture("linformer_roberta", "linformer_roberta_large") def linformer_roberta_large_architecture(args): roberta_large_architecture(args) base_architecture(args)
4,143
33.247934
84
py
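A hedged illustration (made-up tensors) of the torch.allclose check in upgrade_state_dict_named above: identical embedding and LM-head weights mean the tie can be kept, while any drift means the head was trained untied and is rebuilt with weight=None.

import torch
import torch.nn as nn

embed = nn.Embedding(100, 16)
tied_head = embed.weight                               # shared storage
print(torch.allclose(embed.weight, tied_head))         # True -> keep the tie
untied_head = embed.weight.detach().clone() + 1e-3     # simulated drift
print(torch.allclose(embed.weight, untied_head))       # False -> re-init head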
sign-topic
sign-topic-main/examples/wav2vec/vq-wav2vec_featurize.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset """ import argparse import glob import os import os.path as osp import pprint import soundfile as sf import torch import fairseq from torch import nn from torch.utils.data import DataLoader try: import tqdm except: print("Install tqdm to use --log-format=tqdm") class FilesDataset: def __init__(self, files, labels): self.files = files if labels and osp.exists(labels): with open(labels, "r") as lbl_f: self.labels = [line.rstrip() for line in lbl_f] else: self.labels = labels def __len__(self): return len(self.files) def __getitem__(self, index): fname = self.files[index] wav, sr = sf.read(fname) assert sr == 16000 wav = torch.from_numpy(wav).float() lbls = None if self.labels: if isinstance(self.labels, str): lbl_file = osp.splitext(fname)[0] + "." + self.labels with open(lbl_file, "r") as lblf: lbls = lblf.readline() assert lbls is not None else: lbls = self.labels[index] return wav, lbls def collate(self, batch): return batch class ArgTypes: @staticmethod def existing_path(arg): arg = str(arg) assert osp.exists(arg), f"File {arg} does not exist" return arg @staticmethod def mkdir(arg): arg = str(arg) os.makedirs(arg, exist_ok=True) return arg class DatasetWriter: def __init__(self): self.args = self.load_config() pprint.pprint(self.args.__dict__) self.model = self.load_model() def __getattr__(self, attr): return getattr(self.args, attr) def read_manifest(self, fname): with open(fname, "r") as fp: lines = fp.read().split("\n") root = lines.pop(0).strip() fnames = [ osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0 ] return fnames def process_splits(self): if self.args.shard is not None or self.args.num_shards is not None: assert self.args.shard is not None and self.args.num_shards is not None for split in self.splits: print(split) if self.extension == "tsv": datadir = osp.join(self.data_dir, f"{split}.{self.extension}") print("Reading manifest file: ", datadir) files = self.read_manifest(datadir) else: datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}") files = glob.glob(datadir, recursive=True) assert len(files) > 0 if self.args.shard is not None: files = files[self.args.shard :: self.args.num_shards] lbls = [] with open(self.data_file(split), "w") as srcf: for line, lbl in self.iterate(files): print(line, file=srcf) if self.args.labels: lbls.append(lbl + "\n") if self.args.labels: assert all(a is not None for a in lbls) with open(self.lbl_file(split), "w") as lblf: lblf.writelines(lbls) def iterate(self, files): data = self.load_data(files) for samples in tqdm.tqdm(data, total=len(files) // 32): for wav, lbl in samples: x = wav.unsqueeze(0).float().cuda() div = 1 while x.size(-1) // div > self.args.max_size: div += 1 xs = x.chunk(div, dim=-1) result = [] for x in xs: torch.cuda.empty_cache() x = self.model.feature_extractor(x) if self.quantize_location == "encoder": with torch.no_grad(): _, idx = self.model.vector_quantizer.forward_idx(x) idx = idx.squeeze(0).cpu() else: with torch.no_grad(): z = self.model.feature_aggregator(x) _, idx = self.model.vector_quantizer.forward_idx(z) idx = idx.squeeze(0).cpu() result.append(idx) idx = torch.cat(result, dim=0) yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl def lbl_file(self, name): shard_part = 
"" if self.args.shard is None else f".{self.args.shard}" return osp.join(self.output_dir, f"{name}.lbl{shard_part}") def data_file(self, name): shard_part = "" if self.args.shard is None else f".{self.args.shard}" return osp.join(self.output_dir, f"{name}.src{shard_part}") def var_file(self): return osp.join(self.output_dir, f"vars.pt") def load_config(self): parser = argparse.ArgumentParser("Vector Quantized wav2vec features") # Model Arguments parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True) parser.add_argument("--data-parallel", action="store_true") # Output Arguments parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True) # Data Arguments parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True) parser.add_argument("--splits", type=str, nargs="+", required=True) parser.add_argument("--extension", type=str, required=True) parser.add_argument("--labels", type=str, required=False) parser.add_argument("--shard", type=int, default=None) parser.add_argument("--num-shards", type=int, default=None) parser.add_argument("--max-size", type=int, default=1300000) # Logger Arguments parser.add_argument( "--log-format", type=str, choices=["none", "simple", "tqdm"] ) return parser.parse_args() def load_data(self, fnames): dataset = FilesDataset(fnames, self.args.labels) loader = DataLoader( dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8 ) return loader def load_model(self): model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([self.checkpoint]) model = model[0] self.quantize_location = getattr(cfg.model, "vq", "encoder") model.eval().float() model.cuda() if self.data_parallel: model = nn.DataParallel(model) return model def __call__(self): self.process_splits() if hasattr(self.model.feature_extractor, "vars") and ( self.args.shard is None or self.args.shard == 0 ): vars = ( self.model.feature_extractor.vars.view( self.model.feature_extractor.banks, self.model.feature_extractor.num_vars, -1, ) .cpu() .detach() ) print("writing learned latent variable embeddings: ", vars.shape) torch.save(vars, self.var_file()) if __name__ == "__main__": write_data = DatasetWriter() write_data() print("Done.")
7,680
29.601594
99
py
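A standalone demo of the size cap in DatasetWriter.iterate above: div grows until every chunk of the waveform fits under --max-size, then the audio is split along time and quantized piecewise. The numbers here are illustrative.

import torch

max_size = 1300
x = torch.randn(1, 5000)
div = 1
while x.size(-1) // div > max_size:
    div += 1
chunks = x.chunk(div, dim=-1)
print(div, [c.size(-1) for c in chunks])  # 4 [1250, 1250, 1250, 1250]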
sign-topic
sign-topic-main/examples/wav2vec/wav2vec_featurize.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset """ import argparse import glob import os from shutil import copy import h5py import numpy as np import soundfile as sf import torch import tqdm import fairseq from torch import nn def read_audio(fname): """ Load an audio file and return PCM along with the sample rate """ wav, sr = sf.read(fname) assert sr == 16e3 return wav, 16e3 class PretrainedWav2VecModel(nn.Module): def __init__(self, fname): super().__init__() model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fname]) model = model[0] model.eval() self.model = model def forward(self, x): with torch.no_grad(): z = self.model.feature_extractor(x) if isinstance(z, tuple): z = z[0] c = self.model.feature_aggregator(z) return z, c class EmbeddingWriterConfig(argparse.ArgumentParser): def __init__(self): super().__init__("Pre-compute embeddings for flashlight datasets") kwargs = {"action": "store", "type": str, "required": True} self.add_argument("--input", "-i", help="Input Directory", **kwargs) self.add_argument("--output", "-o", help="Output Directory", **kwargs) self.add_argument("--model", help="Path to model checkpoint", **kwargs) self.add_argument("--split", help="Dataset Splits", nargs="+", **kwargs) self.add_argument( "--ext", default="wav", required=False, help="Audio file extension" ) self.add_argument( "--no-copy-labels", action="store_true", help="Do not copy label files. Useful for large datasets, use --targetdir in flashlight then.", ) self.add_argument( "--use-feat", action="store_true", help="Use the feature vector ('z') instead of context vector ('c') for features", ) self.add_argument("--gpu", help="GPU to use", default=0, type=int) class Prediction: """ Lightweight wrapper around a fairspeech embedding model """ def __init__(self, fname, gpu=0): self.gpu = gpu self.model = PretrainedWav2VecModel(fname).cuda(gpu) def __call__(self, x): x = torch.from_numpy(x).float().cuda(self.gpu) with torch.no_grad(): z, c = self.model(x.unsqueeze(0)) return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy() class H5Writer: """ Write features as hdf5 file in flashlight compatible format """ def __init__(self, fname): self.fname = fname os.makedirs(os.path.dirname(self.fname), exist_ok=True) def write(self, data): channel, T = data.shape with h5py.File(self.fname, "w") as out_ds: data = data.T.flatten() out_ds["features"] = data out_ds["info"] = np.array([16e3 // 160, T, channel]) class EmbeddingDatasetWriter(object): """Given a model and a flashlight dataset, pre-compute and store embeddings Args: input_root, str : Path to the flashlight dataset output_root, str : Desired output directory. 
Will be created if non-existent split, str : Dataset split """ def __init__( self, input_root, output_root, split, model_fname, extension="wav", gpu=0, verbose=False, use_feat=False, ): assert os.path.exists(model_fname) self.model_fname = model_fname self.model = Prediction(self.model_fname, gpu) self.input_root = input_root self.output_root = output_root self.split = split self.verbose = verbose self.extension = extension self.use_feat = use_feat assert os.path.exists(self.input_path), "Input path '{}' does not exist".format( self.input_path ) def _progress(self, iterable, **kwargs): if self.verbose: return tqdm.tqdm(iterable, **kwargs) return iterable def require_output_path(self, fname=None): path = self.get_output_path(fname) os.makedirs(path, exist_ok=True) @property def input_path(self): return self.get_input_path() @property def output_path(self): return self.get_output_path() def get_input_path(self, fname=None): if fname is None: return os.path.join(self.input_root, self.split) return os.path.join(self.get_input_path(), fname) def get_output_path(self, fname=None): if fname is None: return os.path.join(self.output_root, self.split) return os.path.join(self.get_output_path(), fname) def copy_labels(self): self.require_output_path() labels = list( filter( lambda x: self.extension not in x, glob.glob(self.get_input_path("*")) ) ) for fname in tqdm.tqdm(labels): copy(fname, self.output_path) @property def input_fnames(self): return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension)))) def __len__(self): return len(self.input_fnames) def write_features(self): paths = self.input_fnames fnames_context = map( lambda x: os.path.join( self.output_path, x.replace("." + self.extension, ".h5context") ), map(os.path.basename, paths), ) for name, target_fname in self._progress( zip(paths, fnames_context), total=len(self) ): wav, sr = read_audio(name) z, c = self.model(wav) feat = z if self.use_feat else c writer = H5Writer(target_fname) writer.write(feat) def __repr__(self): return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format( n_files=len(self), **self.__dict__ ) if __name__ == "__main__": args = EmbeddingWriterConfig().parse_args() for split in args.split: writer = EmbeddingDatasetWriter( input_root=args.input, output_root=args.output, split=split, model_fname=args.model, gpu=args.gpu, extension=args.ext, use_feat=args.use_feat, ) print(writer) writer.require_output_path() print("Writing Features...") writer.write_features() print("Done.") if not args.no_copy_labels: print("Copying label data...") writer.copy_labels() print("Done.")
7,020
27.084
135
py
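A sketch of the flashlight-compatible layout H5Writer produces above: a (C, T) feature matrix is stored time-major and flattened, next to a [frame_rate, T, C] info record (16 kHz audio with a 160-sample stride gives 100 frames/s). Shapes are placeholders.

import numpy as np

channel, T = 512, 100
feats = np.random.rand(channel, T).astype(np.float32)
flat = feats.T.flatten()                     # time-major, shape (T * C,)
info = np.array([16e3 // 160, T, channel])   # [100., 100., 512.]
print(flat.shape, info)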
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/w2vu_generate.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Run inference for pre-processed data with a trained model.
"""

import ast
from collections import namedtuple
from dataclasses import dataclass, field
from enum import Enum, auto
import hydra
from hydra.core.config_store import ConfigStore
import logging
import math
import os
from omegaconf import OmegaConf
from typing import Optional
import sys

import editdistance
import torch

from hydra.core.hydra_config import HydraConfig

from fairseq import checkpoint_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import FairseqDataclass, FairseqConfig
from fairseq.logging.meters import StopwatchMeter
from omegaconf import open_dict

from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoderConfig

logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)


class DecoderType(Enum):
    VITERBI = auto()
    KENLM = auto()
    FAIRSEQ = auto()
    KALDI = auto()


@dataclass
class UnsupGenerateConfig(FairseqDataclass):
    fairseq: FairseqConfig = FairseqConfig()
    lm_weight: float = field(
        default=2.0,
        metadata={"help": "language model weight"},
    )
    w2l_decoder: DecoderType = field(
        default=DecoderType.VITERBI,
        metadata={"help": "type of decoder to use"},
    )
    kaldi_decoder_config: Optional[KaldiDecoderConfig] = None
    lexicon: Optional[str] = field(
        default=None,
        metadata={
            "help": "path to lexicon. This is also used to 'phonemize' for unsupervised param tuning"
        },
    )
    lm_model: Optional[str] = field(
        default=None,
        metadata={"help": "path to language model (kenlm or fairseq)"},
    )
    unit_lm: bool = field(
        default=False,
        metadata={"help": "whether to use unit lm"},
    )
    beam_threshold: float = field(
        default=50.0,
        metadata={"help": "beam score threshold"},
    )
    beam_size_token: float = field(
        default=100.0,
        metadata={"help": "max tokens per beam"},
    )
    beam: int = field(
        default=5,
        metadata={"help": "decoder beam size"},
    )
    nbest: int = field(
        default=1,
        metadata={"help": "number of results to return"},
    )
    word_score: float = field(
        default=1.0,
        metadata={"help": "word score to add at end of word"},
    )
    unk_weight: float = field(
        default=-math.inf,
        metadata={"help": "unknown token weight"},
    )
    sil_weight: float = field(
        default=0.0,
        metadata={"help": "silence token weight"},
    )
    targets: Optional[str] = field(
        default=None,
        metadata={"help": "extension of ground truth labels to compute UER"},
    )
    results_path: Optional[str] = field(
        default=None,
        metadata={"help": "where to store results"},
    )
    post_process: Optional[str] = field(
        default=None,
        metadata={"help": "how to post process results"},
    )
    vocab_usage_power: float = field(
        default=2,
        metadata={"help": "for unsupervised param tuning"},
    )
    viterbi_transcript: Optional[str] = field(
        default=None,
        metadata={"help": "for unsupervised param tuning"},
    )
    min_lm_ppl: float = field(
        default=0,
        metadata={"help": "for unsupervised param tuning"},
    )
    min_vt_uer: float = field(
        default=0,
        metadata={"help": "for unsupervised param tuning"},
    )
    blank_weight: float = field(
        default=0,
        metadata={"help": "value to add or set for blank emission"},
    )
    blank_mode: str = field(
        default="set",
        metadata={
            "help": "can be add or set, how to modify blank emission with blank weight"
        },
    )
    sil_is_blank: bool = field(
        default=False,
        metadata={"help": "if true, <SIL> token is same as blank token"},
    )
    unsupervised_tuning: bool = field(
        default=False,
        metadata={
            "help": "if true, returns a score based on unsupervised param selection metric instead of UER"
        },
    )
    is_ax: bool = field(
        default=False,
        metadata={
            "help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
        },
    )


def get_dataset_itr(cfg, task):
    return task.get_batch_iterator(
        dataset=task.dataset(cfg.fairseq.dataset.gen_subset),
        max_tokens=cfg.fairseq.dataset.max_tokens,
        max_sentences=cfg.fairseq.dataset.batch_size,
        max_positions=(sys.maxsize, sys.maxsize),
        ignore_invalid_inputs=cfg.fairseq.dataset.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=cfg.fairseq.dataset.required_batch_size_multiple,
        num_shards=cfg.fairseq.dataset.num_shards,
        shard_id=cfg.fairseq.dataset.shard_id,
        num_workers=cfg.fairseq.dataset.num_workers,
        data_buffer_size=cfg.fairseq.dataset.data_buffer_size,
    ).next_epoch_itr(shuffle=False)


def process_predictions(
    cfg: UnsupGenerateConfig,
    hypos,
    tgt_dict,
    target_tokens,
    res_files,
):
    retval = []
    word_preds = []
    transcriptions = []
    dec_scores = []

    for i, hypo in enumerate(hypos[: min(len(hypos), cfg.nbest)]):
        if torch.is_tensor(hypo["tokens"]):
            tokens = hypo["tokens"].int().cpu()
            tokens = tokens[tokens >= tgt_dict.nspecial]
            hyp_pieces = tgt_dict.string(tokens)
        else:
            hyp_pieces = " ".join(hypo["tokens"])

        if "words" in hypo and len(hypo["words"]) > 0:
            hyp_words = " ".join(hypo["words"])
        else:
            hyp_words = post_process(hyp_pieces, cfg.post_process)

        to_write = {}
        if res_files is not None:
            to_write[res_files["hypo.units"]] = hyp_pieces
            to_write[res_files["hypo.words"]] = hyp_words

        tgt_words = ""
        if target_tokens is not None:
            if isinstance(target_tokens, str):
                tgt_pieces = tgt_words = target_tokens
            else:
                tgt_pieces = tgt_dict.string(target_tokens)
                tgt_words = post_process(tgt_pieces, cfg.post_process)

            if res_files is not None:
                to_write[res_files["ref.units"]] = tgt_pieces
                to_write[res_files["ref.words"]] = tgt_words

        if not cfg.fairseq.common_eval.quiet:
            logger.info(f"HYPO {i}: " + hyp_words)
            if tgt_words:
                logger.info("TARGET: " + tgt_words)

            if "am_score" in hypo and "lm_score" in hypo:
                logger.info(
                    f"DECODER AM SCORE: {hypo['am_score']}, DECODER LM SCORE: {hypo['lm_score']}, DECODER SCORE: {hypo['score']}"
                )
            elif "score" in hypo:
                logger.info(f"DECODER SCORE: {hypo['score']}")

            logger.info("___________________")

        hyp_words_arr = hyp_words.split()
        tgt_words_arr = tgt_words.split()

        retval.append(
            (
                editdistance.eval(hyp_words_arr, tgt_words_arr),
                len(hyp_words_arr),
                len(tgt_words_arr),
                hyp_pieces,
                hyp_words,
            )
        )
        word_preds.append(hyp_words_arr)
        transcriptions.append(to_write)
        dec_scores.append(-hypo.get("score", 0))  # negate because kaldi returns NLL

    if len(retval) > 1:
        best = None
        for r, t in zip(retval, transcriptions):
            if best is None or r[0] < best[0][0]:
                best = r, t

        for dest, tran in best[1].items():
            print(tran, file=dest)
            dest.flush()
        return best[0]

    assert len(transcriptions) == 1
    for dest, tran in transcriptions[0].items():
        print(tran, file=dest)

    return retval[0]


def prepare_result_files(cfg: UnsupGenerateConfig):
    def get_res_file(file_prefix):
        if cfg.fairseq.dataset.num_shards > 1:
            file_prefix = f"{cfg.fairseq.dataset.shard_id}_{file_prefix}"
        path = os.path.join(
            cfg.results_path,
            "{}{}.txt".format(
                cfg.fairseq.dataset.gen_subset,
                file_prefix,
            ),
        )
        return open(path, "w", buffering=1)

    if not cfg.results_path:
        return None

    return {
        "hypo.words": get_res_file(""),
        "hypo.units": get_res_file("_units"),
        "ref.words": get_res_file("_ref"),
        "ref.units":
get_res_file("_ref_units"), "hypo.nbest.words": get_res_file("_nbest_words"), } def optimize_models(cfg: UnsupGenerateConfig, use_cuda, models): """Optimize ensemble for generation""" for model in models: model.eval() if cfg.fairseq.common.fp16: model.half() if use_cuda: model.cuda() GenResult = namedtuple( "GenResult", [ "count", "errs_t", "gen_timer", "lengths_hyp_unit_t", "lengths_hyp_t", "lengths_t", "lm_score_t", "num_feats", "num_sentences", "num_symbols", "vt_err_t", "vt_length_t", ], ) def generate(cfg: UnsupGenerateConfig, models, saved_cfg, use_cuda): task = tasks.setup_task(cfg.fairseq.task) saved_cfg.task.labels = cfg.fairseq.task.labels task.load_dataset(cfg.fairseq.dataset.gen_subset, task_cfg=saved_cfg.task) # Set dictionary tgt_dict = task.target_dictionary logger.info( "| {} {} {} examples".format( cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset, len(task.dataset(cfg.fairseq.dataset.gen_subset)), ) ) # Load dataset (possibly sharded) itr = get_dataset_itr(cfg, task) # Initialize generator gen_timer = StopwatchMeter() def build_generator(cfg: UnsupGenerateConfig): w2l_decoder = cfg.w2l_decoder if w2l_decoder == DecoderType.VITERBI: from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder return W2lViterbiDecoder(cfg, task.target_dictionary) elif w2l_decoder == DecoderType.KENLM: from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder return W2lKenLMDecoder(cfg, task.target_dictionary) elif w2l_decoder == DecoderType.FAIRSEQ: from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder return W2lFairseqLMDecoder(cfg, task.target_dictionary) elif w2l_decoder == DecoderType.KALDI: from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoder assert cfg.kaldi_decoder_config is not None return KaldiDecoder( cfg.kaldi_decoder_config, cfg.beam, ) else: raise NotImplementedError( "only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment but found " + str(w2l_decoder) ) generator = build_generator(cfg) kenlm = None fairseq_lm = None if cfg.lm_model is not None: import kenlm kenlm = kenlm.Model(cfg.lm_model) num_sentences = 0 if cfg.results_path is not None and not os.path.exists(cfg.results_path): os.makedirs(cfg.results_path) res_files = prepare_result_files(cfg) errs_t = 0 lengths_hyp_t = 0 lengths_hyp_unit_t = 0 lengths_t = 0 count = 0 num_feats = 0 all_hyp_pieces = [] all_hyp_words = [] num_symbols = ( len([s for s in tgt_dict.symbols if not s.startswith("madeup")]) - tgt_dict.nspecial ) targets = None if cfg.targets is not None: tgt_path = os.path.join( cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset + "." 
+ cfg.targets ) if os.path.exists(tgt_path): with open(tgt_path, "r") as f: targets = f.read().splitlines() viterbi_transcript = None if cfg.viterbi_transcript is not None and len(cfg.viterbi_transcript) > 0: logger.info(f"loading viterbi transcript from {cfg.viterbi_transcript}") with open(cfg.viterbi_transcript, "r") as vf: viterbi_transcript = vf.readlines() viterbi_transcript = [v.rstrip().split() for v in viterbi_transcript] gen_timer.start() start = 0 end = len(itr) hypo_futures = None if cfg.w2l_decoder == DecoderType.KALDI: logger.info("Extracting features") hypo_futures = [] samples = [] with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t: for i, sample in enumerate(t): if "net_input" not in sample or i < start or i >= end: continue if "padding_mask" not in sample["net_input"]: sample["net_input"]["padding_mask"] = None hypos, num_feats = gen_hypos( generator, models, num_feats, sample, task, use_cuda ) hypo_futures.append(hypos) samples.append(sample) itr = list(zip(hypo_futures, samples)) start = 0 end = len(itr) logger.info("Finished extracting features") with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t: for i, sample in enumerate(t): if i < start or i >= end: continue if hypo_futures is not None: hypos, sample = sample hypos = [h.result() for h in hypos] else: if "net_input" not in sample: continue hypos, num_feats = gen_hypos( generator, models, num_feats, sample, task, use_cuda ) for i, sample_id in enumerate(sample["id"].tolist()): if targets is not None: target_tokens = targets[sample_id] elif "target" in sample or "target_label" in sample: toks = ( sample["target"][i, :] if "target_label" not in sample else sample["target_label"][i, :] ) target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu() else: target_tokens = None # Process top predictions ( errs, length_hyp, length, hyp_pieces, hyp_words, ) = process_predictions( cfg, hypos[i], tgt_dict, target_tokens, res_files, ) errs_t += errs lengths_hyp_t += length_hyp lengths_hyp_unit_t += ( len(hyp_pieces) if len(hyp_pieces) > 0 else len(hyp_words) ) lengths_t += length count += 1 all_hyp_pieces.append(hyp_pieces) all_hyp_words.append(hyp_words) num_sentences += ( sample["nsentences"] if "nsentences" in sample else sample["id"].numel() ) lm_score_sum = 0 if kenlm is not None: if cfg.unit_lm: lm_score_sum = sum(kenlm.score(w) for w in all_hyp_pieces) else: lm_score_sum = sum(kenlm.score(w) for w in all_hyp_words) elif fairseq_lm is not None: lm_score_sum = sum(fairseq_lm.score([h.split() for h in all_hyp_words])[0]) vt_err_t = 0 vt_length_t = 0 if viterbi_transcript is not None: unit_hyps = [] if cfg.targets is not None and cfg.lexicon is not None: lex = {} with open(cfg.lexicon, "r") as lf: for line in lf: items = line.rstrip().split() lex[items[0]] = items[1:] for h in all_hyp_pieces: hyp_ws = [] for w in h.split(): assert w in lex, w hyp_ws.extend(lex[w]) unit_hyps.append(hyp_ws) else: unit_hyps.extend([h.split() for h in all_hyp_words]) vt_err_t = sum( editdistance.eval(vt, h) for vt, h in zip(viterbi_transcript, unit_hyps) ) vt_length_t = sum(len(h) for h in viterbi_transcript) if res_files is not None: for r in res_files.values(): r.close() gen_timer.stop(lengths_hyp_t) return GenResult( count, errs_t, gen_timer, lengths_hyp_unit_t, lengths_hyp_t, lengths_t, lm_score_sum, num_feats, num_sentences, num_symbols, vt_err_t, vt_length_t, ) def gen_hypos(generator, models, num_feats, sample, task, use_cuda): sample = utils.move_to_cuda(sample) if use_cuda else sample if "features" in 
sample["net_input"]: sample["net_input"]["dense_x_only"] = True num_feats += ( sample["net_input"]["features"].shape[0] * sample["net_input"]["features"].shape[1] ) hypos = task.inference_step(generator, models, sample, None) return hypos, num_feats def main(cfg: UnsupGenerateConfig, model=None): if ( cfg.fairseq.dataset.max_tokens is None and cfg.fairseq.dataset.batch_size is None ): cfg.fairseq.dataset.max_tokens = 1024000 use_cuda = torch.cuda.is_available() and not cfg.fairseq.common.cpu task = tasks.setup_task(cfg.fairseq.task) overrides = ast.literal_eval(cfg.fairseq.common_eval.model_overrides) if cfg.fairseq.task._name == "unpaired_audio_text": overrides["model"] = { "blank_weight": cfg.blank_weight, "blank_mode": cfg.blank_mode, "blank_is_sil": cfg.sil_is_blank, "no_softmax": True, "segmentation": { "type": "NONE", }, } else: overrides["model"] = { "blank_weight": cfg.blank_weight, "blank_mode": cfg.blank_mode, } if model is None: # Load ensemble logger.info("| loading model(s) from {}".format(cfg.fairseq.common_eval.path)) models, saved_cfg = checkpoint_utils.load_model_ensemble( cfg.fairseq.common_eval.path.split("\\"), arg_overrides=overrides, task=task, suffix=cfg.fairseq.checkpoint.checkpoint_suffix, strict=(cfg.fairseq.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.fairseq.checkpoint.checkpoint_shard_count, ) optimize_models(cfg, use_cuda, models) else: models = [model] saved_cfg = cfg.fairseq with open_dict(saved_cfg.task): saved_cfg.task.shuffle = False saved_cfg.task.sort_by_length = False gen_result = generate(cfg, models, saved_cfg, use_cuda) wer = None if gen_result.lengths_t > 0: wer = gen_result.errs_t * 100.0 / gen_result.lengths_t logger.info(f"WER: {wer}") lm_ppl = float("inf") if gen_result.lm_score_t != 0 and gen_result.lengths_hyp_t > 0: hyp_len = gen_result.lengths_hyp_t lm_ppl = math.pow( 10, -gen_result.lm_score_t / (hyp_len + gen_result.num_sentences) ) logger.info(f"LM PPL: {lm_ppl}") logger.info( "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}" " sentences/s, {:.2f} tokens/s)".format( gen_result.num_sentences, gen_result.gen_timer.n, gen_result.gen_timer.sum, gen_result.num_sentences / gen_result.gen_timer.sum, 1.0 / gen_result.gen_timer.avg, ) ) vt_diff = None if gen_result.vt_length_t > 0: vt_diff = gen_result.vt_err_t / gen_result.vt_length_t vt_diff = max(cfg.min_vt_uer, vt_diff) lm_ppl = max(cfg.min_lm_ppl, lm_ppl) if not cfg.unsupervised_tuning == 0: weighted_score = wer else: weighted_score = math.log(lm_ppl) * (vt_diff or 1.0) res = ( f"| Generate {cfg.fairseq.dataset.gen_subset} with beam={cfg.beam}, " f"lm_weight={cfg.kaldi_decoder_config.acoustic_scale if cfg.kaldi_decoder_config else cfg.lm_weight}, " f"word_score={cfg.word_score}, sil_weight={cfg.sil_weight}, blank_weight={cfg.blank_weight}, " f"WER: {wer}, LM_PPL: {lm_ppl}, num feats: {gen_result.num_feats}, " f"length: {gen_result.lengths_hyp_t}, UER to viterbi: {(vt_diff or 0) * 100}, score: {weighted_score}" ) logger.info(res) # print(res) return task, weighted_score @hydra.main( config_path=os.path.join("../../..", "fairseq", "config"), config_name="config" ) def hydra_main(cfg): with open_dict(cfg): # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) cfg.job_logging_cfg = OmegaConf.to_container( HydraConfig.get().job_logging, resolve=True ) cfg = OmegaConf.create( OmegaConf.to_container(cfg, resolve=False, enum_to_str=False) ) OmegaConf.set_struct(cfg, True) logger.info(cfg) utils.import_user_module(cfg.fairseq.common) 
_, score = main(cfg) if cfg.is_ax: return score, None return score def cli_main(): try: from hydra._internal.utils import get_args cfg_name = get_args().config_name or "config" except: logger.warning("Failed to get config name from hydra args") cfg_name = "config" cs = ConfigStore.instance() cs.store(name=cfg_name, node=UnsupGenerateConfig) hydra_main() if __name__ == "__main__": cli_main()
22,210
30.371469
129
py
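A worked example of the LM perplexity formula in main() above: kenlm scores are summed log10 probabilities, and one end-of-sentence token per hypothesis is added to the hypothesis length. All numbers below are made up.

import math

lm_score_t = -220.0    # summed log10 probability over all hypotheses
lengths_hyp_t = 95     # total hypothesis words
num_sentences = 5
lm_ppl = math.pow(10, -lm_score_t / (lengths_hyp_t + num_sentences))
print(round(lm_ppl, 2))  # 158.49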
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/models/wav2vec_u.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass from enum import Enum, auto import math import numpy as np from typing import Tuple, List, Optional, Dict import torch import torch.nn as nn import torch.nn.functional as F from torch import autograd from fairseq import checkpoint_utils, utils from fairseq.dataclass import FairseqDataclass from fairseq.models import BaseFairseqModel, register_model from fairseq.modules import ( SamePad, TransposeLast, ) class SegmentationType(Enum): NONE = auto() RANDOM = auto() UNIFORM_RANDOM = auto() UNIFORM_RANDOM_JOIN = auto() JOIN = auto() @dataclass class SegmentationConfig(FairseqDataclass): type: SegmentationType = SegmentationType.NONE subsample_rate: float = 0.25 mean_pool: bool = True mean_pool_join: bool = False remove_zeros: bool = False @dataclass class Wav2vec_UConfig(FairseqDataclass): discriminator_kernel: int = 3 discriminator_dilation: int = 1 discriminator_dim: int = 256 discriminator_causal: bool = True discriminator_linear_emb: bool = False discriminator_depth: int = 1 discriminator_max_pool: bool = False discriminator_act_after_linear: bool = False discriminator_dropout: float = 0.0 discriminator_spectral_norm: bool = False discriminator_weight_norm: bool = False generator_kernel: int = 4 generator_dilation: int = 1 generator_stride: int = 1 generator_bias: bool = False generator_dropout: float = 0.0 blank_weight: float = 0 blank_mode: str = "add" blank_is_sil: bool = False no_softmax: bool = False smoothness_weight: float = 0.0 smoothing: float = 0.0 smoothing_one_sided: bool = False gradient_penalty: float = 0.0 probabilistic_grad_penalty_slicing: bool = False code_penalty: float = 0.0 gumbel: bool = False hard_gumbel: bool = True temp: Tuple[float, float, float] = (2, 0.1, 0.99995) input_dim: int = 128 segmentation: SegmentationConfig = SegmentationConfig() class Segmenter(nn.Module): cfg: SegmentationConfig def __init__(self, cfg: SegmentationConfig): super().__init__() self.cfg = cfg self.subsample_rate = cfg.subsample_rate def pre_segment(self, dense_x, dense_padding_mask): return dense_x, dense_padding_mask def logit_segment(self, logits, padding_mask): return logits, padding_mask class RandomSegmenter(Segmenter): def pre_segment(self, dense_x, dense_padding_mask): target_num = math.ceil(dense_x.size(1) * self.subsample_rate) ones = torch.ones(dense_x.shape[:-1], device=dense_x.device) indices, _ = ones.multinomial(target_num).sort(dim=-1) indices_ld = indices.unsqueeze(-1).expand(-1, -1, dense_x.size(-1)) dense_x = dense_x.gather(1, indices_ld) dense_padding_mask = dense_padding_mask.gather(1, index=indices) return dense_x, dense_padding_mask class UniformRandomSegmenter(Segmenter): def pre_segment(self, dense_x, dense_padding_mask): bsz, tsz, fsz = dense_x.shape target_num = math.ceil(tsz * self.subsample_rate) rem = tsz % target_num if rem > 0: dense_x = F.pad(dense_x, [0, 0, 0, target_num - rem]) dense_padding_mask = F.pad( dense_padding_mask, [0, target_num - rem], value=True ) dense_x = dense_x.view(bsz, target_num, -1, fsz) dense_padding_mask = dense_padding_mask.view(bsz, target_num, -1) if self.cfg.mean_pool: dense_x = dense_x.mean(dim=-2) dense_padding_mask = dense_padding_mask.all(dim=-1) else: ones = torch.ones((bsz, dense_x.size(2)), device=dense_x.device) indices = ones.multinomial(1) indices = indices.unsqueeze(-1).expand(-1, target_num, -1) indices_ld = 
indices.unsqueeze(-1).expand(-1, -1, -1, fsz) dense_x = dense_x.gather(2, indices_ld).reshape(bsz, -1, fsz) dense_padding_mask = dense_padding_mask.gather(2, index=indices).reshape( bsz, -1 ) return dense_x, dense_padding_mask class JoinSegmenter(Segmenter): def logit_segment(self, logits, padding_mask): preds = logits.argmax(dim=-1) if padding_mask.any(): preds[padding_mask] = -1 # mark pad uniques = [] bsz, tsz, csz = logits.shape for p in preds: uniques.append( p.cpu().unique_consecutive(return_inverse=True, return_counts=True) ) new_tsz = max(u[0].numel() for u in uniques) new_logits = logits.new_zeros(bsz, new_tsz, csz) new_pad = padding_mask.new_zeros(bsz, new_tsz) for b in range(bsz): u, idx, c = uniques[b] keep = u != -1 if self.cfg.remove_zeros: keep.logical_and_(u != 0) if self.training and not self.cfg.mean_pool_join: u[0] = 0 u[1:] = c.cumsum(0)[:-1] m = c > 1 r = torch.rand(m.sum()) o = (c[m] * r).long() u[m] += o new_logits[b, : u.numel()] = logits[b, u] else: new_logits[b].index_add_( dim=0, index=idx.to(new_logits.device), source=logits[b] ) new_logits[b, : c.numel()] /= c.unsqueeze(-1).to(new_logits.device) new_sz = keep.sum() if not keep.all(): kept_logits = new_logits[b, : c.numel()][keep] new_logits[b, :new_sz] = kept_logits if new_sz < new_tsz: pad = new_tsz - new_sz new_logits[b, -pad:] = 0 new_pad[b, -pad:] = True return new_logits, new_pad class UniformRandomJoinSegmenter(UniformRandomSegmenter, JoinSegmenter): pass SEGMENT_FACTORY = { SegmentationType.NONE: Segmenter, SegmentationType.RANDOM: RandomSegmenter, SegmentationType.UNIFORM_RANDOM: UniformRandomSegmenter, SegmentationType.UNIFORM_RANDOM_JOIN: UniformRandomJoinSegmenter, SegmentationType.JOIN: JoinSegmenter, } class Discriminator(nn.Module): def __init__(self, dim, cfg: Wav2vec_UConfig): super().__init__() inner_dim = cfg.discriminator_dim kernel = cfg.discriminator_kernel dilation = cfg.discriminator_dilation self.max_pool = cfg.discriminator_max_pool if cfg.discriminator_causal: padding = kernel - 1 else: padding = kernel // 2 def make_conv(in_d, out_d, k, p=0, has_dilation=True): conv = nn.Conv1d( in_d, out_d, kernel_size=k, padding=p, dilation=dilation if has_dilation else 1, ) if cfg.discriminator_spectral_norm: conv = nn.utils.spectral_norm(conv) elif cfg.discriminator_weight_norm: conv = nn.utils.weight_norm(conv) return conv inner_net = [ nn.Sequential( make_conv(inner_dim, inner_dim, kernel, padding), SamePad(kernel_size=kernel, causal=cfg.discriminator_causal), nn.Dropout(cfg.discriminator_dropout), nn.GELU(), ) for _ in range(cfg.discriminator_depth - 1) ] + [ make_conv(inner_dim, 1, kernel, padding, has_dilation=False), SamePad(kernel_size=kernel, causal=cfg.discriminator_causal), ] if cfg.discriminator_linear_emb: emb_net = [make_conv(dim, inner_dim, 1)] else: emb_net = [ make_conv(dim, inner_dim, kernel, padding), SamePad(kernel_size=kernel, causal=cfg.discriminator_causal), ] if cfg.discriminator_act_after_linear: emb_net.append(nn.GELU()) self.net = nn.Sequential( *emb_net, nn.Dropout(cfg.discriminator_dropout), *inner_net, ) def forward(self, x, padding_mask): x = x.transpose(1, 2) # BTC -> BCT x = self.net(x) x = x.transpose(1, 2) x_sz = x.size(1) if padding_mask is not None and padding_mask.any() and padding_mask.dim() > 1: padding_mask = padding_mask[:, : x.size(1)] x[padding_mask] = float("-inf") if self.max_pool else 0 x_sz = x_sz - padding_mask.sum(dim=-1) x = x.squeeze(-1) if self.max_pool: x, _ = x.max(dim=-1) else: x = x.sum(dim=-1) x = x / x_sz return x class 
Generator(nn.Module): def __init__(self, input_dim, output_dim, cfg: Wav2vec_UConfig): super().__init__() self.cfg = cfg self.output_dim = output_dim self.stride = cfg.generator_stride self.dropout = nn.Dropout(cfg.generator_dropout) padding = cfg.generator_kernel // 2 self.proj = nn.Sequential( TransposeLast(), nn.Conv1d( input_dim, output_dim, kernel_size=cfg.generator_kernel, stride=cfg.generator_stride, dilation=cfg.generator_dilation, padding=padding, bias=cfg.generator_bias, ), TransposeLast(), ) def forward(self, dense_x, tokens, dense_padding_mask): dense_x = self.dropout(dense_x) dense_x = self.proj(dense_x) if self.stride > 1: dense_padding_mask = dense_padding_mask[:, :: self.stride] if dense_padding_mask.size(1) != dense_x.size(1): new_padding = dense_padding_mask.new_zeros(dense_x.shape[:-1]) diff = new_padding.size(1) - dense_padding_mask.size(1) assert ( diff > 0 ), f"{new_padding.shape}, {dense_padding_mask.shape}, {dense_x.shape}, {diff}" if diff > 0: new_padding[:, diff:] = dense_padding_mask else: assert diff < 0 new_padding = dense_padding_mask[:, :diff] dense_padding_mask = new_padding result = {} token_x = None if tokens is not None: token_x = dense_x.new_zeros(tokens.numel(), self.output_dim) token_x.scatter_(1, tokens.view(-1, 1).long(), 1) token_x = token_x.view(tokens.shape + (self.output_dim,)) result["dense_x"] = dense_x result["token_x"] = token_x result["dense_padding_mask"] = dense_padding_mask return result @register_model("wav2vec_u", dataclass=Wav2vec_UConfig) class Wav2vec_U(BaseFairseqModel): def calc_gradient_penalty(self, real_data, fake_data): b_size = min(real_data.size(0), fake_data.size(0)) t_size = min(real_data.size(1), fake_data.size(1)) if self.cfg.probabilistic_grad_penalty_slicing: def get_slice(data, dim, target_size): size = data.size(dim) diff = size - target_size if diff <= 0: return data start = np.random.randint(0, diff + 1) return data.narrow(dim=dim, start=start, length=target_size) real_data = get_slice(real_data, 0, b_size) real_data = get_slice(real_data, 1, t_size) fake_data = get_slice(fake_data, 0, b_size) fake_data = get_slice(fake_data, 1, t_size) else: real_data = real_data[:b_size, :t_size] fake_data = fake_data[:b_size, :t_size] alpha = torch.rand(real_data.size(0), 1, 1) alpha = alpha.expand(real_data.size()) alpha = alpha.to(real_data.device) interpolates = alpha * real_data + ((1 - alpha) * fake_data) disc_interpolates = self.discriminator(interpolates, None) gradients = autograd.grad( outputs=disc_interpolates, inputs=interpolates, grad_outputs=torch.ones(disc_interpolates.size(), device=real_data.device), create_graph=True, retain_graph=True, only_inputs=True, )[0] gradient_penalty = (gradients.norm(2, dim=1) - 1) ** 2 return gradient_penalty def set_num_updates(self, num_updates): super().set_num_updates(num_updates) self.update_num = num_updates self.curr_temp = max( self.max_temp * self.temp_decay ** num_updates, self.min_temp ) def discrim_step(self, num_updates): return num_updates % 2 == 1 def get_groups_for_update(self, num_updates): return "discriminator" if self.discrim_step(num_updates) else "generator" def __init__(self, cfg: Wav2vec_UConfig, target_dict): super().__init__() self.cfg = cfg self.zero_index = target_dict.index("<SIL>") if "<SIL>" in target_dict else 0 self.smoothness_weight = cfg.smoothness_weight output_size = len(target_dict) self.pad = target_dict.pad() self.eos = target_dict.eos() self.smoothing = cfg.smoothing self.smoothing_one_sided = cfg.smoothing_one_sided self.no_softmax = 
cfg.no_softmax
        self.gumbel = cfg.gumbel
        self.hard_gumbel = cfg.hard_gumbel
        self.last_acc = None

        self.gradient_penalty = cfg.gradient_penalty
        self.code_penalty = cfg.code_penalty
        self.blank_weight = cfg.blank_weight
        self.blank_mode = cfg.blank_mode
        self.blank_index = target_dict.index("<SIL>") if cfg.blank_is_sil else 0
        assert self.blank_index != target_dict.unk()

        self.discriminator = Discriminator(output_size, cfg)
        for p in self.discriminator.parameters():
            p.param_group = "discriminator"

        self.pca_A = self.pca_b = None
        d = cfg.input_dim

        self.segmenter = SEGMENT_FACTORY[cfg.segmentation.type](cfg.segmentation)

        self.generator = Generator(d, output_size, cfg)

        for p in self.generator.parameters():
            p.param_group = "generator"

        for p in self.segmenter.parameters():
            p.param_group = "generator"

        self.max_temp, self.min_temp, self.temp_decay = cfg.temp
        self.curr_temp = self.max_temp
        self.update_num = 0

    @classmethod
    def build_model(cls, cfg, task):
        return cls(cfg, task.target_dictionary)

    def get_logits(
        self,
        net_output: Optional[Dict[str, List[Optional[torch.Tensor]]]],
        normalize: bool = False,
    ):
        logits = net_output["logits"]

        if self.blank_weight != 0:
            if self.blank_mode == "add":
                logits[..., self.blank_index] += self.blank_weight
            elif self.blank_mode == "set":
                logits[..., self.blank_index] = self.blank_weight
            else:
                raise Exception(f"invalid blank mode {self.blank_mode}")

        padding = net_output["padding_mask"]
        if padding.any():
            # indexing with a boolean mask returns a copy, so a chained
            # logits[padding][..., blank] assignment would be a no-op; build
            # the masked row once and assign it in a single setitem instead
            masked_row = logits.new_full((logits.size(-1),), float("-inf"))
            masked_row[self.blank_index] = float("inf")
            logits[padding] = masked_row

        if normalize:
            logits = utils.log_softmax(logits.float(), dim=-1)

        return logits.transpose(0, 1)

    def get_normalized_probs(
        self,
        net_output: Tuple[
            torch.Tensor, Optional[Dict[str, List[Optional[torch.Tensor]]]]
        ],
        log_probs: bool,
        sample: Optional[Dict[str, torch.Tensor]] = None,
    ):
        logits = self.get_logits(net_output)

        probs = super().get_normalized_probs(logits, log_probs, sample)
        # BTC -> TBC for ctc
        probs = probs.transpose(0, 1)
        return probs

    def normalize(self, dense_x):

        bsz, tsz, csz = dense_x.shape

        if dense_x.numel() == 0:
            raise Exception(dense_x.shape)
        _, k = dense_x.max(-1)
        hard_x = (
            dense_x.new_zeros(bsz * tsz, csz)
            .scatter_(-1, k.view(-1, 1), 1.0)
            .view(-1, csz)
        )
        hard_probs = torch.mean(hard_x.float(), dim=0)
        code_perplexity = torch.exp(
            -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
        )

        avg_probs = torch.softmax(dense_x.reshape(-1, csz).float(), dim=-1).mean(dim=0)
        prob_perplexity = torch.exp(
            -torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
        )

        if not self.no_softmax:
            if self.training and self.gumbel:
                dense_x = F.gumbel_softmax(
                    dense_x.float(), tau=self.curr_temp, hard=self.hard_gumbel
                ).type_as(dense_x)
            else:
                dense_x = dense_x.softmax(-1)

        return dense_x, code_perplexity, prob_perplexity

    def forward(
        self,
        features,
        padding_mask,
        random_label=None,
        dense_x_only=False,
        segment=True,
    ):
        if segment:
            features, padding_mask = self.segmenter.pre_segment(features, padding_mask)

        orig_size = features.size(0) * features.size(1) - padding_mask.sum()

        gen_result = self.generator(features, random_label, padding_mask)

        orig_dense_x, token_x = gen_result["dense_x"], gen_result["token_x"]
        orig_dense_padding_mask = gen_result["dense_padding_mask"]

        if segment:
            dense_x, dense_padding_mask = self.segmenter.logit_segment(
                orig_dense_x, orig_dense_padding_mask
            )
        else:
            dense_x = orig_dense_x
            dense_padding_mask = orig_dense_padding_mask

        dense_logits = dense_x
        prob_perplexity = None
        code_perplexity = None

        if not (self.no_softmax and dense_x_only):
            dense_x, code_perplexity,
prob_perplexity = self.normalize(dense_logits) if dense_x_only or self.discriminator is None: return { "logits": dense_x, "padding_mask": dense_padding_mask, } token_padding_mask = random_label == self.pad dense_y = self.discriminator(dense_x, dense_padding_mask) token_y = self.discriminator(token_x, token_padding_mask) sample_size = features.size(0) d_step = self.discrim_step(self.update_num) fake_smooth = self.smoothing real_smooth = self.smoothing if self.smoothing_one_sided: fake_smooth = 0 zero_loss = None smoothness_loss = None code_pen = None if d_step: loss_dense = F.binary_cross_entropy_with_logits( dense_y, dense_y.new_ones(dense_y.shape) - fake_smooth, reduction="sum", ) loss_token = F.binary_cross_entropy_with_logits( token_y, token_y.new_zeros(token_y.shape) + real_smooth, reduction="sum", ) if self.training and self.gradient_penalty > 0: grad_pen = self.calc_gradient_penalty(token_x, dense_x) grad_pen = grad_pen.sum() * self.gradient_penalty else: grad_pen = None else: grad_pen = None loss_token = None loss_dense = F.binary_cross_entropy_with_logits( dense_y, dense_y.new_zeros(dense_y.shape) + fake_smooth, reduction="sum", ) num_vars = dense_x.size(-1) if prob_perplexity is not None: code_pen = (num_vars - prob_perplexity) / num_vars code_pen = code_pen * sample_size * self.code_penalty if self.smoothness_weight > 0: smoothness_loss = F.mse_loss( dense_logits[:, :-1], dense_logits[:, 1:], reduction="none" ) smoothness_loss[dense_padding_mask[:, 1:]] = 0 smoothness_loss = ( smoothness_loss.mean() * sample_size * self.smoothness_weight ) result = { "losses": { "grad_pen": grad_pen, "code_pen": code_pen, "smoothness": smoothness_loss, }, "temp": self.curr_temp, "code_ppl": code_perplexity, "prob_ppl": prob_perplexity, "d_steps": int(d_step), "sample_size": sample_size, } suff = "_d" if d_step else "_g" result["losses"]["dense" + suff] = loss_dense result["losses"]["token" + suff] = loss_token return result
20,954
31.844828
90
py
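A self-contained sketch of calc_gradient_penalty above (the WGAN-GP recipe): score random interpolates between real and fake batches and penalize discriminator gradients whose norm deviates from 1. The tiny discriminator below is a stand-in for the convolutional one in the model; all shapes are illustrative.

import torch
import torch.nn as nn

real = torch.randn(4, 10, 8)
fake = torch.randn(4, 10, 8)
disc = nn.Sequential(nn.Flatten(), nn.Linear(80, 1))  # placeholder critic

alpha = torch.rand(real.size(0), 1, 1).expand_as(real)
interp = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
scores = disc(interp)
grads = torch.autograd.grad(
    outputs=scores, inputs=interp,
    grad_outputs=torch.ones_like(scores), create_graph=True,
)[0]
penalty = ((grads.norm(2, dim=1) - 1) ** 2).sum()
print(penalty.item() >= 0)  # True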
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import sys

import faiss
import torch.nn.functional as F

from wav2vec_cluster_faiss import parse_faiss_specs, Wav2VecFeatureReader


def get_parser():
    parser = argparse.ArgumentParser(description="apply clusters")
    # fmt: off
    parser.add_argument('data', help='location of tsv files')
    parser.add_argument('--split', help='split to process', required=True)
    parser.add_argument('--labels', help='extension of label files to read alongside the audio', default="phn")
    parser.add_argument('--path', help='path to pca and centroids', required=True)
    parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
    parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
    parser.add_argument('--max-tsz', type=int, help='batch kmeans up to this much', default=14)
    # fmt: on

    return parser


def get_iterator(args):
    label_path = osp.join(args.data, f"{args.split}.{args.labels}")
    if osp.exists(label_path):
        lp = open(label_path, "r")
    else:
        lp = None

    with open(osp.join(args.data, f"{args.split}.tsv"), "r") as fp:
        lines = fp.read().split("\n")
        root = lines.pop(0).strip()
        files = [line.rstrip() for line in lines if len(line) > 0]

        if lp is not None:
            lbls = [line.rstrip() for line in lp]
        else:
            lbls = [None] * len(files)

        num = len(files)

    reader = Wav2VecFeatureReader(args.checkpoint, args.layer)

    def iterate():
        for fname, lbl in zip(files, lbls):
            file = osp.join(root, fname.split("\t")[0])
            feats = reader.get_feats(file)
            yield feats.data, fname, lbl

    return iterate, num, root


def main():
    parser = get_parser()
    args = parser.parse_args()

    spec = osp.basename(args.path)

    try:
        faiss_spec = parse_faiss_specs(spec.rstrip("/"))[0]
    except:
        print(spec)
        raise

    print("Faiss Spec:", faiss_spec, file=sys.stderr)

    if faiss_spec.pca:
        A = torch.from_numpy(np.load(osp.join(args.path, "pca_A.npy"))).cuda()
        b = torch.from_numpy(np.load(osp.join(args.path, "pca_b.npy"))).cuda()
        print("Loaded PCA", file=sys.stderr)

    centroids = np.load(osp.join(args.path, "centroids.npy"))
    print("Loaded centroids", centroids.shape, file=sys.stderr)

    res = faiss.StandardGpuResources()
    index_flat = (
        faiss.IndexFlatL2(centroids.shape[1])
        if not faiss_spec.sphere
        else faiss.IndexFlatIP(centroids.shape[1])
    )
    faiss_index = faiss.index_cpu_to_gpu(res, 0, index_flat)
    faiss_index.add(centroids)

    generator, num, root = get_iterator(args)
    iterator = generator()

    had_labels = False
    label_path = osp.join(args.path, f"{args.split}.{args.labels}")

    with torch.no_grad():
        with open(osp.join(args.path, f"{args.split}.src"), "w") as fp, open(
            osp.join(args.path, f"{args.split}.tsv"), "w"
        ) as pp, open(label_path, "w") as lp:
            print(root, file=pp)
            for f, fname, lbl in tqdm.tqdm(iterator, total=num):
                if faiss_spec.pca:
                    f = torch.mm(f, A) + b
                if faiss_spec.norm:
                    f = F.normalize(f, p=2, dim=-1)

                f = f.cpu().numpy()

                _, z = faiss_index.search(f, 1)

                print(" ".join(str(x.item()) for x in z), file=fp)
                print(fname, file=pp)

                if lbl is not None:
                    print(lbl, file=lp)
                    had_labels = True
    if not had_labels:
        os.remove(label_path)


if __name__ == "__main__":
    main()
4,015
30.131783
129
py
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/scripts/merge_clusters.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import random

from shutil import copyfile

from npy_append_array import NpyAppendArray


def get_parser():
    parser = argparse.ArgumentParser(
        description="merges consecutive frames that share a cluster label and stores the pooled features in target dir"
    )
    # fmt: off
    parser.add_argument('source', help='directory with features')
    parser.add_argument('--split', help='which split to read', required=True)
    parser.add_argument('--save-dir', help='where to save the output', required=True)
    parser.add_argument('--cluster-dir', help='where the clusters are')
    parser.add_argument('--pooling', type=str, default='mean', choices=['mean', 'sample'], help='how to pool')
    # fmt: on

    return parser


def main():
    parser = get_parser()
    args = parser.parse_args()

    source_path = osp.join(args.source, args.split)
    cluster_path = osp.join(args.cluster_dir, args.split + ".src")
    print(f"data path: {source_path}")

    features = np.load(source_path + ".npy", mmap_mode="r")
    sizes = []
    offsets = []
    offset = 0
    with open(source_path + ".lengths", "r") as len_f:
        for line in len_f:
            length = int(line.rstrip())
            sizes.append(length)
            offsets.append(offset)
            offset += length

    clusters = []
    with open(cluster_path, "r") as cf:
        for line in cf:
            line = line.rstrip()
            items = line.split()
            items = list(map(int, items))
            clusters.append(items)

    os.makedirs(args.save_dir, exist_ok=True)
    save_path = osp.join(args.save_dir, args.split)

    copyfile(source_path + ".tsv", save_path + ".tsv")

    if os.path.exists(source_path + ".phn"):
        copyfile(source_path + ".phn", save_path + ".phn")
    if os.path.exists(osp.join(args.source, "dict.phn.txt")):
        copyfile(
            osp.join(args.source, "dict.phn.txt"),
            osp.join(args.save_dir, "dict.phn.txt"),
        )
    if os.path.exists(source_path + ".wrd"):
        copyfile(source_path + ".wrd", save_path + ".wrd")

    if osp.exists(save_path + ".npy"):
        os.remove(save_path + ".npy")
    npaa = NpyAppendArray(save_path + ".npy")

    def merge(feats, clust):
        feats = torch.from_numpy(feats.copy())
        clust = torch.LongTensor(clust)
        _, counts = clust.unique_consecutive(return_counts=True)
        curr = 0

        merged = []
        for c in counts:
            c = c.item()
            start = curr
            end = curr + c
            curr += c
            if args.pooling == "mean":
                new_x = feats[start:end].mean(dim=0)
            elif args.pooling == "sample":
                new_x = feats[start + int(random.random() * c)]
            else:
                raise NotImplementedError()
            merged.append(new_x)

        return torch.stack(merged, dim=0).numpy()

    with open(save_path + ".lengths", "w") as l_f:
        for size, offset, clust in tqdm.tqdm(
            zip(sizes, offsets, clusters), total=len(sizes)
        ):
            end = size + offset
            feats = features[offset:end]
            feats = merge(feats, clust)
            print(len(feats), file=l_f)
            npaa.append(feats)


if __name__ == "__main__":
    main()
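# Worked example of the merge step above (cluster ids are made up): for
# clust = [5, 5, 5, 2, 2, 7], unique_consecutive(return_counts=True) gives
# counts = [3, 2, 1], so six frames collapse into three segments; with
# --pooling mean each output row is the mean of its segment's feature rows,
# and with --pooling sample one row is drawn at random from each segment.
#
#   import torch
#   clust = torch.LongTensor([5, 5, 5, 2, 2, 7])
#   _, counts = clust.unique_consecutive(return_counts=True)
#   assert counts.tolist() == [3, 2, 1]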
3,543
29.817391
110
py
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/scripts/remove_silence.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Reads silence intervals from a .vads file, removes them from the corresponding
audio files, and saves the trimmed audio under the --out folder, e.g.:

tsv=shards/train.tsv
vads=shards/train.vads
out=shards/no_silence
python remove_silence.py --tsv $tsv --vads $vads --out $out
"""

import os
import argparse
import torch
import torchaudio
import tqdm


parser = argparse.ArgumentParser()
parser.add_argument("--tsv", default="", type=str)
parser.add_argument("--vads", default="", type=str)
parser.add_argument("--out", type=str)
params = parser.parse_args()

# load paths
paths = []
with open(params.tsv) as f:
    root = next(f).rstrip()
    for line in f:
        paths.append(os.path.join(root, line.rstrip().split("\t")[0]))

# load vads
list_intervals = []
with open(params.vads) as f:
    for line in f:
        interval = [
            [int(w.split(":")[0]), int(w.split(":")[1])] for w in line.rstrip().split()
        ]
        list_intervals.append(interval)


# load audio and keep only intervals (i.e. remove silences)
for i in tqdm.trange(len(paths)):
    data, _ = torchaudio.load(paths[i])
    if len(list_intervals[i]) > 0:
        data_filtered = torch.cat(
            [data[0][int(it[0]) : int(it[1])] for it in list_intervals[i]]
        ).unsqueeze(0)
    else:
        data_filtered = data

    # YOU MAY NEED TO MODIFY THIS TO GET THE RIGHT SUBPATH
    # outpath = params.out + '/'.join(paths[i].split('/')[-1])
    outpath = params.out + "/" + "/".join(paths[i].split("/")[-2:])

    if not os.path.isdir("/".join(outpath.split("/")[:-1])):
        os.makedirs("/".join(outpath.split("/")[:-1]))
    if not os.path.exists(outpath):
        torchaudio.save(outpath, data_filtered, sample_rate=16000)
    else:
        print(outpath, "exists!")
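# Example of the expected .vads format (one line per audio file; sample
# indices are illustrative): the line "0:16000 32000:48000" keeps samples
# [0, 16000) and [32000, 48000) of that file and drops everything else as
# silence, matching the start:end parsing above.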
1,927
29.125
128
py
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/scripts/apply_pca.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch

from shutil import copyfile

from npy_append_array import NpyAppendArray


def get_parser():
    parser = argparse.ArgumentParser(
        description="transforms features via a given pca and stores them in target dir"
    )
    # fmt: off
    parser.add_argument('source', help='directory with features')
    parser.add_argument('--split', help='which split to read', required=True)
    parser.add_argument('--save-dir', help='where to save the output', required=True)
    parser.add_argument('--pca-path', type=str, help='pca location. will append _A.npy and _b.npy', required=True)
    parser.add_argument('--batch-size', type=int, default=2048000, help='batch size')
    parser.add_argument('--unfiltered', action='store_true', help='process the unfiltered version')
    # fmt: on

    return parser


def main():
    parser = get_parser()
    args = parser.parse_args()

    source_path = osp.join(args.source, args.split)
    data_path = source_path + "_unfiltered" if args.unfiltered else source_path

    print(f"data path: {data_path}")

    features = np.load(data_path + ".npy", mmap_mode="r")
    pca_A = torch.from_numpy(np.load(args.pca_path + "_A.npy")).cuda()
    pca_b = torch.from_numpy(np.load(args.pca_path + "_b.npy")).cuda()

    os.makedirs(args.save_dir, exist_ok=True)
    save_path = osp.join(args.save_dir, args.split)

    copyfile(source_path + ".tsv", save_path + ".tsv")
    copyfile(data_path + ".lengths", save_path + ".lengths")

    if osp.exists(source_path + ".phn"):
        copyfile(source_path + ".phn", save_path + ".phn")
    if osp.exists(source_path + ".wrd"):
        copyfile(source_path + ".wrd", save_path + ".wrd")

    if osp.exists(save_path + ".npy"):
        os.remove(save_path + ".npy")
    npaa = NpyAppendArray(save_path + ".npy")

    batches = math.ceil(features.shape[0] / args.batch_size)

    with torch.no_grad():
        for b in tqdm.trange(batches):
            start = b * args.batch_size
            end = start + args.batch_size
            x = torch.from_numpy(features[start:end]).cuda()
            x = torch.matmul(x, pca_A) + pca_b
            npaa.append(x.cpu().numpy())


if __name__ == "__main__":
    main()
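# The transform applied per batch above is the affine map x @ A + b. A tiny
# self-contained sketch with made-up shapes (1024-dim features projected to
# 512 dims; the real matrices would come from <pca-path>_A.npy / _b.npy):
#
#   import torch
#   A = torch.randn(1024, 512)   # stand-in for <pca-path>_A.npy
#   b = torch.randn(512)         # stand-in for <pca-path>_b.npy
#   x = torch.randn(8, 1024)
#   y = torch.matmul(x, A) + b   # y.shape == (8, 512)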
2,496
31.428571
114
py
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import gc
import os
import os.path as osp
import random
import numpy as np
import tqdm
import torch

from collections import namedtuple

import faiss

import fairseq
import soundfile as sf


def get_parser():
    parser = argparse.ArgumentParser(
        description="compute kmeans codebook from kaldi-computed feats"
    )
    # fmt: off
    parser.add_argument('data', help='location of tsv files')
    parser.add_argument('--save-dir', help='where to save the output', required=True)
    parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
    parser.add_argument('--sample-pct', '-r', type=float, help='percentage of timesteps to sample', default=0)
    parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
    parser.add_argument('--faiss-specs', '-f', type=str,
                        help='faiss index specs; separated by space '
                             'format is: PCAx_NORM_CLUSx_SPHERICAL -> '
                                'PCAx if exists first apply PCA '
                                'NORM if exists, normalize the vector by L2 norm '
                                'CLUSx must exist, cluster to x clusters '
                                'SPHERICAL if exists, apply spherical kmeans',
                        default='l2')
    # fmt: on

    return parser


faiss_spec = namedtuple("faiss_spec", ["pca", "norm", "n_clus", "sphere", "spec_str"])


def parse_faiss_specs(specs_str):
    specs = []
    for ss in specs_str.split():
        comps = ss.split("_")
        pca = 0
        norm = False
        n_clus = 0
        sphere = False
        for c in comps:
            if c.startswith("PCA"):
                pca = int(c[3:])
            elif c == "NORM":
                norm = True
            elif c.startswith("CLUS"):
                n_clus = int(c[4:])
            elif c == "SPHERICAL":
                sphere = True
        assert n_clus > 0
        specs.append(
            faiss_spec(pca=pca, norm=norm, n_clus=n_clus, sphere=sphere, spec_str=ss)
        )
    return specs


class Wav2VecFeatureReader(object):
    def __init__(self, cp_file, layer):
        state = fairseq.checkpoint_utils.load_checkpoint_to_cpu(cp_file)

        self.layer = layer

        if "cfg" in state:
            w2v_args = state["cfg"]
            task = fairseq.tasks.setup_task(w2v_args.task)
            model = task.build_model(w2v_args.model)
        else:
            w2v_args = state["args"]
            task = fairseq.tasks.setup_task(w2v_args)
            model = task.build_model(w2v_args)
        model.load_state_dict(state["model"], strict=True)
        model.eval()
        model.cuda()
        self.model = model

    def read_audio(self, fname):
        """Load an audio file and return the PCM samples"""
        wav, sr = sf.read(fname)
        assert sr == 16e3
        return wav

    def get_feats(self, loc):
        x = self.read_audio(loc)
        with torch.no_grad():
            source = torch.from_numpy(x).view(1, -1).float().cuda()
            res = self.model(
                source=source, mask=False, features_only=True, layer=self.layer
            )
            return res["layer_results"][self.layer][0].squeeze(1)


def get_iterator(args):
    with open(args.data, "r") as fp:
        lines = fp.read().split("\n")
        root = lines.pop(0).strip()
        files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]

        if getattr(args, "sample_pct", 0) > 0:
            files = random.sample(files, int(args.sample_pct * len(files)))
        num = len(files)
        reader = Wav2VecFeatureReader(args.checkpoint, args.layer)

        def iterate():
            for fname in files:
                feats = reader.get_feats(fname)
                yield feats.cpu().numpy()

        return iterate, num


def main():
    parser = get_parser()
    args = parser.parse_args()

    faiss_specs = parse_faiss_specs(args.faiss_specs)
    print("Faiss Specs:", faiss_specs)

    feat_path = osp.join(args.save_dir, "features")
    if osp.exists(feat_path + ".npy"):
        feats = np.load(feat_path + ".npy")
    else:
        generator, num = 
get_iterator(args) iterator = generator() feats = [] for f in tqdm.tqdm(iterator, total=num): feats.append(f) del iterator del generator feats = np.concatenate(feats) print(feats.shape) os.makedirs(args.save_dir, exist_ok=True) # np.save(feat_path, feats) gc.collect() torch.cuda.empty_cache() reload = False for spec in faiss_specs: print("Processing spec", spec) if reload: print("Reloading...") del feats gc.collect() feats = np.load(feat_path + ".npy") save_path = osp.join(args.save_dir, spec.spec_str) os.makedirs(save_path, exist_ok=True) d = feats.shape[-1] x = feats if spec.pca > 0: print("Computing PCA") pca = faiss.PCAMatrix(d, spec.pca) pca.train(x) d = spec.pca b = faiss.vector_to_array(pca.b) A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in) np.save(osp.join(save_path, "pca_A"), A.T) np.save(osp.join(save_path, "pca_b"), b) print("Applying PCA") x = pca.apply_py(x) if spec.norm: reload = spec.pca <= 0 print("Normalizing") faiss.normalize_L2(x) print("Computing kmeans") kmeans = faiss.Kmeans( d, spec.n_clus, niter=50, verbose=True, spherical=spec.sphere, max_points_per_centroid=feats.shape[0], gpu=True, nredo=3, ) kmeans.train(x) np.save(osp.join(save_path, "centroids"), kmeans.centroids) del kmeans del x gc.collect() if __name__ == "__main__": main()
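# Example of the spec grammar handled by parse_faiss_specs above (the spec
# string itself is illustrative): "PCA512_NORM_CLUS128_SPHERICAL" parses to
#
#   faiss_spec(pca=512, norm=True, n_clus=128, sphere=True,
#              spec_str="PCA512_NORM_CLUS128_SPHERICAL")
#
# i.e. project to 512 dims with PCA, L2-normalize, then run spherical
# k-means with 128 centroids.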
6,315
28.933649
129
py
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/scripts/mean_pool.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import os.path as osp import math import numpy as np import tqdm import torch import torch.nn.functional as F from shutil import copyfile from npy_append_array import NpyAppendArray def get_parser(): parser = argparse.ArgumentParser( description="mean pools representations by compressing uniform splits of the data" ) # fmt: off parser.add_argument('source', help='directory with features') parser.add_argument('--split', help='which split to read', required=True) parser.add_argument('--save-dir', help='where to save the output', required=True) parser.add_argument('--subsample-rate', type=float, default=0.5, help='size to subsample data to') parser.add_argument('--remove-extra', action='store_true', help='if true, removes extra states that cant be pooled, otherwise pads with 0s') # fmt: on return parser def main(): parser = get_parser() args = parser.parse_args() source_path = osp.join(args.source, args.split) print(f"data path: {source_path}") features = np.load(source_path + ".npy", mmap_mode="r") os.makedirs(args.save_dir, exist_ok=True) save_path = osp.join(args.save_dir, args.split) copyfile(source_path + ".tsv", save_path + ".tsv") if os.path.exists(source_path + ".phn"): copyfile(source_path + ".phn", save_path + ".phn") if os.path.exists(source_path + ".wrd"): copyfile(source_path + ".wrd", save_path + ".wrd") if os.path.exists(osp.join(args.source, "dict.phn.txt")): copyfile( osp.join(args.source, "dict.phn.txt"), osp.join(args.save_dir, "dict.phn.txt"), ) if osp.exists(save_path + ".npy"): os.remove(save_path + ".npy") npaa = NpyAppendArray(save_path + ".npy") with open(source_path + ".lengths", "r") as lf: lengths = lf.readlines() fsz = features.shape[-1] start = 0 with torch.no_grad(): with open(save_path + ".lengths", "w") as lengths_out: for length in tqdm.tqdm(lengths): length = int(length) end = start + length feats = features[start:end] start += length x = torch.from_numpy(feats).cuda() target_num = math.ceil(length * args.subsample_rate) rem = length % target_num if rem > 0: if args.remove_extra: to_rem = target_num - rem target_num -= 1 x = x[:-to_rem] else: to_add = target_num - rem x = F.pad(x, [0, 0, 0, to_add]) x[-to_add:] = x[-to_add - 1] x = x.view(target_num, -1, fsz) x = x.mean(dim=-2) print(target_num, file=lengths_out) npaa.append(x.cpu().numpy()) if __name__ == "__main__": main()
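# Worked example of the pooling arithmetic above: with an utterance of
# length 11 and --subsample-rate 0.5, target_num = ceil(11 * 0.5) = 6 and
# rem = 11 % 6 = 5. With --remove-extra, to_rem = 6 - 5 = 1 frame is dropped
# and target_num becomes 5, so 10 frames are mean-pooled in pairs; without
# it, to_add = 1 copy of the last frame is padded so 12 frames pool into 6.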
3,187
30.88
144
py
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os
import os.path as osp
import tqdm
import torch
import torch.nn.functional as F

from shutil import copyfile

from npy_append_array import NpyAppendArray

import fairseq
import soundfile as sf


def get_parser():
    parser = argparse.ArgumentParser(
        description="extract wav2vec features for a given split and store them in target dir"
    )
    # fmt: off
    parser.add_argument('data', help='location of tsv files')
    parser.add_argument('--split', help='which split to read', required=True)
    parser.add_argument('--save-dir', help='where to save the output', required=True)
    parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec ctc model', required=True)
    parser.add_argument('--layer', type=int, default=14, help='which layer to use')
    # fmt: on

    return parser


class Wav2VecFeatureReader(object):
    def __init__(self, cp_file, layer):
        model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [cp_file]
        )
        model = model[0]
        model.eval()
        model.cuda()
        self.model = model
        self.task = task
        self.layer = layer

    def read_audio(self, fname):
        """Load an audio file and return the PCM samples"""
        wav, sr = sf.read(fname)
        assert sr == 16e3
        return wav

    def get_feats(self, loc):
        x = self.read_audio(loc)
        with torch.no_grad():
            source = torch.from_numpy(x).float().cuda()
            if self.task.cfg.normalize:
                assert source.dim() == 1, source.dim()
                with torch.no_grad():
                    source = F.layer_norm(source, source.shape)
            source = source.view(1, -1)

            m_res = self.model(source=source, mask=False, features_only=True, layer=self.layer)
            return m_res["x"].squeeze(0).cpu()


def get_iterator(args):
    with open(osp.join(args.data, args.split) + ".tsv", "r") as fp:
        lines = fp.read().split("\n")
        root = lines.pop(0).strip()
        files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]

        num = len(files)
        reader = Wav2VecFeatureReader(args.checkpoint, args.layer)

        def iterate():
            for fname in files:
                w2v_feats = reader.get_feats(fname)
                yield w2v_feats

    return iterate, num


def main():
    parser = get_parser()
    args = parser.parse_args()

    os.makedirs(args.save_dir, exist_ok=True)

    def create_files(dest):
        copyfile(osp.join(args.data, args.split) + ".tsv", dest + ".tsv")
        if osp.exists(osp.join(args.data, args.split) + ".wrd"):
            copyfile(osp.join(args.data, args.split) + ".wrd", dest + ".wrd")
        if osp.exists(osp.join(args.data, args.split) + ".phn"):
            copyfile(osp.join(args.data, args.split) + ".phn", dest + ".phn")

        if osp.exists(dest + ".npy"):
            os.remove(dest + ".npy")
        npaa = NpyAppendArray(dest + ".npy")
        return npaa

    save_path = osp.join(args.save_dir, args.split)
    npaa = create_files(save_path)

    generator, num = get_iterator(args)
    iterator = generator()

    with open(save_path + ".lengths", "w") as l_f:
        for w2v_feats in tqdm.tqdm(iterator, total=num):
            print(len(w2v_feats), file=l_f)

            if len(w2v_feats) > 0:
                npaa.append(w2v_feats.numpy())


if __name__ == "__main__":
    main()
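# A hypothetical invocation (placeholder paths), matching the argparse above:
#
#   python wav2vec_extract_features.py /data/tsv_dir --split train \
#       --save-dir /exp/feats --checkpoint /models/wav2vec.pt --layer 14
#
# This appends one feature matrix per utterance to /exp/feats/train.npy and
# writes the per-utterance frame counts to /exp/feats/train.lengths.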
3,673
29.616667
105
py
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/data/extracted_features_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import contextlib import numpy as np import torch from fairseq.data import FairseqDataset, data_utils logger = logging.getLogger(__name__) class ExtractedFeaturesDataset(FairseqDataset): def __init__( self, path, split, min_length=3, max_length=None, labels=None, label_dict=None, shuffle=True, sort_by_length=True, ): super().__init__() self.min_length = min_length self.max_length = max_length self.shuffle = shuffle self.sort_by_length = sort_by_length self.label_dict = label_dict if labels is not None: assert label_dict is not None self.sizes = [] self.offsets = [] self.labels = [] path = os.path.join(path, split) data_path = path self.data = np.load(data_path + ".npy", mmap_mode="r") offset = 0 skipped = 0 if not os.path.exists(path + f".{labels}"): labels = None with open(data_path + ".lengths", "r") as len_f, open( path + f".{labels}", "r" ) if labels is not None else contextlib.ExitStack() as lbl_f: for line in len_f: length = int(line.rstrip()) lbl = None if labels is None else next(lbl_f).rstrip().split() if length >= min_length and ( max_length is None or length <= max_length ): self.sizes.append(length) self.offsets.append(offset) if lbl is not None: self.labels.append(lbl) offset += length self.sizes = np.asarray(self.sizes) self.offsets = np.asarray(self.offsets) logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples") def __getitem__(self, index): offset = self.offsets[index] end = self.sizes[index] + offset feats = torch.from_numpy(self.data[offset:end].copy()).float() res = {"id": index, "features": feats} if len(self.labels) > 0: res["target"] = self.label_dict.encode_line( self.labels[index], line_tokenizer=lambda x: x, append_eos=False, ) return res def __len__(self): return len(self.sizes) def collater(self, samples): if len(samples) == 0: return {} features = [s["features"] for s in samples] sizes = [len(s) for s in features] target_size = max(sizes) collated_features = features[0].new_zeros( len(features), target_size, features[0].size(-1) ) padding_mask = torch.BoolTensor(collated_features.shape[:-1]).fill_(False) for i, (f, size) in enumerate(zip(features, sizes)): collated_features[i, :size] = f padding_mask[i, size:] = True res = { "id": torch.LongTensor([s["id"] for s in samples]), "net_input": {"features": collated_features, "padding_mask": padding_mask}, } if len(self.labels) > 0: target = data_utils.collate_tokens( [s["target"] for s in samples], pad_idx=self.label_dict.pad(), left_pad=False, ) res["target"] = target return res def num_tokens(self, index): return self.size(index) def size(self, index): return self.sizes[index] def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: order = [np.random.permutation(len(self))] else: order = [np.arange(len(self))] if self.sort_by_length: order.append(self.sizes) return np.lexsort(order)[::-1] else: return order[0]
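# Sketch of the on-disk layout this dataset expects for a split named
# "train" (contents are illustrative; file names follow the loaders above):
#
#   train.npy       # all utterance features concatenated row-wise
#   train.lengths   # one integer per utterance: its number of rows in .npy
#   train.phn       # optional labels, one whitespace-separated line each
#
# __getitem__ then slices rows [offset, offset + length) back out of the
# memory-mapped .npy using the offsets accumulated from .lengths.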
4,170
27.765517
87
py
sign-topic
sign-topic-main/examples/wav2vec/unsupervised/tasks/unpaired_audio_text.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from dataclasses import dataclass, field import logging import math import os from typing import Optional import torch from fairseq.logging import metrics from fairseq.tasks import FairseqTask, register_task from ..data import ExtractedFeaturesDataset, RandomInputDataset from fairseq.data import ( Dictionary, data_utils, StripTokenDataset, ) from fairseq.dataclass import FairseqDataclass from fairseq.distributed.utils import get_data_parallel_world_size from omegaconf import MISSING from examples.speech_recognition.kaldi.kaldi_decoder import ( KaldiDecoder, KaldiDecoderConfig, ) logger = logging.getLogger(__name__) @dataclass class DecodingConfig(FairseqDataclass): kenlm_path: Optional[str] = None lm_weight: float = 0 blank_weight: float = 0 @dataclass class UnpairedAudioTextConfig(FairseqDataclass): data: str = field( default=MISSING, metadata={"help": "path to data directory containing audio"} ) text_data: str = field( default=MISSING, metadata={"help": "path to data directory containing text"} ) max_length: Optional[int] = None labels: Optional[str] = field( default=None, metadata={"help": "extension of the label file to load, used for fine-tuning"}, ) unfiltered: bool = field( default=False, metadata={"help": "load data with _unfiltered suffix"} ) ctc_eval: bool = field( default=False, metadata={"help": "eval UER as if computed by CTC"} ) sort_by_length: bool = field( default=True, metadata={"help": "sort examples by length of audio timesteps"} ) shuffle: bool = field(default=True, metadata={"help": "shuffle examples"}) append_eos: bool = field(default=False, metadata={"help": "append eos"}) uppercase: Optional[bool] = field( default=False, metadata={"help": "uppercase for LM score computation"} ) skipwords: Optional[str] = field( default="", metadata={ "help": "comma-separated words to be removed for LM score computation" }, ) kenlm_path: Optional[str] = None vocab_usage_power: float = 2 word_decoder_config: Optional[KaldiDecoderConfig] = None word_kenlm_path: Optional[str] = None decoding_config: DecodingConfig = DecodingConfig() @register_task("unpaired_audio_text", dataclass=UnpairedAudioTextConfig) class UnpairedAudioText(FairseqTask): """ """ cfg: UnpairedAudioTextConfig def __init__( self, cfg: UnpairedAudioTextConfig, source_dictionary=None, target_dictionary=None, ): super().__init__(cfg) self._target_dictionary = target_dictionary self._source_dictionary = source_dictionary self.num_symbols = ( len([s for s in target_dictionary.symbols if not s.startswith("madeup")]) - target_dictionary.nspecial ) self.sil_id = ( target_dictionary.index("<SIL>") if "<SIL>" in target_dictionary else -1 ) self.kenlm = None if cfg.kenlm_path is not None: import kenlm self.kenlm = kenlm.Model(cfg.kenlm_path) self.word_kenlm = None if cfg.word_kenlm_path is not None: import kenlm self.word_kenlm = kenlm.Model(cfg.word_kenlm_path) self.uppercase = cfg.uppercase self.skipwords = set(cfg.skipwords.split(",")) def str_postprocess(s): s = " ".join(w for w in s.split() if w not in self.skipwords) s = s.upper() if self.uppercase else s return s self.str_postprocess = str_postprocess self.compute_lm_score = lambda s: self.kenlm.score(self.str_postprocess(s)) self.compute_word_score = None if cfg.word_decoder_config is not 
None: self.kaldi_decoder = KaldiDecoder(cfg.word_decoder_config, beam=10) def compute_word_score(logits, padding): res = self.kaldi_decoder.decode(logits, padding) for r in res: r = r.result() assert len(r) == 1 r = r[0] yield r["score"], r["words"] self.compute_word_score = compute_word_score @classmethod def setup_task(cls, cfg: UnpairedAudioTextConfig, **kwargs): """Setup the task (e.g., load dictionaries). Args: cfg (AudioPretrainingConfig): configuration of this task """ dict_path = os.path.join(cfg.text_data, "dict.txt") if os.path.exists(dict_path): target_dictionary = Dictionary.load(dict_path) else: dict_path = os.path.join(cfg.data, f"dict.{cfg.labels}.txt") target_dictionary = Dictionary.load(dict_path) return cls(cfg, target_dictionary=target_dictionary) def optimizer_step(self, optimizer, model, update_num): if hasattr(model, "get_groups_for_update"): groups = model.get_groups_for_update(update_num) optimizer.step(groups={groups}) else: optimizer.step() def valid_step(self, sample, model, criterion): res = model( **sample["net_input"], dense_x_only=True, ) dense_x = res["logits"] padding_mask = res["padding_mask"] word_scores = None if self.compute_word_score is not None: word_scores = self.compute_word_score(dense_x.cpu(), padding_mask.cpu()) z = dense_x.argmax(-1) z[padding_mask] = self.target_dictionary.pad() vocab_seen = torch.zeros(self.num_symbols, dtype=torch.bool) import editdistance c_err = 0 c_len = 0 pred_c_len = 0 lm_score_sum = 0 for i, (x, t, id) in enumerate( zip( z, sample["target"] if "target" in sample else [None] * len(z), sample["id"], ) ): if t is not None: t = t[(t >= self.target_dictionary.nspecial)] x = x[ (x >= self.target_dictionary.nspecial) & (x < (self.num_symbols + self.target_dictionary.nspecial)) ] if self.sil_id >= 0: x = x[x != self.sil_id] vocab_seen[x - self.target_dictionary.nspecial] = True pred_units_arr = x if self.cfg.ctc_eval: pred_units_arr = pred_units_arr.unique_consecutive() pred_units_arr = pred_units_arr[pred_units_arr != 0] if id == 0: if t is not None: logger.info(f"REF: {self.target_dictionary.string(t)}") logger.info(f"HYP: {self.target_dictionary.string(pred_units_arr)}") if self.kenlm is not None: if t is not None: ref_lm_s = self.compute_lm_score( self.target_dictionary.string(t) ) logger.info( f"LM [REF]: {ref_lm_s}, {math.pow(10, -ref_lm_s / (len(t) + 1))}" ) hyp_lm_s = self.compute_lm_score( self.target_dictionary.string(pred_units_arr) ) logger.info( f"LM [HYP]: {hyp_lm_s}, {math.pow(10, -hyp_lm_s / (len(pred_units_arr) + 1))}" ) pred_units_arr = pred_units_arr.tolist() pred_c_len += len(pred_units_arr) if t is not None: t = t.tolist() c_err += editdistance.eval(pred_units_arr, t) c_len += len(t) else: c_len = pred_c_len if self.kenlm is not None: pred_str = self.target_dictionary.string(pred_units_arr) lm_score = self.compute_lm_score(pred_str) lm_score_sum += lm_score kaldi_score_sum = 0 word_lm_sum = 0 num_words = 0 if word_scores is not None: for score, words in word_scores: kaldi_score_sum += score num_words += len(words) if self.word_kenlm is not None: word_lm_sum += self.kenlm.score(" ".join(words)) try: world_size = get_data_parallel_world_size() except: world_size = 1 logging_output = { "loss": c_err, "_num_char_errors": c_err, "_num_chars": c_len, "_num_pred_chars": pred_c_len, "ntokens": c_len, "nsentences": z.size(0), "sample_size": c_len, "_world_size": world_size, "_lm_score_sum": lm_score_sum, "_kaldi_score_sum": kaldi_score_sum, "_word_lm_sum": word_lm_sum, "_num_words": num_words, "_vocab_seen": 
vocab_seen, } return c_err, c_len, logging_output def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs): data_path = self.cfg.data task_cfg = task_cfg or self.cfg has_unpaired_text = os.path.exists( os.path.join(self.cfg.text_data, f"{split}.idx") ) self.datasets[split] = ExtractedFeaturesDataset( path=data_path, split=split, min_length=3, max_length=task_cfg.max_length, labels=None if has_unpaired_text else task_cfg.labels, label_dict=self.target_dictionary, shuffle=getattr(task_cfg, "shuffle", True), sort_by_length=task_cfg.sort_by_length, ) logger.info(f"split {split} has unpaired text? {has_unpaired_text}") if has_unpaired_text: text_dataset = data_utils.load_indexed_dataset( os.path.join(self.cfg.text_data, split), self.target_dictionary ) text_dataset = StripTokenDataset(text_dataset, self.target_dictionary.eos()) self.datasets[split] = RandomInputDataset( self.datasets[split], text_dataset, ["random_label"], add_to_input=True, pad_idx=self.target_dictionary.pad(), ) @property def source_dictionary(self): return self._source_dictionary @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self._target_dictionary def max_positions(self): """Maximum input length supported by the encoder.""" return None def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) zero = torch.scalar_tensor(0.0) num_char_errors = sum( log.get("_num_char_errors", zero) for log in logging_outputs ) num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs) num_word_errors = sum( log.get("_num_word_errors", zero) for log in logging_outputs ) num_words = sum(log.get("_num_words", zero) for log in logging_outputs) num_pred_chars = sum( log.get("_num_pred_chars", zero) for log in logging_outputs ) lm_score_sum = sum(log.get("_lm_score_sum", zero) for log in logging_outputs) vocab_seen = ( sum(log.get("_vocab_seen", zero) for log in logging_outputs) .bool() .sum() .item() ) kaldi_score_sum = sum( log.get("_kaldi_score_sum", zero) for log in logging_outputs ) word_lm_sum = sum(log.get("_word_lm_sum", zero) for log in logging_outputs) metrics.log_scalar_sum("_num_char_errors", num_char_errors) metrics.log_scalar_sum("_num_chars", num_chars) metrics.log_scalar_sum("_num_word_errors", num_word_errors) metrics.log_scalar_sum("_num_words", num_words) metrics.log_scalar_sum("lm_score_sum", lm_score_sum) metrics.log_scalar_sum("num_pred_chars", num_pred_chars) if self.cfg.word_kenlm_path is not None: metrics.log_scalar_sum("kaldi_score_sum", kaldi_score_sum) metrics.log_scalar_sum("word_lm_sum", word_lm_sum) if num_chars > 0: metrics.log_derived( "uer", lambda meters: meters["_num_char_errors"].sum * 100.0 / meters["_num_chars"].sum if meters["_num_chars"].sum > 0 else float("nan"), ) if lm_score_sum < 0 and vocab_seen > 0: metrics.log_scalar("vocab_seen_pct", vocab_seen / self.num_symbols) metrics.log_derived( "weighted_lm_ppl", lambda meters: math.pow( 10, -meters["lm_score_sum"].sum / ( meters["num_pred_chars"].sum + meters["nsentences"].sum ), # account for </s> ) / meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power, ) metrics.log_derived( "lm_ppl", lambda meters: math.pow( 10, -meters["lm_score_sum"].sum / ( meters["num_pred_chars"].sum + meters["nsentences"].sum ), # account for </s> ), ) else: metrics.log_derived("weighted_lm_ppl", lambda meters: float("inf")) if num_words > 0: if word_lm_sum != 0: metrics.log_derived( "word_lm_ppl", lambda meters: 
math.pow( 10, -meters["word_lm_sum"].sum / ( meters["_num_words"].sum + meters["nsentences"].sum ), # account for </s> ), ) metrics.log_derived( "weighted_word_lm_ppl", lambda meters: math.pow( 10, -meters["word_lm_sum"].sum / ( meters["_num_words"].sum + meters["nsentences"].sum ), # account for </s> ) / meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power, ) if self.cfg.word_kenlm_path is not None: metrics.log_derived( "kaldi_score", lambda meters: meters["kaldi_score_sum"].sum / meters["nsentences"].sum, ) def build_model(self, cfg: FairseqDataclass): model = super().build_model(cfg) return model
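# The derived LM metrics above in closed form (base-10, matching kenlm's
# log10 scores; the +nsentences term accounts for the </s> scored per line):
#
#   lm_ppl          = 10 ** (-lm_score_sum / (num_pred_chars + nsentences))
#   weighted_lm_ppl = lm_ppl / (vocab_seen_pct ** vocab_usage_power)
#
# so transcripts that only ever use a small fraction of the vocabulary get a
# worse (larger) weighted perplexity.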
15,435
33.455357
102
py
sign-topic
sign-topic-main/examples/criss/save_encoder.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate pre-processed data with a trained model. """ import numpy as np import torch from fairseq import checkpoint_utils, options, progress_bar, tasks, utils from fairseq.sequence_generator import EnsembleModel from fairseq.utils import safe_hasattr def get_avg_pool( models, sample, prefix_tokens, src_dict, remove_bpe, has_langtok=False ): model = EnsembleModel(models) # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } # compute the encoder output for each beam encoder_outs = model.forward_encoder(encoder_input) np_encoder_outs = encoder_outs[0].encoder_out.cpu().numpy().astype(np.float32) encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.cpu().numpy().astype( np.float32 ) encoder_mask = np.expand_dims(encoder_mask.T, axis=2) if has_langtok: encoder_mask = encoder_mask[1:, :, :] np_encoder_outs = np_encoder_outs[1, :, :] masked_encoder_outs = encoder_mask * np_encoder_outs avg_pool = (masked_encoder_outs / encoder_mask.sum(axis=0)).sum(axis=0) return avg_pool def main(args): assert args.path is not None, "--path required for generation!" assert ( not args.sampling or args.nbest == args.beam ), "--sampling requires --nbest to be equal to --beam" assert ( args.replace_unk is None or args.raw_text ), "--replace-unk requires a raw text dataset (--raw-text)" args.beam = 1 utils.import_user_module(args) if args.max_tokens is None: args.max_tokens = 12000 print(args) use_cuda = torch.cuda.is_available() and not args.cpu # Load dataset splits task = tasks.setup_task(args) task.load_dataset(args.gen_subset) # Set dictionaries try: src_dict = getattr(task, "source_dictionary", None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary # Load ensemble print("| loading model(s) from {}".format(args.path)) models, _model_args = checkpoint_utils.load_model_ensemble( args.path.split(":"), arg_overrides=eval(args.model_overrides), task=task, ) # Optimize ensemble for generation for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() if use_cuda: model.cuda() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_positions=utils.resolve_max_positions( task.max_positions(), ), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, ).next_epoch_itr(shuffle=False) num_sentences = 0 source_sentences = [] shard_id = 0 all_avg_pool = None encoder_has_langtok = ( safe_hasattr(task.args, "encoder_langtok") and task.args.encoder_langtok is not None and safe_hasattr(task.args, "lang_tok_replacing_bos_eos") and not task.args.lang_tok_replacing_bos_eos ) with progress_bar.build_progress_bar(args, itr) as t: for sample in t: if sample is None: print("Skipping None") 
continue sample = utils.move_to_cuda(sample) if use_cuda else sample if "net_input" not in sample: continue prefix_tokens = None if args.prefix_size > 0: prefix_tokens = sample["target"][:, : args.prefix_size] with torch.no_grad(): avg_pool = get_avg_pool( models, sample, prefix_tokens, src_dict, args.post_process, has_langtok=encoder_has_langtok, ) if all_avg_pool is not None: all_avg_pool = np.concatenate((all_avg_pool, avg_pool)) else: all_avg_pool = avg_pool if not isinstance(sample["id"], list): sample_ids = sample["id"].tolist() else: sample_ids = sample["id"] for i, sample_id in enumerate(sample_ids): # Remove padding src_tokens = utils.strip_pad( sample["net_input"]["src_tokens"][i, :], tgt_dict.pad() ) # Either retrieve the original sentences or regenerate them from tokens. if align_dict is not None: src_str = task.dataset(args.gen_subset).src.get_original_text( sample_id ) else: if src_dict is not None: src_str = src_dict.string(src_tokens, args.post_process) else: src_str = "" if not args.quiet: if src_dict is not None: print("S-{}\t{}".format(sample_id, src_str)) source_sentences.append(f"{sample_id}\t{src_str}") num_sentences += sample["nsentences"] if all_avg_pool.shape[0] >= 1000000: with open( f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w", ) as avg_pool_file: all_avg_pool.tofile(avg_pool_file) with open( f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w", ) as sentence_file: sentence_file.writelines(f"{line}\n" for line in source_sentences) all_avg_pool = None source_sentences = [] shard_id += 1 if all_avg_pool is not None: with open( f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w" ) as avg_pool_file: all_avg_pool.tofile(avg_pool_file) with open( f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w" ) as sentence_file: sentence_file.writelines(f"{line}\n" for line in source_sentences) return None def cli_main(): parser = options.get_generation_parser() parser.add_argument( "--encoder-save-dir", default="", type=str, metavar="N", help="directory to save encoder outputs", ) args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
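# get_avg_pool above is a length-masked average over time: with encoder
# states h of shape (T, B, C) and mask m of shape (T, B, 1) (1 at real
# positions, 0 at padding), it computes, per sentence,
#
#   avg_pool = sum_t(m[t] * h[t]) / sum_t(m[t])
#
# which is what (masked_encoder_outs / encoder_mask.sum(axis=0)).sum(axis=0)
# evaluates to.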
7,473
33.762791
90
py
sign-topic
sign-topic-main/examples/speech_to_speech/generate_waveform_from_code.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import json import logging from pathlib import Path import random import soundfile as sf import torch from tqdm import tqdm from fairseq import utils from fairseq.models.text_to_speech.vocoder import CodeHiFiGANVocoder logging.basicConfig() logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def dump_result(args, sample_id, pred_wav, suffix=""): sf.write( f"{args.results_path}/{sample_id}{suffix}_pred.wav", pred_wav.detach().cpu().numpy(), 16000, ) def load_code(in_file): with open(in_file) as f: out = [list(map(int, line.strip().split())) for line in f] return out def main(args): logger.info(args) use_cuda = torch.cuda.is_available() and not args.cpu with open(args.vocoder_cfg) as f: vocoder_cfg = json.load(f) vocoder = CodeHiFiGANVocoder(args.vocoder, vocoder_cfg) if use_cuda: vocoder = vocoder.cuda() multispkr = vocoder.model.multispkr if multispkr: logger.info("multi-speaker vocoder") num_speakers = vocoder_cfg.get( "num_speakers", 200 ) # following the default in codehifigan to set to 200 assert ( args.speaker_id < num_speakers ), f"invalid --speaker-id ({args.speaker_id}) with total #speakers = {num_speakers}" data = load_code(args.in_code_file) Path(args.results_path).mkdir(exist_ok=True, parents=True) for i, d in tqdm(enumerate(data), total=len(data)): x = { "code": torch.LongTensor(d).view(1, -1), } suffix = "" if multispkr: spk = ( random.randint(0, num_speakers - 1) if args.speaker_id == -1 else args.speaker_id ) suffix = f"_spk{spk}" x["spkr"] = torch.LongTensor([spk]).view(1, 1) x = utils.move_to_cuda(x) if use_cuda else x wav = vocoder(x, args.dur_prediction) dump_result(args, i, wav, suffix=suffix) def cli_main(): parser = argparse.ArgumentParser() parser.add_argument( "--in-code-file", type=str, required=True, help="one unit sequence per line" ) parser.add_argument( "--vocoder", type=str, required=True, help="path to the CodeHiFiGAN vocoder" ) parser.add_argument( "--vocoder-cfg", type=str, required=True, help="path to the CodeHiFiGAN vocoder config", ) parser.add_argument("--results-path", type=str, required=True) parser.add_argument( "--dur-prediction", action="store_true", help="enable duration prediction (for reduced/unique code sequences)", ) parser.add_argument( "--speaker-id", type=int, default=-1, help="Speaker id (for vocoder that supports multispeaker). Set to -1 to randomly sample speakers.", ) parser.add_argument("--cpu", action="store_true", help="run on CPU") args = parser.parse_args() main(args) if __name__ == "__main__": cli_main()
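# Example of the expected --in-code-file format (unit ids are made up): one
# space-separated unit sequence per line, e.g.
#
#   23 23 51 7 7 7 104
#   9 9 33 2
#
# and a hypothetical invocation (placeholder paths):
#
#   python generate_waveform_from_code.py --in-code-file units.txt \
#       --vocoder vocoder.pt --vocoder-cfg config.json \
#       --results-path out/ --dur-prediction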
3,285
27.08547
107
py
sign-topic
sign-topic-main/examples/speech_to_speech/benchmarking/core.py
import timeit import logging import torch from pypapi import events, papi_high as high from memory_profiler import memory_usage from torch import nn from argparse import Namespace from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.data import data_utils as fairseq_data_utils from fairseq import checkpoint_utils, tasks, utils from fairseq.models.text_to_speech.vocoder import CodeHiFiGANVocoder from examples.hubert.simple_kmeans.dump_hubert_feature import HubertFeatureReader from examples.hubert.simple_kmeans.dump_km_label import ApplyKmeans from fairseq_cli.generate import get_symbols_to_strip_from_output import soundfile as sf import ast import json logging.basicConfig() logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) torch.manual_seed(1) torch.set_deterministic(True) class BenchmarkingBase(nn.Module): def __init__(self): nn.Module.__init__(self) self.s2x_task = None def warm_up(self, sample, repeat): """Warm up the model""" for _i in range(repeat): self.forward(sample) logger.info(f"Model warmed up by running inference {repeat} times") def benchmark_run_time(self, dataset, repeat): """Benchmark average runtime for the model by calling benchmark_run_time_single_sample function""" logger.info("Starting run time benchmarking") time_elapsed = 0 for i, sample in enumerate(dataset): time_elapsed += self.benchmark_run_time_single_sample(sample, repeat=repeat) if i % 100 == 0: logger.info(f"Benchmarked run time for {i}/{len(dataset)} samples") total_time_elapsed = time_elapsed / len(dataset) return total_time_elapsed def benchmark_run_time_single_sample(self, sample, repeat): """Benchmark average runtime for a single sample using timeit library. Units are seconds""" timer = timeit.Timer(lambda: self.forward(sample)) time_elapsed = timer.timeit(repeat) return time_elapsed / repeat def count_flops( self, dataset, repeat, ): """Use PYPAPI library to count average flops for model inference. Note: It only works if the model is being run on cpu""" logger.info("Starting flop counter") high.start_counters([events.PAPI_DP_OPS]) for i, sample in enumerate(dataset): for _r in range(repeat): self.forward(sample) if i % 100 == 0: logger.info(f"Counted flops for {i}/{len(dataset)} samples") flops = high.stop_counters() flops = round(flops[0] / (repeat * len(dataset))) return flops def max_memory(self, dataset, repeat): """Compute average max memory consumed by model inference. Units are MiB""" logger.info("Starting memory benchmarking") total_memory = 0 for i, sample in enumerate(dataset): for _r in range(repeat): total_memory += max(memory_usage((self.forward, (sample,), {}))) if i % 100 == 0: logger.info(f"Benchmarked memory for {i}/{len(dataset)} samples") total_memory = total_memory / (repeat * len(dataset)) return total_memory def gather_all_metrics(self, dataset, repeat): run_time = self.benchmark_run_time(dataset, repeat) max_memory = self.max_memory(dataset, repeat) flops = self.count_flops(dataset, repeat) return run_time, max_memory, flops def dump_final_speech_output( self, dataset, output_dir, resample_fn, sample_rate, prefix=None ): for i, sample in enumerate(dataset): hypo = self.forward(sample)[0] def to_np(x): return x.detach().cpu().numpy() try: wave_preds = to_np(resample_fn(hypo["waveform"])) sf.write( f"{output_dir}/{prefix}_{i}_pred.wav", wave_preds, sample_rate, ) except Exception as e: raise Exception( f" Encountered {e} - Invalid waveform. 
Make sure the model outputs a waveform" ) class Processing(BenchmarkingBase): """Class similar to fairseq_cli/generate.py. Supports ASR, MT and ST model inference""" def __init__(self, args): super().__init__() self.use_cuda = not getattr(args, "cpu", False) self.setUp(args) self.training = False self.s2x_task = self.task def setUp(self, cfg): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) self.task = tasks.setup_task(cfg.task) self.tgt_dict = self.task.target_dictionary # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, _ = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides={}, task=self.task, suffix=cfg.checkpoint.checkpoint_suffix, strict=False, num_shards=cfg.checkpoint.checkpoint_shard_count, ) if len(models) > 1: raise Exception("Currently loading multiple models is not supported") self.model = models[0] # Optimize model for generation if cfg.common.fp16: self.model.half() if self.use_cuda: self.model.cuda() self.model.prepare_for_inference_(cfg) self.generator = self.task.build_generator( [self.model], cfg.generation, extra_gen_cls_kwargs={}, ) # Handle tokenization and BPE self.tokenizer = self.task.build_tokenizer(cfg.tokenizer) self.bpe = self.task.build_bpe(cfg.bpe) self.remove_bpe = cfg.common_eval.post_process def encode_source(self, src): """Method to generate source tokens from a string""" if self.tokenizer is not None: src = self.tokenizer.encode(src) if self.bpe is not None: src = self.bpe.encode(src) src_tokens = self.task.source_dictionary.encode_line(src).long() src_lens = src_tokens.size(0) return { "net_input": { "src_tokens": src_tokens.view(1, src_lens), "src_lengths": torch.tensor([src_lens]), } } def decode_target(self, hypos): """Method to decode target string from tokens""" hypo_str = self.tgt_dict.string( hypos[0][0]["tokens"].int().cpu(), self.remove_bpe, get_symbols_to_strip_from_output(self.generator), ) if self.bpe is not None: hypo_str = self.bpe.decode(hypo_str) if self.tokenizer is not None: hypo_str = self.tokenizer.decode(hypo_str) return hypo_str def forward(self, sample): hypos = self.task.inference_step( self.generator, [self.model], sample, prefix_tokens=None, constraints=None, ) return hypos class GenerateWaveformFromCode(BenchmarkingBase): """Class to support waveform generation from code. 
Currently, vocoder only supports single speaker"""

    def __init__(self, args):
        super().__init__()
        with open(args.vocoder_cfg) as f:
            vocoder_cfg = json.load(f)
        self.dur_prediction = args.dur_prediction
        self.vocoder = CodeHiFiGANVocoder(args.vocoder, vocoder_cfg)

    def format_units(self, input):
        code = torch.LongTensor(list(map(int, input.strip().split()))).view(1, -1)
        return {"code": code}

    def generate_vocoder_input(self, dataset):
        return [self.format_units(sample) for sample in dataset]

    def forward(self, sample):
        return [{"waveform": self.vocoder(sample, self.dur_prediction)}]


class HubertUnitExtractor(BenchmarkingBase):
    def __init__(self, args):
        super().__init__()
        self.feature_reader = HubertFeatureReader(
            args.hubert_ckpt_path, args.hubert_layer
        )
        self.kmeans = ApplyKmeans(args.hubert_km_path)

    def forward(self, sample):
        with torch.no_grad():
            feat = []
            # chunk long inputs to the reader's max_chunk, then concatenate
            # the per-chunk features over time before quantizing
            for start in range(0, sample.size(1), self.feature_reader.max_chunk):
                x_chunk = sample[:, start : start + self.feature_reader.max_chunk]
                feat_chunk, _ = self.feature_reader.model.extract_features(
                    source=x_chunk,
                    padding_mask=None,
                    mask=False,
                    output_layer=self.feature_reader.layer,
                )
                feat.append(feat_chunk)
            feat = torch.cat(feat, 1).squeeze(0)
        return self.kmeans(feat).tolist()


class SpeechGeneration(BenchmarkingBase):
    """Class similar to examples/text_to_speech/generate_waveform.py.
    Supports models with speech generation as end goal (TTS, Direct S2ST models etc)"""

    def __init__(self, args):
        super().__init__()
        self.use_cuda = not getattr(args, "cpu", False)
        self.setUp(args)
        self.s2x_task = self.task

    def setUp(self, args):
        if args.task == "speech_to_speech":
            args.normalize_waveform = False
        self.task = tasks.setup_task(args)
        self.pre_tokenizer = self.task.build_tokenizer(args)
        self.bpe_tokenizer = self.task.build_bpe(args)
        try:
            self.src_dict = self.task.src_dict
        except Exception:
            self.src_dict = None
        ensemble, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
            [args.path],
            arg_overrides=ast.literal_eval(args.model_overrides),
            task=self.task,
            strict=False,
        )
        self.model = ensemble[0]
        if self.use_cuda:
            self.model.cuda()
            # criterion.cuda()
        self.model.eval()
        self.generator = self.task.build_generator(
            [self.model],
            args,
        )

    def processTextInput(self, text):
        """Generate source tokens from text input"""
        if self.pre_tokenizer is not None:
            text = self.pre_tokenizer.encode(text)
        if self.bpe_tokenizer is not None:
            text = self.bpe_tokenizer.encode(text)
        target = self.src_dict.encode_line(
            text, add_if_not_exist=False, append_eos=True
        ).long()
        target = fairseq_data_utils.collate_tokens(
            [target],
            self.src_dict.pad(),
            self.src_dict.eos(),
            left_pad=False,
            move_eos_to_beginning=False,
        )
        src_lengths = torch.tensor([target.size(1)], dtype=torch.long)
        prev_output_tokens = None
        sample = {
            "net_input": {
                "src_tokens": target,
                "src_lengths": src_lengths,
                "prev_output_tokens": prev_output_tokens,
            }
        }
        sample = utils.move_to_cuda(sample) if self.use_cuda else sample
        return sample

    def forward(self, sample):
        sample["speaker"] = None
        output = self.generator.generate(self.model, sample)  # , has_targ=False
        return output


class S2UT(BenchmarkingBase):
    """Class to support S2UT models. 
Also supports generating waveforms from the units predicted""" def __init__(self, s2u_args, vocoder_args=None): super().__init__() self.s2u = Processing(s2u_args) self.vocoder = None if vocoder_args: self.vocoder = GenerateWaveformFromCode(vocoder_args) self.vocoder_input = None def forward(self, sample): s2u_hypos = self.s2u(sample) s2u_output = self.s2u.decode_target(s2u_hypos) if not self.vocoder: return s2u_output units = self.vocoder.format_units(s2u_output) vocoder_output = self.vocoder(units) return vocoder_output def generate_s2u_outputs(self, dataset): return [self.s2u.decode_target(self.s2u(sample)) for sample in dataset] def compute_metrics(self, metric_type, dataset, repeat=None): """Generic function to compute metrics ignoring the io processing time""" if self.vocoder and not self.vocoder_input: self.s2u_output = self.generate_s2u_outputs(dataset) self.vocoder_input = self.vocoder.generate_vocoder_input(self.s2u_output) s2u_metrics = getattr(self.s2u, metric_type)( dataset, repeat, ) vocoder_metrics = 0 if self.vocoder: vocoder_metrics = getattr(self.vocoder, metric_type)( self.vocoder_input, repeat, ) print( f"metric_type = {metric_type} s2u_metrics = {s2u_metrics} \t vocoder_metrics = {vocoder_metrics}" ) if metric_type == "max_memory": return max(s2u_metrics, vocoder_metrics) else: return s2u_metrics + vocoder_metrics def benchmark_run_time(self, dataset, repeat): return self.compute_metrics("benchmark_run_time", dataset, repeat) def count_flops(self, dataset, repeat): return self.compute_metrics("count_flops", dataset, repeat) def max_memory(self, dataset, repeat): return self.compute_metrics("max_memory", dataset, repeat) class Cascaded2StageS2ST(BenchmarkingBase): """ST + TTS""" def __init__(self, s2t_args, tts_args): super().__init__() self.s2t = Processing(s2t_args) self.s2x_task = self.s2t.task self.tts = SpeechGeneration(tts_args) if tts_args else None self.training = False self.tts_inputs = None def forward(self, sample): if not self.tts: raise Exception( "Forward function is not callable without tts. 
Reinitialize the class with tts_args" ) s2t_hypos = self.s2t(sample) s2t_output = self.s2t.decode_target(s2t_hypos) tts_input = self.tts.processTextInput(s2t_output) tts_output = self.tts(tts_input) return tts_output def generate_s2t_outputs(self, dataset): """Process dataset and generate s2t outputs""" return [self.s2t.decode_target(self.s2t(sample)) for sample in dataset] def generate_tts_inputs(self, dataset): """Process dataset and generate tts inputs""" return [self.tts.processTextInput(sample) for sample in dataset] def compute_metrics(self, metric_type, dataset, repeat=None): """Generic function to compute metrics ignoring the io processing time""" if not self.tts_inputs: s2t_outputs = self.generate_s2t_outputs(dataset) self.tts_inputs = self.generate_tts_inputs(s2t_outputs) s2t_metrics = getattr(self.s2t, metric_type)( dataset, repeat, ) tts_metrics = getattr(self.tts, metric_type)( self.tts_inputs, repeat, ) print( f"metric_type = {metric_type} s2t_metrics = {s2t_metrics} \t tts_metrics = {tts_metrics}" ) if metric_type == "max_memory": return max(s2t_metrics, tts_metrics) else: return s2t_metrics + tts_metrics def benchmark_run_time(self, dataset, repeat): return self.compute_metrics("benchmark_run_time", dataset, repeat) def count_flops(self, dataset, repeat): return self.compute_metrics("count_flops", dataset, repeat) def max_memory(self, dataset, repeat): return self.compute_metrics("max_memory", dataset, repeat) class Cascaded3StageS2ST(Cascaded2StageS2ST): """ASR + MT + TTS""" def __init__(self, s2t_args, tts_args, mt_args): super().__init__(s2t_args, tts_args) self.mt = Processing(mt_args) self.mt_inputs = [] def forward(self, sample): s2t_hypos = self.s2t(sample) s2t_output = self.s2t.decode_target(s2t_hypos) mt_input = self.mt.encode_source(s2t_output) mt_hypos = self.mt(mt_input) mt_output = self.mt.decode_target(mt_hypos) tts_input = self.tts.processTextInput(mt_output) tts_output = self.tts(tts_input) return tts_output def generate_mt_inputs(self, dataset): """Process dataset to generate mt model inputs""" return [self.mt.encode_source(sample) for sample in dataset] def generate_mt_outputs(self, dataset): """Process dataset to generate mt model outputs""" return [self.mt.decode_target(self.mt(sample)) for sample in dataset] def compute_metrics(self, metric_type, dataset, repeat=None): """Generic function to compute metrics ignoring the io processing time""" if not self.tts_inputs: s2t_outputs = self.generate_s2t_outputs(dataset) self.mt_inputs = self.generate_mt_inputs(s2t_outputs) mt_outputs = self.generate_mt_outputs(self.mt_inputs) self.tts_inputs = self.generate_tts_inputs(mt_outputs) s2t_metrics = getattr(self.s2t, metric_type)( dataset, repeat, ) mt_metrics = getattr(self.mt, metric_type)(self.mt_inputs, repeat) tts_metrics = getattr(self.tts, metric_type)( self.tts_inputs, repeat, ) print( f"metric_type = {metric_type} s2t_metrics = {s2t_metrics} \t mt_metrics = {mt_metrics} \t tts_metrics = {tts_metrics}" ) if metric_type == "max_memory": return max(s2t_metrics, mt_metrics, tts_metrics) else: return s2t_metrics + mt_metrics + tts_metrics
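# A minimal sketch of how the timing in benchmark_run_time_single_sample
# works, usable with any of the wrappers above (model and sample are
# stand-ins for a constructed BenchmarkingBase subclass and one input dict):
#
#   import timeit
#   repeat = 5
#   timer = timeit.Timer(lambda: model.forward(sample))
#   avg_seconds = timer.timeit(repeat) / repeat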
17,782
35.440574
131
py
sign-topic
sign-topic-main/examples/speech_to_speech/benchmarking/data_utils.py
from fairseq import tasks import numpy as np import logging import random from fairseq import options import torch import os import soundfile as sf from fairseq.data.audio.audio_utils import ( get_waveform, parse_path, ) logging.basicConfig() logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) random.seed(1) np.random.seed(1) random_number_generator = np.random.RandomState(30) def generate_random_data_sample(T, B=1, D=80): """Generate random data sample given the T, B, D values""" net_input = { "src_tokens": torch.tensor(random_number_generator.randn(B, T, D)).float(), "src_lengths": torch.tensor([T]), } return {"net_input": net_input} def generate_random_dataset(T_range_min, T_range_max, B=1, D=80, dataset_size=100): """Generate random dataset with T values within a given range, B, D""" T_values = [random.randint(T_range_min, T_range_max) for i in range(dataset_size)] dataset = [] for t in T_values: dataset.append(generate_random_data_sample(t, B, D)) return dataset, sum(T_values) / dataset_size def load_dataset_npy(file_name, dataset_size=None): """Load dataset from a .npy file.""" data = np.load(file_name, allow_pickle=True) if dataset_size: data = data[:dataset_size] return data def load_dataset_raw_to_waveforms( file_name, dataset_size=None, need_waveform=True, sample_rate=16000, read_using_soundfile=False, ): """Load raw dataset from w2v tsv file. Optionally get waveforms""" data = [] with open(file_name, "r") as fp: lines = fp.readlines() data = [ os.path.join(lines[0].strip(), line.strip().split("\t")[0]) for line in lines[1:] ] if dataset_size: data = data[:dataset_size] if not need_waveform: return data features = [] if read_using_soundfile: for _i, d in enumerate(data): wav = sf.read(d)[0] if wav.ndim == 2: wav = wav.mean(-1) features.append(torch.from_numpy(wav).float().view(1, -1)) else: for i, d in enumerate(data): _path, slice_ptr = parse_path(d) if len(slice_ptr) == 0: feat = get_waveform( _path, always_2d=True, output_sample_rate=sample_rate )[0] features.append( { "id": i, "net_input": { "src_tokens": torch.tensor(feat), "src_lengths": torch.tensor([feat.shape[1]]), }, } ) else: raise Exception("Currently unsupported data format") return features def load_dataset_task( args, batch_size=1, limit_size=None, ref_dataset=None, ): """Loads dataset based on args by creating a task""" if not args.data or not args.subset or not args.task: raise Exception( "Please provide necessary arguments to load the dataset - data, subset and task" ) task = tasks.setup_task(args) task.load_dataset(args.subset) if not limit_size: limit_size = len(task.dataset(args.subset)) iter = task.get_batch_iterator( dataset=task.dataset(args.subset), max_sentences=batch_size ).next_epoch_itr(shuffle=False) dataset = [] for i, sample in enumerate(iter): sample = { "id": task.datasets[args.subset].ids[sample["id"].item()], "net_input": { "src_tokens": sample["net_input"]["src_tokens"], "src_lengths": sample["net_input"]["src_lengths"], }, } dataset.append(sample) if i == limit_size - 1: break if ref_dataset: try: ids = get_ids_from_dataset(ref_dataset) except Exception as e: raise Exception(f"{e} - Cannot extract ids from reference dataset") filtered_dataset = [] for sample in dataset: if ( sample["id"] in ids or sample["id"][5:] in ids or f"dev_{sample['id']}" in ids ): filtered_dataset.append(sample) dataset = filtered_dataset max_len, min_len, avg_len = get_dataset_stats(dataset) print( f"{args.subset} dataset stats : num_samples={len(dataset)} 
max_len = {max_len} min_len = {min_len} avg_len = {avg_len}" ) return dataset def randomly_sample_subset(dataset, size=500): """Randomly sample subset from a dataset""" random_indices = [random.randint(0, len(dataset) - 1) for i in range(size)] return [dataset[i] for i in random_indices] def get_short_data_subset(dataset, size=500): """Get a subset of desired size by sorting based on src_lengths""" return sort_dataset(dataset)[:size] def get_long_data_subset(dataset, size=500): """Get a subset of desired size by sorting based on src_lengths descending""" return sort_dataset(dataset, reverse=True)[:size] def sort_dataset(dataset, reverse=False): return sorted( dataset, key=lambda x: x["net_input"]["src_lengths"].item(), reverse=reverse ) def save_dataset_npy(dataset, file_name): """Save a dataset as .npy file""" np.save(file_name, dataset) def get_dataset_stats(dataset): """Get stats about dataset based on src_lengths of samples""" max_len = 0 min_len = 100000 avg_len = 0 for d in dataset: max_len = max(max_len, d["net_input"]["src_lengths"].item()) min_len = min(min_len, d["net_input"]["src_lengths"].item()) avg_len += d["net_input"]["src_lengths"].item() return max_len, min_len, avg_len / len(dataset) def make_parser(): """ Additional args: 1. Provide the dataset dir path using --data. 2. Loading the dataset doesn't require config, provide --config-yaml to apply additional feature transforms """ parser = options.get_speech_generation_parser() parser.add_argument( "--subset", default=None, type=str, required=True, help="Subset to use for dataset generation", ) parser.add_argument( "--dataset-save-dir", default=None, type=str, required=False, help="Dir path in which the datasets are to be saved", ) parser.add_argument( "--ref-dataset", default=None, type=str, required=False, help="If provided, the ids in the reference dataset will be used to filter the new dataset generated.", ) parser.add_argument("--dataset-save-token", default="", type=str, required=False) options.add_generation_args(parser) return parser def get_ids_from_dataset(dataset): return {sample["id"]: 1 for sample in dataset} def cli_main(): parser = make_parser() args = options.parse_args_and_arch(parser) dataset = load_dataset_task(args) random_dataset = randomly_sample_subset(dataset) short_dataset = get_short_data_subset(dataset) long_dataset = get_long_data_subset(dataset) if args.dataset_save_token: args.dataset_save_token = f"_{args.dataset_save_token}_" if args.dataset_save_dir: save_dataset_npy( random_dataset, f"{args.dataset_save_dir}/random_dataset{args.dataset_save_token}w_ids.npy", ) save_dataset_npy( short_dataset, f"{args.dataset_save_dir}/short_dataset{args.dataset_save_token}w_ids.npy", ) save_dataset_npy( long_dataset, f"{args.dataset_save_dir}/long_dataset{args.dataset_save_token}w_ids.npy", ) if __name__ == "__main__": cli_main()
7,893
28.788679
127
py
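A minimal sketch of how the helpers in data_utils.py above compose: generate a synthetic benchmarking dataset, inspect its length statistics, and round-trip it through .npy. The import path is an assumption (it presumes the fairseq examples tree is on PYTHONPATH); everything else is defined in the file.

# Hedged usage sketch for the benchmarking data utilities above.
from examples.speech_to_speech.benchmarking.data_utils import (
    generate_random_dataset,
    get_dataset_stats,
    save_dataset_npy,
    load_dataset_npy,
)

# 100 samples with 200-1000 frames of 80-dim features each (B=1).
dataset, expected_avg = generate_random_dataset(T_range_min=200, T_range_max=1000)
max_len, min_len, avg_len = get_dataset_stats(dataset)
print(f"max_len = {max_len} min_len = {min_len} avg_len = {avg_len:.1f}")

save_dataset_npy(dataset, "random_bench.npy")    # np.save under the hood
reloaded = load_dataset_npy("random_bench.npy")  # np.load(..., allow_pickle=True)
assert len(reloaded) == len(dataset)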
sign-topic
sign-topic-main/examples/speech_to_speech/benchmarking/get_metrics.py
import copy import torch import logging from argparse import Namespace import yaml from fairseq import options from examples.speech_to_speech.benchmarking.core import ( Processing, SpeechGeneration, Cascaded2StageS2ST, Cascaded3StageS2ST, S2UT, ) from examples.speech_to_speech.benchmarking.data_utils import ( load_dataset_npy, load_dataset_raw_to_waveforms, ) logging.basicConfig() logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) torch.manual_seed(1) torch.set_deterministic(True) def make_parser(): """Note: As the names indicate use s2x_args(ex:ST, ASR etc) for models with speech input, x2s_args for models with speech output(ex:TTS) and mt_args for translation models (ex: mt, T2U etc). For direct S2ST models, use x2s_args to provide model details. """ parser = options.get_speech_generation_parser() parser.add_argument("--target-is-code", action="store_true", default=False) parser.add_argument("--config", type=str) parser.add_argument( "--model-type", default="S2U", choices=["S2S", "TTS", "S2UT", "MT", "S2T", "2StageS2ST", "3StageS2ST"], help="Choose one of the models. For model inference implementation, refer to core.py", ) parser.add_argument( "--dataset-path", type=str, help="""File to load dataset from. Assumes dataset is a list of samples. Each sample is a dict of format {'net_input':{'src_tokens':torch.tenor(),'src_lengths':torch.tensor()}}""", ) parser.add_argument( "--dataset-type", type=str, default="npy", choices=["npy", "raw"], help="""Type of input dataset file""", ) parser.add_argument( "--read-using-sf", type=str, default=False, help="""If sound file should be used to read the raw dataset""", ) parser.add_argument( "--dataset-size", default=None, type=int, help="Dataset size to use for benchmarking", ) parser.add_argument( "--dump-speech-waveforms-dir", default=None, type=str, help="Directory to dump the speech waveforms computed on the dataset.", ) parser.add_argument( "--dump-waveform-file-prefix", default="", type=str, help="File name prefix for the saved speech waveforms", ) parser.add_argument( "--feat-dim", default=80, type=int, help="Input feature dimension" ) parser.add_argument( "--target-sr", default=16000, type=int, help="Target sample rate for dumping waveforms", ) options.add_generation_args(parser) options.get_interactive_generation_parser(parser) return parser def cli_main(): parser = make_parser() args = options.parse_args_and_arch(parser) with open( args.config, "r", ) as f: config = yaml.load(f, Loader=yaml.FullLoader) dict_args = vars(args) dict_args.update(config["general"]) args = Namespace(**dict_args) i = 1 stage_args = [] while i <= 3: var = f"stage{i}" tmp_args = copy.deepcopy(dict_args) if var in config: tmp_args.update(config[var]) stage_args.append(Namespace(**tmp_args)) i += 1 else: break if args.model_type == "S2S" or args.model_type == "TTS": model = SpeechGeneration(stage_args[0]) elif args.model_type == "S2UT": model = S2UT(stage_args[0], stage_args[1] if len(stage_args) > 1 else None) elif args.model_type == "MT" or args.model_type == "S2T": model = Processing(stage_args[0]) elif args.model_type == "2StageS2ST": model = Cascaded2StageS2ST(stage_args[0], stage_args[1]) elif args.model_type == "3StageS2ST": model = Cascaded3StageS2ST(stage_args[0], stage_args[2], stage_args[1]) else: raise Exception(f"Currently unsupported model type {args.model_type}") print(f"Evaluating on dataset - {args.dataset_path}\n") if args.dataset_type == "npy": dataset = load_dataset_npy(args.dataset_path, 
dataset_size=args.dataset_size) elif args.dataset_type == "raw": dataset = load_dataset_raw_to_waveforms( args.dataset_path, dataset_size=args.dataset_size, read_using_soundfile=args.read_using_sf, ) else: raise Exception(f"Invalid dataset type {args.dataset_type}") model.warm_up(sample=dataset[0], repeat=2) run_time, memory, flops = model.gather_all_metrics(dataset, repeat=1) print(f"run_time = {run_time}sec \tmemory = {memory}MiB \tflops = {flops}") if args.dump_speech_waveforms_dir: model.dump_final_speech_output( dataset, args.dump_speech_waveforms_dir, lambda x: x, args.target_sr, prefix=args.dump_waveform_file_prefix, ) if __name__ == "__main__": cli_main()
5,053
30.006135
115
py
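A self-contained sketch of the per-stage config merge that cli_main in get_metrics.py performs above: each "stageN" section of the YAML overrides the shared "general" section before being turned into a Namespace. The config keys and checkpoint names here are hypothetical, and the loop is simplified to the same effect.

# Hedged sketch of the stage-config merge (toy config, hypothetical paths).
import copy
from argparse import Namespace

config = {
    "general": {"model_type": "2StageS2ST", "max_len_a": 1},
    "stage1": {"path": "s2t_model.pt"},  # hypothetical checkpoint
    "stage2": {"path": "tts_model.pt"},  # hypothetical checkpoint
}

dict_args = dict(config["general"])
stage_args = []
i = 1
while f"stage{i}" in config:
    tmp_args = copy.deepcopy(dict_args)
    tmp_args.update(config[f"stage{i}"])  # stage section wins over "general"
    stage_args.append(Namespace(**tmp_args))
    i += 1

print(stage_args[0].path, stage_args[1].path)  # s2t_model.pt tts_model.pt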
sign-topic
sign-topic-main/examples/speech_to_speech/preprocessing/prep_s2spect_data.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os from pathlib import Path import shutil import torchaudio import soundfile as sf from tqdm import tqdm import pandas as pd from examples.speech_synthesis.data_utils import extract_logmel_spectrogram from examples.speech_to_speech.preprocessing.data_utils import gen_config_yaml from examples.speech_to_text.data_utils import create_zip, get_zip_manifest, save_df_to_tsv from fairseq.data.audio.audio_utils import convert_waveform logger = logging.getLogger(__name__) MANIFEST_COLUMNS = ["id", "src_audio", "src_n_frames", "tgt_audio", "tgt_n_frames"] def prepare_target_data(args, tgt_audios): feature_name = "logmelspec80" zip_path = args.output_root / f"{feature_name}.zip" if zip_path.exists(): print(f"{zip_path} exists.") return zip_path feature_root = args.output_root / feature_name feature_root.mkdir(exist_ok=True) print("Extracting Mel spectrogram features...") for tgt_audio in tqdm(tgt_audios): sample_id = tgt_audio.stem waveform, sample_rate = torchaudio.load(tgt_audio.as_posix()) waveform, sample_rate = convert_waveform( waveform, sample_rate, normalize_volume=args.normalize_volume, to_sample_rate=args.sample_rate ) extract_logmel_spectrogram( waveform, sample_rate, feature_root / f"{sample_id}.npy", win_length=args.win_length, hop_length=args.hop_length, n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min, f_max=args.f_max ) print("ZIPing features...") create_zip(feature_root, zip_path) shutil.rmtree(feature_root) return zip_path def process(args): os.makedirs(args.output_root, exist_ok=True) manifest = {} tgt_audios = [] for split in args.data_split: print(f"Processing {split}...") manifest[split] = {c: [] for c in MANIFEST_COLUMNS} missing_tgt_audios = [] src_audios = list(args.source_dir.glob(f"{split}/*.wav")) for src_audio in tqdm(src_audios): sample_id = src_audio.stem tgt_audio = args.target_dir / split / f"{sample_id}.wav" if not tgt_audio.is_file(): missing_tgt_audios.append(sample_id) continue tgt_audios.append(tgt_audio) src_n_frames = sf.info(src_audio.as_posix()).frames manifest[split]["id"].append(sample_id) manifest[split]["src_audio"].append(src_audio.as_posix()) manifest[split]["src_n_frames"].append( src_n_frames // 160 ) # estimation of 10-ms frame for 16kHz audio print(f"Processed {len(manifest[split]['id'])} samples") if len(missing_tgt_audios) > 0: print( f"{len(missing_tgt_audios)} with missing target data (first 3 examples: {', '.join(missing_tgt_audios[:3])})" ) # Extract features and pack features into ZIP zip_path = prepare_target_data(args, tgt_audios) print("Fetching ZIP manifest...") tgt_audio_paths, tgt_audio_lengths = get_zip_manifest(zip_path) print("Generating manifest...") for split in args.data_split: print(f"Processing {split}...") for sample_id in tqdm(manifest[split]["id"]): manifest[split]["tgt_audio"].append(tgt_audio_paths[sample_id]) manifest[split]["tgt_n_frames"].append(tgt_audio_lengths[sample_id]) out_manifest = args.output_root / f"{split}.tsv" print(f"Writing manifest to {out_manifest}...") save_df_to_tsv(pd.DataFrame.from_dict(manifest[split]), out_manifest) # Generate config YAML win_len_t = args.win_length / args.sample_rate hop_len_t = args.hop_length / args.sample_rate extra = { "features": { "type": "spectrogram+melscale+log", "sample_rate": args.sample_rate, "eps": 1e-5, "n_mels": args.n_mels, 
"n_fft": args.n_fft, "window_fn": "hann", "win_length": args.win_length, "hop_length": args.hop_length, "win_len_t": win_len_t, "hop_len_t": hop_len_t, "f_min": args.f_min, "f_max": args.f_max, "n_stft": args.n_fft // 2 + 1 } } gen_config_yaml( args.output_root, audio_root=args.output_root.as_posix(), specaugment_policy="lb", feature_transform=["utterance_cmvn", "delta_deltas"], extra=extra, ) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--source-dir", required=True, type=Path, help="source audio directory" ) parser.add_argument( "--target-dir", required=True, type=Path, help="target audio directory" ) parser.add_argument( "--data-split", default=["train", "valid", "test"], nargs="+", help="data split names", ) parser.add_argument( "--output-root", required=True, type=Path, help="output directory" ) # target feature related parser.add_argument("--win-length", type=int, default=1024) parser.add_argument("--hop-length", type=int, default=256) parser.add_argument("--n-fft", type=int, default=1024) parser.add_argument("--n-mels", type=int, default=80) parser.add_argument("--f-min", type=int, default=20) parser.add_argument("--f-max", type=int, default=8000) parser.add_argument("--sample-rate", type=int, default=22050) parser.add_argument("--normalize-volume", "-n", action="store_true") args = parser.parse_args() process(args) if __name__ == "__main__": main()
5,844
33.382353
125
py
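The manifest arithmetic in prep_s2spect_data.py above is easy to check by hand. A small sketch with the script's default feature parameters; the 3-second utterance is a hypothetical example.

# Frame/window arithmetic used by the preprocessing script above.
sample_rate = 22050                   # target-side default
win_length, hop_length = 1024, 256    # defaults from the arg parser

win_len_t = win_length / sample_rate  # ~0.0464 s analysis window
hop_len_t = hop_length / sample_rate  # ~0.0116 s hop -> ~86 frames/sec

# Source side: 16 kHz audio, 10 ms frames -> samples // 160.
src_samples = 48000                   # hypothetical 3 s utterance at 16 kHz
src_n_frames = src_samples // 160     # 300 frames
print(win_len_t, hop_len_t, src_n_frames)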
sign-topic
sign-topic-main/examples/bart/summarize.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from fairseq.models.bart import BARTModel
import argparse

XSUM_KWARGS = dict(beam=6, lenpen=1.0, max_len_b=60, min_len=10, no_repeat_ngram_size=3)
CNN_KWARGS = dict(beam=4, lenpen=2.0, max_len_b=140, min_len=55, no_repeat_ngram_size=3)


@torch.no_grad()
def generate(bart, infile, outfile="bart_hypo.txt", bsz=32, n_obs=None, **eval_kwargs):
    count = 1

    # if n_obs is not None: bsz = min(bsz, n_obs)

    with open(infile) as source, open(outfile, "w") as fout:
        sline = source.readline().strip()
        slines = [sline]
        for sline in source:
            if n_obs is not None and count > n_obs:
                break
            if count % bsz == 0:
                hypotheses_batch = bart.sample(slines, **eval_kwargs)
                for hypothesis in hypotheses_batch:
                    fout.write(hypothesis + "\n")
                    fout.flush()
                slines = []

            slines.append(sline.strip())
            count += 1

        if slines != []:
            hypotheses_batch = bart.sample(slines, **eval_kwargs)
            for hypothesis in hypotheses_batch:
                fout.write(hypothesis + "\n")
                fout.flush()


def main():
    """
    Usage::

         python examples/bart/summarize.py \
            --model-dir $HOME/bart.large.cnn \
            --model-file model.pt \
            --src $HOME/data-bin/cnn_dm/test.source
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model-dir",
        required=True,
        type=str,
        default="bart.large.cnn/",
        help="path containing model file and src_dict.txt",
    )
    parser.add_argument(
        "--model-file",
        default="checkpoint_best.pt",
        help="where in model_dir are weights saved",
    )
    parser.add_argument(
        "--src", default="test.source", help="text to summarize", type=str
    )
    parser.add_argument(
        "--out", default="test.hypo", help="where to save summaries", type=str
    )
    parser.add_argument("--bsz", default=32, help="batch size for generation", type=int)
    parser.add_argument(
        "--n", default=None, help="how many examples to summarize", type=int
    )
    parser.add_argument(
        "--xsum-kwargs",
        action="store_true",
        default=False,
        help="if true use XSUM_KWARGS else CNN_KWARGS",
    )
    args = parser.parse_args()
    eval_kwargs = XSUM_KWARGS if args.xsum_kwargs else CNN_KWARGS
    if args.model_dir == "pytorch/fairseq":
        bart = torch.hub.load("pytorch/fairseq", args.model_file)
    else:
        bart = BARTModel.from_pretrained(
            args.model_dir,
            checkpoint_file=args.model_file,
            data_name_or_path=args.model_dir,
        )
    bart = bart.eval()
    if torch.cuda.is_available():
        bart = bart.cuda().half()
    generate(
        bart, args.src, bsz=args.bsz, n_obs=args.n, outfile=args.out, **eval_kwargs
    )


if __name__ == "__main__":
    main()
3,174
30.435644
88
py
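A minimal interactive counterpart to main() in summarize.py above, taking the torch.hub path with the CNN/DailyMail generation kwargs. The input sentence is only an illustration, and the checkpoint download is large, so treat this as a sketch rather than a test.

# Hedged sketch: summarize one sentence with the hub BART checkpoint.
import torch

bart = torch.hub.load("pytorch/fairseq", "bart.large.cnn")
bart = bart.eval()
if torch.cuda.is_available():
    bart = bart.cuda().half()

hypotheses = bart.sample(
    ["The tower is 324 metres tall, about the same height as an 81-storey building."],
    beam=4, lenpen=2.0, max_len_b=140, min_len=55, no_repeat_ngram_size=3,  # CNN_KWARGS
)
print(hypotheses[0])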
sign-topic
sign-topic-main/examples/adaptive_span/adaptive_span_attention.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math

import torch
import torch.nn as nn
import torch.nn.functional as F


class AdaptiveMask(nn.Module):
    """Soft masking function for adaptive size.
    It masks out the last K values of an input. The masking value
    goes from 1 to 0 gradually, so K can be learned with
    back-propagation.
    Args:
        max_size: maximum size (i.e. input dimension)
        ramp_size: size of the ramp going from 0 to 1
        init_val: initial size proportion not to be masked out
        shape: learn multiple sizes independent of each other
    """

    def __init__(self, max_size, ramp_size, init_val=0, shape=(1,)):
        nn.Module.__init__(self)
        self._max_size = max_size
        self._ramp_size = ramp_size
        self.current_val = nn.Parameter(torch.zeros(*shape) + init_val)
        mask_template = torch.linspace(1 - max_size, 0, steps=max_size)
        self.register_buffer("mask_template", mask_template)

    def forward(self, x):
        mask = self.mask_template.float() + self.current_val.float() * self._max_size
        mask = mask / self._ramp_size + 1
        mask = mask.clamp(0, 1)
        if x.size(-1) < self._max_size:
            # the input could have been trimmed beforehand to save computation
            mask = mask.narrow(-1, self._max_size - x.size(-1), x.size(-1))
        x = (x * mask).type_as(x)
        return x

    def get_current_max_size(self, include_ramp=True):
        current_size = math.ceil(self.current_val.max().item() * self._max_size)
        if include_ramp:
            current_size += self._ramp_size
        current_size = max(0, min(self._max_size, current_size))
        return current_size

    def get_current_avg_size(self, include_ramp=True):
        current_size = math.ceil(
            self.current_val.float().mean().item() * self._max_size
        )
        if include_ramp:
            current_size += self._ramp_size
        current_size = max(0, min(self._max_size, current_size))
        return current_size

    def clamp_param(self):
        """this needs to be called after each update"""
        self.current_val.data.clamp_(0, 1)


class AdaptiveSpan(nn.Module):
    """Adaptive attention span for Transformer self-attention.
    This module learns an attention span length from data for each
    self-attention head.
Args: attn_span: maximum attention span adapt_span_loss: loss coefficient for the span length adapt_span_ramp: length of the masking ramp adapt_span_init: initial size ratio adapt_span_cache: adapt cache size to reduce memory usage """ def __init__( self, attn_span, adapt_span_ramp, adapt_span_init, n_head, adapt_span_layer, **kargs ): nn.Module.__init__(self) self._max_span = attn_span self._n_head = n_head self._adapt_span_layer = adapt_span_layer if self._adapt_span_layer: self._mask = AdaptiveMask( max_size=self._max_span, ramp_size=adapt_span_ramp, init_val=adapt_span_init, ) else: self._mask = AdaptiveMask( max_size=self._max_span, ramp_size=adapt_span_ramp, init_val=adapt_span_init, shape=(n_head, 1, 1), ) def forward(self, attn, normalize=True): """mask attention with the right span""" # batch and head dimensions are merged together, so separate them first self.clamp_param() if self._adapt_span_layer: attn = self._mask(attn) else: B = attn.size(0) # batch size M = attn.size(1) # block size attn = attn.reshape(B // self._n_head, self._n_head, M, -1) attn = self._mask(attn) attn = attn.view(B, M, -1) return attn def get_trim_len(self): """how much of memory can be trimmed to reduce computation""" L = self._max_span trim_len = min(L - 1, L - self._mask.get_current_max_size()) # too fine granularity might be bad for the memory management trim_len = math.floor(trim_len / 64) * 64 return trim_len def trim_memory(self, query, key, value, key_pe): """trim out unnecessary memory beforehand to reduce computation""" trim_len = self.get_trim_len() cache_size = key.size(1) - query.size(1) trim_len_cache = trim_len - (self._max_span - cache_size) if trim_len_cache > 0: key = key[:, trim_len_cache:, :] value = value[:, trim_len_cache:, :] elif trim_len_cache < 0: # cache is too short! this happens when validation resumes # after a lot of updates. key = F.pad(key, [0, 0, -trim_len_cache, 0]) value = F.pad(value, [0, 0, -trim_len_cache, 0]) if trim_len > 0: if key_pe is not None: key_pe = key_pe[:, :, trim_len:] return key, value, key_pe def get_cache_size(self): """determine how long the cache should be""" trim_len = self.get_trim_len() # give a buffer of 64 steps since a span might increase # in future updates return min(self._max_span, self._max_span - trim_len + 64) def get_loss(self): """a loss term for regularizing the span length""" return self._max_span * self._mask.current_val.float().mean() def get_current_max_span(self): return self._mask.get_current_max_size() def get_current_avg_span(self): return self._mask.get_current_avg_size() def clamp_param(self): self._mask.clamp_param()
5,881
35.534161
85
py
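A standalone numeric sketch of the soft mask that AdaptiveMask.forward builds above. The max_size, ramp_size and current_val values are toy choices; the computation mirrors the forward pass line by line.

# Hedged sketch: the differentiable span mask, computed by hand.
import torch

max_size, ramp_size = 8, 4
current_val = torch.tensor(0.25)  # toy value for the learned size proportion
mask_template = torch.linspace(1 - max_size, 0, steps=max_size)  # [-7, ..., 0]

mask = mask_template + current_val * max_size
mask = (mask / ramp_size + 1).clamp(0, 1)
print(mask)  # tensor([0.00, 0.00, 0.25, 0.50, 0.75, 1.00, 1.00, 1.00])

# Multiplying attention weights by `mask` zeroes the oldest positions, and
# because the ramp is differentiable, current_val (the span) receives gradients.
attn = torch.ones(1, max_size)
print(attn * mask)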
sign-topic
sign-topic-main/examples/adaptive_span/adagrad_with_grad_clip.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch.optim import Adagrad from fairseq.optim import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adagrad_with_grad_clip") class FairseqAdagradWithGradClip(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = AdagradWithGradClip(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--adagrad-clip', default=0.0, type=float, metavar='D', help='internal grad clip') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "weight_decay": self.args.weight_decay, "grad_clip": self.args.adagrad_clip, } @property def supports_flat_params(self): return False def _clip_grad(clr, grad, group_grad_clip): if group_grad_clip > 0: norm = grad.norm(2).item() if norm > group_grad_clip: clr *= group_grad_clip / (norm + 1e-10) return clr class AdagradWithGradClip(Adagrad): """Adagrad algorithm with custom gradient clipping""" def __init__( self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, grad_clip=0, ): Adagrad.__init__( self, params, lr=lr, lr_decay=lr_decay, weight_decay=weight_decay, initial_accumulator_value=initial_accumulator_value, ) self.defaults["grad_clip"] = grad_clip self.param_groups[0].setdefault("grad_clip", grad_clip) def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data state = self.state[p] state["step"] += 1 if group["weight_decay"] != 0: if p.grad.data.is_sparse: raise RuntimeError( "weight_decay option is " "not compatible with sparse " "gradients" ) grad = grad.add(group["weight_decay"], p.data) clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"]) # clip clr = _clip_grad(clr=clr, grad=grad, group_grad_clip=group["grad_clip"]) if grad.is_sparse: # the update is non-linear so indices must be unique grad = grad.coalesce() grad_indices = grad._indices() grad_values = grad._values() size = grad.size() def make_sparse(values): constructor = grad.new if grad_indices.dim() == 0 or values.dim() == 0: return constructor().resize_as_(grad) return constructor(grad_indices, values, size) state["sum"].add_(make_sparse(grad_values.pow(2))) std = state["sum"]._sparse_mask(grad) std_values = std._values().sqrt_().add_(1e-10) p.data.add_(-clr, make_sparse(grad_values / std_values)) else: state["sum"].addcmul_(1, grad, grad) std = state["sum"].sqrt().add_(1e-10) p.data.addcdiv_(-clr, grad, std) return loss
4,374
32.914729
92
py
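The clipping rule in _clip_grad above rescales the effective learning rate rather than the gradient itself, so the step size shrinks whenever the gradient norm exceeds the threshold. A quick check, assuming _clip_grad from the file above is in scope:

# Hedged sketch: the effective lr after clipping a large gradient.
import torch

grad = torch.tensor([3.0, 4.0])  # ||grad||_2 = 5
clr = _clip_grad(clr=0.5, grad=grad, group_grad_clip=1.0)
print(clr)  # ~0.1, i.e. 0.5 scaled by group_grad_clip / ||grad||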
sign-topic
sign-topic-main/examples/adaptive_span/adaptive_span_model.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.modules.layer_norm import LayerNorm from .adaptive_span_attention import AdaptiveSpan # Size notations: # B = batch_size, H = d_model, M = block_size, L = attn_span def _skew(X, pad_value): """shift every row 1 step to right""" # X = B x M x L B, M, L = X.size() X = F.pad(X, (0, M + 1), value=pad_value) # B x M x (L+M+1) X = X.view(B, -1) # B x ML+MM+M X = X[:, :-M] # B x ML+MM X = X.view(B, M, M + L) # B x M x L+M return X def _unskew(X): """reverse _skew operation""" # X = B x M x L+M B, M, L = X.size() L -= M X = X.view(B, -1) # B x ML+MM X = F.pad(X, (0, M)) # B x ML+MM+M X = X.view(B, M, M + L + 1) # B x M x L+M+1 X = X[:, :, :L] # B x M x L return X class SeqAttention(nn.Module): """Sequential self-attention layer. Each token will attend to its previous fixed number of steps. Note that attention doesn't include the current step itself. """ def __init__(self, d_model, n_head, attn_span, dropout, adapt_span_layer, **kargs): nn.Module.__init__(self) self.dropout = nn.Dropout(dropout) self.d_model = d_model # size of a single head self.attn_span = attn_span self.adaptive_span = AdaptiveSpan( attn_span=attn_span, n_head=n_head, adapt_span_layer=adapt_span_layer, **kargs ) def forward(self, query, key, value, key_pe): # query size = B x M x H # key, value sizes = B x (M+L) x H key, value, key_pe = self.adaptive_span.trim_memory(query, key, value, key_pe) # compute attention from context # B x M (dest) x (M+L) (src) attn_cont = torch.matmul(query, key.transpose(-1, -2)) attn_cont = _unskew(attn_cont) # B x M x L # compute the effect of position embedding attn_pos = torch.matmul(query, key_pe) # B x M x L_pos attn = attn_cont + attn_pos attn = attn / math.sqrt(self.d_model) # B x M X L_pos attn = F.softmax(attn.float(), dim=-1).type_as(attn) # trim attention lengths according to the learned span attn = self.adaptive_span(attn) attn = self.dropout(attn) # B x M X L_pos attn_cont = _skew(attn, 0) # B x M X (L+M) out = torch.matmul(attn_cont, value) # B x M x H return out def get_cache_size(self): return self.adaptive_span.get_cache_size() class MultiHeadSeqAttention(nn.Module): def __init__(self, d_model, n_head, **kargs): nn.Module.__init__(self) assert d_model % n_head == 0 self.n_head = n_head self.head_dim = d_model // n_head self.attn = SeqAttention(d_model=self.head_dim, n_head=n_head, **kargs) self.proj_query = nn.Linear(d_model, d_model, bias=False) nn.init.xavier_normal_(self.proj_query.weight) self.proj_out = nn.Linear(d_model, d_model, bias=False) nn.init.xavier_normal_(self.proj_out.weight) self.proj_val = nn.Linear(d_model, d_model, bias=False) nn.init.xavier_normal_(self.proj_val.weight) self.proj_key = nn.Linear(d_model, d_model, bias=False) nn.init.xavier_normal_(self.proj_key.weight) def head_reshape(self, x): K = self.n_head D = self.head_dim x = x.view(x.size()[:-1] + (K, D)) # B x (M+L) x K x D x = x.transpose(1, 2).contiguous() # B x K x (M+L) x D x = x.view(-1, x.size(-2), x.size(-1)) # B_K x (M+L) x D return x def forward(self, query, key, value, key_pe): B = query.size(0) K = self.n_head D = self.head_dim M = query.size(1) query = self.proj_query(query) query = self.head_reshape(query) value = self.proj_val(value) value = self.head_reshape(value) key = self.proj_key(key) key = 
self.head_reshape(key) out = self.attn(query, key, value, key_pe) # B_K x M x D out = out.view(B, K, M, D) # B x K x M x D out = out.transpose(1, 2).contiguous() # B x M x K x D out = out.view(B, M, -1) # B x M x K_D out = self.proj_out(out) return out class FeedForwardLayer(nn.Module): def __init__(self, d_model, d_inner, dropout, **kargs): nn.Module.__init__(self) self.fc1 = nn.Linear(d_model, d_inner) self.fc2 = nn.Linear(d_inner, d_model) nn.init.xavier_uniform_(self.fc1.weight) nn.init.xavier_uniform_(self.fc2.weight) self.dropout = nn.Dropout(dropout) def forward(self, h): h1 = F.relu(self.fc1(h)) h1 = self.dropout(h1) h2 = self.fc2(h1) return h2 class TransformerSeqLayer(nn.Module): def __init__(self, d_model, **kargs): nn.Module.__init__(self) self.attn = MultiHeadSeqAttention(d_model=d_model, **kargs) self.norm1 = LayerNorm(d_model) self.ff = FeedForwardLayer(d_model=d_model, **kargs) self.norm2 = LayerNorm(d_model) def forward(self, h, h_cache, key_pe): # h = B x M x H # h_cache = B x L x H h_all = torch.cat([h_cache, h], dim=1) # B x (M+L) x H attn_out = self.attn(h, h_all, h_all, key_pe) h = self.norm1(h + attn_out) # B x M x H if self.ff is not None: ff_out = self.ff(h) out = self.norm2(h + ff_out) # B x M x H else: out = h return out def get_cache_size(self): return self.attn.attn.get_cache_size() class TransformerSeq(nn.Module): def __init__( self, vocab_size, d_model, n_head, n_layer, attn_span, emb_dropout, aux_loss_scaler, adapt_span_layer, **kargs ): nn.Module.__init__(self) # token embeddings self.in_emb = nn.Embedding(vocab_size, d_model) nn.init.normal_(self.in_emb.weight, mean=0, std=d_model ** -0.5) self.out_emb = nn.Linear(d_model, vocab_size) self.aux_loss_scaler = aux_loss_scaler if emb_dropout > 0: self.emb_dropout = nn.Dropout(emb_dropout) else: self.emb_dropout = None # position embeddings self.key_pe = nn.Parameter(torch.randn(1, d_model // n_head, attn_span)) self.layers = nn.ModuleList() self.layers.extend( TransformerSeqLayer( d_model=d_model, n_head=n_head, attn_span=attn_span, adapt_span_layer=adapt_span_layer, **kargs ) for _ in range(n_layer) ) def forward(self, x, h_cache, target=None): # x size = B x M block_size = x.size(1) h = self.in_emb(x) # B x M x H if self.emb_dropout is not None: h = self.emb_dropout(h) h_cache_next = [] for l, layer in enumerate(self.layers): cache_size = layer.attn.attn.get_cache_size() if cache_size > block_size: h_cache_next_l = torch.cat( [h_cache[l][:, -cache_size + block_size :, :], h], dim=1 ).detach() else: h_cache_next_l = h[:, -cache_size:, :].detach() h_cache_next.append(h_cache_next_l) h = layer(h, h_cache[l], self.key_pe) # B x M x H if self.emb_dropout is not None: h = self.emb_dropout(h) out = F.log_softmax(self.out_emb(h).float(), dim=-1).type_as(h) dummy_loss = None return out, h_cache_next, dummy_loss def get_aux_loss(self): loss = 0.0 for layer in self.layers: loss += layer.attn.attn.adaptive_span.get_loss() return self.aux_loss_scaler * loss def get_current_max_span(self): max_span = 0.0 for layer in self.layers: max_span = max( max_span, layer.attn.attn.adaptive_span.get_current_max_span() ) return max_span def get_current_avg_span(self): avg_span = 0.0 for layer in self.layers: avg_span += layer.attn.attn.adaptive_span.get_current_avg_span() return avg_span / len(self.layers)
8,540
31.352273
87
py
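The _skew/_unskew helpers above are exact inverses on the unpadded region, which is easy to verify on a toy tensor. This sketch assumes both functions from the file above are in scope.

# Hedged sanity check: _unskew(_skew(X, pad)) recovers X for a B x M x L tensor.
import torch

B, M, L = 2, 3, 5
X = torch.arange(B * M * L, dtype=torch.float).view(B, M, L)

Y = _skew(X, pad_value=0)          # row i is shifted i steps to the right
assert Y.shape == (B, M, L + M)    # B x M x (L+M)
assert torch.equal(_unskew(Y), X)  # _unskew undoes _skew exactly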
sign-topic
sign-topic-main/examples/adaptive_span/adaptive_span_loss.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from dataclasses import dataclass

import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II


@dataclass
class AdaptiveSpanCriterionConfig(FairseqDataclass):
    sentence_avg: bool = II("optimization.sentence_avg")


@register_criterion("adaptive_span_loss", dataclass=AdaptiveSpanCriterionConfig)
class AdaptiveSpanCriterion(CrossEntropyCriterion):
    def __init__(self, task, sentence_avg):
        super().__init__(task, sentence_avg)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss here is summed, different from the adaptive span code
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample["net_input"])
        loss, aux_loss, avg_span, max_span = self.compute_loss(
            model, net_output, sample, reduce=reduce
        )
        sample_size = (
            sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
        )
        loss /= sample_size
        total_loss = loss + aux_loss
        sample_size = 1

        logging_output = {
            "loss": loss.data,
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
            "total_loss": total_loss.data,
            "avg_span": avg_span * sample_size,
            "max_span": max_span * sample_size,
        }
        return total_loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True):
        loss, _ = super().compute_loss(model, net_output, sample, reduce)
        aux_loss = model.get_aux_loss()
        avg_span = model.get_current_avg_span()
        max_span = model.get_current_max_span()
        return loss, aux_loss, avg_span, max_span

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
        total_loss_sum = sum(log.get("total_loss", 0) for log in logging_outputs)
        avg_span_sum = sum(log.get("avg_span", 0) for log in logging_outputs)
        max_span_sum = sum(log.get("max_span", 0) for log in logging_outputs)

        # we divide by log(2) to convert the loss from base e to base 2
        metrics.log_scalar(
            "loss", loss_sum / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_scalar("avg_span", avg_span_sum / sample_size, sample_size, round=3)
        metrics.log_scalar("max_span", max_span_sum / sample_size, sample_size, round=3)
        # total loss contains the L1 norm on adaptive-span
        metrics.log_scalar(
            "total_loss",
            total_loss_sum / sample_size / math.log(2),
            sample_size,
            round=3,
        )
        if sample_size != ntokens:
            metrics.log_scalar(
                "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
            )
            metrics.log_derived(
                "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
            )
        else:
            metrics.log_derived(
                "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
            )

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
4,233
38.570093
88
py
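The division by log(2) in reduce_metrics above converts a summed cross-entropy from nats to bits per token, from which perplexity follows as 2 to that power. A worked example with hypothetical logging values:

# Hedged sketch of the base-e -> base-2 conversion used for logging above.
import math

loss_sum, sample_size = 6.93, 10  # hypothetical summed loss (nats) and token count
loss_bits = loss_sum / sample_size / math.log(2)
print(round(loss_bits, 3))        # ~1.0 bit per token
print(round(2 ** loss_bits, 3))   # perplexity ~2.0 (utils.get_perplexity uses base 2)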
sign-topic
sign-topic-main/examples/adaptive_span/adaptive_span_model_wrapper.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from dataclasses import dataclass from typing import Dict, List, Optional import torch from fairseq.dataclass import FairseqDataclass from fairseq.models import ( FairseqIncrementalDecoder, FairseqLanguageModel, register_model, ) from .adaptive_span_model import TransformerSeq as AdaptiveSpanTransformerModel logger = logging.getLogger(__name__) @dataclass class AdaptiveSpanSmallConfig(FairseqDataclass): # defaults come from https://github.com/facebookresearch/adaptive-span/blob/master/experiments/enwik8_small.sh vocab_size: int = 50 d_model: int = 256 n_head: int = 4 d_inner: int = 1024 n_layer: int = 8 attn_span: int = 1024 dropout: float = 0.0 emb_dropout: float = 0.0 adapt_span_ramp: int = 32 adapt_span_init: float = 0.0 aux_loss_scaler: float = 0.000002 adapt_span_layer: bool = False @register_model("adaptive_span", dataclass=AdaptiveSpanSmallConfig) class AdaptiveSpanTransformer(FairseqLanguageModel): @classmethod def build_model(cls, cfg: AdaptiveSpanSmallConfig, task): return cls(AdaptiveSpanDecoder(cfg, task)) def get_aux_loss(self): return self.decoder.get_aux_loss() def get_current_max_span(self): return self.decoder.get_current_max_span() def get_current_avg_span(self): return self.decoder.get_current_avg_span() class AdaptiveSpanDecoder(FairseqIncrementalDecoder): def __init__(self, cfg, task): super().__init__(task.target_dictionary) self.config = cfg config = AdaptiveSpanSmallConfig( vocab_size=len(task.target_dictionary), d_model=cfg.d_model, n_head=cfg.n_head, d_inner=cfg.d_inner, n_layer=cfg.n_layer, attn_span=cfg.attn_span, dropout=cfg.dropout, emb_dropout=cfg.emb_dropout, adapt_span_ramp=cfg.adapt_span_ramp, adapt_span_init=cfg.adapt_span_init, aux_loss_scaler=cfg.aux_loss_scaler, adapt_span_layer=cfg.adapt_span_layer, ) logger.info(config) self.model = AdaptiveSpanTransformerModel(**config.__dict__) self._mems = None def forward( self, src_tokens, incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, encoder_out=None, ): bsz = src_tokens.size(0) if incremental_state is not None: # used during inference mems = self.get_incremental_state("mems") src_tokens = src_tokens[:, -1:] # only keep the most recent token else: mems = self._mems if mems is None: # first time init mems = self.init_hid_cache(bsz) output = self.model(x=src_tokens, h_cache=mems,) if incremental_state is not None: self.set_incremental_state(incremental_state, "mems", output[1]) else: self._mems = output[1] return (output[0],) def max_positions(self): return self.config.attn_span def init_hid_cache(self, batch_sz): hid = [] for layer in self.model.layers: param = next(self.model.parameters()) h = torch.zeros( batch_sz, layer.get_cache_size(), self.config.d_model, dtype=param.dtype, device=param.device, ) hid.append(h) return hid def get_aux_loss(self): return self.model.get_aux_loss() def get_current_max_span(self): return self.model.get_current_max_span() def get_current_avg_span(self): return self.model.get_current_avg_span() def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]], new_order: torch.Tensor, ): """Reorder incremental state. This will be called when the order of the input has changed from the previous time step. A typical use case is beam search, where the input order changes between time steps based on the selection of beams. 
""" raise NotImplementedError("This is required for generation/beam search") # mems = self.get_incremental_state(incremental_state, "mems") # if mems is not None: # new_mems = [mems_i.index_select(1, new_order) for mems_i in mems] # self.set_incremental_state(incremental_state, "mems", new_mems)
4,692
31.143836
114
py
sign-topic
sign-topic-main/examples/MMPT/setup.py
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="mmpt", version="0.0.1", author="Hu Xu, Po-yao Huang", author_email="huxu@fb.com", description="A package for multimodal pretraining.", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/pytorch/fairseq/examples/MMPT", packages=setuptools.find_packages(), install_requires=[ ], classifiers=[ "Programming Language :: Python :: 3", "License :: CC-BY-NC", "Operating System :: OS Independent", ], python_requires='>=3.6', )
668
25.76
59
py
sign-topic
sign-topic-main/examples/MMPT/mmpt_cli/predict.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
import pprint
import omegaconf

from omegaconf import OmegaConf
from torch.utils.data import DataLoader

from mmpt.utils import load_config, set_seed
from mmpt.evaluators import Evaluator
from mmpt.evaluators import predictor as predictor_path
from mmpt.tasks import Task
from mmpt import processors
from mmpt.datasets import MMDataset


def get_dataloader(config):
    meta_processor_cls = getattr(processors, config.dataset.meta_processor)
    video_processor_cls = getattr(processors, config.dataset.video_processor)
    text_processor_cls = getattr(processors, config.dataset.text_processor)
    aligner_cls = getattr(processors, config.dataset.aligner)

    meta_processor = meta_processor_cls(config.dataset)
    video_processor = video_processor_cls(config.dataset)
    text_processor = text_processor_cls(config.dataset)
    aligner = aligner_cls(config.dataset)

    test_data = MMDataset(
        meta_processor,
        video_processor,
        text_processor,
        aligner,
    )
    print("test_len", len(test_data))
    output = test_data[0]
    test_data.print_example(output)

    test_dataloader = DataLoader(
        test_data,
        batch_size=config.fairseq.dataset.batch_size,
        shuffle=False,
        num_workers=6,
        collate_fn=test_data.collater,
    )
    return test_dataloader


def main(args):
    config = load_config(args)

    if isinstance(config, omegaconf.dictconfig.DictConfig):
        print(OmegaConf.to_yaml(config))
    else:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(config)

    mmtask = Task.config_task(config)
    mmtask.build_model()

    test_dataloader = get_dataloader(config)
    checkpoint_search_path = os.path.dirname(config.eval.save_path)
    results = []

    prefix = os.path.basename(args.taskconfig)
    if prefix.startswith("test"):
        # loop all checkpoint for datasets without validation set.
        if "best" not in config.fairseq.common_eval.path:
            print("eval each epoch.")
            for checkpoint in glob.glob(checkpoint_search_path + "/checkpoint*"):
                model = mmtask.load_checkpoint(checkpoint)
                ckpt = os.path.basename(checkpoint)
                evaluator = Evaluator(config)
                output = evaluator.evaluate(
                    model, test_dataloader, ckpt + "_merged")
                results.append((checkpoint, output))

        # use the one specified by the config lastly.
        model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
        evaluator = Evaluator(config)
        output = evaluator.evaluate(model, test_dataloader)
        results.append((config.fairseq.common_eval.path, output))

        best_result = None
        best_metric = 0.
        for checkpoint, result in results:
            print(checkpoint)
            evaluator.metric.print_computed_metrics(result)
            best_score = evaluator.metric.best_metric(result)
            if best_score > best_metric:
                best_result = (checkpoint, result)
                best_metric = best_score

        print("best results:")
        print(best_result[0])
        evaluator.metric.print_computed_metrics(best_result[1])

    elif prefix.startswith("vis"):
        model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
        predictor_cls = getattr(predictor_path, config.predictor)
        predictor = predictor_cls(config)
        predictor.predict_loop(model, test_dataloader, mmtask, None)
    else:
        raise ValueError("unknown prefix of the config file", args.taskconfig)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("taskconfig", type=str)
    args = parser.parse_args()
    main(args)
3,937
33.54386
81
py
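A stubbed-out sketch of the best-checkpoint selection loop in main() above. The checkpoint names, metric key, and scores are hypothetical stand-ins for evaluator.metric.best_metric; only the selection logic is taken from the file.

# Hedged sketch: keep the checkpoint whose best metric is highest.
results = [
    ("checkpoint10.pt", {"R@1": 0.21}),  # hypothetical (checkpoint, output) pairs
    ("checkpoint20.pt", {"R@1": 0.34}),
]

best_result, best_metric = None, 0.0
for checkpoint, output in results:
    best_score = output["R@1"]  # stand-in for evaluator.metric.best_metric(output)
    if best_score > best_metric:
        best_result, best_metric = (checkpoint, output), best_score

print("best results:", best_result[0])  # checkpoint20.pt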
sign-topic
sign-topic-main/examples/MMPT/mmpt/modules/mm.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch import nn try: from transformers.modeling_bert import ( BertEmbeddings, ACT2FN, ) except ImportError: pass class VideoTokenMLP(nn.Module): def __init__(self, config): super().__init__() input_dim = config.input_dim if hasattr(config, "input_dim") else 512 self.linear1 = nn.Linear(input_dim, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size) self.activation = ACT2FN[config.hidden_act] self.linear2 = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states): hidden_states = self.linear1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) hidden_states = self.linear2(hidden_states) return hidden_states class MMBertEmbeddings(BertEmbeddings): def __init__(self, config): super().__init__(config) self.max_video_len = config.max_video_len if hasattr(config, "use_seg_emb") and config.use_seg_emb: """the original VLM paper uses seg_embeddings for temporal space. although not used it changed the randomness of initialization. we keep it for reproducibility. """ self.seg_embeddings = nn.Embedding(256, config.hidden_size) def forward( self, input_ids, input_video_embeds, token_type_ids=None, position_ids=None, inputs_embeds=None, ): input_tensor = input_ids if input_ids is not None else inputs_embeds if input_video_embeds is not None: input_shape = ( input_tensor.size(0), input_tensor.size(1) + input_video_embeds.size(1), ) else: input_shape = (input_tensor.size(0), input_tensor.size(1)) if position_ids is None: """ Auto skip position embeddings for text only case. use cases: (1) action localization and segmentation: feed in len-1 dummy video token needs text part to skip input_video_embeds.size(1) for the right position_ids for video [SEP] and rest text tokens. (2) MMFusionShare for two forward passings: in `forward_text`: input_video_embeds is None. need to skip video [SEP] token. # video_len + 1: [CLS] + video_embed # self.max_video_len + 1: [SEP] for video. # self.max_video_len + 2: [SEP] for video. # self.max_video_len + input_ids.size(1): rest for text. """ if input_video_embeds is not None: video_len = input_video_embeds.size(1) starting_offset = self.max_video_len + 1 # video [SEP] ending_offset = self.max_video_len + input_ids.size(1) else: video_len = 0 starting_offset = self.max_video_len + 2 # first text token. ending_offset = self.max_video_len + input_ids.size(1) + 1 position_ids = torch.cat([ self.position_ids[:, :video_len + 1], self.position_ids[:, starting_offset:ending_offset] ], dim=1) if token_type_ids is None: token_type_ids = torch.zeros( input_shape, dtype=torch.long, device=self.position_ids.device ) """ the format of input_ids is [CLS] [SEP] caption [SEP] padding. 
the goal is to build [CLS] video tokens [SEP] caption [SEP] . """ if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if input_video_embeds is not None: inputs_mm_embeds = torch.cat([ inputs_embeds[:, :1], input_video_embeds, inputs_embeds[:, 1:] ], dim=1) else: # text only for `MMFusionShare`. inputs_mm_embeds = inputs_embeds position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_mm_embeds + position_embeddings embeddings += token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class AlignHead(nn.Module): """this will load pre-trained weights for NSP, which is desirable.""" def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, dropout_pooled_output): logits = self.seq_relationship(dropout_pooled_output) return logits
5,537
36.931507
83
py
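The position-id surgery in MMBertEmbeddings.forward above reserves max_video_len slots for video tokens and resumes text positions after that reserved region. A standalone sketch with toy lengths, mirroring the case where input_video_embeds is present:

# Hedged sketch: position-id layout for [CLS] + video + [SEP] + text.
import torch

max_video_len, video_len = 8, 3
input_ids_len = 5  # [CLS] [SEP] tok tok [SEP]
position_ids = torch.arange(max_video_len + input_ids_len + 2).unsqueeze(0)

starting_offset = max_video_len + 1                  # position of the video [SEP]
ending_offset = max_video_len + input_ids_len
mm_position_ids = torch.cat([
    position_ids[:, :video_len + 1],                 # [CLS] + video frames
    position_ids[:, starting_offset:ending_offset],  # video [SEP] + text tokens
], dim=1)
print(mm_position_ids)  # tensor([[ 0,  1,  2,  3,  9, 10, 11, 12]])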
sign-topic
sign-topic-main/examples/MMPT/mmpt/modules/vectorpool.py
# Copyright (c) Facebook, Inc. All Rights Reserved import torch import os import numpy as np import pickle from . import retri from ..utils import get_local_rank class VectorPool(object): """ Base class of retrieval space. """ def __init__(self, config): from transformers import AutoConfig self.hidden_size = AutoConfig.from_pretrained( config.dataset.bert_name).hidden_size self.retriever_cls = getattr(retri, config.retriever_cls) def __call__(self, sample, **kwargs): raise NotImplementedError def build_retriver( self, retriever_cls=None, hidden_size=None, centroids=512, db_type="flatl2", examples_per_cent_to_train=48 ): """merge results from multiple gpus and return a retriver..""" self.retriver = retriever_cls( hidden_size, centroids, db_type, examples_per_cent_to_train) return self.retriver def __repr__(self): if hasattr(self, "retriver"): retriver_name = str(len(self.retriver)) else: retriver_name = "no retriver field yet" return self.__class__.__name__ \ + "(" + retriver_name + ")" class VideoVectorPool(VectorPool): """ average clips of a video as video representation. """ def __init__(self, config): super().__init__(config) self.build_retriver(self.retriever_cls, self.hidden_size) def __call__(self, sample, subsampling, **kwargs): hidden_states = ( sample["pooled_video"] + sample["pooled_text"]) / 2. hidden_states = hidden_states.view( -1, subsampling, hidden_states.size(-1)) hidden_states = torch.mean(hidden_states, dim=1) hidden_states = hidden_states.cpu().detach().numpy() video_ids = [] for offset_idx, video_id in enumerate(sample["video_id"]): if isinstance(video_id, tuple) and len(video_id) == 3: # a sharded video_id. video_id = video_id[0] video_ids.append(video_id) assert len(video_ids) == len(hidden_states) self.retriver.add( hidden_states.astype("float32"), video_ids ) class DistributedVectorPool(VectorPool): """ support sync of multiple gpus/nodes. """ def __init__(self, config): super().__init__(config) self.out_dir = os.path.join( config.fairseq.checkpoint.save_dir, "retri") os.makedirs(self.out_dir, exist_ok=True) self.hidden_states = [] self.video_ids = [] def build_retriver( self, retriever_cls=None, hidden_size=None, centroids=4096, db_type="flatl2", examples_per_cent_to_train=48 ): if retriever_cls is None: retriever_cls = self.retriever_cls if hidden_size is None: hidden_size = self.hidden_size """merge results from multiple gpus and return a retriver..""" if torch.distributed.is_initialized(): self.save() # sync saving. torch.distributed.barrier() world_size = torch.distributed.get_world_size() else: world_size = 1 self.retriver = retriever_cls( hidden_size, centroids, db_type, examples_per_cent_to_train) # each gpu process has its own retriever. 
for local_rank in range(world_size): if get_local_rank() == 0: print("load local_rank", local_rank) hidden_states, video_ids = self.load(local_rank) hidden_states = hidden_states.astype("float32") self.retriver.add(hidden_states, video_ids) return self.retriver def load(self, local_rank): hidden_states = np.load( os.path.join( self.out_dir, "hidden_state" + str(local_rank) + ".npy" ) ) with open( os.path.join( self.out_dir, "video_id" + str(local_rank) + ".pkl"), "rb") as fr: video_ids = pickle.load(fr) return hidden_states, video_ids def save(self): hidden_states = np.vstack(self.hidden_states) assert len(hidden_states) == len(self.video_ids), "{}, {}".format( len(hidden_states), len(self.video_ids) ) local_rank = torch.distributed.get_rank() \ if torch.distributed.is_initialized() else 0 np.save( os.path.join( self.out_dir, "hidden_state" + str(local_rank) + ".npy"), hidden_states) with open( os.path.join( self.out_dir, "video_id" + str(local_rank) + ".pkl"), "wb") as fw: pickle.dump( self.video_ids, fw, protocol=pickle.HIGHEST_PROTOCOL ) class DistributedVideoVectorPool(DistributedVectorPool): """ average clips of a video as video representation. """ def __call__(self, sample, subsampling, **kwargs): hidden_states = ( sample["pooled_video"] + sample["pooled_text"]) / 2. hidden_states = hidden_states.view( -1, subsampling, hidden_states.size(-1)) hidden_states = torch.mean(hidden_states, dim=1) hidden_states = hidden_states.cpu().detach().numpy() video_ids = [] for offset_idx, video_id in enumerate(sample["video_id"]): if isinstance(video_id, tuple) and len(video_id) == 3: # a sharded video_id. video_id = video_id[0] video_ids.append(video_id) assert len(video_ids) == len(hidden_states) self.hidden_states.append(hidden_states) self.video_ids.extend(video_ids) # ------------ the following are deprecated -------------- class TextClipVectorPool(VectorPool): def __init__(self, config): from transformers import AutoConfig hidden_size = AutoConfig.from_pretrained( config.dataset.bert_name).hidden_size retriever_cls = getattr(retri, config.retriever_cls) self.build_retriver(retriever_cls, hidden_size) def __call__(self, sample, **kwargs): clip_meta = sample["clip_meta"].cpu() assert torch.all(torch.le(clip_meta[:, 4], clip_meta[:, 5])) text_meta = [tuple(item.tolist()) for item in clip_meta[:, 3:]] if hasattr(self, "retriver"): # build_retriver is called. self.retriver.add( sample["pooled_text"].cpu().numpy().astype("float32"), text_meta ) else: raise NotImplementedError class MMClipVectorPool(VectorPool): """ Multimodal Clip-level vector pool. 
""" def __init__(self, out_dir): """use hidden_states to store `(video, text)`.""" """use video_ids to store `(video_id, start, end)`.""" super().__init__(out_dir) def __call__(self, sample, **kwargs): pooled_video = sample["pooled_video"].cpu().unsqueeze(1).numpy() pooled_text = sample["pooled_text"].cpu().unsqueeze(1).numpy() self.hidden_states.append( np.concatenate([pooled_video, pooled_text], axis=1) ) video_starts = sample["video_start"].cpu() video_ends = sample["video_end"].cpu() assert torch.all(torch.le(video_starts, video_ends)) text_starts = sample["text_start"].cpu() text_ends = sample["text_end"].cpu() assert torch.all(torch.le(text_starts, text_ends)) subsample_size = sample["pooled_video"].size(0) // len(sample["video_id"]) video_ids = [video_id for video_id in sample["video_id"] for _ in range(subsample_size) ] for video_id, video_start, video_end, text_start, text_end in zip( video_ids, video_starts, video_ends, text_starts, text_ends): self.video_ids.append(( video_id, (int(video_start), int(video_end)), (int(text_start), int(text_end)) ))
8,278
32.518219
82
py
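A toy sketch of the clip-to-video pooling performed in VideoVectorPool.__call__ above: `subsampling` consecutive clip vectors are averaged into one retrieval vector per video. The batch and hidden sizes are arbitrary.

# Hedged sketch of the clip averaging used for video retrieval vectors.
import torch

subsampling, hidden_size = 3, 4
pooled_video = torch.randn(2 * subsampling, hidden_size)  # 2 videos x 3 clips
pooled_text = torch.randn(2 * subsampling, hidden_size)

hidden_states = (pooled_video + pooled_text) / 2.0
hidden_states = hidden_states.view(-1, subsampling, hidden_size)
hidden_states = torch.mean(hidden_states, dim=1)  # average clips per video
print(hidden_states.shape)  # torch.Size([2, 4]): one retrieval vector per video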
sign-topic
sign-topic-main/examples/MMPT/mmpt/models/transformermodel.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch import nn try: from transformers.modeling_bert import ( BertPreTrainedModel, BertModel, BertEncoder, BertPredictionHeadTransform, ) except ImportError: pass from ..modules import VideoTokenMLP, MMBertEmbeddings # --------------- fine-tuning models --------------- class MMBertForJoint(BertPreTrainedModel): """A BertModel with isolated attention mask to separate modality.""" def __init__(self, config): super().__init__(config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.init_weights() def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, next_sentence_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, separate_forward_split=None, ): return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) video_tokens = self.videomlp(input_video_embeds) outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, separate_forward_split=separate_forward_split, ) return outputs class MMBertForTokenClassification(BertPreTrainedModel): """A BertModel similar to MMJointUni, with extra wrapper layer to be fine-tuned from other pretrained MMFusion model.""" def __init__(self, config): super().__init__(config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) # TODO(huxu): 779 is the number of classes for COIN: move to config? 
self.classifier = nn.Linear(config.hidden_size, 779) self.init_weights() def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, next_sentence_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, separate_forward_split=None, ): return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) video_tokens = self.videomlp(input_video_embeds) outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, separate_forward_split=separate_forward_split, ) return (self.classifier(outputs[0]),) # ------------ pre-training models ---------------- class MMBertForEncoder(BertPreTrainedModel): """A BertModel for Contrastive Learning.""" def __init__(self, config): super().__init__(config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.init_weights() def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_video_embeds is not None: video_tokens = self.videomlp(input_video_embeds) else: video_tokens = None outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return outputs class MMBertForMFMMLM(BertPreTrainedModel): """A BertModel with shared prediction head on MFM-MLM.""" def __init__(self, config): super().__init__(config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.cls = MFMMLMHead(config) self.hidden_size = config.hidden_size self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_frame_labels=None, target_video_hidden_states=None, non_masked_frame_mask=None, masked_lm_labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_video_embeds is not None: video_tokens = self.videomlp(input_video_embeds) else: video_tokens = None if target_video_hidden_states is not None: target_video_hidden_states = self.videomlp( target_video_hidden_states) non_masked_frame_hidden_states = video_tokens.masked_select( non_masked_frame_mask.unsqueeze(-1) ).view(-1, self.hidden_size) outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] mfm_scores, prediction_scores = None, None if masked_frame_labels is not None and masked_lm_labels is not None: # split the sequence. 
text_offset = masked_frame_labels.size(1) + 1 # [CLS] video_sequence_output = sequence_output[ :, 1:text_offset ] # remove [SEP] as not in video_label. text_sequence_output = torch.cat( [sequence_output[:, :1], sequence_output[:, text_offset:]], dim=1 ) hidden_size = video_sequence_output.size(-1) selected_video_output = video_sequence_output.masked_select( masked_frame_labels.unsqueeze(-1) ).view(-1, hidden_size) # only compute select tokens to training to speed up. hidden_size = text_sequence_output.size(-1) # masked_lm_labels = masked_lm_labels.reshape(-1) labels_mask = masked_lm_labels != -100 selected_text_output = text_sequence_output.masked_select( labels_mask.unsqueeze(-1) ).view(-1, hidden_size) mfm_scores, prediction_scores = self.cls( selected_video_output, target_video_hidden_states, non_masked_frame_hidden_states, selected_text_output, ) output = ( mfm_scores, prediction_scores, ) + outputs return output class BertMFMMLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear( config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly # resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward( self, video_hidden_states=None, target_video_hidden_states=None, non_masked_frame_hidden_states=None, text_hidden_states=None, ): video_logits, text_logits = None, None if video_hidden_states is not None: video_hidden_states = self.transform(video_hidden_states) non_masked_frame_logits = torch.mm( video_hidden_states, non_masked_frame_hidden_states.transpose(1, 0) ) masked_frame_logits = torch.bmm( video_hidden_states.unsqueeze(1), target_video_hidden_states.unsqueeze(-1), ).squeeze(-1) video_logits = torch.cat( [masked_frame_logits, non_masked_frame_logits], dim=1 ) if text_hidden_states is not None: text_hidden_states = self.transform(text_hidden_states) text_logits = self.decoder(text_hidden_states) return video_logits, text_logits class MFMMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertMFMMLMPredictionHead(config) def forward( self, video_hidden_states=None, target_video_hidden_states=None, non_masked_frame_hidden_states=None, text_hidden_states=None, ): video_logits, text_logits = self.predictions( video_hidden_states, target_video_hidden_states, non_masked_frame_hidden_states, text_hidden_states, ) return video_logits, text_logits class MMBertForMTM(MMBertForMFMMLM): def __init__(self, config): BertPreTrainedModel.__init__(self, config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.cls = MTMHead(config) self.hidden_size = config.hidden_size self.init_weights() class BertMTMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) self.decoder = nn.Linear( config.hidden_size, config.vocab_size, bias=False) def forward( self, video_hidden_states=None, target_video_hidden_states=None, non_masked_frame_hidden_states=None, text_hidden_states=None, ): non_masked_frame_hidden_states = non_masked_frame_hidden_states.transpose(1, 0) video_logits, text_logits = None, None if video_hidden_states is not None: video_hidden_states = self.transform(video_hidden_states) masked_frame_logits = torch.bmm( 
                video_hidden_states.unsqueeze(1),
                target_video_hidden_states.unsqueeze(-1),
            ).squeeze(-1)
            non_masked_frame_logits = torch.mm(
                video_hidden_states,
                non_masked_frame_hidden_states
            )
            video_on_vocab_logits = self.decoder(video_hidden_states)
            video_logits = torch.cat([
                masked_frame_logits,
                non_masked_frame_logits,
                video_on_vocab_logits], dim=1)

        if text_hidden_states is not None:
            text_hidden_states = self.transform(text_hidden_states)
            # text first so label does not need to be shifted.
            text_on_vocab_logits = self.decoder(text_hidden_states)
            text_on_video_logits = torch.mm(
                text_hidden_states,
                non_masked_frame_hidden_states
            )
            text_logits = torch.cat([
                text_on_vocab_logits,
                text_on_video_logits
            ], dim=1)

        return video_logits, text_logits


class MTMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = BertMTMPredictionHead(config)

    def forward(
        self,
        video_hidden_states=None,
        target_video_hidden_states=None,
        non_masked_frame_hidden_states=None,
        text_hidden_states=None,
    ):
        video_logits, text_logits = self.predictions(
            video_hidden_states,
            target_video_hidden_states,
            non_masked_frame_hidden_states,
            text_hidden_states,
        )
        return video_logits, text_logits


class MMBertModel(BertModel):
    """MMBertModel has MMBertEmbedding to support video tokens."""

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        # overwrite embedding
        self.embeddings = MMBertEmbeddings(config)
        self.encoder = MultiLayerAttentionMaskBertEncoder(config)
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        input_video_embeds=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        separate_forward_split=None,
    ):
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = (
            return_dict if return_dict is not None
            else self.config.use_return_dict
        )

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids "
                "and inputs_embeds at the same time"
            )
        elif input_ids is not None:
            if input_video_embeds is not None:
                input_shape = (
                    input_ids.size(0),
                    input_ids.size(1) + input_video_embeds.size(1),
                )
            else:
                input_shape = (
                    input_ids.size(0),
                    input_ids.size(1),
                )
        elif inputs_embeds is not None:
            if input_video_embeds is not None:
                input_shape = (
                    inputs_embeds.size(0),
                    inputs_embeds.size(1) + input_video_embeds.size(1),
                )
            else:
                # use inputs_embeds here: input_ids is None on this branch.
                input_shape = (
                    inputs_embeds.size(0),
                    inputs_embeds.size(1),
                )
        else:
            raise ValueError(
                "You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None \
            else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(
                input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case
        # we just need to make it broadcastable to all heads.
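        # Two mask ranks can reach this point: the usual 2-D [batch, seq_len]
        # mask, handled by the stock HuggingFace path, and the multi-layer 4-D
        # [batch, num_layers, seq_len, seq_len] mask built by MMFusion, handled
        # by the get_extended_attention_mask override further down.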
extended_attention_mask: torch.Tensor = \ self.get_extended_attention_mask( attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to # [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = ( encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones( encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask( encoder_attention_mask ) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or # [num_hidden_layers x num_heads] # and head_mask is converted to shape # [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask( head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids, input_video_embeds, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) if separate_forward_split is not None: split_embedding_output = \ embedding_output[:, :separate_forward_split] split_extended_attention_mask = extended_attention_mask[ :, :, :, :separate_forward_split, :separate_forward_split ] split_encoder_outputs = self.encoder( split_embedding_output, attention_mask=split_extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) assert ( len(split_encoder_outputs) <= 2 ), "we do not support merge on attention for now." encoder_outputs = [] encoder_outputs.append([split_encoder_outputs[0]]) if len(split_encoder_outputs) == 2: encoder_outputs.append([]) for _all_hidden_states in split_encoder_outputs[1]: encoder_outputs[-1].append([_all_hidden_states]) split_embedding_output = \ embedding_output[:, separate_forward_split:] split_extended_attention_mask = extended_attention_mask[ :, :, :, separate_forward_split:, separate_forward_split: ] split_encoder_outputs = self.encoder( split_embedding_output, attention_mask=split_extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) assert ( len(split_encoder_outputs) <= 2 ), "we do not support merge on attention for now." 
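            # Merge the two passes: hidden states (and, if requested, the
            # per-layer states) from the [separate_forward_split:] pass are
            # concatenated back onto the first pass along the sequence dim.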
encoder_outputs[0].append(split_encoder_outputs[0]) encoder_outputs[0] = torch.cat(encoder_outputs[0], dim=1) if len(split_encoder_outputs) == 2: for layer_idx, _all_hidden_states in enumerate( split_encoder_outputs[1] ): encoder_outputs[1][layer_idx].append(_all_hidden_states) encoder_outputs[1][layer_idx] = torch.cat( encoder_outputs[1][layer_idx], dim=1 ) encoder_outputs = tuple(encoder_outputs) else: encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = ( self.pooler(sequence_output) if self.pooler is not None else None ) return (sequence_output, pooled_output) + encoder_outputs[1:] def get_extended_attention_mask(self, attention_mask, input_shape, device): """This is borrowed from `modeling_utils.py` with the support of multi-layer attention masks. The second dim is expected to be number of layers. See `MMAttentionMaskProcessor`. Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, \ with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions # [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable # to all heads. 
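        # Worked shape example (illustrative sizes): a multi-layer mask of
        # shape [batch, num_layers, from_seq, to_seq] gains a singleton head
        # dim, becoming [batch, num_layers, 1, from_seq, to_seq], and is
        # remapped 1 -> 0.0 (keep) / 0 -> -10000.0 (drop) for additive use.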
if attention_mask.dim() == 4: extended_attention_mask = attention_mask[:, :, None, :, :] extended_attention_mask = extended_attention_mask.to( dtype=self.dtype ) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) \ * -10000.0 return extended_attention_mask else: return super().get_extended_attention_mask( attention_mask, input_shape, device ) class MultiLayerAttentionMaskBertEncoder(BertEncoder): """extend BertEncoder with the capability of multiple layers of attention mask.""" def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_attention_mask = ( attention_mask[:, i, :, :, :] if attention_mask.dim() == 5 else attention_mask ) if getattr(self.config, "gradient_checkpointing", False): def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layer_attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, layer_attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) return tuple( v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None )
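

# ---------------------------------------------------------------------------
# Standalone sanity-check sketch (not part of the original file): it mirrors
# the 4-D branch of MMBertModel.get_extended_attention_mask above with plain
# tensors, so the broadcasting can be verified without building a model. All
# sizes below are illustrative assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    bsz, num_layers, seq_len = 2, 12, 7
    # one [from, to] mask per layer; 1 = attend, 0 = ignore.
    layer_masks = torch.ones(bsz, num_layers, seq_len, seq_len)
    layer_masks[:, :6, :3, 3:] = 0  # e.g. isolate the first 3 tokens in layers 0-5
    extended = layer_masks[:, :, None, :, :]      # add the broadcast head dim
    extended = (1.0 - extended) * -10000.0        # additive attention values
    assert extended.shape == (bsz, num_layers, 1, seq_len, seq_len)
    print(extended.shape)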
26,064
34.462585
87
py
sign-topic
sign-topic-main/examples/MMPT/mmpt/models/mmfusionnlg.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch.nn import functional as F from typing import Optional, Iterable try: from transformers import BertPreTrainedModel from transformers.modeling_bert import BertOnlyMLMHead from transformers.file_utils import ModelOutput from transformers.modeling_outputs import CausalLMOutput from transformers.generation_utils import ( BeamHypotheses, top_k_top_p_filtering ) except ImportError: pass from .mmfusion import MMFusion from .transformermodel import MMBertModel from ..modules import VideoTokenMLP class MMFusionNLG(MMFusion): def __init__(self, config, **kwargs): super().__init__(config) if config.model.max_decode_length is not None: self.max_length = min( config.model.max_decode_length, config.dataset.max_len - config.dataset.max_video_len - 3 ) else: self.max_length = \ config.dataset.max_len - config.dataset.max_video_len - 3 self.gen_param = config.gen_param if config.gen_param is not None \ else {} def forward( self, caps, cmasks, vfeats, vmasks, attention_mask, video_label=None, text_label=None, **kwargs ): """use pre-trained LM header for generation.""" attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) outputs = self.mm_encoder( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, masked_lm_labels=text_label, ) return {"logits": outputs[0]} @torch.no_grad() def generate( self, caps, cmasks, vfeats, vmasks, attention_mask=None, bos_token_id=None, eos_token_id=None, **kwargs ): # a simplified interface from # https://huggingface.co/transformers/v3.4.0/_modules/transformers/generation_utils.html#GenerationMixin.generate # caps now only have # [CLS], [SEP] (for video) and [CLS] (as bos_token) assert caps.size(1) == 3 attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) output = self.mm_encoder.generate( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, bos_token_id=bos_token_id, eos_token_id=eos_token_id, max_length=self.max_length, **self.gen_param ) return output class MMBertForNLG(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = MMBertModel(config) self.videomlp = VideoTokenMLP(config) # we do not use `BertGenerationOnlyLMHead` # because we can reuse pretraining. 
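        # (BertOnlyMLMHead keeps the standard MLM transform + vocab decoder,
        # so weights learned during masked-LM pre-training carry over.)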
self.cls = BertOnlyMLMHead(config) self.hidden_size = config.hidden_size self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # similar to MMBertForMFMMLM without MFM. video_tokens = self.videomlp(input_video_embeds) outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = None if masked_lm_labels is not None: text_offset = input_video_embeds.size(1) + 1 # [CLS] # recover caps format: [CLS] [SEP] text [SEP] text_sequence_output = torch.cat( [sequence_output[:, :1], sequence_output[:, text_offset:]], dim=1 ) # only compute select tokens to training to speed up. hidden_size = text_sequence_output.size(-1) # masked_lm_labels = masked_lm_labels.reshape(-1) labels_mask = masked_lm_labels != -100 selected_text_output = text_sequence_output.masked_select( labels_mask.unsqueeze(-1) ).view(-1, hidden_size) prediction_scores = self.cls(selected_text_output) if not return_dict: output = ( prediction_scores, ) + outputs[2:] return output # for generation. text_offset = input_video_embeds.size(1) + 2 # [CLS] text_sequence_output = sequence_output[:, text_offset:] prediction_scores = self.cls(text_sequence_output) return CausalLMOutput( loss=None, logits=prediction_scores, ) def prepare_inputs_for_generation( self, input_ids, input_video_embeds, attention_mask=None, token_type_ids=None, **model_kwargs ): # must return a dictionary. seq_len = input_ids.size(1) + input_video_embeds.size(1) if attention_mask is not None: if len(attention_mask.size()) == 4: attention_mask = attention_mask[:, :, :seq_len, :seq_len] elif len(attention_mask.size()) == 3: attention_mask = attention_mask[:, :seq_len, :seq_len] else: attention_mask = attention_mask[:, :seq_len] if token_type_ids is not None: token_type_ids = token_type_ids[:, :seq_len] return { "input_ids": input_ids, "input_video_embeds": input_video_embeds, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, max_length: Optional[int] = None, min_length: Optional[int] = None, do_sample: Optional[bool] = None, early_stopping: Optional[bool] = None, num_beams: Optional[int] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, bad_words_ids: Optional[Iterable[int]] = None, bos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, no_repeat_ngram_size: Optional[int] = None, num_return_sequences: Optional[int] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_start_token_id: Optional[int] = None, use_cache: Optional[bool] = None, **model_kwargs ) -> torch.LongTensor: r""" Generates sequences for models with a language modeling head. 
The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with
        top-k or nucleus sampling.

        Adapted in part from `Facebook's XLM beam search code
        <https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__.

        Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the
        attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values
        indicated are the default values of those config attributes.

        Most of these parameters are explained in more detail in `this blog post
        <https://huggingface.co/blog/how-to-generate>`__.

        Parameters:

            input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
                The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty
                :obj:`torch.LongTensor` of shape :obj:`(1,)`.
            decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
                initial input_ids for the decoder of encoder-decoder type models. If :obj:`None` then only
                decoder_start_token_id is passed as the first token to the decoder.
            max_length (:obj:`int`, `optional`, defaults to 20):
                The maximum length of the sequence to be generated.
            min_length (:obj:`int`, `optional`, defaults to 10):
                The minimum length of the sequence to be generated.
            do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to use sampling; use greedy decoding otherwise.
            early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
            num_beams (:obj:`int`, `optional`, defaults to 1):
                Number of beams for beam search. 1 means no beam search.
            temperature (:obj:`float`, `optional`, defaults to 1.0):
                The value used to modulate the next token probabilities.
            top_k (:obj:`int`, `optional`, defaults to 50):
                The number of highest probability vocabulary tokens to keep for top-k-filtering.
            top_p (:obj:`float`, `optional`, defaults to 1.0):
                If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or
                higher are kept for generation.
            repetition_penalty (:obj:`float`, `optional`, defaults to 1.0):
                The parameter for repetition penalty. 1.0 means no penalty. See `this paper
                <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
            pad_token_id (:obj:`int`, `optional`):
                The id of the `padding` token.
            bos_token_id (:obj:`int`, `optional`):
                The id of the `beginning-of-sequence` token.
            eos_token_id (:obj:`int`, `optional`):
                The id of the `end-of-sequence` token.
            length_penalty (:obj:`float`, `optional`, defaults to 1.0):
                Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the
                model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer
                sequences.
            no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
                If set to int > 0, all ngrams of that size can only occur once.
            bad_words_ids (:obj:`List[int]`, `optional`):
                List of token ids that are not allowed to be generated. In order to get the tokens of the words that
                should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
            num_return_sequences (:obj:`int`, `optional`, defaults to 1):
                The number of independently computed returned sequences for each element in the batch.
attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_start_token_id (:obj:`int`, `optional`): If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token. use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. model_kwargs: Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. Return: :obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. Examples:: tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. outputs = model.generate(max_length=40) # do greedy decoding print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache. input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' for i in range(3): # 3 output sequences were generated print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling for i in range(3): # 3 output sequences were generated print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache. input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache. 
            input_context = 'My cute dog'
            bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
            input_ids = tokenizer.encode(input_context, return_tensors='pt')  # encode input context
            outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids)  # generate sequences without allowing bad_words to be generated
        """

        # We cannot generate if the model does not have a LM head
        if self.get_output_embeddings() is None:
            raise AttributeError(
                "You tried to generate sequences with a model that does not have a LM Head. "
                "Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
            )

        max_length = max_length if max_length is not None else self.config.max_length
        min_length = min_length if min_length is not None else self.config.min_length
        do_sample = do_sample if do_sample is not None else self.config.do_sample
        early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        num_beams = num_beams if num_beams is not None else self.config.num_beams
        temperature = temperature if temperature is not None else self.config.temperature
        top_k = top_k if top_k is not None else self.config.top_k
        top_p = top_p if top_p is not None else self.config.top_p
        repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
        bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
        pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
        length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
        no_repeat_ngram_size = (
            no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
        )
        bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
        num_return_sequences = (
            num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
        )
        decoder_start_token_id = (
            decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
        )

        if input_ids is not None:
            batch_size = input_ids.shape[0]  # overridden by the input batch_size
        else:
            batch_size = 1

        assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
        assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
        assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
        assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
        assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
        assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
        assert temperature > 0, "`temperature` should be strictly positive."
        assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
        assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
        assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or ( isinstance(bos_token_id, int) and bos_token_id >= 0 ), "If input_ids is not defined, `bos_token_id` should be a positive integer." assert pad_token_id is None or ( isinstance(pad_token_id, int) and (pad_token_id >= 0) ), "`pad_token_id` should be a positive integer." assert (eos_token_id is None) or ( isinstance(eos_token_id, int) and (eos_token_id >= 0) ), "`eos_token_id` should be a positive integer." assert length_penalty > 0, "`length_penalty` should be strictly positive." assert ( isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0 ), "`no_repeat_ngram_size` should be a positive integer." assert ( isinstance(num_return_sequences, int) and num_return_sequences > 0 ), "`num_return_sequences` should be a strictly positive integer." assert ( bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list) ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated" if input_ids is None: assert isinstance(bos_token_id, int) and bos_token_id >= 0, ( "you should either supply a context to complete as `input_ids` input " "or a `bos_token_id` (integer >= 0) as a first token to start the generation." ) input_ids = torch.full( (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device, ) else: assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)." # not allow to duplicate outputs when greedy decoding if do_sample is False: if num_beams == 1: # no_beam_search greedy generation conditions assert ( num_return_sequences == 1 ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1" else: # beam_search greedy generation conditions assert ( num_beams >= num_return_sequences ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences" # create attention mask if necessary # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140 if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids): attention_mask = input_ids.ne(pad_token_id).long() elif attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) # set pad_token_id to eos_token_id if not set. 
Important that this is done after # attention_mask is created if pad_token_id is None and eos_token_id is not None: print( "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id) ) pad_token_id = eos_token_id # vocab size if hasattr(self.config, "vocab_size"): vocab_size = self.config.vocab_size elif ( self.config.is_encoder_decoder and hasattr(self.config, "decoder") and hasattr(self.config.decoder, "vocab_size") ): vocab_size = self.config.decoder.vocab_size else: raise ValueError("either self.config.vocab_size or self.config.decoder.vocab_size needs to be defined") # set effective batch size and effective batch multiplier according to do_sample if do_sample: effective_batch_size = batch_size * num_return_sequences effective_batch_mult = num_return_sequences else: effective_batch_size = batch_size effective_batch_mult = 1 if self.config.is_encoder_decoder: if decoder_start_token_id is None: # see if BOS token can be used for decoder_start_token_id if bos_token_id is not None: decoder_start_token_id = bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): decoder_start_token_id = self.config.decoder.bos_token_id else: raise ValueError( "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation" ) assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self) assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder) # get encoder and store encoder outputs encoder = self.get_encoder() encoder_outputs: ModelOutput = encoder(input_ids, attention_mask=attention_mask, return_dict=True) # Expand input ids if num_beams > 1 or num_return_sequences > 1 if num_return_sequences > 1 or num_beams > 1: # TODO: make this a call-back function. # input_ids=caps, # input_video_embeds=vfeats, # attention_mask=attention_mask, # token_type_ids=token_type_ids, input_video_embeds = model_kwargs.pop("input_video_embeds", None) token_type_ids = model_kwargs.pop("token_type_ids", None) input_ids_len = input_ids.shape[-1] input_ids = input_ids.unsqueeze(1).expand( batch_size, effective_batch_mult * num_beams, input_ids_len) input_video_embeds_len, input_video_embeds_hidden = input_video_embeds.size(1), input_video_embeds.size(2) input_video_embeds = input_video_embeds.unsqueeze(1).expand( batch_size, effective_batch_mult * num_beams, input_video_embeds_len, input_video_embeds_hidden) attention_mask_from_len, attention_mask_to_len = attention_mask.size(1), attention_mask.size(2) attention_mask = attention_mask.unsqueeze(1).expand( batch_size, effective_batch_mult * num_beams, attention_mask_from_len, attention_mask_to_len ) token_type_ids_len = token_type_ids.size(1) token_type_ids = token_type_ids.unsqueeze(1).expand( batch_size, effective_batch_mult * num_beams, token_type_ids_len ) # contiguous ... 
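            # Worked shape example (illustrative sizes): with batch_size=2,
            # num_beams=3 and greedy search (effective_batch_mult=1), the views
            # below flatten input_ids from [2, 3, L] to [6, L]; video features,
            # attention masks and token_type_ids are flattened the same way so
            # each beam carries its own copy.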
input_ids = input_ids.contiguous().view( effective_batch_size * num_beams, input_ids_len ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) input_video_embeds = input_video_embeds.contiguous().view( effective_batch_size * num_beams, input_video_embeds_len, input_video_embeds_hidden) attention_mask = attention_mask.contiguous().view( effective_batch_size * num_beams, attention_mask_from_len, attention_mask_to_len ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) token_type_ids = token_type_ids.contiguous().view( effective_batch_size * num_beams, token_type_ids_len ) model_kwargs["input_video_embeds"] = input_video_embeds model_kwargs["token_type_ids"] = token_type_ids if self.config.is_encoder_decoder: device = next(self.parameters()).device if decoder_input_ids is not None: # give initial decoder input ids input_ids = decoder_input_ids.repeat(effective_batch_size * num_beams, 1).to(device) else: # create empty decoder input_ids input_ids = torch.full( (effective_batch_size * num_beams, 1), decoder_start_token_id, dtype=torch.long, device=device, ) cur_len = input_ids.shape[-1] assert ( batch_size == encoder_outputs.last_hidden_state.shape[0] ), f"expected encoder_outputs.last_hidden_state to have 1st dimension bs={batch_size}, got {encoder_outputs.last_hidden_state.shape[0]} " # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1) expanded_batch_idxs = ( torch.arange(batch_size) .view(-1, 1) .repeat(1, num_beams * effective_batch_mult) .view(-1) .to(input_ids.device) ) # expand encoder_outputs encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( 0, expanded_batch_idxs ) # save encoder_outputs in `model_kwargs` model_kwargs["encoder_outputs"] = encoder_outputs else: cur_len = input_ids.shape[-1] assert ( cur_len < max_length ), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. 
Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`" if num_beams > 1: output = self._generate_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, early_stopping=early_stopping, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, num_return_sequences=num_return_sequences, length_penalty=length_penalty, num_beams=num_beams, vocab_size=vocab_size, attention_mask=attention_mask, use_cache=use_cache, model_kwargs=model_kwargs, ) else: output = self._generate_no_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, attention_mask=attention_mask, use_cache=use_cache, model_kwargs=model_kwargs, ) return output def _generate_beam_search( self, input_ids, cur_len, max_length, min_length, do_sample, early_stopping, temperature, top_k, top_p, repetition_penalty, no_repeat_ngram_size, bad_words_ids, pad_token_id, eos_token_id, batch_size, num_return_sequences, length_penalty, num_beams, vocab_size, attention_mask, use_cache, model_kwargs, ): """Generate sequences for each example with beam search.""" # generated hypotheses generated_hyps = [ BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping) for _ in range(batch_size) ] # scores for each sentence in the beam beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times if do_sample is False: beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,) # cache compute states past = None # done sentences done = [False for _ in range(batch_size)] while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation( input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs ) outputs = self(**model_inputs, return_dict=True) # (batch_size * num_beams, cur_len, vocab_size) next_token_logits = outputs.logits[:, -1, :] # (batch_size * num_beams, vocab_size) # if model has past, then set the past variable to speed up decoding if "past_key_values" in outputs: past = outputs.past_key_values elif "mems" in outputs: past = outputs.mems if self.config.is_encoder_decoder and do_sample is False: # TODO (PVP) still a bit hacky here - there might be a better solution next_token_logits = self.adjust_logits_during_generation( next_token_logits, cur_len=cur_len, max_length=max_length ) scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size) scores = self.postprocess_next_token_scores( scores=scores, input_ids=input_ids, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, cur_len=cur_len, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, repetition_penalty=repetition_penalty, batch_size=batch_size, num_beams=num_beams, ) assert scores.shape == (batch_size * num_beams, vocab_size), 
"Shapes of scores: {} != {}".format( scores.shape, (batch_size * num_beams, vocab_size) ) if do_sample: _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size) # Temperature if temperature != 1.0: _scores = _scores / temperature # Top-p/top-k filtering _scores = top_k_top_p_filtering( _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2 ) # (batch_size * num_beams, vocab_size) # re-organize to group the beam together to sample from all beam_idxs _scores = _scores.contiguous().view( batch_size, num_beams * vocab_size ) # (batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search) probs = F.softmax(_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2) # Compute next scores next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2) # sort the sampled vector to make sure that the first num_beams samples are the best next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2) else: next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size) # re-organize to group the beam together (we are keeping top hypothesis accross beams) next_scores = next_scores.view( batch_size, num_beams * vocab_size ) # (batch_size, num_beams * vocab_size) next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True) assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams) # next batch beam content next_batch_beam = [] # for each sentence for batch_idx in range(batch_size): # if we are done with this sentence, add a pad token if done[batch_idx]: assert ( len(generated_hyps[batch_idx]) >= num_beams ), "Batch can only be done if at least {} beams have been generated".format(num_beams) assert ( eos_token_id is not None and pad_token_id is not None ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined" next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch continue # next sentence beam content, this will get added to next_batch_beam next_sent_beam = [] # next tokens for this sentence for beam_token_rank, (beam_token_id, beam_token_score) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx]) ): # get beam and token IDs beam_id = beam_token_id // vocab_size token_id = beam_token_id % vocab_size effective_beam_id = batch_idx * num_beams + beam_id # add to generated hypotheses if end of sentence if (eos_token_id is not None) and (token_id.item() == eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams if is_beam_token_worse_than_top_num_beams: continue generated_hyps[batch_idx].add( input_ids[effective_beam_id].clone(), beam_token_score.item(), ) else: # add next predicted token since it is not eos_token next_sent_beam.append((beam_token_score, token_id, effective_beam_id)) # once the beam for next step is full, don't add more tokens to it. 
if len(next_sent_beam) == num_beams: break # Check if we are done so that we can save a pad step if all(done) done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done( next_scores[batch_idx].max().item(), cur_len ) # update next beam content assert len(next_sent_beam) == num_beams, "Beam should always be full" next_batch_beam.extend(next_sent_beam) assert len(next_batch_beam) == num_beams * (batch_idx + 1), "We should have added num_beams each step" # stop when we are done with each sentence if all(done): break # sanity check / prepare next batch assert len(next_batch_beam) == batch_size * num_beams beam_scores = beam_scores.new([x[0] for x in next_batch_beam]) beam_tokens = input_ids.new([x[1] for x in next_batch_beam]) beam_idx = input_ids.new([x[2] for x in next_batch_beam]) # re-order batch and update current length input_ids = input_ids[beam_idx, :] input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1) cur_len = cur_len + 1 # re-order internal states if past is not None: past = self._reorder_cache(past, beam_idx) # extend attention_mask for new generated input if only decoder # (huxu): move out since we trim attention_mask by ourselves. # if self.config.is_encoder_decoder is False: # attention_mask = torch.cat( # [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 # ) # finalize all open beam hypotheses and add to generated hypotheses for batch_idx in range(batch_size): if done[batch_idx]: continue # test that beam scores match previously calculated scores if not eos and batch_idx not done if eos_token_id is not None and all( (token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx] ): assert torch.all( next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx] ), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format( next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx], ) # need to add best num_beams hypotheses to generated hyps for beam_id in range(num_beams): effective_beam_id = batch_idx * num_beams + beam_id final_score = beam_scores[effective_beam_id].item() final_tokens = input_ids[effective_beam_id] generated_hyps[batch_idx].add(final_tokens, final_score) # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch output_batch_size = batch_size if do_sample else batch_size * num_return_sequences output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences # select the best hypotheses sent_lengths = input_ids.new(output_batch_size) best = [] # retrieve best hypotheses for i, hypotheses in enumerate(generated_hyps): sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0]) for j in range(output_num_return_sequences_per_batch): effective_batch_idx = output_num_return_sequences_per_batch * i + j best_hyp = sorted_hyps.pop()[1] sent_lengths[effective_batch_idx] = len(best_hyp) best.append(best_hyp) # prepare for adding eos sent_max_len = min(sent_lengths.max().item() + 1, max_length) decoded = input_ids.new(output_batch_size, sent_max_len) # shorter batches are padded if needed if sent_lengths.min().item() != sent_lengths.max().item(): assert pad_token_id is not None, "`pad_token_id` has to be defined" decoded.fill_(pad_token_id) # fill with hypotheses and eos_token_id if the latter fits in for i, hypo in enumerate(best): decoded[i, : sent_lengths[i]] = hypo if sent_lengths[i] < max_length: 
                decoded[i, sent_lengths[i]] = eos_token_id

        return decoded

    def _generate_no_beam_search(
        self,
        input_ids,
        cur_len,
        max_length,
        min_length,
        do_sample,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        no_repeat_ngram_size,
        bad_words_ids,
        pad_token_id,
        eos_token_id,
        batch_size,
        attention_mask,
        use_cache,
        model_kwargs,
    ):
        """Generate sequences for each example without beam search (num_beams == 1).
        All returned sequences are generated independently.
        """
        # length of generated sentences / unfinished sentences
        unfinished_sents = input_ids.new(batch_size).fill_(1)
        sent_lengths = input_ids.new(batch_size).fill_(max_length)

        past = None
        while cur_len < max_length:
            model_inputs = self.prepare_inputs_for_generation(
                input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs
            )

            outputs = self(**model_inputs, return_dict=True)
            next_token_logits = outputs.logits[:, -1, :]

            scores = self.postprocess_next_token_scores(
                scores=next_token_logits,
                input_ids=input_ids,
                no_repeat_ngram_size=no_repeat_ngram_size,
                bad_words_ids=bad_words_ids,
                cur_len=cur_len,
                min_length=min_length,
                max_length=max_length,
                eos_token_id=eos_token_id,
                repetition_penalty=repetition_penalty,
                batch_size=batch_size,
                num_beams=1,
            )

            # if model has past, then set the past variable to speed up decoding
            if "past_key_values" in outputs:
                past = outputs.past_key_values
            elif "mems" in outputs:
                past = outputs.mems

            if do_sample:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if temperature != 1.0:
                    scores = scores / temperature
                # Top-p/top-k filtering
                next_token_logscores = top_k_top_p_filtering(scores, top_k=top_k, top_p=top_p)
                # Sample
                probs = F.softmax(next_token_logscores, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
            else:
                # Greedy decoding
                next_token = torch.argmax(next_token_logits, dim=-1)

            # print(next_token_logits[0,next_token[0]], next_token_logits[0,eos_token_id])

            # update generations and finished sentences
            if eos_token_id is not None:
                # pad finished sentences if eos_token_id exist
                tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
            else:
                tokens_to_add = next_token

            # add token and increase length by one
            input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
            cur_len = cur_len + 1

            if eos_token_id is not None:
                eos_in_sents = tokens_to_add == eos_token_id
                # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
                is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
                sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len)
                # unfinished_sents is set to zero if eos in sentence
                unfinished_sents.mul_((~eos_in_sents).long())

            # stop when there is a </s> in each sentence, or if we exceed the maximum length
            if unfinished_sents.max() == 0:
                break

            # extend attention_mask for new generated input if only decoder
            # if self.config.is_encoder_decoder is False:
            #     attention_mask = torch.cat(
            #         [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
            #     )

        return input_ids
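

# ---------------------------------------------------------------------------
# Standalone sketch (not part of the original file): it reproduces the mask
# trimming done in MMBertForNLG.prepare_inputs_for_generation above, where a
# pre-built 4-D multi-layer attention mask is cut down to the tokens seen so
# far at every decoding step. The sizes are illustrative assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    bsz, num_layers, max_len = 1, 12, 48
    full_mask = torch.ones(bsz, num_layers, max_len, max_len)
    cur_len = 35  # e.g. video tokens, special tokens and text decoded so far
    step_mask = full_mask[:, :, :cur_len, :cur_len]
    assert step_mask.shape == (bsz, num_layers, cur_len, cur_len)
    print(step_mask.shape)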
48,394
47.395
246
py
sign-topic
sign-topic-main/examples/MMPT/mmpt/models/mmfusion.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch import nn try: from transformers import AutoConfig, AutoTokenizer except ImportError: pass from . import transformermodel class MMPTModel(nn.Module): """An e2e wrapper of inference model. """ @classmethod def from_pretrained(cls, config, checkpoint="checkpoint_best.pt"): import os from ..utils import recursive_config from ..tasks import Task config = recursive_config(config) mmtask = Task.config_task(config) checkpoint_path = os.path.join(config.eval.save_path, checkpoint) mmtask.build_model(checkpoint=checkpoint_path) # TODO(huxu): make the video encoder configurable. from ..processors.models.s3dg import S3D video_encoder = S3D('pretrained_models/s3d_dict.npy', 512) video_encoder.load_state_dict( torch.load('pretrained_models/s3d_howto100m.pth')) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name, use_fast=config.dataset.use_fast ) from ..processors import Aligner aligner = Aligner(config.dataset) return ( MMPTModel(config, mmtask.model, video_encoder), tokenizer, aligner ) def __init__(self, config, model, video_encoder, **kwargs): super().__init__() self.max_video_len = config.dataset.max_video_len self.video_encoder = video_encoder self.model = model def forward(self, video_frames, caps, cmasks, return_score=False): bsz = video_frames.size(0) assert bsz == 1, "only bsz=1 is supported now." seq_len = video_frames.size(1) video_frames = video_frames.view(-1, *video_frames.size()[2:]) vfeats = self.video_encoder(video_frames.permute(0, 4, 1, 2, 3)) vfeats = vfeats['video_embedding'] vfeats = vfeats.view(bsz, seq_len, vfeats.size(-1)) padding = torch.zeros( bsz, self.max_video_len - seq_len, vfeats.size(-1)) vfeats = torch.cat([vfeats, padding], dim=1) vmasks = torch.cat([ torch.ones((bsz, seq_len), dtype=torch.bool), torch.zeros((bsz, self.max_video_len - seq_len), dtype=torch.bool) ], dim=1 ) output = self.model(caps, cmasks, vfeats, vmasks) if return_score: output = {"score": torch.bmm( output["pooled_video"][:, None, :], output["pooled_text"][:, :, None] ).squeeze(-1).squeeze(-1)} return output class MMFusion(nn.Module): """a MMPT wrapper class for MMBert style models. TODO: move isolated mask to a subclass. """ def __init__(self, config, **kwargs): super().__init__() transformer_config = AutoConfig.from_pretrained( config.dataset.bert_name) self.hidden_size = transformer_config.hidden_size self.is_train = False if config.dataset.train_path is not None: self.is_train = True # 0 means no iso; 1-12 means iso up to that layer. 
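        # (Example, derived from _mm_attention_mask below: num_iso_layer=6
        # gives last_iso_layer=6, so layers 0-5 use the isolated per-modality
        # mask and layers 6-11 use the joint multimodal mask.)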
self.num_hidden_layers = transformer_config.num_hidden_layers self.last_iso_layer = 0 if config.dataset.num_iso_layer is not None: self.last_iso_layer = config.dataset.num_iso_layer - 1 + 1 if config.model.mm_encoder_cls is not None: mm_encoder_cls = getattr(transformermodel, config.model.mm_encoder_cls) model_config = AutoConfig.from_pretrained(config.dataset.bert_name) model_config.max_video_len = config.dataset.max_video_len # TODO: a general way to add parameter for a model. model_config.use_seg_emb = config.model.use_seg_emb self.mm_encoder = mm_encoder_cls.from_pretrained( config.dataset.bert_name, config=model_config) elif config.model.video_encoder_cls is not None\ and config.model.text_encoder_cls is not None: video_encoder_cls = getattr(transformermodel, config.model.video_encoder_cls) model_config = AutoConfig.from_pretrained(config.dataset.bert_name) model_config.max_video_len = config.dataset.max_video_len # TODO: make each model a set of config class. if hasattr(model_config, "num_layers"): model_config.num_layers = config.model.num_hidden_video_layers else: model_config.num_hidden_layers = config.model.num_hidden_video_layers self.video_encoder = video_encoder_cls.from_pretrained( config.dataset.bert_name, config=model_config) # exact same NLP model from Huggingface. text_encoder_cls = getattr(transformermodel, config.model.text_encoder_cls) self.text_encoder = text_encoder_cls.from_pretrained( config.dataset.bert_name) else: raise ValueError("the encoder must be either MM or two backbones.") def forward( self, caps, cmasks, vfeats, vmasks, **kwargs ): raise NotImplementedError( "Please derive MMFusion module." ) def _mm_on_the_fly( self, cmasks, vmasks, attention_mask ): """helper function for mask, seg_ids and token_type_ids.""" if attention_mask is None: attention_mask = self._mm_attention_mask(cmasks, vmasks) """ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | """ token_type_ids = torch.cat( [ torch.zeros( (vmasks.size(0), vmasks.size(1) + 2), dtype=torch.long, device=vmasks.device, ), torch.ones( (cmasks.size(0), cmasks.size(1) - 2), dtype=torch.long, device=cmasks.device, ), ], dim=1, ) return attention_mask, token_type_ids def _mm_attention_mask(self, cmasks, vmasks): assert cmasks.size(0) == vmasks.size(0), "{}, {}, {}, {}".format( str(cmasks.size()), str(vmasks.size()), str(cmasks.size(0)), str(vmasks.size(0)), ) mm_mask = torch.cat([cmasks[:, :1], vmasks, cmasks[:, 1:]], dim=1) if self.last_iso_layer == 0: # hard attention mask. return mm_mask else: # a gpu iso mask; 0 : num_iso_layer is isolated; # num_iso_layer: are MM-fused. # make an iso layer batch_size = cmasks.size(0) iso_mask = self._make_iso_mask(batch_size, cmasks, vmasks) mm_mask = mm_mask[:, None, :].repeat(1, mm_mask.size(-1), 1) iso_mm_masks = [] # hard attention mask. iso_mask = iso_mask[:, None, :, :].repeat( 1, self.last_iso_layer, 1, 1) iso_mm_masks.append(iso_mask) if self.last_iso_layer < self.num_hidden_layers: mm_mask = mm_mask[:, None, :, :].repeat( 1, self.num_hidden_layers - self.last_iso_layer, 1, 1 ) iso_mm_masks.append(mm_mask) iso_mm_masks = torch.cat(iso_mm_masks, dim=1) return iso_mm_masks def _make_iso_mask(self, batch_size, cmasks, vmasks): cls_self_mask = torch.cat( [ torch.ones( (batch_size, 1), dtype=torch.bool, device=cmasks.device), torch.zeros( (batch_size, cmasks.size(1) + vmasks.size(1) - 1), dtype=torch.bool, device=cmasks.device) ], dim=1) iso_video_mask = torch.cat( [ # [CLS] is not used. 
torch.zeros( (batch_size, 1), dtype=torch.bool, device=cmasks.device ), vmasks, # assume to be 1. cmasks[:, 1:2], # 2 means [CLS] + [SEP] torch.zeros( (batch_size, cmasks.size(1) - 2), dtype=torch.bool, device=cmasks.device, ), ], dim=1, ) iso_text_mask = torch.cat( [ torch.zeros( (batch_size, 2 + vmasks.size(1)), dtype=torch.bool, device=cmasks.device, ), # [CLS] is not used. cmasks[:, 2:], # assume to be 1. ], dim=1, ) cls_self_mask = cls_self_mask[:, None, :] iso_video_mask = iso_video_mask[:, None, :].repeat( 1, vmasks.size(1) + 1, 1) iso_text_mask = iso_text_mask[:, None, :].repeat( 1, cmasks.size(1) - 2, 1) return torch.cat([cls_self_mask, iso_video_mask, iso_text_mask], dim=1) def _pooling_vt_layer( self, layered_sequence_output, cmasks, vmasks ): layer_idx = self.last_iso_layer \ if self.last_iso_layer > 0 else self.num_hidden_layers hidden_state = layered_sequence_output[layer_idx] # also output pooled_video and pooled_text. batch_size = cmasks.size(0) # pool the modality. text_offset = vmasks.size(1) + 2 # [CLS] + [SEP] # video tokens + [SEP] video_outputs = hidden_state[:, 1:text_offset] video_attention_mask = torch.cat( [ vmasks, torch.ones( (batch_size, 1), dtype=torch.bool, device=vmasks.device), ], dim=1, ) assert video_outputs.size(1) == video_attention_mask.size(1) pooled_video = torch.sum( video_outputs * video_attention_mask.unsqueeze(-1), dim=1 ) / video_attention_mask.sum(1, keepdim=True) # pooled_video = torch.mean(video_outputs[0], dim=1) # text tokens + [SEP] text_attention_mask = cmasks[:, 2:] text_outputs = hidden_state[:, text_offset:] assert text_outputs.size(1) == text_attention_mask.size(1) pooled_text = torch.sum( text_outputs * text_attention_mask.unsqueeze(-1), dim=1 ) / text_attention_mask.sum(1, keepdim=True) return pooled_video, pooled_text class MMFusionMFMMLM(MMFusion): """forward function for MFM and MLM.""" def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, video_label=None, text_label=None, **kwargs ): output_hidden_states = False if self.is_train else True target_vfeats, non_masked_frame_mask = None, None if video_label is not None: target_vfeats = vfeats.masked_select( video_label.unsqueeze(-1)).view( -1, vfeats.size(-1) ) # mask video token. vfeats[video_label] = 0.0 non_masked_frame_mask = vmasks.clone() non_masked_frame_mask[video_label] = False attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) outputs = self.mm_encoder( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, masked_frame_labels=video_label, target_video_hidden_states=target_vfeats, non_masked_frame_mask=non_masked_frame_mask, masked_lm_labels=text_label, output_hidden_states=output_hidden_states, ) video_logits, text_logits = outputs[0], outputs[1] if self.is_train: # return earlier for training. return { "video_logits": video_logits, "text_logits": text_logits, } pooled_video, pooled_text = self._pooling_vt_layer( outputs[2], cmasks, vmasks) return {"pooled_video": pooled_video, "pooled_text": pooled_text} class MMFusionMTM(MMFusionMFMMLM): def __init__(self, config, **kwargs): super().__init__(config) """ For reproducibility: self.mm_encoder will be initialized then discarded. 
""" from .transformermodel import MMBertForMTM model_config = AutoConfig.from_pretrained(config.dataset.bert_name) model_config.max_video_len = config.dataset.max_video_len model_config.use_seg_emb = config.model.use_seg_emb self.mm_encoder = MMBertForMTM.from_pretrained( config.dataset.bert_name, config=model_config) class MMFusionShare(MMFusion): """A retrival wrapper using mm_encoder as both video/text backbone. TODO: move formally. """ def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, video_label=None, text_label=None, output_hidden_states=False, **kwargs ): pooled_video = self.forward_video( vfeats, vmasks, caps, cmasks, output_hidden_states ) pooled_text = self.forward_text( caps, cmasks, output_hidden_states ) return {"pooled_video": pooled_video, "pooled_text": pooled_text} def forward_video( self, vfeats, vmasks, caps, cmasks, output_hidden_states=False, **kwargs ): input_ids = caps[:, :2] attention_mask = torch.cat([ cmasks[:, :1], vmasks, cmasks[:, 1:2] ], dim=1) token_type_ids = torch.zeros( (vmasks.size(0), vmasks.size(1) + 2), dtype=torch.long, device=vmasks.device) outputs = self.mm_encoder( input_ids=input_ids, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=True ) video_outputs = outputs[0] if output_hidden_states: return video_outputs batch_size = cmasks.size(0) video_attention_mask = torch.cat( [ torch.zeros( (batch_size, 1), dtype=torch.bool, device=vmasks.device), vmasks, torch.ones( (batch_size, 1), dtype=torch.bool, device=vmasks.device), ], dim=1, ) assert video_outputs.size(1) == video_attention_mask.size(1) video_attention_mask = video_attention_mask.type(video_outputs.dtype) \ / video_attention_mask.sum(1, keepdim=True) pooled_video = torch.bmm( video_outputs.transpose(2, 1), video_attention_mask.unsqueeze(2) ).squeeze(-1) return pooled_video # video_outputs def forward_text( self, caps, cmasks, output_hidden_states=False, **kwargs ): input_ids = torch.cat([ caps[:, :1], caps[:, 2:], ], dim=1) attention_mask = torch.cat([ cmasks[:, :1], cmasks[:, 2:] ], dim=1) token_type_ids = torch.cat([ torch.zeros( (cmasks.size(0), 1), dtype=torch.long, device=cmasks.device), torch.ones( (cmasks.size(0), cmasks.size(1) - 2), dtype=torch.long, device=cmasks.device) ], dim=1) outputs = self.mm_encoder( input_ids=input_ids, input_video_embeds=None, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=True ) text_outputs = outputs[0] if output_hidden_states: return text_outputs batch_size = caps.size(0) # text tokens + [SEP] text_attention_mask = torch.cat([ torch.zeros( (batch_size, 1), dtype=torch.bool, device=cmasks.device), cmasks[:, 2:] ], dim=1) assert text_outputs.size(1) == text_attention_mask.size(1) text_attention_mask = text_attention_mask.type(text_outputs.dtype) \ / text_attention_mask.sum(1, keepdim=True) pooled_text = torch.bmm( text_outputs.transpose(2, 1), text_attention_mask.unsqueeze(2) ).squeeze(-1) return pooled_text # text_outputs class MMFusionSeparate(MMFusionShare): def forward_video( self, vfeats, vmasks, caps, cmasks, output_hidden_states=False, **kwargs ): input_ids = caps[:, :2] attention_mask = torch.cat([ cmasks[:, :1], vmasks, cmasks[:, 1:2] ], dim=1) token_type_ids = torch.zeros( (vmasks.size(0), vmasks.size(1) + 2), dtype=torch.long, device=vmasks.device) outputs = self.video_encoder( input_ids=input_ids, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=True ) video_outputs = 
outputs[0] if output_hidden_states: return video_outputs batch_size = cmasks.size(0) video_attention_mask = torch.cat( [ torch.zeros( (batch_size, 1), dtype=torch.bool, device=vmasks.device), vmasks, torch.ones( (batch_size, 1), dtype=torch.bool, device=vmasks.device), ], dim=1, ) assert video_outputs.size(1) == video_attention_mask.size(1) video_attention_mask = video_attention_mask.type(video_outputs.dtype) \ / video_attention_mask.sum(1, keepdim=True) pooled_video = torch.bmm( video_outputs.transpose(2, 1), video_attention_mask.unsqueeze(2) ).squeeze(-1) return pooled_video # video_outputs def forward_text( self, caps, cmasks, output_hidden_states=False, **kwargs ): input_ids = torch.cat([ caps[:, :1], caps[:, 2:], ], dim=1) attention_mask = torch.cat([ cmasks[:, :1], cmasks[:, 2:] ], dim=1) # different from sharing, we use all-0 type. token_type_ids = torch.zeros( (cmasks.size(0), cmasks.size(1) - 1), dtype=torch.long, device=cmasks.device) outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=True ) text_outputs = outputs[0] if output_hidden_states: return text_outputs batch_size = caps.size(0) # text tokens + [SEP] text_attention_mask = torch.cat([ torch.zeros( (batch_size, 1), dtype=torch.bool, device=cmasks.device), cmasks[:, 2:] ], dim=1) assert text_outputs.size(1) == text_attention_mask.size(1) text_attention_mask = text_attention_mask.type(text_outputs.dtype) \ / text_attention_mask.sum(1, keepdim=True) pooled_text = torch.bmm( text_outputs.transpose(2, 1), text_attention_mask.unsqueeze(2) ).squeeze(-1) return pooled_text # text_outputs class MMFusionJoint(MMFusion): """fine-tuning wrapper for retrival task.""" def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, video_label=None, text_label=None, **kwargs ): # TODO (huxu): other ways to do negative examples; move the following # into your criterion forward. output_hidden_states = True attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) separate_forward_split = ( None if self.is_train else vmasks.size(1) + 2 ) # [CLS] + [SEP] outputs = self.mm_encoder( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=output_hidden_states, separate_forward_split=separate_forward_split, ) pooled_video, pooled_text = self._pooling_vt_layer( outputs[2], cmasks, vmasks) return {"pooled_video": pooled_video, "pooled_text": pooled_text} class MMFusionActionSegmentation(MMFusion): """Fine-tuning wrapper for action segmentation. TODO: rename this for VLM. """ def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, **kwargs ): # ActionLocalization assume of batch_size=1, squeeze it. caps = caps.view(-1, caps.size(-1)) cmasks = cmasks.view(-1, cmasks.size(-1)) vfeats = vfeats.view(-1, vfeats.size(2), vfeats.size(3)) vmasks = vmasks.view(-1, vmasks.size(-1)) # this may not cover all shapes of attention_mask. attention_mask = attention_mask.view( -1, attention_mask.size(2), attention_mask.size(3)) \ if attention_mask is not None else None # TODO (huxu): other ways to do negative examples; move the following # into your criterion forward. output_hidden_states = True # video forwarding, text is dummy; never use attention_mask. 
        attention_mask, token_type_ids = self._mm_on_the_fly(
            cmasks, vmasks, attention_mask)

        logits = self.mm_encoder(
            input_ids=caps,
            input_video_embeds=vfeats,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            output_hidden_states=output_hidden_states,
        )
        return {"logits": logits[0][:, 1:vmasks.size(1)+1]}


class MMFusionActionLocalization(MMFusion):
    """Fine-tuning wrapper for action localization."""
    def __init__(self, config, **kwargs):
        super().__init__(config)
        tokenizer = AutoTokenizer.from_pretrained(
            config.dataset.bert_name)
        self.cls_token_id = tokenizer.cls_token_id
        self.sep_token_id = tokenizer.sep_token_id
        self.pad_token_id = tokenizer.pad_token_id

    def forward(
        self,
        caps,
        cmasks,
        vfeats,
        vmasks,
        attention_mask=None,
        **kwargs
    ):
        # ActionLocalization assumes batch_size=1; squeeze the batch dim.
        caps = caps.squeeze(0)
        cmasks = cmasks.squeeze(0)
        vfeats = vfeats.squeeze(0)
        vmasks = vmasks.squeeze(0)
        attention_mask = attention_mask.squeeze(0) if attention_mask is not None else None

        # TODO (huxu): other ways to do negative examples; move the following
        # into your criterion forward.
        output_hidden_states = True

        # a length-1 dummy video token.
        dummy_vfeats = torch.zeros(
            (caps.size(0), 1, vfeats.size(-1)), device=vfeats.device, dtype=vfeats.dtype)
        dummy_vmasks = torch.ones(
            (caps.size(0), 1), dtype=torch.bool,
            device=vfeats.device)

        dummy_caps = torch.LongTensor(
            [[self.cls_token_id, self.sep_token_id,
              self.pad_token_id, self.sep_token_id]],
        ).to(caps.device).repeat(vfeats.size(0), 1)
        dummy_cmasks = torch.BoolTensor(
            [[0, 1, 0, 1]]  # pad are valid for attention.
        ).to(caps.device).repeat(vfeats.size(0), 1)

        # video forwarding with a dummy text; the incoming attention_mask is
        # not used here (a fresh one is built on the fly).
        attention_mask, token_type_ids = self._mm_on_the_fly(
            dummy_cmasks, vmasks, None)

        outputs = self.mm_encoder(
            input_ids=dummy_caps,
            input_video_embeds=vfeats,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            output_hidden_states=output_hidden_states,
        )

        layer_idx = self.last_iso_layer \
            if self.last_iso_layer > 0 else self.num_hidden_layers

        video_seq = outputs[2][layer_idx][:, 1:vmasks.size(1)+1].masked_select(
            vmasks.unsqueeze(-1)
        ).view(-1, self.hidden_size)

        # text forwarding with a dummy video.
        attention_mask, token_type_ids = self._mm_on_the_fly(
            cmasks, dummy_vmasks, None)

        outputs = self.mm_encoder(
            input_ids=caps,
            input_video_embeds=dummy_vfeats,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            output_hidden_states=output_hidden_states,
        )

        _, pooled_text = self._pooling_vt_layer(
            outputs[2], cmasks, dummy_vmasks)

        # this line is not right.
        logits = torch.mm(video_seq, pooled_text.transpose(1, 0))
        return {"logits": logits}


# --------------- MMFusionSeparate for end tasks ---------------

class MMFusionSeparateActionSegmentation(MMFusionSeparate):
    """Fine-tuning wrapper for action segmentation."""
    def forward(
        self,
        caps,
        cmasks,
        vfeats,
        vmasks,
        attention_mask=None,
        **kwargs
    ):
        # the dataloader stacks sliding windows inside a batch_size=1 sample;
        # flatten them into the batch dimension.
caps = caps.view(-1, caps.size(-1)) cmasks = cmasks.view(-1, cmasks.size(-1)) vfeats = vfeats.view(-1, vfeats.size(2), vfeats.size(3)) vmasks = vmasks.view(-1, vmasks.size(-1)) logits = self.forward_video( vfeats, vmasks, caps, cmasks, output_hidden_states=True ) return {"logits": logits[:, 1:vmasks.size(1)+1]} class MMFusionSeparateActionLocalization(MMFusionSeparate): def __init__(self, config, **kwargs): super().__init__(config) tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name) self.cls_token_id = tokenizer.cls_token_id self.sep_token_id = tokenizer.sep_token_id self.pad_token_id = tokenizer.pad_token_id def forward( self, caps, cmasks, vfeats, vmasks, **kwargs ): # ActionLocalization assume of batch_size=1, squeeze it. caps = caps.squeeze(0) cmasks = cmasks.squeeze(0) vfeats = vfeats.squeeze(0) vmasks = vmasks.squeeze(0) # TODO (huxu): other ways to do negative examples; move the following # into your criterion forward. dummy_caps = torch.LongTensor( [[self.cls_token_id, self.sep_token_id, self.pad_token_id, self.sep_token_id]], ).to(caps.device).repeat(vfeats.size(0), 1) dummy_cmasks = torch.BoolTensor( [[0, 1, 0, 1]] # pad are valid for attention. ).to(caps.device).repeat(vfeats.size(0), 1) outputs = self.forward_video( vfeats, vmasks, dummy_caps, dummy_cmasks, output_hidden_states=True ) video_seq = outputs[:, 1:vmasks.size(1)+1].masked_select( vmasks.unsqueeze(-1) ).view(-1, self.hidden_size) pooled_text = self.forward_text( caps, cmasks, output_hidden_states=False ) # this line is not right. logits = torch.mm(video_seq, pooled_text.transpose(1, 0)) return {"logits": logits} class MMFusionShareActionLocalization(MMFusionShare): def __init__(self, config, **kwargs): super().__init__(config) tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name) self.cls_token_id = tokenizer.cls_token_id self.sep_token_id = tokenizer.sep_token_id self.pad_token_id = tokenizer.pad_token_id def forward( self, caps, cmasks, vfeats, vmasks, **kwargs ): # ActionLocalization assume of batch_size=1, squeeze it. caps = caps.squeeze(0) cmasks = cmasks.squeeze(0) vfeats = vfeats.squeeze(0) vmasks = vmasks.squeeze(0) # TODO (huxu): other ways to do negative examples; move the following # into your criterion forward. dummy_caps = torch.LongTensor( [[self.cls_token_id, self.sep_token_id, self.pad_token_id, self.sep_token_id]], ).to(caps.device).repeat(vfeats.size(0), 1) dummy_cmasks = torch.BoolTensor( [[0, 1, 0, 1]] # pad are valid for attention. ).to(caps.device).repeat(vfeats.size(0), 1) outputs = self.forward_video( vfeats, vmasks, dummy_caps, dummy_cmasks, output_hidden_states=True ) video_seq = outputs[:, 1:vmasks.size(1)+1].masked_select( vmasks.unsqueeze(-1) ).view(-1, self.hidden_size) pooled_text = self.forward_text( caps, cmasks, output_hidden_states=False ) # this line is not right. logits = torch.mm(video_seq, pooled_text.transpose(1, 0)) return {"logits": logits}
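

# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the original module). It assumes the
# S3D weights loaded in `MMPTModel.from_pretrained` above and a trained task
# checkpoint; "projects/retri/videoclip/how2.yaml" is a placeholder config
# path.
if __name__ == "__main__":
    model, tokenizer, aligner = MMPTModel.from_pretrained(
        "projects/retri/videoclip/how2.yaml")
    model.eval()

    # 2 seconds of (fake) video at 30 fps, 224x224 RGB, batch size 1.
    video_frames = torch.randn(1, 2, 30, 224, 224, 3)
    caps, cmasks = aligner._build_text_seq(
        tokenizer("someone is cooking", add_special_tokens=False)["input_ids"])
    caps, cmasks = caps[None, :], cmasks[None, :]  # add the batch dimension.

    with torch.no_grad():
        output = model(video_frames, caps, cmasks, return_score=True)
    print(output["score"])  # similarity of the pooled video/text embeddings.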
30,634
32.047465
90
py
sign-topic
sign-topic-main/examples/MMPT/mmpt/datasets/fairseqmmdataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
TODO (huxu): a fairseq wrapper class for the datasets defined in MMPT,
mostly MMDataset.
"""

from collections import OrderedDict

from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from fairseq.data import FairseqDataset, data_utils


class FairseqMMDataset(FairseqDataset):
    """
    A wrapper class of MMDataset for fairseq.
    """

    def __init__(self, mmdataset):
        if not isinstance(mmdataset, Dataset):
            raise TypeError("mmdataset must be of type `torch.utils.data.Dataset`.")
        self.mmdataset = mmdataset
        self.epoch = 0  # updated by the fairseq trainer via `set_epoch`.

    def set_epoch(self, epoch, **unused):
        super().set_epoch(epoch)
        self.epoch = epoch

    def __getitem__(self, idx):
        # seed numpy per (epoch, index) so random subsampling is reproducible.
        with data_utils.numpy_seed(43211, self.epoch, idx):
            return self.mmdataset[idx]

    def __len__(self):
        return len(self.mmdataset)

    def collater(self, samples):
        if hasattr(self.mmdataset, "collator"):
            return self.mmdataset.collator(samples)
        if len(samples) == 0:
            return {}
        if isinstance(samples[0], dict):
            batch = OrderedDict()
            for key in samples[0]:
                if samples[0][key] is not None:
                    batch[key] = default_collate(
                        [sample[key] for sample in samples])
            return batch
        else:
            return default_collate(samples)

    def size(self, index):
        """dummy implementation: we don't use --max-tokens"""
        return 1

    def num_tokens(self, index):
        """dummy implementation: we don't use --max-tokens"""
        return 1
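

# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): `collater` falls back to
# dict-aware default collation when the wrapped dataset has no `collator`
# attribute. `_ToyDataset` is an illustrative stand-in only.
if __name__ == "__main__":
    import torch

    class _ToyDataset(Dataset):
        def __len__(self):
            return 4

        def __getitem__(self, idx):
            return {"vfeats": torch.zeros(3, 512), "idx": idx}

    ds = FairseqMMDataset(_ToyDataset())
    ds.set_epoch(0)  # `__getitem__` seeds numpy per (epoch, index).
    batch = ds.collater([ds[0], ds[1]])
    print(batch["vfeats"].shape, batch["idx"])  # torch.Size([2, 3, 512]) tensor([0, 1])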
1,785
29.793103
85
py
sign-topic
sign-topic-main/examples/MMPT/mmpt/datasets/mmdataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch

from collections import OrderedDict

from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate

from ..utils import set_seed


class MMDataset(Dataset):
    """
    A generic multi-modal dataset.
        Args:
            `meta_processor`: a meta processor that handles loading of meta
                data and returns (video_id, text_id).
            `video_processor`: a video processor that handles, e.g., decoding
                and loading of .np feature files.
            `text_processor`: a text processor that handles, e.g.,
                tokenization.
            `align_processor`: an aligner that combines the video and text
                features into one training example.
    """

    def __init__(
        self,
        meta_processor,
        video_processor,
        text_processor,
        align_processor,
    ):
        self.split = meta_processor.split
        self.meta_processor = meta_processor
        self.video_processor = video_processor
        self.text_processor = text_processor
        self.align_processor = align_processor

    def __len__(self):
        return len(self.meta_processor)

    def __getitem__(self, idx):
        if self.split == "test":
            set_seed(idx)
        video_id, text_id = self.meta_processor[idx]
        video_feature = self.video_processor(video_id)
        text_feature = self.text_processor(text_id)
        output = self.align_processor(video_id, video_feature, text_feature)
        # TODO (huxu): the following is for debug purpose.
        output.update({"idx": idx})
        return output

    def collater(self, samples):
        """This collater is deprecated: set `self.collator =
        MMDataset.collater` and see `collater` in FairseqMMDataset instead.
        """
        if len(samples) == 0:
            return {}
        if isinstance(samples[0], dict):
            batch = OrderedDict()
            for key in samples[0]:
                if samples[0][key] is not None:
                    batch[key] = default_collate(
                        [sample[key] for sample in samples])
            return batch
        else:
            return default_collate(samples)

    def print_example(self, output):
        print("[one example]", output["video_id"])
        if (
            hasattr(self.align_processor, "subsampling")
            and self.align_processor.subsampling is not None
            and self.align_processor.subsampling > 1
        ):
            for key in output:
                if torch.is_tensor(output[key]):
                    output[key] = output[key][0]
        # search for a tokenizer to translate ids back.
        tokenizer = None
        if hasattr(self.text_processor, "tokenizer"):
            tokenizer = self.text_processor.tokenizer
        elif hasattr(self.align_processor, "tokenizer"):
            tokenizer = self.align_processor.tokenizer
        if tokenizer is not None:
            caps = output["caps"].tolist()
            if isinstance(caps[0], list):
                caps = caps[0]
            print("caps", tokenizer.decode(caps))
            print("caps", tokenizer.convert_ids_to_tokens(caps))
        for key, value in output.items():
            if torch.is_tensor(value):
                if len(value.size()) >= 3:  # attention_mask.
                    print(key, value.size())
                    print(key, "first", value[0, :, :])
                    print(key, "last", value[-1, :, :])
            else:
                print(key, value)
        print("[end of one example]")
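

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wiring MMDataset with
# stand-in processors. `_StubMeta` and the lambdas below are illustrative
# only; real runs use the processors from `mmpt.processors`.
if __name__ == "__main__":
    class _StubMeta:
        split = "train"

        def __len__(self):
            return 2

        def __getitem__(self, idx):
            return "video%d" % idx, "text%d" % idx

    dataset = MMDataset(
        meta_processor=_StubMeta(),
        video_processor=lambda vid: torch.zeros(4, 512),   # fake features.
        text_processor=lambda tid: {"cap": [[101, 102]]},  # fake token ids.
        align_processor=lambda vid, vf, tf: {"video_id": vid, "vfeats": vf},
    )
    batch = dataset.collater([dataset[0], dataset[1]])
    print(batch["vfeats"].shape)  # torch.Size([2, 4, 512])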
3,873
33.589286
76
py
sign-topic
sign-topic-main/examples/MMPT/mmpt/evaluators/predictor.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import random import json import numpy as np import torch import pickle import math from tqdm import tqdm class Predictor(object): """this base class is used to save predictions to disk (and being called by a evaluator later). Predictor has minimum support of single gpu prediction. """ def __init__(self, config): self.pred_dir = None # on-the-fly eval does not save the results. if hasattr(config, "eval") and config.eval is not None: self.pred_dir = config.eval.save_path os.makedirs(self.pred_dir, exist_ok=True) def __call__(self, outputs): """extract the prediction and save it.""" raise NotImplementedError def predict_loop(self, model, eval_dataloader, output_file=None): """on-the-fly prediction on a single gpu.""" self.full_scores = [] model.eval() model = model.to(0) with torch.no_grad(): for data in eval_dataloader: data = self.to_ctx(data) outputs = model(**data) outputs.update(data) self(outputs) return self.finalize(output_file) def finalize(self, output_file): pass def to_ctx(self, data, ctx=0, dtype=None): if isinstance(data, dict): for key in data: if torch.is_tensor(data[key]): if dtype is not None and data[key].dtype == torch.float32: data[key] = data[key].to(dtype) data[key] = data[key].to(ctx) return data else: raise ValueError("non-dict type of batch is not supported yet.") class NLGPredictor(Predictor): """Predicting Text from MMFusion models.""" """TODO: make a context.""" def __init__(self, config): super().__init__(config) from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name, bos_token="[CLS]", eos_token="[SEP]") self.bos_token_id = self.tokenizer.bos_token_id self.eos_token_id = self.tokenizer.eos_token_id def predict_loop(self, model, eval_dataloader, output_file=None): """TODO: refactor base classes.""" ctx = 0 outputs = {"outputs": [], "targets": [[]]} model.eval() model = model.to(ctx) with torch.no_grad(): for data in tqdm(eval_dataloader): data = self.to_ctx(data, ctx) self(data, model, outputs) return self.finalize(outputs, output_file) def __call__(self, data, model, outputs): data.update({ "bos_token_id": self.bos_token_id, "eos_token_id": self.eos_token_id }) output = model.generate(**data) assert len(output) == len(data["ref"]) for idx, _output in enumerate(output): generated_text = self.tokenizer.decode( _output, skip_special_tokens=True) if generated_text == "": generated_text = "none" outputs["outputs"].append(generated_text) outputs["targets"][0].append(data["ref"][idx]) if random.random() < 0.001: print("_output", _output) print("generated_text", generated_text) print("ref", data["ref"][idx]) def finalize(self, outputs, output_file=None): if output_file is not None: with open(os.path.join( self.pred_dir, output_file + ".json"), "w") as fw: json.dump(outputs, fw, indent=4) return outputs class RetrievalPredictor(Predictor): """generated `pooled_video` and `pooled_text`.""" def __init__(self, config): super().__init__(config) from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name) def predict_loop( self, model, eval_dataloader, output_file="retrieval.npy" ): """on-the-fly prediction on a single gpu.""" full_scores = [] texts = [] model.eval() model = model.cuda() with torch.no_grad(): for data in eval_dataloader: # convert to dict. 
                if not isinstance(data, dict):
                    data = {
                        "caps": data[0],
                        "cmasks": data[1],
                        "vfeats": data[2],
                        "vmasks": data[3],
                        "video_id": data[4]
                    }
                data = self.to_ctx(data)

                outputs = model(**data)
                outputs.update(data)
                self(outputs, full_scores)
                for _cap in data["caps"]:
                    texts.append(
                        self.tokenizer.decode(_cap, skip_special_tokens=True)
                    )

        return self.finalize(full_scores, texts, output_file)

    def __call__(self, sample, full_scores):
        scores = self._get_pooled_outputs(sample)
        self._append_scores(scores, full_scores)

    def finalize(self, full_scores, texts, output_file=None):
        outputs = self._aggregate_scores(full_scores)
        if output_file is not None:
            np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs)
        return {"outputs": outputs, "texts": texts}

    def _get_pooled_outputs(self, outputs):
        if "pooled_video" in outputs:
            return outputs["pooled_video"], outputs["pooled_text"]
        else:
            raise ValueError("unknown format of outputs.")

    def _append_scores(self, scores, full_scores):
        assert len(scores) == 2
        if len(full_scores) == 0:
            full_scores.append([])
            full_scores.append([])
        full_scores[0].append(scores[0].cpu().detach().numpy())
        full_scores[1].append(scores[1].cpu().detach().numpy())

    def _aggregate_scores(self, scores):
        assert len(scores) == 2
        video_hidden = np.concatenate(scores[0], axis=0)
        text_hidden = np.concatenate(scores[1], axis=0)
        # clear up.
        self.full_scores = []
        return np.matmul(text_hidden, video_hidden.T)


class QAPredictor(Predictor):
    """Multiple-choice QA: score each video against its 5 candidate answers
    and take the argmax as the prediction."""

    def __init__(self, config):
        super().__init__(config)
        # the predictor maintains scores in `self.full_scores` and
        # aggregates them in `finalize`.

    def predict_loop(self, model, eval_dataloader, output_file="qa.npy"):
        """on-the-fly prediction on a single gpu."""
        self.full_scores = []
        model.eval()
        model = model.cuda()
        with torch.no_grad():
            for data in eval_dataloader:
                # reshape the answers and duplicate the video 5 times
                # (one copy per candidate answer).
                v_len = data["vfeats"].size(1)
                hidden_size = data["vfeats"].size(2)
                data["vfeats"] = data["vfeats"].unsqueeze(1).repeat(1, 5, 1, 1).view(-1, v_len, hidden_size)
                data["vmasks"] = data["vmasks"].unsqueeze(1).repeat(1, 5, 1).view(-1, v_len)

                t_len = data["caps"].size(-1)
                data["caps"] = data["caps"].view(-1, t_len)
                data["cmasks"] = data["cmasks"].view(-1, t_len)
                data = self.to_ctx(data)
                outputs = model(**data)
                outputs.update(data)
                self(outputs)
        return self.finalize(output_file)

    def __call__(self, sample):
        hidden_size = sample["pooled_video"].size(-1)
        pooled_video = sample["pooled_video"].view(-1, 5, hidden_size)
        pooled_text = sample["pooled_text"].view(-1, 5, hidden_size)
        scores = torch.bmm(pooled_video, pooled_text.transpose(2, 1))
        scores = scores.argmax(-1)
        self._append_scores(scores[:, 0], sample["answers"], self.full_scores)

    def finalize(self, output_file=None):
        outputs, targets = self._aggregate_scores(self.full_scores)
        if output_file is not None:
            np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs)
        return {"outputs": outputs, "targets": targets}

    def _append_scores(self, scores, answers, full_scores):
        if len(full_scores) == 0:
            full_scores.append([])
            full_scores.append([])
        full_scores[0].append(scores.cpu().detach().numpy())
        full_scores[1].append(answers.cpu().detach().numpy())

    def _aggregate_scores(self, scores):
        assert len(scores) == 2
        outputs = np.concatenate(scores[0], axis=0)
        targets = np.concatenate(scores[1], axis=0)
        # clear up.
        self.full_scores = []
        return outputs, targets


class CrossTaskPredictor(Predictor):
    """
    CrossTaskPredictor needs to compute the average of logits
    for overlapped sliding-window.
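    Windows advance by `sliding_window` frames and cover
    `sliding_window_size` frames each; per-frame logits are summed over all
    covering windows and normalized by the visit counts (see `__call__`).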
""" def __init__(self, config): super().__init__(config) self.lsm = torch.nn.LogSoftmax(dim=1) self.max_video_len = config.dataset.max_video_len self.sliding_window = config.dataset.sliding_window self.sliding_window_size = config.dataset.sliding_window_size self.annotation_path = config.dataset.annotation_path def predict_loop(self, model, eval_dataloader, output_file="result.pkl"): """refactored from line 144: https://github.com/DmZhukov/CrossTask/blob/master/train.py """ ctx = 0 model.eval() model = model.to(ctx) # this is not a loss but just compute neg_log_prob. Y_pred = {} Y_true = {} with torch.no_grad(): for batch in eval_dataloader: self(batch, model, Y_pred, Y_true) return self.finalize(Y_pred, Y_true, output_file) def __call__(self, sample, model, Y_pred, Y_true): # please install dp from `https://github.com/DmZhukov/CrossTask` from dp import dp vid, task = sample['video_id'][0], sample['task'][0] sample = self.to_ctx(sample) # compute the average logits over sliding windows. output = model(**sample) batch_logits = output["logits"].cpu() video_len = sample["video_len"][0] # the following version is slow. logits = torch.zeros((video_len, batch_logits.size(1))) logits_counts = torch.zeros((video_len, 1), dtype=torch.long) # use the same loop as aligner to recover. batch_logit_idx = 0 for window_start in range(0, video_len, self.sliding_window): video_end = min(video_len - window_start, self.sliding_window_size) logits[window_start: window_start + video_end] += batch_logits[ batch_logit_idx: batch_logit_idx + video_end] batch_logit_idx += video_end logits_counts[window_start: window_start + video_end] += torch.ones((video_end, 1), dtype=torch.long) if (video_len - window_start) <= self.sliding_window_size: break logits /= logits_counts assert logits.size() == (video_len, batch_logits.size(1)), "{}, {}".format(logits.size(), video_len) O = self.lsm(logits) y = np.zeros(O.size(), dtype=np.float32) dp(y, -O.detach().cpu().numpy()) if task not in Y_pred: Y_pred[task] = {} Y_pred[task][vid] = y annot_path = os.path.join( self.annotation_path, task+'_'+vid+'.csv') if os.path.exists(annot_path): if task not in Y_true: Y_true[task] = {} Y_true[task][vid] = self._read_assignment( *y.shape, annot_path) def finalize(self, Y_pred, Y_true, output_file=None): if output_file is not None: with open( os.path.join(self.pred_dir, output_file + ".pkl"), "wb") as fw: pickle.dump( {"Y_pred": Y_pred, "Y_true": Y_true}, fw, protocol=pickle.HIGHEST_PROTOCOL) return {"outputs": Y_pred, "targets": Y_true} def _read_assignment(self, T, K, path): """ refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py Howto interpret contraints on loss that is going to be minimized: lambd is a big number; self.lambd * C is a big number for all valid position (csv stores invalids) def forward(self, O, Y, C): return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum() This will load the csv file and fill-in the step col from start to end rows. """ Y = np.zeros([T, K], dtype=np.uint8) with open(path, 'r') as f: for line in f: step, start, end = line.strip().split(',') start = int(math.floor(float(start))) end = int(math.ceil(float(end))) step = int(step) - 1 Y[start:end, step] = 1 return Y class COINPredictor(Predictor): """ COINPredictor is similar to CrossTask on sliding windows. 
""" def __init__(self, config): super().__init__(config) self.max_video_len = config.dataset.max_video_len self.sliding_window = config.dataset.sliding_window self.sliding_window_size = config.dataset.sliding_window_size def predict_loop(self, model, eval_dataloader, output_file="result.pkl"): """refactored from line 144: https://github.com/DmZhukov/CrossTask/blob/master/train.py """ ctx = 0 model.eval() model = model.to(ctx) # this is not a loss but just compute neg_log_prob. Y_pred = [] Y_true = [] with torch.no_grad(): for batch in eval_dataloader: self(batch, model, Y_pred, Y_true) return self.finalize(Y_pred, Y_true, output_file) def __call__(self, sample, model, Y_pred, Y_true): sample = self.to_ctx(sample) # compute the average logits over sliding windows. output = model(**sample) logits = self._merge_windows(sample, output) Y_pred.append(logits.argmax(dim=1)) Y_true.append(sample["video_targets"].squeeze(0).cpu()) def _merge_windows(self, sample, output): targets = sample["targets"].reshape(-1).cpu() valid_mask = targets != -100 targets = targets[valid_mask] batch_logits = output["logits"].cpu() batch_logits = batch_logits.reshape(-1, batch_logits.size(-1)) batch_logits = batch_logits[valid_mask] video_len = sample["video_len"][0] # the following version is slow. logits = torch.zeros((video_len, batch_logits.size(1))) logits_counts = torch.zeros((video_len, 1), dtype=torch.long) # use the same loop as aligner to recover. batch_logit_idx = 0 for window_start in range(0, video_len, self.sliding_window): video_end = min(video_len - window_start, self.sliding_window_size) logits[window_start: window_start + video_end] += batch_logits[ batch_logit_idx: batch_logit_idx + video_end] batch_logit_idx += video_end logits_counts[window_start: window_start + video_end] += torch.ones((video_end, 1), dtype=torch.long) if (video_len - window_start) <= self.sliding_window_size: break logits /= logits_counts assert logits.size() == (video_len, batch_logits.size(1)), "{}, {}".format(logits.size(), video_len) return logits def finalize(self, Y_pred, Y_true, output_file=None): Y_pred = torch.cat(Y_pred, dim=0).numpy() Y_true = torch.cat(Y_true, dim=0).numpy() assert len(Y_pred) == len(Y_true) error_mask = Y_pred != Y_true print("sample error", Y_pred[error_mask][:10], Y_true[error_mask][:10]) print("sample error", Y_pred[error_mask][10:20], Y_true[error_mask][10:20]) if output_file is not None: with open( os.path.join(self.pred_dir, output_file + ".pkl"), "wb") as fw: pickle.dump( {"Y_pred": Y_pred, "Y_true": Y_true}, fw, protocol=pickle.HIGHEST_PROTOCOL) return {"outputs": Y_pred, "targets": Y_true} class COINZSPredictor(COINPredictor): """ COINZSPredictor for COIN zero-shot prediction. """ def __init__(self, config): super().__init__(config) self.dataset_config = config.dataset def predict_loop(self, model, eval_dataloader, output_file="result.pkl"): """refactored from line 144: https://github.com/DmZhukov/CrossTask/blob/master/train.py """ ctx = 0 model.eval() model = model.to(ctx) with torch.no_grad(): outputs = eval_dataloader.dataset.meta_processor.meta_text_labels( self.dataset_config) outputs = self.to_ctx(outputs, ctx) label_hidden_states = model.forward_text(**outputs).cpu() label_sim = label_hidden_states @ label_hidden_states.t() num_labels = label_sim.size(0) eye_mask = ~torch.eye(num_labels, dtype=torch.bool) label_sim = label_sim.masked_select(eye_mask).view(num_labels, num_labels - 1) lbd = label_sim.max() # this is not a loss but just compute neg_log_prob. 
Y_pred = [] Y_true = [] with torch.no_grad(): for batch in eval_dataloader: self(batch, label_hidden_states, model, lbd, Y_pred, Y_true) return self.finalize(Y_pred, Y_true, output_file) def reshape_subsample(self, sample): for key in sample: if torch.is_tensor(sample[key]): sample[key] = self.flat_subsample(sample[key]) return sample def flat_subsample(self, tensor): if len(tensor.size()) > 1 and tensor.size(0) == 1: tensor = tensor.squeeze(0) return tensor def __call__(self, sample, label_hidden_states, model, lbd, Y_pred, Y_true): sample = self.reshape_subsample(sample) sample = self.to_ctx(sample) # compute the average logits over sliding windows. sample["output_hidden_states"] = True video_outputs = model.forward_video(**sample).cpu() output = {"logits": video_outputs[:, 1:sample["vmasks"].size(1)+1] @ label_hidden_states.t()} logits = self._merge_windows(sample, output) # logic of zero-shot for sequence labeling. logits_argmax = logits.argmax(dim=1) + 1 # 0 is "O" label. logits_max = logits.max(dim=1)[0] pred = torch.zeros_like(logits_argmax) label_select = logits_max > lbd # 73 or 74 pred[label_select] = logits_argmax[label_select] Y_pred.append(pred) Y_true.append(sample["video_targets"].squeeze(0).cpu()) def finalize(self, Y_pred, Y_true, output_file=None): Y_pred = torch.cat(Y_pred, dim=0).numpy() Y_true = torch.cat(Y_true, dim=0).numpy() assert len(Y_pred) == len(Y_true) error_mask = Y_pred != Y_true print("sample error", Y_pred[error_mask][:10], Y_true[error_mask][:10]) print("sample error", Y_pred[error_mask][10:20], Y_true[error_mask][10:20]) if output_file is not None: with open( os.path.join(self.pred_dir, output_file + ".pkl"), "wb") as fw: pickle.dump( {"Y_pred": Y_pred, "Y_true": Y_true}, fw, protocol=pickle.HIGHEST_PROTOCOL) return {"outputs": Y_pred, "targets": Y_true} class DiDeMoPredictor(Predictor): """reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py """ def __init__(self, config): super().__init__(config) # load targets. with open(config.dataset.test_path) as data_file: self.test_data = json.load(data_file) def predict_loop(self, model, eval_dataloader, output_file="didemo.npy"): """ TODO: two solutions here. """ import itertools # 21 chunks. self.possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)] for i in itertools.combinations(range(6), 2): self.possible_segments.append(i) # pick segments from a video. """on-the-fly prediction on a single gpu.""" self.full_scores = [] model.eval() model = model.cuda() with torch.no_grad(): for data in eval_dataloader: # TODO special forwarding logic here. data = self.to_ctx(data) data["output_hidden_states"] = True hidden_video = model.forward_video(**data) data["output_hidden_states"] = False pooled_text = model.forward_text(**data) outputs = { "hidden_video": hidden_video, "pooled_text": pooled_text } outputs.update(data) self(outputs) return self.finalize(output_file) def __call__(self, sample): # TODO: make an index select from self.possible_segments. hidden_video = sample["hidden_video"] pooled_text = sample["pooled_text"] vmasks = sample["vmasks"] # probably maintain valid results here. hidden_video = hidden_video[:, 1:-1, :] # probably maintain valid results here. 
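        # each DiDeMo segment spans 5 seconds, hence the *5 frame indexing
        # below; candidate segments are mean-pooled before scoring against
        # the pooled text.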
pooled_video = [] for s, e in self.possible_segments: pooled_video.append( torch.mean( hidden_video[:, int(s*5):int((e+1)*5), :], dim=1, keepdim=True) ) pooled_video = torch.cat(pooled_video, dim=1) scores = torch.bmm( pooled_video, pooled_text.unsqueeze(-1)).squeeze(-1).cpu() ranks = scores.argsort(dim=-1, descending=True) for batch_idx, rank in enumerate(ranks): rank_of_moment = [] for m_idx, moment in enumerate(rank): s, e = self.possible_segments[moment.item()] if torch.any( vmasks[batch_idx, int(s*5):int((e+1)*5)] ): rank_of_moment.append((s, e)) self.full_scores.append(rank_of_moment) def finalize(self, output_file=None): outputs = self._aggregate_scores(self.full_scores) if output_file is not None: np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs) return {"outputs": outputs, "targets": self.test_data} def _aggregate_scores(self, scores): self.full_scores = [] return scores
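

# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): computing recall@k from the
# (num_texts x num_videos) matrix returned by
# `RetrievalPredictor._aggregate_scores`, assuming text i is paired with
# video i (the usual retrieval-benchmark convention).
def _recall_at_k(score_matrix, k=1):
    ranks = (-score_matrix).argsort(axis=1)  # best-scoring videos first.
    gt = np.arange(score_matrix.shape[0])[:, None]
    return float((ranks[:, :k] == gt).any(axis=1).mean())


if __name__ == "__main__":
    demo = np.array([[0.9, 0.1], [0.2, 0.8]])
    print(_recall_at_k(demo, k=1))  # 1.0: both texts rank their own video first.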
23,125
37.802013
113
py
sign-topic
sign-topic-main/examples/MMPT/mmpt/processors/how2processor.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved

import torch
import math
import pickle
import random
import os
import numpy as np

from collections import deque
from typing import Optional, Tuple, List
from .processor import (
    Processor,
    MetaProcessor,
    TextProcessor,
    Aligner,
    MMAttentionMask2DProcessor,
)

from ..utils import ShardedTensor


class How2MetaProcessor(MetaProcessor):
    def __init__(self, config):
        super().__init__(config)
        path = self._get_split_path(config)
        with open(path) as fd:
            self.data = [line.strip() for line in fd]

    def __getitem__(self, idx):
        video_id = self.data[idx]
        return video_id, video_id


class ShardedHow2MetaProcessor(How2MetaProcessor):
    def __init__(self, config):
        super().__init__(config)
        self.split = str(config.split)
        self.vfeat_dir = config.vfeat_dir
        self._init_shard()

    def _init_shard(self):
        if self.split == "train":
            prefix = "train"
        elif self.split == "valid":
            prefix = "val"
        elif self.split == "test":
            print("use how2 val as test.")
            prefix = "val"
        else:
            raise ValueError("unsupported split for MetaProcessor:", self.split)
        meta_fn = os.path.join(self.vfeat_dir, prefix + "_meta.pkl")
        with open(meta_fn, "rb") as fr:
            meta = pickle.load(fr)

        video_id_to_shard = {}
        for shard_id in meta:
            for video_idx, video_id in enumerate(meta[shard_id]):
                video_id_to_shard[video_id] = (shard_id, video_idx)
        self.video_id_to_shard = video_id_to_shard

    def __getitem__(self, idx):
        video_id, _ = super().__getitem__(idx)
        shard_id, shard_idx = self.video_id_to_shard[video_id]
        meta = (video_id, idx, shard_id, shard_idx)
        return meta, meta


class ShardedVideoProcessor(Processor):
    """
    mmaped shards of numpy video features.
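    Each split is stored as `<split>_<shard_id>` ShardedTensor files; the
    companion `<split>_meta.pkl` maps a video id to its (shard_id, index)
    slot (see `ShardedHow2MetaProcessor._init_shard` above).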
""" def __init__(self, config): self.split = str(config.split) self.vfeat_dir = config.vfeat_dir def __call__(self, video_id): _, _, shard_id, video_idx = video_id if self.split == "train": shard = ShardedTensor.load( os.path.join(self.vfeat_dir, "train" + "_" + str(shard_id)), "r" ) elif self.split == "valid": shard = ShardedTensor.load( os.path.join(self.vfeat_dir, "val" + "_" + str(shard_id)), "r" ) elif self.split == "test": shard = ShardedTensor.load( os.path.join(self.vfeat_dir, "val" + "_" + str(shard_id)), "r" ) else: raise ValueError("unknown split", self.split) feat = shard[video_idx] return feat class ShardedTextProcessor(Processor): def __init__(self, config): self.tfeat_dir = str(config.tfeat_dir) self.split = str(config.split) def __call__(self, video_id): _, _, shard_id, shard_idx = video_id if self.split == "train": target_path = self.tfeat_dir + "train" + "_" + str(shard_id) elif self.split == "valid": target_path = self.tfeat_dir + "val" + "_" + str(shard_id) elif self.split == "test": target_path = self.tfeat_dir + "val" + "_" + str(shard_id) else: raise ValueError("unknown split", self.split) startend = ShardedTensor.load( target_path + ".startends", "r")[shard_idx] cap_ids = ShardedTensor.load( target_path + ".caps_ids", "r")[shard_idx] cap = [] for clip_idx in range(len(cap_ids)): clip = cap_ids[clip_idx] cap.append(clip[clip != -1].tolist()) start, end = startend[:, 0].tolist(), startend[:, 1].tolist() return {"start": start, "end": end, "cap": cap} class FixedLenAligner(Aligner): """ In the model we assume text is on the left (closer to BERT formulation) and video is on the right. We fix the total length of text + video. max_video_len is in number of secs. max_text_len is in number of tokens. special tokens formats: we use the format [CLS] [SEP] text tokens [SEP] [PAD] ... [CLS] will be splitted out into: [CLS] video tokens [SEP] text tokens [SEP] [PAD] ... token_type_ids will be generated by the model (for now). 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | so each sequence owns a [SEP] token for no-ops. """ def __init__(self, config): super().__init__(config) self.text_clip_sampler = TextClipSamplingProcessor( self.max_len - self.max_video_len - 3 ) """ decide subsampling: `config.subsampling` will change batch_size in trainer. `config.clip_per_video` (used by RetriTask) doesn't change batch_size in trainer. """ subsampling = config.subsampling \ if config.subsampling is not None else None if config.clip_per_video is not None: subsampling = config.clip_per_video self.subsampling = subsampling def _get_text_maxlen(self): # use max text len return self.text_clip_sampler.max_text_len def __call__(self, video_id, video_feature, text_feature): from transformers import default_data_collator video_idx = video_id[1] if self.subsampling is not None and self.subsampling >= 1: batch = [] for _ in range(self.subsampling): centerclip_idx = random.randint( 0, len(text_feature["start"]) - 1) batch.append( self.sampling( video_idx, video_feature, text_feature, centerclip_idx, self._get_text_maxlen() )) batch = self.batch_post_processing(batch, video_feature) batch = default_data_collator(batch) else: raise ValueError( "dataset.subsampling must be >= 1 for efficient video loading.") batch = self.sampling(video_idx, video_feature, text_feature) batch = self.batch_post_processing(batch, video_feature) batch["video_id"] = video_id if isinstance(video_id, str) \ else video_id[0] # e2e: make sure frame ids is into tensor. 
assert torch.is_tensor(batch["vfeats"]) return batch def sampling( self, video_idx, video_feature, text_feature, centerclip_idx=None, sampled_max_text_len=None, ): text_clip_indexs = self.text_clip_sampler( text_feature, centerclip_idx, sampled_max_text_len ) if isinstance(video_feature, np.ndarray): video_len = len(video_feature) else: video_len = math.ceil(text_feature["end"][-1]) video_end = min( math.ceil(text_feature["end"][text_clip_indexs[-1]]), video_len ) video_start = max( min( math.floor(text_feature["start"][text_clip_indexs[0]]), video_end), 0 ) video_clips = {"start": [video_start], "end": [video_end]} # tensorize. vfeats, vmasks = self._build_video_seq( video_feature, video_clips ) caps, cmasks = self._build_text_seq( text_feature, text_clip_indexs ) text_start = text_clip_indexs[0] text_end = text_clip_indexs[-1] + 1 return { "caps": caps, "cmasks": cmasks, "vfeats": vfeats, "vmasks": vmasks, "video_start": video_start, "video_end": video_end, "text_start": text_start, "text_end": text_end, } class VariedLenAligner(FixedLenAligner): def __init__(self, config): super().__init__(config) self.sampled_min_len = config.sampled_min_len self.sampled_max_len = config.sampled_max_len def _get_text_maxlen(self): return random.randint(self.sampled_min_len, self.sampled_max_len) class StartClipAligner(VariedLenAligner): def sampling( self, video_idx, video_feature, text_feature, centerclip_idx=None, sampled_max_text_len=None, ): return super().sampling( video_idx, video_feature, text_feature, 0) class OverlappedAligner(VariedLenAligner): """video clip and text clip has overlappings but may not be the same start/end.""" def __init__(self, config): super().__init__(config) self.sampled_video_min_len = config.sampled_video_min_len self.sampled_video_max_len = config.sampled_video_max_len self.video_clip_sampler = VideoClipSamplingProcessor() def _get_video_maxlen(self): return random.randint( self.sampled_video_min_len, self.sampled_video_max_len) def sampling( self, video_idx, video_feature, text_feature, centerclip_idx=None, sampled_max_text_len=None, ): text_clip_indexs = self.text_clip_sampler( text_feature, centerclip_idx, sampled_max_text_len ) if isinstance(video_feature, np.ndarray): video_len = len(video_feature) else: video_len = math.ceil(text_feature["end"][-1]) low = math.floor(text_feature["start"][text_clip_indexs[0]]) high = math.ceil(text_feature["end"][text_clip_indexs[-1]]) if low < high: center = random.randint(low, high) else: center = int((low + high) // 2) center = max(0, min(video_feature.shape[0] - 1, center)) assert 0 <= center < video_feature.shape[0] video_clips = self.video_clip_sampler( video_len, self._get_video_maxlen(), center ) video_start = video_clips["start"][0] video_end = video_clips["end"][0] # tensorize. vfeats, vmasks = self._build_video_seq( video_feature, video_clips ) caps, cmasks = self._build_text_seq( text_feature, text_clip_indexs ) text_start = text_clip_indexs[0] text_end = text_clip_indexs[-1] + 1 return { "caps": caps, "cmasks": cmasks, "vfeats": vfeats, "vmasks": vmasks, "video_start": video_start, "video_end": video_end, "text_start": text_start, "text_end": text_end, } class MFMMLMAligner(FixedLenAligner): """ `FixedLenAligner` with Masked Language Model and Masked Frame Model. 
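    Text tokens are corrupted by `TextMaskingProcessor` and frames by
    `FrameMaskingProcessor`; with probability `mm_prob`, one modality is
    masked entirely ("full") so the model must reconstruct it from the other.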
""" def __init__(self, config): super().__init__(config) keep_prob = config.keep_prob if config.keep_prob is not None else 1.0 self.text_clip_sampler = TextClipSamplingProcessor( self.max_len - self.max_video_len - 3, keep_prob ) self.sampled_min_len = config.sampled_min_len self.sampled_max_len = config.sampled_max_len self.masked_token_sampler = TextMaskingProcessor(config) self.mm_type = config.mm_type \ if config.mm_type is not None else "full" self.attnmasker = MMAttentionMask2DProcessor() \ if self.mm_type == "textgen" else None self.masked_frame_sampler = FrameMaskingProcessor(config) self.lazy_vfeat_mask = ( False if config.lazy_vfeat_mask is None else config.lazy_vfeat_mask ) self.mm_prob = config.mm_prob if config.mm_prob is not None else 0. def __call__(self, video_id, video_feature, text_feature): from transformers import default_data_collator if self.subsampling is not None and self.subsampling > 1: batch = [] for _ in range(self.subsampling): centerclip_idx = random.randint( 0, len(text_feature["start"]) - 1) sampled_max_text_len = random.randint( self.sampled_min_len, self.sampled_max_len ) batch.append( self.sampling( video_id, video_feature, text_feature, centerclip_idx, sampled_max_text_len, ) ) batch = self.batch_post_processing(batch, video_feature) batch = default_data_collator(batch) else: batch = self.sampling(video_id, video_feature, text_feature) batch = self.batch_post_processing(batch, video_feature) batch["video_id"] = video_id if isinstance(video_id, str) \ else video_id[0] return batch def sampling( self, video_id, video_feature, text_feature, centerclip_idx=None, sampled_max_text_len=None, ): output = FixedLenAligner.sampling(self, video_id, video_feature, text_feature, centerclip_idx, sampled_max_text_len) masking_text, masking_video = None, None if random.random() < self.mm_prob: if random.random() > 0.5: masking_text, masking_video = self.mm_type, "no" else: masking_text, masking_video = "no", "full" video_feats = output["vfeats"] if not self.lazy_vfeat_mask else None video_label = self.masked_frame_sampler( output["vmasks"], masking_video, vfeats=video_feats) caps, text_label = self.masked_token_sampler( output["caps"], masking_text) output.update({ "caps": caps, "video_label": video_label, "text_label": text_label, }) if self.attnmasker is not None: attention_mask = self.attnmasker( output["vmasks"], output["cmasks"], masking_text) output.update({ "attention_mask": attention_mask }) return output class FrameMaskingProcessor(Processor): def __init__(self, config): self.mfm_probability = 0.15 if config.mfm_probability is not None: self.mfm_probability = config.mfm_probability def __call__(self, vmasks, modality_masking=None, vfeats=None): """ We perform lazy masking to save data transfer time. It only generates video_labels by default and MFM model will do actualy masking. Return: `video_label` is a binary mask. """ video_label = vmasks.clone() if modality_masking is not None: if modality_masking == "full": probability_matrix = torch.full(video_label.shape, 1.) elif modality_masking == "no": probability_matrix = torch.full(video_label.shape, 0.) elif modality_masking == "inverse": probability_matrix = torch.full( video_label.shape, 1. 
- self.mfm_probability) else: raise ValueError("unknown modality masking.", modality_masking) else: probability_matrix = torch.full( video_label.shape, self.mfm_probability) masked_indices = torch.bernoulli(probability_matrix).bool() # We only compute loss on masked tokens video_label[~masked_indices] = 0 if vfeats is not None: vfeats[video_label, :] = 0.0 return video_label class TextGenerationProcessor(Processor): def __init__(self, tokenizer): self.bos_token_id = tokenizer.bos_token_id self.pad_token_id = tokenizer.pad_token_id def __call__(self, inputs): labels = inputs.clone() # [CLS] [SEP] for video labels[:2] = -100 # keep [SEP] for text. pad_mask = labels == self.pad_token_id labels[pad_mask] = -100 inputs[2:] = torch.cat([ torch.LongTensor([self.bos_token_id]), inputs[2:-1]]) inputs[pad_mask] = self.pad_token_id assert len(inputs) == len(labels) return inputs, labels class TextMaskingProcessor(Processor): def __init__(self, config): """this function is borrowed from `transformers/data/data_collator.DataCollatorForLanguageModeling`""" self.mlm_probability = 0.15 if config.mlm_probability is not None: self.mlm_probability = config.mlm_probability self.bert_name = config.bert_name # [CLS] is used as bos_token and [SEP] is used as eos_token. # https://huggingface.co/transformers/master/model_doc/bertgeneration.html from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( self.bert_name, bos_token="[CLS]", eos_token="[SEP]") self.textgen = TextGenerationProcessor(self.tokenizer) def __call__( self, inputs: torch.Tensor, modality_masking=None, special_tokens_mask: Optional[torch.Tensor] = None ) -> Tuple[torch.Tensor, torch.Tensor]: """ expand modality_masking into None: traditional bert masking. "no": no masking. "full": all [MASK] token for generation. "gen": autoregressive generation. """ """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """ labels = inputs.clone() # We sample a few tokens in each sequence for MLM training # (with probability `self.mlm_probability`) if modality_masking is not None: if modality_masking == "full": probability_matrix = torch.full(labels.shape, 1.) elif modality_masking == "no": probability_matrix = torch.full(labels.shape, 0.) elif modality_masking.startswith("textgen"): # [CLS] [SEP] <s> ... inputs, labels = self.textgen(inputs) if "mask" not in modality_masking: return inputs, labels inputs = self.mask_input(inputs, special_tokens_mask) return inputs, labels elif modality_masking == "mask": inputs = self.mask_input(inputs, special_tokens_mask) labels = torch.full(inputs.shape, -100) return inputs, labels elif modality_masking == "inverse": probability_matrix = torch.full(labels.shape, 1. 
- self.mlm_probability) else: raise ValueError("unknown modality masking.", modality_masking) else: probability_matrix = torch.full(labels.shape, self.mlm_probability) if special_tokens_mask is None: special_tokens_mask = self.get_special_tokens_mask( labels.tolist(), already_has_special_tokens=True ) special_tokens_mask = torch.tensor( special_tokens_mask, dtype=torch.bool) else: special_tokens_mask = special_tokens_mask.bool() probability_matrix.masked_fill_(special_tokens_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, # we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = ( torch.bernoulli( torch.full(labels.shape, 0.8)).bool() & masked_indices ) inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids( self.tokenizer.mask_token ) # 10% of the time, we replace masked input tokens with random word indices_random = ( torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced ) random_words = torch.randint( len(self.tokenizer), labels.shape, dtype=torch.long ) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input # tokens unchanged return inputs, labels def mask_input(self, inputs, special_tokens_mask=None): # the following is new with masked autoregressive. probability_matrix = torch.full( inputs.shape, self.mlm_probability) if special_tokens_mask is None: special_tokens_mask = self.get_special_tokens_mask( inputs.tolist(), already_has_special_tokens=True ) special_tokens_mask = torch.tensor( special_tokens_mask, dtype=torch.bool) else: special_tokens_mask = special_tokens_mask.bool() probability_matrix.masked_fill_(special_tokens_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() indices_replaced = ( torch.bernoulli( torch.full(inputs.shape, 0.8)).bool() & masked_indices ) inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids( self.tokenizer.mask_token ) # 10% of the time, we replace masked input tokens with random word indices_random = ( torch.bernoulli(torch.full(inputs.shape, 0.5)).bool() & masked_indices & ~indices_replaced ) random_words = torch.randint( len(self.tokenizer), inputs.shape, dtype=torch.long ) inputs[indices_random] = random_words[indices_random] return inputs def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Note: the version from transformers do not consider pad as special tokens. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if" "the provided sequence of " "ids is already formated with special tokens " "for the model." ) return list(map(lambda x: 1 if x in [ self.tokenizer.sep_token_id, self.tokenizer.cls_token_id, self.tokenizer.pad_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] class TextClipSamplingProcessor(Processor): def __init__(self, max_text_len, keep_prob=1.0): self.max_text_len = max_text_len self.max_video_len = 256 # always hold. self.keep_prob = keep_prob def __call__( self, text_feature, centerclip_idx=None, sampled_max_text_len=None, sampled_max_video_len=None, ): # Let's use all caps for now and see if 256 can cover all of them. 
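        # Greedily grow a window of consecutive clips around `centerclip_idx`,
        # extending left or right at random, until either the text-token or
        # the video-length budget below is exhausted.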
if sampled_max_text_len is not None: max_text_len = sampled_max_text_len else: max_text_len = self.max_text_len if sampled_max_video_len is not None: max_video_len = sampled_max_video_len else: max_video_len = self.max_video_len t_num_clips = len(text_feature["start"]) if centerclip_idx is None: centerclip_idx = random.randint(0, t_num_clips - 1) start_idx, end_idx = centerclip_idx, centerclip_idx + 1 text_clip_indexs = deque() text_clip_indexs.append(start_idx) text_len = len(text_feature["cap"][start_idx]) video_len = max( 0, text_feature["end"][start_idx] - text_feature["start"][start_idx], ) while ( (start_idx > 0 or end_idx < t_num_clips) and text_len < max_text_len and video_len < max_video_len ): if random.random() > 0.5 and end_idx < t_num_clips: # skip the next one? if random.random() > self.keep_prob and (end_idx + 1) < t_num_clips: end_idx = end_idx + 1 text_clip_indexs.append(end_idx) text_len += len(text_feature["cap"][end_idx]) end_idx += 1 elif start_idx > 0: if random.random() > self.keep_prob and (start_idx - 1) > 0: start_idx = start_idx - 1 start_idx -= 1 text_clip_indexs.insert(0, start_idx) text_len += len(text_feature["cap"][start_idx]) else: if end_idx < t_num_clips: if random.random() > self.keep_prob and (end_idx + 1) < t_num_clips: end_idx = end_idx + 1 text_clip_indexs.append(end_idx) text_len += len(text_feature["cap"][end_idx]) end_idx += 1 else: return text_clip_indexs video_len = max( 0, text_feature["end"][text_clip_indexs[-1]] - text_feature["start"][text_clip_indexs[0]], ) return text_clip_indexs class VideoClipSamplingProcessor(Processor): def __call__(self, video_len, max_video_len, center): """ `video_len`: length of the video. `max_video_len`: maximum video tokens allowd in a sequence. `center`: initial starting index. """ assert center >= 0 and center < video_len t_clip_len = 0 start, end = center, center while (start > 0 or end < video_len) and t_clip_len < max_video_len: # decide the direction to grow. if start <= 0: end += 1 elif end >= video_len: start -= 1 elif random.random() > 0.5: end += 1 else: start -= 1 t_clip_len += 1 return {"start": [start], "end": [end]} class How2MILNCEAligner(FixedLenAligner): """reference: `antoine77340/MIL-NCE_HowTo100M/video_loader.py`""" def __init__(self, config): super().__init__(config) self.num_candidates = 4 self.min_time = 5.0 self.num_sec = 3.2 # self.num_sec = self.num_frames / float(self.fps) num_frames=16 / fps = 5 # self.num_frames = 16 def sampling( self, video_id, video_feature, text_feature, centerclip_idx=None, # will be ignored. sampled_max_text_len=None # will be ignored. 
): text, start, end = self._get_text(text_feature) video = self._get_video(video_feature, start, end) vfeats = torch.zeros((self.max_video_len, video_feature.shape[1])) vmasks = torch.zeros((self.max_video_len,), dtype=torch.bool) vfeats[: video.shape[0]] = torch.from_numpy(np.array(video)) vmasks[: video.shape[0]] = 1 caps, cmasks = [], [] for words in text: cap, cmask = self._build_text_seq(text_feature, words) caps.append(cap) cmasks.append(cmask) caps = torch.stack(caps) cmasks = torch.stack(cmasks) # video of shape: (video_len) # text of shape (num_candidates, max_text_len) return { "caps": caps, "cmasks": cmasks, "vfeats": vfeats, "vmasks": vmasks, # "video_id": video_id, } def _get_video(self, video_feature, start, end): start_seek = random.randint(start, int(max(start, end - self.num_sec))) # duration = self.num_sec + 0.1 return video_feature[start_seek : int(start_seek + self.num_sec)] def _get_text(self, cap): ind = random.randint(0, len(cap["start"]) - 1) if self.num_candidates == 1: words = [ind] else: words = [] cap_start = self._find_nearest_candidates(cap, ind) for i in range(self.num_candidates): words.append([max(0, min(len(cap["cap"]) - 1, cap_start + i))]) start, end = cap["start"][ind], cap["end"][ind] # TODO: May need to be improved for edge cases. # expand the min time. if end - start < self.min_time: diff = self.min_time - end + start start = max(0, start - diff / 2) end = start + self.min_time return words, int(start), int(end) def _find_nearest_candidates(self, caption, ind): """find the range of the clips.""" start, end = ind, ind #diff = caption["end"][end] - caption["start"][start] n_candidate = 1 while n_candidate < self.num_candidates: # the first clip if start == 0: return 0 # we add () in the following condition to fix the bug. elif end == (len(caption["start"]) - 1): return start - (self.num_candidates - n_candidate) elif (caption["end"][end] - caption["start"][start - 1]) < ( caption["end"][end + 1] - caption["start"][start] ): start -= 1 else: end += 1 n_candidate += 1 return start class PKLJSONStrTextProcessor(TextProcessor): """`caption.json` from howto100m are preprocessed as a dict `[video_id, json_str]`. Json parsing tokenization are conducted on-the-fly and cached into dict. """ def __init__(self, config, max_clip_text_len=96): print("[Warning] PKLJSONStrTextProcessor is slow for num_workers > 0.") self.caption_pkl_path = str(config.caption_pkl_path) with open(self.caption_pkl_path, "rb") as fd: self.data = pickle.load(fd) self.max_clip_text_len = max_clip_text_len from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( str(config.bert_name), use_fast=config.use_fast ) def __call__(self, video_id): caption = self.data[video_id] if isinstance(caption, str): import json caption = json.loads(caption) cap = [] for clip_idx, text_clip in enumerate(caption["text"]): clip_ids = [] if isinstance(text_clip, str): clip_ids = self.tokenizer( text_clip[: self.max_clip_text_len], add_special_tokens=False )["input_ids"] cap.append(clip_ids) caption["cap"] = cap caption.pop("text") # save space. self.data[video_id] = caption return caption
32,302
35.377252
88
py
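The file above builds BERT-style masked language modeling over text (plus an analogous scheme for video frames). Below is a minimal, self-contained sketch of the 80/10/10 masking recipe that TextMaskingProcessor applies; `mask_token_id` and `vocab_size` are hypothetical stand-ins for the real tokenizer attributes.

import torch

def mask_tokens(inputs, mlm_probability=0.15, mask_token_id=103, vocab_size=30522):
    # Sample positions to mask; loss is only computed on masked positions.
    labels = inputs.clone()
    masked = torch.bernoulli(torch.full(labels.shape, mlm_probability)).bool()
    labels[~masked] = -100
    # 80% of masked positions: replace with [MASK].
    replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked
    inputs[replaced] = mask_token_id
    # 10%: replace with a random token (half of the remaining 20%).
    randomized = (torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
                  & masked & ~replaced)
    inputs[randomized] = torch.randint(vocab_size, labels.shape)[randomized]
    # Remaining 10%: keep the original token.
    return inputs, labels

inputs, labels = mask_tokens(torch.randint(30522, (2, 16)))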
sign-topic
sign-topic-main/examples/MMPT/mmpt/processors/processor.py
# Copyright (c) Facebook, Inc. All Rights Reserved

import numpy as np
import os
import torch


class Processor(object):
    """
    A generic processor for video (codec, feature etc.) and text.
    """

    def __call__(self, **kwargs):
        raise NotImplementedError


class MetaProcessor(Processor):
    """
    A meta processor is expected to load the metadata of a dataset:
        (e.g., video_ids, or captions).
    You must implement the `__getitem__` (meta datasets are rather diverse.).
    """

    def __init__(self, config):
        self.split = config.split

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        raise NotImplementedError

    def _get_split_path(self, config):
        splits = {
            "train": config.train_path,
            "valid": config.val_path,
            "test": config.test_path,
        }
        if config.split is not None:
            return splits[config.split]
        return config.train_path


class TextProcessor(Processor):
    """
    A generic text processor: rename this as `withTokenizer`.
    Tokenizes a string of text on-the-fly.
    Warning: mostly used for end tasks.
        (on-the-fly tokenization is slow for how2.)
    TODO(huxu): move this class as a subclass.
    """

    def __init__(self, config):
        self.bert_name = str(config.bert_name)
        self.use_fast = config.use_fast
        from transformers import AutoTokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.bert_name, use_fast=self.use_fast
        )

    def __call__(self, text_id):
        caption = self.tokenizer(text_id, add_special_tokens=False)
        return caption["input_ids"]


class VideoProcessor(Processor):
    """
    A generic video processor: loads numpy video tokens by default.
    """

    def __init__(self, config):
        self.vfeat_dir = config.vfeat_dir

    def __call__(self, video_fn):
        if isinstance(video_fn, tuple):
            video_fn = video_fn[0]
        assert isinstance(video_fn, str)
        video_fn = os.path.join(self.vfeat_dir, video_fn + ".npy")
        feat = np.load(video_fn)
        return feat


class Aligner(object):
    """
    An alignprocessor aligns video and text and outputs a dict
    of tensors (for a model).
    """

    def __init__(self, config):
        """__init__ needs to be light weight for more workers/threads."""
        self.split = config.split
        self.max_video_len = config.max_video_len
        self.max_len = config.max_len
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(
            str(config.bert_name), use_fast=config.use_fast
        )
        self.cls_token_id = tokenizer.cls_token_id
        self.sep_token_id = tokenizer.sep_token_id
        self.pad_token_id = tokenizer.pad_token_id
        self.mask_token_id = tokenizer.mask_token_id

    def __call__(self, video_id, video_feature, text_feature):
        raise NotImplementedError

    def _build_video_seq(self, video_feature, video_clips=None):
        """
        `video_feature`: available video tokens.
        `video_clips`: video clip sequence to build.
        """
        if not isinstance(video_feature, np.ndarray):
            raise ValueError(
                "unsupported type of video_feature", type(video_feature)
            )

        if video_clips is None:
            # this is borrowed from DSAligner
            video_start = 0
            video_end = min(len(video_feature), self.max_video_len)
            # the whole sequence is a single clip.
            video_clips = {"start": [video_start], "end": [video_end]}

        vfeats = np.zeros(
            (self.max_video_len, video_feature.shape[1]), dtype=np.float32
        )
        vmasks = torch.zeros((self.max_video_len,), dtype=torch.bool)
        video_len = 0
        for start, end in zip(video_clips["start"], video_clips["end"]):
            clip_len = min(self.max_video_len - video_len, (end - start))
            if clip_len > 0:
                vfeats[video_len: video_len + clip_len] = video_feature[
                    start: start + clip_len
                ]
                vmasks[video_len: video_len + clip_len] = 1
                video_len += clip_len
        vfeats = torch.from_numpy(vfeats)

        return vfeats, vmasks

    def _build_text_seq(self, text_feature, text_clip_indexs=None):
        """
        `text_feature`: all available clips.
        `text_clip_indexs`: clip sequence to build.
        """
        if text_clip_indexs is None:
            text_clip_indexs = [0]

        full_caps = []
        if isinstance(text_feature, dict):
            for clip_idx in text_clip_indexs:
                full_caps.extend(text_feature["cap"][clip_idx])
        else:
            full_caps = text_feature
        max_text_len = self.max_len - self.max_video_len - 3
        full_caps = full_caps[:max_text_len]
        full_caps = (
            [self.cls_token_id, self.sep_token_id]
            + full_caps + [self.sep_token_id]
        )
        text_pad_len = self.max_len - len(full_caps) - self.max_video_len
        padded_full_caps = full_caps + [self.pad_token_id] * text_pad_len
        caps = torch.LongTensor(padded_full_caps)
        cmasks = torch.zeros((len(padded_full_caps),), dtype=torch.bool)
        cmasks[: len(full_caps)] = 1

        return caps, cmasks

    def batch_post_processing(self, batch, video_feature):
        return batch


class MMAttentionMask2DProcessor(Processor):
    """text generation requires a 2d mask
    that is harder to generate by GPU at this stage."""

    def __call__(self, vmask, cmask, mtype):
        if mtype == "textgen":
            return self._build_textgeneration_mask(vmask, cmask)
        elif mtype == "videogen":
            return self._build_videogeneration_mask(vmask, cmask)
        else:
            return self._build_mm_mask(vmask, cmask)

    def _build_mm_mask(self, vmask, cmask):
        mask_1d = torch.cat([cmask[:1], vmask, cmask[1:]], dim=0)
        return mask_1d[None, :].repeat(mask_1d.size(0), 1)

    def _build_videogeneration_mask(self, vmask, cmask):
        # cls_mask is only about text otherwise it will leak generation.
        cls_text_mask = torch.cat([
            # [CLS]
            torch.ones(
                (1,), dtype=torch.bool, device=cmask.device),
            # video tokens and [SEP] for video.
            torch.zeros(
                (vmask.size(0) + 1,), dtype=torch.bool, device=cmask.device),
            cmask[2:]
        ], dim=0)

        # concat horizontally.
        video_len = int(vmask.sum())
        video_masks = torch.cat([
            # [CLS]
            torch.ones(
                (video_len, 1), dtype=torch.bool, device=cmask.device
            ),
            torch.tril(
                torch.ones(
                    (video_len, video_len),
                    dtype=torch.bool, device=cmask.device)),
            # video_padding
            torch.zeros(
                (video_len, vmask.size(0) - video_len),
                dtype=torch.bool, device=cmask.device
            ),
            # [SEP] for video (unused).
            torch.zeros(
                (video_len, 1), dtype=torch.bool, device=cmask.device
            ),
            cmask[2:].unsqueeze(0).repeat(video_len, 1)
        ], dim=1)

        text_masks = cls_text_mask[None, :].repeat(
            cmask.size(0) - 2, 1)
        video_padding_masks = cls_text_mask[None, :].repeat(
            vmask.size(0) - video_len, 1)

        return torch.cat([
            cls_text_mask[None, :],
            video_masks,
            video_padding_masks,
            torch.cat([cmask[:1], vmask, cmask[1:]], dim=0)[None, :],
            text_masks
        ], dim=0)

    def _build_textgeneration_mask(self, vmask, cmask):
        # cls_mask is only about video otherwise it will leak generation.
        cls_video_mask = torch.cat([
            # [CLS]
            torch.ones(
                (1,), dtype=torch.bool, device=cmask.device),
            vmask,
            # [SEP]
            torch.ones((1,), dtype=torch.bool, device=cmask.device),
            torch.zeros(
                (cmask.size(0) - 2,), dtype=torch.bool, device=cmask.device)
        ], dim=0)

        # concat horizontally.
        text_len = int(cmask[2:].sum())
        text_masks = torch.cat([
            # [CLS]
            torch.ones(
                (text_len, 1), dtype=torch.bool, device=cmask.device
            ),
            vmask.unsqueeze(0).repeat(text_len, 1),
            # [SEP] for video.
            torch.ones(
                (text_len, 1), dtype=torch.bool, device=cmask.device
            ),
            torch.tril(
                torch.ones(
                    (text_len, text_len),
                    dtype=torch.bool, device=cmask.device)),
            # padding.
            torch.zeros(
                (text_len, cmask.size(0) - text_len - 2),
                dtype=torch.bool, device=cmask.device
            )
        ], dim=1)

        cls_video_masks = cls_video_mask[None, :].repeat(
            vmask.size(0) + 2, 1)
        text_padding_masks = cls_video_mask[None, :].repeat(
            cmask.size(0) - text_len - 2, 1)
        return torch.cat([
            cls_video_masks, text_masks, text_padding_masks], dim=0)
9,358
33.032727
86
py
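A toy walk-through of the 2D text-generation mask built by MMAttentionMask2DProcessor above, assuming the file is importable; the sizes are arbitrary. With 6 video slots (4 valid) and a 7-slot text sequence (5 valid), the combined sequence [CLS] + video + [SEP] + text has 13 positions, and the text rows are lower-triangular so generation cannot attend to future tokens.

import torch
vmask = torch.tensor([1, 1, 1, 1, 0, 0], dtype=torch.bool)
cmask = torch.tensor([1, 1, 1, 1, 1, 0, 0], dtype=torch.bool)
mask2d = MMAttentionMask2DProcessor()(vmask, cmask, "textgen")
print(mask2d.shape)  # torch.Size([13, 13])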
sign-topic
sign-topic-main/examples/MMPT/mmpt/processors/dsprocessor.py
# Copyright (c) Facebook, Inc. All Rights Reserved
"""
Processors for all downstream (ds) tasks.
"""

import json
import os
import pickle
import random
import math
import numpy as np
import torch

from collections import defaultdict

from .processor import (
    MetaProcessor,
    VideoProcessor,
    TextProcessor,
    Aligner,
    MMAttentionMask2DProcessor,
)

from .how2processor import TextGenerationProcessor


# ------------- A General Aligner for all downstream tasks-----------------

class DSAligner(Aligner):
    """
    Downstream (DS) aligner shared by all datasets.
    """

    def __call__(self, video_id, video_feature, text_feature, wps=0.7):
        # random sample a starting sec for video.
        video_start = 0
        video_end = min(len(video_feature), self.max_video_len)
        # the whole sequence is a single clip.
        video_clips = {"start": [video_start], "end": [video_end]}

        text_feature = {
            "cap": [text_feature],
            "start": [video_start],
            "end": [len(text_feature) / wps],
        }
        text_clip_indexs = [0]

        vfeats, vmasks = self._build_video_seq(
            video_feature, video_clips
        )
        caps, cmasks = self._build_text_seq(
            text_feature, text_clip_indexs
        )

        return {
            "caps": caps,
            "cmasks": cmasks,
            "vfeats": vfeats,
            "vmasks": vmasks,
            "video_id": video_id,
        }


class NLGTextProcessor(TextProcessor):
    """
    Also return the original text as ref.
    """

    def __call__(self, text_id):
        return super().__call__(text_id), text_id


class DSNLGAligner(DSAligner):
    """extend with the capability of 2d mask for generation."""

    def __init__(self, config):
        super().__init__(config)
        self.attnmasker = MMAttentionMask2DProcessor()
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(
            self.bert_name, use_fast=self.use_fast,
            bos_token="[CLS]", eos_token="[SEP]"
        )
        self.tokenizer = tokenizer
        self.bos_token_id = tokenizer.bos_token_id
        self.eos_token_id = tokenizer.eos_token_id
        self.textgen = TextGenerationProcessor(tokenizer)

    def __call__(self, video_id, video_feature, text_feature):
        output = super().__call__(video_id, video_feature, text_feature[0])
        if self.split == "test":
            # output.update({"ref": text_feature[1]})
            output.update({"ref": self.tokenizer.decode(
                output["caps"], skip_special_tokens=True)})
            text_label = output["caps"]
            cmasks = torch.BoolTensor([1] * text_label.size(0))
            caps = torch.LongTensor([
                self.cls_token_id,
                self.sep_token_id,
                self.bos_token_id])
        else:
            caps, text_label = self.textgen(output["caps"])
            cmasks = output["cmasks"]

        attention_mask = self.attnmasker(
            output["vmasks"], cmasks, "textgen")

        output.update({
            "caps": caps,
            "cmasks": cmasks,
            "text_label": text_label,
            "attention_mask": attention_mask,
        })
        return output


# -------------------- MSRVTT ------------------------

class MSRVTTMetaProcessor(MetaProcessor):
    """MSRVTT dataset.
    reference: `howto100m/msrvtt_dataloader.py`
    """

    def __init__(self, config):
        super().__init__(config)
        import pandas as pd
        data = pd.read_csv(self._get_split_path(config))
        # TODO: add a text1ka flag.
        if config.split == "train" \
                and config.full_test_path is not None \
                and config.jsfusion_path is not None:
            # add testing videos from full_test_path not used by jsfusion.
            additional_data = pd.read_csv(config.full_test_path)
            jsfusion_data = pd.read_csv(config.jsfusion_path)
            for video_id in additional_data["video_id"]:
                if video_id not in jsfusion_data["video_id"].values:
                    data = data.append(
                        {"video_id": video_id}, ignore_index=True)

        if config.dup is not None and config.split == "train":
            data = data.append([data] * (config.dup - 1), ignore_index=True)
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """slightly modify with if condition to combine train/test."""
        vid, sentence = None, None
        vid = self.data["video_id"].values[idx]
        if "sentence" in self.data:  # for testing.
            sentence = self.data["sentence"].values[idx]
        else:  # for training.
            sentence = vid
        return vid, sentence


class MSRVTTTextProcessor(TextProcessor):
    """MSRVTT dataset.
    reference: `msrvtt_dataloader.py` `MSRVTT_TrainDataLoader`.
    TODO (huxu): add max_words.
    """

    def __init__(self, config):
        super().__init__(config)
        self.sentences = None
        if config.json_path is not None and config.split == "train":
            with open(config.json_path) as fd:
                self.data = json.load(fd)
            self.sentences = defaultdict(list)
            for s in self.data["sentences"]:
                self.sentences[s["video_id"]].append(s["caption"])

    def __call__(self, text_id):
        if self.sentences is not None:
            rind = random.randint(0, len(self.sentences[text_id]) - 1)
            sentence = self.sentences[text_id][rind]
        else:
            sentence = text_id
        caption = self.tokenizer(sentence, add_special_tokens=False)
        return caption["input_ids"]


class MSRVTTNLGTextProcessor(MSRVTTTextProcessor):
    """TODO: change dsaligner and merge to avoid any NLG text processor."""

    def __call__(self, text_id):
        if self.sentences is not None:
            rind = random.randint(0, len(self.sentences[text_id]) - 1)
            sentence = self.sentences[text_id][rind]
        else:
            sentence = text_id
        caption = self.tokenizer(sentence, add_special_tokens=False)
        return caption["input_ids"], sentence


class MSRVTTQAMetaProcessor(MetaProcessor):
    """MSRVTT-QA: retrieval-based multi-choice QA from the JSFusion dataset.
    For simplicity, we use the train retrieval model.
    reference: `https://github.com/yj-yu/lsmdc`
    """

    def __init__(self, config):
        super().__init__(config)
        import pandas as pd
        csv_data = pd.read_csv(self._get_split_path(config), sep="\t")
        data = []
        for video_id, a1, a2, a3, a4, a5, answer in zip(
                csv_data["vid_key"].values,
                csv_data["a1"].values,
                csv_data["a2"].values,
                csv_data["a3"].values,
                csv_data["a4"].values,
                csv_data["a5"].values,
                csv_data["answer"].values):
            video_id = video_id.replace("msr", "video")
            data.append((video_id, (answer, [a1, a2, a3, a4, a5])))
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


class MSRVTTQATextProcessor(TextProcessor):
    """MSRVTT-QA dataset.
    text_ans is of format `(answer, [a1, a2, a3, a4, a5])`.
    """

    def __call__(self, text_ans):
        for ans_idx, ans in enumerate(text_ans[1]):
            if isinstance(ans, str):
                text_ans[1][ans_idx] = self.tokenizer(
                    ans, add_special_tokens=False)["input_ids"]
        return text_ans


class MSRVTTQAAligner(DSAligner):
    """MSRVTT dataset.
    similar to sample in how2.
    we call __call__ multiple times.
    """

    def __call__(self, video_id, video_feature, text_feature, wps=0.7):
        caps = []
        cmasks = []
        answer = text_feature[0]
        for ans_idx, _text_feature in enumerate(text_feature[1]):
            output = super().__call__(
                video_id, video_feature, _text_feature, wps)
            caps.append(output["caps"])
            cmasks.append(output["cmasks"])
        output.update({
            "caps": torch.stack(caps),
            "cmasks": torch.stack(cmasks),
            "answers": torch.LongTensor([answer]),
        })
        return output


# -------------------- Youcook -----------------------

class YoucookMetaProcessor(MetaProcessor):
    """Youcook dataset.
    reference: `howto100m/youcook_dataloader.py`
    note that the data can be different as
    (1) some videos already in Howto100m are removed, and
    (2) stop words are removed from the caption.
    TODO (huxu): make a flag to load the original caption.
    (see youcookii_annotations_trainval.json).

    The max_video_len can be 264 and text can be 64 tokens.
    In reality we may not need that long. see projects/task/youcook.yaml
    """

    def __init__(self, config):
        super().__init__(config)
        vfeat_dir = config.vfeat_dir
        print(self._get_split_path(config))
        with open(self._get_split_path(config), "rb") as fd:
            data = pickle.load(fd)
            all_valid_video_ids = set(
                [os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)]
            )
            recs = []
            video_ids = set()
            valid_video_ids = set()
            for rec in data:  # filter videos not available.
                udl_idx = rec["id"].rindex("_")
                video_id = rec["id"][:udl_idx]
                video_ids.add(video_id)
                if video_id in all_valid_video_ids:
                    valid_video_ids.add(video_id)
                    recs.append(rec)
            print("total video_ids in .pkl", len(video_ids))
            print("valid video_ids in .pkl", len(valid_video_ids))
            print("please verify {train,val}_list.txt")
            data = recs
        self.data = data

        with open(config.trainval_annotation) as fd:
            self.youcook_annotation = json.load(fd)["database"]
        if config.use_annotation_text is True:
            print("using text in annotation.")
            self.use_annotation_caption = True
        else:
            self.use_annotation_caption = False

    def __getitem__(self, idx):
        def _get_video_and_caption(rec):
            vid = rec["id"]
            udl_idx = vid.rindex("_")
            video_id, clip_id = vid[:udl_idx], int(vid[udl_idx + 1:])
            clip = self.youcook_annotation[video_id]["annotations"][clip_id]
            start, end = clip["segment"]
            if self.use_annotation_caption:
                caption = clip["sentence"]
            else:
                caption = rec["caption"]
            return (video_id, start, end), caption

        rec = self.data[idx]
        video_info, text_info = _get_video_and_caption(rec)
        return video_info, text_info


class YoucookVideoProcessor(VideoProcessor):
    """video_fn is a tuple of (video_id, start, end) now."""

    def __call__(self, video_fn):
        video_id, start, end = video_fn
        feat = np.load(os.path.join(self.vfeat_dir, video_id + ".npy"))
        return feat[start:end]


class YoucookNLGMetaProcessor(MetaProcessor):
    """NLG uses the original split:
    `train_list.txt` and `val_list.txt`
    """

    def __init__(self, config):
        super().__init__(config)
        vfeat_dir = config.vfeat_dir
        print(self._get_split_path(config))
        with open(self._get_split_path(config)) as fd:
            video_ids = [
                line.strip().split("/")[1] for line in fd.readlines()]
            print("total video_ids in train/val_list.txt", len(video_ids))

            all_valid_video_ids = set(
                [os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)]
            )
            video_ids = [
                video_id for video_id in video_ids
                if video_id in all_valid_video_ids]

            print("valid video_ids in train/val_list.txt", len(video_ids))
        with open(config.trainval_annotation) as fd:
            self.youcook_annotation = json.load(fd)["database"]

        data = []
        for video_id in video_ids:
            for clip in self.youcook_annotation[video_id]["annotations"]:
                start, end = clip["segment"]
                caption = clip["sentence"]
                data.append(((video_id, start, end), caption))
        self.data = data

    def __getitem__(self, idx):
        return self.data[idx]


# --------------------- CrossTask -------------------------

class CrossTaskMetaProcessor(MetaProcessor):
    def __init__(self, config):
        super().__init__(config)
        np.random.seed(0)  # deterministic random split.
        task_vids = self._get_vids(
            config.train_csv_path,
            config.vfeat_dir,
            config.annotation_path)

        val_vids = self._get_vids(
            config.val_csv_path,
            config.vfeat_dir,
            config.annotation_path)

        # filter out those tasks and vids that appear in val_vids.
        task_vids = {
            task: [
                vid for vid in vids
                if task not in val_vids or vid not in val_vids[task]]
            for task, vids in task_vids.items()}

        primary_info = self._read_task_info(config.primary_path)
        test_tasks = set(primary_info['steps'].keys())

        # if args.use_related:
        related_info = self._read_task_info(config.related_path)
        task_steps = {**primary_info['steps'], **related_info['steps']}
        n_steps = {**primary_info['n_steps'], **related_info['n_steps']}
        # else:
        #     task_steps = primary_info['steps']
        #     n_steps = primary_info['n_steps']
        all_tasks = set(n_steps.keys())
        # filter and keep task in primary or related.
        task_vids = {
            task: vids for task, vids in task_vids.items()
            if task in all_tasks}
        # vocab-by-step matrix (A) and vocab (M)
        # (huxu): we do not use BoW.
        # A, M = self._get_A(task_steps, share="words")

        train_vids, test_vids = self._random_split(
            task_vids, test_tasks, config.n_train)
        print("train_num_videos", sum(len(vids) for vids in train_vids.values()))
        print("test_num_videos", sum(len(vids) for vids in test_vids.values()))
        # added by huxu to automatically determine the split.
        split_map = {
            "train": train_vids,
            "valid": test_vids,
            "test": test_vids
        }
        task_vids = split_map[config.split]

        self.vids = []
        for task, vids in task_vids.items():
            self.vids.extend([(task, vid) for vid in vids])
        self.task_steps = task_steps
        self.n_steps = n_steps

    def __getitem__(self, idx):
        task, vid = self.vids[idx]
        n_steps = self.n_steps[task]
        steps = self.task_steps[task]
        assert len(steps) == n_steps
        return (task, vid, steps, n_steps), (task, vid, steps, n_steps)

    def __len__(self):
        return len(self.vids)

    def _random_split(self, task_vids, test_tasks, n_train):
        train_vids = {}
        test_vids = {}
        for task, vids in task_vids.items():
            if task in test_tasks and len(vids) > n_train:
                train_vids[task] = np.random.choice(
                    vids, n_train, replace=False).tolist()
                test_vids[task] = [
                    vid for vid in vids if vid not in train_vids[task]]
            else:
                train_vids[task] = vids
        return train_vids, test_vids

    def _get_vids(self, path, vfeat_dir, annotation_path):
        """refactored from
        https://github.com/DmZhukov/CrossTask/blob/master/data.py
        changes: add `vfeat_dir` to check if the video is available.
        add `annotation_path` to check if the annotation is available.
        """
        task_vids = {}
        with open(path, 'r') as f:
            for line in f:
                task, vid, url = line.strip().split(',')
                # double check the video is available.
                if not os.path.exists(
                        os.path.join(vfeat_dir, vid + ".npy")):
                    continue
                # double check the annotation is available.
                if not os.path.exists(os.path.join(
                        annotation_path,
                        task + "_" + vid + ".csv")):
                    continue
                if task not in task_vids:
                    task_vids[task] = []
                task_vids[task].append(vid)
        return task_vids

    def _read_task_info(self, path):
        titles = {}
        urls = {}
        n_steps = {}
        steps = {}
        with open(path, 'r') as f:
            idx = f.readline()
            while idx != '':
                idx = idx.strip()
                titles[idx] = f.readline().strip()
                urls[idx] = f.readline().strip()
                n_steps[idx] = int(f.readline().strip())
                steps[idx] = f.readline().strip().split(',')
                next(f)
                idx = f.readline()
        return {
            'title': titles,
            'url': urls,
            'n_steps': n_steps,
            'steps': steps
        }

    def _get_A(self, task_steps, share="words"):
        raise ValueError("running get_A is not allowed for BERT.")
        """Step-to-component matrices."""
        if share == 'words':
            # share words
            task_step_comps = {
                task: [step.split(' ') for step in steps]
                for task, steps in task_steps.items()}
        elif share == 'task_words':
            # share words within same task
            task_step_comps = {
                task: [[task + '_' + tok for tok in step.split(' ')]
                       for step in steps]
                for task, steps in task_steps.items()}
        elif share == 'steps':
            # share whole step descriptions
            task_step_comps = {
                task: [[step] for step in steps]
                for task, steps in task_steps.items()}
        else:
            # no sharing
            task_step_comps = {
                task: [[task + '_' + step] for step in steps]
                for task, steps in task_steps.items()}
        # BERT tokenizer here?
        vocab = []
        for task, steps in task_step_comps.items():
            for step in steps:
                vocab.extend(step)
        vocab = {comp: m for m, comp in enumerate(set(vocab))}
        M = len(vocab)
        A = {}
        for task, steps in task_step_comps.items():
            K = len(steps)
            a = torch.zeros(M, K)
            for k, step in enumerate(steps):
                a[[vocab[comp] for comp in step], k] = 1
            a /= a.sum(dim=0)
            A[task] = a
        return A, M


class CrossTaskVideoProcessor(VideoProcessor):
    def __call__(self, video_fn):
        task, vid, steps, n_steps = video_fn
        video_fn = os.path.join(self.vfeat_dir, vid + ".npy")
        feat = np.load(video_fn)
        return feat


class CrossTaskTextProcessor(TextProcessor):
    def __call__(self, text_id):
        task, vid, steps, n_steps = text_id
        step_ids = []
        for step_str in steps:
            step_ids.append(
                self.tokenizer(step_str, add_special_tokens=False)["input_ids"]
            )
        return step_ids


class CrossTaskAligner(Aligner):
    """
    TODO: it's not clear yet the formulation of the task; finish this later.
    """

    def __init__(self, config):
        super().__init__(config)
        self.annotation_path = config.annotation_path
        self.sliding_window = config.sliding_window
        self.sliding_window_size = config.sliding_window_size

    def __call__(self, video_id, video_feature, text_feature):
        task, vid, steps, n_steps = video_id
        annot_path = os.path.join(
            self.annotation_path, task + '_' + vid + '.csv')
        video_len = len(video_feature)

        labels = torch.from_numpy(self._read_assignment(
            video_len, n_steps, annot_path)).float()

        vfeats, vmasks, targets = [], [], []
        # sliding window on video features and targets.
        for window_start in range(0, video_len, self.sliding_window):
            video_start = 0
            video_end = min(video_len - window_start, self.sliding_window_size)
            video_clip = {"start": [video_start], "end": [video_end]}

            vfeat, vmask = self._build_video_seq(
                video_feature[window_start: window_start + video_end],
                video_clip
            )

            target = labels[window_start: window_start + video_end]
            assert len(vfeat) >= len(target), \
                "{},{}".format(len(vfeat), len(target))
            # TODO: randomly drop all zero targets for training ?
            # if self.split == "train" and target.sum() == 0:
            #     continue
            vfeats.append(vfeat)
            vmasks.append(vmask)
            targets.append(target)

            if (video_len - window_start) <= self.sliding_window_size:
                break

        vfeats = torch.stack(vfeats)
        vmasks = torch.stack(vmasks)
        targets = torch.cat(targets, dim=0)

        caps, cmasks = [], []
        for step in text_feature:
            step_text_feature = {"start": [0], "end": [1], "cap": [step]}
            step_text_clip_index = [0]
            cap, cmask = self._build_text_seq(
                step_text_feature, step_text_clip_index
            )
            caps.append(cap)
            cmasks.append(cmask)
        caps = torch.stack(caps)
        cmasks = torch.stack(cmasks)

        return {
            "caps": caps,
            "cmasks": cmasks,
            "vfeats": vfeats,  # X for original code.
            "vmasks": vmasks,
            "targets": targets,
            "video_id": vid,
            "task": task,
            "video_len": video_len  # for later checking.
        }

    def _read_assignment(self, T, K, path):
        """
        refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py
        How to interpret constraints on the loss that is going to be minimized:
        lambd is a big number;
        self.lambd * C is a big number for all valid positions
        (the csv stores invalids)

        def forward(self, O, Y, C):
            return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum()

        This will load the csv file and fill in the step col
        from start to end rows.
        """
        Y = np.zeros([T, K], dtype=np.uint8)
        with open(path, 'r') as f:
            for line in f:
                step, start, end = line.strip().split(',')
                start = int(math.floor(float(start)))
                end = int(math.ceil(float(end)))
                step = int(step) - 1
                Y[start:end, step] = 1
        return Y


# --------------------- COIN -------------------------

class MetaTextBinarizer(Aligner):
    def __call__(self, text_feature):
        text_feature = {
            "cap": [text_feature],
            "start": [0.],
            "end": [100.],
        }
        text_clip_indexs = [0]

        caps, cmasks = self._build_text_seq(
            text_feature, text_clip_indexs
        )
        return {"caps": caps, "cmasks": cmasks}


class COINActionSegmentationMetaProcessor(MetaProcessor):
    split_map = {
        "train": "training",
        "valid": "testing",
        "test": "testing",
    }

    def __init__(self, config):
        super().__init__(config)
        with open(self._get_split_path(config)) as fr:
            database = json.load(fr)["database"]
        id2label = {}
        data = []
        # filter the data by split.
        for video_id, rec in database.items():
            # always use testing to determine label_set
            if rec["subset"] == "testing":
                for segment in rec["annotation"]:
                    id2label[int(segment["id"])] = segment["label"]

        # text_labels is used for ZS setting
        self.text_labels = ["none"] * len(id2label)
        for label_id in id2label:
            self.text_labels[label_id - 1] = id2label[label_id]

        id2label[0] = "O"
        print("num of labels", len(id2label))

        for video_id, rec in database.items():
            if not os.path.isfile(
                    os.path.join(config.vfeat_dir, video_id + ".npy")):
                continue
            if rec["subset"] == \
                    COINActionSegmentationMetaProcessor.split_map[self.split]:
                starts, ends, labels = [], [], []
                for segment in rec["annotation"]:
                    start, end = segment["segment"]
                    label = int(segment["id"])
                    starts.append(start)
                    ends.append(end)
                    labels.append(label)
                data.append(
                    (video_id, {"start": starts, "end": ends, "label": labels}))
        self.data = data

    def meta_text_labels(self, config):
        from transformers import default_data_collator
        from ..utils import get_local_rank

        text_processor = TextProcessor(config)
        binarizer = MetaTextBinarizer(config)
        # TODO: add prompts to .yaml.
        text_labels = [label for label in self.text_labels]

        if get_local_rank() == 0:
            print(text_labels)

        outputs = []
        for text_label in text_labels:
            text_feature = text_processor(text_label)
            outputs.append(binarizer(text_feature))
        return default_data_collator(outputs)

    def __getitem__(self, idx):
        return self.data[idx]


class COINActionSegmentationTextProcessor(TextProcessor):
    def __call__(self, text_label):
        return text_label


class COINActionSegmentationAligner(Aligner):
    def __init__(self, config):
        super().__init__(config)
        self.sliding_window = config.sliding_window
        self.sliding_window_size = config.sliding_window_size

    def __call__(self, video_id, video_feature, text_feature):
        starts, ends, label_ids = \
            text_feature["start"], text_feature["end"], text_feature["label"]
        # sliding window.
        video_len = len(video_feature)

        vfeats, vmasks, targets = [], [], []
        # sliding window on video features and targets.
        for window_start in range(0, video_len, self.sliding_window):
            video_start = 0
            video_end = min(video_len - window_start, self.sliding_window_size)
            video_clip = {"start": [video_start], "end": [video_end]}
            vfeat, vmask = self._build_video_seq(
                video_feature[window_start: window_start + video_end],
                video_clip
            )
            # covers video length only.
            target = torch.full_like(vmask, -100, dtype=torch.long)
            target[vmask] = 0
            for start, end, label_id in zip(starts, ends, label_ids):
                if (window_start < end) and (start < (window_start + video_end)):
                    start_offset = max(0, math.floor(start) - window_start)
                    end_offset = min(video_end, math.ceil(end) - window_start)
                    target[start_offset:end_offset] = label_id
            vfeats.append(vfeat)
            vmasks.append(vmask)
            targets.append(target)
            if (video_len - window_start) <= self.sliding_window_size:
                break

        vfeats = torch.stack(vfeats)
        vmasks = torch.stack(vmasks)
        targets = torch.stack(targets)
        video_targets = torch.full((video_len,), 0)
        for start, end, label_id in zip(starts, ends, label_ids):
            start_offset = max(0, math.floor(start))
            end_offset = min(video_len, math.ceil(end))
            video_targets[start_offset:end_offset] = label_id

        caps = torch.LongTensor(
            [[self.cls_token_id, self.sep_token_id,
              self.pad_token_id, self.sep_token_id]],
        ).repeat(vfeats.size(0), 1)
        cmasks = torch.BoolTensor(
            [[0, 1, 0, 1]]  # pad are valid for attention.
        ).repeat(vfeats.size(0), 1)
        return {
            "caps": caps,
            "cmasks": cmasks,
            "vfeats": vfeats,  # X for original code.
            "vmasks": vmasks,
            "targets": targets,
            "video_id": video_id,
            "video_len": video_len,  # for later checking.
            "video_targets": video_targets
        }


class DiDeMoMetaProcessor(MetaProcessor):
    """reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
    https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
    """

    def __init__(self, config):
        super().__init__(config)

        assert "test" in self._get_split_path(config), \
            "DiDeMo only supports zero-shot testing for now."

        with open(self._get_split_path(config)) as data_file:
            json_data = json.load(data_file)

        data = []
        for record in json_data:
            data.append((record["video"], record["description"]))
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


class DiDeMoTextProcessor(TextProcessor):
    """reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
    https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
    """

    def __call__(self, text):
        return self.tokenizer(text, add_special_tokens=False)["input_ids"]


class DiDeMoAligner(DSAligner):
    """
    check video length.
    """

    def __call__(self, video_id, video_feature, text_feature):
        # print(video_feature.shape[0])
        return super().__call__(video_id, video_feature, text_feature)
29,891
34.208481
104
py
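Both CrossTaskAligner and COINActionSegmentationAligner above slice long videos with the same overlapping-window loop. A standalone sketch with illustrative sizes:

# sliding_window is the stride; sliding_window_size is the window length.
sliding_window, sliding_window_size, video_len = 16, 32, 50
windows = []
for window_start in range(0, video_len, sliding_window):
    video_end = min(video_len - window_start, sliding_window_size)
    windows.append((window_start, window_start + video_end))
    if (video_len - window_start) <= sliding_window_size:
        break  # the last window already reaches the end of the video.
print(windows)  # [(0, 32), (16, 48), (32, 50)]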
sign-topic
sign-topic-main/examples/MMPT/mmpt/processors/models/s3dg.py
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Contains a PyTorch definition for Gated Separable 3D network (S3D-G)
with a text module for computing joint text-video embedding from raw text
and video input. The following code will enable you to load the HowTo100M
pretrained S3D Text-Video model from:
  A. Miech, J.-B. Alayrac, L. Smaira, I. Laptev, J. Sivic and A. Zisserman,
  End-to-End Learning of Visual Representations from Uncurated Instructional Videos.
  https://arxiv.org/abs/1912.06430.

S3D-G was proposed by:
  S. Xie, C. Sun, J. Huang, Z. Tu and K. Murphy,
  Rethinking Spatiotemporal Feature Learning For Video Understanding.
  https://arxiv.org/abs/1712.04851.
  Tensorflow code: https://github.com/tensorflow/models/blob/master/research/slim/nets/s3dg.py

The S3D architecture was slightly modified with a space to depth trick for TPU
optimization.
"""

import torch as th
import torch.nn.functional as F
import torch.nn as nn
import os
import numpy as np
import re


class InceptionBlock(nn.Module):
    def __init__(
        self,
        input_dim,
        num_outputs_0_0a,
        num_outputs_1_0a,
        num_outputs_1_0b,
        num_outputs_2_0a,
        num_outputs_2_0b,
        num_outputs_3_0b,
        gating=True,
    ):
        super(InceptionBlock, self).__init__()
        self.conv_b0 = STConv3D(input_dim, num_outputs_0_0a, [1, 1, 1])
        self.conv_b1_a = STConv3D(input_dim, num_outputs_1_0a, [1, 1, 1])
        self.conv_b1_b = STConv3D(
            num_outputs_1_0a, num_outputs_1_0b, [3, 3, 3], padding=1, separable=True
        )
        self.conv_b2_a = STConv3D(input_dim, num_outputs_2_0a, [1, 1, 1])
        self.conv_b2_b = STConv3D(
            num_outputs_2_0a, num_outputs_2_0b, [3, 3, 3], padding=1, separable=True
        )
        self.maxpool_b3 = th.nn.MaxPool3d((3, 3, 3), stride=1, padding=1)
        self.conv_b3_b = STConv3D(input_dim, num_outputs_3_0b, [1, 1, 1])
        self.gating = gating
        self.output_dim = (
            num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b + num_outputs_3_0b
        )
        if gating:
            self.gating_b0 = SelfGating(num_outputs_0_0a)
            self.gating_b1 = SelfGating(num_outputs_1_0b)
            self.gating_b2 = SelfGating(num_outputs_2_0b)
            self.gating_b3 = SelfGating(num_outputs_3_0b)

    def forward(self, input):
        """Inception block"""
        b0 = self.conv_b0(input)
        b1 = self.conv_b1_a(input)
        b1 = self.conv_b1_b(b1)
        b2 = self.conv_b2_a(input)
        b2 = self.conv_b2_b(b2)
        b3 = self.maxpool_b3(input)
        b3 = self.conv_b3_b(b3)
        if self.gating:
            b0 = self.gating_b0(b0)
            b1 = self.gating_b1(b1)
            b2 = self.gating_b2(b2)
            b3 = self.gating_b3(b3)
        return th.cat((b0, b1, b2, b3), dim=1)


class SelfGating(nn.Module):
    def __init__(self, input_dim):
        super(SelfGating, self).__init__()
        self.fc = nn.Linear(input_dim, input_dim)

    def forward(self, input_tensor):
        """Feature gating as used in S3D-G."""
        spatiotemporal_average = th.mean(input_tensor, dim=[2, 3, 4])
        weights = self.fc(spatiotemporal_average)
        weights = th.sigmoid(weights)
        return weights[:, :, None, None, None] * input_tensor


class STConv3D(nn.Module):
    def __init__(
        self, input_dim, output_dim, kernel_size, stride=1, padding=0, separable=False
    ):
        super(STConv3D, self).__init__()
        self.separable = separable
        self.relu = nn.ReLU(inplace=True)
        assert len(kernel_size) == 3
        if separable and kernel_size[0] != 1:
            spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
            temporal_kernel_size = [kernel_size[0], 1, 1]
            if isinstance(stride, list) and len(stride) == 3:
                spatial_stride = [1, stride[1], stride[2]]
                temporal_stride = [stride[0], 1, 1]
            else:
                spatial_stride = [1, stride, stride]
                temporal_stride = [stride, 1, 1]
            if isinstance(padding, list) and len(padding) == 3:
                spatial_padding = [0, padding[1], padding[2]]
                temporal_padding = [padding[0], 0, 0]
            else:
                spatial_padding = [0, padding, padding]
                temporal_padding = [padding, 0, 0]
        if separable:
            self.conv1 = nn.Conv3d(
                input_dim,
                output_dim,
                kernel_size=spatial_kernel_size,
                stride=spatial_stride,
                padding=spatial_padding,
                bias=False,
            )
            self.bn1 = nn.BatchNorm3d(output_dim)
            self.conv2 = nn.Conv3d(
                output_dim,
                output_dim,
                kernel_size=temporal_kernel_size,
                stride=temporal_stride,
                padding=temporal_padding,
                bias=False,
            )
            self.bn2 = nn.BatchNorm3d(output_dim)
        else:
            self.conv1 = nn.Conv3d(
                input_dim,
                output_dim,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                bias=False,
            )
            self.bn1 = nn.BatchNorm3d(output_dim)

    def forward(self, input):
        out = self.relu(self.bn1(self.conv1(input)))
        if self.separable:
            out = self.relu(self.bn2(self.conv2(out)))
        return out


class MaxPool3dTFPadding(th.nn.Module):
    def __init__(self, kernel_size, stride=None, padding="SAME"):
        super(MaxPool3dTFPadding, self).__init__()
        if padding == "SAME":
            padding_shape = self._get_padding_shape(kernel_size, stride)
            self.padding_shape = padding_shape
            self.pad = th.nn.ConstantPad3d(padding_shape, 0)
        self.pool = th.nn.MaxPool3d(kernel_size, stride, ceil_mode=True)

    def _get_padding_shape(self, filter_shape, stride):
        def _pad_top_bottom(filter_dim, stride_val):
            pad_along = max(filter_dim - stride_val, 0)
            pad_top = pad_along // 2
            pad_bottom = pad_along - pad_top
            return pad_top, pad_bottom

        padding_shape = []
        for filter_dim, stride_val in zip(filter_shape, stride):
            pad_top, pad_bottom = _pad_top_bottom(filter_dim, stride_val)
            padding_shape.append(pad_top)
            padding_shape.append(pad_bottom)
        depth_top = padding_shape.pop(0)
        depth_bottom = padding_shape.pop(0)
        padding_shape.append(depth_top)
        padding_shape.append(depth_bottom)
        return tuple(padding_shape)

    def forward(self, inp):
        inp = self.pad(inp)
        out = self.pool(inp)
        return out


class Sentence_Embedding(nn.Module):
    def __init__(
        self,
        embd_dim,
        num_embeddings=66250,
        word_embedding_dim=300,
        token_to_word_path="dict.npy",
        max_words=16,
        output_dim=2048,
    ):
        super(Sentence_Embedding, self).__init__()
        self.word_embd = nn.Embedding(num_embeddings, word_embedding_dim)
        self.fc1 = nn.Linear(word_embedding_dim, output_dim)
        self.fc2 = nn.Linear(output_dim, embd_dim)
        self.word_to_token = {}
        self.max_words = max_words
        token_to_word = np.load(token_to_word_path)
        for i, t in enumerate(token_to_word):
            self.word_to_token[t] = i + 1

    def _zero_pad_tensor_token(self, tensor, size):
        if len(tensor) >= size:
            return tensor[:size]
        else:
            zero = th.zeros(size - len(tensor)).long()
            return th.cat((tensor, zero), dim=0)

    def _split_text(self, sentence):
        w = re.findall(r"[\w']+", str(sentence))
        return w

    def _words_to_token(self, words):
        words = [
            self.word_to_token[word] for word in words if word in self.word_to_token
        ]
        if words:
            we = self._zero_pad_tensor_token(th.LongTensor(words), self.max_words)
            return we
        else:
            return th.zeros(self.max_words).long()

    def _words_to_ids(self, x):
        split_x = [self._words_to_token(self._split_text(sent.lower())) for sent in x]
        return th.stack(split_x, dim=0)

    def forward(self, x):
        x = self._words_to_ids(x)
        x = self.word_embd(x)
        x = F.relu(self.fc1(x))
        x = th.max(x, dim=1)[0]
        x = self.fc2(x)
        return {'text_embedding': x}


class S3D(nn.Module):
    def __init__(self, dict_path, num_classes=512, gating=True, space_to_depth=True):
        super(S3D, self).__init__()
        self.num_classes = num_classes
        self.gating = gating
        self.space_to_depth = space_to_depth
        if space_to_depth:
            self.conv1 = STConv3D(
                24, 64, [2, 4, 4], stride=1, padding=(1, 2, 2), separable=False
            )
        else:
            self.conv1 = STConv3D(
                3, 64, [3, 7, 7], stride=2, padding=(1, 3, 3), separable=False
            )
        self.conv_2b = STConv3D(64, 64, [1, 1, 1], separable=False)
        self.conv_2c = STConv3D(64, 192, [3, 3, 3], padding=1, separable=True)
        self.gating = SelfGating(192)
        self.maxpool_2a = MaxPool3dTFPadding(
            kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME"
        )
        self.maxpool_3a = MaxPool3dTFPadding(
            kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME"
        )
        self.mixed_3b = InceptionBlock(192, 64, 96, 128, 16, 32, 32)
        self.mixed_3c = InceptionBlock(
            self.mixed_3b.output_dim, 128, 128, 192, 32, 96, 64
        )
        self.maxpool_4a = MaxPool3dTFPadding(
            kernel_size=(3, 3, 3), stride=(2, 2, 2), padding="SAME"
        )
        self.mixed_4b = InceptionBlock(
            self.mixed_3c.output_dim, 192, 96, 208, 16, 48, 64
        )
        self.mixed_4c = InceptionBlock(
            self.mixed_4b.output_dim, 160, 112, 224, 24, 64, 64
        )
        self.mixed_4d = InceptionBlock(
            self.mixed_4c.output_dim, 128, 128, 256, 24, 64, 64
        )
        self.mixed_4e = InceptionBlock(
            self.mixed_4d.output_dim, 112, 144, 288, 32, 64, 64
        )
        self.mixed_4f = InceptionBlock(
            self.mixed_4e.output_dim, 256, 160, 320, 32, 128, 128
        )
        self.maxpool_5a = self.maxPool3d_5a_2x2 = MaxPool3dTFPadding(
            kernel_size=(2, 2, 2), stride=(2, 2, 2), padding="SAME"
        )
        self.mixed_5b = InceptionBlock(
            self.mixed_4f.output_dim, 256, 160, 320, 32, 128, 128
        )
        self.mixed_5c = InceptionBlock(
            self.mixed_5b.output_dim, 384, 192, 384, 48, 128, 128
        )
        self.fc = nn.Linear(self.mixed_5c.output_dim, num_classes)
        self.text_module = Sentence_Embedding(
            num_classes, token_to_word_path=dict_path)

    def _space_to_depth(self, input):
        """3D space to depth trick for TPU optimization."""
        B, C, T, H, W = input.shape
        input = input.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2)
        input = input.permute(0, 3, 5, 7, 1, 2, 4, 6)
        input = input.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2)
        return input

    def forward(self, inputs):
        """Defines the S3DG base architecture."""
        if self.space_to_depth:
            inputs = self._space_to_depth(inputs)
        net = self.conv1(inputs)
        if self.space_to_depth:
            # we need to replicate 'SAME' tensorflow padding
            net = net[:, :, 1:, 1:, 1:]
        net = self.maxpool_2a(net)
        net = self.conv_2b(net)
        net = self.conv_2c(net)
        if self.gating:
            net = self.gating(net)
        net = self.maxpool_3a(net)
        net = self.mixed_3b(net)
        net = self.mixed_3c(net)
        net = self.maxpool_4a(net)
        net = self.mixed_4b(net)
        net = self.mixed_4c(net)
        net = self.mixed_4d(net)
        net = self.mixed_4e(net)
        net = self.mixed_4f(net)
        net = self.maxpool_5a(net)
        net = self.mixed_5b(net)
        net = self.mixed_5c(net)
        net = th.mean(net, dim=[2, 3, 4])
        return {'video_embedding': self.fc(net), 'mixed_5c': net}
12,416
35.845697
94
py
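A standalone check of the space-to-depth trick used by S3D above: a (B, C, T, H, W) clip is folded into (B, 8C, T/2, H/2, W/2), which is why conv1 takes 24 = 3 * 8 input channels when space_to_depth is enabled. The input sizes here are arbitrary.

import torch as th
x = th.randn(2, 3, 16, 64, 64)  # (B, C, T, H, W); T, H, W divisible by 2.
B, C, T, H, W = x.shape
y = x.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2)
y = y.permute(0, 3, 5, 7, 1, 2, 4, 6)
y = y.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2)
print(y.shape)  # torch.Size([2, 24, 8, 32, 32])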
sign-topic
sign-topic-main/examples/MMPT/mmpt/utils/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import random

import numpy as np
import torch

from .shardedtensor import *
from .load_config import *


def set_seed(seed=43211):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if torch.backends.cudnn.enabled:
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True


def get_world_size():
    if torch.distributed.is_initialized():
        world_size = torch.distributed.get_world_size()
    else:
        world_size = 1
    return world_size


def get_local_rank():
    return torch.distributed.get_rank() \
        if torch.distributed.is_initialized() else 0


def print_on_rank0(func):
    local_rank = get_local_rank()
    if local_rank == 0:
        print("[INFO]", func)


class RetriMeter(object):
    """
    Statistics on whether retrieval yields a better pair.
    """

    def __init__(self, freq=1024):
        self.freq = freq
        self.total = 0
        self.replace = 0
        self.updates = 0

    def __call__(self, data):
        if isinstance(data, np.ndarray):
            self.replace += data.shape[0] - int((data[:, 0] == -1).sum())
            self.total += data.shape[0]
        elif torch.is_tensor(data):
            self.replace += int(data.sum())
            self.total += data.size(0)
        else:
            raise ValueError("unsupported RetriMeter data type.", type(data))

        self.updates += 1
        if get_local_rank() == 0 and self.updates % self.freq == 0:
            print("[INFO]", self)

    def __repr__(self):
        return "RetriMeter (" + str(self.replace / self.total) \
            + "/" + str(self.replace) + "/" + str(self.total) + ")"
1,886
26.347826
77
py
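A quick check of the RetriMeter bookkeeping above, assuming the module is importable. The fake retrieval matrix follows the ndarray branch, where -1 in column 0 marks "no replacement":

import numpy as np
meter = RetriMeter(freq=1)  # print on every update.
fake = np.array([[-1, 0], [3, 1], [7, 2]])  # 2 of 3 samples replaced.
meter(fake)
print(meter)  # RetriMeter (0.666.../2/3)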
sign-topic
sign-topic-main/examples/MMPT/mmpt/tasks/retritask.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import torch
import pickle
import random

from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

from ..processors import (
    ShardedHow2MetaProcessor,
    ShardedVideoProcessor,
    ShardedTextProcessor,
    VariedLenAligner,
)
from ..datasets import MMDataset
from .task import Task
from ..modules import vectorpool
from ..evaluators.predictor import Predictor
from ..utils import set_seed, get_local_rank, get_world_size


class RetriTask(Task):
    """abstract class for a task with retrieval."""

    def reshape_subsample(self, sample):
        for key in sample:
            if torch.is_tensor(sample[key]):
                sample[key] = self.flat_subsample(sample[key])
        return sample

    def flat_subsample(self, tensor):
        if tensor.size(0) == 1:
            tensor = tensor.squeeze(0)
        return tensor

    def build_dataloader(self):
        """called by `get_batch_iterator` in fairseqmmtask."""
        # TODO: hard-code dataloader for retri for now and
        # make it configurable in .yaml.
        # reuse the `train.lst`.
        self.config.dataset.split = "train"
        meta_processor = ShardedHow2MetaProcessor(self.config.dataset)
        video_processor = ShardedVideoProcessor(self.config.dataset)
        text_processor = ShardedTextProcessor(self.config.dataset)

        aligner = VariedLenAligner(self.config.dataset)
        aligner.subsampling = self.config.dataset.clip_per_video

        self.retri_data = MMDataset(
            meta_processor, video_processor, text_processor, aligner
        )

        retri_sampler = DistributedSampler(self.retri_data)
        infer_scale = 16
        batch_size = self.config.dataset.num_video_per_batch \
            * infer_scale

        self.retri_dataloader = DataLoader(
            self.retri_data,
            collate_fn=self.retri_data.collater,
            batch_size=batch_size,
            shuffle=False,
            sampler=retri_sampler,
            num_workers=self.config.fairseq.dataset.num_workers
        )
        return self.retri_dataloader

    def retrive_candidates(self, epoch, dataloader=None):
        if get_local_rank() == 0:
            print("running retrieval model.")
        out_dir = os.path.join(
            self.config.fairseq.checkpoint.save_dir, "retri")
        os.makedirs(out_dir, exist_ok=True)

        if not os.path.isfile(
                os.path.join(
                    out_dir, "batched_e" + str(epoch) + "_videos0.pkl")
        ):
            if dataloader is None:
                dataloader = self.retri_dataloader

            self.model.eval()
            self.model.is_train = False

            assert self.retri_data.meta_processor.data == \
                self.train_data.meta_processor.data  # video_ids not mutated.

            self._retri_predict(epoch, dataloader)

            self.model.train()
            self.model.is_train = True

        torch.distributed.barrier()
        output = self._retri_sync(epoch, out_dir)
        torch.distributed.barrier()
        self.train_data.meta_processor.set_candidates(output)
        return output


class VideoRetriTask(RetriTask):
    """RetriTask on video level."""

    def reshape_subsample(self, sample):
        if (
            hasattr(self.config.dataset, "clip_per_video")
            and self.config.dataset.clip_per_video is not None
            and self.config.dataset.clip_per_video > 1
        ):
            for key in sample:
                if torch.is_tensor(sample[key]):
                    sample[key] = self.flat_subsample(sample[key])
        return sample

    def flat_subsample(self, tensor):
        if tensor.size(0) == 1:
            tensor = tensor.squeeze(0)
        return Task.flat_subsample(self, tensor)

    def _retri_predict(self, epoch, dataloader):
        set_seed(epoch)
        # save for retrieval.
        predictor = VideoPredictor(self.config)
        predictor.predict_loop(
            self.model, dataloader)
        set_seed(epoch)  # get the same text clips.
        # retrieval.
        retri_predictor = VideoRetriPredictor(
            self.config)
        retri_predictor.predict_loop(
            self.model, predictor.vecpool.retriver, epoch)
        del predictor
        del retri_predictor

    def _retri_sync(self, epoch, out_dir):
        # every gpu does the same merge.
        batched_videos = []
        for local_rank in range(get_world_size()):
            fn = os.path.join(
                out_dir,
                "batched_e" + str(epoch)
                + "_videos" + str(local_rank) + ".pkl")
            with open(fn, "rb") as fr:
                batched_videos.extend(pickle.load(fr))
        print(
            "[INFO] batched_videos",
            len(batched_videos), len(batched_videos[0]))
        return batched_videos


class VideoPredictor(Predictor):
    def __init__(self, config):
        vectorpool_cls = getattr(vectorpool, config.vectorpool_cls)
        self.vecpool = vectorpool_cls(config)

    def predict_loop(
        self,
        model,
        dataloader,
        early_stop=-1,
    ):
        with torch.no_grad():
            if get_local_rank() == 0:
                dataloader = tqdm(dataloader)
            for batch_idx, batch in enumerate(dataloader):
                if batch_idx == early_stop:
                    break
                self(batch, model)
        return self.finalize()

    def __call__(self, sample, model, **kwargs):
        param = next(model.parameters())
        dtype = param.dtype
        device = param.device
        subsample = sample["vfeats"].size(1)
        sample = self.to_ctx(sample, device, dtype)
        for key in sample:
            if torch.is_tensor(sample[key]):
                size = sample[key].size()
                if len(size) >= 2:
                    batch_size = size[0] * size[1]
                    expanded_size = (
                        (batch_size,) + size[2:] if len(size) > 2
                        else (batch_size,)
                    )
                    sample[key] = sample[key].view(expanded_size)

        outputs = model(**sample)
        sample.update(outputs)
        self.vecpool(sample, subsample)

    def finalize(self):
        print("[INFO]", self.vecpool)
        if not self.vecpool.retriver.db.is_trained:
            self.vecpool.retriver.finalize_training()
        return self.vecpool.retriver


class VideoRetriPredictor(Predictor):
    """
    Online Retrieval Predictor for Clips (used by RetriTask).
    TODO: merge this with VisPredictor?
    """

    def __init__(self, config):
        self.pred_dir = os.path.join(
            config.fairseq.checkpoint.save_dir,
            "retri")
        self.num_cands = config.num_cands
        self.num_video_per_batch = config.dataset.num_video_per_batch

    def predict_loop(
        self,
        model,
        retriver,
        epoch,
        early_stop=-1
    ):
        # a fake loop that only tries to recover the video vector
        # from video_id.
        batched_videos = []
        # obtain available video_ids.
        video_ids = list(retriver.videoid_to_vectoridx.keys())

        dataloader = random.sample(
            video_ids,
            len(video_ids) // self.num_video_per_batch
        )

        if get_local_rank() == 0:
            dataloader = tqdm(dataloader)
        for batch_idx, batch in enumerate(dataloader):
            # batch is one video id.
            if batch_idx == early_stop:
                break
            video_ids = retriver.search_by_video_ids(
                [batch], self.num_cands)[0]
            if len(video_ids) > self.num_video_per_batch:
                # we moved the center to make the cluster robust.
                video_ids = random.sample(video_ids, self.num_video_per_batch)

            batched_videos.append(video_ids)
        return self.finalize(batched_videos, epoch)

    def finalize(self, batched_videos, epoch):
        fn = os.path.join(
            self.pred_dir,
            "batched_e" + str(epoch)
            + "_videos" + str(get_local_rank()) + ".pkl")
        with open(fn, "wb") as fw:
            pickle.dump(
                batched_videos,
                fw,
                pickle.HIGHEST_PROTOCOL
            )
        return batched_videos
8,413
32.125984
82
py
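A rough sketch of the candidate batching performed by VideoRetriPredictor above, with the vector-index retriever stubbed out by random sampling; all names and sizes here are invented for illustration:

import random
video_ids = [f"vid{i}" for i in range(32)]
num_cands, num_video_per_batch = 8, 4
batched = []
for anchor in random.sample(video_ids, len(video_ids) // num_video_per_batch):
    cands = random.sample(video_ids, num_cands)  # stand-in for search_by_video_ids.
    batched.append(random.sample(cands, num_video_per_batch))
print(len(batched), len(batched[0]))  # 8 4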
sign-topic
sign-topic-main/examples/MMPT/mmpt/tasks/milncetask.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from .task import Task


class MILNCETask(Task):
    def reshape_subsample(self, sample):
        if (
            hasattr(self.config.dataset, "subsampling")
            and self.config.dataset.subsampling is not None
            and self.config.dataset.subsampling > 1
        ):
            for key in sample:
                if torch.is_tensor(sample[key]):
                    tensor = self.flat_subsample(sample[key])
                    if key in ["caps", "cmasks"]:
                        size = tensor.size()
                        batch_size = size[0] * size[1]
                        expanded_size = (batch_size,) + size[2:]
                        tensor = tensor.view(expanded_size)
                    sample[key] = tensor
        return sample
954
33.107143
65
py
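A shape walk-through of the extra fold MILNCETask applies to caps/cmasks above: with subsampling=2 and 4 text candidates per clip, caps arrives as (batch, 2, 4, max_text_len); Task.flat_subsample merges the first two axes, and the branch above then merges the candidate axis as well. Sizes here are invented.

import torch
caps = torch.zeros(3, 2, 4, 32, dtype=torch.long)
flat = caps.view(3 * 2, 4, 32)  # what Task.flat_subsample produces.
flat = flat.view(6 * 4, 32)     # the extra fold for caps/cmasks.
print(flat.shape)               # torch.Size([24, 32])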
sign-topic
sign-topic-main/examples/MMPT/mmpt/tasks/task.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from .. import tasks from .. import models from .. import losses from ..datasets import MMDataset from .. import processors class Task(object): """ A task refers to one generic training task (e.g., training one model). """ @classmethod def config_task(cls, config): """ determine whether to load a hard-coded task or config from a generic one. via if a task string is available in config. """ if config.task is not None: # TODO (huxu): expand the search scope. task_cls = getattr(tasks, config.task) return task_cls(config) else: return Task(config) def __init__(self, config): self.config = config self.train_data = None self.val_data = None self.test_data = None self.model = None self.loss_fn = None self.eval_fn = None def build_dataset(self): """TODO (huxu): move processor breakdown to MMDataset.""" """fill-in `self.train_data`, `self.val_data` and `self.test_data`.""" meta_processor_cls = getattr( processors, self.config.dataset.meta_processor) video_processor_cls = getattr( processors, self.config.dataset.video_processor) text_processor_cls = getattr( processors, self.config.dataset.text_processor) aligner_cls = getattr( processors, self.config.dataset.aligner) if self.config.dataset.train_path is not None: self.config.dataset.split = "train" # may be used by meta processor. # meta_processor controls different dataset. meta_processor = meta_processor_cls(self.config.dataset) video_processor = video_processor_cls(self.config.dataset) text_processor = text_processor_cls(self.config.dataset) aligner = aligner_cls(self.config.dataset) self.train_data = MMDataset( meta_processor, video_processor, text_processor, aligner ) print("train_len", len(self.train_data)) output = self.train_data[0] self.train_data.print_example(output) if self.config.dataset.val_path is not None: self.config.dataset.split = "valid" # may be used by meta processor. meta_processor = meta_processor_cls(self.config.dataset) video_processor = video_processor_cls(self.config.dataset) text_processor = text_processor_cls(self.config.dataset) aligner = aligner_cls(self.config.dataset) self.val_data = MMDataset( meta_processor, video_processor, text_processor, aligner ) print("val_len", len(self.val_data)) output = self.val_data[0] self.val_data.print_example(output) if self.config.dataset.split == "test": # the following is run via lauching fairseq-validate. meta_processor = meta_processor_cls(self.config.dataset) video_processor = video_processor_cls(self.config.dataset) text_processor = text_processor_cls(self.config.dataset) self.test_data = MMDataset( meta_processor, video_processor, text_processor, aligner ) print("test_len", len(self.test_data)) output = self.test_data[0] self.test_data.print_example(output) def build_model(self, checkpoint=None): if self.model is None: model_cls = getattr(models, self.config.model.model_cls) self.model = model_cls(self.config) if checkpoint is not None: self.load_checkpoint(checkpoint) return self.model def load_checkpoint(self, checkpoint): if self.model is None: raise ValueError("model is not initialized.") state_dict = torch.load(checkpoint) state_dict = self._trim_state_dict(state_dict) self.model.load_state_dict(state_dict, strict=False) # if it's a fp16 model, turn it back. 
if next(self.model.parameters()).dtype == torch.float16: self.model = self.model.float() return self.model def _trim_state_dict(self, state_dict): from collections import OrderedDict if "state_dict" in state_dict: state_dict = state_dict["state_dict"] if "model" in state_dict: # fairseq checkpoint format. state_dict = state_dict["model"] ret_state_dict = OrderedDict() for ( key, value, ) in state_dict.items(): # remove fairseq wrapper since this is a task. if key.startswith("mmmodel"): key = key[len("mmmodel."):] ret_state_dict[key] = value return ret_state_dict def build_loss(self): if self.loss_fn is None and self.config.loss is not None: loss_cls = getattr(losses, self.config.loss.loss_cls) self.loss_fn = loss_cls() return self.loss_fn def flat_subsample(self, tensor): size = tensor.size() if len(size) >= 2: batch_size = size[0] * size[1] expanded_size = ( (batch_size,) + size[2:] if len(size) > 2 else (batch_size,) ) tensor = tensor.view(expanded_size) return tensor def reshape_subsample(self, sample): if ( hasattr(self.config.dataset, "subsampling") and self.config.dataset.subsampling is not None and self.config.dataset.subsampling > 1 ): for key in sample: if torch.is_tensor(sample[key]): sample[key] = self.flat_subsample(sample[key]) return sample def __call__(self, model, sample): loss = None loss_scalar = float("inf") sample = self.reshape_subsample(sample) outputs = self.model(**sample) sample.update(outputs) if self.loss_fn is not None: loss = self.loss_fn(**sample) loss_scalar = loss.item() batch_size = sample["caps"].size(0) sample_size = 1 return { "loss": loss, "loss_scalar": loss_scalar, "max_len": self.config.dataset.max_len, "batch_size": batch_size, "sample_size": sample_size, } def build_dataloader(self): """only used for trainer that lacks building loaders.""" raise NotImplementedError
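# ---------------------------------------------------------------------------
# Minimal sketch of `_trim_state_dict` (added for illustration; the fake
# checkpoint below is hypothetical). A fairseq checkpoint nests the weights
# under "model" and prefixes every key with "mmmodel."; trimming recovers the
# plain module names that `load_state_dict` expects. Because this module uses
# relative imports, run it as `python -m mmpt.tasks.task`.
if __name__ == "__main__":
    from collections import OrderedDict

    fake_ckpt = {"model": OrderedDict(
        [("mmmodel.encoder.weight", torch.zeros(2, 2))])}
    task = Task.__new__(Task)  # skip __init__, which needs a config
    print(list(task._trim_state_dict(fake_ckpt).keys()))  # ['encoder.weight']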
6,780
35.654054
81
py
sign-topic
sign-topic-main/examples/MMPT/mmpt/tasks/vlmtask.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch

from .task import Task


class VLMTask(Task):
    """A VLM task for reproducibility.
    The collator splits subsamples into two sub-batches.
    This should introduce no logic changes, but it does change the
    randomness in frame masking.
    """

    def flat_subsample(self, tensor):
        size = tensor.size()
        if len(size) >= 2:
            batch_size = size[0] * (size[1] // 2)
            expanded_size = (
                (batch_size, 2) + size[2:] if len(size) > 2
                else (batch_size, 2)
            )
            tensor = tensor.view(expanded_size)
            tensor = torch.cat([tensor[:, 0], tensor[:, 1]], dim=0)
        return tensor
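# ---------------------------------------------------------------------------
# Minimal sketch of what `flat_subsample` does here (added for illustration;
# shapes are hypothetical). A (bsz, 2 * sub, ...) tensor is regrouped into
# pairs and the two halves are concatenated along the batch dim, producing
# the two sub-batches mentioned in the docstring. Because this module uses a
# relative import, run it as `python -m mmpt.tasks.vlmtask`.
if __name__ == "__main__":
    task = VLMTask.__new__(VLMTask)  # skip __init__, which needs a config
    x = torch.arange(2 * 4 * 3).view(2, 4, 3)  # (bsz=2, 2*sub=4, feat=3)
    print(task.flat_subsample(x).shape)  # torch.Size([8, 3])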
856
29.607143
67
py
sign-topic
sign-topic-main/examples/MMPT/mmpt/losses/loss.py
# Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch import nn class Loss(object): def __call__(self, *args, **kwargs): raise NotImplementedError # Dummy Loss for testing. class DummyLoss(Loss): def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets) class DummyK400Loss(Loss): """dummy k400 loss for MViT.""" def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, targets, **kwargs): return self.loss( logits, torch.randint(0, 400, (logits.size(0),), device=logits.device)) class CrossEntropy(Loss): def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, targets, **kwargs): return self.loss(logits.reshape(-1, logits.size(-1)), targets.reshape(-1)) class ArgmaxCrossEntropy(Loss): def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets.argmax(dim=1)) class BCE(Loss): def __init__(self): self.loss = nn.BCEWithLogitsLoss() def __call__(self, logits, targets, **kwargs): targets = targets.squeeze(0) return self.loss(logits, targets) class NLGLoss(Loss): def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, text_label, **kwargs): targets = text_label[text_label != -100] return self.loss(logits, targets) class MSE(Loss): def __init__(self): self.loss = nn.MSELoss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets) class L1(Loss): def __init__(self): self.loss = nn.L1Loss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets) class SmoothL1(Loss): def __init__(self): self.loss = nn.SmoothL1Loss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets)
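# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; shapes are hypothetical).
# `CrossEntropy` flattens (batch, seq, vocab) logits and (batch, seq) targets
# before applying nn.CrossEntropyLoss, so it works for token-level tasks.
if __name__ == "__main__":
    logits = torch.randn(4, 7, 10)           # (batch, seq, vocab)
    targets = torch.randint(0, 10, (4, 7))   # (batch, seq)
    print(CrossEntropy()(logits, targets).item())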
2,095
22.818182
83
py
sign-topic
sign-topic-main/examples/MMPT/mmpt/losses/nce.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
softmax-based NCE loss, used by this project.
"""

import torch

from torch import nn

from .loss import Loss


class NCE(Loss):
    def __init__(self):
        # TODO (huxu): define temperature.
        self.loss = nn.CrossEntropyLoss()

    def __call__(self, align_scores, **kwargs):
        # note: we reuse the same shape as the cls head in BERT
        # (batch_size, 2), but NCE only needs one logit,
        # so we drop the weights for the second (negative) logit.
        align_scores = align_scores[:, :1]
        # duplicate negative examples
        batch_size = align_scores.size(0) // 2
        pos_scores = align_scores[:batch_size]
        neg_scores = align_scores[batch_size:].view(1, batch_size).repeat(
            batch_size, 1)
        scores = torch.cat([pos_scores, neg_scores], dim=1)
        return self.loss(
            scores,
            torch.zeros(
                (batch_size,),
                dtype=torch.long,
                device=align_scores.device),
        )


class T2VContraLoss(Loss):
    """NCE for the MM joint space, with softmax over the text2video matrix.
    """
    def __init__(self):
        # TODO (huxu): define temperature.
        self.loss = nn.CrossEntropyLoss()

    def __call__(self, pooled_video, pooled_text, **kwargs):
        batch_size = pooled_video.size(0)
        logits = torch.mm(pooled_text, pooled_video.transpose(1, 0))
        targets = torch.arange(
            batch_size,
            dtype=torch.long,
            device=pooled_video.device)
        return self.loss(logits, targets)


class V2TContraLoss(Loss):
    """NCE for the MM joint space, with softmax over the video2text matrix."""
    def __init__(self):
        # TODO (huxu): define temperature.
        self.loss = nn.CrossEntropyLoss()

    def __call__(self, pooled_video, pooled_text, **kwargs):
        batch_size = pooled_video.size(0)
        logits = torch.mm(pooled_video, pooled_text.transpose(1, 0))
        targets = torch.arange(
            batch_size,
            dtype=torch.long,
            device=pooled_video.device)
        return self.loss(logits, targets)


class MMContraLoss(Loss):
    def __init__(self):
        self.loss = nn.CrossEntropyLoss()

    def __call__(self, pooled_video, pooled_text, **kwargs):
        logits_per_video = pooled_video @ pooled_text.t()
        logits_per_text = pooled_text @ pooled_video.t()
        targets = torch.arange(
            pooled_video.size(0),
            dtype=torch.long,
            device=pooled_video.device)
        loss_video = self.loss(logits_per_video, targets)
        loss_text = self.loss(logits_per_text, targets)
        return loss_video + loss_text


class MTM(Loss):
    """Combination of MFM and MLM."""
    def __init__(self):
        self.loss = nn.CrossEntropyLoss()

    def __call__(
        self,
        video_logits,
        text_logits,
        video_label,
        text_label,
        **kwargs
    ):
        text_logits = torch.cat([
            text_logits,
            torch.zeros(
                (text_logits.size(0), 1), device=text_logits.device)
        ], dim=1)
        vt_logits = torch.cat([video_logits, text_logits], dim=0)
        # loss for video.
        video_label = torch.zeros(
            (video_logits.size(0),),
            dtype=torch.long,
            device=video_logits.device
        )
        # loss for text.
        text_label = text_label.reshape(-1)
        labels_mask = text_label != -100
        selected_text_label = text_label[labels_mask]

        vt_label = torch.cat([video_label, selected_text_label], dim=0)
        return self.loss(vt_logits, vt_label)


class MFMMLM(Loss):
    """Combination of MFM and MLM."""
    def __init__(self):
        self.loss = nn.CrossEntropyLoss()

    def __call__(
        self,
        video_logits,
        text_logits,
        video_label,
        text_label,
        **kwargs
    ):
        # loss for video.
        video_label = torch.zeros(
            (video_logits.size(0),),
            dtype=torch.long,
            device=video_logits.device
        )
        masked_frame_loss = self.loss(video_logits, video_label)

        # loss for text.
text_label = text_label.reshape(-1) labels_mask = text_label != -100 selected_text_label = text_label[labels_mask] masked_lm_loss = self.loss(text_logits, selected_text_label) return masked_frame_loss + masked_lm_loss
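# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; shapes are hypothetical).
# The contrastive losses treat the diagonal of the text/video similarity
# matrix as positives and all off-diagonal entries as in-batch negatives.
# Because this module uses a relative import, run it as
# `python -m mmpt.losses.nce`.
if __name__ == "__main__":
    pooled_video = torch.randn(8, 256)
    pooled_text = torch.randn(8, 256)
    print("t2v:", T2VContraLoss()(pooled_video, pooled_text).item())
    print("mm :", MMContraLoss()(pooled_video, pooled_text).item())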
4,586
28.216561
75
py
sign-topic
sign-topic-main/examples/MMPT/scripts/text_token_extractor/pretokenization.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import pickle import os import argparse import numpy as np from torch.utils.data import Dataset, DataLoader from mmpt.processors import PKLJSONStrTextProcessor from mmpt.utils import ShardedTensor, recursive_config class TokenizerDataset(Dataset): def __init__(self, config): self.text_processor = PKLJSONStrTextProcessor(config) self.video_ids = list(self.text_processor.data.keys()) def __getitem__(self, idx): video_id = self.video_ids[idx] return video_id, self.text_processor(video_id) def __len__(self): return len(self.video_ids) def numpify(shard_idx, video_ids, captions, target_dir, split, prefix, max_cap_len=32): startends = [] caps_ids = [] for video_id in video_ids: caption = captions[video_id] startend = [] cap_ids = [] for start, end, cap in zip( caption["start"], caption["end"], caption["cap"]): startend.append(np.array([start, end]).astype("float32")) cap_id = np.full((max_cap_len,), -1, dtype=np.int32) cap = cap[:max_cap_len] cap_id[:len(cap)] = cap cap_ids.append(cap_id) startends.append(np.stack(startend)) caps_ids.append(np.stack(cap_ids)) startends = ShardedTensor.from_list(startends) target_path = os.path.join( target_dir, prefix + split + "_" + str(shard_idx) ) print("save to", target_path) startends.save(target_path + ".startends") caps_ids = ShardedTensor.from_list(caps_ids) caps_ids.save(target_path + ".caps_ids") def sharding(config, out_file): with open(out_file, "rb") as fr: captions = pickle.load(fr) target_dir = config.target_dir prefix = os.path.basename( os.path.splitext(config.caption_pkl_path)[0] ) + "." + config.bert_name + "." for split in ["train", "val"]: target_path = os.path.join(target_dir, split + "_meta") with open(target_path + ".pkl", "rb") as fr: meta = pickle.load(fr) print("load meta", target_path, len(meta)) for shard_id in meta: numpify( shard_id, meta[shard_id], captions, target_dir, split, prefix ) def tokenize(config, out_file): def collator(samples): return samples dataset = TokenizerDataset(config) data = {} for idx, batch in enumerate( DataLoader(dataset, collate_fn=collator, num_workers=16)): for video_id, caption in batch: data[video_id] = caption if idx % 5000 == 0: print(idx) with open(out_file, "wb") as fw: pickle.dump(data, fw, pickle.HIGHEST_PROTOCOL) def main(args): config = recursive_config(args.config).dataset out_file = os.path.splitext(config.caption_pkl_path)[0] \ + "." + config.bert_name + ".pkl" if not os.path.isfile(out_file): tokenize(config, out_file) sharding(config, out_file) if __name__ == "__main__": parser = argparse.ArgumentParser( description="pretokenize (raw_)caption.json into pkl.") parser.add_argument('config', type=str) args = parser.parse_args() main(args)
3,408
30.859813
87
py
sign-topic
sign-topic-main/examples/MMPT/scripts/video_feature_extractor/videoreader.py
# Copyright Howto100M authors. # Copyright (c) Facebook, Inc. All Rights Reserved import torch as th import pandas as pd import os import numpy as np import ffmpeg import random from torch.utils.data import Dataset class VideoLoader(Dataset): """modified from how2's video_feature_extractor.""" def __init__( self, csv=None, video_dict=None, framerate=1, size=112, centercrop=False, hflip=False, **kwargs ): if csv is None and video_dict is None: raise ValueError("csv and video_dict cannot be both None.") if csv is not None: self.csv = pd.read_csv(csv) if video_dict is not None: self.csv = pd.DataFrame.from_dict(video_dict) self.centercrop = centercrop self.size = size self.framerate = framerate self.hflip = hflip def __len__(self): return len(self.csv) def _get_video_dim(self, video_path): probe = ffmpeg.probe(video_path) video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None) width = int(video_stream['width']) height = int(video_stream['height']) return height, width def _get_video_info(self, video_path): probe = ffmpeg.probe(video_path) video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None) return video_stream def _get_output_dim(self, h, w): if isinstance(self.size, tuple) and len(self.size) == 2: return self.size elif h >= w: return int(h * self.size / w), self.size else: return self.size, int(w * self.size / h) def __getitem__(self, idx): video_path = self.csv['video_path'].values[idx] output_file = self.csv['feature_path'].values[idx] return self._decode(output_file, video_path) def _decode(self, output_file, video_path): if not(os.path.isfile(output_file)) and os.path.isfile(video_path): try: h, w = self._get_video_dim(video_path) except Exception: print('ffprobe failed at: {}'.format(video_path)) return {'video': th.zeros(1), 'input': video_path, 'output': output_file} try: os.makedirs(os.path.dirname(output_file), exist_ok=True) height, width = self._get_output_dim(h, w) cmd = ( ffmpeg .input(video_path) .filter('fps', fps=self.framerate) .filter('scale', width, height) ) if self.hflip: cmd = cmd.filter('hflip') if self.centercrop: x = int((width - self.size) / 2.0) y = int((height - self.size) / 2.0) cmd = cmd.crop(x, y, self.size, self.size) video = self._run(cmd, output_file) except Exception: video = th.zeros(1) else: video = th.zeros(1) return {'video': video, 'input': video_path, 'output': output_file} def _run(self, cmd, output_file): out, _ = ( cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24') .run(capture_stdout=True, quiet=True) ) if self.centercrop and isinstance(self.size, int): height, width = self.size, self.size video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3]) video = th.from_numpy(video.astype('float32')) return video.permute(0, 3, 1, 2) class VideoVerifier(VideoLoader): def __getitem__(self, idx): video_path = self.csv['video_path'].values[idx] try: return self._get_video_info(video_path) except Exception: # print('ffprobe failed at: {}'.format(video_path)) return None class VideoCompressor(VideoLoader): def __init__( self, csv=None, video_dict=None, framerate=1, size=112, centercrop=False, hflip=False, crf=32, **kwargs ): super().__init__( csv, video_dict, framerate, size, centercrop, hflip ) self.crf = crf def _run(self, cmd, output_file): out, _ = ( cmd.output(filename=output_file, crf=self.crf) .run(quiet=True) ) video = None return video class VideoDownloader(VideoCompressor): """download""" def __getitem__(self, idx): video_path = 
self.csv['video_path'].values[idx]
        output_file = self.csv['feature_path'].values[idx]
        if not(os.path.isfile(output_file)):
            os.makedirs(os.path.dirname(output_file), exist_ok=True)
            cmd = "wget -O " + output_file + " " + video_path
            # import subprocess
            # subprocess.check_output(
            #     cmd,
            #     stderr=subprocess.STDOUT, shell=True)
            os.system(cmd)
        return {'video': None, 'input': video_path, 'output': output_file}


class AvKeyframeVideoCompressor(VideoLoader):
    """Extract keyframes from a video and save them as jpg.
    TODO: consider merging with `CodecProcessor`.
    """
    def __init__(
            self,
            csv=None,
            video_dict=None,
            framerate=1,
            size=112,
            centercrop=False,
            max_num_frames=5,
            **kwargs
    ):
        super().__init__(csv, video_dict, framerate, size, centercrop)
        self.max_num_frames = max_num_frames

    def _get_video_dim(self, video_fn):
        """decord cannot probe the size of a video; we use pyav instead."""
        import av
        with av.open(video_fn) as container:
            height = container.streams.video[0].codec_context.height
            width = container.streams.video[0].codec_context.width
        return height, width

    def _get_output_dim(self, height, width):
        """keep the shorter side at `self.size` and stretch the other."""
        if height >= width:
            return int(height * self.size / width), self.size
        else:
            return self.size, int(width * self.size / height)

    def __getitem__(self, idx):
        import av
        video_path = self.csv['video_path'].values[idx]
        output_file = self.csv['feature_path'].values[idx]
        if not(os.path.isdir(output_file)) and os.path.isfile(video_path):
            try:
                h, w = self._get_video_dim(video_path)
            except Exception:
                print('probe failed at: {}'.format(video_path))
                return {'video': th.zeros(1), 'input': video_path,
                        'output': output_file}

            try:
                height, width = self._get_output_dim(h, w)
                # new for av.
                with av.open(video_path) as container:
                    container.streams.video[0].thread_type = "AUTO"
                    container.streams.video[0].codec_context.height = height
                    container.streams.video[0].codec_context.width = width
                    if self.framerate == 0:  # keyframe.
                        container.streams.video[0].codec_context.skip_frame = 'NONKEY'
                    frames = []
                    for frame in container.decode(video=0):
                        frames.append(frame)
                    frames = random.sample(frames, self.max_num_frames)

                os.makedirs(output_file, exist_ok=True)
                for frame in frames:
                    frame.to_image().save(
                        os.path.join(
                            output_file,
                            "%04d.jpg" % frame.index))
            except Exception:
                print('extract failed at: {}'.format(video_path))
                return {'video': th.zeros(1), 'input': video_path,
                        'output': output_file}
        video = th.zeros(1)
        return {'video': video, 'input': video_path, 'output': output_file}
8,322
33.251029
86
py
sign-topic
sign-topic-main/examples/MMPT/scripts/video_feature_extractor/preprocessing.py
# Copyright Howto100m authors. # Copyright (c) Facebook, Inc. All Rights Reserved import torch as th class Normalize(object): def __init__(self, mean, std): self.mean = th.FloatTensor(mean).view(1, 3, 1, 1) self.std = th.FloatTensor(std).view(1, 3, 1, 1) def __call__(self, tensor): tensor = (tensor - self.mean) / (self.std + 1e-8) return tensor class Preprocessing(object): def __init__(self, type): self.type = type if type == '2d': self.norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) elif type == '3d': self.norm = Normalize(mean=[110.6, 103.2, 96.3], std=[1.0, 1.0, 1.0]) elif type == 'vmz': self.norm = Normalize(mean=[110.201, 100.64, 95.997], std=[58.1489, 56.4701, 55.3324]) def _zero_pad(self, tensor, size): n = size - len(tensor) % size if n == size: return tensor else: z = th.zeros(n, tensor.shape[1], tensor.shape[2], tensor.shape[3]) return th.cat((tensor, z), 0) def __call__(self, tensor): if self.type == '2d': tensor = tensor / 255.0 tensor = self.norm(tensor) elif self.type == 'vmz': #tensor = self._zero_pad(tensor, 8) tensor = self._zero_pad(tensor, 10) tensor = self.norm(tensor) #tensor = tensor.view(-1, 8, 3, 112, 112) tensor = tensor.view(-1, 10, 3, 112, 112) tensor = tensor.transpose(1, 2) elif self.type == '3d': tensor = self._zero_pad(tensor, 16) tensor = self.norm(tensor) tensor = tensor.view(-1, 16, 3, 112, 112) tensor = tensor.transpose(1, 2) elif self.type == 's3d': tensor = tensor / 255.0 tensor = self._zero_pad(tensor, 30) tensor = tensor.view(-1, 30, 3, 224, 224) # N x 30 x 3 x H x W tensor = tensor.transpose(1, 2) # N x 3 x 30 x H x W # for vae do nothing return tensor
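# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; the frame count is
# hypothetical). For the 's3d' branch, raw uint8-range frames are rescaled
# to [0, 1], zero-padded to a multiple of 30, and regrouped into clips of
# shape (n_clips, 3, 30, 224, 224).
if __name__ == "__main__":
    frames = th.randint(0, 255, (7, 3, 224, 224)).float()
    clips = Preprocessing('s3d')(frames)
    print(clips.shape)  # torch.Size([1, 3, 30, 224, 224])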
2,071
34.724138
98
py
sign-topic
sign-topic-main/examples/MMPT/scripts/video_feature_extractor/model.py
# Copyright (c) Howto100M authors and Facebook, Inc. All Rights Reserved

import torch as th

from torch import nn


class GlobalAvgPool(nn.Module):
    def __init__(self):
        super(GlobalAvgPool, self).__init__()

    def forward(self, x):
        return th.mean(x, dim=[-2, -1])


def get_model(args):
    assert args.type in ['2d', '3d', 'vmz', 's3d', 'vae']
    if args.type == '2d':
        print('Loading 2D-ResNet-152 ...')
        import torchvision.models as models
        model = models.resnet152(pretrained=True)
        model = nn.Sequential(*list(model.children())[:-2], GlobalAvgPool())
        model = model.cuda()
    elif args.type == 'vmz':
        print('Loading VMZ ...')
        from vmz34 import r2plus1d_34
        model = r2plus1d_34(pretrained_path=args.vmz_model_path, pretrained_num_classes=487)
        model = model.cuda()
    elif args.type == 's3d':
        # we use one copy of S3D instead of duplicating another one for feature extraction.
        from mmpt.processors.models.s3dg import S3D
        model = S3D('pretrained_models/s3d_dict.npy', 512)
        model.load_state_dict(th.load('pretrained_models/s3d_howto100m.pth'))
        model = model.cuda()
    elif args.type == '3d':
        print('Loading 3D-ResneXt-101 ...')
        from videocnn.models import resnext
        model = resnext.resnet101(
            num_classes=400,
            shortcut_type='B',
            cardinality=32,
            sample_size=112,
            sample_duration=16,
            last_fc=False)
        model = model.cuda()
        model_data = th.load(args.resnext101_model_path)
        model.load_state_dict(model_data)
    elif args.type == 'vae':
        from openaivae import OpenAIParallelDiscreteVAE
        model = OpenAIParallelDiscreteVAE()
        model = model.cuda()
    else:
        raise ValueError("model not supported yet.")

    model.eval()
    print('loaded')
    return model
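# ---------------------------------------------------------------------------
# Minimal sketch of the pooling head used in the '2d' branch (added for
# illustration; the feature-map shape is hypothetical). GlobalAvgPool
# averages over the two trailing spatial dims of a CNN feature map.
if __name__ == "__main__":
    feat = th.randn(2, 2048, 7, 7)
    print(GlobalAvgPool()(feat).shape)  # torch.Size([2, 2048])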
1,921
31.576271
92
py
sign-topic
sign-topic-main/examples/MMPT/scripts/video_feature_extractor/extract.py
# Copyright Howto100M authors. # Copyright (c) Facebook, Inc. All Rights Reserved import torch as th import torch.nn.functional as F import math import numpy as np import argparse from torch.utils.data import DataLoader from model import get_model from preprocessing import Preprocessing from random_sequence_shuffler import RandomSequenceSampler from tqdm import tqdm from pathbuilder import PathBuilder from videoreader import VideoLoader parser = argparse.ArgumentParser(description='Easy video feature extractor') parser.add_argument('--vdir', type=str) parser.add_argument('--fdir', type=str) parser.add_argument('--hflip', type=int, default=0) parser.add_argument('--batch_size', type=int, default=64, help='batch size') parser.add_argument('--type', type=str, default='2d', help='CNN type') parser.add_argument('--half_precision', type=int, default=0, help='output half precision float') parser.add_argument('--num_decoding_thread', type=int, default=4, help='Num parallel thread for video decoding') parser.add_argument('--l2_normalize', type=int, default=1, help='l2 normalize feature') parser.add_argument('--resnext101_model_path', type=str, default='model/resnext101.pth', help='Resnext model path') parser.add_argument('--vmz_model_path', type=str, default='model/r2plus1d_34_clip8_ig65m_from_scratch-9bae36ae.pth', help='vmz model path') args = parser.parse_args() # TODO: refactor all args into config. (current code is from different people.) CONFIGS = { "2d": { "fps": 1, "size": 224, "centercrop": False, "shards": 0, }, "3d": { "fps": 24, "size": 112, "centercrop": True, "shards": 0, }, "s3d": { "fps": 30, "size": 224, "centercrop": True, "shards": 0, }, "vmz": { "fps": 24, "size": 112, "centercrop": True, "shards": 0, }, "vae": { "fps": 2, "size": 256, "centercrop": True, "shards": 100, } } config = CONFIGS[args.type] video_dirs = args.vdir feature_dir = args.fdir video_dict = PathBuilder.build(video_dirs, feature_dir, ".npy", config["shards"]) dataset = VideoLoader( video_dict=video_dict, framerate=config["fps"], size=config["size"], centercrop=config["centercrop"], hflip=args.hflip ) n_dataset = len(dataset) sampler = RandomSequenceSampler(n_dataset, 10) loader = DataLoader( dataset, batch_size=1, shuffle=False, num_workers=args.num_decoding_thread, sampler=sampler if n_dataset > 10 else None, ) preprocess = Preprocessing(args.type) model = get_model(args) with th.no_grad(): for k, data in tqdm(enumerate(loader), total=loader.__len__(), ascii=True): input_file = data['input'][0] output_file = data['output'][0] if len(data['video'].shape) > 3: video = data['video'].squeeze() if len(video.shape) == 4: video = preprocess(video) n_chunk = len(video) if args.type == 'vmz': n_chunk = math.ceil(n_chunk/float(3)) features = th.cuda.FloatTensor(n_chunk, 512).fill_(0) elif args.type == 's3d': features = th.cuda.FloatTensor(n_chunk, 512).fill_(0) elif args.type == "vae": features = th.cuda.LongTensor(n_chunk, 1024).fill_(0) else: features = th.cuda.FloatTensor(n_chunk, 2048).fill_(0) n_iter = int(math.ceil(n_chunk / float(args.batch_size))) for i in range(n_iter): factor = 1 if args.type == 'vmz': factor = 3 min_ind = factor * i * args.batch_size max_ind = factor * (i + 1) * args.batch_size video_batch = video[min_ind:max_ind:factor].cuda() if args.type == '2d': batch_features = model(video_batch) # (51, 487), (51, 512) elif args.type == 's3d': batch_features = model(video_batch) batch_features = batch_features['video_embedding'] elif args.type == "vae": # image_code. 
batch_features = model(video_batch) else: batch_pred, batch_features = model(video_batch) # (51, 487), (51, 512) if args.l2_normalize: batch_features = F.normalize(batch_features, dim=1) features[i*args.batch_size:(i+1)*args.batch_size] = batch_features features = features.cpu().numpy() if args.half_precision: if args.type == "vae": features = features.astype(np.int16) else: features = features.astype('float16') else: if args.type == "vae": features = features.astype(np.int32) else: features = features.astype('float32') np.save(output_file, features) else: print('Video {} error.'.format(input_file))
5,529
34
116
py
sign-topic
sign-topic-main/examples/MMPT/scripts/video_feature_extractor/random_sequence_shuffler.py
# Copyright (c) Facebook, Inc. All Rights Reserved import numpy as np from torch.utils.data.sampler import Sampler class RandomSequenceSampler(Sampler): def __init__(self, n_sample, seq_len): self.n_sample = n_sample self.seq_len = seq_len def _pad_ind(self, ind): zeros = np.zeros(self.seq_len - self.n_sample % self.seq_len) ind = np.concatenate((ind, zeros)) return ind def __iter__(self): idx = np.arange(self.n_sample) if self.n_sample % self.seq_len != 0: idx = self._pad_ind(idx) idx = np.reshape(idx, (-1, self.seq_len)) np.random.shuffle(idx) idx = np.reshape(idx, (-1)) return iter(idx.astype(int)) def __len__(self): return self.n_sample + (self.seq_len - self.n_sample % self.seq_len)
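# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; the sizes are hypothetical).
# Indices are shuffled in contiguous blocks of `seq_len`, so consecutive
# dataset items (e.g. chunks of the same video) stay together; the last
# block is padded with index 0 when n_sample is not a multiple of seq_len.
if __name__ == "__main__":
    sampler = RandomSequenceSampler(n_sample=12, seq_len=5)
    print(list(sampler))   # 15 indices: three shuffled blocks of 5
    print(len(sampler))    # 12 + (5 - 12 % 5) = 15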
829
26.666667
76
py
sign-topic
sign-topic-main/examples/translation_moe/translation_moe_src/mean_pool_gating_network.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn.functional as F


class MeanPoolGatingNetwork(torch.nn.Module):
    """A simple mean-pooling gating network for selecting experts.

    This module applies mean pooling over an encoder's output and returns
    responsibilities for each expert. The encoder format is expected to match
    :class:`fairseq.models.transformer.TransformerEncoder`.
    """

    def __init__(self, embed_dim, num_experts, dropout=None):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_experts = num_experts

        self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
        self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None
        self.fc2 = torch.nn.Linear(embed_dim, num_experts)

    def forward(self, encoder_out):
        if not (
            "encoder_out" in encoder_out
            and "encoder_padding_mask" in encoder_out
            and encoder_out["encoder_out"][0].size(2) == self.embed_dim
        ):
            raise ValueError("Unexpected format for encoder_out")

        # mean pooling over time
        encoder_padding_mask = encoder_out["encoder_padding_mask"][0]  # B x T
        encoder_out = encoder_out["encoder_out"][0].transpose(0, 1)  # B x T x C
        if encoder_padding_mask is not None:
            encoder_out = encoder_out.clone()  # required because of transpose above
            encoder_out[encoder_padding_mask] = 0
            ntokens = torch.sum(~encoder_padding_mask, dim=1, keepdim=True)
            x = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out)
        else:
            x = torch.mean(encoder_out, dim=1)

        x = torch.tanh(self.fc1(x))
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=-1, dtype=torch.float32).type_as(x)
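# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; the dimensions are
# hypothetical). The gating network consumes the T x B x C encoder output
# plus its B x T padding mask and returns B x num_experts expert
# log-responsibilities.
if __name__ == "__main__":
    gate = MeanPoolGatingNetwork(embed_dim=16, num_experts=3, dropout=0.1)
    encoder_out = {
        "encoder_out": [torch.randn(7, 2, 16)],  # T x B x C
        "encoder_padding_mask": [torch.zeros(2, 7, dtype=torch.bool)],
    }
    print(gate(encoder_out).shape)  # torch.Size([2, 3])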
2,011
38.45098
84
py
sign-topic
sign-topic-main/examples/translation_moe/translation_moe_src/logsumexp_moe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch class LogSumExpMoE(torch.autograd.Function): """Standard LogSumExp forward pass, but use *posterior* for the backward. See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. """ @staticmethod def forward(ctx, logp, posterior, dim=-1): ctx.save_for_backward(posterior) ctx.dim = dim return torch.logsumexp(logp, dim=dim) @staticmethod def backward(ctx, grad_output): (posterior,) = ctx.saved_tensors grad_logp = grad_output.unsqueeze(ctx.dim) * posterior return grad_logp, None, None
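# ---------------------------------------------------------------------------
# Minimal sketch of the custom backward (added for illustration; the sizes
# are hypothetical). The forward pass is a plain logsumexp over the expert
# dim, but each expert's gradient is weighted by the fixed posterior passed
# in, matching the `-LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1)` call in the
# translation_moe task.
if __name__ == "__main__":
    logp = torch.randn(4, 3, requires_grad=True)     # B x K
    posterior = torch.softmax(logp.detach(), dim=1)
    (-LogSumExpMoE.apply(logp, posterior, 1)).sum().backward()
    print(torch.allclose(logp.grad, -posterior))     # True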
837
30.037037
78
py
sign-topic
sign-topic-main/examples/translation_moe/translation_moe_src/translation_moe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import torch from omegaconf import II from fairseq import metrics, utils from fairseq.dataclass import ChoiceEnum from fairseq.tasks import register_task from fairseq.tasks.translation import TranslationConfig, TranslationTask from .logsumexp_moe import LogSumExpMoE from .mean_pool_gating_network import MeanPoolGatingNetwork METHOD_CHOICES = ChoiceEnum(["sMoElp", "sMoEup", "hMoElp", "hMoEup"]) @dataclass class TranslationMoEConfig(TranslationConfig): method: METHOD_CHOICES = field( default="hMoEup", metadata={"help": "MoE method"}, ) num_experts: int = field( default=3, metadata={"help": "number of experts"}, ) mean_pool_gating_network: bool = field( default=False, metadata={"help": "use a simple mean-pooling gating network"}, ) mean_pool_gating_network_dropout: float = field( default=0, metadata={"help": "dropout for mean-pooling gating network"}, ) mean_pool_gating_network_encoder_dim: int = field( default=0, metadata={"help": "encoder output dim for mean-pooling gating network"}, ) gen_expert: int = field( default=0, metadata={"help": "which expert to use for generation"}, ) sentence_avg: bool = II("optimization.sentence_avg") @register_task("translation_moe", dataclass=TranslationMoEConfig) class TranslationMoETask(TranslationTask): """ Translation task for Mixture of Experts (MoE) models. See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. Args: src_dict (~fairseq.data.Dictionary): dictionary for the source language tgt_dict (~fairseq.data.Dictionary): dictionary for the target language .. note:: The translation task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate` and :mod:`fairseq-interactive`. The translation task provides the following additional command-line arguments: .. 
argparse:: :ref: fairseq.tasks.translation_parser :prog: """ cfg: TranslationMoEConfig def __init__(self, cfg: TranslationMoEConfig, src_dict, tgt_dict): if cfg.method == "sMoElp": # soft MoE with learned prior self.uniform_prior = False self.hard_selection = False elif cfg.method == "sMoEup": # soft MoE with uniform prior self.uniform_prior = True self.hard_selection = False elif cfg.method == "hMoElp": # hard MoE with learned prior self.uniform_prior = False self.hard_selection = True elif cfg.method == "hMoEup": # hard MoE with uniform prior self.uniform_prior = True self.hard_selection = True # add indicator tokens for each expert for i in range(cfg.num_experts): # add to both dictionaries in case we're sharing embeddings src_dict.add_symbol("<expert_{}>".format(i)) tgt_dict.add_symbol("<expert_{}>".format(i)) super().__init__(cfg, src_dict, tgt_dict) def build_model(self, cfg): from fairseq import models model = models.build_model(cfg, self) if not self.uniform_prior and not hasattr(model, "gating_network"): if self.cfg.mean_pool_gating_network: if self.cfg.mean_pool_gating_network_encoder_dim > 0: encoder_dim = self.cfg.mean_pool_gating_network_encoder_dim elif getattr(cfg, "encoder_embed_dim", None): # assume that encoder_embed_dim is the encoder's output dimension encoder_dim = cfg.encoder_embed_dim else: raise ValueError( "Must specify --mean-pool-gating-network-encoder-dim" ) if self.cfg.mean_pool_gating_network_dropout > 0: dropout = self.cfg.mean_pool_gating_network_dropout elif getattr(cfg, "dropout", None): dropout = cfg.dropout else: raise ValueError("Must specify task.mean_pool_gating_network_dropout") model.gating_network = MeanPoolGatingNetwork( encoder_dim, self.cfg.num_experts, dropout, ) else: raise ValueError( "translation_moe task with learned prior requires the model to " "have a gating network; try using --mean-pool-gating-network" ) return model def expert_index(self, i): return i + self.tgt_dict.index("<expert_0>") def _get_loss(self, sample, model, criterion): assert hasattr( criterion, "compute_loss" ), "translation_moe task requires the criterion to implement the compute_loss() method" k = self.cfg.num_experts bsz = sample["target"].size(0) def get_lprob_y(encoder_out, prev_output_tokens_k): net_output = model.decoder( prev_output_tokens=prev_output_tokens_k, encoder_out=encoder_out, ) loss, _ = criterion.compute_loss(model, net_output, sample, reduce=False) loss = loss.view(bsz, -1) return -loss.sum(dim=1, keepdim=True) # -> B x 1 def get_lprob_yz(winners=None): encoder_out = model.encoder( src_tokens=sample["net_input"]["src_tokens"], src_lengths=sample["net_input"]["src_lengths"], ) if winners is None: lprob_y = [] for i in range(k): prev_output_tokens_k = sample["net_input"][ "prev_output_tokens" ].clone() assert not prev_output_tokens_k.requires_grad prev_output_tokens_k[:, 0] = self.expert_index(i) lprob_y.append(get_lprob_y(encoder_out, prev_output_tokens_k)) lprob_y = torch.cat(lprob_y, dim=1) # -> B x K else: prev_output_tokens_k = sample["net_input"]["prev_output_tokens"].clone() prev_output_tokens_k[:, 0] = self.expert_index(winners) lprob_y = get_lprob_y(encoder_out, prev_output_tokens_k) # -> B if self.uniform_prior: lprob_yz = lprob_y else: lprob_z = model.gating_network(encoder_out) # B x K if winners is not None: lprob_z = lprob_z.gather(dim=1, index=winners.unsqueeze(-1)) lprob_yz = lprob_y + lprob_z.type_as(lprob_y) # B x K return lprob_yz # compute responsibilities without dropout with utils.model_eval(model): # disable dropout with 
torch.no_grad(): # disable autograd lprob_yz = get_lprob_yz() # B x K prob_z_xy = torch.nn.functional.softmax(lprob_yz, dim=1) assert not prob_z_xy.requires_grad # compute loss with dropout if self.hard_selection: winners = prob_z_xy.max(dim=1)[1] loss = -get_lprob_yz(winners) else: lprob_yz = get_lprob_yz() # B x K loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1) loss = loss.sum() sample_size = ( sample["target"].size(0) if self.cfg.sentence_avg else sample["ntokens"] ) logging_output = { "loss": utils.item(loss.data), "ntokens": sample["ntokens"], "nsentences": bsz, "sample_size": sample_size, "posterior": prob_z_xy.float().sum(dim=0).cpu(), } return loss, sample_size, logging_output def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() loss, sample_size, logging_output = self._get_loss(sample, model, criterion) if ignore_grad: loss *= 0 optimizer.backward(loss) return loss, sample_size, logging_output def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): loss, sample_size, logging_output = self._get_loss(sample, model, criterion) return loss, sample_size, logging_output def inference_step( self, generator, models, sample, prefix_tokens=None, expert=None, constraints=None, ): expert = expert or self.cfg.gen_expert with torch.no_grad(): return generator.generate( models, sample, prefix_tokens=prefix_tokens, constraints=constraints, bos_token=self.expert_index(expert), ) def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) metrics.log_scalar( "posterior", sum(log["posterior"] for log in logging_outputs if "posterior" in log), )
9,484
35.621622
95
py
sign-topic
sign-topic-main/examples/laser/laser_src/laser_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import Any, Dict, List, Optional
from torch import Tensor

import torch
import torch.nn as nn

from fairseq.models import (
    FairseqEncoderDecoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import (
    base_architecture,
    Embedding,
    TransformerModel,
    TransformerEncoder,
    TransformerDecoder,
)
from fairseq.modules import (
    TransformerDecoderLayer,
)

logger = logging.getLogger(__name__)


@register_model("laser_transformer")
class LaserTransformerModel(FairseqEncoderDecoderModel):
    """Train Transformer for LASER task

    Requires --task laser
    """

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    def forward(
        self,
        src_tokens,
        src_lengths,
        prev_output_tokens=None,
        tgt_tokens=None,
        tgt_lengths=None,
        target_language_id=-1,
        dataset_name="",
    ):
        laser_encoder_out = self.encoder(src_tokens, src_lengths)
        return self.decoder(
            prev_output_tokens, laser_encoder_out, lang_id=target_language_id
        )

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        TransformerModel.add_args(parser)
        parser.add_argument(
            "--decoder-lang-embed-dim",
            type=int,
            metavar="N",
            help="decoder language embedding dimension",
        )

    @classmethod
    def build_model(cls, args, task):
        base_laser_transformer_architecture(args)

        def load_embed_tokens(dictionary, embed_dim):
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()

            return Embedding(num_embeddings, embed_dim, padding_idx)

        encoder_embed_tokens = load_embed_tokens(
            task.source_dictionary, args.encoder_embed_dim
        )
        decoder_embed_tokens = load_embed_tokens(
            task.target_dictionary, args.decoder_embed_dim
        )
        num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0

        encoder = LaserTransformerEncoder(
            args, task.source_dictionary, encoder_embed_tokens
        )

        decoder = LaserTransformerDecoder(
            args,
            task.target_dictionary,
            decoder_embed_tokens,
            num_langs=num_langs,
            lang_embed_dim=args.decoder_lang_embed_dim,
        )

        return cls(encoder, decoder)


class LaserTransformerEncoder(TransformerEncoder):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, src_tokens, *args, **kwargs):
        encoder_out = super().forward(src_tokens, *args, **kwargs)

        x = encoder_out["encoder_out"][0]  # T x B x C
        padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1)

        if padding_mask.any():
            x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x)

        # Build the sentence embedding by max-pooling over the encoder outputs
        sentemb = x.max(dim=0)[0]

        # The Pytorch Mobile lite interpreter does not support returning
        # NamedTuple in `forward`, so we use a dictionary instead.
        # TorchScript does not support mixed values so the values are all lists.
        # The empty list is equivalent to None.
return {"sentemb": [sentemb]} # B x C @torch.jit.export def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): """ Same as the one in transformer.py, with new_sentemb """ if len(encoder_out["sentemb"]) == 0: new_sentemb = [] else: new_sentemb = [encoder_out["sentemb"][0].index_select(0, new_order)] return { "sentemb": new_sentemb, # B x C } class LaserTransformerDecoder(TransformerDecoder): def __init__(self, args, dictionary, *kargs, **kwargs): self.num_langs = kwargs.get("num_langs", 1) self.lang_embed_dim = kwargs.get("lang_embed_dim", 0) kwargs.pop("num_langs", None) kwargs.pop("lang_embed_dim", None) super().__init__(args, dictionary, *kargs, **kwargs, no_encoder_attn=True) if self.lang_embed_dim == 0: self.embed_lang = None else: self.embed_lang = nn.Embedding(self.num_langs, self.lang_embed_dim) nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1) if self.output_projection is not None: laser_output_embed_dim = ( self.output_embed_dim + self.lang_embed_dim + args.encoder_embed_dim ) self.output_projection = nn.Linear( laser_output_embed_dim, len(dictionary), bias=False ) nn.init.normal_( self.output_projection.weight, mean=0, std=laser_output_embed_dim ** -0.5, ) def build_decoder_layer(self, args, no_encoder_attn=False): decoder_embed_dim = args.decoder_embed_dim args.decoder_embed_dim = ( decoder_embed_dim + self.lang_embed_dim + args.encoder_embed_dim ) res = TransformerDecoderLayer(args, no_encoder_attn=True) args.decoder_embed_dim = decoder_embed_dim return res def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, lang_id: Optional[int] = None, ): """ Similar to *forward* but only return features. Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). alignment_layer (int, optional): return mean alignment over heads at this layer (default: last layer). alignment_heads (int, optional): only average alignment over this many heads (default: all heads). 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ if alignment_layer is None: alignment_layer = self.num_layers - 1 # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] bsz, seqlen = prev_output_tokens.size() # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.quant_noise is not None: x = self.quant_noise(x) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) if self.embed_lang is not None: lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id) langemb = self.embed_lang(lang_ids) langemb = langemb.unsqueeze(0) repeat_vals = [x.shape[0] // langemb.shape[0]] + [-1] * ( len(langemb.shape) - 1 ) x = torch.cat((x, langemb.expand(*repeat_vals)), dim=-1) sentemb = encoder_out["sentemb"][0] sentemb = sentemb.unsqueeze(0) repeat_vals = [x.shape[0] // sentemb.shape[0]] + [-1] * (len(sentemb.shape) - 1) x = torch.cat((x, sentemb.expand(*repeat_vals)), dim=-1) self_attn_padding_mask: Optional[Tensor] = None if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) # decoder layers attn: Optional[Tensor] = None inner_states: List[Optional[Tensor]] = [x] for idx, layer in enumerate(self.layers): if incremental_state is None and not full_context_alignment: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None x, layer_attn, _ = layer( x, None, None, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=bool((idx == alignment_layer)), need_head_weights=bool((idx == alignment_layer)), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float().to(x) if attn is not None: if alignment_heads is not None: attn = attn[:alignment_heads] # average probabilities over heads attn = attn.mean(dim=0) if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": [attn], "inner_states": inner_states} def forward( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, lang_id: Optional[int] = None, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). 
Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ assert lang_id is not None x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, alignment_layer=alignment_layer, alignment_heads=alignment_heads, lang_id=lang_id, ) if not features_only: x = self.output_layer(x) return x, extra @register_model_architecture("laser_transformer", "laser_transformer") def base_laser_transformer_architecture(args): base_architecture(args) args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0)
11,947
32.656338
88
py
sign-topic
sign-topic-main/examples/laser/laser_src/laser_lstm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) @register_model("laser_lstm") class LSTMModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) def forward( self, src_tokens, src_lengths, prev_output_tokens=None, tgt_tokens=None, tgt_lengths=None, target_language_id=None, dataset_name="", ): assert target_language_id is not None src_encoder_out = self.encoder(src_tokens, src_lengths, dataset_name) return self.decoder( prev_output_tokens, src_encoder_out, lang_id=target_language_id ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", default=0.1, type=float, metavar="D", help="dropout probability", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-embed-path", default=None, type=str, metavar="STR", help="path to pre-trained encoder embedding", ) parser.add_argument( "--encoder-hidden-size", type=int, metavar="N", help="encoder hidden size" ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="number of encoder layers" ) parser.add_argument( "--encoder-bidirectional", action="store_true", help="make all layers of encoder bidirectional", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-embed-path", default=None, type=str, metavar="STR", help="path to pre-trained decoder embedding", ) parser.add_argument( "--decoder-hidden-size", type=int, metavar="N", help="decoder hidden size" ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="number of decoder layers" ) parser.add_argument( "--decoder-out-embed-dim", type=int, metavar="N", help="decoder output embedding dimension", ) parser.add_argument( "--decoder-zero-init", type=str, metavar="BOOL", help="initialize the decoder hidden/cell state to zero", ) parser.add_argument( "--decoder-lang-embed-dim", type=int, metavar="N", help="decoder language embedding dimension", ) parser.add_argument( "--fixed-embeddings", action="store_true", help="keep embeddings fixed (ENCODER ONLY)", ) # TODO Also apply to decoder embeddings? 
# Granular dropout settings (if not specified these default to --dropout) parser.add_argument( "--encoder-dropout-in", type=float, metavar="D", help="dropout probability for encoder input embedding", ) parser.add_argument( "--encoder-dropout-out", type=float, metavar="D", help="dropout probability for encoder output", ) parser.add_argument( "--decoder-dropout-in", type=float, metavar="D", help="dropout probability for decoder input embedding", ) parser.add_argument( "--decoder-dropout-out", type=float, metavar="D", help="dropout probability for decoder output", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) pretrained_encoder_embed = None if args.encoder_embed_path: pretrained_encoder_embed = load_pretrained_embedding_from_file( args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim ) pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim ) num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0 encoder = LSTMEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, hidden_size=args.encoder_hidden_size, num_layers=args.encoder_layers, dropout_in=args.encoder_dropout_in, dropout_out=args.encoder_dropout_out, bidirectional=args.encoder_bidirectional, pretrained_embed=pretrained_encoder_embed, fixed_embeddings=args.fixed_embeddings, ) decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, zero_init=options.eval_bool(args.decoder_zero_init), encoder_embed_dim=args.encoder_embed_dim, encoder_output_units=encoder.output_units, pretrained_embed=pretrained_decoder_embed, num_langs=num_langs, lang_embed_dim=args.decoder_lang_embed_dim, ) return cls(encoder, decoder) class LSTMEncoder(FairseqEncoder): """LSTM encoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, bidirectional=False, left_pad=True, pretrained_embed=None, padding_value=0.0, fixed_embeddings=False, ): super().__init__(dictionary) self.num_layers = num_layers self.dropout_in = dropout_in self.dropout_out = dropout_out self.bidirectional = bidirectional self.hidden_size = hidden_size num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) else: self.embed_tokens = pretrained_embed if fixed_embeddings: self.embed_tokens.weight.requires_grad = False self.lstm = LSTM( input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, dropout=self.dropout_out if num_layers > 1 else 0.0, bidirectional=bidirectional, ) self.left_pad = left_pad self.padding_value = padding_value self.output_units = hidden_size if bidirectional: self.output_units *= 2 def 
forward(self, src_tokens, src_lengths, dataset_name): if self.left_pad: # convert left-padding to right-padding src_tokens = utils.convert_padding_direction( src_tokens, self.padding_idx, left_to_right=True, ) bsz, seqlen = src_tokens.size() # embed tokens x = self.embed_tokens(src_tokens) x = F.dropout(x, p=self.dropout_in, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # pack embedded source tokens into a PackedSequence try: packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist()) except BaseException: raise Exception(f"Packing failed in dataset {dataset_name}") # apply LSTM if self.bidirectional: state_size = 2 * self.num_layers, bsz, self.hidden_size else: state_size = self.num_layers, bsz, self.hidden_size h0 = x.data.new(*state_size).zero_() c0 = x.data.new(*state_size).zero_() packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, _ = nn.utils.rnn.pad_packed_sequence( packed_outs, padding_value=self.padding_value ) x = F.dropout(x, p=self.dropout_out, training=self.training) assert list(x.size()) == [seqlen, bsz, self.output_units] if self.bidirectional: def combine_bidir(outs): return torch.cat( [ torch.cat([outs[2 * i], outs[2 * i + 1]], dim=0).view( 1, bsz, self.output_units ) for i in range(self.num_layers) ], dim=0, ) final_hiddens = combine_bidir(final_hiddens) final_cells = combine_bidir(final_cells) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # Set padded outputs to -inf so they are not selected by max-pooling padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1) if padding_mask.any(): x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x) # Build the sentence embedding by max-pooling over the encoder outputs sentemb = x.max(dim=0)[0] return { "sentemb": sentemb, "encoder_out": (x, final_hiddens, final_cells), "encoder_padding_mask": encoder_padding_mask if encoder_padding_mask.any() else None, } def reorder_encoder_out(self, encoder_out_dict, new_order): encoder_out_dict["sentemb"] = encoder_out_dict["sentemb"].index_select( 0, new_order ) encoder_out_dict["encoder_out"] = tuple( eo.index_select(1, new_order) for eo in encoder_out_dict["encoder_out"] ) if encoder_out_dict["encoder_padding_mask"] is not None: encoder_out_dict["encoder_padding_mask"] = encoder_out_dict[ "encoder_padding_mask" ].index_select(1, new_order) return encoder_out_dict def max_positions(self): """Maximum input length supported by the encoder.""" return int(1e5) # an arbitrary large number class LSTMDecoder(FairseqIncrementalDecoder): """LSTM decoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, zero_init=False, encoder_embed_dim=512, encoder_output_units=512, pretrained_embed=None, num_langs=1, lang_embed_dim=0, ): super().__init__(dictionary) self.dropout_in = dropout_in self.dropout_out = dropout_out self.hidden_size = hidden_size num_embeddings = len(dictionary) padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) else: self.embed_tokens = pretrained_embed self.layers = nn.ModuleList( [ LSTMCell( input_size=encoder_output_units + embed_dim + lang_embed_dim if layer == 0 else hidden_size, hidden_size=hidden_size, ) for layer in range(num_layers) ] ) if hidden_size != out_embed_dim: self.additional_fc = Linear(hidden_size, out_embed_dim) self.fc_out = Linear(out_embed_dim, num_embeddings, 
dropout=dropout_out) if zero_init: self.sentemb2init = None else: self.sentemb2init = Linear( encoder_output_units, 2 * num_layers * hidden_size ) if lang_embed_dim == 0: self.embed_lang = None else: self.embed_lang = nn.Embedding(num_langs, lang_embed_dim) nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1) def forward( self, prev_output_tokens, encoder_out_dict, incremental_state=None, lang_id=0 ): sentemb = encoder_out_dict["sentemb"] encoder_out = encoder_out_dict["encoder_out"] if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() # get outputs from encoder encoder_outs, _, _ = encoder_out[:3] srclen = encoder_outs.size(0) # embed tokens x = self.embed_tokens(prev_output_tokens) x = F.dropout(x, p=self.dropout_in, training=self.training) # embed language identifier if self.embed_lang is not None: lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id) langemb = self.embed_lang(lang_ids) # TODO Should we dropout here??? # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental generation) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is not None: prev_hiddens, prev_cells, input_feed = cached_state else: num_layers = len(self.layers) if self.sentemb2init is None: prev_hiddens = [ x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers) ] prev_cells = [ x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers) ] else: init = self.sentemb2init(sentemb) prev_hiddens = [ init[:, (2 * i) * self.hidden_size : (2 * i + 1) * self.hidden_size] for i in range(num_layers) ] prev_cells = [ init[ :, (2 * i + 1) * self.hidden_size : (2 * i + 2) * self.hidden_size, ] for i in range(num_layers) ] input_feed = x.data.new(bsz, self.hidden_size).zero_() attn_scores = x.data.new(srclen, seqlen, bsz).zero_() outs = [] for j in range(seqlen): if self.embed_lang is None: input = torch.cat((x[j, :, :], sentemb), dim=1) else: input = torch.cat((x[j, :, :], sentemb, langemb), dim=1) for i, rnn in enumerate(self.layers): # recurrent cell hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) # hidden state becomes the input to the next layer input = F.dropout(hidden, p=self.dropout_out, training=self.training) # save state for next time step prev_hiddens[i] = hidden prev_cells[i] = cell out = hidden out = F.dropout(out, p=self.dropout_out, training=self.training) # input feeding input_feed = out # save final output outs.append(out) # cache previous states (no-op except during incremental generation) utils.set_incremental_state( self, incremental_state, "cached_state", (prev_hiddens, prev_cells, input_feed), ) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) # T x B x C -> B x T x C x = x.transpose(1, 0) # srclen x tgtlen x bsz -> bsz x tgtlen x srclen attn_scores = attn_scores.transpose(0, 2) # project back to size of vocabulary if hasattr(self, "additional_fc"): x = self.additional_fc(x) x = F.dropout(x, p=self.dropout_out, training=self.training) x = self.fc_out(x) return x, attn_scores def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is None: return def reorder_state(state): if isinstance(state, list): return [reorder_state(state_i) for state_i in state] return 
 state.index_select(0, new_order)

        new_state = tuple(map(reorder_state, cached_state))
        utils.set_incremental_state(self, incremental_state, "cached_state", new_state)

    def max_positions(self):
        """Maximum output length supported by the decoder."""
        return int(1e5)  # an arbitrary large number


def Embedding(num_embeddings, embedding_dim, padding_idx):
    m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.uniform_(m.weight, -0.1, 0.1)
    nn.init.constant_(m.weight[padding_idx], 0)
    return m


def LSTM(input_size, hidden_size, **kwargs):
    m = nn.LSTM(input_size, hidden_size, **kwargs)
    for name, param in m.named_parameters():
        if "weight" in name or "bias" in name:
            param.data.uniform_(-0.1, 0.1)
    return m


def LSTMCell(input_size, hidden_size, **kwargs):
    m = nn.LSTMCell(input_size, hidden_size, **kwargs)
    for name, param in m.named_parameters():
        if "weight" in name or "bias" in name:
            param.data.uniform_(-0.1, 0.1)
    return m


def Linear(in_features, out_features, bias=True, dropout=0):
    """Linear layer (input: N x T x C); the `dropout` argument is accepted for
    interface compatibility but is not applied here."""
    m = nn.Linear(in_features, out_features, bias=bias)
    m.weight.data.uniform_(-0.1, 0.1)
    if bias:
        m.bias.data.uniform_(-0.1, 0.1)
    return m


@register_model_architecture("laser_lstm", "laser_lstm")
def base_architecture(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
    args.encoder_hidden_size = getattr(
        args, "encoder_hidden_size", args.encoder_embed_dim
    )
    args.encoder_layers = getattr(args, "encoder_layers", 1)
    args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False)
    args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)
    args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
    args.decoder_hidden_size = getattr(
        args, "decoder_hidden_size", args.decoder_embed_dim
    )
    args.decoder_layers = getattr(args, "decoder_layers", 1)
    args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
    args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
    args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
    args.decoder_zero_init = getattr(args, "decoder_zero_init", "0")
    args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0)
    args.fixed_embeddings = getattr(args, "fixed_embeddings", False)
20,672
34.278157
89
py
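# A minimal standalone sketch (not part of the dataset rows) of the
# sentence-embedding trick used by the LASER LSTM encoder above: padded
# positions of the T x B x C encoder outputs are filled with -inf before
# max-pooling over time, so padding can never win the max. All shapes,
# lengths and values below are illustrative.
import torch

T, B, C = 5, 2, 4                                  # time, batch, channels
x = torch.randn(T, B, C)                           # fake encoder outputs
lengths = torch.tensor([5, 3])                     # true length per batch item
steps = torch.arange(T).unsqueeze(1)               # T x 1 time indices
padding_mask = (steps >= lengths.unsqueeze(0)).unsqueeze(-1)  # T x B x 1
x = x.masked_fill(padding_mask, float("-inf"))     # hide padded steps
sentemb = x.max(dim=0)[0]                          # B x C sentence embedding
assert torch.isfinite(sentemb).all()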
sign-topic
sign-topic-main/examples/text_classification/prep_how2sign.py
#!/usr/bin/env python3
# This code is based on the speech_to_text implementation (commit: d974c70)
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import errno
import os
import h5py
import argparse
import logging
import pandas as pd
from typing import Tuple
from pathlib import Path

import torch
from torch.utils.data import Dataset

from examples.SL_topic_detection.utils import (
    save_df_to_tsv,
    load_text,
)

log = logging.getLogger(__name__)

MANIFEST_COLUMNS = ['id', 'signs', 'n_frames', 'tgt_text']


class How2Sign(Dataset):
    '''
    Create a Dataset for How2Sign.
    '''
    LANGUAGES = ['en']  # TODO: add 'pt'
    SPLITS = ['train', 'val', 'test']

    def __init__(
        self,
        root: str,
        lang: str,
        split: str
    ) -> None:
        self.root = Path(root)
        assert split in self.SPLITS and lang in self.LANGUAGES
        assert self.root.is_dir()
        try:
            self.h5_sign = h5py.File(self.root / f'{split}.h5', 'r')
        except Exception:
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT), self.root / f'{split}.h5'
            )
        with h5py.File(self.root / f'{split}_filt.h5', 'w') as f:
            for key in self.h5_sign.keys():
                try:
                    f[key[:11]] = self.h5_sign[key][()]
                except Exception:  # skip keys that cannot be copied (e.g. duplicate ids)
                    pass
        self.h5_sign.close()
        self.h5_sign = h5py.File(self.root / f'{split}_filt.h5', 'r')

        self.text = load_text(self.root / f'{split}.txt', list(self.h5_sign.keys()))

        self.data = pd.read_csv(self.root / f'{split}.csv')
        self.data['TEXT'] = pd.NaT
        self.data['START_FRAME'] = pd.NaT
        self.data['END_FRAME'] = pd.NaT
        for i, row in self.data.iterrows():
            if row['VIDEO_ID'] not in list(self.h5_sign.keys()):
                print(f'Error with keypoint {row["VIDEO_ID"]}, not found inside h5_sign')
                self.data.drop(i, inplace=True)
            else:
                self.data.loc[i, 'START_FRAME'] = 0
                self.data.loc[i, 'END_FRAME'] = torch.Tensor(self.h5_sign[row['VIDEO_ID']]).shape[0]
                self.data.loc[i, 'TEXT'] = self.text[row['VIDEO_ID']]
        self.data.reset_index(drop=True, inplace=True)

    def __getitem__(self, n: int) -> Tuple[str, torch.Tensor, str, str]:
        sent_id = self.data.loc[n, 'VIDEO_ID']
        src_signs = torch.Tensor(self.h5_sign[sent_id])
        text = self.data.loc[n, 'TEXT']
        categ = self.data.loc[n, 'CATEGORY']
        return sent_id, src_signs, text, categ

    def __len__(self) -> int:
        return len(self.data)

    def filter_by_length(self, min_n_frames: int, max_n_frames: int) -> None:
        lengths = self.data['END_FRAME'] - self.data['START_FRAME'] + 1
        self.data = self.data[lengths.between(min_n_frames, max_n_frames)]


def process(args):
    root = Path(args.data_root).absolute()
    for split in How2Sign.SPLITS:
        print(f'Processing "{split}" split')
        filt_csv = root / f'{split}_filt.csv'
        for lang in How2Sign.LANGUAGES:
            dataset = How2Sign(root, lang, split)
            print('Filtering samples by length...')
            dataset.filter_by_length(args.min_n_frames, args.max_n_frames)
            print(f'{len(dataset)} samples after filtering')
            print('Saving dataframe...')
            save_df_to_tsv(dataset.data, filt_csv)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-root', '-d', required=True, type=str)
    parser.add_argument('--min-n-frames', default=150, type=int)
    parser.add_argument('--max-n-frames', default=5500, type=int)
    parser.add_argument('--overwrite', action='store_true')
    args = parser.parse_args()
    process(args)


if __name__ == '__main__':
    main()
3,974
29.576923
100
py
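# Hypothetical sketch of the h5py access pattern used by prep_how2sign.py
# above: each key of the .h5 file maps a video id to a (frames x features)
# array, and the frame count is read by wrapping the dataset in a tensor.
# The file name, key and array shape are made up for illustration.
import h5py
import numpy as np
import torch

with h5py.File("demo.h5", "w") as f:       # build a toy file
    f["video000001"] = np.random.rand(12, 3)

with h5py.File("demo.h5", "r") as h5_sign:
    for key in h5_sign.keys():
        n_frames = torch.Tensor(h5_sign[key]).shape[0]
        print(key, n_frames)               # -> video000001 12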
sign-topic
sign-topic-main/examples/latent_depth/latent_depth_src/modules/latent_layers.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn


class LayerSelect(nn.Module):
    """Compute samples (from a Gumbel-Sigmoid distribution) which are used as
    either (soft) weightings or (hard) selections of residual connections.
    https://arxiv.org/abs/2009.13102
    """

    def __init__(self, num_layers, num_logits, soft_select=False, sampling_tau=5.):
        super(LayerSelect, self).__init__()
        self.layer_logits = torch.nn.Parameter(
            torch.Tensor(num_logits, num_layers),
            requires_grad=True,
        )
        self.hard_select = not soft_select
        self.tau = sampling_tau
        self.detach_grad = False
        self.layer_samples = [None] * num_logits

    def sample(self, logit_idx):
        """To leverage the efficiency of distributed training, samples for all
        layers are computed at once for each logit_idx. Logits are parameters
        learnt independently of each other.

        Args:
            logit_idx: The index of logit parameters used for sampling.
        """
        assert logit_idx is not None
        self.samples = self._gumbel_sigmoid(
            self.layer_logits[logit_idx, :].detach()
            if self.detach_grad
            else self.layer_logits[logit_idx, :],
            dim=-1,
            tau=self.tau,
            hard=self.hard_select,
        )
        self.layer_samples[logit_idx] = self.samples

    def forward(self, i):
        sample = self.samples[i]
        return sample

    def _gumbel_sigmoid(
        self, logits, tau=1, hard=False, eps=1e-10, dim=-1, threshold=0.5
    ):
        # ~Gumbel(0,1)
        gumbels1 = (
            -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format)
            .exponential_()
            .log()
        )
        gumbels2 = (
            -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format)
            .exponential_()
            .log()
        )
        # Difference of two Gumbels because we apply a sigmoid
        gumbels1 = (logits + gumbels1 - gumbels2) / tau
        y_soft = gumbels1.sigmoid()
        if hard:
            # Straight through.
            y_hard = torch.zeros_like(
                logits, memory_format=torch.legacy_contiguous_format
            ).masked_fill(y_soft > threshold, 1.0)
            ret = y_hard - y_soft.detach() + y_soft
        else:
            # Reparametrization trick.
            ret = y_soft
        return ret
2,605
33.289474
83
py
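# Standalone sketch of the straight-through Gumbel-Sigmoid sampling that
# LayerSelect._gumbel_sigmoid implements above: hard 0/1 selections in the
# forward pass, soft gradients in the backward pass. tau and the threshold
# values are illustrative.
import torch

def gumbel_sigmoid(logits, tau=5.0, hard=True, threshold=0.5):
    g1 = -torch.empty_like(logits).exponential_().log()   # ~ Gumbel(0, 1)
    g2 = -torch.empty_like(logits).exponential_().log()
    y_soft = torch.sigmoid((logits + g1 - g2) / tau)      # difference of Gumbels
    if not hard:
        return y_soft
    y_hard = (y_soft > threshold).float()
    return y_hard - y_soft.detach() + y_soft              # straight-through trick

logits = torch.zeros(6, requires_grad=True)               # one logit per layer
sample = gumbel_sigmoid(logits)
sample.sum().backward()                                   # gradients flow via y_soft
print(sample, logits.grad)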
sign-topic
sign-topic-main/examples/latent_depth/latent_depth_src/models/latent_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Dict, Optional

import torch.nn as nn
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.transformer import TransformerDecoder, TransformerEncoder
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
from torch import Tensor

from ..modules.latent_layers import LayerSelect


class LatentTransformerEncoder(TransformerEncoder):
    """Latent depth (https://arxiv.org/abs/2009.13102) implemented in
    TransformerEncoder.
    """

    def __init__(self, args, dictionary, embed_tokens, num_logits=1):
        self.num_logits = num_logits
        self.num_layers = args.encoder_layers
        super().__init__(args, dictionary, embed_tokens)
        self.layer_select = LayerSelect(
            num_layers=self.num_layers,
            num_logits=self.num_logits,
            soft_select=getattr(args, "soft_select", False),
            sampling_tau=getattr(args, "sampling_tau", 5.),
        )
        self.lang_idx = None
        self.layers = nn.ModuleList(
            [self._build_encoder_layer(args, idx) for idx in range(args.encoder_layers)]
        )

    def set_lang_idx(self, lang_idx):
        self.lang_idx = lang_idx

    def _build_encoder_layer(self, args, idx=None):
        return LatentTransformerEncoderLayer(args, idx, layer_select=self.layer_select)

    def forward(self, src_tokens, src_lengths, return_all_hiddens: bool = False):
        self.layer_select.sample(self.lang_idx)
        return super().forward(src_tokens, src_lengths, return_all_hiddens)


class LatentTransformerEncoderLayer(TransformerEncoderLayer):
    """Encoder layer with each (non-residual) block weighted by Bernoulli or
    Gumbel-Sigmoid samples.

    Args:
        args (argparse.Namespace): parsed command-line arguments from standard
            TransformerEncoderLayer.
        idx (int): layer index (used to retrieve samples).
        layer_select (LayerSelect, optional): instance of LayerSelect module with logits
            parameters and sampling method.
    """

    def __init__(self, args, idx, layer_select=None):
        super().__init__(args)
        self.idx = idx
        self.layer_select = layer_select

    def residual_connection(self, x, residual):
        return residual + x * self.layer_select(self.idx)


class LatentTransformerDecoder(TransformerDecoder):
    """Latent depth (https://arxiv.org/abs/2009.13102) implemented in
    TransformerDecoder.
    """

    def __init__(
        self, args, dictionary, embed_tokens, no_encoder_attn=False, num_logits=1
    ):
        self.num_logits = num_logits
        self.num_layers = args.decoder_layers
        super().__init__(
            args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
        )
        self.layer_select = LayerSelect(
            num_layers=self.num_layers,
            num_logits=self.num_logits,
            soft_select=getattr(args, "soft_select", False),
            sampling_tau=getattr(args, "sampling_tau", 5.),
        )
        self.lang_idx = None
        self.layers = nn.ModuleList(
            [
                self._build_decoder_layer(args, no_encoder_attn, idx)
                for idx in range(args.decoder_layers)
            ]
        )

    def set_lang_idx(self, lang_idx):
        self.lang_idx = lang_idx

    def _build_decoder_layer(self, args, no_encoder_attn=False, idx=None):
        return LatentTransformerDecoderLayer(
            args, idx, layer_select=self.layer_select, no_encoder_attn=no_encoder_attn
        )

    def forward(
        self,
        prev_output_tokens,
        encoder_out: Optional[EncoderOut] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        features_only: bool = False,
        alignment_layer: Optional[int] = None,
        alignment_heads: Optional[int] = None,
        src_lengths: Optional[Any] = None,
        return_all_hiddens: bool = False,
    ):
        self.layer_select.sample(self.lang_idx)
        return super().forward(
            prev_output_tokens=prev_output_tokens,
            encoder_out=encoder_out,
            incremental_state=incremental_state,
            features_only=features_only,
            alignment_layer=alignment_layer,
            src_lengths=src_lengths,
            return_all_hiddens=return_all_hiddens,
        )


class LatentTransformerDecoderLayer(TransformerDecoderLayer):
    """Decoder layer with each (non-residual) block weighted by Bernoulli or
    Gumbel-Sigmoid samples.

    Args:
        args (argparse.Namespace): parsed command-line arguments from standard
            TransformerDecoderLayer.
        idx (int): layer index (used to retrieve samples).
        layer_select (LayerSelect, optional): instance of LayerSelect module with logits
            parameters and sampling method.
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).

    """

    def __init__(
        self,
        args,
        idx,
        layer_select=None,
        no_encoder_attn=False,
        add_bias_kv=False,
        add_zero_attn=False,
    ):
        super().__init__(args, no_encoder_attn, add_bias_kv, add_zero_attn)
        self.idx = idx
        self.layer_select = layer_select

    def residual_connection(self, x, residual):
        return residual + x * self.layer_select(self.idx)
5,584
34.573248
88
py
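# Tiny sketch of the weighted residual connection that both latent-depth
# layers above override: the sub-layer output is scaled by its sampled
# selection weight before joining the residual stream, so a weight of 0
# skips the layer entirely. Tensors and weights are illustrative.
import torch

def residual_connection(x, residual, layer_weight):
    return residual + x * layer_weight

residual, x = torch.randn(2, 4), torch.randn(2, 4)
print(residual_connection(x, residual, layer_weight=0.0))  # layer skipped
print(residual_connection(x, residual, layer_weight=1.0))  # plain residual add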
sign-topic
sign-topic-main/examples/latent_depth/latent_depth_src/loss/latent_depth.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import torch
from torch.nn.modules.loss import _Loss


class LatentLayersKLLoss(_Loss):
    def __init__(self, args):
        super().__init__()
        self.args = args

    def forward(self, layer_samples, lang_idx, update_num, sample_size):
        prior = self.args.prior
        samples = layer_samples[lang_idx]
        eps = 1e-7
        if prior == "uniform":
            # uniform prior
            kl_loss = (samples * (torch.log(samples + eps) - math.log(0.5))).sum(-1)
        elif prior == "agged_posterior":
            # aggregated posterior
            y_t = torch.stack([x.detach() for x in layer_samples], dim=0)
            agged_q = torch.sum(y_t, dim=0)
            row_norm = agged_q.sum(-1)
            normed_agg_q = agged_q / row_norm
            kl_loss = (
                samples * (torch.log(samples + eps) - torch.log(normed_agg_q + eps))
            ).sum(-1)
        else:
            raise NotImplementedError("The specified prior is not implemented.")

        # normalize by the number of layers
        kl_loss /= layer_samples[0].size()[0]
        kl_weight = min(
            self.args.sparsity_weight,
            (update_num - self.args.soft_update)
            * self.args.sparsity_weight
            / self.args.anneal_updates,
        )
        kl_loss *= kl_weight * sample_size
        return kl_loss


class LatentLayersSparsityLoss(_Loss):
    def __init__(self, args):
        super().__init__()
        self.args = args

    def is_valid(self, update_num):
        if self.args.target_layers <= 0:
            return False
        return update_num > (self.args.soft_update + self.args.anneal_updates)

    def forward(self, layer_samples_list, update_num, sample_size):
        batch_loss = 0
        share_loss = 0
        global_sparsity_loss = 0
        layer_samples = torch.stack(layer_samples_list, dim=0)
        if (
            self.args.target_layers > 0 or self.args.share_weight > 0
        ) and update_num > (self.args.soft_update + self.args.anneal_updates):
            # anneal sparsity weight
            if update_num < (self.args.anneal_updates + self.args.soft_update):
                weight_anneal = 0
            elif update_num < (2 * self.args.anneal_updates + self.args.soft_update):
                weight_anneal = (
                    (update_num - self.args.soft_update - self.args.anneal_updates)
                    * self.args.share_weight
                    / self.args.anneal_updates
                )
            else:
                weight_anneal = 1
            # compute ratio among languages
            layer_utilization = torch.sum(layer_samples, dim=0)
            layer_utilization /= layer_samples.size()[0]
            if self.args.share_weight > 0:
                # encourage sharing across languages
                share_loss = sum(
                    -1.0 * v * math.log(v) for v in layer_utilization if v > 0
                )
                batch_loss += (
                    weight_anneal * self.args.share_weight * sample_size * share_loss
                )
            if self.args.target_layers > 0:
                # compute the expected number of selected layers
                expected_layers = sum(layer_utilization)
                # compute L2 loss w.r.t. the target number of layers
                global_sparsity_loss = (expected_layers - self.args.target_layers) ** 2
                batch_loss += (
                    weight_anneal
                    * self.args.share_weight
                    * sample_size
                    * global_sparsity_loss
                )
        return batch_loss
3,802
37.03
86
py
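# Standalone sketch of the "uniform" branch of LatentLayersKLLoss above:
# the per-layer selection probabilities are compared against a 0.5 prior
# and the sum is normalized by the number of layers. Sample values and
# eps are illustrative.
import math
import torch

samples = torch.tensor([0.9, 0.1, 0.5])   # sampled per-layer selection probs
eps = 1e-7
kl_loss = (samples * (torch.log(samples + eps) - math.log(0.5))).sum(-1)
kl_loss /= samples.size(0)                # normalize by number of layers
print(kl_loss)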
sign-topic
sign-topic-main/examples/hubert/update_ckpt.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch src_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2.pt" ref_ckpt = "/checkpoint/wnhsu/w2v/hubert_icassp_oss_v3/iter2_km100-400k-grp-L6/oss.km500_p0_1_s334.pmw1_0.puw0_0.grpnorm.ml10.mp0_8.untie.mxsz250000.ufreq1.maxtok1400000.MU100k.s1337.ngpu32/checkpoint_last.pt" new_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2_updated.pt" def update_state(state): state["model"]["label_embs_concat"] = state["model"].pop("label_embs") state["args"].task = "hubert_pretraining" state["args"].labels = f"['{state['args'].labels}']" return state src_state = torch.load(src_ckpt) src_state = update_state(src_state) torch.save(src_state, new_ckpt)
873
37
209
py
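# Hypothetical sketch of the checkpoint-surgery pattern used by
# update_ckpt.py above: load a checkpoint dict, rename a state-dict entry
# with pop, and save the result. The paths and tensor shape below are made
# up; only the key names mirror the script.
import torch

def update_state(state):
    state["model"]["label_embs_concat"] = state["model"].pop("label_embs")
    return state

state = {"model": {"label_embs": torch.zeros(4, 8)}}
torch.save(state, "/tmp/src_ckpt.pt")
src_state = update_state(torch.load("/tmp/src_ckpt.pt"))
torch.save(src_state, "/tmp/updated_ckpt.pt")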
sign-topic
sign-topic-main/examples/hubert/simple_kmeans/dump_km_label.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import numpy as np import joblib import torch import tqdm logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("dump_km_label") class ApplyKmeans(object): def __init__(self, km_path): self.km_model = joblib.load(km_path) self.C_np = self.km_model.cluster_centers_.transpose() self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True) self.C = torch.from_numpy(self.C_np) self.Cnorm = torch.from_numpy(self.Cnorm_np) if torch.cuda.is_available(): self.C = self.C.cuda() self.Cnorm = self.Cnorm.cuda() def __call__(self, x): if isinstance(x, torch.Tensor): dist = ( x.pow(2).sum(1, keepdim=True) - 2 * torch.matmul(x, self.C) + self.Cnorm ) return dist.argmin(dim=1).cpu().numpy() else: dist = ( (x ** 2).sum(1, keepdims=True) - 2 * np.matmul(x, self.C_np) + self.Cnorm_np ) return np.argmin(dist, axis=1) def get_feat_iterator(feat_dir, split, nshard, rank): feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy" leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len" with open(leng_path, "r") as f: lengs = [int(line.rstrip()) for line in f] offsets = [0] + np.cumsum(lengs[:-1]).tolist() def iterate(): feat = np.load(feat_path, mmap_mode="r") assert feat.shape[0] == (offsets[-1] + lengs[-1]) for offset, leng in zip(offsets, lengs): yield feat[offset: offset + leng] return iterate, len(lengs) def dump_label(feat_dir, split, km_path, nshard, rank, lab_dir): apply_kmeans = ApplyKmeans(km_path) generator, num = get_feat_iterator(feat_dir, split, nshard, rank) iterator = generator() lab_path = f"{lab_dir}/{split}_{rank}_{nshard}.km" os.makedirs(lab_dir, exist_ok=True) with open(lab_path, "w") as f: for feat in tqdm.tqdm(iterator, total=num): # feat = torch.from_numpy(feat).cuda() lab = apply_kmeans(feat).tolist() f.write(" ".join(map(str, lab)) + "\n") logger.info("finished successfully") if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("feat_dir") parser.add_argument("split") parser.add_argument("km_path") parser.add_argument("nshard", type=int) parser.add_argument("rank", type=int) parser.add_argument("lab_dir") args = parser.parse_args() logging.info(str(args)) dump_label(**vars(args))
3,008
29.393939
69
py
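# Minimal NumPy sketch of the squared-distance expansion that ApplyKmeans
# uses above: ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2, so the nearest
# centroid index is the argmin of that expression. Shapes are illustrative.
import numpy as np

x = np.random.rand(10, 4)                 # 10 feature frames, dim 4
C = np.random.rand(4, 3)                  # 3 centroids, stored transposed
Cnorm = (C ** 2).sum(0, keepdims=True)
dist = (x ** 2).sum(1, keepdims=True) - 2 * np.matmul(x, C) + Cnorm
labels = np.argmin(dist, axis=1)          # cluster id per frame
naive = np.argmin(((x[:, :, None] - C[None]) ** 2).sum(1), axis=1)
assert (labels == naive).all()            # matches the naive computation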
sign-topic
sign-topic-main/examples/hubert/simple_kmeans/dump_mfcc_feature.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import os
import sys

import soundfile as sf
import torch
import torchaudio

from feature_utils import get_path_iterator, dump_feature


logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("dump_mfcc_feature")


class MfccFeatureReader(object):
    def __init__(self, sample_rate):
        self.sample_rate = sample_rate

    def read_audio(self, path, ref_len=None):
        wav, sr = sf.read(path)
        assert sr == self.sample_rate, sr
        if wav.ndim == 2:
            wav = wav.mean(-1)
        assert wav.ndim == 1, wav.ndim
        if ref_len is not None and abs(ref_len - len(wav)) > 160:
            logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
        return wav

    def get_feats(self, path, ref_len=None):
        x = self.read_audio(path, ref_len)
        with torch.no_grad():
            x = torch.from_numpy(x).float()
            x = x.view(1, -1)

            mfccs = torchaudio.compliance.kaldi.mfcc(
                waveform=x,
                sample_frequency=self.sample_rate,
                use_energy=False,
            )  # (time, freq)
            mfccs = mfccs.transpose(0, 1)  # (freq, time)
            deltas = torchaudio.functional.compute_deltas(mfccs)
            ddeltas = torchaudio.functional.compute_deltas(deltas)
            concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
            concat = concat.transpose(0, 1).contiguous()  # (time, freq)
            return concat


def main(tsv_dir, split, nshard, rank, feat_dir, sample_rate):
    reader = MfccFeatureReader(sample_rate)
    generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
    dump_feature(reader, generator, num, split, nshard, rank, feat_dir)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("tsv_dir")
    parser.add_argument("split")
    parser.add_argument("nshard", type=int)
    parser.add_argument("rank", type=int)
    parser.add_argument("feat_dir")
    parser.add_argument("--sample_rate", type=int, default=16000)
    args = parser.parse_args()
    logger.info(args)

    main(**vars(args))
2,491
30.544304
78
py
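# Sketch of the MFCC + delta + delta-delta stacking that MfccFeatureReader
# performs above, run on a synthetic waveform so no audio file is needed.
# The one-second 16 kHz signal is illustrative.
import torch
import torchaudio

x = torch.randn(1, 16000)                 # fake mono waveform, 16 kHz
mfccs = torchaudio.compliance.kaldi.mfcc(
    waveform=x, sample_frequency=16000, use_energy=False
).transpose(0, 1)                         # (freq, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
feats = torch.cat([mfccs, deltas, ddeltas], dim=0).transpose(0, 1)
print(feats.shape)                        # (time, 39): 13 MFCCs x 3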
sign-topic
sign-topic-main/examples/hubert/simple_kmeans/dump_w2v2_feature.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import fairseq import soundfile as sf import torch import torch.nn.functional as F from feature_utils import get_path_iterator, dump_feature logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("dump_w2v2_feature") class Wav2Vec2FeatureReader(object): def __init__(self, ckpt_path, layer, max_chunk=1600000): ( model, cfg, task, ) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path]) self.model = model[0].eval().cuda() self.task = task self.layer = layer # assume this is 1-based like HuBERT self.max_chunk = max_chunk logger.info(f"TASK CONFIG:\n{self.task.cfg}") logger.info(f" max_chunk = {self.max_chunk}") logger.info(f" model:\n{self.model}") def read_audio(self, path, ref_len=None): wav, sr = sf.read(path) assert sr == self.task.cfg.sample_rate, sr if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim if ref_len is not None and abs(ref_len - len(wav)) > 160: logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") return wav def get_feats(self, path, ref_len=None): x = self.read_audio(path, ref_len) with torch.no_grad(): x = torch.from_numpy(x).float().cuda() if self.task.cfg.normalize: x = F.layer_norm(x, x.shape) x = x.view(1, -1) feat = [] for start in range(0, x.size(1), self.max_chunk): x_chunk = x[:, start: start + self.max_chunk] res = self.model.extract_features( source=x_chunk, padding_mask=None, mask=False, layer=self.layer - 1, ) feat_chunk = res["x"] feat.append(feat_chunk) return torch.cat(feat, 1).squeeze(0) def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk): reader = Wav2Vec2FeatureReader(ckpt_path, layer, max_chunk) generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) dump_feature(reader, generator, num, split, nshard, rank, feat_dir) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("tsv_dir") parser.add_argument("split") parser.add_argument("ckpt_path") parser.add_argument("layer", type=int) parser.add_argument("nshard", type=int) parser.add_argument("rank", type=int) parser.add_argument("feat_dir") parser.add_argument("--max_chunk", type=int, default=1600000) args = parser.parse_args() logger.info(args) main(**vars(args))
3,129
31.604167
78
py
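# Generic sketch of the fixed-size chunking loop the wav2vec 2.0 / HuBERT
# feature readers above use to bound GPU memory: slice the waveform along
# time, extract features per chunk, and concatenate. The "extract" lambda
# is a stand-in for model.extract_features, for illustration only.
import torch

max_chunk = 1600
x = torch.randn(1, 5000)                        # 1 x T waveform
extract = lambda chunk: chunk.unsqueeze(-1)     # stand-in feature extractor
feat = []
for start in range(0, x.size(1), max_chunk):
    feat.append(extract(x[:, start: start + max_chunk]))
out = torch.cat(feat, 1).squeeze(0)
print(out.shape)                                # T x 1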
sign-topic
sign-topic-main/examples/hubert/simple_kmeans/dump_hubert_feature.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import fairseq import soundfile as sf import torch import torch.nn.functional as F from feature_utils import get_path_iterator, dump_feature logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("dump_hubert_feature") class HubertFeatureReader(object): def __init__(self, ckpt_path, layer, max_chunk=1600000): ( model, cfg, task, ) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path]) self.model = model[0].eval().cuda() self.task = task self.layer = layer self.max_chunk = max_chunk logger.info(f"TASK CONFIG:\n{self.task.cfg}") logger.info(f" max_chunk = {self.max_chunk}") def read_audio(self, path, ref_len=None): wav, sr = sf.read(path) assert sr == self.task.cfg.sample_rate, sr if wav.ndim == 2: wav = wav.mean(-1) assert wav.ndim == 1, wav.ndim if ref_len is not None and abs(ref_len - len(wav)) > 160: logging.warning(f"ref {ref_len} != read {len(wav)} ({path})") return wav def get_feats(self, path, ref_len=None): x = self.read_audio(path, ref_len) with torch.no_grad(): x = torch.from_numpy(x).float().cuda() if self.task.cfg.normalize: x = F.layer_norm(x, x.shape) x = x.view(1, -1) feat = [] for start in range(0, x.size(1), self.max_chunk): x_chunk = x[:, start: start + self.max_chunk] feat_chunk, _ = self.model.extract_features( source=x_chunk, padding_mask=None, mask=False, output_layer=self.layer, ) feat.append(feat_chunk) return torch.cat(feat, 1).squeeze(0) def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk): reader = HubertFeatureReader(ckpt_path, layer, max_chunk) generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank) dump_feature(reader, generator, num, split, nshard, rank, feat_dir) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("tsv_dir") parser.add_argument("split") parser.add_argument("ckpt_path") parser.add_argument("layer", type=int) parser.add_argument("nshard", type=int) parser.add_argument("rank", type=int) parser.add_argument("feat_dir") parser.add_argument("--max_chunk", type=int, default=1600000) args = parser.parse_args() logger.info(args) main(**vars(args))
3,018
31.117021
78
py
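# Tiny sketch of the per-utterance waveform normalization applied above
# when task.cfg.normalize is set: a layer norm over the whole 1-D signal,
# i.e. zero mean and unit variance for the utterance. Input is synthetic.
import torch
import torch.nn.functional as F

wav = torch.randn(16000) * 3 + 1
wav = F.layer_norm(wav, wav.shape)
print(round(wav.mean().item(), 4), round(wav.std().item(), 4))  # ~0.0, ~1.0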
sign-topic
sign-topic-main/examples/speech_to_text/prep_covost_data.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging from pathlib import Path import shutil from tempfile import NamedTemporaryFile from typing import Optional, Tuple import pandas as pd import torchaudio from examples.speech_to_text.data_utils import ( create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, ) from torch import Tensor from torch.utils.data import Dataset from torchaudio.datasets.utils import download_url, extract_archive from tqdm import tqdm log = logging.getLogger(__name__) MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"] class CoVoST(Dataset): """Create a Dataset for CoVoST (https://github.com/facebookresearch/covost). Args: root (str): root path to the dataset and generated manifests/features source_language (str): source (audio) language target_language (str, optional): target (text) language, None for no translation (default: None) version (int, optional): CoVoST version. (default: 2) download (bool, optional): Whether to download the dataset if it is not found at root path. (default: ``False``). """ COVOST_URL_TEMPLATE = ( "https://dl.fbaipublicfiles.com/covost/" "covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz" ) VERSIONS = {2} SPLITS = ["train", "dev", "test"] XX_EN_LANGUAGES = { 1: ["fr", "de", "nl", "ru", "es", "it", "tr", "fa", "sv-SE", "mn", "zh-CN"], 2: [ "fr", "de", "es", "ca", "it", "ru", "zh-CN", "pt", "fa", "et", "mn", "nl", "tr", "ar", "sv-SE", "lv", "sl", "ta", "ja", "id", "cy", ], } EN_XX_LANGUAGES = { 1: [], 2: [ "de", "tr", "fa", "sv-SE", "mn", "zh-CN", "cy", "ca", "sl", "et", "id", "ar", "ta", "lv", "ja", ], } def __init__( self, root: str, split: str, source_language: str, target_language: Optional[str] = None, version: int = 2, ) -> None: assert version in self.VERSIONS and split in self.SPLITS assert source_language is not None self.no_translation = target_language is None if not self.no_translation: assert "en" in {source_language, target_language} if source_language == "en": assert target_language in self.EN_XX_LANGUAGES[version] else: assert source_language in self.XX_EN_LANGUAGES[version] else: # Hack here so that we can get "split" column from CoVoST TSV. # Note that we use CoVoST train split for ASR which is an extension # to Common Voice train split. 
target_language = "de" if source_language == "en" else "en" self.root: Path = Path(root) cv_tsv_path = self.root / "validated.tsv" assert cv_tsv_path.is_file() covost_url = self.COVOST_URL_TEMPLATE.format( src_lang=source_language, tgt_lang=target_language ) covost_archive = self.root / Path(covost_url).name if not covost_archive.is_file(): download_url(covost_url, self.root.as_posix(), hash_value=None) extract_archive(covost_archive.as_posix()) cv_tsv = load_df_from_tsv(cv_tsv_path) covost_tsv = load_df_from_tsv( self.root / Path(covost_url).name.replace(".tar.gz", "") ) df = pd.merge( left=cv_tsv[["path", "sentence", "client_id"]], right=covost_tsv[["path", "translation", "split"]], how="inner", on="path", ) if split == "train": df = df[(df["split"] == split) | (df["split"] == f"{split}_covost")] else: df = df[df["split"] == split] data = df.to_dict(orient="index").items() data = [v for k, v in sorted(data, key=lambda x: x[0])] self.data = [] for e in data: try: path = self.root / "clips" / e["path"] _ = torchaudio.info(path.as_posix()) self.data.append(e) except RuntimeError: pass def __getitem__( self, n: int ) -> Tuple[Tensor, int, str, str, Optional[str], str, str]: """Load the n-th sample from the dataset. Args: n (int): The index of the sample to be loaded Returns: tuple: ``(waveform, sample_rate, sentence, translation, speaker_id, sample_id)`` """ data = self.data[n] path = self.root / "clips" / data["path"] waveform, sample_rate = torchaudio.load(path) sentence = data["sentence"] translation = None if self.no_translation else data["translation"] speaker_id = data["client_id"] _id = data["path"].replace(".mp3", "") return waveform, sample_rate, sentence, translation, speaker_id, _id def __len__(self) -> int: return len(self.data) def process(args): root = Path(args.data_root).absolute() / args.src_lang if not root.is_dir(): raise NotADirectoryError(f"{root} does not exist") # Extract features feature_root = root / "fbank80" feature_root.mkdir(exist_ok=True) for split in CoVoST.SPLITS: print(f"Fetching split {split}...") dataset = CoVoST(root, split, args.src_lang, args.tgt_lang) print("Extracting log mel filter bank features...") for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset): extract_fbank_features( waveform, sample_rate, feature_root / f"{utt_id}.npy" ) # Pack features into ZIP zip_path = root / "fbank80.zip" print("ZIPing features...") create_zip(feature_root, zip_path) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest(zip_path) # Generate TSV manifest print("Generating manifest...") train_text = [] task = f"asr_{args.src_lang}" if args.tgt_lang is not None: task = f"st_{args.src_lang}_{args.tgt_lang}" for split in CoVoST.SPLITS: manifest = {c: [] for c in MANIFEST_COLUMNS} dataset = CoVoST(root, split, args.src_lang, args.tgt_lang) for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset): manifest["id"].append(utt_id) manifest["audio"].append(audio_paths[utt_id]) manifest["n_frames"].append(audio_lengths[utt_id]) manifest["tgt_text"].append(src_utt if args.tgt_lang is None else tgt_utt) manifest["speaker"].append(speaker_id) is_train_split = split.startswith("train") if is_train_split: train_text.extend(manifest["tgt_text"]) df = pd.DataFrame.from_dict(manifest) df = filter_manifest_df(df, is_train_split=is_train_split) save_df_to_tsv(df, root / f"{split}_{task}.tsv") # Generate vocab vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{task}" 
with NamedTemporaryFile(mode="w") as f: for t in train_text: f.write(t + "\n") gen_vocab( Path(f.name), root / spm_filename_prefix, args.vocab_type, args.vocab_size ) # Generate config YAML gen_config_yaml( root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{task}.yaml", specaugment_policy="lb", ) # Clean up shutil.rmtree(feature_root) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--data-root", "-d", required=True, type=str, help="data root with sub-folders for each language <root>/<src_lang>" ) parser.add_argument( "--vocab-type", default="unigram", required=True, type=str, choices=["bpe", "unigram", "char"], ), parser.add_argument("--vocab-size", default=1000, type=int) parser.add_argument("--src-lang", "-s", required=True, type=str) parser.add_argument("--tgt-lang", "-t", type=str) args = parser.parse_args() process(args) if __name__ == "__main__": main()
8,909
30.821429
86
py
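# Toy sketch of the inner join prep_covost_data.py performs above between
# the Common Voice validated.tsv and the CoVoST translation TSV, keyed on
# the clip path. Column names mirror the script; the rows are made up.
import pandas as pd

cv_tsv = pd.DataFrame({"path": ["a.mp3", "b.mp3"],
                       "sentence": ["hi", "yo"],
                       "client_id": ["s1", "s2"]})
covost_tsv = pd.DataFrame({"path": ["a.mp3", "b.mp3"],
                           "translation": ["hallo", "hey"],
                           "split": ["train", "test"]})
df = pd.merge(left=cv_tsv[["path", "sentence", "client_id"]],
              right=covost_tsv[["path", "translation", "split"]],
              how="inner", on="path")
print(df[df["split"] == "train"])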
sign-topic
sign-topic-main/examples/speech_to_text/prep_mtedx_data.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os from pathlib import Path import shutil from itertools import groupby from tempfile import NamedTemporaryFile from typing import Tuple import pandas as pd import soundfile as sf from examples.speech_to_text.data_utils import ( create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, ) import torch from torch.utils.data import Dataset from tqdm import tqdm from fairseq.data.audio.audio_utils import get_waveform, convert_waveform log = logging.getLogger(__name__) MANIFEST_COLUMNS = [ "id", "audio", "n_frames", "tgt_text", "speaker", "tgt_lang" ] class mTEDx(Dataset): """ Create a Dataset for Multilingual TEDx. Each item is a tuple of the form: waveform, sample_rate, source utterance, target utterance, speaker_id, utterance_id """ SPLITS = ["train", "valid", "test"] LANGPAIRS = ["es-es", "fr-fr", "pt-pt", "it-it", "ru-ru", "el-el", "ar-ar", "de-de", "es-en", "es-fr", "es-pt", "es-it", "fr-en", "fr-es", "fr-pt", "pt-en", "pt-es", "it-en", "it-es", "ru-en", "el-en"] def __init__(self, root: str, lang: str, split: str) -> None: assert split in self.SPLITS and lang in self.LANGPAIRS _root = Path(root) / f"{lang}" / "data" / split wav_root, txt_root = _root / "wav", _root / "txt" assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir() # Load audio segments try: import yaml except ImportError: print( "Please install PyYAML to load the Multilingual TEDx YAML files" ) with open(txt_root / f"{split}.yaml") as f: segments = yaml.load(f, Loader=yaml.BaseLoader) # Load source and target utterances src, tgt = lang.split("-") for _lang in [src, tgt]: with open(txt_root / f"{split}.{_lang}") as f: utterances = [r.strip() for r in f] assert len(segments) == len(utterances) for i, u in enumerate(utterances): segments[i][_lang] = u # Gather info self.data = [] for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]): wav_filename = wav_filename.replace(".wav", ".flac") wav_path = wav_root / wav_filename sample_rate = sf.info(wav_path.as_posix()).samplerate seg_group = sorted(_seg_group, key=lambda x: float(x["offset"])) for i, segment in enumerate(seg_group): offset = int(float(segment["offset"]) * sample_rate) n_frames = int(float(segment["duration"]) * sample_rate) _id = f"{wav_path.stem}_{i}" self.data.append( ( wav_path.as_posix(), offset, n_frames, sample_rate, segment[src], segment[tgt], segment["speaker_id"], tgt, _id, ) ) def __getitem__( self, n: int ) -> Tuple[torch.Tensor, int, str, str, str, str, str]: wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, tgt_lang, \ utt_id = self.data[n] waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset) waveform = torch.from_numpy(waveform) return waveform, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id def __len__(self) -> int: return len(self.data) def process(args): root = Path(args.data_root).absolute() for lang in mTEDx.LANGPAIRS: cur_root = root / f"{lang}" if not cur_root.is_dir(): print(f"{cur_root.as_posix()} does not exist. 
Skipped.") continue # Extract features audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80") audio_root.mkdir(exist_ok=True) for split in mTEDx.SPLITS: print(f"Fetching split {split}...") dataset = mTEDx(root.as_posix(), lang, split) if args.use_audio_input: print("Converting audios...") for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset): tgt_sample_rate = 16_000 _wavform, _ = convert_waveform( waveform, sample_rate, to_mono=True, to_sample_rate=tgt_sample_rate ) sf.write( (audio_root / f"{utt_id}.flac").as_posix(), _wavform.numpy(), tgt_sample_rate ) else: print("Extracting log mel filter bank features...") for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset): extract_fbank_features( waveform, sample_rate, audio_root / f"{utt_id}.npy" ) # Pack features into ZIP zip_path = cur_root / f"{audio_root.name}.zip" print("ZIPing audios/features...") create_zip(audio_root, zip_path) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest(zip_path) # Generate TSV manifest print("Generating manifest...") train_text = [] for split in mTEDx.SPLITS: is_train_split = split.startswith("train") manifest = {c: [] for c in MANIFEST_COLUMNS} ds = mTEDx(args.data_root, lang, split) for _, _, src_utt, tgt_utt, spk_id, tgt_lang, utt_id in tqdm(ds): manifest["id"].append(utt_id) manifest["audio"].append(audio_paths[utt_id]) manifest["n_frames"].append(audio_lengths[utt_id]) manifest["tgt_text"].append( src_utt if args.task == "asr" else tgt_utt ) manifest["speaker"].append(spk_id) manifest["tgt_lang"].append(tgt_lang) if is_train_split: train_text.extend(manifest["tgt_text"]) df = pd.DataFrame.from_dict(manifest) df = filter_manifest_df(df, is_train_split=is_train_split) save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv") # Generate vocab v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}" with NamedTemporaryFile(mode="w") as f: for t in train_text: f.write(t + "\n") gen_vocab( Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, ) # Generate config YAML if args.use_audio_input: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy=None, extra={"use_audio_input": True} ) else: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy="lb", ) # Clean up shutil.rmtree(audio_root) def process_joint(args): cur_root = Path(args.data_root) assert all((cur_root / f"{lang}").is_dir() for lang in mTEDx.LANGPAIRS), \ "do not have downloaded data available for all languages" # Generate vocab vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}" with NamedTemporaryFile(mode="w") as f: for lang in mTEDx.LANGPAIRS: tsv_path = cur_root / f"{lang}" / f"train_{args.task}.tsv" df = load_df_from_tsv(tsv_path) for t in df["tgt_text"]: f.write(t + "\n") special_symbols = None if args.joint: # Add tgt_lang tags to dict special_symbols = list( {f'<lang:{lang.split("-")[1]}>' for lang in mTEDx.LANGPAIRS} ) gen_vocab( Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, special_symbols=special_symbols ) # Generate config YAML gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy="ld", 
prepend_tgt_lang_tag=(args.joint), ) # Make symbolic links to manifests for lang in mTEDx.LANGPAIRS: for split in mTEDx.SPLITS: src_path = cur_root / f"{lang}" / f"{split}_{args.task}.tsv" desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv" if not desc_path.is_symlink(): os.symlink(src_path, desc_path) def main(): parser = argparse.ArgumentParser() parser.add_argument("--data-root", "-d", required=True, type=str) parser.add_argument( "--vocab-type", default="unigram", required=True, type=str, choices=["bpe", "unigram", "char"], ), parser.add_argument("--vocab-size", default=8000, type=int) parser.add_argument("--task", type=str, choices=["asr", "st"]) parser.add_argument("--joint", action="store_true", help="") parser.add_argument("--use-audio-input", action="store_true") args = parser.parse_args() if args.joint: process_joint(args) else: process(args) if __name__ == "__main__": main()
10,168
36.386029
80
py
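# Small sketch of the segment bookkeeping in prep_mtedx_data.py above:
# YAML-style segment dicts are grouped by their source wav and each
# offset/duration in seconds is converted to sample offsets/counts at the
# file's sample rate. The segment values are illustrative.
from itertools import groupby

segments = [
    {"wav": "talk1.wav", "offset": "0.0", "duration": "2.5"},
    {"wav": "talk1.wav", "offset": "2.5", "duration": "1.0"},
]
sample_rate = 16000
for wav, seg_group in groupby(segments, lambda x: x["wav"]):
    ordered = sorted(seg_group, key=lambda x: float(x["offset"]))
    for i, seg in enumerate(ordered):
        offset = int(float(seg["offset"]) * sample_rate)
        n_frames = int(float(seg["duration"]) * sample_rate)
        print(f"{wav}_{i}", offset, n_frames)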
sign-topic
sign-topic-main/examples/speech_to_text/prep_librispeech_data.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging from pathlib import Path import shutil from tempfile import NamedTemporaryFile import pandas as pd from examples.speech_to_text.data_utils import ( create_zip, extract_fbank_features, gen_config_yaml, gen_vocab, get_zip_manifest, save_df_to_tsv, ) from torchaudio.datasets import LIBRISPEECH from tqdm import tqdm log = logging.getLogger(__name__) SPLITS = [ "train-clean-100", "train-clean-360", "train-other-500", "dev-clean", "dev-other", "test-clean", "test-other", ] MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"] def process(args): out_root = Path(args.output_root).absolute() out_root.mkdir(exist_ok=True) # Extract features feature_root = out_root / "fbank80" feature_root.mkdir(exist_ok=True) for split in SPLITS: print(f"Fetching split {split}...") dataset = LIBRISPEECH(out_root.as_posix(), url=split, download=True) print("Extracting log mel filter bank features...") for wav, sample_rate, _, spk_id, chapter_no, utt_no in tqdm(dataset): sample_id = f"{spk_id}-{chapter_no}-{utt_no}" extract_fbank_features( wav, sample_rate, feature_root / f"{sample_id}.npy" ) # Pack features into ZIP zip_path = out_root / "fbank80.zip" print("ZIPing features...") create_zip(feature_root, zip_path) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest(zip_path) # Generate TSV manifest print("Generating manifest...") train_text = [] for split in SPLITS: manifest = {c: [] for c in MANIFEST_COLUMNS} dataset = LIBRISPEECH(out_root.as_posix(), url=split) for _, _, utt, spk_id, chapter_no, utt_no in tqdm(dataset): sample_id = f"{spk_id}-{chapter_no}-{utt_no}" manifest["id"].append(sample_id) manifest["audio"].append(audio_paths[sample_id]) manifest["n_frames"].append(audio_lengths[sample_id]) manifest["tgt_text"].append(utt.lower()) manifest["speaker"].append(spk_id) save_df_to_tsv( pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv" ) if split.startswith("train"): train_text.extend(manifest["tgt_text"]) # Generate vocab vocab_size = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size}" with NamedTemporaryFile(mode="w") as f: for t in train_text: f.write(t + "\n") gen_vocab( Path(f.name), out_root / spm_filename_prefix, args.vocab_type, args.vocab_size, ) # Generate config YAML gen_config_yaml( out_root, spm_filename=spm_filename_prefix + ".model", specaugment_policy="ld" ) # Clean up shutil.rmtree(feature_root) def main(): parser = argparse.ArgumentParser() parser.add_argument("--output-root", "-o", required=True, type=str) parser.add_argument( "--vocab-type", default="unigram", required=True, type=str, choices=["bpe", "unigram", "char"], ), parser.add_argument("--vocab-size", default=10000, type=int) args = parser.parse_args() process(args) if __name__ == "__main__": main()
3,623
29.2
77
py
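# Toy sketch of the manifest-building pattern shared by the prep scripts
# above: accumulate per-column lists keyed by MANIFEST_COLUMNS, then turn
# them into a DataFrame and write a tab-separated manifest. The row values
# and output path are made up.
import pandas as pd

MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
manifest = {c: [] for c in MANIFEST_COLUMNS}
manifest["id"].append("19-198-0001")
manifest["audio"].append("fbank80.zip:12345:67890")
manifest["n_frames"].append(512)
manifest["tgt_text"].append("hello world")
manifest["speaker"].append("19")
df = pd.DataFrame.from_dict(manifest)
df.to_csv("/tmp/train.tsv", sep="\t", index=False)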
sign-topic
sign-topic-main/examples/speech_to_text/data_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import csv from pathlib import Path import zipfile from functools import reduce from multiprocessing import cpu_count from typing import Any, Dict, List, Optional, Union import io import numpy as np import pandas as pd import sentencepiece as sp from fairseq.data.audio.audio_utils import ( convert_waveform, _get_kaldi_fbank, _get_torchaudio_fbank, is_npy_data, is_sf_audio_data ) import torch import soundfile as sf from tqdm import tqdm UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3 BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0 EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2 PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1 def gen_vocab( input_path: Path, output_path_prefix: Path, model_type="bpe", vocab_size=1000, special_symbols: Optional[List[str]] = None ): # Train SentencePiece Model arguments = [ f"--input={input_path.as_posix()}", f"--model_prefix={output_path_prefix.as_posix()}", f"--model_type={model_type}", f"--vocab_size={vocab_size}", "--character_coverage=1.0", f"--num_threads={cpu_count()}", f"--unk_id={UNK_TOKEN_ID}", f"--bos_id={BOS_TOKEN_ID}", f"--eos_id={EOS_TOKEN_ID}", f"--pad_id={PAD_TOKEN_ID}", ] if special_symbols is not None: _special_symbols = ",".join(special_symbols) arguments.append(f"--user_defined_symbols={_special_symbols}") sp.SentencePieceTrainer.Train(" ".join(arguments)) # Export fairseq dictionary spm = sp.SentencePieceProcessor() spm.Load(output_path_prefix.as_posix() + ".model") vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())} assert ( vocab.get(UNK_TOKEN_ID) == UNK_TOKEN and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN ) vocab = { i: s for i, s in vocab.items() if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN} } with open(output_path_prefix.as_posix() + ".txt", "w") as f_out: for _, s in sorted(vocab.items(), key=lambda x: x[0]): f_out.write(f"{s} 1\n") def extract_fbank_features( waveform: torch.FloatTensor, sample_rate: int, output_path: Optional[Path] = None, n_mel_bins: int = 80, overwrite: bool = False, ): if output_path is not None and output_path.is_file() and not overwrite: return _waveform, _ = convert_waveform(waveform, sample_rate, to_mono=True) # Kaldi compliance: 16-bit signed integers _waveform = _waveform * (2 ** 15) _waveform = _waveform[0].numpy() features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins) if features is None: features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins) if features is None: raise ImportError( "Please install pyKaldi or torchaudio to enable fbank feature extraction" ) if output_path is not None: np.save(output_path.as_posix(), features) return features def create_zip(data_root: Path, zip_path: Path): paths = list(data_root.glob("*.npy")) paths.extend(data_root.glob("*.flac")) with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f: for path in tqdm(paths): f.write(path, arcname=path.name) def get_zip_manifest( zip_path: Path, zip_root: Optional[Path] = None, is_audio=False ): _zip_path = Path.joinpath(zip_root or Path(""), zip_path) with zipfile.ZipFile(_zip_path, mode="r") as f: info = f.infolist() paths, lengths = {}, {} for i in tqdm(info): utt_id = Path(i.filename).stem offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}" with open(_zip_path, "rb") as f: f.seek(offset) byte_data 
= f.read(file_size) assert len(byte_data) > 1 if is_audio: assert is_sf_audio_data(byte_data), i else: assert is_npy_data(byte_data), i byte_data_fp = io.BytesIO(byte_data) if is_audio: lengths[utt_id] = sf.info(byte_data_fp).frames else: lengths[utt_id] = np.load(byte_data_fp).shape[0] return paths, lengths def gen_config_yaml( manifest_root: Path, spm_filename: Optional[str] = None, vocab_name: Optional[str] = None, yaml_filename: str = "config.yaml", specaugment_policy: Optional[str] = "lb", prepend_tgt_lang_tag: bool = False, sampling_alpha: Optional[float] = None, input_channels: Optional[int] = 1, input_feat_per_channel: Optional[int] = 80, audio_root: str = "", cmvn_type: str = "utterance", gcmvn_path: Optional[Path] = None, extra=None ): manifest_root = manifest_root.absolute() writer = S2TDataConfigWriter(manifest_root / yaml_filename) assert spm_filename is not None or vocab_name is not None vocab_name = spm_filename.replace(".model", ".txt") if vocab_name is None \ else vocab_name writer.set_vocab_filename(vocab_name) if input_channels is not None: writer.set_input_channels(input_channels) if input_feat_per_channel is not None: writer.set_input_feat_per_channel(input_feat_per_channel) specaugment_setters = { "lb": writer.set_specaugment_lb_policy, "ld": writer.set_specaugment_ld_policy, "sm": writer.set_specaugment_sm_policy, "ss": writer.set_specaugment_ss_policy, } specaugment_setter = specaugment_setters.get(specaugment_policy, None) if specaugment_setter is not None: specaugment_setter() if spm_filename is not None: writer.set_bpe_tokenizer( { "bpe": "sentencepiece", "sentencepiece_model": (manifest_root / spm_filename).as_posix(), } ) if prepend_tgt_lang_tag: writer.set_prepend_tgt_lang_tag(True) if sampling_alpha is not None: writer.set_sampling_alpha(sampling_alpha) if cmvn_type not in ["global", "utterance"]: raise NotImplementedError if specaugment_policy is not None: writer.set_feature_transforms( "_train", [f"{cmvn_type}_cmvn", "specaugment"] ) writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"]) if cmvn_type == "global": if gcmvn_path is None: raise ValueError("Please provide path of global cmvn file.") else: writer.set_global_cmvn(gcmvn_path.as_posix()) if len(audio_root) > 0: writer.set_audio_root(audio_root) if extra is not None: writer.set_extra(extra) writer.flush() def load_df_from_tsv(path: Union[str, Path]) -> pd.DataFrame: _path = path if isinstance(path, str) else path.as_posix() return pd.read_csv( _path, sep="\t", header=0, encoding="utf-8", escapechar="\\", quoting=csv.QUOTE_NONE, na_filter=False, ) def save_df_to_tsv(dataframe, path: Union[str, Path]): _path = path if isinstance(path, str) else path.as_posix() dataframe.to_csv( _path, sep="\t", header=True, index=False, encoding="utf-8", escapechar="\\", quoting=csv.QUOTE_NONE, ) def load_tsv_to_dicts(path: Union[str, Path]) -> List[dict]: with open(path, "r") as f: reader = csv.DictReader( f, delimiter="\t", quotechar=None, doublequote=False, lineterminator="\n", quoting=csv.QUOTE_NONE, ) rows = [dict(e) for e in reader] return rows def filter_manifest_df( df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000 ): filters = { "no speech": df["audio"] == "", f"short speech (<{min_n_frames} frames)": df["n_frames"] < min_n_frames, "empty sentence": df["tgt_text"] == "", } if is_train_split: filters[f"long speech (>{max_n_frames} frames)"] = df["n_frames"] > max_n_frames if extra_filters is not None: filters.update(extra_filters) invalid = reduce(lambda x, y: x | y, 
filters.values()) valid = ~invalid print( "| " + ", ".join(f"{n}: {f.sum()}" for n, f in filters.items()) + f", total {invalid.sum()} filtered, {valid.sum()} remained." ) return df[valid] def cal_gcmvn_stats(features_list): features = np.concatenate(features_list) square_sums = (features ** 2).sum(axis=0) mean = features.mean(axis=0) features = np.subtract(features, mean) var = square_sums / features.shape[0] - mean ** 2 std = np.sqrt(np.maximum(var, 1e-8)) return {"mean": mean.astype("float32"), "std": std.astype("float32")} class S2TDataConfigWriter(object): DEFAULT_VOCAB_FILENAME = "dict.txt" DEFAULT_INPUT_FEAT_PER_CHANNEL = 80 DEFAULT_INPUT_CHANNELS = 1 def __init__(self, yaml_path: Path): try: import yaml except ImportError: print("Please install PyYAML for S2T data config YAML files") self.yaml = yaml self.yaml_path = yaml_path self.config = {} def flush(self): with open(self.yaml_path, "w") as f: self.yaml.dump(self.config, f) def set_audio_root(self, audio_root=""): self.config["audio_root"] = audio_root def set_vocab_filename(self, vocab_filename: str = "dict.txt"): self.config["vocab_filename"] = vocab_filename def set_specaugment( self, time_wrap_w: int, freq_mask_n: int, freq_mask_f: int, time_mask_n: int, time_mask_t: int, time_mask_p: float, ): self.config["specaugment"] = { "time_wrap_W": time_wrap_w, "freq_mask_N": freq_mask_n, "freq_mask_F": freq_mask_f, "time_mask_N": time_mask_n, "time_mask_T": time_mask_t, "time_mask_p": time_mask_p, } def set_specaugment_lb_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=1, freq_mask_f=27, time_mask_n=1, time_mask_t=100, time_mask_p=1.0, ) def set_specaugment_ld_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=2, freq_mask_f=27, time_mask_n=2, time_mask_t=100, time_mask_p=1.0, ) def set_specaugment_sm_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=2, freq_mask_f=15, time_mask_n=2, time_mask_t=70, time_mask_p=0.2, ) def set_specaugment_ss_policy(self): self.set_specaugment( time_wrap_w=0, freq_mask_n=2, freq_mask_f=27, time_mask_n=2, time_mask_t=70, time_mask_p=0.2, ) def set_input_channels(self, input_channels: int = 1): self.config["input_channels"] = input_channels def set_input_feat_per_channel(self, input_feat_per_channel: int = 80): self.config["input_feat_per_channel"] = input_feat_per_channel def set_bpe_tokenizer(self, bpe_tokenizer: Dict[str, Any]): self.config["bpe_tokenizer"] = bpe_tokenizer def set_global_cmvn(self, stats_npz_path: str): self.config["global_cmvn"] = {"stats_npz_path": stats_npz_path} def set_feature_transforms(self, split: str, transforms: List[str]): if "transforms" not in self.config: self.config["transforms"] = {} self.config["transforms"][split] = transforms def set_prepend_tgt_lang_tag(self, flag: bool = True): self.config["prepend_tgt_lang_tag"] = flag def set_sampling_alpha(self, sampling_alpha: float = 1.0): self.config["sampling_alpha"] = sampling_alpha def set_extra(self, data): self.config.update(data)
12,275
30.96875
88
py
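# Standalone sketch of the stored-ZIP byte-offset trick behind
# get_zip_manifest above: for ZIP_STORED members the payload starts at
# header_offset + 30 + len(filename) (fixed local-header size plus the
# name, assuming no extra field), so features can later be read with a
# plain seek instead of the zipfile module. The member name and payload
# below are made up.
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w", zipfile.ZIP_STORED) as zf:
    zf.writestr("utt1.npy", b"fake-payload")
with zipfile.ZipFile(buf, "r") as zf:
    i = zf.infolist()[0]
    offset = i.header_offset + 30 + len(i.filename)
buf.seek(offset)
assert buf.read(i.file_size) == b"fake-payload"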
sign-topic
sign-topic-main/examples/speech_to_text/prep_mustc_data.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import os from pathlib import Path import shutil from itertools import groupby from tempfile import NamedTemporaryFile from typing import Tuple import numpy as np import pandas as pd import soundfile as sf from examples.speech_to_text.data_utils import ( create_zip, extract_fbank_features, filter_manifest_df, gen_config_yaml, gen_vocab, get_zip_manifest, load_df_from_tsv, save_df_to_tsv, cal_gcmvn_stats, ) import torch from torch.utils.data import Dataset from tqdm import tqdm from fairseq.data.audio.audio_utils import get_waveform, convert_waveform log = logging.getLogger(__name__) MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"] class MUSTC(Dataset): """ Create a Dataset for MuST-C. Each item is a tuple of the form: waveform, sample_rate, source utterance, target utterance, speaker_id, utterance_id """ SPLITS = ["train", "dev", "tst-COMMON", "tst-HE"] LANGUAGES = ["de", "es", "fr", "it", "nl", "pt", "ro", "ru"] def __init__(self, root: str, lang: str, split: str) -> None: assert split in self.SPLITS and lang in self.LANGUAGES _root = Path(root) / f"en-{lang}" / "data" / split wav_root, txt_root = _root / "wav", _root / "txt" assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir() # Load audio segments try: import yaml except ImportError: print("Please install PyYAML to load the MuST-C YAML files") with open(txt_root / f"{split}.yaml") as f: segments = yaml.load(f, Loader=yaml.BaseLoader) # Load source and target utterances for _lang in ["en", lang]: with open(txt_root / f"{split}.{_lang}") as f: utterances = [r.strip() for r in f] assert len(segments) == len(utterances) for i, u in enumerate(utterances): segments[i][_lang] = u # Gather info self.data = [] for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]): wav_path = wav_root / wav_filename sample_rate = sf.info(wav_path.as_posix()).samplerate seg_group = sorted(_seg_group, key=lambda x: x["offset"]) for i, segment in enumerate(seg_group): offset = int(float(segment["offset"]) * sample_rate) n_frames = int(float(segment["duration"]) * sample_rate) _id = f"{wav_path.stem}_{i}" self.data.append( ( wav_path.as_posix(), offset, n_frames, sample_rate, segment["en"], segment[lang], segment["speaker_id"], _id, ) ) def __getitem__( self, n: int ) -> Tuple[torch.Tensor, int, str, str, str, str]: wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, \ utt_id = self.data[n] waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset) waveform = torch.from_numpy(waveform) return waveform, sr, src_utt, tgt_utt, spk_id, utt_id def __len__(self) -> int: return len(self.data) def process(args): root = Path(args.data_root).absolute() for lang in MUSTC.LANGUAGES: cur_root = root / f"en-{lang}" if not cur_root.is_dir(): print(f"{cur_root.as_posix()} does not exist. 
Skipped.") continue # Extract features audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80") audio_root.mkdir(exist_ok=True) for split in MUSTC.SPLITS: print(f"Fetching split {split}...") dataset = MUSTC(root.as_posix(), lang, split) if args.use_audio_input: print("Converting audios...") for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset): tgt_sample_rate = 16_000 _wavform, _ = convert_waveform( waveform, sample_rate, to_mono=True, to_sample_rate=tgt_sample_rate ) sf.write( (audio_root / f"{utt_id}.flac").as_posix(), _wavform.T.numpy(), tgt_sample_rate ) else: print("Extracting log mel filter bank features...") gcmvn_feature_list = [] if split == 'train' and args.cmvn_type == "global": print("And estimating cepstral mean and variance stats...") for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset): features = extract_fbank_features( waveform, sample_rate, audio_root / f"{utt_id}.npy" ) if split == 'train' and args.cmvn_type == "global": if len(gcmvn_feature_list) < args.gcmvn_max_num: gcmvn_feature_list.append(features) if split == 'train' and args.cmvn_type == "global": # Estimate and save cmv stats = cal_gcmvn_stats(gcmvn_feature_list) with open(cur_root / "gcmvn.npz", "wb") as f: np.savez(f, mean=stats["mean"], std=stats["std"]) # Pack features into ZIP zip_path = cur_root / f"{audio_root.name}.zip" print("ZIPing audios/features...") create_zip(audio_root, zip_path) print("Fetching ZIP manifest...") audio_paths, audio_lengths = get_zip_manifest( zip_path, is_audio=args.use_audio_input, ) # Generate TSV manifest print("Generating manifest...") train_text = [] for split in MUSTC.SPLITS: is_train_split = split.startswith("train") manifest = {c: [] for c in MANIFEST_COLUMNS} dataset = MUSTC(args.data_root, lang, split) for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset): manifest["id"].append(utt_id) manifest["audio"].append(audio_paths[utt_id]) manifest["n_frames"].append(audio_lengths[utt_id]) manifest["tgt_text"].append( src_utt if args.task == "asr" else tgt_utt ) manifest["speaker"].append(speaker_id) if is_train_split: train_text.extend(manifest["tgt_text"]) df = pd.DataFrame.from_dict(manifest) df = filter_manifest_df(df, is_train_split=is_train_split) save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv") # Generate vocab v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}" with NamedTemporaryFile(mode="w") as f: for t in train_text: f.write(t + "\n") gen_vocab( Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, ) # Generate config YAML if args.use_audio_input: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy=None, extra={"use_audio_input": True} ) else: gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy="lb", cmvn_type=args.cmvn_type, gcmvn_path=( cur_root / "gcmvn.npz" if args.cmvn_type == "global" else None ), ) # Clean up shutil.rmtree(audio_root) def process_joint(args): cur_root = Path(args.data_root) assert all( (cur_root / f"en-{lang}").is_dir() for lang in MUSTC.LANGUAGES ), "do not have downloaded data available for all 8 languages" # Generate vocab vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size) spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}" with NamedTemporaryFile(mode="w") as f: for lang in 
MUSTC.LANGUAGES: tsv_path = cur_root / f"en-{lang}" / f"train_{args.task}.tsv" df = load_df_from_tsv(tsv_path) for t in df["tgt_text"]: f.write(t + "\n") special_symbols = None if args.task == 'st': special_symbols = [f'<lang:{lang}>' for lang in MUSTC.LANGUAGES] gen_vocab( Path(f.name), cur_root / spm_filename_prefix, args.vocab_type, args.vocab_size, special_symbols=special_symbols ) # Generate config YAML gen_config_yaml( cur_root, spm_filename=spm_filename_prefix + ".model", yaml_filename=f"config_{args.task}.yaml", specaugment_policy="ld", prepend_tgt_lang_tag=(args.task == "st"), ) # Make symbolic links to manifests for lang in MUSTC.LANGUAGES: for split in MUSTC.SPLITS: src_path = cur_root / f"en-{lang}" / f"{split}_{args.task}.tsv" desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv" if not desc_path.is_symlink(): os.symlink(src_path, desc_path) def main(): parser = argparse.ArgumentParser() parser.add_argument("--data-root", "-d", required=True, type=str) parser.add_argument( "--vocab-type", default="unigram", required=True, type=str, choices=["bpe", "unigram", "char"], ), parser.add_argument("--vocab-size", default=8000, type=int) parser.add_argument("--task", type=str, choices=["asr", "st"]) parser.add_argument("--joint", action="store_true", help="") parser.add_argument( "--cmvn-type", default="utterance", choices=["global", "utterance"], help="The type of cepstral mean and variance normalization" ) parser.add_argument( "--gcmvn-max-num", default=150000, type=int, help="Maximum number of sentences to use to estimate global mean and " "variance" ) parser.add_argument("--use-audio-input", action="store_true") args = parser.parse_args() if args.joint: process_joint(args) else: process(args) if __name__ == "__main__": main()
11,080
36.562712
79
py
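A small sketch of reading one item from the MUSTC dataset class above; the data root is an assumption, and the MuST-C en-de package must already be unpacked in the expected <root>/en-de/data/<split>/{wav,txt} layout.

from examples.speech_to_text.prep_mustc_data import MUSTC

dataset = MUSTC("/data/mustc", lang="de", split="dev")  # assumed root path
waveform, sample_rate, src_utt, tgt_utt, speaker_id, utt_id = dataset[0]
print(utt_id, sample_rate, tuple(waveform.shape))  # e.g. a (1, n_samples) tensor
print("EN:", src_utt)
print("DE:", tgt_utt)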
sign-topic
sign-topic-main/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py
import math import os import json import numpy as np import torch import torchaudio.compliance.kaldi as kaldi import yaml from fairseq import checkpoint_utils, tasks from fairseq.file_io import PathManager try: from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS from simuleval.agents import SpeechAgent from simuleval.states import ListEntry, SpeechStates except ImportError: print("Please install simuleval 'pip install simuleval'") SHIFT_SIZE = 10 WINDOW_SIZE = 25 SAMPLE_RATE = 16000 FEATURE_DIM = 80 BOW_PREFIX = "\u2581" class OnlineFeatureExtractor: """ Extract speech feature on the fly. """ def __init__(self, args): self.shift_size = args.shift_size self.window_size = args.window_size assert self.window_size >= self.shift_size self.sample_rate = args.sample_rate self.feature_dim = args.feature_dim self.num_samples_per_shift = int(self.shift_size * self.sample_rate / 1000) self.num_samples_per_window = int(self.window_size * self.sample_rate / 1000) self.len_ms_to_samples = lambda x: x * self.sample_rate / 1000 self.previous_residual_samples = [] self.global_cmvn = args.global_cmvn def clear_cache(self): self.previous_residual_samples = [] def __call__(self, new_samples): samples = self.previous_residual_samples + new_samples if len(samples) < self.num_samples_per_window: self.previous_residual_samples = samples return # num_frames is the number of frames from the new segment num_frames = math.floor( (len(samples) - self.len_ms_to_samples(self.window_size - self.shift_size)) / self.num_samples_per_shift ) # the number of frames used for feature extraction # including some part of thte previous segment effective_num_samples = int( num_frames * self.len_ms_to_samples(self.shift_size) + self.len_ms_to_samples(self.window_size - self.shift_size) ) input_samples = samples[:effective_num_samples] self.previous_residual_samples = samples[ num_frames * self.num_samples_per_shift: ] torch.manual_seed(1) output = kaldi.fbank( torch.FloatTensor(input_samples).unsqueeze(0), num_mel_bins=self.feature_dim, frame_length=self.window_size, frame_shift=self.shift_size, ).numpy() output = self.transform(output) return torch.from_numpy(output) def transform(self, input): if self.global_cmvn is None: return input mean = self.global_cmvn["mean"] std = self.global_cmvn["std"] x = np.subtract(input, mean) x = np.divide(x, std) return x class TensorListEntry(ListEntry): """ Data structure to store a list of tensor. 
""" def append(self, value): if len(self.value) == 0: self.value = value return self.value = torch.cat([self.value] + [value], dim=0) def info(self): return { "type": str(self.new_value_type), "length": self.__len__(), "value": "" if type(self.value) is list else self.value.size(), } class FairseqSimulSTAgent(SpeechAgent): speech_segment_size = 40 # in ms, 4 pooling ratio * 10 ms step size def __init__(self, args): super().__init__(args) self.eos = DEFAULT_EOS self.gpu = getattr(args, "gpu", False) self.args = args self.load_model_vocab(args) if getattr( self.model.decoder.layers[0].encoder_attn, 'pre_decision_ratio', None ) is not None: self.speech_segment_size *= ( self.model.decoder.layers[0].encoder_attn.pre_decision_ratio ) args.global_cmvn = None if args.config: with open(os.path.join(args.data_bin, args.config), "r") as f: config = yaml.load(f, Loader=yaml.BaseLoader) if "global_cmvn" in config: args.global_cmvn = np.load(config["global_cmvn"]["stats_npz_path"]) if args.global_stats: with PathManager.open(args.global_stats, "r") as f: global_cmvn = json.loads(f.read()) self.global_cmvn = {"mean": global_cmvn["mean"], "std": global_cmvn["stddev"]} self.feature_extractor = OnlineFeatureExtractor(args) self.max_len = args.max_len self.force_finish = args.force_finish torch.set_grad_enabled(False) def build_states(self, args, client, sentence_id): # Initialize states here, for example add customized entry to states # This function will be called at beginning of every new sentence states = SpeechStates(args, client, sentence_id, self) self.initialize_states(states) return states def to_device(self, tensor): if self.gpu: return tensor.cuda() else: return tensor.cpu() @staticmethod def add_args(parser): # fmt: off parser.add_argument('--model-path', type=str, required=True, help='path to your pretrained model.') parser.add_argument("--data-bin", type=str, required=True, help="Path of data binary") parser.add_argument("--config", type=str, default=None, help="Path to config yaml file") parser.add_argument("--global-stats", type=str, default=None, help="Path to json file containing cmvn stats") parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece", help="Subword splitter type for target text") parser.add_argument("--tgt-splitter-path", type=str, default=None, help="Subword splitter model path for target text") parser.add_argument("--user-dir", type=str, default="examples/simultaneous_translation", help="User directory for simultaneous translation") parser.add_argument("--max-len", type=int, default=200, help="Max length of translation") parser.add_argument("--force-finish", default=False, action="store_true", help="Force the model to finish the hypothsis if the source is not finished") parser.add_argument("--shift-size", type=int, default=SHIFT_SIZE, help="Shift size of feature extraction window.") parser.add_argument("--window-size", type=int, default=WINDOW_SIZE, help="Window size of feature extraction window.") parser.add_argument("--sample-rate", type=int, default=SAMPLE_RATE, help="Sample rate") parser.add_argument("--feature-dim", type=int, default=FEATURE_DIM, help="Acoustic feature dimension.") # fmt: on return parser def load_model_vocab(self, args): filename = args.model_path if not os.path.exists(filename): raise IOError("Model file not found: {}".format(filename)) state = checkpoint_utils.load_checkpoint_to_cpu(filename) task_args = state["cfg"]["task"] task_args.data = args.data_bin if args.config is not None: task_args.config_yaml = args.config task = 
tasks.setup_task(task_args) # build model for ensemble state["cfg"]["model"].load_pretrained_encoder_from = None state["cfg"]["model"].load_pretrained_decoder_from = None self.model = task.build_model(state["cfg"]["model"]) self.model.load_state_dict(state["model"], strict=True) self.model.eval() self.model.share_memory() if self.gpu: self.model.cuda() # Set dictionary self.dict = {} self.dict["tgt"] = task.target_dictionary def initialize_states(self, states): self.feature_extractor.clear_cache() states.units.source = TensorListEntry() states.units.target = ListEntry() states.incremental_states = dict() def segment_to_units(self, segment, states): # Convert speech samples to features features = self.feature_extractor(segment) if features is not None: return [features] else: return [] def units_to_segment(self, units, states): # Merge sub word to full word. if self.model.decoder.dictionary.eos() == units[0]: return DEFAULT_EOS segment = [] if None in units.value: units.value.remove(None) for index in units: if index is None: units.pop() token = self.model.decoder.dictionary.string([index]) if token.startswith(BOW_PREFIX): if len(segment) == 0: segment += [token.replace(BOW_PREFIX, "")] else: for j in range(len(segment)): units.pop() string_to_return = ["".join(segment)] if self.model.decoder.dictionary.eos() == units[0]: string_to_return += [DEFAULT_EOS] return string_to_return else: segment += [token.replace(BOW_PREFIX, "")] if ( len(units) > 0 and self.model.decoder.dictionary.eos() == units[-1] or len(states.units.target) > self.max_len ): tokens = [self.model.decoder.dictionary.string([unit]) for unit in units] return ["".join(tokens).replace(BOW_PREFIX, "")] + [DEFAULT_EOS] return None def update_model_encoder(self, states): if len(states.units.source) == 0: return src_indices = self.to_device( states.units.source.value.unsqueeze(0) ) src_lengths = self.to_device( torch.LongTensor([states.units.source.value.size(0)]) ) states.encoder_states = self.model.encoder(src_indices, src_lengths) torch.cuda.empty_cache() def update_states_read(self, states): # Happens after a read action. self.update_model_encoder(states) def policy(self, states): if not getattr(states, "encoder_states", None): return READ_ACTION tgt_indices = self.to_device( torch.LongTensor( [self.model.decoder.dictionary.eos()] + [x for x in states.units.target.value if x is not None] ).unsqueeze(0) ) states.incremental_states["steps"] = { "src": states.encoder_states["encoder_out"][0].size(0), "tgt": 1 + len(states.units.target), } states.incremental_states["online"] = {"only": torch.tensor(not states.finish_read())} x, outputs = self.model.decoder.forward( prev_output_tokens=tgt_indices, encoder_out=states.encoder_states, incremental_state=states.incremental_states, ) states.decoder_out = x states.decoder_out_extra = outputs torch.cuda.empty_cache() if outputs.action == 0: return READ_ACTION else: return WRITE_ACTION def predict(self, states): decoder_states = states.decoder_out lprobs = self.model.get_normalized_probs( [decoder_states[:, -1:]], log_probs=True ) index = lprobs.argmax(dim=-1) index = index[0, 0].item() if ( self.force_finish and index == self.model.decoder.dictionary.eos() and not states.finish_read() ): # If we want to force finish the translation # (don't stop before finish reading), return a None # self.model.decoder.clear_cache(states.incremental_states) index = None return index
12,193
32.5
105
py
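A minimal sketch of streaming audio through the OnlineFeatureExtractor defined above; the namespace mirrors the module defaults (10 ms shift, 25 ms window, 16 kHz, 80-dim fbank), the random chunk stands in for real microphone samples, and torchaudio is required for the kaldi fbank call.

from types import SimpleNamespace
import numpy as np

args = SimpleNamespace(
    shift_size=10, window_size=25,      # ms, matching SHIFT_SIZE / WINDOW_SIZE
    sample_rate=16000, feature_dim=80,  # matching SAMPLE_RATE / FEATURE_DIM
    global_cmvn=None,                   # skip CMVN normalization in this sketch
)
extractor = OnlineFeatureExtractor(args)

chunk = np.random.randn(640).tolist()  # 40 ms of fake audio at 16 kHz, as a list
features = extractor(chunk)            # (n_frames, 80) torch.Tensor, or None when
if features is not None:               # fewer samples than one window have arrived
    print(features.shape)              # torch.Size([2, 80]) for this chunk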
sign-topic
sign-topic-main/examples/roberta/commonsense_qa/commonsense_qa_task.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import numpy as np import torch from fairseq.data import ( Dictionary, IdDataset, ListDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, RawLabelDataset, RightPadDataset, SortDataset, data_utils, encoders, ) from fairseq.tasks import LegacyFairseqTask, register_task @register_task("commonsense_qa") class CommonsenseQATask(LegacyFairseqTask): """Task to finetune RoBERTa for Commonsense QA.""" @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument( "data", metavar="DIR", help="path to data directory; we load <split>.jsonl" ) parser.add_argument( "--init-token", type=int, default=None, help="add token at the beginning of each batch item", ) parser.add_argument("--num-classes", type=int, default=5) def __init__(self, args, vocab): super().__init__(args) self.vocab = vocab self.mask = vocab.add_symbol("<mask>") self.bpe = encoders.build_bpe(args) @classmethod def load_dictionary(cls, filename): """Load the dictionary from the filename Args: filename (str): the filename """ dictionary = Dictionary.load(filename) dictionary.add_symbol("<mask>") return dictionary @classmethod def setup_task(cls, args, **kwargs): assert ( args.criterion == "sentence_ranking" ), "Must set --criterion=sentence_ranking" # load data and label dictionaries vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) print("| dictionary: {} types".format(len(vocab))) return cls(args, vocab) def load_dataset( self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs ): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ def binarize(s, append_bos=False): if self.bpe is not None: s = self.bpe.encode(s) tokens = self.vocab.encode_line( s, append_eos=True, add_if_not_exist=False, ).long() if append_bos and self.args.init_token is not None: tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) return tokens if data_path is None: data_path = os.path.join(self.args.data, split + ".jsonl") if not os.path.exists(data_path): raise FileNotFoundError("Cannot find data: {}".format(data_path)) src_tokens = [[] for i in range(self.args.num_classes)] src_lengths = [[] for i in range(self.args.num_classes)] labels = [] with open(data_path) as h: for line in h: example = json.loads(line.strip()) if "answerKey" in example: label = ord(example["answerKey"]) - ord("A") labels.append(label) question = example["question"]["stem"] assert len(example["question"]["choices"]) == self.args.num_classes # format: `<s> Q: Where would I not want a fox? 
</s> A: hen house </s>` question = "Q: " + question question_toks = binarize(question, append_bos=True) for i, choice in enumerate(example["question"]["choices"]): src = "A: " + choice["text"] src_bin = torch.cat([question_toks, binarize(src)]) src_tokens[i].append(src_bin) src_lengths[i].append(len(src_bin)) assert all( len(src_tokens[0]) == len(src_tokens[i]) for i in range(self.args.num_classes) ) assert len(src_tokens[0]) == len(src_lengths[0]) assert len(labels) == 0 or len(labels) == len(src_tokens[0]) for i in range(self.args.num_classes): src_lengths[i] = np.array(src_lengths[i]) src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i]) src_lengths[i] = ListDataset(src_lengths[i]) dataset = { "id": IdDataset(), "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(src_tokens[0], reduce=True), } for i in range(self.args.num_classes): dataset.update( { "net_input{}".format(i + 1): { "src_tokens": RightPadDataset( src_tokens[i], pad_idx=self.source_dictionary.pad(), ), "src_lengths": src_lengths[i], } } ) if len(labels) > 0: dataset.update({"target": RawLabelDataset(labels)}) dataset = NestedDictionaryDataset( dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])], ) with data_utils.numpy_seed(self.args.seed): dataset = SortDataset( dataset, # shuffle sort_order=[np.random.permutation(len(dataset))], ) print("| Loaded {} with {} samples".format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split] def build_model(self, args): from fairseq import models model = models.build_model(args, self) model.register_classification_head( "sentence_classification_head", num_classes=1, ) return model @property def source_dictionary(self): return self.vocab @property def target_dictionary(self): return self.vocab
6,124
31.068063
88
py
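For reference, a sketch of one <split>.jsonl record in the shape that load_dataset above parses; the question text reuses the example from the code comment, and the remaining fields are invented.

import json

example = {
    "answerKey": "B",  # optional at test time; 'A'..'E' maps to labels 0..4
    "question": {
        "stem": "Where would I not want a fox?",
        "choices": [
            {"label": "A", "text": "forest"},
            {"label": "B", "text": "hen house"},
            {"label": "C", "text": "zoo"},
            {"label": "D", "text": "england"},
            {"label": "E", "text": "mountains"},
        ],  # must contain exactly --num-classes entries (default 5)
    },
}
print(json.dumps(example))  # one such JSON object per line of <split>.jsonl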
sign-topic
sign-topic-main/examples/roberta/wsc/wsc_task.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import tempfile import numpy as np import torch import torch.nn.functional as F from fairseq import utils from fairseq.data import ( Dictionary, IdDataset, ListDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, PadDataset, SortDataset, data_utils, encoders, ) from fairseq.tasks import LegacyFairseqTask, register_task from . import wsc_utils @register_task("wsc") class WSCTask(LegacyFairseqTask): """Task to finetune RoBERTa for Winograd Schemas.""" @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument( "data", metavar="DIR", help="path to data directory; we load <split>.jsonl" ) parser.add_argument( "--init-token", type=int, default=None, help="add token at the beginning of each batch item", ) def __init__(self, args, vocab): super().__init__(args) self.vocab = vocab self.mask = vocab.add_symbol("<mask>") self.bpe = encoders.build_bpe(args) self.tokenizer = encoders.build_tokenizer(args) # hack to handle GPT-2 BPE, which includes leading spaces if args.bpe == "gpt2": self.leading_space = True self.trailing_space = False else: self.leading_space = False self.trailing_space = True @classmethod def load_dictionary(cls, filename): """Load the dictionary from the filename Args: filename (str): the filename """ dictionary = Dictionary.load(filename) dictionary.add_symbol("<mask>") return dictionary @classmethod def setup_task(cls, args, **kwargs): assert args.criterion == "wsc", "Must set --criterion=wsc" # load data and label dictionaries vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) print("| dictionary: {} types".format(len(vocab))) return cls(args, vocab) def binarize(self, s: str, append_eos: bool = False): if self.tokenizer is not None: s = self.tokenizer.encode(s) if self.bpe is not None: s = self.bpe.encode(s) tokens = self.vocab.encode_line( s, append_eos=append_eos, add_if_not_exist=False, ).long() if self.args.init_token is not None: tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) return tokens def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space): toks = self.binarize( prefix + leading_space + txt + trailing_space + suffix, append_eos=True, ) mask = torch.zeros_like(toks, dtype=torch.bool) mask_start = len(self.binarize(prefix)) mask_size = len(self.binarize(leading_space + txt)) mask[mask_start : mask_start + mask_size] = 1 return toks, mask def load_dataset( self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs ): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ if data_path is None: data_path = os.path.join(self.args.data, split + ".jsonl") if not os.path.exists(data_path): raise FileNotFoundError("Cannot find data: {}".format(data_path)) query_tokens = [] query_masks = [] query_lengths = [] candidate_tokens = [] candidate_masks = [] candidate_lengths = [] labels = [] for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path): prefix = sentence[: pronoun_span.start].text suffix = sentence[pronoun_span.end :].text_with_ws # spaCy spans include trailing spaces, but we need to know about # leading spaces for the GPT-2 BPE leading_space = ( " " if sentence[: pronoun_span.start].text_with_ws.endswith(" ") else "" ) trailing_space = " " if pronoun_span.text_with_ws.endswith(" ") else "" # get noun phrases, excluding pronouns and anything overlapping with the query cand_spans = wsc_utils.filter_noun_chunks( wsc_utils.extended_noun_chunks(sentence), exclude_pronouns=True, exclude_query=query, exact_match=False, ) if query is not None: query_toks, query_mask = self.binarize_with_mask( query, prefix, suffix, leading_space, trailing_space ) query_len = len(query_toks) else: query_toks, query_mask, query_len = None, None, 0 query_tokens.append(query_toks) query_masks.append(query_mask) query_lengths.append(query_len) cand_toks, cand_masks = [], [] for cand_span in cand_spans: toks, mask = self.binarize_with_mask( cand_span.text, prefix, suffix, leading_space, trailing_space, ) cand_toks.append(toks) cand_masks.append(mask) # collate candidates cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad()) cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0) assert cand_toks.size() == cand_masks.size() candidate_tokens.append(cand_toks) candidate_masks.append(cand_masks) candidate_lengths.append(cand_toks.size(1)) labels.append(label) query_lengths = np.array(query_lengths) query_tokens = ListDataset(query_tokens, query_lengths) query_masks = ListDataset(query_masks, query_lengths) candidate_lengths = np.array(candidate_lengths) candidate_tokens = ListDataset(candidate_tokens, candidate_lengths) candidate_masks = ListDataset(candidate_masks, candidate_lengths) labels = ListDataset(labels, [1] * len(labels)) dataset = { "id": IdDataset(), "query_tokens": query_tokens, "query_masks": query_masks, "candidate_tokens": candidate_tokens, "candidate_masks": candidate_masks, "labels": labels, "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(query_tokens, reduce=True), } nested_dataset = NestedDictionaryDataset( dataset, sizes=[query_lengths], ) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(query_tokens)) dataset = SortDataset( nested_dataset, # shuffle sort_order=[shuffle], ) if return_only: return dataset self.datasets[split] = dataset return self.datasets[split] def build_dataset_for_inference(self, sample_json): with tempfile.NamedTemporaryFile(buffering=0) as h: h.write((json.dumps(sample_json) + "\n").encode("utf-8")) dataset = self.load_dataset( "disambiguate_pronoun", data_path=h.name, return_only=True, ) return dataset def disambiguate_pronoun(self, model, sentence, use_cuda=False): sample_json = wsc_utils.convert_sentence_to_json(sentence) dataset = self.build_dataset_for_inference(sample_json) sample = dataset.collater([dataset[0]]) if use_cuda: sample = utils.move_to_cuda(sample) def get_masked_input(tokens, mask): masked_tokens = tokens.clone() masked_tokens[mask.bool()] = self.mask return masked_tokens 
def get_lprobs(tokens, mask): logits, _ = model(src_tokens=get_masked_input(tokens, mask)) lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float) scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1) mask = mask.type_as(scores) scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1) return scores cand_lprobs = get_lprobs( sample["candidate_tokens"][0], sample["candidate_masks"][0], ) if sample["query_tokens"][0] is not None: query_lprobs = get_lprobs( sample["query_tokens"][0].unsqueeze(0), sample["query_masks"][0].unsqueeze(0), ) return (query_lprobs >= cand_lprobs).all().item() == 1 else: best_idx = cand_lprobs.argmax().item() full_cand = sample["candidate_tokens"][0][best_idx] mask = sample["candidate_masks"][0][best_idx] toks = full_cand[mask.bool()] return self.bpe.decode(self.source_dictionary.string(toks)).strip() @property def source_dictionary(self): return self.vocab @property def target_dictionary(self): return self.vocab @register_task("winogrande") class WinograndeTask(WSCTask): """ Task for WinoGrande dataset. Efficient implementation for Winograd schema tasks with exactly two candidates, one of which is correct. """ @classmethod def setup_task(cls, args, **kwargs): assert args.criterion == "winogrande", "Must set --criterion=winogrande" # load data and label dictionaries vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) print("| dictionary: {} types".format(len(vocab))) return cls(args, vocab) def load_dataset( self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs ): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if data_path is None: data_path = os.path.join(self.args.data, split + ".jsonl") if not os.path.exists(data_path): raise FileNotFoundError("Cannot find data: {}".format(data_path)) query_tokens = [] query_masks = [] query_lengths = [] candidate_tokens = [] candidate_masks = [] candidate_lengths = [] itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == "test")) for sample in itr: sentence, pronoun_span, query, cand_text = sample prefix = sentence[: pronoun_span[0]].rstrip() suffix = sentence[pronoun_span[1] :] leading_space = " " if sentence[: pronoun_span[0]].endswith(" ") else "" trailing_space = "" if query is not None: query_toks, query_mask = self.binarize_with_mask( query, prefix, suffix, leading_space, trailing_space, ) query_len = len(query_toks) else: query_toks, query_mask, query_len = None, None, 0 query_tokens.append(query_toks) query_masks.append(query_mask) query_lengths.append(query_len) cand_toks, cand_mask = self.binarize_with_mask( cand_text, prefix, suffix, leading_space, trailing_space, ) candidate_tokens.append(cand_toks) candidate_masks.append(cand_mask) candidate_lengths.append(cand_toks.size(0)) query_lengths = np.array(query_lengths) def get_pad_dataset_fn(tokens, length, pad_idx): return PadDataset( ListDataset(tokens, length), pad_idx=pad_idx, left_pad=False, ) query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad()) query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0) candidate_lengths = np.array(candidate_lengths) candidate_tokens = get_pad_dataset_fn( candidate_tokens, candidate_lengths, self.vocab.pad() ) candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0) dataset = { "id": IdDataset(), "query_tokens": query_tokens, "query_masks": query_masks, "candidate_tokens": candidate_tokens, "candidate_masks": candidate_masks, "nsentences": NumSamplesDataset(), "ntokens": 
NumelDataset(query_tokens, reduce=True), } nested_dataset = NestedDictionaryDataset( dataset, sizes=[query_lengths], ) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(query_tokens)) dataset = SortDataset( nested_dataset, # shuffle sort_order=[shuffle], ) if return_only: return dataset self.datasets[split] = dataset return self.datasets[split]
13,524
32.644279
90
py
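A standalone sketch of the masking idea inside binarize_with_mask above: the query or candidate span is marked by a boolean mask whose offset and width come from the token lengths of the prefix and of the (leading-space-adjusted) span. The toy lengths below are invented.

import torch

def span_mask(prefix_len: int, span_len: int, total_len: int) -> torch.Tensor:
    # True exactly over the span's token positions, False elsewhere.
    mask = torch.zeros(total_len, dtype=torch.bool)
    mask[prefix_len : prefix_len + span_len] = True
    return mask

# 3 prefix tokens, a 2-token candidate span, 7 tokens overall (incl. EOS)
print(span_mask(3, 2, 7))  # [False, False, False, True, True, False, False]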
sign-topic
sign-topic-main/examples/roberta/wsc/wsc_criterion.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import utils from fairseq.criterions import LegacyFairseqCriterion, register_criterion from fairseq.data import encoders @register_criterion("wsc") class WSCCriterion(LegacyFairseqCriterion): def __init__(self, args, task): super().__init__(args, task) if self.args.save_predictions is not None: self.prediction_h = open(self.args.save_predictions, "w") else: self.prediction_h = None self.bpe = encoders.build_bpe(args.bpe) self.tokenizer = encoders.build_tokenizer(args.tokenizer) def __del__(self): if self.prediction_h is not None: self.prediction_h.close() @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0) parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0) parser.add_argument( "--wsc-cross-entropy", action="store_true", help="use cross entropy formulation instead of margin loss", ) parser.add_argument( "--save-predictions", metavar="FILE", help="file to save predictions to" ) def get_masked_input(self, tokens, mask): masked_tokens = tokens.clone() masked_tokens[mask] = self.task.mask return masked_tokens def get_lprobs(self, model, tokens, mask): logits, _ = model(src_tokens=self.get_masked_input(tokens, mask)) lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float) scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1) mask = mask.type_as(scores) scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1) return scores def get_loss(self, query_lprobs, cand_lprobs): if self.args.wsc_cross_entropy: return F.cross_entropy( torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0), query_lprobs.new([0]).long(), ) else: return ( -query_lprobs + self.args.wsc_margin_alpha * (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0) ).sum() def forward(self, model, sample, reduce=True): # compute loss and accuracy loss, nloss = 0.0, 0 ncorrect, nqueries = 0, 0 for i, label in enumerate(sample["labels"]): query_lprobs = self.get_lprobs( model, sample["query_tokens"][i].unsqueeze(0), sample["query_masks"][i].unsqueeze(0), ) cand_lprobs = self.get_lprobs( model, sample["candidate_tokens"][i], sample["candidate_masks"][i], ) pred = (query_lprobs >= cand_lprobs).all().item() if label is not None: label = 1 if label else 0 ncorrect += 1 if pred == label else 0 nqueries += 1 if label: # only compute a loss for positive instances nloss += 1 loss += self.get_loss(query_lprobs, cand_lprobs) id = sample["id"][i].item() if self.prediction_h is not None: print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h) if nloss == 0: loss = torch.tensor(0.0, requires_grad=True) sample_size = nqueries if nqueries > 0 else 1 logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["nsentences"], "sample_size": sample_size, "ncorrect": ncorrect, "nqueries": nqueries, } return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = 
sum(log.get("sample_size", 0) for log in logging_outputs) agg_output = { "loss": loss_sum / sample_size / math.log(2), "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs) nqueries = sum(log.get("nqueries", 0) for log in logging_outputs) if nqueries > 0: agg_output["accuracy"] = ncorrect / float(nqueries) return agg_output @register_criterion("winogrande") class WinograndeCriterion(WSCCriterion): def forward(self, model, sample, reduce=True): # compute loss and accuracy query_lprobs = self.get_lprobs( model, sample["query_tokens"], sample["query_masks"], ) cand_lprobs = self.get_lprobs( model, sample["candidate_tokens"], sample["candidate_masks"], ) pred = query_lprobs >= cand_lprobs loss = self.get_loss(query_lprobs, cand_lprobs) sample_size = sample["query_tokens"].size(0) ncorrect = pred.sum().item() logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["nsentences"], "sample_size": sample_size, "ncorrect": ncorrect, "nqueries": sample_size, } return loss, sample_size, logging_output
6,037
34.940476
87
py
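A toy check of the margin formulation in get_loss above, -q + alpha * clamp(c - q + beta, min=0), using the default alpha=1.0 and beta=0.0; the scores are invented.

import torch

alpha, beta = 1.0, 0.0                    # --wsc-margin-alpha / --wsc-margin-beta defaults
query_lprobs = torch.tensor([-1.2])       # invented log-prob of the correct span
cand_lprobs = torch.tensor([-2.5, -0.9])  # invented log-probs of two candidates

loss = (-query_lprobs
        + alpha * (cand_lprobs - query_lprobs + beta).clamp(min=0)).sum()
print(loss.item())  # 1.2 (margin inactive) + 1.5 (margin of 0.3 active) -> 2.7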
sign-topic
sign-topic-main/examples/speech_recognition/infer.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Run inference for pre-processed data with a trained model. """ import ast import logging import math import os import sys import editdistance import numpy as np import torch from fairseq import checkpoint_utils, options, progress_bar, tasks, utils from fairseq.data.data_utils import post_process from fairseq.logging.meters import StopwatchMeter, TimeMeter logging.basicConfig() logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def add_asr_eval_argument(parser): parser.add_argument("--kspmodel", default=None, help="sentence piece model") parser.add_argument( "--wfstlm", default=None, help="wfstlm on dictonary output units" ) parser.add_argument( "--rnnt_decoding_type", default="greedy", help="wfstlm on dictonary\ output units", ) try: parser.add_argument( "--lm-weight", "--lm_weight", type=float, default=0.2, help="weight for lm while interpolating with neural score", ) except: pass parser.add_argument( "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level" ) parser.add_argument( "--w2l-decoder", choices=["viterbi", "kenlm", "fairseqlm"], help="use a w2l decoder", ) parser.add_argument("--lexicon", help="lexicon for w2l decoder") parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm") parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder") parser.add_argument("--beam-threshold", type=float, default=25.0) parser.add_argument("--beam-size-token", type=float, default=100) parser.add_argument("--word-score", type=float, default=1.0) parser.add_argument("--unk-weight", type=float, default=-math.inf) parser.add_argument("--sil-weight", type=float, default=0.0) parser.add_argument( "--dump-emissions", type=str, default=None, help="if present, dumps emissions into this file and exits", ) parser.add_argument( "--dump-features", type=str, default=None, help="if present, dumps features into this file and exits", ) parser.add_argument( "--load-emissions", type=str, default=None, help="if present, loads emissions from this file", ) return parser def check_args(args): # assert args.path is not None, "--path required for generation!" # assert args.results_path is not None, "--results_path required for generation!" 
assert ( not args.sampling or args.nbest == args.beam ), "--sampling requires --nbest to be equal to --beam" assert ( args.replace_unk is None or args.raw_text ), "--replace-unk requires a raw text dataset (--raw-text)" def get_dataset_itr(args, task, models): return task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_sentences=args.batch_size, max_positions=(sys.maxsize, sys.maxsize), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, data_buffer_size=args.data_buffer_size, ).next_epoch_itr(shuffle=False) def process_predictions( args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id ): for hypo in hypos[: min(len(hypos), args.nbest)]: hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu()) if "words" in hypo: hyp_words = " ".join(hypo["words"]) else: hyp_words = post_process(hyp_pieces, args.post_process) if res_files is not None: print( "{} ({}-{})".format(hyp_pieces, speaker, id), file=res_files["hypo.units"], ) print( "{} ({}-{})".format(hyp_words, speaker, id), file=res_files["hypo.words"], ) tgt_pieces = tgt_dict.string(target_tokens) tgt_words = post_process(tgt_pieces, args.post_process) if res_files is not None: print( "{} ({}-{})".format(tgt_pieces, speaker, id), file=res_files["ref.units"], ) print( "{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"] ) if not args.quiet: logger.info("HYPO:" + hyp_words) logger.info("TARGET:" + tgt_words) logger.info("___________________") hyp_words = hyp_words.split() tgt_words = tgt_words.split() return editdistance.eval(hyp_words, tgt_words), len(tgt_words) def prepare_result_files(args): def get_res_file(file_prefix): if args.num_shards > 1: file_prefix = f"{args.shard_id}_{file_prefix}" path = os.path.join( args.results_path, "{}-{}-{}.txt".format( file_prefix, os.path.basename(args.path), args.gen_subset ), ) return open(path, "w", buffering=1) if not args.results_path: return None return { "hypo.words": get_res_file("hypo.word"), "hypo.units": get_res_file("hypo.units"), "ref.words": get_res_file("ref.word"), "ref.units": get_res_file("ref.units"), } def optimize_models(args, use_cuda, models): """Optimize ensemble for generation""" for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() if use_cuda: model.cuda() def apply_half(t): if t.dtype is torch.float32: return t.to(dtype=torch.half) return t class ExistingEmissionsDecoder(object): def __init__(self, decoder, emissions): self.decoder = decoder self.emissions = emissions def generate(self, models, sample, **unused): ids = sample["id"].cpu().numpy() try: emissions = np.stack(self.emissions[ids]) except: print([x.shape for x in self.emissions[ids]]) raise Exception("invalid sizes") emissions = torch.from_numpy(emissions) return self.decoder.decode(emissions) def main(args, task=None, model_state=None): check_args(args) use_fp16 = args.fp16 if args.max_tokens is None and args.batch_size is None: args.max_tokens = 4000000 logger.info(args) use_cuda = torch.cuda.is_available() and not args.cpu logger.info("| decoding with criterion {}".format(args.criterion)) task = tasks.setup_task(args) # Load ensemble if args.load_emissions: models, criterions = [], [] task.load_dataset(args.gen_subset) else: logger.info("| loading model(s) from 
{}".format(args.path)) models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( utils.split_paths(args.path, separator="\\"), arg_overrides=ast.literal_eval(args.model_overrides), task=task, suffix=args.checkpoint_suffix, strict=(args.checkpoint_shard_count == 1), num_shards=args.checkpoint_shard_count, state=model_state, ) optimize_models(args, use_cuda, models) task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task) # Set dictionary tgt_dict = task.target_dictionary logger.info( "| {} {} {} examples".format( args.data, args.gen_subset, len(task.dataset(args.gen_subset)) ) ) # hack to pass transitions to W2lDecoder if args.criterion == "asg_loss": raise NotImplementedError("asg_loss is currently not supported") # trans = criterions[0].asg.trans.data # args.asg_transitions = torch.flatten(trans).tolist() # Load dataset (possibly sharded) itr = get_dataset_itr(args, task, models) # Initialize generator gen_timer = StopwatchMeter() def build_generator(args): w2l_decoder = getattr(args, "w2l_decoder", None) if w2l_decoder == "viterbi": from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder return W2lViterbiDecoder(args, task.target_dictionary) elif w2l_decoder == "kenlm": from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder return W2lKenLMDecoder(args, task.target_dictionary) elif w2l_decoder == "fairseqlm": from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder return W2lFairseqLMDecoder(args, task.target_dictionary) else: print( "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment" ) # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task generator = build_generator(args) if args.load_emissions: generator = ExistingEmissionsDecoder( generator, np.load(args.load_emissions, allow_pickle=True) ) logger.info("loaded emissions from " + args.load_emissions) num_sentences = 0 if args.results_path is not None and not os.path.exists(args.results_path): os.makedirs(args.results_path) max_source_pos = ( utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ), ) if max_source_pos is not None: max_source_pos = max_source_pos[0] if max_source_pos is not None: max_source_pos = max_source_pos[0] - 1 if args.dump_emissions: emissions = {} if args.dump_features: features = {} models[0].bert.proj = None else: res_files = prepare_result_files(args) errs_t = 0 lengths_t = 0 with progress_bar.build_progress_bar(args, itr) as t: wps_meter = TimeMeter() for sample in t: sample = utils.move_to_cuda(sample) if use_cuda else sample if use_fp16: sample = utils.apply_to_sample(apply_half, sample) if "net_input" not in sample: continue prefix_tokens = None if args.prefix_size > 0: prefix_tokens = sample["target"][:, : args.prefix_size] gen_timer.start() if args.dump_emissions: with torch.no_grad(): encoder_out = models[0](**sample["net_input"]) emm = models[0].get_normalized_probs(encoder_out, log_probs=True) emm = emm.transpose(0, 1).cpu().numpy() for i, id in enumerate(sample["id"]): emissions[id.item()] = emm[i] continue elif args.dump_features: with torch.no_grad(): encoder_out = models[0](**sample["net_input"]) feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy() for i, id in enumerate(sample["id"]): padding = ( encoder_out["encoder_padding_mask"][i].cpu().numpy() if encoder_out["encoder_padding_mask"] is not None else None ) features[id.item()] = (feat[i], padding) continue hypos = 
task.inference_step(generator, models, sample, prefix_tokens) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample["id"].tolist()): speaker = None # id = task.dataset(args.gen_subset).ids[int(sample_id)] id = sample_id toks = ( sample["target"][i, :] if "target_label" not in sample else sample["target_label"][i, :] ) target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu() # Process top predictions errs, length = process_predictions( args, hypos[i], None, tgt_dict, target_tokens, res_files, speaker, id, ) errs_t += errs lengths_t += length wps_meter.update(num_generated_tokens) t.log({"wps": round(wps_meter.avg)}) num_sentences += ( sample["nsentences"] if "nsentences" in sample else sample["id"].numel() ) wer = None if args.dump_emissions: emm_arr = [] for i in range(len(emissions)): emm_arr.append(emissions[i]) np.save(args.dump_emissions, emm_arr) logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}") elif args.dump_features: feat_arr = [] for i in range(len(features)): feat_arr.append(features[i]) np.save(args.dump_features, feat_arr) logger.info(f"saved {len(features)} emissions to {args.dump_features}") else: if lengths_t > 0: wer = errs_t * 100.0 / lengths_t logger.info(f"WER: {wer}") logger.info( "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}" "sentences/s, {:.2f} tokens/s)".format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1.0 / gen_timer.avg, ) ) logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam)) return task, wer def make_parser(): parser = options.get_generation_parser() parser = add_asr_eval_argument(parser) return parser def cli_main(): parser = make_parser() args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
14,677
32.588101
111
py
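The WER bookkeeping spread across process_predictions and main above reduces to the following; the hypothesis and reference strings are invented.

import editdistance

hyp_words = "the cat sat on mat".split()      # invented hypothesis
tgt_words = "the cat sat on the mat".split()  # invented reference

errs = editdistance.eval(hyp_words, tgt_words)  # word-level edit distance (1 deletion)
wer = errs * 100.0 / len(tgt_words)
print(f"WER: {wer:.2f}")  # 1 error over 6 reference words -> 16.67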
sign-topic
sign-topic-main/examples/speech_recognition/w2l_decoder.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Flashlight decoders. """ import gc import itertools as it import os.path as osp from typing import List import warnings from collections import deque, namedtuple import numpy as np import torch from examples.speech_recognition.data.replabels import unpack_replabels from fairseq import tasks from fairseq.utils import apply_to_sample from omegaconf import open_dict from fairseq.dataclass.utils import convert_namespace_to_omegaconf try: from flashlight.lib.text.dictionary import create_word_dict, load_words from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes from flashlight.lib.text.decoder import ( CriterionType, LexiconDecoderOptions, KenLM, LM, LMState, SmearingMode, Trie, LexiconDecoder, ) except: warnings.warn( "flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python" ) LM = object LMState = object class W2lDecoder(object): def __init__(self, args, tgt_dict): self.tgt_dict = tgt_dict self.vocab_size = len(tgt_dict) self.nbest = args.nbest # criterion-specific init self.criterion_type = CriterionType.CTC self.blank = ( tgt_dict.index("<ctc_blank>") if "<ctc_blank>" in tgt_dict.indices else tgt_dict.bos() ) if "<sep>" in tgt_dict.indices: self.silence = tgt_dict.index("<sep>") elif "|" in tgt_dict.indices: self.silence = tgt_dict.index("|") else: self.silence = tgt_dict.eos() self.asg_transitions = None def generate(self, models, sample, **unused): """Generate a batch of inferences.""" # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } emissions = self.get_emissions(models, encoder_input) return self.decode(emissions) def get_emissions(self, models, encoder_input): """Run encoder and normalize emissions""" model = models[0] encoder_out = model(**encoder_input) if hasattr(model, "get_logits"): emissions = model.get_logits(encoder_out) # no need to normalize emissions else: emissions = model.get_normalized_probs(encoder_out, log_probs=True) return emissions.transpose(0, 1).float().cpu().contiguous() def get_tokens(self, idxs): """Normalize tokens by handling CTC blank, ASG replabels, etc.""" idxs = (g[0] for g in it.groupby(idxs)) idxs = filter(lambda x: x != self.blank, idxs) return torch.LongTensor(list(idxs)) class W2lViterbiDecoder(W2lDecoder): def __init__(self, args, tgt_dict): super().__init__(args, tgt_dict) def decode(self, emissions): B, T, N = emissions.size() hypos = [] if self.asg_transitions is None: transitions = torch.FloatTensor(N, N).zero_() else: transitions = torch.FloatTensor(self.asg_transitions).view(N, N) viterbi_path = torch.IntTensor(B, T) workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N)) CpuViterbiPath.compute( B, T, N, get_data_ptr_as_bytes(emissions), get_data_ptr_as_bytes(transitions), get_data_ptr_as_bytes(viterbi_path), get_data_ptr_as_bytes(workspace), ) return [ [{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}] for b in range(B) ] class W2lKenLMDecoder(W2lDecoder): def __init__(self, args, tgt_dict): super().__init__(args, tgt_dict) self.unit_lm = getattr(args, "unit_lm", False) if args.lexicon: self.lexicon 
= load_words(args.lexicon) self.word_dict = create_word_dict(self.lexicon) self.unk_word = self.word_dict.get_index("<unk>") self.lm = KenLM(args.kenlm_model, self.word_dict) self.trie = Trie(self.vocab_size, self.silence) start_state = self.lm.start(False) for i, (word, spellings) in enumerate(self.lexicon.items()): word_idx = self.word_dict.get_index(word) _, score = self.lm.score(start_state, word_idx) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, word_score=args.word_score, unk_score=args.unk_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) if self.asg_transitions is None: N = 768 # self.asg_transitions = torch.FloatTensor(N, N).zero_() self.asg_transitions = [] self.decoder = LexiconDecoder( self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, self.asg_transitions, self.unit_lm, ) else: assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(args.kenlm_model, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def get_timesteps(self, token_idxs: List[int]) -> List[int]: """Returns frame numbers corresponding to every non-blank token. Parameters ---------- token_idxs : List[int] IDs of decoded tokens. Returns ------- List[int] Frame numbers corresponding to every non-blank token. 
""" timesteps = [] for i, token_idx in enumerate(token_idxs): if token_idx == self.blank: continue if i == 0 or token_idx != token_idxs[i-1]: timesteps.append(i) return timesteps def decode(self, emissions): B, T, N = emissions.size() hypos = [] for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append( [ { "tokens": self.get_tokens(result.tokens), "score": result.score, "timesteps": self.get_timesteps(result.tokens), "words": [ self.word_dict.get_entry(x) for x in result.words if x >= 0 ], } for result in nbest_results ] ) return hypos FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"]) class FairseqLM(LM): def __init__(self, dictionary, model): LM.__init__(self) self.dictionary = dictionary self.model = model self.unk = self.dictionary.unk() self.save_incremental = False # this currently does not work properly self.max_cache = 20_000 model.cuda() model.eval() model.make_generation_fast_() self.states = {} self.stateq = deque() def start(self, start_with_nothing): state = LMState() prefix = torch.LongTensor([[self.dictionary.eos()]]) incremental_state = {} if self.save_incremental else None with torch.no_grad(): res = self.model(prefix.cuda(), incremental_state=incremental_state) probs = self.model.get_normalized_probs(res, log_probs=True, sample=None) if incremental_state is not None: incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state) self.states[state] = FairseqLMState( prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy() ) self.stateq.append(state) return state def score(self, state: LMState, token_index: int, no_cache: bool = False): """ Evaluate language model based on the current lm state and new word Parameters: ----------- state: current lm state token_index: index of the word (can be lexicon index then you should store inside LM the mapping between indices of lexicon and lm, or lm index of a word) Returns: -------- (LMState, float): pair of (new state, score for the current word) """ curr_state = self.states[state] def trim_cache(targ_size): while len(self.stateq) > targ_size: rem_k = self.stateq.popleft() rem_st = self.states[rem_k] rem_st = FairseqLMState(rem_st.prefix, None, None) self.states[rem_k] = rem_st if curr_state.probs is None: new_incremental_state = ( curr_state.incremental_state.copy() if curr_state.incremental_state is not None else None ) with torch.no_grad(): if new_incremental_state is not None: new_incremental_state = apply_to_sample( lambda x: x.cuda(), new_incremental_state ) elif self.save_incremental: new_incremental_state = {} res = self.model( torch.from_numpy(curr_state.prefix).cuda(), incremental_state=new_incremental_state, ) probs = self.model.get_normalized_probs( res, log_probs=True, sample=None ) if new_incremental_state is not None: new_incremental_state = apply_to_sample( lambda x: x.cpu(), new_incremental_state ) curr_state = FairseqLMState( curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy() ) if not no_cache: self.states[state] = curr_state self.stateq.append(state) score = curr_state.probs[token_index].item() trim_cache(self.max_cache) outstate = state.child(token_index) if outstate not in self.states and not no_cache: prefix = np.concatenate( [curr_state.prefix, torch.LongTensor([[token_index]])], -1 ) incr_state = curr_state.incremental_state self.states[outstate] = FairseqLMState(prefix, incr_state, None) if token_index == 
self.unk: score = float("-inf") return outstate, score def finish(self, state: LMState): """ Evaluate eos for language model based on the current lm state Returns: -------- (LMState, float): pair of (new state, score for the current word) """ return self.score(state, self.dictionary.eos()) def empty_cache(self): self.states = {} self.stateq = deque() gc.collect() class W2lFairseqLMDecoder(W2lDecoder): def __init__(self, args, tgt_dict): super().__init__(args, tgt_dict) self.unit_lm = getattr(args, "unit_lm", False) self.lexicon = load_words(args.lexicon) if args.lexicon else None self.idx_to_wrd = {} checkpoint = torch.load(args.kenlm_model, map_location="cpu") if "cfg" in checkpoint and checkpoint["cfg"] is not None: lm_args = checkpoint["cfg"] else: lm_args = convert_namespace_to_omegaconf(checkpoint["args"]) with open_dict(lm_args.task): lm_args.task.data = osp.dirname(args.kenlm_model) task = tasks.setup_task(lm_args.task) model = task.build_model(lm_args.model) model.load_state_dict(checkpoint["model"], strict=False) self.trie = Trie(self.vocab_size, self.silence) self.word_dict = task.dictionary self.unk_word = self.word_dict.unk() self.lm = FairseqLM(self.word_dict, model) if self.lexicon: start_state = self.lm.start(False) for i, (word, spellings) in enumerate(self.lexicon.items()): if self.unit_lm: word_idx = i self.idx_to_wrd[i] = word score = 0 else: word_idx = self.word_dict.index(word) _, score = self.lm.score(start_state, word_idx, no_cache=True) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, word_score=args.word_score, unk_score=args.unk_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) self.decoder = LexiconDecoder( self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, [], self.unit_lm, ) else: assert args.unit_lm, "lexicon free decoding can only be done with a unit language model" from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(args.kenlm_model, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=args.beam, beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def decode(self, emissions): B, T, N = emissions.size() hypos = [] def idx_to_word(idx): if self.unit_lm: return self.idx_to_wrd[idx] else: return self.word_dict[idx] def make_hypo(result): hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score} if self.lexicon: hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0] return hypo for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append([make_hypo(result) for result in nbest_results]) self.lm.empty_cache() return hypos
17,396
34.722793
171
py
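The FairseqLM class above bounds memory by keeping LM states in a FIFO queue and dropping the heavy cached fields (probs, incremental state) once the queue exceeds max_cache. Below is a minimal sketch of that trimming scheme; the MAX_CACHE value and payload strings are invented for illustration (the real class stores FairseqLMState tuples, trims inside score(), and uses 20_000):

from collections import deque

MAX_CACHE = 3  # hypothetical; FairseqLM uses 20_000

states, stateq = {}, deque()

def add_state(key, payload):
    # Cache the payload and enqueue the key in insertion order.
    states[key] = payload
    stateq.append(key)
    # Evict the oldest entries: keep the key valid but drop the cached
    # payload, mirroring FairseqLMState(prefix, None, None) in trim_cache.
    while len(stateq) > MAX_CACHE:
        old = stateq.popleft()
        states[old] = None

for k in "abcde":
    add_state(k, f"probs_for_{k}")
print(states)  # 'a' and 'b' evicted to None; 'c', 'd', 'e' retained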
sign-topic
sign-topic-main/examples/speech_recognition/criterions/cross_entropy_acc.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

import logging
import math

import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion


@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
    def __init__(self, task, sentence_avg):
        super().__init__(task)
        self.sentence_avg = sentence_avg

    def compute_loss(self, model, net_output, target, reduction, log_probs):
        # N, T -> N * T
        target = target.view(-1)
        lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
        if not hasattr(lprobs, "batch_first"):
            logging.warning(
                "ERROR: we need to know whether "
                "the net output is batch-first; "
                "you need to set the batch_first attribute on the return value of "
                "model.get_normalized_probs. For now, we assume it is true, but "
                "in the future this will raise an exception instead. "
            )
        batch_first = getattr(lprobs, "batch_first", True)
        if not batch_first:
            lprobs = lprobs.transpose(0, 1)

        # N, T, D -> N * T, D
        lprobs = lprobs.view(-1, lprobs.size(-1))
        loss = F.nll_loss(
            lprobs, target, ignore_index=self.padding_idx, reduction=reduction
        )
        return lprobs, loss

    def get_logging_output(self, sample, target, lprobs, loss):
        target = target.view(-1)
        mask = target != self.padding_idx
        correct = torch.sum(
            lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
        )
        total = torch.sum(mask)
        sample_size = (
            sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
        )

        logging_output = {
            "loss": utils.item(loss.data),  # * sample['ntokens'],
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
            "correct": utils.item(correct.data),
            "total": utils.item(total.data),
            "nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
        }

        return sample_size, logging_output

    def forward(self, model, sample, reduction="sum", log_probs=True):
        """Computes the cross entropy with accuracy metric for the given sample.

        This is similar to CrossEntropyCriterion in fairseq, but also
        computes accuracy metrics as part of logging.

        Args:
            logprobs (Torch.tensor) of shape N, T, D i.e.
                batchsize, timesteps, dimensions
            targets (Torch.tensor) of shape N, T i.e. batchsize, timesteps

        Returns:
            tuple: With three elements:
                1) the loss
                2) the sample size, which is used as the denominator for the gradient
                3) logging outputs to display while training

        TODO:
            * Currently this Criterion will only work with LSTMEncoderModels or
              FairseqModels which have a decoder, or Models which return a
              TorchTensor as net_output.
              We need to make a change to support all FairseqEncoder models.
        """
        net_output = model(**sample["net_input"])
        target = model.get_targets(sample, net_output)
        lprobs, loss = self.compute_loss(
            model, net_output, target, reduction, log_probs
        )
        sample_size, logging_output = self.get_logging_output(
            sample, target, lprobs, loss
        )
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
        total_sum = sum(log.get("total", 0) for log in logging_outputs)
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
        nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
        nframes = sum(log.get("nframes", 0) for log in logging_outputs)
        agg_output = {
            "loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
            # if args.sentence_avg, then sample_size is nsentences and the loss
            # is per-sentence loss; else sample_size is ntokens and the loss
            # becomes per-output-token loss
            "ntokens": ntokens,
            "nsentences": nsentences,
            "nframes": nframes,
            "sample_size": sample_size,
            "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
            "correct": correct_sum,
            "total": total_sum,
            # total is the number of validated tokens
        }
        if sample_size != ntokens:
            agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
        # loss: per output token loss
        # nll_loss: per sentence loss
        return agg_output
5,372
40.015267
85
py
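The correct/total bookkeeping in get_logging_output above reduces to a masked argmax comparison against the target. A small self-contained sketch; the PAD index and tensor shapes here are invented (the criterion takes the padding index from the dictionary):

import torch

PAD = 1  # hypothetical padding index

# lprobs flattened to (N*T, V) as in compute_loss; target flattened to (N*T,)
lprobs = torch.randn(8, 10).log_softmax(-1)
target = torch.randint(2, 10, (8,))
target[-2:] = PAD  # pretend the last two positions are padding

mask = target != PAD
correct = int((lprobs.argmax(1)[mask] == target[mask]).sum())
total = int(mask.sum())
print(f"acc = {100.0 * correct / total:.1f}% ({correct}/{total})")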
sign-topic
sign-topic-main/examples/speech_recognition/criterions/ASG_loss.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from examples.speech_recognition.data.replabels import pack_replabels from fairseq import utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion("asg_loss") class ASGCriterion(FairseqCriterion): @staticmethod def add_args(parser): group = parser.add_argument_group("ASG Loss") group.add_argument( "--asg-transitions-init", help="initial diagonal value of transition matrix", type=float, default=0.0, ) group.add_argument( "--max-replabel", help="maximum # of replabels", type=int, default=2 ) group.add_argument( "--linseg-updates", help="# of training updates to use LinSeg initialization", type=int, default=0, ) group.add_argument( "--hide-linseg-messages", help="hide messages about LinSeg initialization", action="store_true", ) def __init__( self, task, silence_token, asg_transitions_init, max_replabel, linseg_updates, hide_linseg_messages, ): from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode super().__init__(task) self.tgt_dict = task.target_dictionary self.eos = self.tgt_dict.eos() self.silence = ( self.tgt_dict.index(silence_token) if silence_token in self.tgt_dict else None ) self.max_replabel = max_replabel num_labels = len(self.tgt_dict) self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT) self.asg.trans = torch.nn.Parameter( asg_transitions_init * torch.eye(num_labels), requires_grad=True ) self.linseg_progress = torch.nn.Parameter( torch.tensor([0], dtype=torch.int), requires_grad=False ) self.linseg_maximum = linseg_updates self.linseg_message_state = "none" if hide_linseg_messages else "start" @classmethod def build_criterion(cls, args, task): return cls( task, args.silence_token, args.asg_transitions_init, args.max_replabel, args.linseg_updates, args.hide_linseg_messages, ) def linseg_step(self): if not self.training: return False if self.linseg_progress.item() < self.linseg_maximum: if self.linseg_message_state == "start": print("| using LinSeg to initialize ASG") self.linseg_message_state = "finish" self.linseg_progress.add_(1) return True elif self.linseg_message_state == "finish": print("| finished LinSeg initialization") self.linseg_message_state = "none" return False def replace_eos_with_silence(self, tgt): if tgt[-1] != self.eos: return tgt elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence): return tgt[:-1] else: return tgt[:-1] + [self.silence] def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample["net_input"]) emissions = net_output["encoder_out"].transpose(0, 1).contiguous() B = emissions.size(0) T = emissions.size(1) device = emissions.device target = torch.IntTensor(B, T) target_size = torch.IntTensor(B) using_linseg = self.linseg_step() for b in range(B): initial_target_size = sample["target_lengths"][b].item() if initial_target_size == 0: raise ValueError("target size cannot be zero") tgt = sample["target"][b, :initial_target_size].tolist() tgt = self.replace_eos_with_silence(tgt) tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel) tgt = tgt[:T] if using_linseg: tgt = [tgt[t * len(tgt) // T] for t in range(T)] target[b][: len(tgt)] = torch.IntTensor(tgt) target_size[b] = len(tgt) loss = self.asg.forward(emissions, target.to(device), target_size.to(device)) if reduce: loss = torch.sum(loss) sample_size = ( sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"] ) logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, } return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) agg_output = { "loss": loss_sum / nsentences, "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } return agg_output
5,870
33.333333
85
py
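During LinSeg initialization, the forward pass above stretches the packed target linearly across the T emission frames via tgt[t * len(tgt) // T]. A toy illustration of that indexing (the label ids are invented):

tgt = [7, 3, 9]  # hypothetical label ids after replabel packing
T = 10           # number of emission frames
stretched = [tgt[t * len(tgt) // T] for t in range(T)]
print(stretched)  # [7, 7, 7, 7, 3, 3, 3, 9, 9, 9]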
sign-topic
sign-topic-main/examples/speech_recognition/models/vggtransformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import math
from collections.abc import Iterable

import torch
import torch.nn as nn
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq import utils
from fairseq.models import (
    FairseqEncoder,
    FairseqEncoderDecoderModel,
    FairseqEncoderModel,
    FairseqIncrementalDecoder,
    register_model,
    register_model_architecture,
)
from fairseq.modules import (
    LinearizedConvolution,
    TransformerDecoderLayer,
    TransformerEncoderLayer,
    VGGBlock,
)


@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
    """
    Transformers with convolutional context for ASR
    https://arxiv.org/abs/1904.11660
    """

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument(
            "--input-feat-per-channel",
            type=int,
            metavar="N",
            help="encoder input dimension per input channel",
        )
        parser.add_argument(
            "--vggblock-enc-config",
            type=str,
            metavar="EXPR",
            help="""
    an array of tuples each containing the configuration of one vggblock:
    [(out_channels, conv_kernel_size, pooling_kernel_size,
      num_conv_layers, use_layer_norm), ...]
            """,
        )
        parser.add_argument(
            "--transformer-enc-config",
            type=str,
            metavar="EXPR",
            help="""
    a tuple containing the configuration of the encoder transformer layers:
    [(input_dim, num_heads, ffn_dim, normalize_before,
      dropout, attention_dropout, relu_dropout), ...]
            """,
        )
        parser.add_argument(
            "--enc-output-dim",
            type=int,
            metavar="N",
            help="""
    encoder output dimension, can be None. If specified, projecting the
    transformer output to the specified dimension""",
        )
        parser.add_argument(
            "--in-channels",
            type=int,
            metavar="N",
            help="number of encoder input channels",
        )
        parser.add_argument(
            "--tgt-embed-dim",
            type=int,
            metavar="N",
            help="embedding dimension of the decoder target tokens",
        )
        parser.add_argument(
            "--transformer-dec-config",
            type=str,
            metavar="EXPR",
            help="""
    a tuple containing the configuration of the decoder transformer layers:
    [(input_dim, num_heads, ffn_dim, normalize_before,
      dropout, attention_dropout, relu_dropout), ...]
""", ) parser.add_argument( "--conv-dec-config", type=str, metavar="EXPR", help=""" an array of tuples for the decoder 1-D convolution config [(out_channels, conv_kernel_size, use_layer_norm), ...]""", ) @classmethod def build_encoder(cls, args, task): return VGGTransformerEncoder( input_feat_per_channel=args.input_feat_per_channel, vggblock_config=eval(args.vggblock_enc_config), transformer_config=eval(args.transformer_enc_config), encoder_output_dim=args.enc_output_dim, in_channels=args.in_channels, ) @classmethod def build_decoder(cls, args, task): return TransformerDecoder( dictionary=task.target_dictionary, embed_dim=args.tgt_embed_dim, transformer_config=eval(args.transformer_dec_config), conv_config=eval(args.conv_dec_config), encoder_output_dim=args.enc_output_dim, ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted # (in case there are any new ones) base_architecture(args) encoder = cls.build_encoder(args, task) decoder = cls.build_decoder(args, task) return cls(encoder, decoder) def get_normalized_probs(self, net_output, log_probs, sample=None): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = super().get_normalized_probs(net_output, log_probs, sample) lprobs.batch_first = True return lprobs DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2 DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2 # 256: embedding dimension # 4: number of heads # 1024: FFN # True: apply layerNorm before (dropout + resiaul) instead of after # 0.2 (dropout): dropout after MultiheadAttention and second FC # 0.2 (attention_dropout): dropout in MultiheadAttention # 0.2 (relu_dropout): dropout after ReLu DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2 DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2 # TODO: repace transformer encoder config from one liner # to explicit args to get rid of this transformation def prepare_transformer_encoder_params( input_dim, num_heads, ffn_dim, normalize_before, dropout, attention_dropout, relu_dropout, ): args = argparse.Namespace() args.encoder_embed_dim = input_dim args.encoder_attention_heads = num_heads args.attention_dropout = attention_dropout args.dropout = dropout args.activation_dropout = relu_dropout args.encoder_normalize_before = normalize_before args.encoder_ffn_embed_dim = ffn_dim return args def prepare_transformer_decoder_params( input_dim, num_heads, ffn_dim, normalize_before, dropout, attention_dropout, relu_dropout, ): args = argparse.Namespace() args.encoder_embed_dim = None args.decoder_embed_dim = input_dim args.decoder_attention_heads = num_heads args.attention_dropout = attention_dropout args.dropout = dropout args.activation_dropout = relu_dropout args.decoder_normalize_before = normalize_before args.decoder_ffn_embed_dim = ffn_dim return args class VGGTransformerEncoder(FairseqEncoder): """VGG + Transformer encoder""" def __init__( self, input_feat_per_channel, vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG, transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG, encoder_output_dim=512, in_channels=1, transformer_context=None, transformer_sampling=None, ): """constructor for VGGTransformerEncoder Args: - input_feat_per_channel: feature dim (not including stacked, just base feature) - in_channel: # input channels (e.g., if stack 8 feature vector together, this is 8) - vggblock_config: configuration of vggblock, see comments on DEFAULT_ENC_VGGBLOCK_CONFIG - transformer_config: configuration of transformer layer, 
              see comments on DEFAULT_ENC_TRANSFORMER_CONFIG
            - encoder_output_dim: final transformer output embedding dimension
            - transformer_context: (left, right) if set, self-attention will be
              focused on (t-left, t+right)
            - transformer_sampling: an iterable of int, must match with
              len(transformer_config); transformer_sampling[i] indicates the
              sampling factor for the i-th transformer layer, applied after the
              multihead attention and feedforward part
        """
        super().__init__(None)

        self.num_vggblocks = 0
        if vggblock_config is not None:
            if not isinstance(vggblock_config, Iterable):
                raise ValueError("vggblock_config is not iterable")
            self.num_vggblocks = len(vggblock_config)

        self.conv_layers = nn.ModuleList()
        self.in_channels = in_channels
        self.input_dim = input_feat_per_channel
        self.pooling_kernel_sizes = []

        if vggblock_config is not None:
            for _, config in enumerate(vggblock_config):
                (
                    out_channels,
                    conv_kernel_size,
                    pooling_kernel_size,
                    num_conv_layers,
                    layer_norm,
                ) = config
                self.conv_layers.append(
                    VGGBlock(
                        in_channels,
                        out_channels,
                        conv_kernel_size,
                        pooling_kernel_size,
                        num_conv_layers,
                        input_dim=input_feat_per_channel,
                        layer_norm=layer_norm,
                    )
                )
                self.pooling_kernel_sizes.append(pooling_kernel_size)
                in_channels = out_channels
                input_feat_per_channel = self.conv_layers[-1].output_dim

        transformer_input_dim = self.infer_conv_output_dim(
            self.in_channels, self.input_dim
        )
        # transformer_input_dim is the output dimension of the VGG part

        self.validate_transformer_config(transformer_config)
        self.transformer_context = self.parse_transformer_context(transformer_context)
        self.transformer_sampling = self.parse_transformer_sampling(
            transformer_sampling, len(transformer_config)
        )

        self.transformer_layers = nn.ModuleList()

        if transformer_input_dim != transformer_config[0][0]:
            self.transformer_layers.append(
                Linear(transformer_input_dim, transformer_config[0][0])
            )
        self.transformer_layers.append(
            TransformerEncoderLayer(
                prepare_transformer_encoder_params(*transformer_config[0])
            )
        )

        for i in range(1, len(transformer_config)):
            if transformer_config[i - 1][0] != transformer_config[i][0]:
                self.transformer_layers.append(
                    Linear(transformer_config[i - 1][0], transformer_config[i][0])
                )
            self.transformer_layers.append(
                TransformerEncoderLayer(
                    prepare_transformer_encoder_params(*transformer_config[i])
                )
            )

        self.encoder_output_dim = encoder_output_dim
        self.transformer_layers.extend(
            [
                Linear(transformer_config[-1][0], encoder_output_dim),
                LayerNorm(encoder_output_dim),
            ]
        )

    def forward(self, src_tokens, src_lengths, **kwargs):
        """
        src_tokens: padded tensor (B, T, C * feat)
        src_lengths: tensor of original lengths of input utterances (B,)
        """
        bsz, max_seq_len, _ = src_tokens.size()
        x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
        x = x.transpose(1, 2).contiguous()  # (B, C, T, feat)

        for layer_idx in range(len(self.conv_layers)):
            x = self.conv_layers[layer_idx](x)

        bsz, _, output_seq_len, _ = x.size()

        # (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
        x = x.transpose(1, 2).transpose(0, 1)
        x = x.contiguous().view(output_seq_len, bsz, -1)

        input_lengths = src_lengths.clone()
        for s in self.pooling_kernel_sizes:
            input_lengths = (input_lengths.float() / s).ceil().long()

        encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
            input_lengths, batch_first=True
        )
        if not encoder_padding_mask.any():
            encoder_padding_mask = None

        subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
        attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)

        transformer_layer_idx = 0

        for layer_idx in range(len(self.transformer_layers)):

            if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
                x = self.transformer_layers[layer_idx](
                    x, encoder_padding_mask, attn_mask
                )

                if self.transformer_sampling[transformer_layer_idx] != 1:
                    sampling_factor = self.transformer_sampling[transformer_layer_idx]
                    x, encoder_padding_mask, attn_mask = self.slice(
                        x, encoder_padding_mask, attn_mask, sampling_factor
                    )

                transformer_layer_idx += 1

            else:
                x = self.transformer_layers[layer_idx](x)

        # encoder_padding_mask is a (T x B) tensor; its [t, b] element indicates
        # whether encoder_output[t, b] is valid or not (valid=0, invalid=1)

        return {
            "encoder_out": x,  # (T, B, C)
            "encoder_padding_mask": encoder_padding_mask.t()
            if encoder_padding_mask is not None
            else None,
            # (B, T) --> (T, B)
        }

    def infer_conv_output_dim(self, in_channels, input_dim):
        sample_seq_len = 200
        sample_bsz = 10
        x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
        for i, _ in enumerate(self.conv_layers):
            x = self.conv_layers[i](x)
        x = x.transpose(1, 2)
        mb, seq = x.size()[:2]
        return x.contiguous().view(mb, seq, -1).size(-1)

    def validate_transformer_config(self, transformer_config):
        for config in transformer_config:
            input_dim, num_heads = config[:2]
            if input_dim % num_heads != 0:
                msg = (
                    "ERROR in transformer config {}: ".format(config)
                    + "input dimension {} ".format(input_dim)
                    + "not divisible by number of heads {}".format(num_heads)
                )
                raise ValueError(msg)

    def parse_transformer_context(self, transformer_context):
        """
        transformer_context can be the following:
        -   None; indicates no context is used, i.e.,
            transformer can access the full context
        -   a tuple/list of two ints; indicates left and right context,
            any number < 0 indicates infinite context
                * e.g., (5, 6) indicates that for a query at x_t, the
                  transformer can access [t-5, t+6] (inclusive)
                * e.g., (-1, 6) indicates that for a query at x_t, the
                  transformer can access [0, t+6] (inclusive)
        """
        if transformer_context is None:
            return None

        if not isinstance(transformer_context, Iterable):
            raise ValueError("transformer context must be Iterable if it is not None")

        if len(transformer_context) != 2:
            raise ValueError("transformer context must have length 2")

        left_context = transformer_context[0]
        if left_context < 0:
            left_context = None

        right_context = transformer_context[1]
        if right_context < 0:
            right_context = None

        if left_context is None and right_context is None:
            return None

        return (left_context, right_context)

    def parse_transformer_sampling(self, transformer_sampling, num_layers):
        """
        parsing transformer sampling configuration

        Args:
            - transformer_sampling, accepted input:
                * None, indicating no sampling
                * an Iterable with int (>0) as elements
            - num_layers, expected number of transformer layers; must match
              the length of transformer_sampling if it is not None

        Returns:
            - A tuple with length num_layers
        """
        if transformer_sampling is None:
            return (1,) * num_layers

        if not isinstance(transformer_sampling, Iterable):
            raise ValueError(
                "transformer_sampling must be an iterable if it is not None"
            )

        if len(transformer_sampling) != num_layers:
            raise ValueError(
                "transformer_sampling {} does not match with the number "
                "of layers {}".format(transformer_sampling, num_layers)
            )

        for layer, value in enumerate(transformer_sampling):
            if not isinstance(value, int):
                raise ValueError("Invalid value in transformer_sampling: ")
            if value < 1:
                raise ValueError(
                    "{} layer's subsampling is {}.".format(layer, value)
                    + " This is not allowed! "
                )
        return transformer_sampling

    def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
        """
        embedding is a (T, B, D) tensor
        padding_mask is a (B, T) tensor or None
        attn_mask is a (T, T) tensor or None
        """
        embedding = embedding[::sampling_factor, :, :]
        if padding_mask is not None:
            padding_mask = padding_mask[:, ::sampling_factor]
        if attn_mask is not None:
            attn_mask = attn_mask[::sampling_factor, ::sampling_factor]

        return embedding, padding_mask, attn_mask

    def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
        """
        create attention mask according to sequence lengths and transformer context

        Args:
            - input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
              the length of the b-th sequence
            - subsampling_factor: int
                * Note that left_context and right_context are specified at
                  the input frame level, while the input to the transformer
                  may already have gone through subsampling (e.g., the use of
                  striding in vggblock); we use subsampling_factor to scale
                  the left/right context

        Return:
            - a (T, T) binary tensor or None, where T is max(input_lengths)
                * if self.transformer_context is None, None
                * if left_context is None,
                    * attn_mask[t, t + right_context + 1:] = 1
                    * others = 0
                * if right_context is None,
                    * attn_mask[t, 0:t - left_context] = 1
                    * others = 0
                * otherwise,
                    * attn_mask[t, t - left_context: t + right_context + 1] = 0
                    * others = 1
        """
        if self.transformer_context is None:
            return None

        maxT = torch.max(input_lengths).item()
        attn_mask = torch.zeros(maxT, maxT)

        left_context = self.transformer_context[0]
        right_context = self.transformer_context[1]
        if left_context is not None:
            left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
        if right_context is not None:
            right_context = math.ceil(self.transformer_context[1] / subsampling_factor)

        for t in range(maxT):
            if left_context is not None:
                st = 0
                en = max(st, t - left_context)
                attn_mask[t, st:en] = 1
            if right_context is not None:
                st = t + right_context + 1
                st = min(st, maxT - 1)
                attn_mask[t, st:] = 1

        return attn_mask.to(input_lengths.device)

    def reorder_encoder_out(self, encoder_out, new_order):
        encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
            1, new_order
        )
        if encoder_out["encoder_padding_mask"] is not None:
            encoder_out["encoder_padding_mask"] = encoder_out[
                "encoder_padding_mask"
            ].index_select(1, new_order)
        return encoder_out


class TransformerDecoder(FairseqIncrementalDecoder):
    """
    Transformer decoder consisting of *args.decoder_layers* layers. Each layer
    is a :class:`TransformerDecoderLayer`.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): whether to attend to encoder outputs.
            Default: ``False``
        left_pad (bool, optional): whether the input is left-padded.
            Default: ``False``
    """

    def __init__(
        self,
        dictionary,
        embed_dim=512,
        transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
        conv_config=DEFAULT_DEC_CONV_CONFIG,
        encoder_output_dim=512,
    ):
        super().__init__(dictionary)
        vocab_size = len(dictionary)
        self.padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)

        self.conv_layers = nn.ModuleList()
        for i in range(len(conv_config)):
            out_channels, kernel_size, layer_norm = conv_config[i]
            if i == 0:
                conv_layer = LinearizedConv1d(
                    embed_dim, out_channels, kernel_size, padding=kernel_size - 1
                )
            else:
                conv_layer = LinearizedConv1d(
                    conv_config[i - 1][0],
                    out_channels,
                    kernel_size,
                    padding=kernel_size - 1,
                )
            self.conv_layers.append(conv_layer)
            if layer_norm:
                self.conv_layers.append(nn.LayerNorm(out_channels))
            self.conv_layers.append(nn.ReLU())

        self.layers = nn.ModuleList()
        if conv_config[-1][0] != transformer_config[0][0]:
            self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
        self.layers.append(
            TransformerDecoderLayer(
                prepare_transformer_decoder_params(*transformer_config[0])
            )
        )

        for i in range(1, len(transformer_config)):
            if transformer_config[i - 1][0] != transformer_config[i][0]:
                self.layers.append(
                    Linear(transformer_config[i - 1][0], transformer_config[i][0])
                )
            self.layers.append(
                TransformerDecoderLayer(
                    prepare_transformer_decoder_params(*transformer_config[i])
                )
            )
        self.fc_out = Linear(transformer_config[-1][0], vocab_size)

    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`
        Returns:
            tuple:
                - the last decoder layer's output of shape `(batch, tgt_len,
                  vocab)`
                - the last decoder layer's attention weights of shape `(batch,
                  tgt_len, src_len)`
        """
        target_padding_mask = (
            (prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
            if incremental_state is None
            else None
        )

        if incremental_state is not None:
            prev_output_tokens = prev_output_tokens[:, -1:]

        # embed tokens
        x = self.embed_tokens(prev_output_tokens)

        # B x T x C -> T x B x C
        x = self._transpose_if_training(x, incremental_state)

        for layer in self.conv_layers:
            if isinstance(layer, LinearizedConvolution):
                x = layer(x, incremental_state)
            else:
                x = layer(x)

        # B x T x C -> T x B x C
        x = self._transpose_if_inference(x, incremental_state)

        # decoder layers
        for layer in self.layers:
            if isinstance(layer, TransformerDecoderLayer):
                x, *_ = layer(
                    x,
                    (encoder_out["encoder_out"] if encoder_out is not None else None),
                    (
                        encoder_out["encoder_padding_mask"].t()
                        if encoder_out["encoder_padding_mask"] is not None
                        else None
                    ),
                    incremental_state,
                    self_attn_mask=(
                        self.buffered_future_mask(x)
                        if incremental_state is None
                        else None
                    ),
                    self_attn_padding_mask=(
                        target_padding_mask if incremental_state is None else None
                    ),
                )
            else:
                x = layer(x)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        x = self.fc_out(x)

        return x, None

    def buffered_future_mask(self, tensor):
        dim = tensor.size(0)
        if (
            not hasattr(self, "_future_mask")
            or self._future_mask is None
            or self._future_mask.device != tensor.device
        ):
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
            )
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
            )
        return self._future_mask[:dim, :dim]

    def _transpose_if_training(self, x, incremental_state):
        if incremental_state is None:
            x = x.transpose(0, 1)
        return x

    def _transpose_if_inference(self, x, incremental_state):
        if incremental_state:
            x = x.transpose(0, 1)
        return x


@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
    def __init__(self, encoder):
        super().__init__(encoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument(
            "--input-feat-per-channel",
            type=int,
            metavar="N",
            help="encoder input dimension per input channel",
        )
        parser.add_argument(
            "--vggblock-enc-config",
            type=str,
            metavar="EXPR",
            help="""
    an array of tuples each containing the configuration of one vggblock
    [(out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers), ...]
            """,
        )
        parser.add_argument(
            "--transformer-enc-config",
            type=str,
            metavar="EXPR",
            help="""
    a tuple containing the configuration of the Transformer layers:
    [(input_dim, num_heads, ffn_dim, normalize_before,
      dropout, attention_dropout, relu_dropout), ]""",
        )
        parser.add_argument(
            "--enc-output-dim",
            type=int,
            metavar="N",
            help="encoder output dimension, projecting the transformer output",
        )
        parser.add_argument(
            "--in-channels",
            type=int,
            metavar="N",
            help="number of encoder input channels",
        )
        parser.add_argument(
            "--transformer-context",
            type=str,
            metavar="EXPR",
            help="""
    either None or a tuple of two ints, indicating the left/right context a
    transformer can have access to""",
        )
        parser.add_argument(
            "--transformer-sampling",
            type=str,
            metavar="EXPR",
            help="""
    either None or a tuple of ints, indicating the sampling factor in each layer""",
        )

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        base_architecture_enconly(args)
        encoder = VGGTransformerEncoderOnly(
            vocab_size=len(task.target_dictionary),
            input_feat_per_channel=args.input_feat_per_channel,
            vggblock_config=eval(args.vggblock_enc_config),
            transformer_config=eval(args.transformer_enc_config),
            encoder_output_dim=args.enc_output_dim,
            in_channels=args.in_channels,
            transformer_context=eval(args.transformer_context),
            transformer_sampling=eval(args.transformer_sampling),
        )
        return cls(encoder)

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        # net_output['encoder_out'] is a (T, B, D) tensor
        lprobs = super().get_normalized_probs(net_output, log_probs, sample)
        # lprobs is a (T, B, D) tensor
        # we need to transpose to get a (B, T, D) tensor
        lprobs = lprobs.transpose(0, 1).contiguous()
        lprobs.batch_first = True
        return lprobs


class VGGTransformerEncoderOnly(VGGTransformerEncoder):
    def __init__(
        self,
        vocab_size,
        input_feat_per_channel,
        vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
        transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
        encoder_output_dim=512,
        in_channels=1,
        transformer_context=None,
        transformer_sampling=None,
    ):
        super().__init__(
            input_feat_per_channel=input_feat_per_channel,
            vggblock_config=vggblock_config,
            transformer_config=transformer_config,
            encoder_output_dim=encoder_output_dim,
            in_channels=in_channels,
            transformer_context=transformer_context,
            transformer_sampling=transformer_sampling,
        )
        self.fc_out = Linear(self.encoder_output_dim, vocab_size)

    def forward(self, src_tokens, src_lengths, **kwargs):
        """
        src_tokens: padded tensor (B, T, C * feat)
        src_lengths: tensor of original lengths of input utterances (B,)
        """

        enc_out = super().forward(src_tokens, src_lengths)
        x = self.fc_out(enc_out["encoder_out"])
        # x = F.log_softmax(x, dim=-1)
        # Note: no need for this line, because model.get_normalized_probs will
        # call log_softmax
        return {
            "encoder_out": x,  # (T, B, C)
            "encoder_padding_mask": enc_out["encoder_padding_mask"],  # (T, B)
        }

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return (1e6, 1e6)  # an arbitrary large number


def Embedding(num_embeddings, embedding_dim, padding_idx):
    m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    # nn.init.uniform_(m.weight, -0.1, 0.1)
    # nn.init.constant_(m.weight[padding_idx], 0)
    return m


def Linear(in_features, out_features, bias=True, dropout=0):
    """Linear layer (input: N x T x C)"""
    m = nn.Linear(in_features, out_features, bias=bias)
    # m.weight.data.uniform_(-0.1, 0.1)
    # if bias:
    #     m.bias.data.uniform_(-0.1, 0.1)
    return m


def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
    """Weight-normalized Conv1d layer optimized for decoding"""
    m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
    std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
    nn.init.normal_(m.weight, mean=0, std=std)
    nn.init.constant_(m.bias, 0)
    return nn.utils.weight_norm(m, dim=2)


def LayerNorm(embedding_dim):
    m = nn.LayerNorm(embedding_dim)
    return m


# seq2seq models
def base_architecture(args):
    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
    args.vggblock_enc_config = getattr(
        args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG
    )
    args.transformer_enc_config = getattr(
        args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG
    )
    args.enc_output_dim = getattr(args, "enc_output_dim", 512)
    args.in_channels = getattr(args, "in_channels", 1)
    args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
    args.transformer_dec_config = getattr(
        args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG
    )
    args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG)
    args.transformer_context = getattr(args, "transformer_context", "None")


@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
    args.vggblock_enc_config = getattr(
        args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
    )
    args.transformer_enc_config = getattr(
        args,
        "transformer_enc_config",
        "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14",
    )
    args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
    args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
    args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
    args.transformer_dec_config = getattr(
        args,
        "transformer_dec_config",
        "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4",
    )


@register_model_architecture("asr_vggtransformer", "vggtransformer_2")
def vggtransformer_2(args):
    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
    args.vggblock_enc_config = getattr(
        args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
    )
    args.transformer_enc_config = getattr(
        args,
        "transformer_enc_config",
        "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
    )
    args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
    args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
    args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
    args.transformer_dec_config = getattr(
        args,
        "transformer_dec_config",
        "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6",
    )


@register_model_architecture("asr_vggtransformer", "vggtransformer_base")
def vggtransformer_base(args):
    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
    args.vggblock_enc_config = getattr(
        args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
    )
    args.transformer_enc_config = getattr(
        args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12"
    )
    args.enc_output_dim = getattr(args, "enc_output_dim", 512)
    args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
    args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
    args.transformer_dec_config = getattr(
        args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
    )


# Size estimations:
# Encoder:
#   - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3 = 258K
#   Transformer:
#   - input dimension adapter: 2560 x 512 -> 1.31M
#   - transformer_layers (x12) --> 37.74M
#       * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
#       * FFN weight: 512*2048*2 = 2.097M
#   - output dimension adapter: 512 x 512 -> 0.26 M
# Decoder:
#   - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
#   - transformer_layer: (x6) --> 25.16M
#       * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
#       * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
#       * FFN: 512*2048*2 = 2.097M
# Final FC:
#   - FC: 512*5000 = 256K (assuming vocab size 5K)
# In total:
#   ~65 M


# CTC models
def base_architecture_enconly(args):
    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
    args.vggblock_enc_config = getattr(
        args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"
    )
    args.transformer_enc_config = getattr(
        args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
    )
    args.enc_output_dim = getattr(args, "enc_output_dim", 512)
    args.in_channels = getattr(args, "in_channels", 1)
    args.transformer_context = getattr(args, "transformer_context", "None")
    args.transformer_sampling = getattr(args, "transformer_sampling", "None")


@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
def vggtransformer_enc_1(args):
    # vggtransformer_enc_1 is the same as vggtransformer_enc_big, except the
    # number of layers is increased to 16
    # keep it here for backward compatibility purposes
    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
    args.vggblock_enc_config = getattr(
        args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
    )
    args.transformer_enc_config = getattr(
        args,
        "transformer_enc_config",
        "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
    )
    args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
37,260
35.494613
88
py
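After each VGG block's pooling, the encoder's forward pass above recomputes the valid lengths as ceil(length / pooling_kernel_size) before building the padding mask. A minimal sketch of that bookkeeping; the pooling sizes and lengths are invented, and the mask shown is only an analog of what lengths_to_encoder_padding_mask returns with batch_first=True:

import torch

src_lengths = torch.tensor([100, 73, 40])
pooling_kernel_sizes = [2, 2]  # hypothetical, one per VGG block

input_lengths = src_lengths.clone()
for s in pooling_kernel_sizes:
    input_lengths = (input_lengths.float() / s).ceil().long()
print(input_lengths)  # tensor([25, 19, 10])

# (B, T) padding mask: True where the position is beyond the valid length.
T = int(input_lengths.max())
padding_mask = torch.arange(T).unsqueeze(0) >= input_lengths.unsqueeze(1)
print(padding_mask.shape)  # torch.Size([3, 25])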
sign-topic
sign-topic-main/examples/speech_recognition/models/w2l_conv_glu_enc.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.modules.fairseq_dropout import FairseqDropout default_conv_enc_config = """[ (400, 13, 170, 0.2), (440, 14, 0, 0.214), (484, 15, 0, 0.22898), (532, 16, 0, 0.2450086), (584, 17, 0, 0.262159202), (642, 18, 0, 0.28051034614), (706, 19, 0, 0.30014607037), (776, 20, 0, 0.321156295296), (852, 21, 0, 0.343637235966), (936, 22, 0, 0.367691842484), (1028, 23, 0, 0.393430271458), (1130, 24, 0, 0.42097039046), (1242, 25, 0, 0.450438317792), (1366, 26, 0, 0.481969000038), (1502, 27, 0, 0.51570683004), (1652, 28, 0, 0.551806308143), (1816, 29, 0, 0.590432749713), ]""" @register_model("asr_w2l_conv_glu_encoder") class W2lConvGluEncoderModel(FairseqEncoderModel): def __init__(self, encoder): super().__init__(encoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--input-feat-per-channel", type=int, metavar="N", help="encoder input dimension per input channel", ) parser.add_argument( "--in-channels", type=int, metavar="N", help="number of encoder input channels", ) parser.add_argument( "--conv-enc-config", type=str, metavar="EXPR", help=""" an array of tuples each containing the configuration of one conv layer [(out_channels, kernel_size, padding, dropout), ...] """, ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config) encoder = W2lConvGluEncoder( vocab_size=len(task.target_dictionary), input_feat_per_channel=args.input_feat_per_channel, in_channels=args.in_channels, conv_enc_config=eval(conv_enc_config), ) return cls(encoder) def get_normalized_probs(self, net_output, log_probs, sample=None): lprobs = super().get_normalized_probs(net_output, log_probs, sample) lprobs.batch_first = False return lprobs class W2lConvGluEncoder(FairseqEncoder): def __init__( self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config ): super().__init__(None) self.input_dim = input_feat_per_channel if in_channels != 1: raise ValueError("only 1 input channel is currently supported") self.conv_layers = nn.ModuleList() self.linear_layers = nn.ModuleList() self.dropouts = [] cur_channels = input_feat_per_channel for out_channels, kernel_size, padding, dropout in conv_enc_config: layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding) layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init self.conv_layers.append(nn.utils.weight_norm(layer)) self.dropouts.append( FairseqDropout(dropout, module_name=self.__class__.__name__) ) if out_channels % 2 != 0: raise ValueError("odd # of out_channels is incompatible with GLU") cur_channels = out_channels // 2 # halved by GLU for out_channels in [2 * cur_channels, vocab_size]: layer = nn.Linear(cur_channels, out_channels) layer.weight.data.mul_(math.sqrt(3)) self.linear_layers.append(nn.utils.weight_norm(layer)) cur_channels = out_channels // 2 def forward(self, src_tokens, src_lengths, **kwargs): """ src_tokens: padded tensor (B, T, C * feat) src_lengths: tensor of original lengths of input utterances (B,) """ B, T, _ = src_tokens.size() x = src_tokens.transpose(1, 2).contiguous() # (B, feat, 
T) assuming C == 1 for layer_idx in range(len(self.conv_layers)): x = self.conv_layers[layer_idx](x) x = F.glu(x, dim=1) x = self.dropouts[layer_idx](x) x = x.transpose(1, 2).contiguous() # (B, T, 908) x = self.linear_layers[0](x) x = F.glu(x, dim=2) x = self.dropouts[-1](x) x = self.linear_layers[1](x) assert x.size(0) == B assert x.size(1) == T encoder_out = x.transpose(0, 1) # (T, B, vocab_size) # need to debug this -- find a simpler/elegant way in pytorch APIs encoder_padding_mask = ( torch.arange(T).view(1, T).expand(B, -1).to(x.device) >= src_lengths.view(B, 1).expand(-1, T) ).t() # (B x T) -> (T x B) return { "encoder_out": encoder_out, # (T, B, vocab_size) "encoder_padding_mask": encoder_padding_mask, # (T, B) } def reorder_encoder_out(self, encoder_out, new_order): encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(1, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return (1e6, 1e6) # an arbitrary large number @register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc") def w2l_conv_glu_enc(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.in_channels = getattr(args, "in_channels", 1) args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
6,078
33.151685
87
py
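GLU gates one half of the channels with the sigmoid of the other half, which is why the encoder above requires an even out_channels and tracks cur_channels = out_channels // 2 after every layer. A quick shape check (the tensor sizes are invented):

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 50)      # (B, channels, T)
y = F.glu(x, dim=1)            # first half * sigmoid(second half)
print(x.shape, "->", y.shape)  # torch.Size([2, 8, 50]) -> torch.Size([2, 4, 50])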
sign-topic
sign-topic-main/examples/speech_recognition/datasets/asr_prep_json.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

import argparse
import concurrent.futures
import json
import multiprocessing
import os
from collections import namedtuple
from itertools import chain

import sentencepiece as spm
from fairseq.data import Dictionary


MILLISECONDS_TO_SECONDS = 0.001


def process_sample(aud_path, label, utt_id, sp, tgt_dict):
    import torchaudio

    input = {}
    output = {}
    si, ei = torchaudio.info(aud_path)
    input["length_ms"] = int(
        si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS
    )
    input["path"] = aud_path

    token = " ".join(sp.EncodeAsPieces(label))
    ids = tgt_dict.encode_line(token, append_eos=False)

    output["text"] = label
    output["token"] = token
    output["tokenid"] = ", ".join(map(str, [t.tolist() for t in ids]))
    return {utt_id: {"input": input, "output": output}}


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--audio-dirs",
        nargs="+",
        default=["-"],
        required=True,
        help="input directories with audio files",
    )
    parser.add_argument(
        "--labels",
        required=True,
        help="aggregated input labels with format <ID LABEL> per line",
        type=argparse.FileType("r", encoding="UTF-8"),
    )
    parser.add_argument(
        "--spm-model",
        required=True,
        help="sentencepiece model to use for encoding",
        type=argparse.FileType("r", encoding="UTF-8"),
    )
    parser.add_argument(
        "--dictionary",
        required=True,
        help="file to load fairseq dictionary from",
        type=argparse.FileType("r", encoding="UTF-8"),
    )
    parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav")
    parser.add_argument(
        "--output",
        required=True,
        type=argparse.FileType("w"),
        help="path to save json output",
    )
    args = parser.parse_args()

    sp = spm.SentencePieceProcessor()
    sp.Load(args.spm_model.name)

    tgt_dict = Dictionary.load(args.dictionary)

    labels = {}
    for line in args.labels:
        (utt_id, label) = line.split(" ", 1)
        labels[utt_id] = label

    if len(labels) == 0:
        raise Exception("No labels found in ", args.labels.name)

    Sample = namedtuple("Sample", "aud_path utt_id")
    samples = []
    for path, _, files in chain.from_iterable(
        os.walk(path) for path in args.audio_dirs
    ):
        for f in files:
            if f.endswith(args.audio_format):
                if len(os.path.splitext(f)) != 2:
                    raise Exception("Expect <utt_id.extension> file name. Got: ", f)
                utt_id = os.path.splitext(f)[0]
                if utt_id not in labels:
                    continue
                samples.append(Sample(os.path.join(path, f), utt_id))

    utts = {}
    num_cpu = multiprocessing.cpu_count()
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor:
        future_to_sample = {
            executor.submit(
                process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict
            ): s
            for s in samples
        }
        for future in concurrent.futures.as_completed(future_to_sample):
            try:
                data = future.result()
            except Exception as exc:
                print("generated an exception: ", exc)
            else:
                utts.update(data)
    json.dump({"utts": utts}, args.output, indent=4)


if __name__ == "__main__":
    main()
3,775
28.968254
84
py
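For reference, the script above emits one record per utterance keyed by utt_id, with the fields built in process_sample. A hypothetical record (the path, id, pieces, and token ids are invented; only the key structure comes from the code):

import json

example = {
    "utts": {
        "utt0001": {
            "input": {"length_ms": 2450, "path": "/data/wav/utt0001.wav"},
            "output": {
                "text": "hello world",
                "token": "▁hello ▁world",
                "tokenid": "21, 87",
            },
        }
    }
}
print(json.dumps(example, indent=4))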
sign-topic
sign-topic-main/examples/speech_recognition/new/infer.py
#!/usr/bin/env python -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ast import hashlib import logging import os import shutil import sys from dataclasses import dataclass, field, is_dataclass from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union import editdistance import torch import torch.distributed as dist from examples.speech_recognition.new.decoders.decoder_config import ( DecoderConfig, FlashlightDecoderConfig, ) from examples.speech_recognition.new.decoders.decoder import Decoder from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils from fairseq.data.data_utils import post_process from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, FairseqDataclass, ) from fairseq.logging.meters import StopwatchMeter, TimeMeter from fairseq.logging.progress_bar import BaseProgressBar from fairseq.models.fairseq_model import FairseqModel from omegaconf import OmegaConf import hydra from hydra.core.config_store import ConfigStore logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) config_path = Path(__file__).resolve().parent / "conf" @dataclass class DecodingConfig(DecoderConfig, FlashlightDecoderConfig): unique_wer_file: bool = field( default=False, metadata={"help": "If set, use a unique file for storing WER"}, ) results_path: Optional[str] = field( default=None, metadata={ "help": "If set, write hypothesis and reference sentences into this directory" }, ) @dataclass class InferConfig(FairseqDataclass): task: Any = None decoding: DecodingConfig = DecodingConfig() common: CommonConfig = CommonConfig() common_eval: CommonEvalConfig = CommonEvalConfig() checkpoint: CheckpointConfig = CheckpointConfig() distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() dataset: DatasetConfig = DatasetConfig() is_ax: bool = field( default=False, metadata={ "help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume" }, ) def reset_logging(): root = logging.getLogger() for handler in root.handlers: root.removeHandler(handler) root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper()) handler = logging.StreamHandler(sys.stdout) handler.setFormatter( logging.Formatter( fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) ) root.addHandler(handler) class InferenceProcessor: cfg: InferConfig def __init__(self, cfg: InferConfig) -> None: self.cfg = cfg self.task = tasks.setup_task(cfg.task) models, saved_cfg = self.load_model_ensemble() self.models = models self.saved_cfg = saved_cfg self.tgt_dict = self.task.target_dictionary self.task.load_dataset( self.cfg.dataset.gen_subset, task_cfg=saved_cfg.task, ) self.generator = Decoder(cfg.decoding, self.tgt_dict) self.gen_timer = StopwatchMeter() self.wps_meter = TimeMeter() self.num_sentences = 0 self.total_errors = 0 self.total_length = 0 self.hypo_words_file = None self.hypo_units_file = None self.ref_words_file = None self.ref_units_file = None self.progress_bar = self.build_progress_bar() def __enter__(self) -> "InferenceProcessor": if self.cfg.decoding.results_path is not None: self.hypo_words_file = self.get_res_file("hypo.word") self.hypo_units_file = self.get_res_file("hypo.units") self.ref_words_file = self.get_res_file("ref.word") 
self.ref_units_file = self.get_res_file("ref.units") return self def __exit__(self, *exc) -> bool: if self.cfg.decoding.results_path is not None: self.hypo_words_file.close() self.hypo_units_file.close() self.ref_words_file.close() self.ref_units_file.close() return False def __iter__(self) -> Any: for sample in self.progress_bar: if not self.cfg.common.cpu: sample = utils.move_to_cuda(sample) # Happens on the last batch. if "net_input" not in sample: continue yield sample def log(self, *args, **kwargs): self.progress_bar.log(*args, **kwargs) def print(self, *args, **kwargs): self.progress_bar.print(*args, **kwargs) def get_res_file(self, fname: str) -> None: fname = os.path.join(self.cfg.decoding.results_path, fname) if self.data_parallel_world_size > 1: fname = f"{fname}.{self.data_parallel_rank}" return open(fname, "w", buffering=1) def merge_shards(self) -> None: """Merges all shard files into shard 0, then removes shard suffix.""" shard_id = self.data_parallel_rank num_shards = self.data_parallel_world_size if self.data_parallel_world_size > 1: def merge_shards_with_root(fname: str) -> None: fname = os.path.join(self.cfg.decoding.results_path, fname) logger.info("Merging %s on shard %d", fname, shard_id) base_fpath = Path(f"{fname}.0") with open(base_fpath, "a") as out_file: for s in range(1, num_shards): shard_fpath = Path(f"{fname}.{s}") with open(shard_fpath, "r") as in_file: for line in in_file: out_file.write(line) shard_fpath.unlink() shutil.move(f"{fname}.0", fname) dist.barrier() # ensure all shards finished writing if shard_id == (0 % num_shards): merge_shards_with_root("hypo.word") if shard_id == (1 % num_shards): merge_shards_with_root("hypo.units") if shard_id == (2 % num_shards): merge_shards_with_root("ref.word") if shard_id == (3 % num_shards): merge_shards_with_root("ref.units") dist.barrier() def optimize_model(self, model: FairseqModel) -> None: model.make_generation_fast_() if self.cfg.common.fp16: model.half() if not self.cfg.common.cpu: model.cuda() def load_model_ensemble(self) -> Tuple[List[FairseqModel], FairseqDataclass]: arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides) models, saved_cfg = checkpoint_utils.load_model_ensemble( utils.split_paths(self.cfg.common_eval.path, separator="\\"), arg_overrides=arg_overrides, task=self.task, suffix=self.cfg.checkpoint.checkpoint_suffix, strict=(self.cfg.checkpoint.checkpoint_shard_count == 1), num_shards=self.cfg.checkpoint.checkpoint_shard_count, ) for model in models: self.optimize_model(model) return models, saved_cfg def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None: return self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.gen_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=(sys.maxsize, sys.maxsize), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ).next_epoch_itr(shuffle=False) def build_progress_bar( self, epoch: Optional[int] = None, prefix: Optional[str] = None, default_log_format: str = "tqdm", ) -> BaseProgressBar: return progress_bar.progress_bar( iterator=self.get_dataset_itr(), log_format=self.cfg.common.log_format, 
log_interval=self.cfg.common.log_interval, epoch=epoch, prefix=prefix, tensorboard_logdir=self.cfg.common.tensorboard_logdir, default_log_format=default_log_format, ) @property def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() @property def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() def process_sentence( self, sample: Dict[str, Any], hypo: Dict[str, Any], sid: int, batch_id: int, ) -> Tuple[int, int]: speaker = None # Speaker can't be parsed from dataset. if "target_label" in sample: toks = sample["target_label"] else: toks = sample["target"] toks = toks[batch_id, :] # Processes hypothesis. hyp_pieces = self.tgt_dict.string(hypo["tokens"].int().cpu()) if "words" in hypo: hyp_words = " ".join(hypo["words"]) else: hyp_words = post_process(hyp_pieces, self.cfg.common_eval.post_process) # Processes target. target_tokens = utils.strip_pad(toks, self.tgt_dict.pad()) tgt_pieces = self.tgt_dict.string(target_tokens.int().cpu()) tgt_words = post_process(tgt_pieces, self.cfg.common_eval.post_process) if self.cfg.decoding.results_path is not None: print(f"{hyp_pieces} ({speaker}-{sid})", file=self.hypo_units_file) print(f"{hyp_words} ({speaker}-{sid})", file=self.hypo_words_file) print(f"{tgt_pieces} ({speaker}-{sid})", file=self.ref_units_file) print(f"{tgt_words} ({speaker}-{sid})", file=self.ref_words_file) if not self.cfg.common_eval.quiet: logger.info(f"HYPO: {hyp_words}") logger.info(f"REF: {tgt_words}") logger.info("---------------------") hyp_words, tgt_words = hyp_words.split(), tgt_words.split() return editdistance.eval(hyp_words, tgt_words), len(tgt_words) def process_sample(self, sample: Dict[str, Any]) -> None: self.gen_timer.start() hypos = self.task.inference_step( generator=self.generator, models=self.models, sample=sample, ) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) self.gen_timer.stop(num_generated_tokens) self.wps_meter.update(num_generated_tokens) for batch_id, sample_id in enumerate(sample["id"].tolist()): errs, length = self.process_sentence( sample=sample, sid=sample_id, batch_id=batch_id, hypo=hypos[batch_id][0], ) self.total_errors += errs self.total_length += length self.log({"wps": round(self.wps_meter.avg)}) if "nsentences" in sample: self.num_sentences += sample["nsentences"] else: self.num_sentences += sample["id"].numel() def log_generation_time(self) -> None: logger.info( "Processed %d sentences (%d tokens) in %.1fs %.2f " "sentences per second, %.2f tokens per second)", self.num_sentences, self.gen_timer.n, self.gen_timer.sum, self.num_sentences / self.gen_timer.sum, 1.0 / self.gen_timer.avg, ) def parse_wer(wer_file: Path) -> float: with open(wer_file, "r") as f: return float(f.readline().strip().split(" ")[1]) def get_wer_file(cfg: InferConfig) -> Path: """Hashes the decoding parameters to a unique file ID.""" base_path = "wer" if cfg.decoding.results_path is not None: base_path = os.path.join(cfg.decoding.results_path, base_path) if cfg.decoding.unique_wer_file: yaml_str = OmegaConf.to_yaml(cfg.decoding) fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16) return Path(f"{base_path}.{fid % 1000000}") else: return Path(base_path) def main(cfg: InferConfig) -> float: """Entry point for main processing logic. Args: cfg: The inferance configuration to use. wer: Optional shared memory pointer for returning the WER. 

    Returns:
        The final WER.
    """
    yaml_str, wer_file = OmegaConf.to_yaml(cfg.decoding), get_wer_file(cfg)

    # Validates the provided configuration.
    if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
        cfg.dataset.max_tokens = 4000000
    if not cfg.common.cpu and not torch.cuda.is_available():
        raise ValueError("CUDA not found; set `cpu=True` to run without CUDA")

    with InferenceProcessor(cfg) as processor:
        for sample in processor:
            processor.process_sample(sample)
        processor.log_generation_time()

        if cfg.decoding.results_path is not None:
            processor.merge_shards()

        errs_t, leng_t = processor.total_errors, processor.total_length
        if cfg.common.cpu:
            logger.warning("Merging WER requires CUDA.")
        elif processor.data_parallel_world_size > 1:
            stats = torch.LongTensor([errs_t, leng_t]).cuda()
            dist.all_reduce(stats, op=dist.ReduceOp.SUM)
            errs_t, leng_t = stats[0].item(), stats[1].item()

        wer = errs_t * 100.0 / leng_t
        if distributed_utils.is_master(cfg.distributed_training):
            with open(wer_file, "w") as f:
                f.write(
                    (
                        f"WER: {wer}\n"
                        f"err / num_ref_words = {errs_t} / {leng_t}\n\n"
                        f"{yaml_str}"
                    )
                )
        return wer


@hydra.main(config_path=config_path, config_name="infer")
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
    container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
    cfg = OmegaConf.create(container)
    OmegaConf.set_struct(cfg, True)

    if cfg.common.reset_logging:
        reset_logging()

    # logger.info("Config:\n%s", OmegaConf.to_yaml(cfg))

    wer = float("inf")
    try:
        if cfg.common.profile:
            with torch.cuda.profiler.profile():
                with torch.autograd.profiler.emit_nvtx():
                    distributed_utils.call_main(cfg, main)
        else:
            distributed_utils.call_main(cfg, main)
        wer = parse_wer(get_wer_file(cfg))
    except BaseException as e:  # pylint: disable=broad-except
        if not cfg.common.suppress_crashes:
            raise
        else:
            logger.error("Crashed! %s", str(e))

    logger.info("Word error rate: %.4f", wer)
    if cfg.is_ax:
        return wer, None
    return wer


def cli_main() -> None:
    try:
        from hydra._internal.utils import (
            get_args,
        )  # pylint: disable=import-outside-toplevel

        cfg_name = get_args().config_name or "infer"
    except ImportError:
        logger.warning("Failed to get config name from hydra args")
        cfg_name = "infer"

    cs = ConfigStore.instance()
    cs.store(name=cfg_name, node=InferConfig)

    for k in InferConfig.__dataclass_fields__:
        if is_dataclass(InferConfig.__dataclass_fields__[k].type):
            v = InferConfig.__dataclass_fields__[k].default
            cs.store(name=k, node=v)

    hydra_main()  # pylint: disable=no-value-for-parameter


if __name__ == "__main__":
    cli_main()
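
# --- Editor's sketch (not part of the original fairseq file) ---
# A minimal, self-contained illustration of the WER arithmetic used by
# process_sentence() and main() above: per-sentence word edit distances and
# reference lengths are summed over the corpus, then
# WER = 100 * total_errors / total_ref_words. The sentence pairs and the
# helper name _example_corpus_wer are invented for illustration; the function
# is defined but never called, so it has no effect at import time.
def _example_corpus_wer() -> float:
    import editdistance  # same dependency process_sentence() relies on

    pairs = [  # (hypothesis, reference) word strings
        ("the cat sat", "the cat sat down"),  # 1 deletion error
        ("hello word", "hello world"),        # 1 substitution error
    ]
    errs = sum(editdistance.eval(h.split(), r.split()) for h, r in pairs)
    length = sum(len(r.split()) for _, r in pairs)
    return errs * 100.0 / length  # 2 errors / 6 ref words -> 33.33...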
16,498
33.955508
103
py
sign-topic
sign-topic-main/examples/speech_recognition/new/decoders/base_decoder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import itertools as it
from typing import Any, Dict, List

import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.fairseq_model import FairseqModel


class BaseDecoder:
    def __init__(self, tgt_dict: Dictionary) -> None:
        self.tgt_dict = tgt_dict
        self.vocab_size = len(tgt_dict)

        self.blank = (
            tgt_dict.index("<ctc_blank>")
            if "<ctc_blank>" in tgt_dict.indices
            else tgt_dict.bos()
        )
        if "<sep>" in tgt_dict.indices:
            self.silence = tgt_dict.index("<sep>")
        elif "|" in tgt_dict.indices:
            self.silence = tgt_dict.index("|")
        else:
            self.silence = tgt_dict.eos()

    def generate(
        self, models: List[FairseqModel], sample: Dict[str, Any], **unused
    ) -> List[List[Dict[str, torch.LongTensor]]]:
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        return self.decode(emissions)

    def get_emissions(
        self,
        models: List[FairseqModel],
        encoder_input: Dict[str, Any],
    ) -> torch.FloatTensor:
        model = models[0]
        encoder_out = model(**encoder_input)
        if hasattr(model, "get_logits"):
            emissions = model.get_logits(encoder_out)
        else:
            emissions = model.get_normalized_probs(encoder_out, log_probs=True)
        return emissions.transpose(0, 1).float().cpu().contiguous()

    def get_tokens(self, idxs: torch.IntTensor) -> torch.LongTensor:
        idxs = (g[0] for g in it.groupby(idxs))
        idxs = filter(lambda x: x != self.blank, idxs)
        return torch.LongTensor(list(idxs))

    def decode(
        self,
        emissions: torch.FloatTensor,
    ) -> List[List[Dict[str, torch.LongTensor]]]:
        raise NotImplementedError
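
# --- Editor's sketch (not part of the original file) ---
# A minimal best-path (greedy CTC) subclass showing how decode() is meant to
# be implemented: take the argmax token per frame, then let get_tokens()
# collapse consecutive repeats and drop blanks. The class name is invented
# for illustration; the repo ships its real subclasses alongside this file.
class GreedyCTCDecoder(BaseDecoder):
    def decode(
        self,
        emissions: torch.FloatTensor,
    ) -> List[List[Dict[str, torch.LongTensor]]]:
        # emissions has shape (batch, time, vocab) after get_emissions().
        best_paths = emissions.argmax(dim=-1)  # (batch, time)
        return [
            # .tolist() hands get_tokens() plain ints, which groupby/filter
            # and the LongTensor constructor all handle cleanly.
            [{"tokens": self.get_tokens(path.tolist()), "score": 0}]
            for path in best_paths
        ]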
2,093
32.238095
85
py