Dataset schema:
  id         int64           0 to 190k
  prompt     string, length  21 to 13.4M
  docstring  string, length  1 to 12k
185,239
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import logging
import glob
import json
import argparse
import math
import string
from multiprocessing import Pool, cpu_count
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
import rouge
import time
import tempfile
import shutil

from pytorch_pretrained_bert.tokenization import BertTokenizer
from cnndm.bs_pyrouge import Rouge155

# `parser` (an argparse.ArgumentParser) is assumed to be defined by the surrounding script.
args = parser.parse_args()

evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False,
                        apply_avg=True, weight_factor=1.2)


def test_rouge(cand, ref):
    temp_dir = tempfile.mkdtemp()
    candidates = cand
    references = ref
    assert len(candidates) == len(references)
    cnt = len(candidates)
    current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)
        os.mkdir(tmp_dir + "/candidate")
        os.mkdir(tmp_dir + "/reference")
    try:
        for i in range(cnt):
            if len(references[i]) < 1:
                continue
            with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f:
                f.write(candidates[i])
            with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f:
                f.write(references[i])
        r = Rouge155(temp_dir=temp_dir)
        r.model_dir = tmp_dir + "/reference/"
        r.system_dir = tmp_dir + "/candidate/"
        r.model_filename_pattern = 'ref.#ID#.txt'
        r.system_filename_pattern = r'cand.(\d+).txt'
        rouge_results = r.convert_and_evaluate()
        print(rouge_results)
        results_dict = r.output_to_dict(rouge_results)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
    return results_dict


def get_f1(text_a, text_b):
    # `count_tokens` is a helper defined elsewhere in this script.
    tokens_a = text_a.lower().split()
    tokens_b = text_b.lower().split()
    if len(tokens_a) == 0 or len(tokens_b) == 0:
        return 1 if len(tokens_a) == len(tokens_b) else 0
    set_a = count_tokens(tokens_a)
    set_b = count_tokens(tokens_b)
    match = 0
    for token in set_a.keys():
        if token in set_b.keys():
            match += min(set_a[token], set_b[token])
    p = match / len(tokens_a)
    r = match / len(tokens_b)
    return 2.0 * p * r / (p + r + 1e-5)


def fix_tokenization(text):
    # `_tok_dict` and `_is_digit` are module-level helpers defined elsewhere in the script.
    input_tokens = text.split()
    output_tokens = []
    has_left_quote = False
    has_left_single_quote = False
    i = 0
    prev_dash = False
    while i < len(input_tokens):
        tok = input_tokens[i]
        flag_prev_dash = False
        if tok in _tok_dict.keys():
            output_tokens.append(_tok_dict[tok])
            i += 1
        elif tok == "\"":
            if has_left_quote:
                output_tokens.append("''")
            else:
                output_tokens.append("``")
            has_left_quote = not has_left_quote
            i += 1
        elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
            output_tokens[-1] = output_tokens[-1][:-1]
            output_tokens.append("n't")
            i += 2
        elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
            output_tokens.append("'" + input_tokens[i + 1])
            i += 2
        elif tok == "'":
            if has_left_single_quote:
                output_tokens.append("'")
            else:
                output_tokens.append("`")
            has_left_single_quote = not has_left_single_quote
            i += 1
        elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
            output_tokens.append("...")
            i += 3
        elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
            # $ 3 , 000 -> $ 3,000
            output_tokens[-1] += ',' + input_tokens[i + 1]
            i += 2
        elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
            # 3 . 03 -> 3.03
            output_tokens[-1] += '.' + input_tokens[i + 1]
            i += 2
        elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
            # U . N . -> U.N.
            k = i + 3
            while k + 2 < len(input_tokens):
                if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
                    k += 2
                else:
                    break
            output_tokens[-1] += ''.join(input_tokens[i:k])
            i = k  # jump past every token consumed by the abbreviation
        elif tok == "-":
            if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
                output_tokens.append("--")
                i += 2
            elif i == len(input_tokens) - 1 or i == 0:
                output_tokens.append("-")
                i += 1
            elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
                output_tokens[-1] += "-"
                i += 1
                flag_prev_dash = True
            else:
                output_tokens.append("-")
                i += 1
        elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
            output_tokens[-1] += tok
            i += 1
        else:
            output_tokens.append(tok)
            i += 1
        prev_dash = flag_prev_dash
    return " ".join(output_tokens)


def remove_duplicate(l_list, duplicate_rate):
    tk_list = [l.lower().split() for l in l_list]
    r_list = []
    history_set = set()
    for i, w_list in enumerate(tk_list):
        w_set = set(w_list)
        if len(w_set & history_set) / len(w_set) <= duplicate_rate:
            r_list.append(l_list[i])
        history_set |= w_set
    return r_list


def process_eval(eval_fn):
    gold_list = []
    with open(args.gold, "r", encoding="utf-8") as f_in:
        for l in f_in:
            line = l.strip().replace(" <S_SEP> ", '\n')
            gold_list.append(line)
    pred_list = []
    with open(eval_fn, "r", encoding="utf-8") as f_in:
        for l in f_in:
            buf = []
            for sentence in l.strip().split("[X_SEP]"):
                sentence = fix_tokenization(sentence)
                # With the 1e-5 smoothing, get_f1 never exceeds 1.0, so this
                # near-duplicate filter is effectively a no-op as written.
                if any(get_f1(sentence, s) > 1.0 for s in buf):
                    continue
                s_len = len(sentence.split())
                if s_len <= 4:
                    continue
                buf.append(sentence)
            if args.duplicate_rate and args.duplicate_rate < 1:
                buf = remove_duplicate(buf, args.duplicate_rate)
            if args.trunc_len:
                num_left = args.trunc_len
                trunc_list = []
                for bit in buf:
                    tk_list = bit.split()
                    n = min(len(tk_list), num_left)
                    trunc_list.append(' '.join(tk_list[:n]))
                    num_left -= n
                    if num_left <= 0:
                        break
            else:
                trunc_list = buf
            line = "\n".join(trunc_list)
            pred_list.append(line)
    with open(eval_fn + '.post', 'w', encoding='utf-8') as f_out:
        for l in pred_list:
            f_out.write(l.replace('\n', ' [X_SEP] ').strip())
            f_out.write('\n')
    # rouge scores
    if len(pred_list) < len(gold_list):
        # evaluate subset
        gold_list = gold_list[:len(pred_list)]
    assert len(pred_list) == len(gold_list)
    if args.perl:
        scores = test_rouge(pred_list, gold_list)
    else:
        scores = evaluator.get_scores(pred_list, [[it] for it in gold_list])
    return eval_fn, scores
null
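A small worked example of remove_duplicate from the record above (values chosen by hand): a sentence is dropped when too large a fraction of its unique tokens has already been seen in earlier sentences.

sents = ["The cat sat on the mat", "the cat sat", "A dog barked"]
print(remove_duplicate(sents, duplicate_rate=0.7))
# ['The cat sat on the mat', 'A dog barked']
# "the cat sat" is dropped: all 3 of its unique tokens are in the history, 3/3 > 0.7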
185,241
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import logging
import glob
import json
import argparse
import math
import string
from multiprocessing import Pool, cpu_count
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
import rouge
import time
import tempfile
import shutil

from pytorch_pretrained_bert.tokenization import BertTokenizer
from gigaword.bs_pyrouge import Rouge155


def rouge_results_to_str(results_dict):
    # Note: the recall label previously read "ROUGE-R(1/2/3/l)" although only
    # the 1/2/l values are formatted.
    return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/l): {:.2f}/{:.2f}/{:.2f}\n".format(
        results_dict["rouge_1_f_score"] * 100,
        results_dict["rouge_2_f_score"] * 100,
        results_dict["rouge_l_f_score"] * 100,
        results_dict["rouge_1_recall"] * 100,
        results_dict["rouge_2_recall"] * 100,
        results_dict["rouge_l_recall"] * 100
    )
null
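As a quick illustration of the formatter above, a minimal sketch with a hypothetical results_dict; the key names match those consumed by rouge_results_to_str, but the numbers are made up.

results_dict = {
    "rouge_1_f_score": 0.4312, "rouge_2_f_score": 0.2011, "rouge_l_f_score": 0.4005,
    "rouge_1_recall": 0.4450, "rouge_2_recall": 0.2103, "rouge_l_recall": 0.4122,
}
print(rouge_results_to_str(results_dict))
# >> ROUGE-F(1/2/l): 43.12/20.11/40.05
# ROUGE-R(1/2/l): 44.50/21.03/41.22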
185,242
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import logging
import glob
import json
import argparse
import math
import string
from multiprocessing import Pool, cpu_count
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
import rouge
import time
import tempfile
import shutil

from pytorch_pretrained_bert.tokenization import BertTokenizer
from gigaword.bs_pyrouge import Rouge155


def count_tokens(tokens):
    counter = {}
    for t in tokens:
        if t in counter.keys():
            counter[t] += 1
        else:
            counter[t] = 1
    return counter


def get_f1(text_a, text_b):
    tokens_a = text_a.lower().split()
    tokens_b = text_b.lower().split()
    if len(tokens_a) == 0 or len(tokens_b) == 0:
        return 1 if len(tokens_a) == len(tokens_b) else 0
    set_a = count_tokens(tokens_a)
    set_b = count_tokens(tokens_b)
    match = 0
    for token in set_a.keys():
        if token in set_b.keys():
            match += min(set_a[token], set_b[token])
    p = match / len(tokens_a)
    r = match / len(tokens_b)
    return 2.0 * p * r / (p + r + 1e-5)
null
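A worked example of the token-level F1 above (hand-picked strings): with candidate "the cat sat" and reference "the cat", two tokens match, so p = 2/3 and r = 2/2 = 1, and the 1e-5 smoothing keeps the result just under the exact 0.8.

print(count_tokens("a b b".split()))     # {'a': 1, 'b': 2}
print(get_f1("the cat sat", "the cat"))  # ~0.79999, i.e. 2*p*r / (p + r + 1e-5)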
185,243
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import logging
import glob
import json
import argparse
import math
import string
from multiprocessing import Pool, cpu_count
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
import rouge
import time
import tempfile
import shutil

from pytorch_pretrained_bert.tokenization import BertTokenizer
from gigaword.bs_pyrouge import Rouge155

# `parser` (an argparse.ArgumentParser) is assumed to be defined by the surrounding script.
args = parser.parse_args()

evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False,
                        apply_avg=True, weight_factor=1.2)


def test_rouge(cand, ref):
    temp_dir = tempfile.mkdtemp()
    candidates = cand
    references = ref
    assert len(candidates) == len(references)
    cnt = len(candidates)
    current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
    tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)
        os.mkdir(tmp_dir + "/candidate")
        os.mkdir(tmp_dir + "/reference")
    try:
        for i in range(cnt):
            if len(references[i]) < 1:
                continue
            with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f:
                f.write(candidates[i])
            with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f:
                f.write(references[i])
        r = Rouge155(temp_dir=temp_dir)
        r.model_dir = tmp_dir + "/reference/"
        r.system_dir = tmp_dir + "/candidate/"
        r.model_filename_pattern = 'ref.#ID#.txt'
        r.system_filename_pattern = r'cand.(\d+).txt'
        rouge_results = r.convert_and_evaluate()
        print(rouge_results)
        results_dict = r.output_to_dict(rouge_results)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
    return results_dict


def fix_tokenization(text):
    # `_tok_dict` and `_is_digit` are module-level helpers defined elsewhere in the script.
    input_tokens = text.split()
    output_tokens = []
    has_left_quote = False
    has_left_single_quote = False
    i = 0
    prev_dash = False
    while i < len(input_tokens):
        tok = input_tokens[i]
        flag_prev_dash = False
        if tok in _tok_dict.keys():
            output_tokens.append(_tok_dict[tok])
            i += 1
        elif tok == "\"":
            if has_left_quote:
                output_tokens.append("''")
            else:
                output_tokens.append("``")
            has_left_quote = not has_left_quote
            i += 1
        elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
            output_tokens[-1] = output_tokens[-1][:-1]
            output_tokens.append("n't")
            i += 2
        elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
            output_tokens.append("'" + input_tokens[i + 1])
            i += 2
        elif tok == "'":
            if has_left_single_quote:
                output_tokens.append("'")
            else:
                output_tokens.append("`")
            has_left_single_quote = not has_left_single_quote
            i += 1
        elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
            output_tokens.append("...")
            i += 3
        elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
            # $ 3 , 000 -> $ 3,000
            output_tokens[-1] += ',' + input_tokens[i + 1]
            i += 2
        elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
            # 3 . 03 -> 3.03
            output_tokens[-1] += '.' + input_tokens[i + 1]
            i += 2
        elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
            # U . N . -> U.N.
            k = i + 3
            while k + 2 < len(input_tokens):
                if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
                    k += 2
                else:
                    break
            output_tokens[-1] += ''.join(input_tokens[i:k])
            i = k  # jump past every token consumed by the abbreviation
        elif tok == "-":
            if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
                output_tokens.append("--")
                i += 2
            elif i == len(input_tokens) - 1 or i == 0:
                output_tokens.append("-")
                i += 1
            elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
                output_tokens[-1] += "-"
                i += 1
                flag_prev_dash = True
            else:
                output_tokens.append("-")
                i += 1
        elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
            output_tokens[-1] += tok
            i += 1
        else:
            output_tokens.append(tok)
            i += 1
        prev_dash = flag_prev_dash
    return " ".join(output_tokens)


def process_eval(eval_fn):
    gold_list = []
    with open(args.gold, "r", encoding="utf-8") as f_in:
        for l in f_in:
            line = l.strip()
            gold_list.append(line)
    pred_list = []
    with open(eval_fn, "r", encoding="utf-8") as f_in:
        for l in f_in:
            buf = []
            sentence = fix_tokenization(l.strip()).replace('1', '#')
            buf.append(sentence)
            if args.trunc_len:
                num_left = args.trunc_len
                trunc_list = []
                for bit in buf:
                    tk_list = bit.split()
                    n = min(len(tk_list), num_left)
                    trunc_list.append(' '.join(tk_list[:n]))
                    num_left -= n
                    if num_left <= 0:
                        break
            else:
                trunc_list = buf
            line = "\n".join(trunc_list)
            pred_list.append(line)
    with open(eval_fn + '.post', 'w', encoding='utf-8') as f_out:
        for l in pred_list:
            f_out.write(l.strip())
            f_out.write('\n')
    # rouge scores
    if len(pred_list) < len(gold_list):
        # evaluate subset
        gold_list = gold_list[:len(pred_list)]
    assert len(pred_list) == len(gold_list)
    if args.perl:
        scores = test_rouge(pred_list, gold_list)
    else:
        scores = evaluator.get_scores(pred_list, [[it] for it in gold_list])
    return eval_fn, scores
null
185,244
import task
import deit
import trocr_models
import torch
import fairseq
from fairseq import utils
from fairseq_cli import generate
from PIL import Image
import torchvision.transforms as transforms


def init(model_path, beam=5):
    # Note: this rebinds the imported `task` module name to the loaded fairseq task.
    model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [model_path],
        arg_overrides={"beam": beam, "task": "text_recognition", "data": "", "fp16": False})
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model[0].to(device)
    img_transform = transforms.Compose([
        transforms.Resize((384, 384), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize(0.5, 0.5)
    ])
    generator = task.build_generator(
        model, cfg.generation, extra_gen_cls_kwargs={'lm_model': None, 'lm_weight': None}
    )
    bpe = task.build_bpe(cfg.bpe)
    return model, cfg, task, generator, bpe, img_transform, device
null
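A minimal usage sketch for init above; the checkpoint path is hypothetical and assumes a TrOCR fairseq checkpoint is available locally.

# 'trocr-base.pt' is a placeholder path, not a file shipped with the code
model, cfg, task, generator, bpe, img_transform, device = init('trocr-base.pt', beam=5)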
185,245
import task
import deit
import trocr_models
import torch
import fairseq
from fairseq import utils
from fairseq_cli import generate
from PIL import Image
import torchvision.transforms as transforms


def preprocess(img_path, img_transform):
    im = Image.open(img_path).convert('RGB').resize((384, 384))
    # Note: `device` is not defined inside this function; it relies on a
    # module-level `device` (e.g. the one returned by init()). Pass it in
    # explicitly if you use this function standalone.
    im = img_transform(im).unsqueeze(0).to(device).float()
    sample = {
        'net_input': {"imgs": im},
    }
    return sample
null
185,246
import task
import deit
import trocr_models
import torch
import fairseq
from fairseq import utils
from fairseq_cli import generate
from PIL import Image
import torchvision.transforms as transforms


def get_text(cfg, generator, model, sample, bpe):
    decoder_output = task.inference_step(generator, model, sample,
                                         prefix_tokens=None, constraints=None)
    decoder_output = decoder_output[0][0]  # top1
    hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
        hypo_tokens=decoder_output["tokens"].int().cpu(),
        src_str="",
        alignment=decoder_output["alignment"],
        align_dict=None,
        tgt_dict=model[0].decoder.dictionary,
        remove_bpe=cfg.common_eval.post_process,
        extra_symbols_to_ignore=generate.get_symbols_to_strip_from_output(generator),
    )
    detok_hypo_str = bpe.decode(hypo_str)
    return detok_hypo_str
null
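Tying the three helpers together, a hedged end-to-end sketch; the paths are placeholders, and preprocess as written looks up `device` from the enclosing module scope, so this only works when everything lives in one script.

model, cfg, task, generator, bpe, img_transform, device = init('trocr-base.pt')  # hypothetical path
sample = preprocess('line.jpg', img_transform)  # relies on the module-level `device` above
print(get_text(cfg, generator, model, sample, bpe))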
185,247
import cv2
import numpy as np
from wand.image import Image as WandImage
from scipy.ndimage import zoom as scizoom
from wand.api import library as wandlibrary


def clipped_zoom(img, zoom_factor):
    h = img.shape[1]
    # ceil crop height (= crop width)
    ch = int(np.ceil(h / float(zoom_factor)))
    top = (h - ch) // 2
    img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
    # trim off any extra pixels
    trim_top = (img.shape[0] - h) // 2
    return img[trim_top:trim_top + h, trim_top:trim_top + h]
null
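A quick sanity check for clipped_zoom on a random HxWxC array; the output keeps the input's spatial size because the zoomed-in crop is trimmed back to the original extent.

import numpy as np
img = np.random.rand(224, 224, 3)
out = clipped_zoom(img, zoom_factor=1.5)
assert out.shape[:2] == (224, 224)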
185,248
import cv2
import numpy as np
from wand.image import Image as WandImage
from scipy.ndimage import zoom as scizoom
from wand.api import library as wandlibrary


def disk(radius, alias_blur=0.1, dtype=np.float32):
    if radius <= 8:
        L = np.arange(-8, 8 + 1)
        ksize = (3, 3)
    else:
        L = np.arange(-radius, radius + 1)
        ksize = (5, 5)
    X, Y = np.meshgrid(L, L)
    aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
    aliased_disk /= np.sum(aliased_disk)
    # supersample disk to antialias
    return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
null
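disk above builds a normalized circular kernel; convolving an image with it via cv2.filter2D gives a simple defocus blur, in the style of the ImageNet-C corruptions. A short sketch on random data:

import cv2
import numpy as np
kernel = disk(radius=3, alias_blur=0.1)
img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
blurred = cv2.filter2D(img, -1, kernel)  # ddepth=-1 keeps the input dtype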
185,249
import cv2
import numpy as np
from wand.image import Image as WandImage
from scipy.ndimage import zoom as scizoom
from wand.api import library as wandlibrary

The provided code snippet includes the necessary dependencies for implementing the `plasma_fractal` function. Write a Python function `def plasma_fractal(mapsize=256, wibbledecay=3)` to solve the following problem: Generate a heightmap using the diamond-square algorithm. Return a square 2D array, side length 'mapsize', of floats in range 0-255. 'mapsize' must be a power of two.

Here is the function:

def plasma_fractal(mapsize=256, wibbledecay=3):
    """
    Generate a heightmap using diamond-square algorithm.
    Return square 2d array, side length 'mapsize', of floats in range 0-255.
    'mapsize' must be a power of two.
    """
    assert (mapsize & (mapsize - 1) == 0)
    # np.float_ was removed in NumPy 2.0; use np.float64 there
    maparray = np.empty((mapsize, mapsize), dtype=np.float_)
    maparray[0, 0] = 0
    stepsize = mapsize
    wibble = 100

    def wibbledmean(array):
        return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)

    def fillsquares():
        """For each square of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
        squareaccum += np.roll(squareaccum, shift=-1, axis=1)
        maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)

    def filldiamonds():
        """For each diamond of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        mapsize = maparray.shape[0]
        drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
        ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
        lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
        ltsum = ldrsum + lulsum
        maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
        tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
        tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
        ttsum = tdrsum + tulsum
        maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)

    while stepsize >= 2:
        fillsquares()
        filldiamonds()
        stepsize //= 2
        wibble /= wibbledecay

    # note: despite the docstring's 0-255 wording, the output is normalised to [0, 1]
    maparray -= maparray.min()
    return maparray / maparray.max()
Generate a heightmap using diamond-square algorithm. Return square 2d array, side length 'mapsize', of floats in range 0-255. 'mapsize' must be a power of two.
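A short check of the function above (on NumPy < 2.0, since the code uses np.float_): despite the docstring's 0-255 wording, the result is min-subtracted and max-divided, so it lands exactly in [0, 1].

import numpy as np
np.random.seed(0)
hm = plasma_fractal(mapsize=128, wibbledecay=3)  # mapsize must be a power of two
assert hm.shape == (128, 128)
assert hm.min() == 0.0 and hm.max() == 1.0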
185,250
import torchvision.transforms as transforms
from PIL import Image, ImageFilter
import random
import torch
import numpy as np
import logging
from enum import Enum

from .augmentation.warp import Curve, Distort, Stretch
from .augmentation.geometry import Rotate, Perspective, Shrink, TranslateX, TranslateY
from .augmentation.pattern import VGrid, HGrid, Grid, RectGrid, EllipseGrid
from .augmentation.noise import GaussianNoise, ShotNoise, ImpulseNoise, SpeckleNoise
from .augmentation.blur import GaussianBlur, DefocusBlur, MotionBlur, GlassBlur, ZoomBlur
from .augmentation.camera import Contrast, Brightness, JpegCompression, Pixelate
from .augmentation.weather import Fog, Snow, Frost, Rain, Shadow
from .augmentation.process import Posterize, Solarize, Invert, Equalize, AutoContrast, Sharpness, Color

logger = logging.getLogger(__name__)


class InterpolationMode():
    # mirrors PIL's resampling constants
    NEAREST = 0
    BILINEAR = 2
    BICUBIC = 3
    BOX = 4
    HAMMING = 5
    LANCZOS = 1


class ResizePad(object):
    def __init__(self, imgH=64, imgW=3072, keep_ratio_with_pad=True):
        self.imgH = imgH
        self.imgW = imgW
        assert keep_ratio_with_pad == True
        self.keep_ratio_with_pad = keep_ratio_with_pad

    def __call__(self, im):
        old_size = im.size  # (width, height)
        ratio = float(self.imgH) / old_size[1]
        new_size = tuple([int(x * ratio) for x in old_size])
        im = im.resize(new_size, Image.BICUBIC)
        new_im = Image.new("RGB", (self.imgW, self.imgH))
        new_im.paste(im, (0, 0))
        return new_im


class WeightedRandomChoice:
    def __init__(self, trans, weights=None):
        self.trans = trans
        if not weights:
            self.weights = [1] * len(trans)
        else:
            assert len(trans) == len(weights)
            self.weights = weights

    def __call__(self, img):
        t = random.choices(self.trans, weights=self.weights, k=1)[0]
        try:
            tfm_img = t(img)
        except Exception as e:
            logger.warning('Error during data_aug: ' + str(e))
            return img
        return tfm_img

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.trans:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class Dilation(torch.nn.Module):
    def __init__(self, kernel=3):
        super().__init__()
        self.kernel = kernel

    def forward(self, img):
        return img.filter(ImageFilter.MaxFilter(self.kernel))

    def __repr__(self):
        return self.__class__.__name__ + '(kernel={})'.format(self.kernel)


class Erosion(torch.nn.Module):
    def __init__(self, kernel=3):
        super().__init__()
        self.kernel = kernel

    def forward(self, img):
        return img.filter(ImageFilter.MinFilter(self.kernel))

    def __repr__(self):
        return self.__class__.__name__ + '(kernel={})'.format(self.kernel)


class Underline(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, img):
        img_np = np.array(img.convert('L'))
        black_pixels = np.where(img_np < 50)
        try:
            y1 = max(black_pixels[0])
            x0 = min(black_pixels[1])
            x1 = max(black_pixels[1])
        except Exception:
            return img
        for x in range(x0, x1):
            for y in range(y1, y1 - 3, -1):
                try:
                    img.putpixel((x, y), (0, 0, 0))
                except Exception:
                    continue
        return img


class KeepOriginal(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, img):
        return img


class GaussianBlur:
    def __init__(self):
        pass

    def __call__(self, img, mag=-1, prob=1.):
        if np.random.uniform(0, 1) > prob:
            return img
        W, H = img.size
        # kernel = [(31,31)] prev 1 level only
        kernel = (31, 31)
        sigmas = [.5, 1, 2]
        # bound the magnitude by len(sigmas); `kernel` is now a single tuple
        if mag < 0 or mag >= len(sigmas):
            index = np.random.randint(0, len(sigmas))
        else:
            index = mag
        sigma = sigmas[index]
        return transforms.GaussianBlur(kernel_size=kernel, sigma=sigma)(img)


def build_data_aug(size, mode, resnet=False, resizepad=False):
    if resnet:
        norm_tfm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    else:
        norm_tfm = transforms.Normalize(0.5, 0.5)
    if resizepad:
        resize_tfm = ResizePad(imgH=size[0], imgW=size[1])
    else:
        resize_tfm = transforms.Resize(size, interpolation=InterpolationMode.BICUBIC)
    if mode == 'train':
        return transforms.Compose([
            WeightedRandomChoice([
                # transforms.RandomHorizontalFlip(p=1),
                transforms.RandomRotation(degrees=(-10, 10), expand=True, fill=255),
                transforms.GaussianBlur(3),
                Dilation(3),
                Erosion(3),
                transforms.Resize((size[0] // 3, size[1] // 3), interpolation=InterpolationMode.NEAREST),
                Underline(),
                KeepOriginal(),
            ]),
            resize_tfm,
            transforms.ToTensor(),
            norm_tfm
        ])
    else:
        return transforms.Compose([
            resize_tfm,
            transforms.ToTensor(),
            norm_tfm
        ])
null
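A hedged usage sketch for build_data_aug above: size is (height, width), and 'train' mode prepends one randomly chosen augmentation per image before the resize/normalize steps.

tfm = build_data_aug((384, 384), mode='train', resnet=False, resizepad=False)
# img is a PIL.Image; the pipeline returns a normalized float tensor
# tensor = tfm(img)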
185,251
import torchvision.transforms as transforms
from PIL import Image, ImageFilter
import random
import torch
import numpy as np
import logging
from enum import Enum

from .augmentation.warp import Curve, Distort, Stretch
from .augmentation.geometry import Rotate, Perspective, Shrink, TranslateX, TranslateY
from .augmentation.pattern import VGrid, HGrid, Grid, RectGrid, EllipseGrid
from .augmentation.noise import GaussianNoise, ShotNoise, ImpulseNoise, SpeckleNoise
from .augmentation.blur import GaussianBlur, DefocusBlur, MotionBlur, GlassBlur, ZoomBlur
from .augmentation.camera import Contrast, Brightness, JpegCompression, Pixelate
from .augmentation.weather import Fog, Snow, Frost, Rain, Shadow
from .augmentation.process import Posterize, Solarize, Invert, Equalize, AutoContrast, Sharpness, Color


def isless(prob=0.5):
    return np.random.uniform(0, 1) < prob
null
185,252
import glob
import logging
import os
import random
import torch
from fairseq.data import FairseqDataset, data_utils
from natsort import natsorted
from PIL import Image
from tqdm import tqdm


def default_collater(target_dict, samples, dataset=None):
    if not samples:
        return None
    if any([sample is None for sample in samples]):
        if not dataset:
            return None
        # replace missing samples with randomly drawn ones until the batch is full again
        len_batch = len(samples)
        while True:
            samples.append(dataset[random.choice(range(len(dataset)))])
            samples = list(filter(lambda x: x is not None, samples))
            if len(samples) == len_batch:
                break
    indices = []
    imgs = []  # bs, c, h, w
    target_samples = []
    target_ntokens = 0
    for sample in samples:
        index = sample['id']
        indices.append(index)
        imgs.append(sample['tfm_img'])
        target_samples.append(sample['label_ids'].long())
        target_ntokens += len(sample['label_ids'])
    num_sentences = len(samples)
    target_batch = data_utils.collate_tokens(target_samples,
                                             pad_idx=target_dict.pad(),
                                             eos_idx=target_dict.eos(),
                                             move_eos_to_beginning=False)
    rotate_batch = data_utils.collate_tokens(target_samples,
                                             pad_idx=target_dict.pad(),
                                             eos_idx=target_dict.eos(),
                                             move_eos_to_beginning=True)
    indices = torch.tensor(indices, dtype=torch.long)
    imgs = torch.stack(imgs, dim=0)
    return {
        'id': indices,
        'net_input': {
            'imgs': imgs,
            'prev_output_tokens': rotate_batch
        },
        'ntokens': target_ntokens,
        'nsentences': num_sentences,
        'target': target_batch
    }
null
185,253
import glob
import logging
import os
import random
import torch
from fairseq.data import FairseqDataset, data_utils
from natsort import natsorted
from PIL import Image
from tqdm import tqdm

logger = logging.getLogger(__name__)


def read_txt_and_tokenize(txt_path: str, bpe, target_dict):
    annotations = []
    with open(txt_path, 'r', encoding='utf8') as fp:
        for line in fp.readlines():
            line = line.rstrip()
            if not line:
                continue
            line_split = line.split(',', maxsplit=8)
            quadrangle = list(map(int, line_split[:8]))
            content = line_split[-1]
            if bpe:
                encoded_str = bpe.encode(content)
            else:
                encoded_str = content
            xs = [quadrangle[i] for i in range(0, 8, 2)]
            ys = [quadrangle[i] for i in range(1, 8, 2)]
            bbox = [min(xs), min(ys), max(xs), max(ys)]
            annotations.append({'bbox': bbox, 'encoded_str': encoded_str,
                                'category_id': 0, 'segmentation': [quadrangle]})  # 0 for text, 1 for background
    return annotations


def SROIETask2(root_dir: str, bpe, target_dict, crop_img_output_dir=None):
    data = []
    img_id = -1
    crop_data = []
    crop_img_id = -1
    image_paths = natsorted(list(glob.glob(os.path.join(root_dir, '*.jpg'))))
    for jpg_path in tqdm(image_paths):
        im = Image.open(jpg_path).convert('RGB')
        img_w, img_h = im.size
        img_id += 1
        txt_path = jpg_path.replace('.jpg', '.txt')
        annotations = read_txt_and_tokenize(txt_path, bpe, target_dict)
        img_dict = {'file_name': jpg_path, 'width': img_w, 'height': img_h,
                    'image_id': img_id, 'annotations': annotations}
        data.append(img_dict)
        for ann in annotations:
            crop_w = ann['bbox'][2] - ann['bbox'][0]
            crop_h = ann['bbox'][3] - ann['bbox'][1]
            if not (crop_w > 0 and crop_h > 0):
                logger.warning('Error occurs during image cropping: {} has a zero area bbox.'.format(
                    os.path.basename(jpg_path)))
                continue
            crop_img_id += 1
            crop_im = im.crop(ann['bbox'])
            if crop_img_output_dir:
                crop_im.save(os.path.join(crop_img_output_dir, '{:d}.jpg'.format(crop_img_id)))
            crop_img_dict = {'img': crop_im, 'file_name': jpg_path, 'width': crop_w, 'height': crop_h,
                             'image_id': crop_img_id, 'encoded_str': ann['encoded_str']}
            crop_data.append(crop_img_dict)
    return data, crop_data
null
185,254
import glob
import logging
import os
import random
import torch
from fairseq.data import FairseqDataset, data_utils
from natsort import natsorted
from PIL import Image
from tqdm import tqdm


def STR(gt_path, bpe_parser):
    root_dir = os.path.dirname(gt_path)
    data = []
    img_id = 0
    with open(gt_path, 'r') as fp:
        for line in tqdm(list(fp.readlines()), desc='Loading STR:'):
            line = line.rstrip()
            temp = line.split('\t', 1)
            img_file = temp[0]
            text = temp[1]
            img_path = os.path.join(root_dir, 'image', img_file)
            if not bpe_parser:
                encoded_str = text
            else:
                encoded_str = bpe_parser.encode(text)
            data.append({'img_path': img_path, 'image_id': img_id,
                         'text': text, 'encoded_str': encoded_str})
            img_id += 1
    return data
null
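STR above expects a tab-separated ground-truth file, one sample per line, with image filenames resolved under an image/ subdirectory next to gt_path. A made-up two-line layout (the path and filenames are placeholders):

# gt.txt, tab-separated as <img_file>\t<text>:
#   word_1.png	hello
#   word_2.png	world
data = STR('path/to/gt.txt', bpe_parser=None)  # with no bpe_parser, encoded_str is the raw text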
185,255
import glob
import logging
import os
import random
import torch
from fairseq.data import FairseqDataset, data_utils
from natsort import natsorted
from PIL import Image
from tqdm import tqdm


def Receipt53K(gt_path):
    root_dir = os.path.dirname(gt_path)
    data = []
    with open(gt_path, 'r', encoding='utf8') as fp:
        for line in tqdm(list(fp.readlines()), desc='Loading Receipt53K:'):
            line = line.rstrip()
            temp = line.split('\t', 1)
            img_file = temp[0]
            text = temp[1]
            img_path = os.path.join(root_dir, img_file)
            data.append({'img_path': img_path, 'text': text})
    return data
null
185,256
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture
from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel
from fairseq.models.transformer import base_architecture as base_transformer
from fairseq.models.fairseq_encoder import EncoderOut
from torch.nn import Parameter
from fairseq import utils
from torch import Tensor
import torch
from torch.hub import load_state_dict_from_url
from timm.models import create_model
from functools import partial
import logging
import argparse
from typing import Dict, Optional, Tuple
from collections import OrderedDict
import os
from argparse import Namespace
from omegaconf import DictConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def deit_base_decoder_base(args):
    # DeiT encoder: deit_base_distilled_patch16_384
    args.deit_arch = getattr(args, "deit_arch", "deit_base_distilled_patch16_384")
    # Transformer decoder
    # args.encoder_embed_dim = 768
    base_transformer(args)
null
185,257
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture
from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel
from fairseq.models.transformer import base_architecture as base_transformer
from fairseq.models.fairseq_encoder import EncoderOut
from torch.nn import Parameter
from fairseq import utils
from torch import Tensor
import torch
from torch.hub import load_state_dict_from_url
from timm.models import create_model
from functools import partial
import logging
import argparse
from typing import Dict, Optional, Tuple
from collections import OrderedDict
import os
from argparse import Namespace
from omegaconf import DictConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def deit_base_decoder_large(args):
    # DeiT encoder: deit_base_distilled_patch16_384
    args.deit_arch = getattr(args, "deit_arch", "deit_base_distilled_patch16_384")
    # Transformer decoder
    # args.encoder_embed_dim = 768
    args.decoder_layers = getattr(args, "decoder_layers", 12)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    base_transformer(args)
null
185,258
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture
from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel
from fairseq.models.transformer import base_architecture as base_transformer
from fairseq.models.fairseq_encoder import EncoderOut
from torch.nn import Parameter
from fairseq import utils
from torch import Tensor
import torch
from torch.hub import load_state_dict_from_url
from timm.models import create_model
from functools import partial
import logging
import argparse
from typing import Dict, Optional, Tuple
from collections import OrderedDict
import os
from argparse import Namespace
from omegaconf import DictConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def beit_base_decoder_large(args):
    # BEiT encoder: beit_base_patch16_384
    args.deit_arch = getattr(args, "deit_arch", "beit_base_patch16_384")
    # Transformer decoder
    # args.encoder_embed_dim = 768
    args.decoder_layers = getattr(args, "decoder_layers", 12)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    base_transformer(args)
null
185,259
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture
from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel
from fairseq.models.transformer import base_architecture as base_transformer
from fairseq.models.fairseq_encoder import EncoderOut
from torch.nn import Parameter
from fairseq import utils
from torch import Tensor
import torch
from torch.hub import load_state_dict_from_url
from timm.models import create_model
from functools import partial
import logging
import argparse
from typing import Dict, Optional, Tuple
from collections import OrderedDict
import os
from argparse import Namespace
from omegaconf import DictConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def beit_large_decoder_large(args):
    # BEiT encoder: beit_large_patch16_384
    args.deit_arch = getattr(args, "deit_arch", "beit_large_patch16_384")
    # Transformer decoder
    # args.encoder_embed_dim = 1024
    args.decoder_layers = getattr(args, "decoder_layers", 12)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    base_transformer(args)
null
185,260
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture
from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel
from fairseq.models.transformer import base_architecture as base_transformer
from fairseq.models.fairseq_encoder import EncoderOut
from torch.nn import Parameter
from fairseq import utils
from torch import Tensor
import torch
from torch.hub import load_state_dict_from_url
from timm.models import create_model
from functools import partial
import logging
import argparse
from typing import Dict, Optional, Tuple
from collections import OrderedDict
import os
from argparse import Namespace
from omegaconf import DictConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def deit_base_decoder_large_custom_size(args):
    # DeiT encoder: deit_base_distilled_patch16_custom_size
    args.deit_arch = getattr(args, "deit_arch", "deit_base_distilled_patch16_custom_size")
    # Transformer decoder
    # args.encoder_embed_dim = 768
    args.decoder_layers = getattr(args, "decoder_layers", 12)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    base_transformer(args)
null
185,261
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture
from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel
from fairseq.models.transformer import base_architecture as base_transformer
from fairseq.models.fairseq_encoder import EncoderOut
from torch.nn import Parameter
from fairseq import utils
from torch import Tensor
import torch
from torch.hub import load_state_dict_from_url
from timm.models import create_model
from functools import partial
import logging
import argparse
from typing import Dict, Optional, Tuple
from collections import OrderedDict
import os
from argparse import Namespace
from omegaconf import DictConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def nlrv4_compressed_tiny(args):
    args.decoder_learned_pos = True
    args.layernorm_embedding = True
    args.decoder_attention_heads = 8
    args.decoder_embed_dim = 256
    args.decoder_output_dim = 256
    args.decoder_ffn_embed_dim = 1024
    args.dropout = 0.1
    args.decoder_layers = 6
    args.max_target_positions = 512


def trocr_small(args):
    # DeiT encoder: deit_small_distilled_patch16_224
    args.deit_arch = getattr(args, "deit_arch", "deit_small_distilled_patch16_224")
    nlrv4_compressed_tiny(args)
    # Transformer decoder
    base_transformer(args)
null
185,262
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture
from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel
from fairseq.models.transformer import base_architecture as base_transformer
from fairseq.models.fairseq_encoder import EncoderOut
from torch.nn import Parameter
from fairseq import utils
from torch import Tensor
import torch
from torch.hub import load_state_dict_from_url
from timm.models import create_model
from functools import partial
import logging
import argparse
from typing import Dict, Optional, Tuple
from collections import OrderedDict
import os
from argparse import Namespace
from omegaconf import DictConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def nlrv4_compressed_tiny(args):
    args.decoder_learned_pos = True
    args.layernorm_embedding = True
    args.decoder_attention_heads = 8
    args.decoder_embed_dim = 256
    args.decoder_output_dim = 256
    args.decoder_ffn_embed_dim = 1024
    args.dropout = 0.1
    args.decoder_layers = 6
    args.max_target_positions = 512


def trocr_small_384(args):
    # DeiT encoder: deit_small_distilled_patch16_384
    args.deit_arch = getattr(args, "deit_arch", "deit_small_distilled_patch16_384")
    nlrv4_compressed_tiny(args)
    # Transformer decoder
    base_transformer(args)
null
185,263
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_


def deit_tiny_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model
null
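These timm-style factories return a plain VisionTransformer; a minimal forward pass (pretrained=False to avoid the checkpoint download):

import torch
model = deit_tiny_patch16_224(pretrained=False)
x = torch.randn(1, 3, 224, 224)
logits = model(x)  # shape (1, 1000) with timm's default classification head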
185,264
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_


def deit_small_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model
null
185,265
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_


def deit_base_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model
null
185,266
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_


class AdaptedVisionTransformer(VisionTransformer):
    def __init__(self, *args, **kwargs):
        ...  # body elided in this record; the full definition appears in record 185,267

    def forward_features(self, x):
        ...  # body elided in this record


def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
    model = AdaptedVisionTransformer(
        distilled=True, patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model
null
185,267
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_


class AdaptedVisionTransformer(VisionTransformer):
    def __init__(self, *args, **kwargs):
        self.ape = kwargs.pop('ape', 0)
        self.mask_ratio = kwargs.pop('mask_ratio', 0.0)
        self.patch_size = kwargs.get('patch_size')
        self.fp16fixed = kwargs.pop('fp16fixed', False)
        weight_init = kwargs.get('weight_init', '')
        super().__init__(*args, **kwargs)
        if self.ape:
            self.pos_embed = nn.Parameter(torch.zeros(1, self.ape + self.num_tokens, self.embed_dim))
        if self.fp16fixed:
            # img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
            # num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
            # drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
            # act_layer=None, weight_init=''
            embed_dim = kwargs.get('embed_dim', 768)
            num_heads = kwargs.get('num_heads', 12)
            mlp_ratio = kwargs.get('mlp_ratio', 4.)
            qkv_bias = kwargs.get('qkv_bias', True)
            drop_rate = kwargs.get('drop_rate', 0.)
            attn_drop_rate = kwargs.get('attn_drop_rate', 0.)
            drop_path_rate = kwargs.get('drop_path_rate', 0.)
            depth = kwargs.get('depth', 12)
            norm_layer = kwargs.get('norm_layer', partial(nn.LayerNorm, eps=1e-6))
            act_layer = kwargs.get('act_layer', nn.GELU)
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
            # Fp16FixedBlock is expected to be defined elsewhere in this module.
            self.blocks = nn.Sequential(*[
                Fp16FixedBlock(
                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                    drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                    norm_layer=norm_layer, act_layer=act_layer)
                for i in range(depth)])
            self.init_weights(weight_init)

    def forward_features(self, x):
        _, _, H, W = x.shape
        Wh = H // self.patch_size
        Ww = W // self.patch_size
        x = self.patch_embed(x)
        if self.mask_ratio != 0:
            probability_matrix = torch.full(x.shape[:2], self.mask_ratio)
            masked_indices = torch.bernoulli(probability_matrix).bool()
            x[masked_indices] = 0
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        if self.dist_token is None:
            x = torch.cat((cls_token, x), dim=1)
        else:
            x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.ape:
            pos_embed_patch_num = int(self.pos_embed.size(1) ** 0.5)
            offset = self.num_tokens
            adapt_pos_embed = self.pos_embed[:, offset:, :].view(
                self.pos_embed.shape[0], pos_embed_patch_num, pos_embed_patch_num,
                self.pos_embed.shape[-1])  # B 24 24 768
            adapt_pos_embed = adapt_pos_embed.permute(0, 3, 1, 2)
            pos_embed = F.interpolate(adapt_pos_embed, size=(Wh, Ww), mode='bicubic')
            pos_embed = pos_embed.flatten(2).transpose(1, 2)  # B Wh*Ww C
            pos_embed = torch.cat((pos_embed, self.pos_embed[:, :offset, :]), dim=1)
        else:
            pos_embed = self.pos_embed
        input_embedding = x + pos_embed
        x = self.pos_drop(input_embedding)
        x = self.blocks(x)
        x = self.norm(x)
        return x, input_embedding


def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
    model = AdaptedVisionTransformer(
        distilled=True, patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model
null
185,268
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_

logger = logging.getLogger(__name__)


class AdaptedVisionTransformer(VisionTransformer):
    def __init__(self, *args, **kwargs):
        self.ape = kwargs.pop('ape', 0)
        self.mask_ratio = kwargs.pop('mask_ratio', 0.0)
        self.patch_size = kwargs.get('patch_size')
        self.fp16fixed = kwargs.pop('fp16fixed', False)
        weight_init = kwargs.get('weight_init', '')
        super().__init__(*args, **kwargs)
        if self.ape:
            self.pos_embed = nn.Parameter(torch.zeros(1, self.ape + self.num_tokens, self.embed_dim))
        if self.fp16fixed:
            # img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
            # num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
            # drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
            # act_layer=None, weight_init=''
            embed_dim = kwargs.get('embed_dim', 768)
            num_heads = kwargs.get('num_heads', 12)
            mlp_ratio = kwargs.get('mlp_ratio', 4.)
            qkv_bias = kwargs.get('qkv_bias', True)
            drop_rate = kwargs.get('drop_rate', 0.)
            attn_drop_rate = kwargs.get('attn_drop_rate', 0.)
            drop_path_rate = kwargs.get('drop_path_rate', 0.)
            depth = kwargs.get('depth', 12)
            norm_layer = kwargs.get('norm_layer', partial(nn.LayerNorm, eps=1e-6))
            act_layer = kwargs.get('act_layer', nn.GELU)
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
            # Fp16FixedBlock is expected to be defined elsewhere in this module.
            self.blocks = nn.Sequential(*[
                Fp16FixedBlock(
                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                    drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                    norm_layer=norm_layer, act_layer=act_layer)
                for i in range(depth)])
            self.init_weights(weight_init)

    def forward_features(self, x):
        _, _, H, W = x.shape
        Wh = H // self.patch_size
        Ww = W // self.patch_size
        x = self.patch_embed(x)
        if self.mask_ratio != 0:
            probability_matrix = torch.full(x.shape[:2], self.mask_ratio)
            masked_indices = torch.bernoulli(probability_matrix).bool()
            x[masked_indices] = 0
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        if self.dist_token is None:
            x = torch.cat((cls_token, x), dim=1)
        else:
            x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.ape:
            pos_embed_patch_num = int(self.pos_embed.size(1) ** 0.5)
            offset = self.num_tokens
            adapt_pos_embed = self.pos_embed[:, offset:, :].view(
                self.pos_embed.shape[0], pos_embed_patch_num, pos_embed_patch_num,
                self.pos_embed.shape[-1])  # B 24 24 768
            adapt_pos_embed = adapt_pos_embed.permute(0, 3, 1, 2)
            pos_embed = F.interpolate(adapt_pos_embed, size=(Wh, Ww), mode='bicubic')
            pos_embed = pos_embed.flatten(2).transpose(1, 2)  # B Wh*Ww C
            pos_embed = torch.cat((pos_embed, self.pos_embed[:, :offset, :]), dim=1)
        else:
            pos_embed = self.pos_embed
        input_embedding = x + pos_embed
        x = self.pos_drop(input_embedding)
        x = self.blocks(x)
        x = self.norm(x)
        return x, input_embedding


def deit_small_distilled_patch16_384(pretrained=False, **kwargs):
    model = AdaptedVisionTransformer(
        distilled=True, img_size=384, patch_size=16, embed_dim=384, depth=12, num_heads=6,
        mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
            map_location="cpu", check_hash=True
        )
        # adapt the 224 checkpoint to the 384 model by slicing or padding pos_embed
        model_seq_len = model.state_dict()['pos_embed'].shape[1]
        ckpt_seq_len = checkpoint['model']['pos_embed'].shape[1]
        logger.warning('Deit load {:d} seq len to {:d} APE {}'.format(
            ckpt_seq_len, model_seq_len, str(model.ape)))
        if not model.ape:
            if model_seq_len <= ckpt_seq_len:
                checkpoint['model']['pos_embed'] = checkpoint['model']['pos_embed'][:, :model_seq_len, :]
            else:
                t = model.state_dict()['pos_embed']
                t[:, :ckpt_seq_len, :] = checkpoint['model']['pos_embed']
                checkpoint['model']['pos_embed'] = t
        model.load_state_dict(checkpoint["model"])
    return model
null
185,269
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_


class AdaptedVisionTransformer(VisionTransformer):
    def __init__(self, *args, **kwargs):
        self.ape = kwargs.pop('ape', 0)
        self.mask_ratio = kwargs.pop('mask_ratio', 0.0)
        self.patch_size = kwargs.get('patch_size')
        self.fp16fixed = kwargs.pop('fp16fixed', False)
        weight_init = kwargs.get('weight_init', '')
        super().__init__(*args, **kwargs)
        if self.ape:
            self.pos_embed = nn.Parameter(torch.zeros(1, self.ape + self.num_tokens, self.embed_dim))
        if self.fp16fixed:
            # img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
            # num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
            # drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
            # act_layer=None, weight_init=''
            embed_dim = kwargs.get('embed_dim', 768)
            num_heads = kwargs.get('num_heads', 12)
            mlp_ratio = kwargs.get('mlp_ratio', 4.)
            qkv_bias = kwargs.get('qkv_bias', True)
            drop_rate = kwargs.get('drop_rate', 0.)
            attn_drop_rate = kwargs.get('attn_drop_rate', 0.)
            drop_path_rate = kwargs.get('drop_path_rate', 0.)
            depth = kwargs.get('depth', 12)
            norm_layer = kwargs.get('norm_layer', partial(nn.LayerNorm, eps=1e-6))
            act_layer = kwargs.get('act_layer', nn.GELU)
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
            # Fp16FixedBlock is expected to be defined elsewhere in this module.
            self.blocks = nn.Sequential(*[
                Fp16FixedBlock(
                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                    drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                    norm_layer=norm_layer, act_layer=act_layer)
                for i in range(depth)])
            self.init_weights(weight_init)

    def forward_features(self, x):
        _, _, H, W = x.shape
        Wh = H // self.patch_size
        Ww = W // self.patch_size
        x = self.patch_embed(x)
        if self.mask_ratio != 0:
            probability_matrix = torch.full(x.shape[:2], self.mask_ratio)
            masked_indices = torch.bernoulli(probability_matrix).bool()
            x[masked_indices] = 0
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        if self.dist_token is None:
            x = torch.cat((cls_token, x), dim=1)
        else:
            x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.ape:
            pos_embed_patch_num = int(self.pos_embed.size(1) ** 0.5)
            offset = self.num_tokens
            adapt_pos_embed = self.pos_embed[:, offset:, :].view(
                self.pos_embed.shape[0], pos_embed_patch_num, pos_embed_patch_num,
                self.pos_embed.shape[-1])  # B 24 24 768
            adapt_pos_embed = adapt_pos_embed.permute(0, 3, 1, 2)
            pos_embed = F.interpolate(adapt_pos_embed, size=(Wh, Ww), mode='bicubic')
            pos_embed = pos_embed.flatten(2).transpose(1, 2)  # B Wh*Ww C
            pos_embed = torch.cat((pos_embed, self.pos_embed[:, :offset, :]), dim=1)
        else:
            pos_embed = self.pos_embed
        input_embedding = x + pos_embed
        x = self.pos_drop(input_embedding)
        x = self.blocks(x)
        x = self.norm(x)
        return x, input_embedding


def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
    model = AdaptedVisionTransformer(
        distilled=True, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model
null
185,270
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_


def deit_base_patch16_384(pretrained=False, **kwargs):
    model = VisionTransformer(
        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model
null
185,271
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_


class AdaptedVisionTransformer(VisionTransformer):
    def __init__(self, *args, **kwargs):
        self.ape = kwargs.pop('ape', 0)
        self.mask_ratio = kwargs.pop('mask_ratio', 0.0)
        self.patch_size = kwargs.get('patch_size')
        self.fp16fixed = kwargs.pop('fp16fixed', False)
        weight_init = kwargs.get('weight_init', '')
        super().__init__(*args, **kwargs)
        if self.ape:
            self.pos_embed = nn.Parameter(torch.zeros(1, self.ape + self.num_tokens, self.embed_dim))
        if self.fp16fixed:
            # img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
            # num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
            # drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
            # act_layer=None, weight_init=''
            embed_dim = kwargs.get('embed_dim', 768)
            num_heads = kwargs.get('num_heads', 12)
            mlp_ratio = kwargs.get('mlp_ratio', 4.)
            qkv_bias = kwargs.get('qkv_bias', True)
            drop_rate = kwargs.get('drop_rate', 0.)
            attn_drop_rate = kwargs.get('attn_drop_rate', 0.)
            drop_path_rate = kwargs.get('drop_path_rate', 0.)
            depth = kwargs.get('depth', 12)
            norm_layer = kwargs.get('norm_layer', partial(nn.LayerNorm, eps=1e-6))
            act_layer = kwargs.get('act_layer', nn.GELU)
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
            # Fp16FixedBlock is expected to be defined elsewhere in this module.
            self.blocks = nn.Sequential(*[
                Fp16FixedBlock(
                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                    drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                    norm_layer=norm_layer, act_layer=act_layer)
                for i in range(depth)])
            self.init_weights(weight_init)

    def forward_features(self, x):
        _, _, H, W = x.shape
        Wh = H // self.patch_size
        Ww = W // self.patch_size
        x = self.patch_embed(x)
        if self.mask_ratio != 0:
            probability_matrix = torch.full(x.shape[:2], self.mask_ratio)
            masked_indices = torch.bernoulli(probability_matrix).bool()
            x[masked_indices] = 0
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        if self.dist_token is None:
            x = torch.cat((cls_token, x), dim=1)
        else:
            x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.ape:
            pos_embed_patch_num = int(self.pos_embed.size(1) ** 0.5)
            offset = self.num_tokens
            adapt_pos_embed = self.pos_embed[:, offset:, :].view(
                self.pos_embed.shape[0], pos_embed_patch_num, pos_embed_patch_num,
                self.pos_embed.shape[-1])  # B 24 24 768
            adapt_pos_embed = adapt_pos_embed.permute(0, 3, 1, 2)
            pos_embed = F.interpolate(adapt_pos_embed, size=(Wh, Ww), mode='bicubic')
            pos_embed = pos_embed.flatten(2).transpose(1, 2)  # B Wh*Ww C
            pos_embed = torch.cat((pos_embed, self.pos_embed[:, :offset, :]), dim=1)
        else:
            pos_embed = self.pos_embed
        input_embedding = x + pos_embed
        x = self.pos_drop(input_embedding)
        x = self.blocks(x)
        x = self.norm(x)
        return x, input_embedding


def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
    model = AdaptedVisionTransformer(
        distilled=True, img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12,
        mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint["model"])
    return model
null
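The interesting piece of AdaptedVisionTransformer.forward_features is the absolute-positional-embedding (APE) branch, which resizes a pretrained patch grid to the input's patch grid. Below is a self-contained sketch of just that step; the 24x24 pretrained grid, the two prefix tokens, and the 32x8 target grid are illustrative assumptions, not values taken from the snippet.

import torch
import torch.nn.functional as F

num_tokens, embed_dim = 2, 768                      # cls + dist tokens (distilled model)
pos_embed = torch.zeros(1, 24 * 24 + num_tokens, embed_dim)  # pretrained 24x24 grid
Wh, Ww = 32, 8                                      # patch grid of a new input size

grid = pos_embed[:, num_tokens:, :].view(1, 24, 24, embed_dim).permute(0, 3, 1, 2)
grid = F.interpolate(grid, size=(Wh, Ww), mode='bicubic')   # resize the patch grid
grid = grid.flatten(2).transpose(1, 2)                      # back to B x (Wh*Ww) x C
new_pos_embed = torch.cat((grid, pos_embed[:, :num_tokens, :]), dim=1)
print(new_pos_embed.shape)                          # torch.Size([1, 258, 768])

The token embeddings are concatenated after the interpolated grid here, mirroring the concatenation order used in the snippet itself.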
185,272
import os import logging import torch import torch.nn as nn import torch.nn.functional as F from functools import partial from timm.models.vision_transformer import VisionTransformer, _cfg from timm.models.vision_transformer import Attention, Block from timm.models.registry import register_model from timm.models.layers import trunc_normal_ logger = logging.getLogger(__name__) class AdaptedVisionTransformer(VisionTransformer): def __init__(self, *args, **kwargs): self.ape = kwargs.pop('ape', 0) self.mask_ratio = kwargs.pop('mask_ratio', 0.0) self.patch_size = kwargs.get('patch_size') self.fp16fixed = kwargs.pop('fp16fixed', False) weight_init = kwargs.get('weight_init', '') super().__init__(*args, **kwargs) if self.ape: self.pos_embed = nn.Parameter(torch.zeros(1, self.ape + self.num_tokens, self.embed_dim)) if self.fp16fixed: # img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, # num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, # drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, # act_layer=None, weight_init='' embed_dim = kwargs.get('embed_dim', 768) num_heads = kwargs.get('num_heads', 12) mlp_ratio = kwargs.get('mlp_ratio', 4.) qkv_bias = kwargs.get('qkv_bias', True) drop_rate = kwargs.get('drop_rate', 0.) attn_drop_rate = kwargs.get('attn_drop_rate', 0.) drop_path_rate = kwargs.get('drop_path_rate', 0.) depth = kwargs.get('depth', 12) norm_layer = kwargs.get('norm_layer', partial(nn.LayerNorm, eps=1e-6)) act_layer = kwargs.get('act_layer', nn.GELU) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ Fp16FixedBlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) for i in range(depth)]) self.init_weights(weight_init) def forward_features(self, x): _, _, H, W = x.shape Wh = H // self.patch_size Ww = W // self.patch_size x = self.patch_embed(x) if self.mask_ratio != 0: probability_matrix = torch.full(x.shape[:2], self.mask_ratio) masked_indices = torch.bernoulli(probability_matrix).bool() x[masked_indices] = 0 cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks if self.dist_token is None: x = torch.cat((cls_token, x), dim=1) else: x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) if self.ape: pos_embed_patch_num = int(self.pos_embed.size(1) ** 0.5) offset = self.num_tokens adapt_pos_embed = self.pos_embed[:, offset:, :].view(self.pos_embed.shape[0], pos_embed_patch_num, pos_embed_patch_num, self.pos_embed.shape[-1]) # B 24 24 768 adapt_pos_embed = adapt_pos_embed.permute(0, 3, 1, 2) pos_embed = F.interpolate(adapt_pos_embed, size=(Wh, Ww), mode='bicubic') pos_embed = pos_embed.flatten(2).transpose(1, 2) # B Wh*Ww C pos_embed = torch.cat((pos_embed, self.pos_embed[:, :offset, :]), dim=1) else: pos_embed = self.pos_embed input_embedding = x + pos_embed x = self.pos_drop(input_embedding) x = self.blocks(x) x = self.norm(x) return x, input_embedding def deit_base_distilled_patch16_custom_size(pretrained=False, img_size=384, **kwargs): model = AdaptedVisionTransformer(distilled=True, img_size=img_size, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() if pretrained: checkpoint = 
torch.hub.load_state_dict_from_url( url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth", map_location="cpu", check_hash=True ) # checkpoint['model']['pos_embed'] = checkpoint['model']['pos_embed'][:, :502, :] # ape torch.Size([1, 578, 768]) from checkpoint, the shape in current model is torch.Size([1, 1026, 768]). model_seq_len = model.state_dict()['pos_embed'].shape[1] ckpt_seq_len = checkpoint['model']['pos_embed'].shape[1] logger.warning('Deit load {:d} seq len to {:d} APE {}'.format(ckpt_seq_len, model_seq_len, str(model.ape))) if not model.ape: if model_seq_len <= ckpt_seq_len: checkpoint['model']['pos_embed'] = checkpoint['model']['pos_embed'][:, :model_seq_len, :] else: t = model.state_dict()['pos_embed'] t[:, :ckpt_seq_len, :] = checkpoint['model']['pos_embed'] checkpoint['model']['pos_embed'] = t model.load_state_dict(checkpoint["model"]) return model
null
185,273
import os import logging import torch import torch.nn as nn import torch.nn.functional as F from functools import partial from timm.models.vision_transformer import VisionTransformer, _cfg from timm.models.vision_transformer import Attention, Block from timm.models.registry import register_model from timm.models.layers import trunc_normal_ class AdaptedVisionTransformer(VisionTransformer): def __init__(self, *args, **kwargs): self.ape = kwargs.pop('ape', 0) self.mask_ratio = kwargs.pop('mask_ratio', 0.0) self.patch_size = kwargs.get('patch_size') self.fp16fixed = kwargs.pop('fp16fixed', False) weight_init = kwargs.get('weight_init', '') super().__init__(*args, **kwargs) if self.ape: self.pos_embed = nn.Parameter(torch.zeros(1, self.ape + self.num_tokens, self.embed_dim)) if self.fp16fixed: # img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, # num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, # drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, # act_layer=None, weight_init='' embed_dim = kwargs.get('embed_dim', 768) num_heads = kwargs.get('num_heads', 12) mlp_ratio = kwargs.get('mlp_ratio', 4.) qkv_bias = kwargs.get('qkv_bias', True) drop_rate = kwargs.get('drop_rate', 0.) attn_drop_rate = kwargs.get('attn_drop_rate', 0.) drop_path_rate = kwargs.get('drop_path_rate', 0.) depth = kwargs.get('depth', 12) norm_layer = kwargs.get('norm_layer', partial(nn.LayerNorm, eps=1e-6)) act_layer = kwargs.get('act_layer', nn.GELU) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ Fp16FixedBlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) for i in range(depth)]) self.init_weights(weight_init) def forward_features(self, x): _, _, H, W = x.shape Wh = H // self.patch_size Ww = W // self.patch_size x = self.patch_embed(x) if self.mask_ratio != 0: probability_matrix = torch.full(x.shape[:2], self.mask_ratio) masked_indices = torch.bernoulli(probability_matrix).bool() x[masked_indices] = 0 cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks if self.dist_token is None: x = torch.cat((cls_token, x), dim=1) else: x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) if self.ape: pos_embed_patch_num = int(self.pos_embed.size(1) ** 0.5) offset = self.num_tokens adapt_pos_embed = self.pos_embed[:, offset:, :].view(self.pos_embed.shape[0], pos_embed_patch_num, pos_embed_patch_num, self.pos_embed.shape[-1]) # B 24 24 768 adapt_pos_embed = adapt_pos_embed.permute(0, 3, 1, 2) pos_embed = F.interpolate(adapt_pos_embed, size=(Wh, Ww), mode='bicubic') pos_embed = pos_embed.flatten(2).transpose(1, 2) # B Wh*Ww C pos_embed = torch.cat((pos_embed, self.pos_embed[:, :offset, :]), dim=1) else: pos_embed = self.pos_embed input_embedding = x + pos_embed x = self.pos_drop(input_embedding) x = self.blocks(x) x = self.norm(x) return x, input_embedding def beit_base_patch16_384(pretrained=False, **kwargs): model = AdaptedVisionTransformer( img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=False, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() return model
null
185,274
import os import logging import torch import torch.nn as nn import torch.nn.functional as F from functools import partial from timm.models.vision_transformer import VisionTransformer, _cfg from timm.models.vision_transformer import Attention, Block from timm.models.registry import register_model from timm.models.layers import trunc_normal_ class AdaptedVisionTransformer(VisionTransformer): def __init__(self, *args, **kwargs): self.ape = kwargs.pop('ape', 0) self.mask_ratio = kwargs.pop('mask_ratio', 0.0) self.patch_size = kwargs.get('patch_size') self.fp16fixed = kwargs.pop('fp16fixed', False) weight_init = kwargs.get('weight_init', '') super().__init__(*args, **kwargs) if self.ape: self.pos_embed = nn.Parameter(torch.zeros(1, self.ape + self.num_tokens, self.embed_dim)) if self.fp16fixed: # img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, # num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, # drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, # act_layer=None, weight_init='' embed_dim = kwargs.get('embed_dim', 768) num_heads = kwargs.get('num_heads', 12) mlp_ratio = kwargs.get('mlp_ratio', 4.) qkv_bias = kwargs.get('qkv_bias', True) drop_rate = kwargs.get('drop_rate', 0.) attn_drop_rate = kwargs.get('attn_drop_rate', 0.) drop_path_rate = kwargs.get('drop_path_rate', 0.) depth = kwargs.get('depth', 12) norm_layer = kwargs.get('norm_layer', partial(nn.LayerNorm, eps=1e-6)) act_layer = kwargs.get('act_layer', nn.GELU) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ Fp16FixedBlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) for i in range(depth)]) self.init_weights(weight_init) def forward_features(self, x): _, _, H, W = x.shape Wh = H // self.patch_size Ww = W // self.patch_size x = self.patch_embed(x) if self.mask_ratio != 0: probability_matrix = torch.full(x.shape[:2], self.mask_ratio) masked_indices = torch.bernoulli(probability_matrix).bool() x[masked_indices] = 0 cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks if self.dist_token is None: x = torch.cat((cls_token, x), dim=1) else: x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) if self.ape: pos_embed_patch_num = int(self.pos_embed.size(1) ** 0.5) offset = self.num_tokens adapt_pos_embed = self.pos_embed[:, offset:, :].view(self.pos_embed.shape[0], pos_embed_patch_num, pos_embed_patch_num, self.pos_embed.shape[-1]) # B 24 24 768 adapt_pos_embed = adapt_pos_embed.permute(0, 3, 1, 2) pos_embed = F.interpolate(adapt_pos_embed, size=(Wh, Ww), mode='bicubic') pos_embed = pos_embed.flatten(2).transpose(1, 2) # B Wh*Ww C pos_embed = torch.cat((pos_embed, self.pos_embed[:, :offset, :]), dim=1) else: pos_embed = self.pos_embed input_embedding = x + pos_embed x = self.pos_drop(input_embedding) x = self.blocks(x) x = self.norm(x) return x, input_embedding def beit_large_patch16_384(pretrained=False, **kwargs): model = AdaptedVisionTransformer( img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=False, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() return model
null
185,275
import torch.nn as nn from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel from fairseq.models.fairseq_encoder import EncoderOut from fairseq import utils from timm.models.layers import trunc_normal_ import torch from torch.hub import load_state_dict_from_url from functools import partial import logging def ViT_TR_base(args): # ViT Encoder vit_base_patch16_224 args.vit_img_size = getattr(args, "vit_img_size", 224) args.resize_img_size = args.vit_img_size args.vit_patch_size = getattr(args, "vit_patch_size", 16) args.vit_dim = getattr(args, "vit_dim", 768) args.vit_depth = getattr(args, "vit_depth", 12) args.vit_heads = getattr(args, "vit_heads", 12) args.encoder_pretrained_url = getattr(args, "encoder_pretrained_url", "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth") # Transformer Decoder args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True args.encoder_layers_to_keep = getattr(args, 
"encoder_layers_to_keep", None) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
null
185,276
import torch.nn as nn from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel from fairseq.models.fairseq_encoder import EncoderOut from fairseq import utils from timm.models.layers import trunc_normal_ import torch from torch.hub import load_state_dict_from_url from functools import partial import logging def large_architecture(args): # ViT Encoder vit_base_patch16_224 args.vit_img_size = getattr(args, "vit_img_size", 384) args.resize_img_size = args.vit_img_size args.vit_patch_size = getattr(args, "vit_patch_size", 16) args.vit_dim = getattr(args, "vit_dim", 1024) args.vit_depth = getattr(args, "vit_depth", 24) args.vit_heads = getattr(args, "vit_heads", 16) args.encoder_pretrained_url = getattr(args, "encoder_pretrained_url", "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth") # Transformer Decoder args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True args.encoder_layers_to_keep = getattr(args, 
"encoder_layers_to_keep", None) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
null
185,277
import torch
import logging
from torch import Tensor
from transformers import PreTrainedTokenizerFast, BatchEncoding
from typing import Mapping, Dict, List

def _setup_logger():
    log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_format)
    logger.handlers = [console_handler]
    return logger

# module-level logger; this assignment must come after the definition of
# _setup_logger (the original snippet placed it before, which raises NameError)
logger = _setup_logger()
null
185,278
import torch import logging from torch import Tensor from transformers import PreTrainedTokenizerFast, BatchEncoding from typing import Mapping, Dict, List def move_to_cuda(sample): if len(sample) == 0: return {} def _move_to_cuda(maybe_tensor): if torch.is_tensor(maybe_tensor): return maybe_tensor.cuda(non_blocking=True) elif isinstance(maybe_tensor, dict): return {key: _move_to_cuda(value) for key, value in maybe_tensor.items()} elif isinstance(maybe_tensor, list): return [_move_to_cuda(x) for x in maybe_tensor] elif isinstance(maybe_tensor, tuple): return tuple([_move_to_cuda(x) for x in maybe_tensor]) elif isinstance(maybe_tensor, Mapping): return type(maybe_tensor)({k: _move_to_cuda(v) for k, v in maybe_tensor.items()}) else: return maybe_tensor return _move_to_cuda(sample)
null
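A small usage sketch for move_to_cuda: it recurses through dicts, lists, tuples, and generic Mappings, moving only the tensor leaves. The availability guard keeps it runnable on CPU-only machines.

import torch

batch = {'input_ids': torch.ones(2, 4, dtype=torch.long),
         'labels': [torch.zeros(2), (torch.ones(1), 'keep-me')]}
if torch.cuda.is_available():
    batch = move_to_cuda(batch)
print(type(batch['labels'][1]))  # container structure and non-tensor leaves are preserved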
185,279
import torch import logging from torch import Tensor from transformers import PreTrainedTokenizerFast, BatchEncoding from typing import Mapping, Dict, List def pool(last_hidden_states: Tensor, attention_mask: Tensor, pool_type: str) -> Tensor: last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0) if pool_type == "avg": emb = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] elif pool_type == "weightedavg": # position-weighted mean pooling from SGPT (https://arxiv.org/abs/2202.08904) attention_mask *= attention_mask.cumsum(dim=1) # [0,1,1,1,0,0] -> [0,1,2,3,0,0] s = torch.sum(last_hidden * attention_mask.unsqueeze(-1).float(), dim=1) d = attention_mask.sum(dim=1, keepdim=True).float() emb = s / d elif pool_type == "cls": emb = last_hidden[:, 0] elif pool_type == "last": left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0]) if left_padding: emb = last_hidden[:, -1] else: sequence_lengths = attention_mask.sum(dim=1) - 1 batch_size = last_hidden.shape[0] emb = last_hidden[torch.arange(batch_size, device=last_hidden.device), sequence_lengths] else: raise ValueError(f"pool_type {pool_type} not supported") return emb
null
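A quick sanity check of pool() on toy inputs, covering three of the pooling modes; the shapes are illustrative.

import torch

hidden = torch.randn(2, 5, 8)                          # batch x seq x dim
mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
avg = pool(hidden, mask, 'avg')                        # masked mean over valid tokens
cls = pool(hidden, mask, 'cls')                        # first-token embedding
last = pool(hidden, mask, 'last')                      # last valid token per sequence
print(avg.shape, cls.shape, last.shape)                # each is torch.Size([2, 8])

Note that the 'weightedavg' branch multiplies attention_mask in place, so pass a copy if the mask is reused afterwards.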
185,280
import torch import logging from torch import Tensor from transformers import PreTrainedTokenizerFast, BatchEncoding from typing import Mapping, Dict, List def create_batch_dict(tokenizer: PreTrainedTokenizerFast, input_texts: List[str], always_add_eos: bool, max_length: int = 512) -> BatchEncoding: if not always_add_eos: return tokenizer( input_texts, max_length=max_length, padding=True, pad_to_multiple_of=8, return_token_type_ids=False, truncation=True, return_tensors='pt' ) else: batch_dict = tokenizer( input_texts, max_length=max_length - 1, return_token_type_ids=False, return_attention_mask=False, padding=False, truncation=True ) # append eos_token_id to every input_ids batch_dict['input_ids'] = [input_ids + [tokenizer.eos_token_id] for input_ids in batch_dict['input_ids']] return tokenizer.pad( batch_dict, padding=True, pad_to_multiple_of=8, return_attention_mask=True, return_tensors="pt", )
null
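A usage sketch for create_batch_dict, assuming a locally cached Hugging Face tokenizer; 'gpt2' is an arbitrary example checkpoint (it ships without a pad token, so one is assigned before batching).

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')
tokenizer.pad_token = tokenizer.eos_token      # gpt2 has no pad token by default

batch = create_batch_dict(tokenizer,
                          ['a short query', 'a much longer second query'],
                          always_add_eos=True, max_length=32)
print(batch['input_ids'].shape)                # padded to a multiple of 8, eos appended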
185,281
import torch import logging from torch import Tensor from transformers import PreTrainedTokenizerFast, BatchEncoding from typing import Mapping, Dict, List def get_task_def_by_task_name_and_type(task_name: str, task_type: str) -> str: if task_type in ['STS']: return "Retrieve semantically similar text." if task_type in ['Summarization']: return "Given a news summary, retrieve other semantically similar summaries" if task_type in ['BitextMining']: return "Retrieve parallel sentences." if task_type in ['Classification']: task_name_to_instruct: Dict[str, str] = { 'AmazonCounterfactualClassification': 'Classify a given Amazon customer review text as either counterfactual or not-counterfactual', 'AmazonPolarityClassification': 'Classify Amazon reviews into positive or negative sentiment', 'AmazonReviewsClassification': 'Classify the given Amazon review into its appropriate rating category', 'Banking77Classification': 'Given a online banking query, find the corresponding intents', 'EmotionClassification': 'Classify the emotion expressed in the given Twitter message into one of the six emotions: anger, fear, joy, love, sadness, and surprise', 'ImdbClassification': 'Classify the sentiment expressed in the given movie review text from the IMDB dataset', 'MassiveIntentClassification': 'Given a user utterance as query, find the user intents', 'MassiveScenarioClassification': 'Given a user utterance as query, find the user scenarios', 'MTOPDomainClassification': 'Classify the intent domain of the given utterance in task-oriented conversation', 'MTOPIntentClassification': 'Classify the intent of the given utterance in task-oriented conversation', 'ToxicConversationsClassification': 'Classify the given comments as either toxic or not toxic', 'TweetSentimentExtractionClassification': 'Classify the sentiment of a given tweet as either positive, negative, or neutral', # C-MTEB eval instructions 'TNews': 'Classify the fine-grained category of the given news title', 'IFlyTek': 'Given an App description text, find the appropriate fine-grained category', 'MultilingualSentiment': 'Classify sentiment of the customer review into positive, neutral, or negative', 'JDReview': 'Classify the customer review for iPhone on e-commerce platform into positive or negative', 'OnlineShopping': 'Classify the customer review for online shopping into positive or negative', 'Waimai': 'Classify the customer review from a food takeaway platform into positive or negative', } return task_name_to_instruct[task_name] if task_type in ['Clustering']: task_name_to_instruct: Dict[str, str] = { 'ArxivClusteringP2P': 'Identify the main and secondary category of Arxiv papers based on the titles and abstracts', 'ArxivClusteringS2S': 'Identify the main and secondary category of Arxiv papers based on the titles', 'BiorxivClusteringP2P': 'Identify the main category of Biorxiv papers based on the titles and abstracts', 'BiorxivClusteringS2S': 'Identify the main category of Biorxiv papers based on the titles', 'MedrxivClusteringP2P': 'Identify the main category of Medrxiv papers based on the titles and abstracts', 'MedrxivClusteringS2S': 'Identify the main category of Medrxiv papers based on the titles', 'RedditClustering': 'Identify the topic or theme of Reddit posts based on the titles', 'RedditClusteringP2P': 'Identify the topic or theme of Reddit posts based on the titles and posts', 'StackExchangeClustering': 'Identify the topic or theme of StackExchange posts based on the titles', 'StackExchangeClusteringP2P': 'Identify the topic or theme of 
StackExchange posts based on the given paragraphs', 'TwentyNewsgroupsClustering': 'Identify the topic or theme of the given news articles', # C-MTEB eval instructions 'CLSClusteringS2S': 'Identify the main category of scholar papers based on the titles', 'CLSClusteringP2P': 'Identify the main category of scholar papers based on the titles and abstracts', 'ThuNewsClusteringS2S': 'Identify the topic or theme of the given news articles based on the titles', 'ThuNewsClusteringP2P': 'Identify the topic or theme of the given news articles based on the titles and contents', } return task_name_to_instruct[task_name] if task_type in ['Reranking', 'PairClassification']: task_name_to_instruct: Dict[str, str] = { 'AskUbuntuDupQuestions': 'Retrieve duplicate questions from AskUbuntu forum', 'MindSmallReranking': 'Retrieve relevant news articles based on user browsing history', 'SciDocsRR': 'Given a title of a scientific paper, retrieve the titles of other relevant papers', 'StackOverflowDupQuestions': 'Retrieve duplicate questions from StackOverflow forum', 'SprintDuplicateQuestions': 'Retrieve duplicate questions from Sprint forum', 'TwitterSemEval2015': 'Retrieve tweets that are semantically similar to the given tweet', 'TwitterURLCorpus': 'Retrieve tweets that are semantically similar to the given tweet', # C-MTEB eval instructions 'T2Reranking': 'Given a Chinese search query, retrieve web passages that answer the question', 'MMarcoReranking': 'Given a Chinese search query, retrieve web passages that answer the question', 'CMedQAv1': 'Given a Chinese community medical question, retrieve replies that best answer the question', 'CMedQAv2': 'Given a Chinese community medical question, retrieve replies that best answer the question', 'Ocnli': 'Retrieve semantically similar text.', 'Cmnli': 'Retrieve semantically similar text.', } return task_name_to_instruct[task_name] if task_type in ['Retrieval']: if task_name.lower().startswith('cqadupstack'): return 'Given a question, retrieve detailed question descriptions from Stackexchange that are duplicates to the given question' task_name_to_instruct: Dict[str, str] = { 'ArguAna': 'Given a claim, find documents that refute the claim', 'ClimateFEVER': 'Given a claim about climate change, retrieve documents that support or refute the claim', 'DBPedia': 'Given a query, retrieve relevant entity descriptions from DBPedia', 'FEVER': 'Given a claim, retrieve documents that support or refute the claim', 'FiQA2018': 'Given a financial question, retrieve user replies that best answer the question', 'HotpotQA': 'Given a multi-hop question, retrieve documents that can help answer the question', 'MSMARCO': 'Given a web search query, retrieve relevant passages that answer the query', 'NFCorpus': 'Given a question, retrieve relevant documents that best answer the question', 'NQ': 'Given a question, retrieve Wikipedia passages that answer the question', 'QuoraRetrieval': 'Given a question, retrieve questions that are semantically equivalent to the given question', 'SCIDOCS': 'Given a scientific paper title, retrieve paper abstracts that are cited by the given paper', 'SciFact': 'Given a scientific claim, retrieve documents that support or refute the claim', 'Touche2020': 'Given a question, retrieve detailed and persuasive arguments that answer the question', 'TRECCOVID': 'Given a query on COVID-19, retrieve documents that answer the query', # C-MTEB eval instructions 'T2Retrieval': 'Given a Chinese search query, retrieve web passages that answer the question', 
'MMarcoRetrieval': 'Given a web search query, retrieve relevant passages that answer the query', 'DuRetrieval': 'Given a Chinese search query, retrieve web passages that answer the question', 'CovidRetrieval': 'Given a question on COVID-19, retrieve news articles that answer the question', 'CmedqaRetrieval': 'Given a Chinese community medical question, retrieve replies that best answer the question', 'EcomRetrieval': 'Given a user query from an e-commerce website, retrieve description sentences of relevant products', 'MedicalRetrieval': 'Given a medical question, retrieve user replies that best answer the question', 'VideoRetrieval': 'Given a video search query, retrieve the titles of relevant videos', } # add lower case keys to match some beir names task_name_to_instruct.update({k.lower(): v for k, v in task_name_to_instruct.items()}) # other cases where lower case match still doesn't work task_name_to_instruct['trec-covid'] = task_name_to_instruct['TRECCOVID'] task_name_to_instruct['climate-fever'] = task_name_to_instruct['ClimateFEVER'] task_name_to_instruct['dbpedia-entity'] = task_name_to_instruct['DBPedia'] task_name_to_instruct['webis-touche2020'] = task_name_to_instruct['Touche2020'] task_name_to_instruct['fiqa'] = task_name_to_instruct['FiQA2018'] task_name_to_instruct['quora'] = task_name_to_instruct['QuoraRetrieval'] # for miracl evaluation task_name_to_instruct['miracl'] = 'Given a question, retrieve Wikipedia passages that answer the question' return task_name_to_instruct[task_name] raise ValueError(f"No instruction config for task {task_name} with type {task_type}")
null
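Example lookups; task names must match the MTEB/C-MTEB keys hard-coded above (an unknown task type raises ValueError, an unknown task name a KeyError).

print(get_task_def_by_task_name_and_type('NQ', 'Retrieval'))
# Given a question, retrieve Wikipedia passages that answer the question
print(get_task_def_by_task_name_and_type('Banking77Classification', 'Classification'))
# Given a online banking query, find the corresponding intents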
185,282
import torch import logging from torch import Tensor from transformers import PreTrainedTokenizerFast, BatchEncoding from typing import Mapping, Dict, List def get_detailed_instruct(task_description: str) -> str: if not task_description: return '' return 'Instruct: {}\nQuery: '.format(task_description)
null
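Chaining this helper with get_task_def_by_task_name_and_type from the previous snippet produces the instructed-query format expected by instruction-tuned embedding models; the query string is illustrative.

task = get_task_def_by_task_name_and_type('NQ', 'Retrieval')
query = get_detailed_instruct(task) + 'who wrote the declaration of independence'
print(query)
# Instruct: Given a question, retrieve Wikipedia passages that answer the question
# Query: who wrote the declaration of independence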
185,283
import torch from fairseq import utils def varsize_tensor_all_gather(tensor): # cuda_device = f'cuda:{torch.distributed.get_rank()} cuda_device = 'cuda' if tensor is None: size_tens = torch.tensor([0], dtype=torch.int64, device=cuda_device) else: size_tens = torch.tensor([tensor.shape[0]], dtype=torch.int64, device=cuda_device) # print("size_tens", flush=True) # print(size_tens, flush=True) size_tens = tensor_all_gather(size_tens).cpu() max_size = size_tens.max() padded = torch.empty(max_size, *tensor.shape[1:], dtype=tensor.dtype, device=cuda_device) if tensor is not None: padded[:tensor.shape[0]] = tensor # print("padded:", flush=True) # print(padded, flush=True) ag = tensor_all_gather(padded) # print("ag:", flush=True) # print(ag, flush=True) slices = [] for i, sz in enumerate(size_tens): start_idx = i * max_size end_idx = start_idx + sz.item() if end_idx > start_idx: slices.append(ag[start_idx:end_idx]) ret = torch.cat(slices, dim=0) return ret.to(tensor) The provided code snippet includes necessary dependencies for implementing the `concat_all_gather` function. Write a Python function `def concat_all_gather(tensor)` to solve the following problem: Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient. Here is the function: def concat_all_gather(tensor): """ Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient. """ if torch.cuda.device_count() > 1: return varsize_tensor_all_gather(tensor) else: output = tensor return output
Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient.
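A usage sketch for concat_all_gather: under multi-GPU torch.distributed training it collects every rank's (possibly different-sized) batch, while on a single device it is a pass-through. Note that tensor_all_gather, the fixed-size all-gather helper it relies on, is defined elsewhere in the repo and not shown in this snippet.

import torch

local_embeddings = torch.randn(4, 128)         # this rank's batch of embeddings
all_embeddings = concat_all_gather(local_embeddings)
print(all_embeddings.shape)                    # (total rows across ranks, 128)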
185,284
import torch from fairseq import utils def _get_logging_loss(loss, reduce=True): if loss is None: return 0 return utils.item(loss.data) if reduce else loss.data
null
185,285
import torch from fairseq import utils def construct_idx_tensor_from_list(idx_list2d, lens, pad_idx, device=None): max_len = max(lens) padded_list = [list_i + [pad_idx] * (max_len - lens[i]) for i, list_i in enumerate(idx_list2d)] tensor = torch.LongTensor(padded_list) if device is not None: tensor = tensor.to(device=device) return tensor
null
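Usage sketch: ragged index lists are right-padded to the longest row.

ids = [[5, 6, 7], [8]]
tensor = construct_idx_tensor_from_list(ids, lens=[3, 1], pad_idx=0)
print(tensor)
# tensor([[5, 6, 7],
#         [8, 0, 0]])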
185,286
import torch from fairseq import utils def move_to_device(sample, device): def _move_to_device(tensor): return tensor.to(device=device) return utils.apply_to_sample(_move_to_device, sample)
null
185,287
import numpy as np
import torch
from fairseq.data import data_utils, FairseqDataset, MaskTokensDataset, TruncateDataset, BaseWrapperDataset
from infoxlm.data.dict_dataset import DictDataset

class XlcoDataset(FairseqDataset):
    # NOTE: the method bodies were elided in the source snippet; `...` placeholders
    # keep the class definition syntactically valid without inventing the logic.
    def __init__(self, dataset, vocab, remove_bos_of_item2=True, seed=1): ...
    def set_epoch(self, epoch): ...
    def __len__(self): ...
    def __getitem__(self, index): ...
    def collater(self, samples):
        # the source also declares a nested helper here:
        # def merge(key, left_pad, move_eos_to_beginning=False): ...
        ...

def get_xlco_dataset(args, dataset_path, vocab, mask_idx, combine=False):
    dataset = data_utils.load_indexed_dataset(
        dataset_path, vocab, args.dataset_impl, combine=combine)
    # mask tokens for the cross-lingual contrastive (XLCO) objective
    dataset, _ = MaskTokensDataset.apply_mask(
        dataset,
        vocab=vocab,
        pad_idx=vocab.pad(),
        mask_idx=mask_idx,
        seed=args.seed,
        mask_prob=args.mask_prob,
        mask_whole_words=None,
    )
    dataset = XlcoDataset(dataset, vocab)
    return dataset
null
185,288
import torch from fairseq.data import (data_utils, TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset, NumelDataset, NumSamplesDataset, NestedDictionaryDataset, MaskTokensDataset, AppendTokenDataset, ) from fairseq.data.encoders.utils import get_whole_word_mask def get_prepended_token_block_dataset(args, dataset_path, vocab, combine=False): dataset = data_utils.load_indexed_dataset( dataset_path, vocab, args.dataset_impl, combine=combine, ) if dataset is None: raise FileNotFoundError('Dataset not found: ({})'.format(dataset_path)) if not args.apply_ptb: print("| [I] ptb not applied.", flush=True) return dataset dataset = TruncateDataset(dataset, args.tokens_per_sample - 1) dataset = TokenBlockDataset( dataset, dataset.sizes, args.tokens_per_sample - 1, # one less for <s> pad=vocab.pad(), eos=vocab.eos(), break_mode=args.sample_break_mode, ) print('| loaded {} blocks from: {}'.format(len(dataset), dataset_path), flush=True) dataset = PrependTokenDataset(dataset, vocab.bos()) return dataset def get_mlm_dataset(args, dataset_path, vocab, mask_idx, mask_whole_words=None, combine=False): ptb_dataset = get_prepended_token_block_dataset( args, dataset_path, vocab, combine=combine) src_dataset, tgt_dataset = MaskTokensDataset.apply_mask( ptb_dataset, vocab=vocab, pad_idx=vocab.pad(), mask_idx=mask_idx, seed=args.seed, mask_prob=args.mask_prob, mask_whole_words=mask_whole_words, ) dataset = NestedDictionaryDataset( { 'net_input': { 'src_tokens': PadDataset( src_dataset, pad_idx=vocab.pad(), left_pad=False, ), 'src_lengths': NumelDataset(src_dataset, reduce=False), }, 'target': PadDataset( tgt_dataset, pad_idx=vocab.pad(), left_pad=False, ), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True), # 'lang_id': RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]), }, sizes=[src_dataset.sizes], ) return dataset
null
185,289
import torch from fairseq.data import (data_utils, TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset, NumelDataset, NumSamplesDataset, NestedDictionaryDataset, MaskTokensDataset, AppendTokenDataset, ) from fairseq.data.encoders.utils import get_whole_word_mask def add_mlm_args(parser): parser.add_argument('--mask-whole-words', default=False, action='store_true', help='mask whole words; you may also want to set --bpe') parser.add_argument('--mask-prob', default=0.15, type=float, help='probability of replacing a token with mask') parser.add_argument('--leave-unmasked-prob', default=0.1, type=float, help='probability that a masked token is unmasked') parser.add_argument('--random-token-prob', default=0.1, type=float, help='probability of replacing a token with a random token') parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample ' 'tokens. If set to "complete", splits samples only at the end ' 'of sentence, but may include multiple sentences per sample. ' '"complete_doc" is similar but respects doc boundaries. ' 'If set to "eos", includes only one sentence per sample.')
null
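A usage sketch attaching the MLM flags above to a bare argparse parser; the flag values below are arbitrary.

import argparse

parser = argparse.ArgumentParser()
add_mlm_args(parser)
args = parser.parse_args(['--mask-prob', '0.3', '--sample-break-mode', 'eos'])
print(args.mask_prob, args.sample_break_mode)   # 0.3 eos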
185,290
import torch from fairseq.data import (data_utils, TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset, NumelDataset, NumSamplesDataset, NestedDictionaryDataset, MaskTokensDataset, AppendTokenDataset, ) from fairseq.data.encoders.utils import get_whole_word_mask def get_preprocessed_ptb_dataset(args, dataset_path, vocab, combine=False): dataset = data_utils.load_indexed_dataset( dataset_path, vocab, args.dataset_impl, combine=combine, ) if dataset is None: raise FileNotFoundError('Dataset not found: ({})'.format(dataset_path)) return dataset
null
185,291
import torch
from fairseq.data import BaseWrapperDataset
from fairseq.data import (data_utils, TokenBlockDataset, PrependTokenDataset,
                          PadDataset, TruncateDataset, NumelDataset, NumSamplesDataset,
                          NestedDictionaryDataset, MaskTokensDataset, AppendTokenDataset,
                          )
from infoxlm.data.mlm_utils import get_mlm_dataset, get_prepended_token_block_dataset

class OffsetDataset(BaseWrapperDataset):

    def __init__(self, ptb_dataset, vocab):
        super().__init__(ptb_dataset)
        self.vocab = vocab

    def get_check_ptb_offsets(self, ptb_item):
        # parse ptb_item: <bos> src_tokens <eos> trg_tokens <eos>
        eos_idx = self.vocab.eos()
        bos_idx = self.vocab.bos()
        _nonzero = (ptb_item == eos_idx).nonzero()
        if len(_nonzero) != 2:
            # NOTE workaround: if the second <eos> is missing, treat the
            # remainder of the block as the target segment
            _nonzero_0 = _nonzero[0].item()
            _nonzero_1 = len(ptb_item)
        else:
            _nonzero_0 = _nonzero[0].item()
            _nonzero_1 = _nonzero[1].item()
        assert ptb_item[0].item() == bos_idx, (ptb_item[0].item(), bos_idx)
        src_fr = 1
        src_to = _nonzero_0  # fixed: the original indexed _nonzero[0]/_nonzero[1]
        trg_fr = src_to + 1  # directly, which raises IndexError in the
        trg_to = _nonzero_1  # single-<eos> workaround branch above
        if src_to - src_fr <= 0 or trg_to - trg_fr <= 0:
            print("[W] ptb_item=%s offsets=%d,%d,%d,%d" % (
                str(ptb_item), src_fr, src_to, trg_fr, trg_to,
            ))
        return src_fr, src_to, trg_fr, trg_to

    def __getitem__(self, index):
        ptb_item = self.dataset[index]
        return self.get_check_ptb_offsets(ptb_item)

    def collater(self, samples):
        src_fr = [s[0] for s in samples]
        src_to = [s[1] for s in samples]
        trg_fr = [s[2] for s in samples]
        trg_to = [s[3] for s in samples]
        return src_fr, src_to, trg_fr, trg_to

def get_mlm_dataset_with_offset(args, dataset_path, vocab, mask_idx,
                                mask_whole_words=None, combine=False):
    ptb_dataset = get_prepended_token_block_dataset(
        args, dataset_path, vocab, combine=combine)
    src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
        ptb_dataset,
        vocab=vocab,
        pad_idx=vocab.pad(),
        mask_idx=mask_idx,
        seed=args.seed,
        mask_prob=args.mask_prob,
        mask_whole_words=mask_whole_words,
    )
    dataset = NestedDictionaryDataset(
        {
            'net_input': {
                'src_tokens': PadDataset(src_dataset, pad_idx=vocab.pad(), left_pad=False),
                'src_lengths': NumelDataset(src_dataset, reduce=False),
            },
            'target': PadDataset(tgt_dataset, pad_idx=vocab.pad(), left_pad=False),
            'nsentences': NumSamplesDataset(),
            'ntokens': NumelDataset(src_dataset, reduce=True),
            'offsets': OffsetDataset(ptb_dataset, vocab),
        },
        sizes=[src_dataset.sizes],
    )
    return dataset
null
185,292
import torch from fairseq.data import (data_utils, TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset, NumelDataset, NumSamplesDataset, NestedDictionaryDataset, MaskTokensDataset, AppendTokenDataset, ) from fairseq.data.encoders.utils import get_whole_word_mask from infoxlm.data.mlm_utils import get_prepended_token_block_dataset from infoxlm.data.offset_dataset import OffsetDataset def get_xlm_align_dataset_with_mask(args, dataset_path, vocab, mask_idx, combine=False): ptb_dataset = get_prepended_token_block_dataset( args, dataset_path, vocab, combine=combine) src_dataset, tgt_dataset = MaskTokensDataset.apply_mask( ptb_dataset, vocab=vocab, pad_idx=vocab.pad(), mask_idx=mask_idx, seed=args.seed, mask_prob=args.mask_prob, ) dataset = NestedDictionaryDataset({ 'net_input': { 'src_tokens': PadDataset( ptb_dataset, pad_idx=vocab.pad(), left_pad=False, ), 'src_lengths': NumelDataset(ptb_dataset, reduce=False), }, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(ptb_dataset, reduce=True), 'offsets': OffsetDataset(ptb_dataset, vocab), 'net_input_tlm': { 'src_tokens': PadDataset( src_dataset, pad_idx=vocab.pad(), left_pad=False, ), 'src_lengths': NumelDataset(src_dataset, reduce=False), }, 'target': PadDataset( tgt_dataset, pad_idx=vocab.pad(), left_pad=False, ), }, sizes=[ptb_dataset.sizes]) return dataset
null
185,293
import os import torch from functools import lru_cache from fairseq.tasks import register_task, FairseqTask from fairseq.data.dictionary import Dictionary from fairseq.data import FairseqDataset from fairseq import utils from infoxlm.data import mlm_utils from infoxlm.data.dict_dataset import DictDataset from infoxlm.data.xlco_dataset import get_xlco_dataset from infoxlm.tasks.mlm import Mlm def _prepare_sample(sample, cuda=True, fp16=True): if sample is None or len(sample) == 0: return None if cuda: sample = utils.move_to_cuda(sample) def apply_half(t): if t.dtype is torch.float32: return t.half() return t if fp16: sample = utils.apply_to_sample(apply_half, sample) return sample
null
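A CPU-only sketch of _prepare_sample (cuda=False keeps it machine-agnostic): fp16 halves float32 tensors while integer tensors pass through untouched.

import torch

sample = {'tokens': torch.ones(2, 3, dtype=torch.long), 'scores': torch.rand(2)}
sample = _prepare_sample(sample, cuda=False, fp16=True)
print(sample['scores'].dtype, sample['tokens'].dtype)   # torch.float16 torch.int64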
185,294
import os from functools import lru_cache import numpy as np import torch from fairseq import utils from fairseq.data.data_utils import process_bpe_symbol from fairseq.data.dictionary import Dictionary from fairseq.tasks import FairseqTask, register_task from infoxlm.data import mlm_utils from infoxlm.data.dict_dataset import DictDataset from infoxlm.data.xlm_align import get_xlm_align_dataset_with_mask def extract_wa_from_pi_xi(pi, xi): m, n = pi.size() forward = torch.eye(n)[pi.argmax(dim=1)] backward = torch.eye(m)[xi.argmax(dim=0)] inter = forward * backward.transpose(0, 1) ret = [] for i in range(m): for j in range(n): if inter[i, j].item() > 0: ret.append((i, j)) return ret
null
185,295
import os from functools import lru_cache import numpy as np import torch from fairseq import utils from fairseq.data.data_utils import process_bpe_symbol from fairseq.data.dictionary import Dictionary from fairseq.tasks import FairseqTask, register_task from infoxlm.data import mlm_utils from infoxlm.data.dict_dataset import DictDataset from infoxlm.data.xlm_align import get_xlm_align_dataset_with_mask def _sinkhorn_iter(S, num_iter=2): assert S.dim() == 2 S[S <= 0] = 1e-6 pi = S xi = pi for i in range(num_iter): pi_sum_over_i = pi.sum(dim=0, keepdim=True) xi = pi / pi_sum_over_i xi_sum_over_j = xi.sum(dim=1, keepdim=True) pi = xi / xi_sum_over_j return pi, xi
null
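A worked example tying this snippet to extract_wa_from_pi_xi above: run two Sinkhorn normalization steps on a toy similarity matrix and read out the intersected forward/backward alignments. The matrix values are illustrative; _sinkhorn_iter mutates its input, hence the clone().

import torch

S = torch.tensor([[0.9, 0.1, 0.0],
                  [0.2, 0.8, 0.1],
                  [0.0, 0.1, 0.9]])
pi, xi = _sinkhorn_iter(S.clone(), num_iter=2)
print(extract_wa_from_pi_xi(pi, xi))   # [(0, 0), (1, 1), (2, 2)]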
185,296
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.roberta import ( RobertaModel, roberta_base_architecture, roberta_large_architecture ) from infoxlm.utils import concat_all_gather def build_projection_dict(langs, dim, activation_fn, fp16=False): proj_dict = {} cnt = 0 for lang in langs: proj_dict[lang] = cnt cnt += 1 proj_matrix_slow = torch.randn(cnt, dim, dim) proj_matrix_slow.normal_(mean=0.0, std=0.02) proj_matrix_slow = nn.Parameter(proj_matrix_slow, requires_grad=False) proj_matrix_fast = nn.Parameter(proj_matrix_slow.data.clone(), requires_grad=True) return proj_dict, proj_matrix_fast, proj_matrix_slow
null
185,297
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.roberta import ( RobertaModel, roberta_base_architecture, roberta_large_architecture ) from infoxlm.utils import concat_all_gather def infoxlm_base(args): roberta_base_architecture(args)
null
185,298
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.roberta import ( RobertaModel, roberta_base_architecture, roberta_large_architecture ) from infoxlm.utils import concat_all_gather def infoxlm_large(args): roberta_large_architecture(args)
null
185,299
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.roberta import ( RobertaModel, RobertaEncoder, roberta_base_architecture, roberta_large_architecture, ) def reload_roberta_base(args): roberta_base_architecture(args)
null
185,300
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.roberta import ( RobertaModel, RobertaEncoder, roberta_base_architecture, roberta_large_architecture, ) def reload_roberta_large(args): roberta_large_architecture(args)
null
185,301
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.roberta import ( RobertaModel, roberta_base_architecture, roberta_large_architecture ) from fairseq.modules import LayerNorm from infoxlm.models.roberta import ReloadRoberta, reload_roberta_base, RobertaEncoder def xlm_align_base(args): roberta_base_architecture(args)
null
185,302
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.roberta import ( RobertaModel, roberta_base_architecture, roberta_large_architecture ) from fairseq.modules import LayerNorm from infoxlm.models.roberta import ReloadRoberta, reload_roberta_base, RobertaEncoder def xlm_align_large(args): roberta_large_architecture(args)
null
185,303
import collections import math import random import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils from fairseq.data import iterators from fairseq.trainer import Trainer from fairseq.meters import AverageMeter, StopwatchMeter def get_training_stats(trainer): stats = collections.OrderedDict() stats['loss'] = trainer.get_meter('train_loss') if trainer.get_meter('train_nll_loss').count > 0: nll_loss = trainer.get_meter('train_nll_loss') stats['nll_loss'] = nll_loss else: nll_loss = trainer.get_meter('train_loss') stats['ppl'] = utils.get_perplexity(nll_loss.avg) stats['wps'] = trainer.get_meter('wps') stats['ups'] = trainer.get_meter('ups') stats['wpb'] = trainer.get_meter('wpb') stats['bsz'] = trainer.get_meter('bsz') stats['num_updates'] = trainer.get_num_updates() stats['lr'] = trainer.get_lr() stats['gnorm'] = trainer.get_meter('gnorm') stats['clip'] = trainer.get_meter('clip') stats['oom'] = trainer.get_meter('oom') if trainer.get_meter('loss_scale') is not None: stats['loss_scale'] = trainer.get_meter('loss_scale') stats['wall'] = round(trainer.get_meter('wall').elapsed_time) stats['train_wall'] = trainer.get_meter('train_wall') return stats def validate(args, trainer, task, epoch_itr, subsets): """Evaluate the model on the validation set(s) and return the losses.""" if args.fixed_validation_seed is not None: # set fixed seed for every validation utils.set_torch_seed(args.fixed_validation_seed) valid_losses = [] for subset in subsets: # Initialize data iterator itr = task.get_batch_iterator( dataset=task.dataset(subset), max_tokens=args.max_tokens_valid, max_sentences=args.max_sentences_valid, max_positions=utils.resolve_max_positions( task.max_positions(), trainer.get_model().max_positions(), ), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank, num_workers=args.num_workers, ).next_epoch_itr(shuffle=False) progress = progress_bar.build_progress_bar( args, itr, epoch_itr.epoch, prefix='valid on \'{}\' subset'.format(subset), no_progress_bar='simple' ) # reset validation loss meters for k in ['valid_loss', 'valid_nll_loss']: meter = trainer.get_meter(k) if meter is not None: meter.reset() extra_meters = collections.defaultdict(lambda: AverageMeter()) for sample in progress: log_output = trainer.valid_step(sample) for k, v in log_output.items(): if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']: continue extra_meters[k].update(v) # log validation stats stats = get_valid_stats(trainer, args, extra_meters) for k, meter in extra_meters.items(): stats[k] = meter.avg progress.print(stats, tag=subset, step=trainer.get_num_updates()) valid_losses.append( stats[args.best_checkpoint_metric].avg if args.best_checkpoint_metric == 'loss' else stats[args.best_checkpoint_metric] ) return valid_losses The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, trainer, task, epoch_itr)` to solve the following problem: Train the model for one epoch. Here is the function: def train(args, trainer, task, epoch_itr): """Train the model for one epoch.""" # Update parameters every N batches print("| Start train.train ..." 
, flush=True) update_freq = args.update_freq[epoch_itr.epoch - 1] \ if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1] # Initialize data iterator itr = epoch_itr.next_epoch_itr( fix_batches_to_gpus=args.fix_batches_to_gpus, shuffle=(epoch_itr.epoch >= args.curriculum), ) print("| Itr init (1) ...", flush=True) itr = iterators.GroupedIterator(itr, update_freq) progress = progress_bar.build_progress_bar( args, itr, epoch_itr.epoch, no_progress_bar='simple', ) print("| Itr init (2) ...", flush=True) extra_meters = collections.defaultdict(lambda: AverageMeter()) valid_subsets = args.valid_subset.split(',') max_update = args.max_update or math.inf # ##################### DEBUG ##################### # debug_samples = [] # print("Fetch debug examples ...") # for i in range(1000): # debug_samples.append(next(itr)) # progress = progress_bar.build_progress_bar( # args, iter(debug_samples), epoch_itr.epoch, no_progress_bar='simple', # ) # ##################### DEBUG ##################### for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch): log_output = trainer.train_step(samples) if log_output is None: continue # log mid-epoch stats stats = get_training_stats(trainer) for k, v in log_output.items(): if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']: continue # these are already logged above if 'loss' in k or k == 'accuracy': extra_meters[k].update(v, log_output['sample_size']) else: extra_meters[k].update(v) stats[k] = extra_meters[k].val progress.log(stats, tag='train', step=stats['num_updates']) # ignore the first mini-batch in words-per-second and updates-per-second calculation if i == 0: trainer.get_meter('wps').reset() trainer.get_meter('ups').reset() num_updates = trainer.get_num_updates() if ( not args.disable_validation and args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates > 0 ): valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets) checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0]) elif (args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates > 0): checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, None) if num_updates >= max_update: break # log end-of-epoch stats stats = get_training_stats(trainer) for k, meter in extra_meters.items(): stats[k] = meter.val progress.print(stats, tag='train', step=stats['num_updates']) # reset training meters for k in [ 'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip', ]: meter = trainer.get_meter(k) if meter is not None: meter.reset()
Train the model for one epoch.
185,304
import collections
import math
import random
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter

# NOTE: the bodies of main() and distributed_main() were elided in the source
# snippet; `...` placeholders keep the module syntactically valid without
# inventing their implementations.
def main(args, init_distributed=False): ...

def distributed_main(i, args, start_rank=0): ...

def cli_main():
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)
    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)
    if args.distributed_init_method is not None:
        # distributed training
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            start_rank = args.distributed_rank
            args.distributed_rank = None  # assign automatically
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, start_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        # fallback for single node with multiple GPUs
        assert args.distributed_world_size <= torch.cuda.device_count()
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args, ),
            nprocs=args.distributed_world_size,
        )
    else:
        # single GPU training
        main(args)
null
185,305
from collections import namedtuple import fileinput import torch from fairseq import checkpoint_utils, options, tasks, utils from fairseq.data import encoders def buffered_read(input, buffer_size): buffer = [] with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h: for src_str in h: buffer.append(src_str.strip()) if len(buffer) >= buffer_size: yield buffer buffer = [] if len(buffer) > 0: yield buffer
null
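Usage sketch: stream a text file in fixed-size chunks of lines; the path below is purely illustrative, and fileinput also accepts '-' for stdin.

for chunk in buffered_read('input.txt', buffer_size=64):
    print(len(chunk), 'lines')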
185,306
from collections import namedtuple import fileinput import torch from fairseq import checkpoint_utils, options, tasks, utils from fairseq.data import encoders Batch = namedtuple('Batch', 'ids src_tokens src_lengths') def make_batches(lines, args, task, max_positions, encode_fn): tokens = [ task.source_dictionary.encode_line( encode_fn(src_str), add_if_not_exist=False ).long() for src_str in lines ] lengths = torch.LongTensor([t.numel() for t in tokens]) itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference(tokens, lengths), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=max_positions, ).next_epoch_itr(shuffle=False) for batch in itr: yield Batch( ids=batch['id'], src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths'], )
null
185,307
from collections import namedtuple import fileinput import torch from fairseq import checkpoint_utils, options, tasks, utils from fairseq.data import encoders def main(args): utils.import_user_module(args) if args.buffer_size < 1: args.buffer_size = 1 if args.max_tokens is None and args.max_sentences is None: args.max_sentences = 1 assert not args.sampling or args.nbest == args.beam, \ '--sampling requires --nbest to be equal to --beam' assert not args.max_sentences or args.max_sentences <= args.buffer_size, \ '--max-sentences/--batch-size cannot be larger than --buffer-size' print(args) use_cuda = torch.cuda.is_available() and not args.cpu # Setup task, e.g., translation task = tasks.setup_task(args) # Load ensemble print('| loading model(s) from {}'.format(args.path)) models, _model_args = checkpoint_utils.load_model_ensemble( args.path.split(':'), arg_overrides=eval(args.model_overrides), task=task, ) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() if use_cuda: model.cuda() # Initialize generator generator = task.build_generator(args) # Handle tokenization and BPE tokenizer = encoders.build_tokenizer(args) bpe = encoders.build_bpe(args) def encode_fn(x): if tokenizer is not None: x = tokenizer.encode(x) if bpe is not None: x = bpe.encode(x) return x def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) if args.buffer_size > 1: print('| Sentence buffer size:', args.buffer_size) print('| Type the input sentence and press return:') start_id = 0 for inputs in buffered_read(args.input, args.buffer_size): results = [] for batch in make_batches(inputs, args, task, max_positions, encode_fn): src_tokens = batch.src_tokens src_lengths = batch.src_lengths if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() sample = { 'net_input': { 'src_tokens': src_tokens, 'src_lengths': src_lengths, }, } translations = task.inference_step(generator, models, sample) for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) results.append((start_id + id, src_tokens_i, hypos)) # sort output to match input order for id, src_tokens, hypos in sorted(results, key=lambda x: x[0]): if src_dict is not None: src_str = src_dict.string(src_tokens, args.remove_bpe) print('S-{}\t{}'.format(id, src_str)) # Process top predictions for hypo in hypos[:min(len(hypos), args.nbest)]: hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe, ) hypo_str = decode_fn(hypo_str) print('H-{}\t{}\t{}'.format(id, hypo['score'], hypo_str)) print('P-{}\t{}'.format( id, ' '.join(map(lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist())) )) if args.print_alignment: alignment_str = " ".join(["{}-{}".format(src, tgt) for src, tgt in alignment]) print('A-{}\t{}'.format( id, 
alignment_str )) # update running id counter start_id += len(inputs) def cli_main(): parser = options.get_generation_parser(interactive=True) args = options.parse_args_and_arch(parser) main(args)
null
185,308
import torch


def calc_mean_invstddev(feature):
    if len(feature.size()) != 2:
        raise ValueError("We expect the input feature to be 2-D tensor")
    mean = feature.mean(0)
    var = feature.var(0)
    # avoid division by ~zero
    eps = 1e-8
    if (var < eps).any():
        return mean, 1.0 / (torch.sqrt(var) + eps)
    return mean, 1.0 / torch.sqrt(var)


def apply_mv_norm(features):
    mean, invstddev = calc_mean_invstddev(features)
    res = (features - mean) * invstddev
    return res
null
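A quick sanity check of the mean/variance normalization above: after apply_mv_norm, each feature dimension should be approximately zero-mean and unit-variance. Assumes the two functions from the row above are in scope.

import torch

feats = torch.randn(100, 40) * 3.0 + 5.0   # (frames, feature_dim)
normed = apply_mv_norm(feats)
print(normed.mean(0).abs().max())  # close to 0
print(normed.std(0).mean())        # close to 1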
185,313
import json import os import re import torch from fairseq.data import Dictionary from fairseq.tasks import FairseqTask, register_task from examples.speech_recognition.data import AsrDataset from examples.speech_recognition.data.replabels import replabel_symbol The provided code snippet includes necessary dependencies for implementing the `get_asr_dataset_from_json` function. Write a Python function `def get_asr_dataset_from_json(data_json_path, tgt_dict)` to solve the following problem: Parse data json and create dataset. See scripts/asr_prep_json.py which pack json from raw files Json example: { "utts": { "4771-29403-0025": { "input": { "length_ms": 170, "path": "/tmp/file1.flac" }, "output": { "text": "HELLO \n", "token": "HE LLO", "tokenid": "4815, 861" } }, "1564-142299-0096": { ... } } Here is the function: def get_asr_dataset_from_json(data_json_path, tgt_dict): """ Parse data json and create dataset. See scripts/asr_prep_json.py which pack json from raw files Json example: { "utts": { "4771-29403-0025": { "input": { "length_ms": 170, "path": "/tmp/file1.flac" }, "output": { "text": "HELLO \n", "token": "HE LLO", "tokenid": "4815, 861" } }, "1564-142299-0096": { ... } } """ if not os.path.isfile(data_json_path): raise FileNotFoundError("Dataset not found: {}".format(data_json_path)) with open(data_json_path, "rb") as f: data_samples = json.load(f)["utts"] assert len(data_samples) != 0 sorted_samples = sorted( data_samples.items(), key=lambda sample: int(sample[1]["input"]["length_ms"]), reverse=True, ) aud_paths = [s[1]["input"]["path"] for s in sorted_samples] ids = [s[0] for s in sorted_samples] speakers = [] for s in sorted_samples: m = re.search("(.+?)-(.+?)-(.+?)", s[0]) speakers.append(m.group(1) + "_" + m.group(2)) frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples] tgt = [ torch.LongTensor([int(i) for i in s[1]["output"]["tokenid"].split(", ")]) for s in sorted_samples ] # append eos tgt = [torch.cat([t, torch.LongTensor([tgt_dict.eos()])]) for t in tgt] return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers)
Parse data json and create dataset. See scripts/asr_prep_json.py which packs json from raw files.
Json example:
{
    "utts": {
        "4771-29403-0025": {
            "input": {
                "length_ms": 170,
                "path": "/tmp/file1.flac"
            },
            "output": {
                "text": "HELLO \n",
                "token": "HE LLO",
                "tokenid": "4815, 861"
            }
        },
        "1564-142299-0096": {
            ...
        }
    }
}
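A self-contained sketch of the parsing core described above, without the fairseq/AsrDataset dependencies: sort utterances longest-first and recover the "<speaker>_<chapter>" prefix from ids like "4771-29403-0025" (the data here is made up):

import re

data_samples = {
    "4771-29403-0025": {"input": {"length_ms": 170, "path": "/tmp/file1.flac"},
                        "output": {"tokenid": "4815, 861"}},
    "1564-142299-0096": {"input": {"length_ms": 90, "path": "/tmp/file2.flac"},
                         "output": {"tokenid": "12, 7"}},
}

# longest utterance first, exactly as in get_asr_dataset_from_json
sorted_samples = sorted(
    data_samples.items(),
    key=lambda sample: int(sample[1]["input"]["length_ms"]),
    reverse=True,
)

for utt_id, s in sorted_samples:
    m = re.search("(.+?)-(.+?)-(.+?)", utt_id)
    speaker = m.group(1) + "_" + m.group(2)
    token_ids = [int(i) for i in s["output"]["tokenid"].split(", ")]
    print(utt_id, speaker, token_ids)
# 4771-29403-0025 4771_29403 [4815, 861]
# 1564-142299-0096 1564_142299 [12, 7]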
185,315
import logging
import math
import os

import sentencepiece as spm
import torch

from fairseq import checkpoint_utils, options, progress_bar, utils, tasks
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.utils import import_user_module


def check_args(args):
    assert args.path is not None, "--path required for generation!"
    assert args.results_path is not None, "--results_path required for generation!"
    assert (
        not args.sampling or args.nbest == args.beam
    ), "--sampling requires --nbest to be equal to --beam"
    assert (
        args.replace_unk is None or args.raw_text
    ), "--replace-unk requires a raw text dataset (--raw-text)"
null
185,316
import logging
import math
import os

import sentencepiece as spm
import torch

from fairseq import checkpoint_utils, options, progress_bar, utils, tasks
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.utils import import_user_module


def get_dataset_itr(args, task):
    return task.get_batch_iterator(
        dataset=task.dataset(args.gen_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=(1000000.0, 1000000.0),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=args.required_batch_size_multiple,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
    ).next_epoch_itr(shuffle=False)
null
185,317
import logging
import math
import os

import sentencepiece as spm
import torch

from fairseq import checkpoint_utils, options, progress_bar, utils, tasks
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.utils import import_user_module

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


def process_predictions(
    args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
    for hypo in hypos[: min(len(hypos), args.nbest)]:
        hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        hyp_words = sp.DecodePieces(hyp_pieces.split())
        print(
            "{} ({}-{})".format(hyp_pieces, speaker, id), file=res_files["hypo.units"]
        )
        print("{} ({}-{})".format(hyp_words, speaker, id), file=res_files["hypo.words"])
        tgt_pieces = tgt_dict.string(target_tokens)
        tgt_words = sp.DecodePieces(tgt_pieces.split())
        print("{} ({}-{})".format(tgt_pieces, speaker, id), file=res_files["ref.units"])
        print("{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"])
        # only score top hypothesis
        if not args.quiet:
            logger.debug("HYPO:" + hyp_words)
            logger.debug("TARGET:" + tgt_words)
            logger.debug("___________________")
null
185,318
import logging
import math
import os

import sentencepiece as spm
import torch

from fairseq import checkpoint_utils, options, progress_bar, utils, tasks
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.utils import import_user_module


def prepare_result_files(args):
    def get_res_file(file_prefix):
        path = os.path.join(
            args.results_path,
            "{}-{}-{}.txt".format(
                file_prefix, os.path.basename(args.path), args.gen_subset
            ),
        )
        return open(path, "w", buffering=1)

    return {
        "hypo.words": get_res_file("hypo.word"),
        "hypo.units": get_res_file("hypo.units"),
        "ref.words": get_res_file("ref.word"),
        "ref.units": get_res_file("ref.units"),
    }
null
185,319
import logging
import math
import os

import sentencepiece as spm
import torch

from fairseq import checkpoint_utils, options, progress_bar, utils, tasks
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.utils import import_user_module


def load_models_and_criterions(filenames, arg_overrides=None, task=None):
    models = []
    criterions = []
    for filename in filenames:
        if not os.path.exists(filename):
            raise IOError("Model file not found: {}".format(filename))
        state = checkpoint_utils.load_checkpoint_to_cpu(filename, arg_overrides)
        args = state["args"]
        if task is None:
            task = tasks.setup_task(args)

        # build model for ensemble
        model = task.build_model(args)
        model.load_state_dict(state["model"], strict=True)
        models.append(model)

        criterion = task.build_criterion(args)
        if "criterion" in state:
            criterion.load_state_dict(state["criterion"], strict=True)
        criterions.append(criterion)
    return models, criterions, args
null
185,320
import logging
import math
import os

import sentencepiece as spm
import torch

from fairseq import checkpoint_utils, options, progress_bar, utils, tasks
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.utils import import_user_module

The provided code snippet includes necessary dependencies for implementing the `optimize_models` function. Write a Python function `def optimize_models(args, use_cuda, models)` to solve the following problem:
Optimize ensemble for generation
Here is the function:

def optimize_models(args, use_cuda, models):
    """Optimize ensemble for generation"""
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
Optimize ensemble for generation
185,321
import logging import math import os import sentencepiece as spm import torch from fairseq import checkpoint_utils, options, progress_bar, utils, tasks from fairseq.meters import StopwatchMeter, TimeMeter from fairseq.utils import import_user_module def add_asr_eval_argument(parser): parser.add_argument("--kspmodel", default=None, help="sentence piece model") parser.add_argument( "--wfstlm", default=None, help="wfstlm on dictonary output units" ) parser.add_argument( "--rnnt_decoding_type", default="greedy", help="wfstlm on dictonary\ output units", ) parser.add_argument( "--lm-weight", "--lm_weight", type=float, default=0.2, help="weight for lm while interpolating with neural score", ) parser.add_argument( "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level" ) parser.add_argument( "--w2l-decoder", choices=["viterbi", "kenlm"], help="use a w2l decoder" ) parser.add_argument("--lexicon", help="lexicon for w2l decoder") parser.add_argument("--kenlm-model", help="kenlm model for w2l decoder") parser.add_argument("--beam-threshold", type=float, default=25.0) parser.add_argument("--word-score", type=float, default=1.0) parser.add_argument("--unk-weight", type=float, default=-math.inf) parser.add_argument("--sil-weight", type=float, default=0.0) return parser def main(args): check_args(args) import_user_module(args) if args.max_tokens is None and args.max_sentences is None: args.max_tokens = 30000 logger.info(args) use_cuda = torch.cuda.is_available() and not args.cpu # Load dataset splits task = tasks.setup_task(args) task.load_dataset(args.gen_subset) logger.info( "| {} {} {} examples".format( args.data, args.gen_subset, len(task.dataset(args.gen_subset)) ) ) # Set dictionary tgt_dict = task.target_dictionary logger.info("| decoding with criterion {}".format(args.criterion)) # Load ensemble logger.info("| loading model(s) from {}".format(args.path)) models, criterions, _model_args = load_models_and_criterions( args.path.split(":"), arg_overrides=eval(args.model_overrides), # noqa task=task, ) optimize_models(args, use_cuda, models) # hack to pass transitions to W2lDecoder if args.criterion == "asg_loss": trans = criterions[0].asg.trans.data args.asg_transitions = torch.flatten(trans).tolist() # Load dataset (possibly sharded) itr = get_dataset_itr(args, task) # Initialize generator gen_timer = StopwatchMeter() generator = task.build_generator(args) num_sentences = 0 if not os.path.exists(args.results_path): os.makedirs(args.results_path) sp = spm.SentencePieceProcessor() sp.Load(os.path.join(args.data, "spm.model")) res_files = prepare_result_files(args) with progress_bar.build_progress_bar(args, itr) as t: wps_meter = TimeMeter() for sample in t: sample = utils.move_to_cuda(sample) if use_cuda else sample if "net_input" not in sample: continue prefix_tokens = None if args.prefix_size > 0: prefix_tokens = sample["target"][:, : args.prefix_size] gen_timer.start() hypos = task.inference_step(generator, models, sample, prefix_tokens) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample["id"].tolist()): speaker = task.dataset(args.gen_subset).speakers[int(sample_id)] id = task.dataset(args.gen_subset).ids[int(sample_id)] target_tokens = ( utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu() ) # Process top predictions process_predictions( args, hypos[i], sp, tgt_dict, target_tokens, res_files, speaker, id ) wps_meter.update(num_generated_tokens) t.log({"wps": 
round(wps_meter.avg)}) num_sentences += sample["nsentences"] logger.info( "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}" "sentences/s, {:.2f} tokens/s)".format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1.0 / gen_timer.avg, ) ) logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam)) def cli_main(): parser = options.get_generation_parser() parser = add_asr_eval_argument(parser) args = options.parse_args_and_arch(parser) main(args)
null
185,322
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import concurrent.futures
from itertools import chain
import argparse
import os
import json
import sentencepiece as spm
import multiprocessing
import torchaudio
from fairseq.data import Dictionary

MILLISECONDS_TO_SECONDS = 0.001


def process_sample(aud_path, label, utt_id, sp, tgt_dict):
    input = {}
    output = {}
    si, ei = torchaudio.info(aud_path)
    input["length_ms"] = int(si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS)
    input["path"] = aud_path

    token = " ".join(sp.EncodeAsPieces(label))
    ids = tgt_dict.encode_line(token, append_eos=False)

    output["text"] = label
    output["token"] = token
    output["tokenid"] = ', '.join(map(str, [t.tolist() for t in ids]))
    return {utt_id: {"input": input, "output": output}}
null
185,345
from functools import lru_cache import json def find_token(sentence, start_pos): found_tok = None for tok in sentence: if tok.idx == start_pos: found_tok = tok break return found_tok def find_span(sentence, search_text, start=0): search_text = search_text.lower() for tok in sentence[start:]: remainder = sentence[tok.i:].text.lower() if remainder.startswith(search_text): len_to_consume = len(search_text) start_idx = tok.idx for next_tok in sentence[tok.i:]: end_idx = next_tok.idx + len(next_tok.text) if end_idx - start_idx == len_to_consume: span = sentence[tok.i:next_tok.i + 1] return span return None def get_detokenizer(): from sacremoses import MosesDetokenizer detok = MosesDetokenizer(lang='en') return detok def get_spacy_nlp(): import en_core_web_lg nlp = en_core_web_lg.load() return nlp def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False): detok = get_detokenizer() nlp = get_spacy_nlp() with open(input_fname) as fin: for line in fin: sample = json.loads(line.strip()) if positive_only and 'label' in sample and not sample['label']: # only consider examples where the query is correct continue target = sample['target'] # clean up the query query = target['span1_text'] if query is not None: if '\n' in query: continue if query.endswith('.') or query.endswith(','): query = query[:-1] # split tokens tokens = sample['text'].split(' ') def strip_pronoun(x): return x.rstrip('.,"') # find the pronoun pronoun_idx = target['span2_index'] pronoun = strip_pronoun(target['span2_text']) if strip_pronoun(tokens[pronoun_idx]) != pronoun: # hack: sometimes the index is misaligned if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun: pronoun_idx += 1 else: raise Exception('Misaligned pronoun!') assert strip_pronoun(tokens[pronoun_idx]) == pronoun # split tokens before and after the pronoun before = tokens[:pronoun_idx] after = tokens[pronoun_idx + 1:] # the GPT BPE attaches leading spaces to tokens, so we keep track # of whether we need spaces before or after the pronoun leading_space = ' ' if pronoun_idx > 0 else '' trailing_space = ' ' if len(after) > 0 else '' # detokenize before = detok.detokenize(before, return_str=True) pronoun = detok.detokenize([pronoun], return_str=True) after = detok.detokenize(after, return_str=True) # hack: when the pronoun ends in a period (or comma), move the # punctuation to the "after" part if pronoun.endswith('.') or pronoun.endswith(','): after = pronoun[-1] + trailing_space + after pronoun = pronoun[:-1] # hack: when the "after" part begins with a comma or period, remove # the trailing space if after.startswith('.') or after.startswith(','): trailing_space = '' # parse sentence with spacy sentence = nlp(before + leading_space + pronoun + trailing_space + after) # find pronoun span start = len(before + leading_space) first_pronoun_tok = find_token(sentence, start_pos=start) pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i) assert pronoun_span.text == pronoun if eval: # convert to format where pronoun is surrounded by "[]" and # query is surrounded by "_" query_span = find_span(sentence, query) query_with_ws = '_{}_{}'.format( query_span.text, (' ' if query_span.text_with_ws.endswith(' ') else '') ) pronoun_with_ws = '[{}]{}'.format( pronoun_span.text, (' ' if pronoun_span.text_with_ws.endswith(' ') else '') ) if query_span.start < pronoun_span.start: first = (query_span, query_with_ws) second = (pronoun_span, pronoun_with_ws) else: first = (pronoun_span, pronoun_with_ws) second = (query_span, query_with_ws) sentence = ( 
sentence[:first[0].start].text_with_ws + first[1] + sentence[first[0].end:second[0].start].text_with_ws + second[1] + sentence[second[0].end:].text ) yield sentence, sample.get('label', None) else: yield sentence, pronoun_span, query, sample.get('label', None)
null
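The span search in find_span above can be illustrated without spaCy: scan token start offsets until the remaining text begins with the query, then consume tokens until the covered characters match the query length exactly. A plain-string sketch (the function name is my own):

def find_span_offsets(tokens, search_text):
    # Compute the character offset where each whitespace-joined token starts.
    text = " ".join(tokens)
    offsets, pos = [], 0
    for tok in tokens:
        offsets.append(pos)
        pos += len(tok) + 1
    search_text = search_text.lower()
    for i, start in enumerate(offsets):
        if text.lower()[start:].startswith(search_text):
            # Extend token by token until the span covers the query exactly.
            for j in range(i, len(tokens)):
                end = offsets[j] + len(tokens[j])
                if end - start == len(search_text):
                    return (i, j + 1)  # token slice covering the span
    return None

print(find_span_offsets(["The", "city", "councilmen", "refused"], "city councilmen"))
# (1, 3)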
185,349
import argparse
from itertools import chain
import sys
import random

import numpy as np
from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu


def dictolist(d):
    ...  # body elided in this row


def load_sys(paths):
    src, tgt, hypos, log_probs = {}, {}, {}, {}
    for path in paths:
        with open(path) as f:
            for line in f:
                line = line.rstrip()
                # parse the S-/T-/H- prefixed lines written by generate.py
                if line.startswith(('S-', 'T-', 'H-')):
                    i = int(line[line.find('-')+1:line.find('\t')])
                    if line.startswith('S-'):
                        src[i] = line.split('\t')[1]
                    if line.startswith('T-'):
                        tgt[i] = line.split('\t')[1]
                    if line.startswith('H-'):
                        if i not in hypos:
                            hypos[i] = []
                            log_probs[i] = []
                        hypos[i].append(line.split('\t')[2])
                        log_probs[i].append(float(line.split('\t')[1]))
    return dictolist(src), dictolist(tgt), dictolist(hypos), dictolist(log_probs)
null
185,354
from fairseq import options def add_reranking_args(parser): group = parser.add_argument_group("Reranking") # fmt: off group.add_argument('--score-model1', '-s1', type=str, metavar='FILE', required=True, help='path to first model or ensemble of models for rescoring') group.add_argument('--score-model2', '-s2', type=str, metavar='FILE', required=False, help='path to second model or ensemble of models for rescoring') group.add_argument('--num-rescore', '-n', type=int, metavar='N', default=10, help='the number of candidate hypothesis to rescore') group.add_argument('-bz', '--batch-size', type=int, metavar='N', default=128, help='batch size for generating the nbest list') group.add_argument('--gen-subset', default='test', metavar='SET', choices=['test', 'train', 'valid'], help='data subset to generate (train, valid, test)') group.add_argument('--gen-model', default=None, metavar='FILE', help='the model to generate translations') group.add_argument('-b1', '--backwards1', action='store_true', help='whether or not the first model group is backwards') group.add_argument('-b2', '--backwards2', action='store_true', help='whether or not the second model group is backwards') group.add_argument('-a', '--weight1', default=1, nargs='+', type=float, help='the weight(s) of the first model') group.add_argument('-b', '--weight2', default=1, nargs='+', type=float, help='the weight(s) of the second model, or the gen model if using nbest from interactive.py') group.add_argument('-c', '--weight3', default=1, nargs='+', type=float, help='the weight(s) of the third model') # lm arguments group.add_argument('-lm', '--language-model', default=None, metavar='FILE', help='language model for target language to rescore translations') group.add_argument('--lm-dict', default=None, metavar='FILE', help='the dict of the language model for the target language') group.add_argument('--lm-name', default=None, help='the name of the language model for the target language') group.add_argument('--lm-bpe-code', default=None, metavar='FILE', help='the bpe code for the language model for the target language') group.add_argument('--data-dir-name', default=None, help='name of data directory') group.add_argument('--lenpen', default=1, nargs='+', type=float, help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences') group.add_argument('--score-dict-dir', default=None, help='the directory with dictionaries for the scoring models') group.add_argument('--right-to-left1', action='store_true', help='whether the first model group is a right to left model') group.add_argument('--right-to-left2', action='store_true', help='whether the second model group is a right to left model') group.add_argument('--remove-bpe', default='@@ ', help='the bpe symbol, used for the bitext and LM') group.add_argument('--prefix-len', default=None, type=int, help='the length of the target prefix to use in rescoring (in terms of words wo bpe)') group.add_argument('--sampling', action='store_true', help='use sampling instead of beam search for generating n best list') group.add_argument('--diff-bpe', action='store_true', help='bpe for rescoring and nbest list not the same') group.add_argument('--rescore-bpe-code', default=None, help='bpe code for rescoring models') group.add_argument('--nbest-list', default=None, help='use predefined nbest list in interactive.py format') group.add_argument('--write-hypos', default=None, help='filename prefix to write hypos to') group.add_argument('--ref-translation', default=None, help='reference translation to use with 
nbest list from interactive.py') group.add_argument('--backwards-score-dict-dir', default=None, help='the directory with dictionaries for the backwards model,' 'if None then it is assumed the fw and backwards models share dictionaries') # extra scaling args group.add_argument('--gen-model-name', default=None, help='the name of the models that generated the nbest list') group.add_argument('--model1-name', default=None, help='the name of the set for model1 group ') group.add_argument('--model2-name', default=None, help='the name of the set for model2 group') group.add_argument('--shard-id', default=0, type=int, help='the id of the shard to generate') group.add_argument('--num-shards', default=1, type=int, help='the number of shards to generate across') group.add_argument('--all-shards', action='store_true', help='use all shards') group.add_argument('--target-prefix-frac', default=None, type=float, help='the fraction of the target prefix to use in rescoring (in terms of words wo bpe)') group.add_argument('--source-prefix-frac', default=None, type=float, help='the fraction of the source prefix to use in rescoring (in terms of words wo bpe)') group.add_argument('--normalize', action='store_true', help='whether to normalize by src and target len') return group def get_reranking_parser(default_task='translation'): parser = options.get_parser('Generation and reranking', default_task) add_reranking_args(parser) return parser
null
185,355
from fairseq import options def add_reranking_args(parser): group = parser.add_argument_group("Reranking") # fmt: off group.add_argument('--score-model1', '-s1', type=str, metavar='FILE', required=True, help='path to first model or ensemble of models for rescoring') group.add_argument('--score-model2', '-s2', type=str, metavar='FILE', required=False, help='path to second model or ensemble of models for rescoring') group.add_argument('--num-rescore', '-n', type=int, metavar='N', default=10, help='the number of candidate hypothesis to rescore') group.add_argument('-bz', '--batch-size', type=int, metavar='N', default=128, help='batch size for generating the nbest list') group.add_argument('--gen-subset', default='test', metavar='SET', choices=['test', 'train', 'valid'], help='data subset to generate (train, valid, test)') group.add_argument('--gen-model', default=None, metavar='FILE', help='the model to generate translations') group.add_argument('-b1', '--backwards1', action='store_true', help='whether or not the first model group is backwards') group.add_argument('-b2', '--backwards2', action='store_true', help='whether or not the second model group is backwards') group.add_argument('-a', '--weight1', default=1, nargs='+', type=float, help='the weight(s) of the first model') group.add_argument('-b', '--weight2', default=1, nargs='+', type=float, help='the weight(s) of the second model, or the gen model if using nbest from interactive.py') group.add_argument('-c', '--weight3', default=1, nargs='+', type=float, help='the weight(s) of the third model') # lm arguments group.add_argument('-lm', '--language-model', default=None, metavar='FILE', help='language model for target language to rescore translations') group.add_argument('--lm-dict', default=None, metavar='FILE', help='the dict of the language model for the target language') group.add_argument('--lm-name', default=None, help='the name of the language model for the target language') group.add_argument('--lm-bpe-code', default=None, metavar='FILE', help='the bpe code for the language model for the target language') group.add_argument('--data-dir-name', default=None, help='name of data directory') group.add_argument('--lenpen', default=1, nargs='+', type=float, help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences') group.add_argument('--score-dict-dir', default=None, help='the directory with dictionaries for the scoring models') group.add_argument('--right-to-left1', action='store_true', help='whether the first model group is a right to left model') group.add_argument('--right-to-left2', action='store_true', help='whether the second model group is a right to left model') group.add_argument('--remove-bpe', default='@@ ', help='the bpe symbol, used for the bitext and LM') group.add_argument('--prefix-len', default=None, type=int, help='the length of the target prefix to use in rescoring (in terms of words wo bpe)') group.add_argument('--sampling', action='store_true', help='use sampling instead of beam search for generating n best list') group.add_argument('--diff-bpe', action='store_true', help='bpe for rescoring and nbest list not the same') group.add_argument('--rescore-bpe-code', default=None, help='bpe code for rescoring models') group.add_argument('--nbest-list', default=None, help='use predefined nbest list in interactive.py format') group.add_argument('--write-hypos', default=None, help='filename prefix to write hypos to') group.add_argument('--ref-translation', default=None, help='reference translation to use with 
nbest list from interactive.py') group.add_argument('--backwards-score-dict-dir', default=None, help='the directory with dictionaries for the backwards model,' 'if None then it is assumed the fw and backwards models share dictionaries') # extra scaling args group.add_argument('--gen-model-name', default=None, help='the name of the models that generated the nbest list') group.add_argument('--model1-name', default=None, help='the name of the set for model1 group ') group.add_argument('--model2-name', default=None, help='the name of the set for model2 group') group.add_argument('--shard-id', default=0, type=int, help='the id of the shard to generate') group.add_argument('--num-shards', default=1, type=int, help='the number of shards to generate across') group.add_argument('--all-shards', action='store_true', help='use all shards') group.add_argument('--target-prefix-frac', default=None, type=float, help='the fraction of the target prefix to use in rescoring (in terms of words wo bpe)') group.add_argument('--source-prefix-frac', default=None, type=float, help='the fraction of the source prefix to use in rescoring (in terms of words wo bpe)') group.add_argument('--normalize', action='store_true', help='whether to normalize by src and target len') return group def add_tuning_args(parser): group = parser.add_argument_group("Tuning") group.add_argument('--lower-bound', default=[-0.7], nargs='+', type=float, help='lower bound of search space') group.add_argument('--upper-bound', default=[3], nargs='+', type=float, help='upper bound of search space') group.add_argument('--tune-param', default=['lenpen'], nargs='+', choices=['lenpen', 'weight1', 'weight2', 'weight3'], help='the parameter(s) to tune') group.add_argument('--tune-subset', default='valid', choices=['valid', 'test', 'train'], help='the subset to tune on ') group.add_argument('--num-trials', default=1000, type=int, help='number of trials to do for random search') group.add_argument('--share-weights', action='store_true', help='share weight2 and weight 3') return group def get_tuning_parser(default_task='translation'): parser = options.get_parser('Reranking tuning', default_task) add_reranking_args(parser) add_tuning_args(parser) return parser
null
185,356
import subprocess
import os
import re
from fairseq import options
import eval_lm
import preprocess
from contextlib import redirect_stdout
import math

The provided code snippet includes necessary dependencies for implementing the `reprocess` function. Write a Python function `def reprocess(fle)` to solve the following problem:
reprocess generate.py output
Here is the function:

def reprocess(fle):
    """reprocess generate.py output"""
    # Takes in a file of generate.py translation output and returns a source
    # dict and a hypothesis dict, where keys are the ID num and values are the
    # corresponding source and translation. There may be several translations
    # per source, so the values of hypothesis_dict are lists.
    with open(fle, 'r') as f:
        txt = f.read()

    p = re.compile(r"[STHP][-]\d+\s*")
    hp = re.compile(r"(\s*[-]?\d+[.]?\d+\s*)|(\s*(-inf)\s*)")
    source_dict = {}
    hypothesis_dict = {}
    score_dict = {}
    target_dict = {}
    pos_score_dict = {}
    lines = txt.split("\n")

    for line in lines:
        line += "\n"
        prefix = re.search(p, line)
        if prefix is not None:
            assert len(prefix.group()) > 2, "prefix id not found"
            _, j = prefix.span()
            id_num = prefix.group()[2:]
            id_num = int(id_num)
            line_type = prefix.group()[0]
            if line_type == "H":
                h_txt = line[j:]
                hypo = re.search(hp, h_txt)
                assert hypo is not None, ("regular expression failed to find the hypothesis scoring")
                _, i = hypo.span()
                score = hypo.group()
                if id_num in hypothesis_dict:
                    hypothesis_dict[id_num].append(h_txt[i:])
                    score_dict[id_num].append(float(score))
                else:
                    hypothesis_dict[id_num] = [h_txt[i:]]
                    score_dict[id_num] = [float(score)]
            elif line_type == "S":
                source_dict[id_num] = (line[j:])
            elif line_type == "T":
                target_dict[id_num] = (line[j:])
            elif line_type == "P":
                pos_scores = (line[j:]).split()
                pos_scores = [float(x) for x in pos_scores]
                if id_num in pos_score_dict:
                    pos_score_dict[id_num].append(pos_scores)
                else:
                    pos_score_dict[id_num] = [pos_scores]

    return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
reprocess generate.py output
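A worked example for reprocess above, using a tiny file in generate.py's S/T/H/P line format. All ids, sentences, and scores here are made up for the demo.

import tempfile

sample = (
    "S-0\tein kleiner test\n"
    "T-0\ta small test\n"
    "H-0\t-0.25\ta small test\n"
    "P-0\t-0.10 -0.30 -0.35\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(sample)
    path = f.name

src, hyp, score, tgt, pos = reprocess(path)
print(src[0])    # 'ein kleiner test\n'
print(hyp[0])    # ['a small test\n']
print(score[0])  # [-0.25]
print(pos[0])    # [[-0.1, -0.3, -0.35]]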
185,357
import subprocess
import os
import re
from fairseq import options
import eval_lm
import preprocess
from contextlib import redirect_stdout
import math

The provided code snippet includes necessary dependencies for implementing the `reprocess_nbest` function. Write a Python function `def reprocess_nbest(fle)` to solve the following problem:
reprocess interactive.py output
Here is the function:

def reprocess_nbest(fle):
    """reprocess interactive.py output"""
    with open(fle, 'r') as f:
        txt = f.read()

    source_dict = {}
    hypothesis_dict = {}
    score_dict = {}
    target_dict = {}
    pos_score_dict = {}
    lines = txt.split("\n")

    hp = re.compile(r'[-]?\d+[.]?\d+')
    j = -1

    for _i, line in enumerate(lines):
        line += "\n"
        line_type = line[0]
        if line_type == "H":
            hypo = re.search(hp, line)
            _, start_index = hypo.span()
            score = hypo.group()
            if j in score_dict:
                score_dict[j].append(float(score))
                hypothesis_dict[j].append(line[start_index:].strip("\t"))
            else:
                score_dict[j] = [float(score)]
                hypothesis_dict[j] = [line[start_index:].strip("\t")]
        elif line_type == "O":
            j += 1
            source_dict[j] = line[2:]
            # we don't have the targets for interactive.py
            target_dict[j] = "filler"
        elif line_type == "P":
            pos_scores = [float(pos_score) for pos_score in line.split()[1:]]
            if j in pos_score_dict:
                pos_score_dict[j].append(pos_scores)
            else:
                pos_score_dict[j] = [pos_scores]

    assert source_dict.keys() == hypothesis_dict.keys()
    assert source_dict.keys() == pos_score_dict.keys()
    assert source_dict.keys() == score_dict.keys()

    return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
reprocess interactive.py output
185,358
import subprocess
import os
import re
from fairseq import options
import eval_lm
import preprocess
from contextlib import redirect_stdout
import math


def remove_bpe(line, bpe_symbol):
    line = line.replace("\n", '')
    line = (line + ' ').replace(bpe_symbol, '').rstrip()
    return line + ("\n")


def remove_bpe_dict(pred_dict, bpe_symbol):
    new_dict = {}
    for i in pred_dict:
        if type(pred_dict[i]) == list:
            new_list = [remove_bpe(elem, bpe_symbol) for elem in pred_dict[i]]
            new_dict[i] = new_list
        else:
            new_dict[i] = remove_bpe(pred_dict[i], bpe_symbol)
    return new_dict
null
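Example of remove_bpe above stripping subword-nmt style "@@ " markers:

print(remove_bpe("the new@@ est mod@@ el\n", "@@ "))
# -> 'the newest model\n'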
185,359
import subprocess
import os
import re
from fairseq import options
import eval_lm
import preprocess
from contextlib import redirect_stdout
import math


def parse_bleu_scoring(line):
    p = re.compile(r'(BLEU4 = )\d+[.]\d+')
    res = re.search(p, line)
    assert res is not None, line
    return float(res.group()[8:])
null
185,360
import subprocess
import os
import re
from fairseq import options
import eval_lm
import preprocess
from contextlib import redirect_stdout
import math

The provided code snippet includes necessary dependencies for implementing the `get_full_from_prefix` function. Write a Python function `def get_full_from_prefix(hypo_prefix, hypos)` to solve the following problem:
given a hypo prefix, recover the first hypo from the list of complete hypos beginning with that prefix
Here is the function:

def get_full_from_prefix(hypo_prefix, hypos):
    """given a hypo prefix, recover the first hypo from
    the list of complete hypos beginning with that prefix"""
    for hypo in hypos:
        hypo_prefix = hypo_prefix.strip("\n")
        len_prefix = len(hypo_prefix)
        if hypo[:len_prefix] == hypo_prefix:
            return hypo
    # no match found
    raise Exception()
given a hypo prefix, recover the first hypo from the list of complete hypos beginning with that prefix
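Example usage of get_full_from_prefix:

hypos = ["a small test of reranking\n", "a tiny test\n"]
print(get_full_from_prefix("a small", hypos))
# -> 'a small test of reranking\n'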
185,361
import subprocess
import os
import re
from fairseq import options
import eval_lm
import preprocess
from contextlib import redirect_stdout
import math


def get_score(a, b, c, target_len, bitext_score1, bitext_score2=None, lm_score=None,
              lenpen=None, src_len=None, tgt_len=None, bitext1_backwards=False,
              bitext2_backwards=False, normalize=False):
    # a backwards model scores the source given the target, so normalize
    # by the source length instead of the target length
    if bitext1_backwards:
        bitext1_norm = src_len
    else:
        bitext1_norm = tgt_len
    if bitext_score2 is not None:
        if bitext2_backwards:
            bitext2_norm = src_len
        else:
            bitext2_norm = tgt_len
    else:
        bitext2_norm = 1
        bitext_score2 = 0
    if normalize:
        score = a*bitext_score1/bitext1_norm + b*bitext_score2/bitext2_norm + c*lm_score/src_len
    else:
        score = a*bitext_score1 + b*bitext_score2 + c*lm_score

    if lenpen is not None:
        score /= (target_len) ** float(lenpen)

    return score
null
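A worked example for get_score above: combine a forward model score, a backward model score, and an LM score with weights a, b, c, then apply a length penalty. All values are illustrative.

s = get_score(
    a=1.0, b=0.5, c=0.3,
    target_len=10, bitext_score1=-12.0, bitext_score2=-15.0, lm_score=-20.0,
    lenpen=1.0, src_len=9, tgt_len=10,
    bitext1_backwards=False, bitext2_backwards=True, normalize=True,
)
print(s)
# (1.0*-12/10 + 0.5*-15/9 + 0.3*-20/9) / 10**1.0  ~= -0.27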
185,362
import subprocess import os import re from fairseq import options import eval_lm import preprocess from contextlib import redirect_stdout import math def calc_length_from_frac(bpe_sentence, prefix_frac, bpe_symbol): # return number of words, (not bpe tokens) that we want no_bpe_sen = remove_bpe(bpe_sentence, bpe_symbol) len_sen = len(no_bpe_sen.split()) num_words = math.ceil(len_sen * prefix_frac) prefix = get_prefix_no_bpe(bpe_sentence, bpe_symbol, num_words) num_bpe_tokens = len(prefix.split()) return num_words, prefix, num_bpe_tokens def get_num_bpe_tokens_from_len(sentence, bpe_symbol, prefix_len): """given a prefix length in terms of words, return the number of bpe tokens""" prefix = get_prefix_no_bpe(sentence, bpe_symbol, prefix_len) assert len(remove_bpe(prefix, bpe_symbol).split()) <= prefix_len return len(prefix.split(" ")) def get_score_from_pos(pos_score_dict, prefix_len, hypo_dict, bpe_symbol, hypo_frac, backwards): score_dict = {} num_bpe_tokens_dict = {} assert prefix_len is None or hypo_frac is None for key in pos_score_dict: score_dict[key] = [] num_bpe_tokens_dict[key] = [] for i in range(len(pos_score_dict[key])): if prefix_len is not None and not backwards: num_bpe_tokens = get_num_bpe_tokens_from_len(hypo_dict[key][i], bpe_symbol, prefix_len) score_dict[key].append(sum(pos_score_dict[key][i][:num_bpe_tokens])) num_bpe_tokens_dict[key].append(num_bpe_tokens) elif hypo_frac is not None: num_words, shortened, hypo_prefix_len = calc_length_from_frac(hypo_dict[key][i], hypo_frac, bpe_symbol) score_dict[key].append(sum(pos_score_dict[key][i][:hypo_prefix_len])) num_bpe_tokens_dict[key].append(hypo_prefix_len) else: score_dict[key].append(sum(pos_score_dict[key][i])) num_bpe_tokens_dict[key].append(len(pos_score_dict[key][i])) return score_dict, num_bpe_tokens_dict
null
185,363
import subprocess import os import re from fairseq import options import eval_lm import preprocess from contextlib import redirect_stdout import math def calc_length_from_frac(bpe_sentence, prefix_frac, bpe_symbol): # return number of words, (not bpe tokens) that we want no_bpe_sen = remove_bpe(bpe_sentence, bpe_symbol) len_sen = len(no_bpe_sen.split()) num_words = math.ceil(len_sen * prefix_frac) prefix = get_prefix_no_bpe(bpe_sentence, bpe_symbol, num_words) num_bpe_tokens = len(prefix.split()) return num_words, prefix, num_bpe_tokens def get_num_bpe_tokens_from_len(sentence, bpe_symbol, prefix_len): """given a prefix length in terms of words, return the number of bpe tokens""" prefix = get_prefix_no_bpe(sentence, bpe_symbol, prefix_len) assert len(remove_bpe(prefix, bpe_symbol).split()) <= prefix_len return len(prefix.split(" ")) def remove_bpe(line, bpe_symbol): line = line.replace("\n", '') line = (line + ' ').replace(bpe_symbol, '').rstrip() return line+("\n") The provided code snippet includes necessary dependencies for implementing the `parse_lm` function. Write a Python function `def parse_lm(input_file, prefix_len=None, bpe_symbol=None, target_prefix_frac=None)` to solve the following problem: parse output of eval_lm Here is the function: def parse_lm(input_file, prefix_len=None, bpe_symbol=None, target_prefix_frac=None): """parse output of eval_lm""" with open(input_file, 'r') as f: text = f.readlines() text = text[7:] cleaned_text = text[:-2] sentences = {} sen_scores = {} sen_pos_scores = {} no_bpe_sentences = {} num_bpe_tokens_dict = {} for _i, line in enumerate(cleaned_text): tokens = line.split() if tokens[0].isdigit(): line_id = int(tokens[0]) scores = [float(x[1:-1]) for x in tokens[2::2]] sentences[line_id] = " ".join(tokens[1::2][:-1])+"\n" if bpe_symbol is not None: # exclude <eos> symbol to match output from generate.py bpe_sen = " ".join(tokens[1::2][:-1])+"\n" no_bpe_sen = remove_bpe(bpe_sen, bpe_symbol) no_bpe_sentences[line_id] = no_bpe_sen if prefix_len is not None: num_bpe_tokens = get_num_bpe_tokens_from_len(bpe_sen, bpe_symbol, prefix_len) sen_scores[line_id] = sum(scores[:num_bpe_tokens]) num_bpe_tokens_dict[line_id] = num_bpe_tokens elif target_prefix_frac is not None: num_words, shortened, target_prefix_len = calc_length_from_frac(bpe_sen, target_prefix_frac, bpe_symbol) sen_scores[line_id] = sum(scores[:target_prefix_len]) num_bpe_tokens_dict[line_id] = target_prefix_len else: sen_scores[line_id] = sum(scores) num_bpe_tokens_dict[line_id] = len(scores) sen_pos_scores[line_id] = scores return sentences, sen_scores, sen_pos_scores, no_bpe_sentences, num_bpe_tokens_dict
parse output of eval_lm
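An illustrative round trip for parse_lm above. The input mimics the "<id> tok [logp] tok [logp] ..." lines the function parses, with enough filler lines to satisfy its text[7:] / text[:-2] slicing; the contents are made up, and parse_lm from the row above is assumed to be in scope.

import tempfile

header = ["eval_lm header line\n"] * 7
body = ["12 hello [-1.5] world [-2.0] <eos> [-0.5]\n"]
footer = ["summary line\n", "summary line\n"]
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.writelines(header + body + footer)
    path = f.name

sents, sen_scores, sen_pos_scores, _, ntoks = parse_lm(path)
print(sents[12])       # 'hello world\n'  (final <eos> token dropped)
print(sen_scores[12])  # -4.0
print(ntoks[12])       # 3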
185,364
import subprocess
import os
import re
from fairseq import options
import eval_lm
import preprocess
from contextlib import redirect_stdout
import math


def get_directories(data_dir_name, num_rescore, gen_subset, fw_name, shard_id, num_shards,
                    sampling=False, prefix_len=None, target_prefix_frac=None, source_prefix_frac=None):
    nbest_file_id = "nbest_" + str(num_rescore) + \
                    "_subset_" + gen_subset + \
                    "_fw_name_" + fw_name + \
                    "_shard_" + str(shard_id) + \
                    "_of_" + str(num_shards)

    if sampling:
        nbest_file_id += "_sampling"

    # the directory containing all information for this nbest list
    pre_gen = os.path.join(os.path.dirname(__file__)) + "/rerank_data/" + data_dir_name + "/" + nbest_file_id
    # the directory to store the preprocessed nbest list, for left to right rescoring
    left_to_right_preprocessed_dir = pre_gen + "/left_to_right_preprocessed"
    if source_prefix_frac is not None:
        left_to_right_preprocessed_dir = left_to_right_preprocessed_dir + "/prefix_frac" + str(source_prefix_frac)
    # the directory to store the preprocessed nbest list, for right to left rescoring
    right_to_left_preprocessed_dir = pre_gen + "/right_to_left_preprocessed"
    # the directory to store the preprocessed nbest list, for backwards rescoring
    backwards_preprocessed_dir = pre_gen + "/backwards"
    if target_prefix_frac is not None:
        backwards_preprocessed_dir = backwards_preprocessed_dir + "/prefix_frac" + str(target_prefix_frac)
    elif prefix_len is not None:
        backwards_preprocessed_dir = backwards_preprocessed_dir + "/prefix_" + str(prefix_len)
    # the directory to store the preprocessed nbest list, for rescoring with P(T)
    lm_preprocessed_dir = pre_gen + "/lm_preprocessed"

    return pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
        backwards_preprocessed_dir, lm_preprocessed_dir
null
185,365
import subprocess import os import re from fairseq import options import eval_lm import preprocess from contextlib import redirect_stdout import math def write_reprocessed(sources, hypos, targets, source_outfile, hypo_outfile, target_outfile, right_to_left=False, prefix_len=None, bpe_symbol=None, target_prefix_frac=None, source_prefix_frac=None): """writes nbest hypothesis for rescoring""" assert not (prefix_len is not None and target_prefix_frac is not None), \ "in writing reprocessed, only one type of prefix may be used" assert not (prefix_len is not None and source_prefix_frac is not None), \ "in writing reprocessed, only one type of prefix may be used" assert not (target_prefix_frac is not None and source_prefix_frac is not None), \ "in writing reprocessed, only one type of prefix may be used" with open(source_outfile, 'w') as source_file, \ open(hypo_outfile, 'w') as hypo_file, \ open(target_outfile, 'w') as target_file: assert len(sources) == len(hypos), "sources and hypos list length mismatch" if right_to_left: for i in range(len(sources)): for j in range(len(hypos[i])): if prefix_len is None: hypo_file.write(make_right_to_left(hypos[i][j])+"\n") else: raise NotImplementedError() source_file.write(make_right_to_left(sources[i])+"\n") target_file.write(make_right_to_left(targets[i])+"\n") else: for i in sorted(sources.keys()): for j in range(len(hypos[i])): if prefix_len is not None: shortened = get_prefix_no_bpe(hypos[i][j], bpe_symbol, prefix_len)+"\n" hypo_file.write(shortened) source_file.write(sources[i]) target_file.write(targets[i]) elif target_prefix_frac is not None: num_words, shortened, num_bpe_tokens = \ calc_length_from_frac(hypos[i][j], target_prefix_frac, bpe_symbol) shortened += "\n" hypo_file.write(shortened) source_file.write(sources[i]) target_file.write(targets[i]) elif source_prefix_frac is not None: num_words, shortened, num_bpe_tokensn = \ calc_length_from_frac(sources[i], source_prefix_frac, bpe_symbol) shortened += "\n" hypo_file.write(hypos[i][j]) source_file.write(shortened) target_file.write(targets[i]) else: hypo_file.write(hypos[i][j]) source_file.write(sources[i]) target_file.write(targets[i]) def lm_scoring(preprocess_directory, bpe_status, gen_output, pre_gen, cur_lm_dict, cur_lm_name, cur_language_model, cur_lm_bpe_code, batch_size, lm_score_file, target_lang, source_lang, prefix_len=None): if prefix_len is not None: assert bpe_status == "different", "bpe status must be different to use prefix len" if bpe_status == "no bpe": # run lm on output without bpe write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo, gen_output.no_bpe_target, pre_gen+"/rescore_data_no_bpe.de", pre_gen+"/rescore_data_no_bpe.en", pre_gen+"/reference_file_no_bpe") preprocess_lm_param = ["--only-source", "--trainpref", pre_gen+"/rescore_data_no_bpe."+target_lang, "--srcdict", cur_lm_dict, "--destdir", preprocess_directory] preprocess_parser = options.get_preprocessing_parser() input_args = preprocess_parser.parse_args(preprocess_lm_param) preprocess.main(input_args) eval_lm_param = [preprocess_directory, "--path", cur_language_model, "--output-word-probs", "--batch-size", str(batch_size), "--max-tokens", "1024", "--sample-break-mode", "eos", "--gen-subset", "train"] eval_lm_parser = options.get_eval_lm_parser() input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param) with open(lm_score_file, 'w') as f: with redirect_stdout(f): eval_lm.main(input_args) elif bpe_status == "shared": preprocess_lm_param = ["--only-source", "--trainpref", 
pre_gen+"/rescore_data."+target_lang, "--srcdict", cur_lm_dict, "--destdir", preprocess_directory] preprocess_parser = options.get_preprocessing_parser() input_args = preprocess_parser.parse_args(preprocess_lm_param) preprocess.main(input_args) eval_lm_param = [preprocess_directory, "--path", cur_language_model, "--output-word-probs", "--batch-size", str(batch_size), "--sample-break-mode", "eos", "--gen-subset", "train"] eval_lm_parser = options.get_eval_lm_parser() input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param) with open(lm_score_file, 'w') as f: with redirect_stdout(f): eval_lm.main(input_args) elif bpe_status == "different": rescore_file = pre_gen+"/rescore_data_no_bpe" rescore_bpe = pre_gen+"/rescore_data_new_bpe" rescore_file += "." rescore_bpe += "." write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo, gen_output.no_bpe_target, rescore_file+source_lang, rescore_file+target_lang, pre_gen+"/reference_file_no_bpe", bpe_symbol=None) # apply LM bpe to nbest list bpe_src_param = ["-c", cur_lm_bpe_code, "--input", rescore_file+target_lang, "--output", rescore_bpe+target_lang] subprocess.call(["python", os.path.join(os.path.dirname(__file__), "subword-nmt/subword_nmt/apply_bpe.py")] + bpe_src_param, shell=False) # uncomment to use fastbpe instead of subword-nmt bpe # bpe_src_param = [rescore_bpe+target_lang, rescore_file+target_lang, cur_lm_bpe_code] # subprocess.call(["/private/home/edunov/fastBPE/fast", "applybpe"] + bpe_src_param, shell=False) preprocess_dir = preprocess_directory preprocess_lm_param = ["--only-source", "--trainpref", rescore_bpe+target_lang, "--srcdict", cur_lm_dict, "--destdir", preprocess_dir] preprocess_parser = options.get_preprocessing_parser() input_args = preprocess_parser.parse_args(preprocess_lm_param) preprocess.main(input_args) eval_lm_param = [preprocess_dir, "--path", cur_language_model, "--output-word-probs", "--batch-size", str(batch_size), "--max-tokens", "1024", "--sample-break-mode", "eos", "--gen-subset", "train"] eval_lm_parser = options.get_eval_lm_parser() input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param) with open(lm_score_file, 'w') as f: with redirect_stdout(f): eval_lm.main(input_args)
null
185,366
import subprocess
import os
import re
from fairseq import options
import eval_lm
import preprocess
from contextlib import redirect_stdout
import math


def rescore_file_name(nbest_dir, prefix_len, scorer_name, lm_file=False,
                      target_prefix_frac=None, source_prefix_frac=None, backwards=None):
    if lm_file:
        score_file = nbest_dir + "/lm_score_translations_model_" + scorer_name + ".txt"
    else:
        score_file = nbest_dir + "/" + scorer_name + "_score_translations.txt"
    if backwards:
        if prefix_len is not None:
            score_file += "prefix_len" + str(prefix_len)
        elif target_prefix_frac is not None:
            score_file += "target_prefix_frac" + str(target_prefix_frac)
    else:
        if source_prefix_frac is not None:
            score_file += "source_prefix_frac" + str(source_prefix_frac)
    return score_file
null
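Example of the naming scheme produced by rescore_file_name above (paths are illustrative):

print(rescore_file_name("/tmp/nbest", None, "fw_model", backwards=False))
# -> /tmp/nbest/fw_model_score_translations.txt
print(rescore_file_name("/tmp/nbest", None, "lm", lm_file=True))
# -> /tmp/nbest/lm_score_translations_model_lm.txt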
185,367
import rerank_utils import os from fairseq import options from examples.noisychannel import rerank_options from contextlib import redirect_stdout import generate def score_bw(args): if args.backwards1: scorer1_src = args.target_lang scorer1_tgt = args.source_lang else: scorer1_src = args.source_lang scorer1_tgt = args.target_lang if args.score_model2 is not None: if args.backwards2: scorer2_src = args.target_lang scorer2_tgt = args.source_lang else: scorer2_src = args.source_lang scorer2_tgt = args.target_lang rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \ backwards_preprocessed_dir, lm_preprocessed_dir = \ rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, args.shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac) score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards1) if args.score_model2 is not None: score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards2) if args.right_to_left1: rerank_data1 = right_to_left_preprocessed_dir elif args.backwards1: rerank_data1 = backwards_preprocessed_dir else: rerank_data1 = left_to_right_preprocessed_dir gen_param = ["--batch-size", str(128), "--score-reference", "--gen-subset", "train"] if not rerank1_is_gen and not os.path.isfile(score1_file): print("STEP 4: score the translations for model 1") model_param1 = ["--path", args.score_model1, "--source-lang", scorer1_src, "--target-lang", scorer1_tgt] gen_model1_param = [rerank_data1] + gen_param + model_param1 gen_parser = options.get_generation_parser() input_args = options.parse_args_and_arch(gen_parser, gen_model1_param) with open(score1_file, 'w') as f: with redirect_stdout(f): generate.main(input_args) if args.score_model2 is not None and not os.path.isfile(score2_file) and not rerank2_is_gen: print("STEP 4: score the translations for model 2") if args.right_to_left2: rerank_data2 = right_to_left_preprocessed_dir elif args.backwards2: rerank_data2 = backwards_preprocessed_dir else: rerank_data2 = left_to_right_preprocessed_dir model_param2 = ["--path", args.score_model2, "--source-lang", scorer2_src, "--target-lang", scorer2_tgt] gen_model2_param = [rerank_data2] + gen_param + model_param2 gen_parser = options.get_generation_parser() input_args = options.parse_args_and_arch(gen_parser, gen_model2_param) with open(score2_file, 'w') as f: with redirect_stdout(f): generate.main(input_args) def cli_main(): parser = rerank_options.get_reranking_parser() args = options.parse_args_and_arch(parser) score_bw(args)
null
185,368
import rerank_utils import rerank_generate import rerank_score_bw import rerank_score_lm from fairseq import bleu, options from fairseq.data import dictionary from examples.noisychannel import rerank_options from multiprocessing import Pool import math import numpy as np def rerank(args): if type(args.lenpen) is not list: args.lenpen = [args.lenpen] if type(args.weight1) is not list: args.weight1 = [args.weight1] if type(args.weight2) is not list: args.weight2 = [args.weight2] if type(args.weight3) is not list: args.weight3 = [args.weight3] if args.all_shards: shard_ids = list(range(args.num_shards)) else: shard_ids = [args.shard_id] for shard_id in shard_ids: pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \ backwards_preprocessed_dir, lm_preprocessed_dir = \ rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac) rerank_generate.gen_and_reprocess_nbest(args) rerank_score_bw.score_bw(args) rerank_score_lm.score_lm(args) if args.write_hypos is None: write_targets = pre_gen+"/matched_targets" write_hypos = pre_gen+"/matched_hypos" else: write_targets = args.write_hypos+"_targets" + args.gen_subset write_hypos = args.write_hypos+"_hypos" + args.gen_subset if args.all_shards: write_targets += "_all_shards" write_hypos += "_all_shards" best_lenpen, best_weight1, best_weight2, best_weight3, best_score = \ match_target_hypo(args, write_targets, write_hypos) return best_lenpen, best_weight1, best_weight2, best_weight3, best_score def cli_main(): parser = rerank_options.get_reranking_parser() args = options.parse_args_and_arch(parser) rerank(args)
null
185,369
import rerank import argparse import numpy as np import random from examples.noisychannel import rerank_options from fairseq import options def random_search(args): param_values = [] tuneable_parameters = ['lenpen', 'weight1', 'weight2', 'weight3'] initial_params = [args.lenpen, args.weight1, args.weight2, args.weight3] for i, elem in enumerate(initial_params): if type(elem) is not list: initial_params[i] = [elem] else: initial_params[i] = elem tune_parameters = args.tune_param.copy() for i in range(len(args.tune_param)): assert args.upper_bound[i] >= args.lower_bound[i] index = tuneable_parameters.index(args.tune_param[i]) del tuneable_parameters[index] del initial_params[index] tune_parameters += tuneable_parameters param_values += initial_params random.seed(args.seed) random_params = np.array([ [random.uniform(args.lower_bound[i], args.upper_bound[i]) for i in range(len(args.tune_param))] for k in range(args.num_trials) ]) set_params = np.array([ [initial_params[i][0] for i in range(len(tuneable_parameters))] for k in range(args.num_trials) ]) random_params = np.concatenate((random_params, set_params), 1) rerank_args = vars(args).copy() if args.nbest_list: rerank_args['gen_subset'] = 'test' else: rerank_args['gen_subset'] = args.tune_subset for k in range(len(tune_parameters)): rerank_args[tune_parameters[k]] = list(random_params[:, k]) if args.share_weights: k = tune_parameters.index('weight2') rerank_args['weight3'] = list(random_params[:, k]) rerank_args = argparse.Namespace(**rerank_args) best_lenpen, best_weight1, best_weight2, best_weight3, best_score = rerank.rerank(rerank_args) rerank_args = vars(args).copy() rerank_args['lenpen'] = [best_lenpen] rerank_args['weight1'] = [best_weight1] rerank_args['weight2'] = [best_weight2] rerank_args['weight3'] = [best_weight3] # write the hypothesis from the valid set from the best trial if args.gen_subset != "valid": rerank_args['gen_subset'] = "valid" rerank_args = argparse.Namespace(**rerank_args) rerank.rerank(rerank_args) # test with the best hyperparameters on gen subset rerank_args = vars(args).copy() rerank_args['gen_subset'] = args.gen_subset rerank_args['lenpen'] = [best_lenpen] rerank_args['weight1'] = [best_weight1] rerank_args['weight2'] = [best_weight2] rerank_args['weight3'] = [best_weight3] rerank_args = argparse.Namespace(**rerank_args) rerank.rerank(rerank_args) def cli_main(): parser = rerank_options.get_tuning_parser() args = options.parse_args_and_arch(parser) random_search(args)
null
185,370
import rerank_utils
import os
from fairseq import options
from examples.noisychannel import rerank_options


def score_lm(args):
    using_nbest = args.nbest_list is not None
    pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
        backwards_preprocessed_dir, lm_preprocessed_dir = \
        rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
                                     args.gen_model_name, args.shard_id, args.num_shards,
                                     args.sampling, args.prefix_len, args.target_prefix_frac,
                                     args.source_prefix_frac)

    predictions_bpe_file = pre_gen + "/generate_output_bpe.txt"
    if using_nbest:
        print("Using predefined n-best list from interactive.py")
        predictions_bpe_file = args.nbest_list

    gen_output = rerank_utils.BitextOutputFromGen(predictions_bpe_file,
                                                  bpe_symbol=args.remove_bpe,
                                                  nbest=using_nbest)

    if args.language_model is not None:
        lm_score_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len,
                                                       args.lm_name, lm_file=True)

    if args.language_model is not None and not os.path.isfile(lm_score_file):
        print("STEP 4.5: language modeling for P(T)")
        if args.lm_bpe_code is None:
            bpe_status = "no bpe"
        elif args.lm_bpe_code == "shared":
            bpe_status = "shared"
        else:
            bpe_status = "different"

        rerank_utils.lm_scoring(lm_preprocessed_dir, bpe_status, gen_output, pre_gen,
                                args.lm_dict, args.lm_name, args.language_model,
                                args.lm_bpe_code, 128, lm_score_file,
                                args.target_lang, args.source_lang,
                                prefix_len=args.prefix_len)


def cli_main():
    parser = rerank_options.get_reranking_parser()
    args = options.parse_args_and_arch(parser)
    score_lm(args)
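
# --- Added usage sketch (not in the original record) ---
# score_lm() is STEP 4.5 of the pipeline: it rescores the n-best hypotheses
# under a language model to estimate P(T). A hypothetical invocation (the
# flag names mirror the args.* attributes used above, but the authoritative
# option names come from rerank_options.get_reranking_parser()):
#
#   python rerank_score_lm.py data-bin/wmt17 \
#       --language-model lm/checkpoint_best.pt \
#       --lm-dict lm/dict.txt --lm-name my_lm --lm-bpe-code shared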
null
185,371
from contextlib import redirect_stdout
import os
import subprocess

import rerank_utils
from examples.noisychannel import rerank_options
from fairseq import options
import generate
import preprocess


def gen_and_reprocess_nbest(args):
    if args.score_dict_dir is None:
        args.score_dict_dir = args.data
    if args.prefix_len is not None:
        assert args.right_to_left1 is False, "prefix length not compatible with right to left models"
        assert args.right_to_left2 is False, "prefix length not compatible with right to left models"

    if args.nbest_list is not None:
        assert args.score_model2 is None

    if args.backwards1:
        scorer1_src = args.target_lang
        scorer1_tgt = args.source_lang
    else:
        scorer1_src = args.source_lang
        scorer1_tgt = args.target_lang

    store_data = os.path.join(os.path.dirname(__file__)) + "/rerank_data/" + args.data_dir_name
    if not os.path.exists(store_data):
        os.makedirs(store_data)

    pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
        backwards_preprocessed_dir, lm_preprocessed_dir = \
        rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
                                     args.gen_model_name, args.shard_id, args.num_shards,
                                     args.sampling, args.prefix_len, args.target_prefix_frac,
                                     args.source_prefix_frac)
    assert not (args.right_to_left1 and args.backwards1), "backwards right to left not supported"
    assert not (args.right_to_left2 and args.backwards2), "backwards right to left not supported"
    assert not (args.prefix_len is not None and args.target_prefix_frac is not None), \
        "target prefix frac and target prefix len incompatible"

    # make directory to store generation results
    if not os.path.exists(pre_gen):
        os.makedirs(pre_gen)

    rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None
    rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None

    if args.nbest_list is not None:
        rerank2_is_gen = True

    # make directories to store preprocessed nbest list for reranking
    if not os.path.exists(left_to_right_preprocessed_dir):
        os.makedirs(left_to_right_preprocessed_dir)
    if not os.path.exists(right_to_left_preprocessed_dir):
        os.makedirs(right_to_left_preprocessed_dir)
    if not os.path.exists(lm_preprocessed_dir):
        os.makedirs(lm_preprocessed_dir)
    if not os.path.exists(backwards_preprocessed_dir):
        os.makedirs(backwards_preprocessed_dir)

    score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name,
                                                 target_prefix_frac=args.target_prefix_frac,
                                                 source_prefix_frac=args.source_prefix_frac,
                                                 backwards=args.backwards1)
    if args.score_model2 is not None:
        score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name,
                                                     target_prefix_frac=args.target_prefix_frac,
                                                     source_prefix_frac=args.source_prefix_frac,
                                                     backwards=args.backwards2)

    predictions_bpe_file = pre_gen + "/generate_output_bpe.txt"

    using_nbest = args.nbest_list is not None

    if using_nbest:
        print("Using predefined n-best list from interactive.py")
        predictions_bpe_file = args.nbest_list
    else:
        if not os.path.isfile(predictions_bpe_file):
            print("STEP 1: generate predictions using the p(T|S) model with bpe")
            print(args.data)
            param1 = [args.data,
                      "--path", args.gen_model,
                      "--shard-id", str(args.shard_id),
                      "--num-shards", str(args.num_shards),
                      "--nbest", str(args.num_rescore),
                      "--batch-size", str(args.batch_size),
                      "--beam", str(args.num_rescore),
                      "--max-sentences", str(args.num_rescore),
                      "--gen-subset", args.gen_subset,
                      "--source-lang", args.source_lang,
                      "--target-lang", args.target_lang]
            if args.sampling:
                param1 += ["--sampling"]

            gen_parser = options.get_generation_parser()
            input_args = options.parse_args_and_arch(gen_parser, param1)

            print(input_args)
            with open(predictions_bpe_file, 'w') as f:
                with redirect_stdout(f):
                    generate.main(input_args)

    gen_output = rerank_utils.BitextOutputFromGen(predictions_bpe_file, bpe_symbol=args.remove_bpe,
                                                  nbest=using_nbest, prefix_len=args.prefix_len,
                                                  target_prefix_frac=args.target_prefix_frac)

    if args.diff_bpe:
        rerank_utils.write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo,
                                       gen_output.no_bpe_target,
                                       pre_gen + "/source_gen_bpe." + args.source_lang,
                                       pre_gen + "/target_gen_bpe." + args.target_lang,
                                       pre_gen + "/reference_gen_bpe." + args.target_lang)
        bitext_bpe = args.rescore_bpe_code
        bpe_src_param = ["-c", bitext_bpe,
                         "--input", pre_gen + "/source_gen_bpe." + args.source_lang,
                         "--output", pre_gen + "/rescore_data." + args.source_lang]
        bpe_tgt_param = ["-c", bitext_bpe,
                         "--input", pre_gen + "/target_gen_bpe." + args.target_lang,
                         "--output", pre_gen + "/rescore_data." + args.target_lang]

        subprocess.call(["python",
                         os.path.join(os.path.dirname(__file__),
                                      "subword-nmt/subword_nmt/apply_bpe.py")] + bpe_src_param,
                        shell=False)
        subprocess.call(["python",
                         os.path.join(os.path.dirname(__file__),
                                      "subword-nmt/subword_nmt/apply_bpe.py")] + bpe_tgt_param,
                        shell=False)

    if (not os.path.isfile(score1_file) and not rerank1_is_gen) or \
            (args.score_model2 is not None and not os.path.isfile(score2_file) and not rerank2_is_gen):
        print("STEP 2: process the output of generate.py so we have clean text files with the translations")

        rescore_file = "/rescore_data"
        if args.prefix_len is not None:
            prefix_len_rescore_file = rescore_file + "prefix" + str(args.prefix_len)
        if args.target_prefix_frac is not None:
            target_prefix_frac_rescore_file = rescore_file + "target_prefix_frac" + str(args.target_prefix_frac)
        if args.source_prefix_frac is not None:
            source_prefix_frac_rescore_file = rescore_file + "source_prefix_frac" + str(args.source_prefix_frac)

        if not args.right_to_left1 or not args.right_to_left2:
            if not args.diff_bpe:
                rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
                                               pre_gen + rescore_file + "." + args.source_lang,
                                               pre_gen + rescore_file + "." + args.target_lang,
                                               pre_gen + "/reference_file", bpe_symbol=args.remove_bpe)
                if args.prefix_len is not None:
                    bw_rescore_file = prefix_len_rescore_file
                    rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
                                                   pre_gen + prefix_len_rescore_file + "." + args.source_lang,
                                                   pre_gen + prefix_len_rescore_file + "." + args.target_lang,
                                                   pre_gen + "/reference_file", prefix_len=args.prefix_len,
                                                   bpe_symbol=args.remove_bpe)
                elif args.target_prefix_frac is not None:
                    bw_rescore_file = target_prefix_frac_rescore_file
                    rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
                                                   pre_gen + target_prefix_frac_rescore_file + "." + args.source_lang,
                                                   pre_gen + target_prefix_frac_rescore_file + "." + args.target_lang,
                                                   pre_gen + "/reference_file", bpe_symbol=args.remove_bpe,
                                                   target_prefix_frac=args.target_prefix_frac)
                else:
                    bw_rescore_file = rescore_file

                if args.source_prefix_frac is not None:
                    fw_rescore_file = source_prefix_frac_rescore_file
                    rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
                                                   pre_gen + source_prefix_frac_rescore_file + "." + args.source_lang,
                                                   pre_gen + source_prefix_frac_rescore_file + "." + args.target_lang,
                                                   pre_gen + "/reference_file", bpe_symbol=args.remove_bpe,
                                                   source_prefix_frac=args.source_prefix_frac)
                else:
                    fw_rescore_file = rescore_file

        if args.right_to_left1 or args.right_to_left2:
            rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
                                           pre_gen + "/right_to_left_rescore_data." + args.source_lang,
                                           pre_gen + "/right_to_left_rescore_data." + args.target_lang,
                                           pre_gen + "/right_to_left_reference_file",
                                           right_to_left=True, bpe_symbol=args.remove_bpe)

        print("STEP 3: binarize the translations")
        if not args.right_to_left1 or args.score_model2 is not None and not args.right_to_left2 or not rerank1_is_gen:

            if args.backwards1 or args.backwards2:
                if args.backwards_score_dict_dir is not None:
                    bw_dict = args.backwards_score_dict_dir
                else:
                    bw_dict = args.score_dict_dir
                bw_preprocess_param = ["--source-lang", scorer1_src,
                                       "--target-lang", scorer1_tgt,
                                       "--trainpref", pre_gen + bw_rescore_file,
                                       "--srcdict", bw_dict + "/dict." + scorer1_src + ".txt",
                                       "--tgtdict", bw_dict + "/dict." + scorer1_tgt + ".txt",
                                       "--destdir", backwards_preprocessed_dir]
                preprocess_parser = options.get_preprocessing_parser()
                input_args = preprocess_parser.parse_args(bw_preprocess_param)
                preprocess.main(input_args)

            preprocess_param = ["--source-lang", scorer1_src,
                                "--target-lang", scorer1_tgt,
                                "--trainpref", pre_gen + fw_rescore_file,
                                "--srcdict", args.score_dict_dir + "/dict." + scorer1_src + ".txt",
                                "--tgtdict", args.score_dict_dir + "/dict." + scorer1_tgt + ".txt",
                                "--destdir", left_to_right_preprocessed_dir]
            preprocess_parser = options.get_preprocessing_parser()
            input_args = preprocess_parser.parse_args(preprocess_param)
            preprocess.main(input_args)

        if args.right_to_left1 or args.right_to_left2:
            preprocess_param = ["--source-lang", scorer1_src,
                                "--target-lang", scorer1_tgt,
                                "--trainpref", pre_gen + "/right_to_left_rescore_data",
                                "--srcdict", args.score_dict_dir + "/dict." + scorer1_src + ".txt",
                                "--tgtdict", args.score_dict_dir + "/dict." + scorer1_tgt + ".txt",
                                "--destdir", right_to_left_preprocessed_dir]
            preprocess_parser = options.get_preprocessing_parser()
            input_args = preprocess_parser.parse_args(preprocess_param)
            preprocess.main(input_args)

    return gen_output


def cli_main():
    parser = rerank_options.get_reranking_parser()
    args = options.parse_args_and_arch(parser)
    gen_and_reprocess_nbest(args)
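
# --- Added sketch (not in the original record) ---
# STEP 1 above captures generate.py's stdout into a file via redirect_stdout.
# A self-contained sketch of that idiom (hypothetical helper, not part of
# fairseq):
from contextlib import redirect_stdout as _redirect_stdout

def capture_stdout_to_file(fn, path, *fn_args, **fn_kwargs):
    """Run fn while its print() output is written to `path` (sketch)."""
    with open(path, 'w') as f, _redirect_stdout(f):
        return fn(*fn_args, **fn_kwargs)

# e.g. capture_stdout_to_file(print, '/tmp/out.txt', 'hello')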
null
185,372
import torch
from fairseq import checkpoint_utils, options, progress_bar, utils


def main(args, override_args=None):
    # NOTE: the body of main() is not included in this record; only the
    # signature and the CLI wiring below were preserved. Raising here keeps
    # the module importable and syntactically valid.
    raise NotImplementedError("main() body not included in this record")


def cli_main():
    parser = options.get_validation_parser()
    args = options.parse_args_and_arch(parser)

    # only override args that are explicitly given on the command line
    override_parser = options.get_validation_parser()
    override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)

    main(args, override_args)
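
# --- Added sketch (not in the original record) ---
# The elided main() presumably loads the checkpoint and runs the criterion
# over the validation set. A rough approximation under that assumption
# (checkpoint_utils.load_model_ensemble_and_task is a real fairseq helper;
# the loop below is a guess at the shape of the logic, not the original):
def validate_sketch(checkpoint_path, subset='valid', max_tokens=4096):
    models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path])
    criterion = task.build_criterion(model_args)
    task.load_dataset(subset)
    itr = task.get_batch_iterator(
        dataset=task.dataset(subset), max_tokens=max_tokens,
    ).next_epoch_itr(shuffle=False)
    for model in models:
        model.eval()
    with torch.no_grad():
        for sample in itr:
            loss, sample_size, log_output = criterion(models[0], sample)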
null
185,373
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from typing import Dict  # added: needed for the Dict[str, int] annotations below
import os
import shutil

import torch  # added: needed for torch.IntTensor below

from fairseq import options, tasks, utils
from fairseq.data import indexed_dataset
# added: helpers used by Binarizer.binarize; in fairseq these live in
# fairseq.file_chunker_utils, fairseq.file_io, and fairseq.tokenizer.
from fairseq.file_chunker_utils import Chunker
from fairseq.file_io import PathManager
from fairseq.tokenizer import tokenize_line
# NOTE: the original record also imported Binarizer from fairseq.binarizer,
# but the class is defined inline below, so that import is dropped to avoid
# shadowing.


def dataset_dest_file(args, output_prefix, lang, extension):
    # dataset_dest_prefix is defined elsewhere in the original preprocess module
    base = dataset_dest_prefix(args, output_prefix, lang)
    return "{}.{}".format(base, extension)


class Binarizer:

    @staticmethod
    def binarize(
        filename,
        dict,  # a fairseq Dictionary; the name (shadowing the builtin) is kept from the source
        consumer,
        tokenize=tokenize_line,
        append_eos=True,
        reverse_order=False,
        offset=0,
        end=-1,
        already_numberized=False,
    ) -> Dict[str, int]:
        nseq, ntok = 0, 0
        replaced = Counter()

        def replaced_consumer(word, idx):
            if idx == dict.unk_index and word != dict.unk_word:
                replaced.update([word])

        with Chunker(
            PathManager.get_local_path(filename), offset, end
        ) as line_iterator:
            for line in line_iterator:
                if already_numberized:
                    id_strings = line.strip().split()
                    id_list = [int(id_string) for id_string in id_strings]
                    if reverse_order:
                        id_list.reverse()
                    if append_eos:
                        id_list.append(dict.eos())
                    ids = torch.IntTensor(id_list)
                else:
                    ids = dict.encode_line(
                        line=line,
                        line_tokenizer=tokenize,
                        add_if_not_exist=False,
                        consumer=replaced_consumer,
                        append_eos=append_eos,
                        reverse_order=reverse_order,
                    )
                nseq += 1
                ntok += len(ids)
                consumer(ids)
        return {
            "nseq": nseq,
            "nunk": sum(replaced.values()),
            "ntok": ntok,
            "replaced": replaced,
        }

    @staticmethod
    def binarize_alignments(
        filename, alignment_parser, consumer, offset=0, end=-1
    ) -> Dict[str, int]:
        nseq = 0
        with Chunker(
            PathManager.get_local_path(filename), offset, end
        ) as line_iterator:
            for line in line_iterator:
                ids = alignment_parser(line)
                nseq += 1
                consumer(ids)
        return {"nseq": nseq}


def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
    ds = indexed_dataset.make_builder(
        dataset_dest_file(args, output_prefix, lang, "bin"),
        impl=args.dataset_impl,
        vocab_size=len(vocab),
    )

    def consumer(tensor):
        ds.add_item(tensor)

    res = Binarizer.binarize(filename, vocab, consumer, append_eos=append_eos,
                             offset=offset, end=end)
    ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
    return res
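
# --- Added usage sketch (not in the original record) ---
# Binarizer.binarize() streams a text file through a dictionary and hands each
# encoded tensor to a consumer; binarize() above wires that consumer into an
# indexed_dataset builder before finalizing the .idx file. A minimal
# in-memory consumer (the path and the `vocab` object are assumptions; vocab
# would be a fairseq Dictionary):
#
#   collected = []
#   stats = Binarizer.binarize("train.tok.en", vocab, collected.append)
#   print(stats["nseq"], "sequences,", stats["nunk"], "unknown tokens")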
null