id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
18,835 | import cv2
import numpy as np
def enhance_level_to_args(MAX_LEVEL):
    """Return a mapper from an augmentation level to enhance-op args.

    The returned callable turns ``level`` into a one-element tuple holding
    an enhancement factor in [0.1, 1.9].
    """
    def level_to_args(level):
        factor = level / MAX_LEVEL * 1.8 + 0.1
        return (factor,)
    return level_to_args
18,836 | import cv2
import numpy as np
def shear_level_to_args(MAX_LEVEL, replace_value):
    """Return a mapper from level to (shear_magnitude, replace_value).

    Magnitude is scaled into [0, 0.3] and its sign is flipped with
    probability 0.5.
    """
    def level_to_args(level):
        magnitude = level / MAX_LEVEL * 0.3
        # Randomly shear in the opposite direction half of the time.
        if np.random.random() > 0.5:
            magnitude = -magnitude
        return (magnitude, replace_value)
    return level_to_args
18,837 | import cv2
import numpy as np
def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
    """Return a mapper from level to (translate_amount, replace_value).

    The amount scales up to ``translate_const`` pixels; direction is
    chosen at random.
    """
    def level_to_args(level):
        amount = level / MAX_LEVEL * float(translate_const)
        # Translate left/up instead of right/down half of the time.
        if np.random.random() > 0.5:
            amount = -amount
        return (amount, replace_value)
    return level_to_args
18,838 | import cv2
import numpy as np
def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
    """Return a mapper from level to (cutout_size, replace_value).

    Cutout size grows linearly with level, up to ``cutout_const``.
    """
    def level_to_args(level):
        size = int(level / MAX_LEVEL * cutout_const)
        return (size, replace_value)
    return level_to_args
18,839 | import cv2
import numpy as np
def solarize_level_to_args(MAX_LEVEL):
    """Return a mapper from level to a one-element (threshold,) tuple.

    Threshold scales linearly up to 256.
    """
    def level_to_args(level):
        threshold = int(level / MAX_LEVEL * 256)
        return (threshold,)
    return level_to_args
18,840 | import cv2
import numpy as np
def none_level_to_args(level):
    """Ignore *level*; parameterless augment ops take an empty arg tuple."""
    return tuple()
18,841 | import cv2
import numpy as np
def posterize_level_to_args(MAX_LEVEL):
    """Return a mapper from level to a one-element (bits,) tuple.

    Bit count scales linearly up to 4.
    """
    def level_to_args(level):
        bits = int(level / MAX_LEVEL * 4)
        return (bits,)
    return level_to_args
18,842 | import cv2
import numpy as np
def rotate_level_to_args(MAX_LEVEL, replace_value):
    """Return a mapper from level to (rotation_degrees, replace_value).

    Rotation scales up to 30 degrees; direction is chosen at random.
    """
    def level_to_args(level):
        degrees = level / MAX_LEVEL * 30
        # Rotate counter-clockwise half of the time.
        if np.random.random() < 0.5:
            degrees = -degrees
        return (degrees, replace_value)
    return level_to_args
18,843 | from typing import Optional
from torch import Tensor
from PIL import Image
from dataset import vg_transforms as T
import os
import re
import sys
import json
import torch
import numpy as np
import os.path as osp
import scipy.io as sio
import torch.utils.data as data
from models.tokenization_bert import BertTokenizer
from vgTools.utils.box_utils import xywh2xyxy
class InputExample(object):
    """A single text input: an optional sentence pair plus a unique id."""

    def __init__(self, unique_id, text_a, text_b):
        # Identifier used to match extracted features back to this example.
        self.unique_id = unique_id
        # First (or only) text segment.
        self.text_a = text_a
        # Second segment; None for single-sentence inputs.
        self.text_b = text_b
The provided code snippet includes necessary dependencies for implementing the `read_examples` function. Write a Python function `def read_examples(input_line, unique_id)` to solve the following problem:
Read a list of `InputExample`s from an input file.
Here is the function:
def read_examples(input_line, unique_id):
    """Read a list of `InputExample`s from an input file.

    ``input_line`` may contain a ``" ||| "`` separator splitting it into a
    (text_a, text_b) pair; otherwise the whole stripped line becomes
    text_a and text_b is None.  Returns a single-element list so callers
    that iterate over examples keep working.
    """
    line = input_line.strip()
    # Optional pair syntax: "<text_a> ||| <text_b>".
    m = re.match(r"^(.*) \|\|\| (.*)$", line)
    if m is None:
        text_a, text_b = line, None
    else:
        text_a, text_b = m.group(1), m.group(2)
    return [InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)]
18,844 | from typing import Optional
from torch import Tensor
from PIL import Image
from dataset import vg_transforms as T
import os
import re
import sys
import json
import torch
import numpy as np
import os.path as osp
import scipy.io as sio
import torch.utils.data as data
from models.tokenization_bert import BertTokenizer
from vgTools.utils.box_utils import xywh2xyxy
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
        # Id linking these features back to their source InputExample.
        self.unique_id = unique_id
        # Wordpiece tokens including the [CLS]/[SEP] markers.
        self.tokens = tokens
        # Vocabulary ids for `tokens`, zero-padded to the target length.
        self.input_ids = input_ids
        # 1 for real tokens, 0 for padding.
        self.input_mask = input_mask
        # Segment ids: 0 for the first sentence, 1 for the second.
        self.input_type_ids = input_type_ids
The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features(examples, seq_length, tokenizer)` to solve the following problem:
Loads a data file into a list of `InputBatch`s.
Here is the function:
def convert_examples_to_features(examples, seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Tokenizes each InputExample, adds [CLS]/[SEP] markers, pads every
    sequence to exactly ``seq_length`` tokens, and returns one
    InputFeatures object per example.

    NOTE(review): the sentence-pair branch calls ``_truncate_seq_pair``,
    which is defined elsewhere in this module -- confirm it is in scope.
    """
    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
        if tokens_b:
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > seq_length - 2:
                tokens_a = tokens_a[0:(seq_length - 2)]
        tokens = []
        input_type_ids = []
        # First segment: [CLS] <tokens_a> [SEP], all with segment id 0.
        tokens.append("[CLS]")
        input_type_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            input_type_ids.append(0)
        tokens.append("[SEP]")
        input_type_ids.append(0)
        if tokens_b:
            # Second segment: <tokens_b> [SEP], all with segment id 1.
            for token in tokens_b:
                tokens.append(token)
                input_type_ids.append(1)
            tokens.append("[SEP]")
            input_type_ids.append(1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < seq_length:
            input_ids.append(0)
            input_mask.append(0)
            input_type_ids.append(0)
        assert len(input_ids) == seq_length
        assert len(input_mask) == seq_length
        assert len(input_type_ids) == seq_length
        features.append(
            InputFeatures(
                unique_id=example.unique_id,
                tokens=tokens,
                input_ids=input_ids,
                input_mask=input_mask,
                input_type_ids=input_type_ids))
    return features
18,845 | from typing import Optional
from torch import Tensor
from PIL import Image
from dataset import vg_transforms as T
import os
import re
import sys
import json
import torch
import numpy as np
import os.path as osp
import scipy.io as sio
import torch.utils.data as data
from models.tokenization_bert import BertTokenizer
from vgTools.utils.box_utils import xywh2xyxy
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
def to(self, device):
def decompose(self):
def __repr__(self):
def collate_fn(raw_batch):
    """Collate (img, img_mask, word_id, word_mask, bbox) samples into
    a (NestedTensor, NestedTensor, Tensor) training batch."""
    cols = list(zip(*raw_batch))
    img_data = NestedTensor(torch.stack(cols[0]),
                            torch.tensor(np.array(cols[1])))
    text_data = NestedTensor(torch.tensor(np.array(cols[2])),
                             torch.tensor(np.array(cols[3])))
    bbox = torch.tensor(np.array(cols[4]))
    return (img_data, text_data, bbox)
18,846 | from typing import Optional
from torch import Tensor
from PIL import Image
from dataset import vg_transforms as T
import os
import re
import sys
import json
import torch
import numpy as np
import os.path as osp
import scipy.io as sio
import torch.utils.data as data
from models.tokenization_bert import BertTokenizer
from vgTools.utils.box_utils import xywh2xyxy
class NestedTensor(object):
    """A dense tensor paired with an optional padding mask."""

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        """Move both tensor and mask (if any) to `device`; returns a new
        NestedTensor."""
        moved = self.tensors.to(device)
        moved_mask = None if self.mask is None else self.mask.to(device)
        return NestedTensor(moved, moved_mask)

    def decompose(self):
        """Return the underlying (tensors, mask) pair."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
def collate_fn_val(raw_batch):
    """Like the training collate function, but also passes through the
    per-sample raw_data dicts (last element of each sample)."""
    cols = list(zip(*raw_batch))
    img_data = NestedTensor(torch.stack(cols[0]),
                            torch.tensor(np.array(cols[1])))
    text_data = NestedTensor(torch.tensor(np.array(cols[2])),
                             torch.tensor(np.array(cols[3])))
    bbox = torch.tensor(np.array(cols[4]))
    return (img_data, text_data, bbox, cols[-1])
18,847 | from typing import Optional
from torch import Tensor
from PIL import Image
from dataset import vg_transforms as T
import os
import re
import sys
import json
import torch
import numpy as np
import os.path as osp
import scipy.io as sio
import torch.utils.data as data
from models.tokenization_bert import BertTokenizer
from vgTools.utils.box_utils import xywh2xyxy
class VisualGroundingDataset(data.Dataset):
    """Referring-expression grounding dataset.

    Supports ReferIt, RefCOCO/RefCOCO+/RefCOCOg, Flickr30k-Entities and
    pailitao.  Each item yields the transformed image, its padding mask,
    BERT token ids/mask for the phrase, and the ground-truth box
    (x1, y1, x2, y2); non-train splits additionally return a raw-data dict.
    """

    # Valid splits per dataset name, plus refer-API params where applicable.
    SUPPORTED_DATASETS = {
        'referit': {'splits': ('train', 'val', 'trainval', 'test')},
        'unc': {
            'splits': ('train', 'val', 'trainval', 'testA', 'testB'),
            'params': {'dataset': 'refcoco', 'split_by': 'unc'}
        },
        'unc+': {
            'splits': ('train', 'val', 'trainval', 'testA', 'testB'),
            'params': {'dataset': 'refcoco+', 'split_by': 'unc'}
        },
        'gref': {
            'splits': ('train', 'val'),
            'params': {'dataset': 'refcocog', 'split_by': 'google'}
        },
        'gref_umd': {
            'splits': ('train', 'val', 'test'),
            'params': {'dataset': 'refcocog', 'split_by': 'umd'}
        },
        'flickr': {
            'splits': ('train', 'val', 'test')},
        'pailitao': {
            'splits': ('train', 'val', 'test')
        }
    }

    def __init__(self, data_root, split_root='data', dataset='referit',
                 transform=None, return_idx=False, testmode=False,
                 split='train', max_query_len=128,
                 bert_model='bert-base-uncased', swin=False, odps_slice_tuple=None):
        """Load the annotation .pth file(s) for `split` and set up the
        BERT tokenizer.

        Raises ValueError if `split` is not valid for `dataset`.
        `transform` is required (asserted non-None).
        """
        self.images = []
        self.data_root = data_root
        self.split_root = split_root
        self.dataset = dataset
        self.query_len = max_query_len
        self.transform = transform
        self.testmode = testmode
        self.split = split
        self.tokenizer = BertTokenizer.from_pretrained(bert_model)
        self.return_idx = return_idx
        self.swin = swin
        assert self.transform is not None
        self.augment = (split == 'train')
        # Resolve the image directory for each dataset family.
        if self.dataset == 'referit':
            self.dataset_root = osp.join(self.data_root, 'referit')
            self.im_dir = osp.join(self.dataset_root, 'images')
            self.split_dir = osp.join(self.dataset_root, 'splits')
        elif self.dataset == 'flickr':
            self.dataset_root = osp.join(self.data_root, 'Flickr30k')
            self.im_dir = osp.join(self.dataset_root, 'flickr30k_images')
        else:  # refcoco, etc.
            self.dataset_root = osp.join(self.data_root, 'other')
            self.im_dir = osp.join(
                self.dataset_root, 'images', 'mscoco', 'images', 'train2014')
            self.split_dir = osp.join(self.dataset_root, 'splits')
        dataset_path = osp.join(self.split_root, self.dataset)
        valid_splits = self.SUPPORTED_DATASETS[self.dataset]['splits']
        if split not in valid_splits:
            raise ValueError(
                'Dataset {0} does not have split {1}'.format(
                    self.dataset, split))
        splits = [split]
        if self.dataset != 'referit':
            # 'trainval' is stored as two separate files.
            splits = ['train', 'val'] if split == 'trainval' else [split]
        for split in splits:
            imgset_file = '{0}_{1}.pth'.format(self.dataset, split)
            imgset_path = osp.join(dataset_path, imgset_file)
            self.images += torch.load(imgset_path)
        # assign item id as the last tuple element
        self.images = [item + (i,) for i, item in enumerate(self.images)]

    def exists_dataset(self):
        """True if the split annotations directory exists on disk."""
        return osp.exists(osp.join(self.split_root, self.dataset))

    def pull_item(self, idx):
        """Return (PIL image, phrase, bbox tensor x1y1x2y2, item_id)."""
        if self.dataset == 'flickr':
            img_file, bbox, phrase, item_id = self.images[idx]
        elif self.dataset == 'pailitao':
            # NOTE(review): this branch never assigns `bbox` or `img_file`;
            # `crop_cord` looks like the box -- confirm before using pailitao.
            nid, title, crop_cord, image_url, item_id = self.images[idx]
            phrase = title
        else:
            img_file, _, bbox, phrase, attri, item_id = self.images[idx]
        # box format: convert xywh to x1y1x2y2 where needed
        if not (self.dataset == 'referit' or self.dataset == 'flickr'):
            bbox = np.array(bbox, dtype=int)
            bbox[2], bbox[3] = bbox[0] + bbox[2], bbox[1] + bbox[3]
        else:
            bbox = np.array(bbox, dtype=int)
        img_path = osp.join(self.im_dir, img_file)
        img = Image.open(img_path).convert("RGB")
        image_url = 'none'
        bbox = torch.tensor(bbox)
        bbox = bbox.float()
        return img, phrase, bbox, item_id

    def tokenize_phrase(self, phrase):
        # NOTE(review): `self.corpus` is never set in __init__ -- confirm
        # this legacy path is still used anywhere.
        return self.corpus.tokenize(phrase, self.query_len)

    def untokenize_word_vector(self, words):
        # NOTE(review): legacy corpus path, see tokenize_phrase.
        return self.corpus.dictionary[words]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        """Return tensors for one sample; non-train splits also return the
        raw-data dict for evaluation bookkeeping."""
        raw_data = {'idx': idx}
        img, phrase, bbox, item_id = self.pull_item(idx)
        phrase = phrase.lower()
        input_dict = {'img': img, 'box': bbox, 'text': phrase}
        input_dict = self.transform(input_dict)
        img = input_dict['img']
        bbox = input_dict['box']
        phrase = input_dict['text']
        img_mask = input_dict['mask']
        raw_data['phrase'] = phrase
        raw_data['gt_bbox'] = bbox
        raw_data['img'] = img
        raw_data['item_id'] = item_id
        # encode phrase to bert input
        examples = read_examples(phrase, idx)
        features = convert_examples_to_features(
            examples=examples, seq_length=self.query_len, tokenizer=self.tokenizer)
        word_id = features[0].input_ids
        word_mask = features[0].input_mask
        if self.split == 'train':
            return img, np.array(img_mask), np.array(word_id, dtype=int), np.array(word_mask, dtype=int), np.array(bbox, dtype=np.float32)
        return img, np.array(img_mask), np.array(word_id, dtype=int), np.array(word_mask, dtype=int), np.array(bbox, dtype=np.float32), raw_data
def make_transforms(args, image_set, is_onestage=False):
    """Build the vg_transforms pipeline for a split.

    Training uses scale jitter / optional random crop / color jitter /
    blur / horizontal flip; eval splits only resize, tensorize and pad.
    """
    imsize = args['image_res']
    if image_set == 'train':
        if args['aug_scale']:
            step = imsize // 20
            scales = [imsize - step * k for k in range(7)]
        else:
            scales = [imsize]
        crop_prob = 0.5 if args['aug_crop'] else 0.
        return T.Compose([
            T.RandomSelect(
                T.RandomResize(scales),
                T.Compose([
                    T.RandomResize([400, 500, 600], with_long_side=False),
                    T.RandomSizeCrop(384, 600),
                    T.RandomResize(scales),
                ]),
                p=crop_prob
            ),
            T.ColorJitter(0.4, 0.4, 0.4),
            T.GaussianBlur(aug_blur=args['aug_blur']),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            # CLIP normalization statistics.
            T.NormalizeAndPad(size=imsize, mean=(0.48145466, 0.4578275, 0.40821073), std=(
                0.26862954, 0.26130258, 0.27577711), aug_translate=args['aug_translate'])
        ])
    if image_set in ['val', 'test', 'testA', 'testB']:
        return T.Compose([
            T.RandomResize([imsize]),
            T.ToTensor(),
            T.NormalizeAndPad(size=imsize),
        ])
    raise ValueError(f'unknown {image_set}')
def build_vg_dataset(split, args, dataset_name=None):
    """Construct a single VisualGroundingDataset for `split`."""
    transform = make_transforms(args, split)
    return VisualGroundingDataset(data_root=args['data_root'],
                                  split_root=args['split_root'],
                                  dataset=dataset_name,
                                  split=split,
                                  bert_model=args['text_encoder'],
                                  transform=transform,
                                  max_query_len=args['max_query_len'])
18,848 | from typing import Optional
from torch import Tensor
from PIL import Image
from dataset import vg_transforms as T
import os
import re
import sys
import json
import torch
import numpy as np
import os.path as osp
import scipy.io as sio
import torch.utils.data as data
from models.tokenization_bert import BertTokenizer
from vgTools.utils.box_utils import xywh2xyxy
class VisualGroundingDataset(data.Dataset):
    """Referring-expression grounding dataset.

    Supports ReferIt, RefCOCO/RefCOCO+/RefCOCOg, Flickr30k-Entities and
    pailitao.  Each item yields the transformed image, its padding mask,
    BERT token ids/mask for the phrase, and the ground-truth box
    (x1, y1, x2, y2); non-train splits additionally return a raw-data dict.
    """

    # Valid splits per dataset name, plus refer-API params where applicable.
    SUPPORTED_DATASETS = {
        'referit': {'splits': ('train', 'val', 'trainval', 'test')},
        'unc': {
            'splits': ('train', 'val', 'trainval', 'testA', 'testB'),
            'params': {'dataset': 'refcoco', 'split_by': 'unc'}
        },
        'unc+': {
            'splits': ('train', 'val', 'trainval', 'testA', 'testB'),
            'params': {'dataset': 'refcoco+', 'split_by': 'unc'}
        },
        'gref': {
            'splits': ('train', 'val'),
            'params': {'dataset': 'refcocog', 'split_by': 'google'}
        },
        'gref_umd': {
            'splits': ('train', 'val', 'test'),
            'params': {'dataset': 'refcocog', 'split_by': 'umd'}
        },
        'flickr': {
            'splits': ('train', 'val', 'test')},
        'pailitao': {
            'splits': ('train', 'val', 'test')
        }
    }

    def __init__(self, data_root, split_root='data', dataset='referit',
                 transform=None, return_idx=False, testmode=False,
                 split='train', max_query_len=128,
                 bert_model='bert-base-uncased', swin=False, odps_slice_tuple=None):
        """Load the annotation .pth file(s) for `split` and set up the
        BERT tokenizer.

        Raises ValueError if `split` is not valid for `dataset`.
        `transform` is required (asserted non-None).
        """
        self.images = []
        self.data_root = data_root
        self.split_root = split_root
        self.dataset = dataset
        self.query_len = max_query_len
        self.transform = transform
        self.testmode = testmode
        self.split = split
        self.tokenizer = BertTokenizer.from_pretrained(bert_model)
        self.return_idx = return_idx
        self.swin = swin
        assert self.transform is not None
        self.augment = (split == 'train')
        # Resolve the image directory for each dataset family.
        if self.dataset == 'referit':
            self.dataset_root = osp.join(self.data_root, 'referit')
            self.im_dir = osp.join(self.dataset_root, 'images')
            self.split_dir = osp.join(self.dataset_root, 'splits')
        elif self.dataset == 'flickr':
            self.dataset_root = osp.join(self.data_root, 'Flickr30k')
            self.im_dir = osp.join(self.dataset_root, 'flickr30k_images')
        else:  # refcoco, etc.
            self.dataset_root = osp.join(self.data_root, 'other')
            self.im_dir = osp.join(
                self.dataset_root, 'images', 'mscoco', 'images', 'train2014')
            self.split_dir = osp.join(self.dataset_root, 'splits')
        dataset_path = osp.join(self.split_root, self.dataset)
        valid_splits = self.SUPPORTED_DATASETS[self.dataset]['splits']
        if split not in valid_splits:
            raise ValueError(
                'Dataset {0} does not have split {1}'.format(
                    self.dataset, split))
        splits = [split]
        if self.dataset != 'referit':
            # 'trainval' is stored as two separate files.
            splits = ['train', 'val'] if split == 'trainval' else [split]
        for split in splits:
            imgset_file = '{0}_{1}.pth'.format(self.dataset, split)
            imgset_path = osp.join(dataset_path, imgset_file)
            self.images += torch.load(imgset_path)
        # assign item id as the last tuple element
        self.images = [item + (i,) for i, item in enumerate(self.images)]

    def exists_dataset(self):
        """True if the split annotations directory exists on disk."""
        return osp.exists(osp.join(self.split_root, self.dataset))

    def pull_item(self, idx):
        """Return (PIL image, phrase, bbox tensor x1y1x2y2, item_id)."""
        if self.dataset == 'flickr':
            img_file, bbox, phrase, item_id = self.images[idx]
        elif self.dataset == 'pailitao':
            # NOTE(review): this branch never assigns `bbox` or `img_file`;
            # `crop_cord` looks like the box -- confirm before using pailitao.
            nid, title, crop_cord, image_url, item_id = self.images[idx]
            phrase = title
        else:
            img_file, _, bbox, phrase, attri, item_id = self.images[idx]
        # box format: convert xywh to x1y1x2y2 where needed
        if not (self.dataset == 'referit' or self.dataset == 'flickr'):
            bbox = np.array(bbox, dtype=int)
            bbox[2], bbox[3] = bbox[0] + bbox[2], bbox[1] + bbox[3]
        else:
            bbox = np.array(bbox, dtype=int)
        img_path = osp.join(self.im_dir, img_file)
        img = Image.open(img_path).convert("RGB")
        image_url = 'none'
        bbox = torch.tensor(bbox)
        bbox = bbox.float()
        return img, phrase, bbox, item_id

    def tokenize_phrase(self, phrase):
        # NOTE(review): `self.corpus` is never set in __init__ -- confirm
        # this legacy path is still used anywhere.
        return self.corpus.tokenize(phrase, self.query_len)

    def untokenize_word_vector(self, words):
        # NOTE(review): legacy corpus path, see tokenize_phrase.
        return self.corpus.dictionary[words]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        """Return tensors for one sample; non-train splits also return the
        raw-data dict for evaluation bookkeeping."""
        raw_data = {'idx': idx}
        img, phrase, bbox, item_id = self.pull_item(idx)
        phrase = phrase.lower()
        input_dict = {'img': img, 'box': bbox, 'text': phrase}
        input_dict = self.transform(input_dict)
        img = input_dict['img']
        bbox = input_dict['box']
        phrase = input_dict['text']
        img_mask = input_dict['mask']
        raw_data['phrase'] = phrase
        raw_data['gt_bbox'] = bbox
        raw_data['img'] = img
        raw_data['item_id'] = item_id
        # encode phrase to bert input
        examples = read_examples(phrase, idx)
        features = convert_examples_to_features(
            examples=examples, seq_length=self.query_len, tokenizer=self.tokenizer)
        word_id = features[0].input_ids
        word_mask = features[0].input_mask
        if self.split == 'train':
            return img, np.array(img_mask), np.array(word_id, dtype=int), np.array(word_mask, dtype=int), np.array(bbox, dtype=np.float32)
        return img, np.array(img_mask), np.array(word_id, dtype=int), np.array(word_mask, dtype=int), np.array(bbox, dtype=np.float32), raw_data
def make_transforms(args, image_set, is_onestage=False):
    """Build the vg_transforms pipeline for a split.

    Training uses scale jitter / optional random crop / color jitter /
    blur / horizontal flip; eval splits only resize, tensorize and pad.
    """
    imsize = args['image_res']
    if image_set == 'train':
        if args['aug_scale']:
            step = imsize // 20
            scales = [imsize - step * k for k in range(7)]
        else:
            scales = [imsize]
        crop_prob = 0.5 if args['aug_crop'] else 0.
        return T.Compose([
            T.RandomSelect(
                T.RandomResize(scales),
                T.Compose([
                    T.RandomResize([400, 500, 600], with_long_side=False),
                    T.RandomSizeCrop(384, 600),
                    T.RandomResize(scales),
                ]),
                p=crop_prob
            ),
            T.ColorJitter(0.4, 0.4, 0.4),
            T.GaussianBlur(aug_blur=args['aug_blur']),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            # CLIP normalization statistics.
            T.NormalizeAndPad(size=imsize, mean=(0.48145466, 0.4578275, 0.40821073), std=(
                0.26862954, 0.26130258, 0.27577711), aug_translate=args['aug_translate'])
        ])
    if image_set in ['val', 'test', 'testA', 'testB']:
        return T.Compose([
            T.RandomResize([imsize]),
            T.ToTensor(),
            T.NormalizeAndPad(size=imsize),
        ])
    raise ValueError(f'unknown {image_set}')
def build_uni_training_dataset(args):
    """Concatenate the train splits of referit/unc/unc+/gref_umd into one
    ConcatDataset for unified training.
    """
    from torch.utils import data
    # NOTE: the original computed ``20 if 'gref' not in dataset_name else
    # 20`` -- both branches are 20, so the conditional was vacuous.
    max_query_len = 20
    datasets = []
    for dataset_name in ['referit', 'unc', 'unc+', 'gref_umd']:
        datasets.append(VisualGroundingDataset(data_root=args['data_root'],
                                               split_root=args['split_root'],
                                               dataset=dataset_name,
                                               split='train',
                                               bert_model=args['text_encoder'],
                                               transform=make_transforms(args, 'train'),
                                               max_query_len=max_query_len))
    return data.ConcatDataset(datasets)
18,849 | from torch.utils.data import Dataset
from torchvision.datasets.utils import download_url
from PIL import Image
import torch
import numpy as np
import random
import decord
from decord import VideoReader
import json
import os
from dataset.utils import pre_caption
def load_jsonl(filename):
    """Parse a JSON-Lines file into a list of decoded objects."""
    with open(filename, "r") as f:
        return [json.loads(line.strip("\n")) for line in f.readlines()]
18,850 | import os
import json
import random
import torch
import numpy as np
from PIL import Image
from PIL import ImageFile
from torch.utils.data import Dataset
from dataset.utils import pre_question
import decord
from decord import VideoReader
import oss2
from io import BytesIO
def load_jsonl(filename):
    """Parse a JSON-Lines file into a list of decoded objects."""
    with open(filename, "r") as f:
        return [json.loads(line.strip("\n")) for line in f.readlines()]
18,851 | import math
import torch
import random
from PIL import Image, ImageEnhance, ImageFilter
import numpy as np
import torchvision.transforms as T
import torchvision.transforms.functional as F
from vgTools.utils.box_utils import xyxy2xywh
from vgTools.utils.misc import interpolate
def crop(image, box, region):
    """Crop `image` to `region` (i, j, h, w) and clip `box` into the crop.

    The box is shifted into the crop's coordinate frame, then clamped to
    [0, w] x [0, h].
    """
    cropped_image = F.crop(image, *region)
    i, j, h, w = region
    # Shift the box so the crop origin becomes (0, 0).
    shifted = box - torch.as_tensor([j, i, j, i])
    limit = torch.as_tensor([w, h], dtype=torch.float32)
    clipped = torch.min(shifted.reshape(2, 2), limit).clamp(min=0)
    return cropped_image, clipped.reshape(-1)
18,852 | import math
import torch
import random
from PIL import Image, ImageEnhance, ImageFilter
import numpy as np
import torchvision.transforms as T
import torchvision.transforms.functional as F
from vgTools.utils.box_utils import xyxy2xywh
from vgTools.utils.misc import interpolate
def resize_according_to_long_side(img, box, size):
    """Scale `img` so its longer side equals `size`; scale `box` alike."""
    h, w = img.height, img.width
    ratio = float(size / float(max(h, w)))
    resized = F.resize(img, (round(h * ratio), round(w * ratio)))
    return resized, box * ratio
18,853 | import math
import torch
import random
from PIL import Image, ImageEnhance, ImageFilter
import numpy as np
import torchvision.transforms as T
import torchvision.transforms.functional as F
from vgTools.utils.box_utils import xyxy2xywh
from vgTools.utils.misc import interpolate
def resize_according_to_short_side(img, box, size):
    """Scale `img` so its shorter side equals `size`; scale `box` alike."""
    h, w = img.height, img.width
    ratio = float(size / float(min(h, w)))
    resized = F.resize(img, (round(h * ratio), round(w * ratio)))
    return resized, box * ratio
18,854 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_vqa_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, vqa_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
import deepspeed
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, scheduler, config, do_amp=False,
do_two_optim=False, do_accum=False, accum_steps=1):
def evaluation(model, data_loader, tokenizer, device, config):
    """Generate a VQA answer for every test batch.

    Returns a list of {"question_id", "answer"} dicts, taking the top
    ranked candidate answer for each question.
    """
    # test
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Generate VQA test result:'
    print_freq = 50
    result = []
    answer_list = [ans + config['eos'] for ans in data_loader.dataset.answer_list]
    answer_input = tokenizer(answer_list, padding='longest', return_tensors='pt').to(device)
    for n, (image, question, question_id) in enumerate(
            metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device, non_blocking=True)
        question_input = tokenizer(question, padding='longest', return_tensors="pt").to(device)
        topk_ids, topk_probs = model(image, question_input, answer_input,
                                     train=False, k=config['k_test'])
        for ques_id, topk_id, topk_prob in zip(question_id, topk_ids, topk_probs):
            # Strip BERT special tokens from the decoded top answer.
            decoded = tokenizer.decode(topk_id[0])
            ans = decoded.replace("[SEP]", "").replace("[CLS]", "").replace("[PAD]", "").strip()
            result.append({"question_id": int(ques_id.item()), "answer": ans})
    return result
18,855 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_vqa_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, vqa_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
import deepspeed
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, scheduler, config, do_amp=False,
          do_two_optim=False, do_accum=False, accum_steps=1):
    """Run one VQA training epoch under DeepSpeed.

    Returns a dict mapping meter name -> formatted global average.
    NOTE(review): reads the module-level ``args`` (args.max_input_length)
    -- confirm it is defined in the script that imports this function.
    """
    # train
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    if do_two_optim:
        metric_logger.add_meter('lr1', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
        metric_logger.add_meter('lr2', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    else:
        metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    step_size = 100
    warmup_iterations = warmup_steps * step_size
    for i, (image, question, answer, weights, n) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image, weights = image.to(device, non_blocking=True), weights.to(device, non_blocking=True)
        # OCR-augmented questions can be long; otherwise cap at 25 tokens.
        question_input = tokenizer(question, padding='longest', truncation=True, max_length=args.max_input_length if config["add_ocr"] else 25, return_tensors="pt").to(
            device)
        if i == 0:
            print ("question: ", question)
        answer_input = tokenizer(answer, padding='longest', return_tensors="pt").to(device)
        # alpha ramps up over the first epoch when warm_up is configured.
        if epoch > 0 or not config['warm_up']:
            alpha = config['alpha']
        else:
            alpha = config['alpha'] * min(1, i / len(data_loader))
        loss = model(image, question_input, answer_input, train=True, alpha=alpha, k=n, weights=weights)
        #if accum_steps > 1:
        # loss = loss / accum_steps
        #if do_amp:
        # from apex import amp
        # with amp.scale_loss(loss, optimizer) as scaled_loss:
        # # logger.info('scaled loss: {}'.format(str(scaled_loss)))
        # scaled_loss.backward()
        #else:
        # loss.backward()
        #if (i + 1) % accum_steps == 0:
        # optimizer.step()
        # optimizer.zero_grad()
        # DeepSpeed drives backward/step (it handles AMP and accumulation).
        model.backward(loss)
        model.step()
        metric_logger.update(loss=loss.item())
        if do_two_optim:
            metric_logger.update(lr1=optimizer.param_groups[0]["lr"])
            metric_logger.update(lr2=optimizer.param_groups[2]["lr"])
        else:
            metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        if epoch == 0 and i % step_size == 0 and i <= warmup_iterations:
            scheduler.step(i // step_size)
        del image,weights, question_input,answer_input, loss
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
def cal_metric(vqa_result, val_file):
    """Compute soft VQA accuracy of predictions against the label file.

    `val_file[0]` is a JSON list of {"question_id", "label": {ans: weight}}
    entries; each prediction earns its answer's weight when present.
    """
    with open(val_file[0], "r") as f:
        data_list = json.load(f)
    id2datum = {entry["question_id"]: entry["label"] for entry in data_list}
    score = 0.
    for pred in vqa_result:
        labels = id2datum[pred["question_id"]]
        answer = pred["answer"]
        if answer in labels:
            score += labels[answer]
    return score / len(vqa_result)
def evaluate(model, data_loader, dataset, tokenizer, device, config):
    """Rank candidate answers per batch and track accuracy via cal_metric.

    Returns a dict mapping meter name -> formatted global average.
    NOTE(review): ``result`` is rebuilt every batch, so cal_metric scores
    each batch in isolation and the meter averages per-batch accuracy --
    confirm this aggregation is intended.
    """
    # test
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Evaluation:'
    print_freq = 50
    answer_list = [answer+config['eos'] for answer in data_loader.dataset.answer_list]
    answer_input = tokenizer(answer_list, padding='longest', return_tensors='pt').to(device)
    for n, (image, question, question_id) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device,non_blocking=True)
        question_input = tokenizer(question, padding='longest', return_tensors="pt").to(device)
        topk_ids, topk_probs = model(image, question_input, answer_input, train=False, k=config['k_test'])
        result = []
        for ques_id, topk_id, topk_prob in zip(question_id, topk_ids, topk_probs):
            ques_id = int(ques_id.item())
            # Strip BERT special tokens from the decoded top answer.
            ans = tokenizer.decode(topk_id[0]).replace("[SEP]", "").replace("[CLS]", "").replace("[PAD]", "").strip()
            result.append({"question_id":ques_id, "answer":ans})
        accuracy = cal_metric(result, dataset)
        # accuracy = (targets == pred_class).sum() / targets.size(0)
        #
        metric_logger.meters['acc'].update(accuracy, n=image.size(0))
    # gather the stats from all processes
    torch.cuda.empty_cache()
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
18,856 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_vqa_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_sampler, create_loader
from dataset.videoqa_dataset import videoqa_dataset
def cal_metric(vqa_result, val_file):
    """Exact-match accuracy for video-QA predictions.

    `val_file[0]` is a JSON-Lines file whose entries carry 'question',
    'video_id' and 'answer'; question ids are the 0-based line indices.
    """
    with open(val_file[0], "r") as f:
        entries = [json.loads(l.strip("\n")) for l in f.readlines()]
    id2datum = {
        idx: {
            'question': entry['question'],
            'video_id': entry['video_id'],
            'answer': entry['answer'],
        }
        for idx, entry in enumerate(entries)
    }
    correct = 0.
    for pred in vqa_result:
        gold = id2datum[pred["question_id"]]['answer']
        if gold == pred["answer"]:
            correct += 1
    return correct / len(vqa_result)
def evaluate(model, data_loader, dataset, tokenizer, device, config):
# test
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Evaluation:'
print_freq = 50
result = []
answer_list = [answer + config['eos'] for answer in data_loader.dataset.answer_list]
answer_input = tokenizer(answer_list, padding='longest', return_tensors='pt').to(device)
for n, (image, question, question_id) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
image = image.to(device, non_blocking=True)
question_input = tokenizer(question, padding='longest', return_tensors="pt").to(device)
# topk_ids, topk_probs = model(image, question_input, answer_input, train=False, k=config['k_test'])
B, T, C, H, W = image.shape
image = image.view(-1, C, H, W)
image_embeds = model.visual_encoder.visual(image, skip_last_layer=True, use_checkpoint=model.use_checkpoint)
if model.large:
image_embeds = model.dropout(model.visn_layer_norm(model.visn_fc(image_embeds)))
image_embeds = image_embeds.view(B, -1, image_embeds.shape[-1])
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
text_output = model.text_encoder(question_input.input_ids, attention_mask=question_input.attention_mask,
return_dict=True)
text_embeds = text_output.last_hidden_state
fusion_output = model.fusion_encoder(encoder_embeds=text_embeds,
attention_mask=question_input.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=False)
image_output, question_output = fusion_output
question_output = torch.cat([image_output, question_output], 1)
merge_text_attention = torch.cat([image_atts, question_input.attention_mask], 1)
topk_ids, topk_probs = model.rank_answer(question_output, merge_text_attention, answer_input.input_ids,
answer_input.attention_mask, config['k_test'])
result = []
for ques_id, topk_id, topk_prob in zip(question_id, topk_ids, topk_probs):
ques_id = int(ques_id.item())
_, pred = topk_prob.max(dim=0)
result.append({"question_id": ques_id, "answer": data_loader.dataset.answer_list[topk_id[pred]]})
accuracy = cal_metric(result, dataset)
metric_logger.meters['acc'].update(accuracy, n=B)
# gather the stats from all processes
torch.cuda.empty_cache()
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger.global_avg())
return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()} | null |
18,857 | import torch
from torch import optim as optim
from .adafactor import Adafactor
from .adahessian import Adahessian
from .adamp import AdamP
from .lookahead import Lookahead
from .nadam import Nadam
from .novograd import NovoGrad
from .nvnovograd import NvNovoGrad
from .radam import RAdam
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}]
from torch.optim import Optimizer
class Adafactor(torch.optim.Optimizer):
"""Implements Adafactor algorithm.
This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
(see https://arxiv.org/abs/1804.04235)
Note that this optimizer internally adjusts the learning rate depending on the
*scale_parameter*, *relative_step* and *warmup_init* options.
To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
`relative_step=False`.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional): external learning rate (default: None)
eps (tuple[float, float]): regularization constants for square gradient
and parameter scale respectively (default: (1e-30, 1e-3))
clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0)
decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8)
beta1 (float): coefficient used for computing running averages of gradient (default: None)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True)
relative_step (bool): if True, time-dependent learning rate is computed
instead of external learning rate (default: True)
warmup_init (bool): time-dependent learning rate computation depends on
whether warm-up initialization is being used (default: False)
"""
def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0,
decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):
relative_step = lr is None
if warmup_init and not relative_step:
raise ValueError('warmup_init requires relative_step=True')
beta1 = None if betas is None else betas[0] # make it compat with standard betas arg
defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate,
beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,
relative_step=relative_step, warmup_init=warmup_init)
super(Adafactor, self).__init__(params, defaults)
def _get_lr(param_group, param_state):
if param_group['relative_step']:
min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2
lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))
param_scale = 1.0
if param_group['scale_parameter']:
param_scale = max(param_group['eps_scale'], param_state['RMS'])
param_group['lr'] = lr_t * param_scale
return param_group['lr']
def _get_options(param_group, param_shape):
factored = len(param_shape) >= 2
use_first_moment = param_group['beta1'] is not None
return factored, use_first_moment
def _rms(tensor):
return tensor.norm(2) / (tensor.numel() ** 0.5)
def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
return torch.mul(r_factor, c_factor)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError('Adafactor does not support sparse gradients.')
state = self.state[p]
grad_shape = grad.shape
factored, use_first_moment = self._get_options(group, grad_shape)
# State Initialization
if len(state) == 0:
state['step'] = 0
if use_first_moment:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(grad)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)
state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
state['RMS'] = 0
else:
if use_first_moment:
state['exp_avg'] = state['exp_avg'].to(grad)
if factored:
state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
else:
state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state['step'] += 1
state['RMS'] = self._rms(p_data_fp32)
lr_t = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
update = grad ** 2 + group['eps']
if factored:
exp_avg_sq_row = state['exp_avg_sq_row']
exp_avg_sq_col = state['exp_avg_sq_col']
exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))
exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))
#exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) # pytorch 1.6+
#exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)
# Approximation of exponential moving average of square of gradient
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
update.mul_(grad)
else:
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)
#exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) # pytorch 1.6+
update = exp_avg_sq.rsqrt().mul_(grad)
update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
update.mul_(lr_t)
if use_first_moment:
exp_avg = state['exp_avg']
exp_avg.mul_(group["beta1"]).add_(1 - group["beta1"], update)
#exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) # pytorch 1.6+
update = exp_avg
if group['weight_decay'] != 0:
p_data_fp32.add_(-group["weight_decay"] * lr_t, p_data_fp32)
#p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * lr_t) # pytorch 1.6+
p_data_fp32.add_(-update)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
class Adahessian(torch.optim.Optimizer):
"""
Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning"
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional): learning rate (default: 0.1)
betas ((float, float), optional): coefficients used for computing running averages of gradient and the
squared hessian trace (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)
hessian_power (float, optional): exponent of the hessian trace (default: 1.0)
update_each (int, optional): compute the hessian trace approximation only after *this* number of steps
(to save time) (default: 1)
n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)
"""
def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0,
hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
if not 0.0 <= hessian_power <= 1.0:
raise ValueError(f"Invalid Hessian power value: {hessian_power}")
self.n_samples = n_samples
self.update_each = update_each
self.avg_conv_kernel = avg_conv_kernel
# use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training
self.seed = 2147483647
self.generator = torch.Generator().manual_seed(self.seed)
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)
super(Adahessian, self).__init__(params, defaults)
for p in self.get_params():
p.hess = 0.0
self.state[p]["hessian step"] = 0
def is_second_order(self):
return True
def get_params(self):
"""
Gets all parameters in all param_groups with gradients
"""
return (p for group in self.param_groups for p in group['params'] if p.requires_grad)
def zero_hessian(self):
"""
Zeros out the accumalated hessian traces.
"""
for p in self.get_params():
if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0:
p.hess.zero_()
def set_hessian(self):
"""
Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.
"""
params = []
for p in filter(lambda p: p.grad is not None, self.get_params()):
if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step
params.append(p)
self.state[p]["hessian step"] += 1
if len(params) == 0:
return
if self.generator.device != params[0].device: # hackish way of casting the generator to the right device
self.generator = torch.Generator(params[0].device).manual_seed(self.seed)
grads = [p.grad for p in params]
for i in range(self.n_samples):
# Rademacher distribution {-1.0, 1.0}
zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]
h_zs = torch.autograd.grad(
grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)
for h_z, z, p in zip(h_zs, zs, params):
p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z)
def step(self, closure=None):
"""
Performs a single optimization step.
Arguments:
closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)
"""
loss = None
if closure is not None:
loss = closure()
self.zero_hessian()
self.set_hessian()
for group in self.param_groups:
for p in group['params']:
if p.grad is None or p.hess is None:
continue
if self.avg_conv_kernel and p.dim() == 4:
p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()
# Perform correct stepweight decay as in AdamW
p.mul_(1 - group['lr'] * group['weight_decay'])
state = self.state[p]
# State initialization
if len(state) == 1:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of Hessian diagonal square values
state['exp_hessian_diag_sq'] = torch.zeros_like(p)
exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)
exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
k = group['hessian_power']
denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])
# make update
step_size = group['lr'] / bias_correction1
p.addcdiv_(exp_avg, denom, value=-step_size)
return loss
class AdamP(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)
super(AdamP, self).__init__(params, defaults)
def _channel_view(self, x):
return x.view(x.size(0), -1)
def _layer_view(self, x):
return x.view(1, -1)
def _cosine_similarity(self, x, y, eps, view_func):
x = view_func(x)
y = view_func(y)
x_norm = x.norm(dim=1).add_(eps)
y_norm = y.norm(dim=1).add_(eps)
dot = (x * y).sum(dim=1)
return dot.abs() / x_norm / y_norm
def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
wd = 1
expand_size = [-1] + [1] * (len(p.shape) - 1)
for view_func in [self._channel_view, self._layer_view]:
cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
wd = wd_ratio
return perturb, wd
return perturb, wd
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
beta1, beta2 = group['betas']
nesterov = group['nesterov']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
# Adam
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
if nesterov:
perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom
else:
perturb = exp_avg / denom
# Projection
wd_ratio = 1
if len(p.shape) > 1:
perturb, wd_ratio = self._projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])
# Weight decay
if group['weight_decay'] > 0:
p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio)
# Step
p.data.add_(-step_size, perturb)
return loss
class Lookahead(Optimizer):
def __init__(self, base_optimizer, alpha=0.5, k=6):
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0)
self.base_optimizer = base_optimizer
self.param_groups = self.base_optimizer.param_groups
self.defaults = base_optimizer.defaults
self.defaults.update(defaults)
self.state = defaultdict(dict)
# manually add our defaults to the param groups
for name, default in defaults.items():
for group in self.param_groups:
group.setdefault(name, default)
def update_slow(self, group):
for fast_p in group["params"]:
if fast_p.grad is None:
continue
param_state = self.state[fast_p]
if 'slow_buffer' not in param_state:
param_state['slow_buffer'] = torch.empty_like(fast_p.data)
param_state['slow_buffer'].copy_(fast_p.data)
slow = param_state['slow_buffer']
slow.add_(group['lookahead_alpha'], fast_p.data - slow)
fast_p.data.copy_(slow)
def sync_lookahead(self):
for group in self.param_groups:
self.update_slow(group)
def step(self, closure=None):
#assert id(self.param_groups) == id(self.base_optimizer.param_groups)
loss = self.base_optimizer.step(closure)
for group in self.param_groups:
group['lookahead_step'] += 1
if group['lookahead_step'] % group['lookahead_k'] == 0:
self.update_slow(group)
return loss
def state_dict(self):
fast_state_dict = self.base_optimizer.state_dict()
slow_state = {
(id(k) if isinstance(k, torch.Tensor) else k): v
for k, v in self.state.items()
}
fast_state = fast_state_dict['state']
param_groups = fast_state_dict['param_groups']
return {
'state': fast_state,
'slow_state': slow_state,
'param_groups': param_groups,
}
def load_state_dict(self, state_dict):
fast_state_dict = {
'state': state_dict['state'],
'param_groups': state_dict['param_groups'],
}
self.base_optimizer.load_state_dict(fast_state_dict)
# We want to restore the slow state, but share param_groups reference
# with base_optimizer. This is a bit redundant but least code
slow_state_new = False
if 'slow_state' not in state_dict:
print('Loading state_dict from optimizer without Lookahead applied.')
state_dict['slow_state'] = defaultdict(dict)
slow_state_new = True
slow_state_dict = {
'state': state_dict['slow_state'],
'param_groups': state_dict['param_groups'], # this is pointless but saves code
}
super(Lookahead, self).load_state_dict(slow_state_dict)
self.param_groups = self.base_optimizer.param_groups # make both ref same container
if slow_state_new:
# reapply defaults to catch missing lookahead specific ones
for name, default in self.defaults.items():
for group in self.param_groups:
group.setdefault(name, default)
class Nadam(Optimizer):
"""Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).
It has been proposed in `Incorporating Nesterov Momentum into Adam`__.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
schedule_decay (float, optional): momentum schedule decay (default: 4e-3)
__ http://cs229.stanford.edu/proj2015/054_report.pdf
__ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
Originally taken from: https://github.com/pytorch/pytorch/pull/1408
NOTE: Has potential issues but does work well on some problems.
"""
def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, schedule_decay=4e-3):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, schedule_decay=schedule_decay)
super(Nadam, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['m_schedule'] = 1.
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
# Warming momentum schedule
m_schedule = state['m_schedule']
schedule_decay = group['schedule_decay']
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
eps = group['eps']
state['step'] += 1
t = state['step']
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
momentum_cache_t = beta1 * \
(1. - 0.5 * (0.96 ** (t * schedule_decay)))
momentum_cache_t_1 = beta1 * \
(1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
m_schedule_new = m_schedule * momentum_cache_t
m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
state['m_schedule'] = m_schedule_new
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1. - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1. - beta2, grad, grad)
exp_avg_sq_prime = exp_avg_sq / (1. - beta2 ** t)
denom = exp_avg_sq_prime.sqrt_().add_(eps)
p.data.addcdiv_(-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new), grad, denom)
p.data.addcdiv_(-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next), exp_avg, denom)
return loss
class NovoGrad(Optimizer):
def __init__(self, params, grad_averaging=False, lr=0.1, betas=(0.95, 0.98), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(NovoGrad, self).__init__(params, defaults)
self._lr = lr
self._beta1 = betas[0]
self._beta2 = betas[1]
self._eps = eps
self._wd = weight_decay
self._grad_averaging = grad_averaging
self._momentum_initialized = False
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
if not self._momentum_initialized:
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('NovoGrad does not support sparse gradients')
v = torch.norm(grad)**2
m = grad/(torch.sqrt(v) + self._eps) + self._wd * p.data
state['step'] = 0
state['v'] = v
state['m'] = m
state['grad_ema'] = None
self._momentum_initialized = True
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['step'] += 1
step, v, m = state['step'], state['v'], state['m']
grad_ema = state['grad_ema']
grad = p.grad.data
g2 = torch.norm(grad)**2
grad_ema = g2 if grad_ema is None else grad_ema * \
self._beta2 + g2 * (1. - self._beta2)
grad *= 1.0 / (torch.sqrt(grad_ema) + self._eps)
if self._grad_averaging:
grad *= (1. - self._beta1)
g2 = torch.norm(grad)**2
v = self._beta2*v + (1. - self._beta2)*g2
m = self._beta1*m + (grad / (torch.sqrt(v) + self._eps) + self._wd * p.data)
bias_correction1 = 1 - self._beta1 ** step
bias_correction2 = 1 - self._beta2 ** step
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
state['v'], state['m'] = v, m
state['grad_ema'] = grad_ema
p.data.add_(-step_size, m)
return loss
class NvNovoGrad(Optimizer):
"""
Implements Novograd algorithm.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.95, 0.98))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
grad_averaging: gradient averaging
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
"""
def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8,
weight_decay=0, grad_averaging=False, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
grad_averaging=grad_averaging,
amsgrad=amsgrad)
super(NvNovoGrad, self).__init__(params, defaults)
def __setstate__(self, state):
super(NvNovoGrad, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Sparse gradients are not supported.')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
norm = torch.sum(torch.pow(grad, 2))
if exp_avg_sq == 0:
exp_avg_sq.copy_(norm)
else:
exp_avg_sq.mul_(beta2).add_(1 - beta2, norm)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
grad.div_(denom)
if group['weight_decay'] != 0:
grad.add_(group['weight_decay'], p.data)
if group['grad_averaging']:
grad.mul_(1 - beta1)
exp_avg.mul_(beta1).add_(grad)
p.data.add_(-group['lr'], exp_avg)
return loss
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = group['lr'] * math.sqrt(
(1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
return loss
class RMSpropTF(Optimizer):
"""Implements RMSprop algorithm (TensorFlow style epsilon)
NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
and a few other modifications to closer match Tensorflow for matching hyper-params.
Noteworthy changes include:
1. Epsilon applied inside square-root
2. square_avg initialized to ones
3. LR scaling of update accumulated in momentum buffer
Proposed by G. Hinton in his
`course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
The centered version first appears in `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
momentum (float, optional): momentum factor (default: 0)
alpha (float, optional): smoothing (decay) constant (default: 0.9)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-10)
centered (bool, optional) : if ``True``, compute the centered RMSProp,
the gradient is normalized by an estimation of its variance
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101
lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer
update as per defaults in Tensorflow
"""
def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,
decoupled_decay=False, lr_in_momentum=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= alpha:
raise ValueError("Invalid alpha value: {}".format(alpha))
defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,
decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)
super(RMSpropTF, self).__init__(params, defaults)
def __setstate__(self, state):
super(RMSpropTF, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('momentum', 0)
group.setdefault('centered', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.ones_like(p.data) # PyTorch inits to zero
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p.data)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p.data)
square_avg = state['square_avg']
one_minus_alpha = 1. - group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
if 'decoupled_decay' in group and group['decoupled_decay']:
p.data.add_(-group['weight_decay'], p.data)
else:
grad = grad.add(group['weight_decay'], p.data)
# Tensorflow order of ops for updating squared avg
square_avg.add_(one_minus_alpha, grad.pow(2) - square_avg)
# square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) # PyTorch original
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.add_(one_minus_alpha, grad - grad_avg)
# grad_avg.mul_(alpha).add_(1 - alpha, grad) # PyTorch original
avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt_() # eps moved in sqrt
else:
avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt
if group['momentum'] > 0:
buf = state['momentum_buffer']
# Tensorflow accumulates the LR scaling in the momentum buffer
if 'lr_in_momentum' in group and group['lr_in_momentum']:
buf.mul_(group['momentum']).addcdiv_(group['lr'], grad, avg)
p.data.add_(-buf)
else:
# PyTorch scales the param update by LR
buf.mul_(group['momentum']).addcdiv_(grad, avg)
p.data.add_(-group['lr'], buf)
else:
p.data.addcdiv_(-group['lr'], grad, avg)
return loss
class SGDP(Optimizer):
    """SGD with Projection (SGDP, https://arxiv.org/abs/2006.08217).

    For (nearly) scale-invariant weights, the radial component of the update
    is projected out and weight decay is down-scaled by ``wd_ratio``.

    Fix: the original used the deprecated positional overload
    ``Tensor.add_(Number, Tensor)``, which raises on current PyTorch;
    replaced with the ``alpha=`` keyword form. Arithmetic unchanged.

    Args:
        params: iterable of parameters or dicts defining parameter groups
        lr (float): learning rate (required)
        momentum (float): momentum factor (default: 0)
        dampening (float): dampening for momentum (default: 0)
        weight_decay (float): L2 penalty (default: 0)
        nesterov (bool): enables Nesterov momentum (default: False)
        eps (float): numerical-stability term for the projection (default: 1e-8)
        delta (float): cosine-similarity threshold triggering projection (default: 0.1)
        wd_ratio (float): weight-decay scaling applied after a projection (default: 0.1)
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,
                        nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)
        super(SGDP, self).__init__(params, defaults)

    def _channel_view(self, x):
        # Flatten each output channel (first dim) to a row.
        return x.view(x.size(0), -1)

    def _layer_view(self, x):
        # Flatten the whole tensor to a single row.
        return x.view(1, -1)

    def _cosine_similarity(self, x, y, eps, view_func):
        """Per-row |cos| similarity between x and y under the given view."""
        x = view_func(x)
        y = view_func(y)
        x_norm = x.norm(dim=1).add_(eps)
        y_norm = y.norm(dim=1).add_(eps)
        dot = (x * y).sum(dim=1)
        return dot.abs() / x_norm / y_norm

    def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
        """Remove the radial component of ``perturb`` for scale-invariant
        parameters; tries per-channel first, then per-layer. Returns the
        (possibly projected) perturbation and the weight-decay scale."""
        wd = 1
        expand_size = [-1] + [1] * (len(p.shape) - 1)
        for view_func in [self._channel_view, self._layer_view]:
            cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
            if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
                p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
                perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
                wd = wd_ratio
                return perturb, wd
        return perturb, wd

    def step(self, closure=None):
        """Performs a single optimization step; returns closure() if given."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['momentum'] = torch.zeros_like(p.data)
                # SGD momentum update
                buf = state['momentum']
                buf.mul_(momentum).add_(grad, alpha=1 - dampening)
                if nesterov:
                    d_p = grad + momentum * buf
                else:
                    d_p = buf
                # Projection (only meaningful for >= 2-d weights)
                wd_ratio = 1
                if len(p.shape) > 1:
                    d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])
                # Weight decay, scaled down when a projection occurred
                if weight_decay != 0:
                    p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1 - momentum))
                # Step
                p.data.add_(d_p, alpha=-group['lr'])
        return loss
def create_optimizer(args, model, filter_bias_and_bn=True):
    """Factory that builds an optimizer for ``model`` from ``args.opt``.

    Fix: the fall-through branch was ``assert False and "Invalid optimizer"``
    — ``and`` short-circuits so the message is never shown, and the bare
    ``raise ValueError`` after it was unreachable except under ``python -O``.
    It now raises a ``ValueError`` naming the unknown optimizer.

    Args:
        args: namespace with ``opt``, ``lr``, ``weight_decay``, ``momentum``
            and optionally ``opt_eps`` / ``opt_betas`` / ``opt_args``.
        model: the model whose parameters are optimized.
        filter_bias_and_bn: if True (and weight decay is on), exclude bias
            and norm parameters from weight decay via ``add_weight_decay``.

    Returns:
        A configured optimizer, optionally wrapped in ``Lookahead`` when the
        name is prefixed with ``lookahead_``.

    Raises:
        ValueError: if ``args.opt`` names an unknown optimizer.
    """
    opt_lower = args.opt.lower()
    weight_decay = args.weight_decay
    if weight_decay and filter_bias_and_bn:
        skip = {}
        if hasattr(model, 'no_weight_decay'):
            skip = model.no_weight_decay()
        # add_weight_decay returns two param groups (decayed / not decayed),
        # so the top-level weight_decay must be zeroed out.
        parameters = add_weight_decay(model, weight_decay, skip)
        weight_decay = 0.
    else:
        parameters = model.parameters()
    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
    opt_args = dict(lr=args.lr, weight_decay=weight_decay)
    # Optional extras are only forwarded when explicitly configured.
    if hasattr(args, 'opt_eps') and args.opt_eps is not None:
        opt_args['eps'] = args.opt_eps
    if hasattr(args, 'opt_betas') and args.opt_betas is not None:
        opt_args['betas'] = args.opt_betas
    if hasattr(args, 'opt_args') and args.opt_args is not None:
        opt_args.update(args.opt_args)
    # "lookahead_sgd" style names: prefix selects a wrapper, suffix the base.
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'nadam':
        optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        # Adafactor can derive its own LR schedule when lr is unset.
        if not args.lr:
            opt_args['lr'] = None
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'novograd':
        optimizer = NovoGrad(parameters, **opt_args)
    elif opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        raise ValueError('Invalid optimizer: {}'.format(opt_lower))
    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)
    return optimizer
18,858 | import torch
from torch import optim as optim
from .adafactor import Adafactor
from .adahessian import Adahessian
from .adamp import AdamP
from .lookahead import Lookahead
from .nadam import Nadam
from .novograd import NovoGrad
from .nvnovograd import NvNovoGrad
from .radam import RAdam
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
from torch.optim import Optimizer
def create_two_optimizer(args, model, filter_bias_and_bn=True):
    """Build an AdamW optimizer with two learning rates: ``args.lr1`` for all
    non-visual parameters and ``args.lr2`` for ``model.visual_encoder``, each
    split into decayed and non-decayed (bias / LayerNorm) groups."""
    no_decay = ["bias", "LayerNorm.weight"]

    def _wants_decay(name):
        # bias and LayerNorm weights are conventionally excluded from decay
        return not any(token in name for token in no_decay)

    text_params = [(n, p) for n, p in model.named_parameters()
                   if "visual_encoder" not in n]
    visual_params = [(n, p) for n, p in model.visual_encoder.named_parameters()
                     if "visual_encoder" not in n]

    optimizer_grouped_parameters = [
        {"params": [p for n, p in text_params if _wants_decay(n)],
         "weight_decay": args.weight_decay, "lr": args.lr1},
        {"params": [p for n, p in text_params if not _wants_decay(n)],
         "weight_decay": 0.0, "lr": args.lr1},
        {"params": [p for n, p in visual_params if _wants_decay(n)],
         "weight_decay": args.weight_decay, "lr": args.lr2},
        {"params": [p for n, p in visual_params if not _wants_decay(n)],
         "weight_decay": 0.0, "lr": args.lr2},
    ]
    return optim.AdamW(optimizer_grouped_parameters)
18,859 | from functools import partial
from pickle import NONE, TRUE
from turtle import forward
from matplotlib.transforms import Transform
from models.vit import VisionTransformer, interpolate_pos_embed
from models.modeling_mplug import BertConfig, BertModel, BertPrefixModel,BertEncoder, BertPrefixModelForGrounding, FusionModel
from models.visual_transformers import initialize_clip
import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
import random
from einops import repeat
from vgTools.utils.box_utils import xywh2xyxy,generalized_box_iou
from icecream import ic
from mmdet.models.losses import smooth_l1_loss
The provided code snippet includes necessary dependencies for implementing the `concat_all_gather` function. Write a Python function `def concat_all_gather(tensor)` to solve the following problem:
Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient.
Here is the function:
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    # Preallocate one receive buffer per rank, then gather synchronously.
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
18,860 | from functools import partial
from models.vit import VisionTransformer
from models.modeling_mplug import BertConfig, BertModel, BertLMHeadModel, FusionModel
from models.visual_transformers import initialize_clip
import torch
from torch import nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `concat_all_gather` function. Write a Python function `def concat_all_gather(tensor)` to solve the following problem:
Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient.
Here is the function:
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    # Preallocate one receive buffer per rank, then gather synchronously.
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
18,861 | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch.nn.functional as F
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
import numpy as np
def resize_pos_embed(posemb, posemb_new):
    """Rescale the grid of position embeddings when loading from state_dict.

    Adapted from
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224

    Fix: removed the dead ``if True: ... else: ...`` toggle — the else branch
    was unreachable; behavior is unchanged.

    Args:
        posemb: source embedding of shape (1, 1 + gs_old**2, dim); the first
            token is the class token and is copied through unchanged.
        posemb_new: target embedding whose token count sets the new grid size,
            shape (1, 1 + gs_new**2, dim). Both grids are assumed square.

    Returns:
        Tensor of shape ``(1, posemb_new.shape[1], dim)`` with the grid part
        bilinearly interpolated.
    """
    ntok_new = posemb_new.shape[1] - 1  # minus the class token
    posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
    gs_old = int(math.sqrt(len(posemb_grid)))
    gs_new = int(math.sqrt(ntok_new))
    # _logger.info('Position embedding grid-size from %s to %s', gs_old, gs_new)
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    # Interpolate in float32 and cast back (half-precision interpolate is
    # unsupported on some backends).
    orig = posemb_grid.dtype
    posemb_grid = F.interpolate(posemb_grid.float(), size=(gs_new, gs_new), mode='bilinear')
    posemb_grid = posemb_grid.to(orig)
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
    return posemb
from torch.optim import Optimizer
def initialize_clip(config, num_patches=240):
    """Load a CLIP visual backbone and build a positional embedding sized for
    ``config['image_res']``.

    Args:
        config: dict with at least ``clip_name`` ("ViT-B-16" or "ViT-L-14")
            and ``image_res``.
        num_patches: default patch count; recomputed from image_res below.

    Returns:
        ``(clip_model, preprocess)`` as returned by ``clip.load``.
    """
    from models.clip import clip
    if config["clip_name"] == "ViT-B-16":
        clip_model, preprocess = clip.load("ViT-B-16.tar", jit=False)
        num_patches = int(config['image_res']*config['image_res']/(16*16))
        pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768).float())
    elif config["clip_name"] == "ViT-L-14":
        clip_model, preprocess = clip.load("ViT-L-14.tar", jit=False)
        num_patches = int(config['image_res']*config['image_res']/(14*14))
        pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 1024).float())
    # NOTE(review): any other clip_name reaches the next line with `pos_embed`
    # unbound and raises NameError — consider an explicit error message.
    # NOTE(review): assigning to `.weight` on an nn.Parameter only attaches an
    # ad-hoc attribute; the parameter's data stays all-zeros. Presumably
    # `pos_embed.data = ...` was intended — confirm against upstream mPLUG.
    pos_embed.weight = resize_pos_embed(clip_model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0))
    clip_model.visual.positional_embedding = pos_embed
    return clip_model, preprocess
18,862 | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch.nn.functional as F
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
import numpy as np
def resize_pos_embed(posemb, posemb_new):
    """Rescale the grid of position embeddings when loading from state_dict.

    Adapted from
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224

    Fix: removed the dead ``if True: ... else: ...`` toggle — the else branch
    was unreachable; behavior is unchanged.

    Args:
        posemb: source embedding of shape (1, 1 + gs_old**2, dim); the first
            token is the class token and is copied through unchanged.
        posemb_new: target embedding whose token count sets the new grid size,
            shape (1, 1 + gs_new**2, dim). Both grids are assumed square.

    Returns:
        Tensor of shape ``(1, posemb_new.shape[1], dim)`` with the grid part
        bilinearly interpolated.
    """
    ntok_new = posemb_new.shape[1] - 1  # minus the class token
    posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
    gs_old = int(math.sqrt(len(posemb_grid)))
    gs_new = int(math.sqrt(ntok_new))
    # _logger.info('Position embedding grid-size from %s to %s', gs_old, gs_new)
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    # Interpolate in float32 and cast back (half-precision interpolate is
    # unsupported on some backends).
    orig = posemb_grid.dtype
    posemb_grid = F.interpolate(posemb_grid.float(), size=(gs_new, gs_new), mode='bilinear')
    posemb_grid = posemb_grid.to(orig)
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
    return posemb
from torch.optim import Optimizer
def initialize_vit(VISUAL_CONFIG, model_type="ViT-B_32", pretrained_dir="data/ViT-B_32.npz", img_size=(384, 640),
                   num_patches=240):
    """Build a ViT from the `vit` package, load .npz pretrained weights, and
    resize its position embedding to ``num_patches``.

    Args:
        VISUAL_CONFIG: config object; ``freeze_clip`` freezes all parameters.
        model_type: key into the package's CONFIGS dict.
        pretrained_dir: path to the .npz checkpoint to load.
        img_size: unused here — the model is built with img_size=224; the
            embedding resize below is what adapts it. TODO confirm intent.
        num_patches: target number of grid positions for the embedding.

    Returns:
        The (optionally frozen) VisionTransformer.
    """
    from vit.models.modeling import VisionTransformer, CONFIGS
    config = CONFIGS[model_type]
    model = VisionTransformer(config, img_size=224, zero_head=True, num_classes=1)
    model.load_from(np.load(pretrained_dir))
    pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768).float())
    # NOTE(review): assigning to `.weight` on an nn.Parameter only attaches an
    # ad-hoc attribute; the parameter's data stays all-zeros. Presumably
    # `pos_embed.data = ...` was intended — confirm against upstream.
    pos_embed.weight = resize_pos_embed(model.transformer.embeddings.position_embeddings, pos_embed.unsqueeze(0))
    model.transformer.embeddings.position_embeddings = pos_embed
    if VISUAL_CONFIG.freeze_clip:
        for parameter in model.parameters():
            parameter.requires_grad = False
    return model
18,863 | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch.nn.functional as F
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
import numpy as np
from torch.optim import Optimizer
def initialize_optimizer(visual_model, lr, momentum, weight_decay):
    """Build a plain SGD optimizer over all parameters of ``visual_model``."""
    return torch.optim.SGD(
        visual_model.parameters(),
        lr,
        momentum=momentum,
        weight_decay=weight_decay,
    )
18,864 | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch.nn.functional as F
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
import numpy as np
from torch.optim import Optimizer
The provided code snippet includes necessary dependencies for implementing the `adjust_learning_rate` function. Write a Python function `def adjust_learning_rate(optimizer, epoch, args)` to solve the following problem:
Decay the learning rate based on schedule
Here is the function:
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate based on schedule.

    Starting from ``args.sgd_lr``, the rate is multiplied by 0.1 once for
    every milestone in the comma-separated ``args.schedule`` that ``epoch``
    has reached, and written into every param group of ``optimizer``.
    """
    lr = args.sgd_lr
    for milestone in args.schedule.split(","):
        if epoch >= float(milestone):
            lr *= 0.1
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
18,865 | from functools import partial
from models.vit import VisionTransformer
from models.modeling_mplug import BertConfig, BertModel, BertPrefixModel, FusionModel
from models.visual_transformers import initialize_clip
from models.predictor import TextGenerator
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
def tile(x, dim, n_tile):
    """Repeat each slice of ``x`` along ``dim`` ``n_tile`` times consecutively
    (e.g. [a, b] -> [a, a, b, b] for n_tile=2), mirroring np.repeat."""
    init_dim = x.size(dim)
    reps = [1] * x.dim()
    reps[dim] = n_tile
    tiled = x.repeat(*reps)
    # Index that interleaves the tiled copies back into per-slice order.
    order = np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])
    index = torch.LongTensor(order).to(x.device)
    return torch.index_select(tiled, dim, index)
18,866 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import _cfg, PatchEmbed
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    """Bicubically resize a checkpoint's ViT position-embedding grid to match
    ``visual_encoder``'s patch grid; extra (class/dist) tokens are preserved.

    Returns the checkpoint embedding unchanged when the grid sizes already
    match. Both patch grids are assumed square.
    """
    embedding_size = pos_embed_checkpoint.shape[-1]
    num_patches = visual_encoder.patch_embed.num_patches
    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
    # height (== width) of the old and new square grids
    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    new_size = int(num_patches ** 0.5)
    if orig_size == new_size:
        return pos_embed_checkpoint
    # class/dist tokens are kept unchanged; only grid tokens are interpolated
    extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
    pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
    pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
    pos_tokens = torch.nn.functional.interpolate(
        pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
    pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
    new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
    print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))
    return new_pos_embed
18,867 | from functools import partial
from models.vit import VisionTransformer
from models.modeling_mplug import BertConfig, BertModel, BertLMHeadModel, FusionModel
from models.visual_transformers import initialize_clip
from models.predictor import TextGenerator
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
def tile(x, dim, n_tile):
    """Repeat each slice of ``x`` along ``dim`` ``n_tile`` times consecutively
    (e.g. [a, b] -> [a, a, b, b] for n_tile=2), mirroring np.repeat."""
    init_dim = x.size(dim)
    reps = [1] * x.dim()
    reps[dim] = n_tile
    tiled = x.repeat(*reps)
    # Index that interleaves the tiled copies back into per-slice order.
    order = np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])
    index = torch.LongTensor(order).to(x.device)
    return torch.index_select(tiled, dim, index)
18,868 | import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
import transformers
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_bert` function. Write a Python function `def load_tf_weights_in_bert(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TF checkpoint, maps its scope path onto the
    PyTorch module tree, and copies the array into the matching parameter.
    Optimizer slots (adam_m/adam_v/global_step) are skipped. Returns ``model``.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        # Walk the TF scope path down the PyTorch module tree.
        pointer = model
        for m_name in name:
            # Scopes like "layer_3" index into a module list: split name/index.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # Map TF naming conventions onto PyTorch attribute names.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` advances the *inner* scope
                    # loop, not the outer variable loop — this matches upstream
                    # Hugging Face behavior, but later scopes then apply to the
                    # unchanged pointer.
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        # Embedding tables live on .weight; TF stores dense kernels transposed
        # relative to torch Linear, hence the transpose.
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
18,869 | import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
import transformers
def clamp_inf(tensor):
if tensor.dtype == torch.float16 and torch.isinf(tensor).any():
clamp_value = torch.finfo(tensor.dtype).max - 1000
tensor = torch.clamp(tensor, min=-clamp_value, max=clamp_value)
return tensor | null |
18,870 | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
import os
import math
import json
import torch
The provided code snippet includes necessary dependencies for implementing the `_make_causal_mask` function. Write a Python function `def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0, device='cuda:0')` to solve the following problem:
Make causal mask used for bi-directional self-attention.
Here is the function:
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0, device='cuda:0'):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"), device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) | Make causal mask used for bi-directional self-attention. |
18,871 | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
import os
import math
import json
import torch
# NOTE(review): this block is an interface sketch only — every method below
# has a signature but no body, so it is not valid Python as written here.
# Presumably the real implementations live in the full predictor module;
# confirm before relying on this file.
class TextGenerator(object):
    # Beam-search / sampling text generator wrapping `model`.
    def __init__(self,
                 args,
                 model,
                 vocab=None,
                 symbols=None,
                 global_scorer=None,
                 logger=None,
                 dump_beam=""):
    # Map a prediction's token ids back to target-vocabulary tokens.
    def _build_target_tokens(self, pred):
    # Decode one batch; `out_size` controls hypotheses returned per input.
    def translate_batch(self, encoder_inputs, do_sample=False, out_size=1):
    # SCST (self-critical sequence training) variant of translate_batch.
    def translate_batch_scst(self, encoder_inputs, do_sample=False, out_size=1):
    # Batched beam search with length bounds.
    def _fast_translate_batch(self,
                              encoder_inputs,
                              max_length,
                              min_length=0,
                              do_sample=False,
                              out_size=1):
    # Greedy / sampling decoding loop (no beam).
    def _generate_no_beam_search(
            self,
            input_ids,
            cur_len,
            max_length,
            do_sample,
            temperature,
            top_k,
            top_p,
            repetition_penalty,
            pad_token_id,
            eos_token_ids,
            batch_size,
    ):
def build_predictor(args, tokenizer, symbols, model, logger=None):
    """Construct a TextGenerator for ``model`` using ``tokenizer``/``symbols``.

    The global scorer is currently disabled
    (was: GNMTGlobalScorer(args.alpha, length_penalty='wu')).
    """
    scorer = None
    return TextGenerator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger)
18,872 | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
import os
import math
import json
import torch
def top_k_top_p_filtering(logits, top_k=10, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
    """Mask logits outside the top-k / nucleus (top-p) sets with ``filter_value``.

    Operates in place on ``logits`` (last dim = vocabulary) and returns it.
    At least ``min_tokens_to_keep`` tokens per row are always preserved.
    """
    if top_k > 0:
        # Safety check: keep at least min_tokens_to_keep, at most vocab size.
        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))
        # Everything strictly below the k-th largest logit is filtered.
        kth_value = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_value] = filter_value
    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Drop tokens once cumulative probability exceeds top_p ...
        sorted_indices_to_remove = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1
            # because we add the first one back below)
            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
        # ... but shift right so the first token crossing the threshold stays.
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        # Scatter the sorted decisions back to the original token order.
        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        logits[indices_to_remove] = filter_value
    return logits
18,873 | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
import os
import math
import json
import torch
The provided code snippet includes necessary dependencies for implementing the `tile` function. Write a Python function `def tile(x, count, dim=0)` to solve the following problem:
Tiles x on dimension dim count times.
Here is the function:
def tile(x, count, dim=0):
    """
    Tiles x on dimension dim count times (each slice repeated consecutively).
    """
    if dim != 0:
        # Bring the tiled dimension to the front; the same permutation is
        # self-inverse, so it restores the layout at the end.
        perm = list(range(x.dim()))
        perm[0], perm[dim] = perm[dim], perm[0]
        x = x.permute(perm).contiguous()
    out_size = list(x.size())
    out_size[0] *= count
    batch = x.size(0)
    x = (x.view(batch, -1)
          .transpose(0, 1)
          .repeat(count, 1)
          .transpose(0, 1)
          .contiguous()
          .view(*out_size))
    if dim != 0:
        x = x.permute(perm).contiguous()
    return x
18,875 | import hashlib
import os
import urllib
import warnings
from typing import Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
_tokenizer = _Tokenizer()
def tokenize(texts: Union[str, List[str]], context_length: int = 77):
    """Tokenize text(s) into a (len(texts), context_length) LongTensor of BPE
    ids, wrapped in start/end-of-text tokens and zero-padded on the right.

    Raises:
        RuntimeError: if any encoded text exceeds ``context_length`` tokens.
    """
    if isinstance(texts, str):
        texts = [texts]
    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    encoded = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    result = torch.zeros(len(encoded), context_length, dtype=torch.long)
    for row, tokens in enumerate(encoded):
        if len(tokens) > context_length:
            raise RuntimeError(f"Input {texts[row]} is too long for context length {context_length}")
        result[row, :len(tokens)] = torch.tensor(tokens)
    return result
18,876 | from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `convert_weights` function. Write a Python function `def convert_weights(model: nn.Module)` to solve the following problem:
Convert applicable model parameters to fp16
Here is the function:
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16"""

    def _to_fp16(module):
        # Conv / Linear layers: cast weight and (optional) bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()
        # MultiheadAttention stores projections as plain tensor attributes.
        if isinstance(module, nn.MultiheadAttention):
            attrs = [f"{s}_proj_weight" for s in ["in", "q", "k", "v"]]
            attrs += ["in_proj_bias", "bias_k", "bias_v"]
            for attr in attrs:
                tensor = getattr(module, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()
        # CLIP-specific projection attributes.
        for name in ["text_projection", "proj"]:
            if hasattr(module, name):
                attr = getattr(module, name)
                if attr is not None:
                    attr.data = attr.data.half()

    model.apply(_to_fp16)
18,877 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
def default_bpe():
    """Path to the bundled BPE vocabulary gzip, located next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "bpe_simple_vocab_16e6.txt.gz")
18,878 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
The provided code snippet includes necessary dependencies for implementing the `bytes_to_unicode` function. Write a Python function `def bytes_to_unicode()` to solve the following problem:
Returns a list of utf-8 bytes and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. It also avoids mapping to whitespace/control characters the bpe code barfs on.
Here is the function:
def bytes_to_unicode():
    """Map every byte value 0..255 to a printable unicode character.

    The reversible BPE codes work on unicode strings, so every byte must
    have a dedicated, non-whitespace, non-control character.  Printable
    bytes map to themselves; the remaining bytes are assigned code points
    starting at 256, in ascending byte order.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    mapping = {b: chr(b) for b in printable}
    offset = 0
    for b in range(256):
        if b not in mapping:
            # Shift non-printable bytes above the byte range entirely.
            mapping[b] = chr(256 + offset)
            offset += 1
    return mapping
18,879 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def get_pairs(word)` to solve the following problem:
Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings).
Here is the function:
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being
    variable-length strings).

    Robustness fix: an empty word now yields an empty set instead of
    raising IndexError on ``word[0]``; single-symbol words already
    returned an empty set.
    """
    # zip pairs each symbol with its successor; empty/singleton inputs
    # naturally produce no pairs.
    return set(zip(word, word[1:]))
18,880 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
def basic_clean(text):
    """Fix mojibake with ftfy, unescape HTML entities twice, and strip."""
    fixed = ftfy.fix_text(text)
    # Double unescape handles strings that were HTML-escaped twice.
    return html.unescape(html.unescape(fixed)).strip()
18,881 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()
18,882 | import collections
import os
import unicodedata
from typing import List, Optional, Tuple
from transformers.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from transformers.utils import logging
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
    """Load a vocabulary file (one token per line) into an ordered dict.

    Returns an ``OrderedDict`` mapping token -> zero-based line index.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # Stream line by line; only the trailing newline is stripped so
        # tokens containing other whitespace survive intact.
        for index, line in enumerate(reader):
            vocab[line.rstrip("\n")] = index
    return vocab
18,883 | import collections
import os
import unicodedata
from typing import List, Optional, Tuple
from transformers.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from transformers.utils import logging
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
    """Run basic whitespace cleaning and splitting on a piece of text.

    ``str.split()`` with no arguments already discards leading/trailing
    whitespace and collapses internal runs, and returns [] for blank
    input, so it reproduces the strip-then-split behaviour exactly.
    """
    return text.split()
18,884 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_retrieval_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, scheduler, config, do_amp=False,
          do_two_optim=False):
    """Run one image-text retrieval training epoch.

    Args:
        model: retrieval model; forward returns ``(loss_ita, loss_itm)``.
        data_loader: yields ``(image, text, idx)`` batches.
        optimizer: optimizer whose param groups are logged below.
        tokenizer: HuggingFace-style tokenizer for the raw text batch.
        epoch: current epoch index (0-based; epoch 0 runs warmup logic).
        warmup_steps: number of warmup scheduler steps taken in epoch 0.
        device: target torch device.
        scheduler: LR scheduler stepped every ``step_size`` iterations
            during epoch 0 only.
        config: dict; reads ``'alpha'`` and ``'warm_up'``.
        do_amp: if True, scale the loss with NVIDIA apex AMP.
        do_two_optim: if True, log two LRs (param groups 0 and 2).

    Returns:
        dict mapping each tracked metric name to its epoch-averaged value
        formatted as a string with 3 decimals.
    """
    # train
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    if do_two_optim:
        metric_logger.add_meter('lr1', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
        metric_logger.add_meter('lr2', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    else:
        metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    step_size = 100
    warmup_iterations = warmup_steps * step_size
    for i, (image, text, idx) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device, non_blocking=True)
        idx = idx.to(device, non_blocking=True)
        text_input = tokenizer(text, padding='longest', max_length=30, return_tensors="pt").to(device)
        # Ramp the momentum-distillation weight `alpha` linearly from 0 to
        # its configured value over the first epoch (when warm_up is set).
        if epoch > 0 or not config['warm_up']:
            alpha = config['alpha']
        else:
            alpha = config['alpha'] * min(1, i / len(data_loader))
        loss_ita, loss_itm = model(image, text_input, alpha=alpha, idx=idx)
        loss = loss_ita + loss_itm
        if do_amp:
            from apex import amp
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                # logger.info('scaled loss: {}'.format(str(scaled_loss)))
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        metric_logger.update(loss_itm=loss_itm.item())
        metric_logger.update(loss_ita=loss_ita.item())
        if do_two_optim:
            # NOTE(review): assumes the two-optimizer setup exposes its second
            # LR at param_groups[2] — confirm against create_two_optimizer.
            metric_logger.update(lr1=optimizer.param_groups[0]["lr"])
            metric_logger.update(lr2=optimizer.param_groups[2]["lr"])
        else:
            metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        # Warmup: advance the scheduler every `step_size` iterations during
        # epoch 0 only, up to `warmup_iterations`.
        if epoch == 0 and i % step_size == 0 and i <= warmup_iterations:
            scheduler.step(i // step_size)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
18,885 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_retrieval_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
def evaluation(model, data_loader, tokenizer, device, config):
    """Score every image-text pair for retrieval evaluation.

    Two stages: (1) encode all texts and images with the dual encoders and
    compute a coarse cosine-similarity matrix; (2) rerank the top
    ``config['k_test']`` candidates per query with the fusion encoder's
    ITM head.  In distributed mode each rank rescores a contiguous slice
    of rows and the slices are summed with all_reduce.

    NOTE(review): reads the module-level ``args`` global for
    ``args.distributed`` — confirm ``args`` is defined before calling.

    Returns:
        ``(score_matrix_i2t, score_matrix_t2i)`` as numpy arrays of shape
        (num_images, num_texts) and (num_texts, num_images); entries not
        in the top-k remain at the -100.0 fill value.
    """
    # test
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Evaluation:'
    print('Computing features for evaluation...')
    start_time = time.time()
    texts = data_loader.dataset.text
    num_text = len(texts)
    text_bs = 256
    text_feats = []
    text_embeds = []
    text_atts = []
    # Encode all candidate texts in chunks of `text_bs`.
    for i in range(0, num_text, text_bs):
        text = texts[i: min(num_text, i + text_bs)]
        text_input = tokenizer(text, padding='max_length', truncation=True, max_length=30, return_tensors="pt").to(
            device)
        text_output = model.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask)
        text_feat = text_output.last_hidden_state
        # CLS token -> normalized embedding for the coarse similarity.
        text_embed = F.normalize(model.text_proj(text_feat[:, 0, :]))
        text_embeds.append(text_embed)
        text_feats.append(text_feat)
        text_atts.append(text_input.attention_mask)
    text_embeds = torch.cat(text_embeds, dim=0)
    text_feats = torch.cat(text_feats, dim=0)
    text_atts = torch.cat(text_atts, dim=0)
    image_feats = []
    image_embeds = []
    # Encode all images.
    for image, img_id in data_loader:
        image = image.to(device)
        image_feat = model.visual_encoder.visual(image, skip_last_layer=True)
        image_feat = model.visn_layer_norm(model.visn_fc(image_feat))
        # image_feat = model.visual_encoder(image)
        image_embed = model.vision_proj(image_feat[:, 0, :])
        image_embed = F.normalize(image_embed, dim=-1)
        image_feats.append(image_feat)
        image_embeds.append(image_embed)
    image_feats = torch.cat(image_feats, dim=0)
    image_embeds = torch.cat(image_embeds, dim=0)
    # Coarse dual-encoder similarity: rows are images, columns are texts.
    sims_matrix = image_embeds @ text_embeds.t()
    score_matrix_i2t = torch.full((len(data_loader.dataset.image), len(texts)), -100.0).to(device)
    # Split the rows across distributed ranks.
    num_tasks = utils.get_world_size()
    rank = utils.get_rank()
    step = sims_matrix.size(0) // num_tasks + 1
    start = rank * step
    end = min(sims_matrix.size(0), start + step)
    for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        # Rerank only the top-k texts for this image with the fusion encoder.
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
        encoder_output = image_feats[start + i].repeat(config['k_test'], 1, 1)
        encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to(device)
        _, output = model.fusion_encoder(encoder_embeds=text_feats[topk_idx],
                                         attention_mask=text_atts[topk_idx],
                                         encoder_hidden_states=encoder_output,
                                         encoder_attention_mask=encoder_att,
                                         return_dict=False,
                                         )
        # ITM head: logit of the "match" class (index 1).
        score = model.itm_head(output[:, 0, :])[:, 1]
        score_matrix_i2t[start + i, topk_idx] = score.float()
    # Same reranking in the text->image direction.
    sims_matrix = sims_matrix.t()
    score_matrix_t2i = torch.full((len(texts), len(data_loader.dataset.image)), -100.0).to(device)
    step = sims_matrix.size(0) // num_tasks + 1
    start = rank * step
    end = min(sims_matrix.size(0), start + step)
    for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
        encoder_output = image_feats[topk_idx]
        encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to(device)
        _, output = model.fusion_encoder(encoder_embeds=text_feats[start + i].repeat(config['k_test'], 1, 1),
                                         attention_mask=text_atts[start + i].repeat(config['k_test'], 1),
                                         encoder_hidden_states=encoder_output,
                                         encoder_attention_mask=encoder_att,
                                         return_dict=False,
                                         )
        score = model.itm_head(output[:, 0, :])[:, 1]
        score_matrix_t2i[start + i, topk_idx] = score.float()
    if args.distributed:
        # Each rank filled a disjoint slice; summing merges them (the
        # -100 fill is overwritten, unfilled cells accumulate fills).
        dist.barrier()
        torch.distributed.all_reduce(score_matrix_i2t, op=torch.distributed.ReduceOp.SUM)
        torch.distributed.all_reduce(score_matrix_t2i, op=torch.distributed.ReduceOp.SUM)
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Evaluation time {}'.format(total_time_str))
    return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()
18,886 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_retrieval_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
def itm_eval(scores_i2t, scores_t2i, txt2img, img2txt):
    """Compute recall@{1,5,10} for both retrieval directions.

    Args:
        scores_i2t: (num_images, num_texts) score matrix.
        scores_t2i: (num_texts, num_images) score matrix.
        txt2img: text index -> ground-truth image index.
        img2txt: image index -> list of ground-truth text indices.

    Returns:
        dict of recall percentages plus per-direction and overall means.
    """

    def _recall(ranks, k):
        # Percentage of queries whose ground truth lands in the top-k.
        return 100.0 * len(np.where(ranks < k)[0]) / len(ranks)

    # ---- image -> text: best rank among all ground-truth captions ----
    i2t_ranks = np.zeros(scores_i2t.shape[0])
    for img_idx, score in enumerate(scores_i2t):
        order = np.argsort(score)[::-1]
        i2t_ranks[img_idx] = min(
            (np.where(order == t)[0][0] for t in img2txt[img_idx]),
            default=1e20)
    tr1, tr5, tr10 = (_recall(i2t_ranks, k) for k in (1, 5, 10))

    # ---- text -> image: single ground-truth image per caption ----
    t2i_ranks = np.zeros(scores_t2i.shape[0])
    for txt_idx, score in enumerate(scores_t2i):
        order = np.argsort(score)[::-1]
        t2i_ranks[txt_idx] = np.where(order == txt2img[txt_idx])[0][0]
    ir1, ir5, ir10 = (_recall(t2i_ranks, k) for k in (1, 5, 10))

    tr_mean = (tr1 + tr5 + tr10) / 3
    ir_mean = (ir1 + ir5 + ir10) / 3
    r_mean = (tr_mean + ir_mean) / 2

    eval_result = {'txt_r1': tr1,
                   'txt_r5': tr5,
                   'txt_r10': tr10,
                   'txt_r_mean': tr_mean,
                   'img_r1': ir1,
                   'img_r5': ir5,
                   'img_r10': ir10,
                   'img_r_mean': ir_mean,
                   'r_mean': r_mean}
    return eval_result
18,887 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import modeling
import modeling_labert
import optimization
import tokenization
import tokenization_labert
import tensorflow as tf
import numpy as np
import tf_metrics
import random
import shutil
import collections
from loss_logging_hook import LossLoggingHook
from best_checkpoint_copyer import BestCheckpointCopier
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Convert one `InputExample` into an `InputFeatures` for sequence labeling.

  Builds a lattice-style encoding: each word is tokenized and its first
  sub-token carries the word's label; then extra lattice tokens produced
  by tokenizing the whole concatenated sentence (and not already covered
  by the per-word spans) are appended.  All id lists are truncated or
  zero-padded to `max_seq_length`.

  Args:
    ex_index: example index; the first 3 examples are logged verbosely.
    example: an `InputExample`, or `PaddingInputExample` for batch padding.
    label_list: list of label strings; list position becomes the label id.
    max_seq_length: fixed output length for every feature list.
    tokenizer: lattice tokenizer exposing `tokenize`, `build_cls_encoding`,
      `build_sep_encoding`, `convert_tokens_to_ids` and encodings with
      `tokens`/`positions`/`lengths`/`position_embedding`.

  Returns:
    An `InputFeatures` instance.
  """
  # Padding examples become all-zero features with zero label weights.
  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        positional_embeddings_start=[0] * max_seq_length,
        positional_embeddings_end=[0] * max_seq_length,
        label_positions=[0] * max_seq_length,
        label_ids=[0] * max_seq_length,
        label_weights=[0.] * max_seq_length)

  # Label string -> integer id.
  label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i

  segment_ids, label_positions, label_ids, label_weights = [], [], [], []
  segment_ids.append(0)
  position = 1
  encoding = tokenizer.build_cls_encoding(add_candidate_indices=False)
  covered = {(0, 1)}  # The CLS token

  # Per-word tokenization: first sub-token of each word is a label site;
  # `covered` records every (start, end) character span already emitted.
  for i, word in enumerate(example.words):
    if example.labels is not None:
      label_id = label_map[example.labels[i]]
    else:
      label_id = 0  # dummy id.
    encoding_of_a_word = tokenizer.tokenize(word, add_candidate_indices=False)
    # Offset of this word = end of everything encoded so far.
    last_position = encoding.positions[-1] + encoding.lengths[-1]
    for start_position, length in zip(encoding_of_a_word.positions,
                                      encoding_of_a_word.lengths):
      covered.add((last_position + start_position,
                   last_position + start_position + length))
    encoding.extend(encoding_of_a_word)
    for j, _ in enumerate(encoding_of_a_word.tokens):
      if j == 0:
        label_positions.append(position)
        label_ids.append(label_id)
        label_weights.append(1.)
      segment_ids.append(0)
      position += 1

  # Lattice pass: tokenize the whole sentence at once and append any
  # token whose character span is not already covered by the word pass.
  encoding1 = tokenizer.build_cls_encoding(add_candidate_indices=False)
  encoding1.extend(tokenizer.tokenize("".join(example.words), add_candidate_indices=False))
  for token, start_position, length in zip(encoding1.tokens,
                                           encoding1.positions,
                                           encoding1.lengths):
    if (start_position, start_position + length) in covered:
      continue
    segment_ids.append(0)
    encoding.tokens.append(token)
    encoding.positions.append(start_position)
    encoding.lengths.append(length)

  encoding.extend(tokenizer.build_sep_encoding(add_candidate_indices=False))
  segment_ids.append(0)

  positional_embeddings_start, positional_embeddings_end = encoding.position_embedding(modes=['start', 'end'])
  input_ids = tokenizer.convert_tokens_to_ids(encoding.tokens)
  input_mask = [1] * len(input_ids)

  # Truncate, then zero-pad, the token-aligned lists to max_seq_length.
  if len(input_ids) > max_seq_length:
    input_ids = input_ids[:max_seq_length]
    input_mask = input_mask[:max_seq_length]
    segment_ids = segment_ids[:max_seq_length]
    positional_embeddings_start = positional_embeddings_start[:max_seq_length]
    positional_embeddings_end = positional_embeddings_end[:max_seq_length]
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
    positional_embeddings_start.append(0)
    positional_embeddings_end.append(0)

  # Same for the label-aligned lists (one entry per labeled word).
  if len(label_positions) > max_seq_length:
    label_positions = label_positions[:max_seq_length]
    label_ids = label_ids[:max_seq_length]
    label_weights = label_weights[:max_seq_length]
  while len(label_positions) < max_seq_length:
    label_positions.append(0)  # it's label padding, not related with padding
    label_ids.append(0)
    label_weights.append(0.)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  assert len(positional_embeddings_start) == max_seq_length
  assert len(positional_embeddings_end) == max_seq_length

  # Verbose logging for the first few examples only.
  if ex_index < 3:
    tf.compat.v1.logging.info("*** Example ***")
    tf.compat.v1.logging.info("guid: %s" % example.guid)
    tf.compat.v1.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in encoding.tokens]))
    tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.compat.v1.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.compat.v1.logging.info(
        "positional_embeddings_start: %s" % " ".join([str(x) for x in positional_embeddings_start]))
    tf.compat.v1.logging.info(
        "positional_embeddings_end: %s" % " ".join([str(x) for x in positional_embeddings_end]))
    tf.compat.v1.logging.info("label_positions: %s" % " ".join([str(x) for x in label_positions]))
    tf.compat.v1.logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
    tf.compat.v1.logging.info("label_weights: %s" % " ".join([str(x) for x in label_weights]))

  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      positional_embeddings_start=positional_embeddings_start,
      positional_embeddings_end=positional_embeddings_end,
      label_positions=label_positions,
      label_ids=label_ids,
      label_weights=label_weights)
  return feature
The provided code snippet includes necessary dependencies for implementing the `file_based_convert_examples_to_features` function. Write a Python function `def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file,)` to solve the following problem:
Convert a set of `InputExample`s to a TFRecord file.
Here is the function:
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file,):
  """Convert a set of `InputExample`s to a TFRecord file.

  Args:
    examples: iterable of `InputExample`s (may include padding examples).
    label_list: list of label strings passed to `convert_single_example`.
    max_seq_length: fixed sequence length for every feature.
    tokenizer: tokenizer passed to `convert_single_example`.
    output_file: path of the TFRecord file to write.
  """

  # Hoisted out of the loop: the original redefined these closures on
  # every iteration for no benefit.
  def create_int_feature(values):
    # int64 list wrapper for tf.train.Example.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

  def create_float_feature(values):
    # float list wrapper for tf.train.Example.
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))

  # Context manager guarantees the writer is flushed and closed even if
  # feature conversion raises mid-way (the original leaked it on error).
  with tf.io.TFRecordWriter(output_file) as writer:
    for (ex_index, example) in enumerate(examples):
      if ex_index % 10000 == 0:
        tf.compat.v1.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

      feature = convert_single_example(ex_index, example, label_list,
                                       max_seq_length, tokenizer,)
      if feature is None:
        # Mirrors the original `if feature is not None` guard.
        continue

      features = collections.OrderedDict()
      features["input_ids"] = create_int_feature(feature.input_ids)
      features["input_mask"] = create_int_feature(feature.input_mask)
      features["segment_ids"] = create_int_feature(feature.segment_ids)
      features["positional_embeddings_start"] = create_int_feature(feature.positional_embeddings_start)
      features["positional_embeddings_end"] = create_int_feature(feature.positional_embeddings_end)
      features["label_positions"] = create_int_feature(feature.label_positions)
      features["label_ids"] = create_int_feature(feature.label_ids)
      features["label_weights"] = create_float_feature(feature.label_weights)

      tf_example = tf.train.Example(features=tf.train.Features(feature=features))
      writer.write(tf_example.SerializeToString())
18,888 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import modeling
import modeling_labert
import optimization
import tokenization
import tokenization_labert
import tensorflow as tf
import numpy as np
import tf_metrics
import random
import shutil
import collections
from loss_logging_hook import LossLoggingHook
from best_checkpoint_copyer import BestCheckpointCopier
The provided code snippet includes necessary dependencies for implementing the `file_based_input_fn_builder` function. Write a Python function `def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder, num_training_instances=100)` to solve the following problem:
Creates an `input_fn` closure to be passed to TPUEstimator.
Here is the function:
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder, num_training_instances=100):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Args:
    input_file: TFRecord file produced by the feature converter.
    seq_length: fixed length of every per-token feature list.
    is_training: if True, repeat indefinitely and shuffle.
    drop_remainder: drop the final partial batch (required on TPU).
    num_training_instances: upper bound used to size the shuffle buffer
      (capped at 10000).

  Returns:
    An `input_fn(params)` returning a `tf.data.Dataset` of feature dicts.
  """
  # Schema must match the fields written by the TFRecord converter.
  name_to_features = {
      "input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
      "positional_embeddings_start": tf.io.FixedLenFeature([seq_length], tf.int64),
      "positional_embeddings_end": tf.io.FixedLenFeature([seq_length], tf.int64),
      "label_positions": tf.io.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
      "label_weights": tf.io.FixedLenFeature([seq_length], tf.float32),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.io.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.cast(t, tf.int32)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(min(num_training_instances, 10000))

    # NOTE(review): tf.contrib implies TF 1.x; map_and_batch is deprecated
    # in favor of map().batch() on newer releases — confirm target version.
    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

    return d

  return input_fn
18,889 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import modeling
import modeling_labert
import optimization
import tokenization
import tokenization_labert
import tensorflow as tf
import numpy as np
import tf_metrics
import random
import shutil
import collections
from loss_logging_hook import LossLoggingHook
from best_checkpoint_copyer import BestCheckpointCopier
FLAGS = flags.FLAGS
def compute_adv_loss(embedding_output, labert_config, input_ids, input_mask,
                     start_positions, end_positions,
                     num_labels,
                     label_positions, label_weights, is_training,
                     target_logits, noise_epsilon, step_size):
  """Compute a SMART-style adversarial regularization loss.

  Two passes through the (reused) transformer: pass 1 perturbs the
  embeddings with Gaussian noise and uses the KL divergence w.r.t.
  `target_logits` to obtain a gradient direction; pass 2 perturbs the
  embeddings along that (normalized) gradient scaled by `step_size` and
  returns the symmetric KL loss against `target_logits`.

  All variable scopes are opened with reuse=True so the same weights as
  the clean forward pass are used.  Helpers `gather_indexes`,
  `stable_ce_kl` and `sym_ce_kl_loss` are defined elsewhere in this file.

  Returns:
    Scalar adversarial loss, or 0.0 when the gradient norm is inf/NaN.
  """
  # Random perturbation for the first (gradient-probing) pass.
  z = tf.random.normal(tf.shape(embedding_output)) * noise_epsilon
  with tf.compat.v1.variable_scope("bert", reuse=True):
    with tf.compat.v1.variable_scope("embeddings"):
      adv_embedding_output = embedding_output + z
    with tf.compat.v1.variable_scope("encoder"):
      attention_mask = modeling.create_attention_mask_from_input_mask(
          input_ids, input_mask)
      all_encoder_layers = modeling_labert.transformer_model(
          position_embeddings_ids=[start_positions, end_positions],
          input_tensor=adv_embedding_output,
          attention_mask=attention_mask,
          hidden_size=labert_config.hidden_size,
          embedding_size=labert_config.embedding_size,
          num_hidden_layers=labert_config.num_hidden_layers,
          num_attention_heads=labert_config.num_attention_heads,
          intermediate_size=labert_config.intermediate_size,
          intermediate_act_fn=modeling.get_activation(labert_config.hidden_act),
          hidden_dropout_prob=labert_config.hidden_dropout_prob,
          attention_probs_dropout_prob=labert_config.attention_probs_dropout_prob,
          initializer_range=labert_config.initializer_range,
          do_share_parameter_across_layers=False,
          do_return_all_layers=True,
          do_return_attention_maps=False,
          compute_type=tf.float32)

  adv_output_layer = tf.cast(all_encoder_layers[-1], tf.float32)
  adv_output_layer = gather_indexes(adv_output_layer, label_positions)
  hidden_size = adv_output_layer.shape[-1].value

  # Reuse the classification head created by create_model().
  root_scope = tf.compat.v1.get_variable_scope()
  with tf.compat.v1.variable_scope(root_scope, reuse=True):
    output_weights = tf.compat.v1.get_variable(
        "output_weights", [num_labels, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))

    output_bias = tf.compat.v1.get_variable(
        "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.compat.v1.variable_scope("loss", reuse=True):
    if is_training:
      adv_output_layer = tf.nn.dropout(adv_output_layer, rate=0.1)

    adv_logits = tf.matmul(adv_output_layer, output_weights, transpose_b=True)
    adv_logits = tf.nn.bias_add(adv_logits, output_bias)

    label_weights = tf.reshape(label_weights, [-1])
    # KL to the clean logits (stop_gradient: only the noise direction
    # should receive gradient, not the target).
    adv_loss = stable_ce_kl(adv_logits, tf.stop_gradient(target_logits))
    adv_loss = tf.reshape(adv_loss, [-1])
    adv_loss = tf.reduce_sum(adv_loss * label_weights) / tf.reduce_sum(label_weights)

  # Gradient of the probing loss w.r.t. the perturbed embeddings gives
  # the adversarial direction; guard against inf/NaN norms.
  delta_grad = tf.compat.v1.gradients(adv_loss, adv_embedding_output)[0]
  norm = tf.norm(delta_grad)
  is_corrupted = tf.math.logical_or(tf.math.is_inf(norm), tf.math.is_nan(norm))
  # Normalize per-position by the max absolute component.
  delta_grad = delta_grad / (tf.math.reduce_max(tf.math.abs(delta_grad), axis=-1, keepdims=True) + 1e-6)

  with tf.compat.v1.variable_scope("bert", reuse=True):
    with tf.compat.v1.variable_scope("embeddings"):
      adv_embedding_output2 = embedding_output + tf.stop_gradient(delta_grad * step_size)
    with tf.compat.v1.variable_scope("encoder"):
      all_encoder_layers2 = modeling_labert.transformer_model(
          input_tensor=adv_embedding_output2,
          attention_mask=attention_mask,
          position_embeddings_ids=[start_positions, end_positions],
          hidden_size=labert_config.hidden_size,
          embedding_size=labert_config.embedding_size,
          num_hidden_layers=labert_config.num_hidden_layers,
          num_attention_heads=labert_config.num_attention_heads,
          intermediate_size=labert_config.intermediate_size,
          intermediate_act_fn=modeling.get_activation(labert_config.hidden_act),
          hidden_dropout_prob=labert_config.hidden_dropout_prob,
          attention_probs_dropout_prob=labert_config.attention_probs_dropout_prob,
          initializer_range=labert_config.initializer_range,
          do_share_parameter_across_layers=False,
          do_return_all_layers=True,
          do_return_attention_maps=False,
          compute_type=tf.float32)

  adv_output_layer2 = tf.cast(all_encoder_layers2[-1], tf.float32)
  adv_output_layer2 = gather_indexes(adv_output_layer2, label_positions)
  with tf.compat.v1.variable_scope("loss", reuse=True):
    if is_training:
      adv_output_layer2 = tf.nn.dropout(adv_output_layer2, rate=0.1)

    adv_logits2 = tf.matmul(adv_output_layer2, output_weights, transpose_b=True)
    adv_logits2 = tf.nn.bias_add(adv_logits2, output_bias)

    # Symmetric KL between adversarial and clean logits.
    adv_loss2 = sym_ce_kl_loss(adv_logits2, target_logits)
    adv_loss2 = tf.reshape(adv_loss2, [-1])
    adv_loss2 = tf.reduce_sum(adv_loss2 * label_weights) / tf.reduce_sum(label_weights)

  # Drop the regularizer entirely when the probe gradient was corrupted.
  return tf.cond(is_corrupted, lambda: tf.constant(0.), lambda: adv_loss2)
def create_model(labert_config, is_training, input_ids, input_mask, segment_ids,
                 positional_embeddings_start, positional_embeddings_end,
                 label_positions, num_labels,
                 use_fp16=False, do_return_model=False):
  """Creates a per-token classification model on top of LaBERT.

  Runs the LaBERT encoder, gathers the hidden states at `label_positions`,
  and applies a linear classification head over `num_labels` classes.

  Args:
    labert_config: `LaBertConfig` for the encoder.
    is_training: enables dropout when True.
    input_ids / input_mask / segment_ids: standard BERT-style inputs.
    positional_embeddings_start / positional_embeddings_end: lattice
      start/end position ids.
    label_positions: indices of the tokens to classify.
    num_labels: size of the output label set.
    use_fp16: run the encoder in float16 compute.
    do_return_model: also return the `LaBertModel` (used by the
      adversarial-training path to access embeddings).

  Returns:
    `logits` of shape [batch * num_label_positions, num_labels], or
    `(logits, model)` when `do_return_model` is True.
  """
  model = modeling_labert.LaBertModel(
      scope="bert",
      config=labert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      start_positions=positional_embeddings_start,
      end_positions=positional_embeddings_end,
      use_one_hot_embeddings=False,
      compute_type=tf.float16 if use_fp16 else tf.float32,)

  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_sequence_output()

  hidden_size = output_layer.shape[-1].value
  # Keep only the hidden states at the labeled positions.
  output_layer = gather_indexes(output_layer, label_positions)

  output_weights = tf.compat.v1.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.compat.v1.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.compat.v1.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, rate=0.1)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    if do_return_model:
      return logits, model
    return logits
class LossLoggingHook(tf.estimator.SessionRunHook):
  """Session hook that periodically logs loss, LR and throughput.

  Fetches the `step_update`, `total_loss` and `learning_rate` tensors by
  name on every run and logs them (plus examples/sec) every
  `every_n_iter` global steps.

  NOTE(review): uses `time.time()` — assumes `time` is imported at the
  top of this module (not visible in this chunk); confirm.
  """

  def __init__(self, batch_size, every_n_iter):
    # every_n_examples is used to turn wall time into examples/sec.
    self.every_n_iter = every_n_iter
    self.every_n_examples = every_n_iter * batch_size
    # Tensors are looked up by graph name; they must exist in the model_fn.
    self.fetches = tf.estimator.SessionRunArgs(
      fetches=[
        "step_update:0",
        "total_loss:0",
        "learning_rate:0"
      ])
    self.step_start_time = -1

  def begin(self):
    self.step_start_time = time.time()

  def before_run(self, run_context):
    return self.fetches

  def after_run(self, run_context, run_values):
    global_step, total_loss, learning_rate = run_values.results
    if global_step % self.every_n_iter == 0:
      current_time = time.time()
      tf.compat.v1.logging.info(
        'global_step=%d (%.2f ex/sec) | total_loss=%2.5f | learning_rate=%.5e' % (
          global_step, self.every_n_examples / (current_time - self.step_start_time), total_loss, learning_rate))
      self.step_start_time = current_time
The provided code snippet includes necessary dependencies for implementing the `model_fn_builder` function. Write a Python function `def model_fn_builder(labert_config, num_labels, init_checkpoint, beta1, beta2, epsilon, num_train_steps, lr_layer_decay_rate, pos_indices=None, learning_rate=None, num_warmup_steps=None)` to solve the following problem:
Returns `model_fn` closure for TPUEstimator.
Here is the function:
def model_fn_builder(labert_config, num_labels, init_checkpoint,
beta1, beta2, epsilon,
num_train_steps, lr_layer_decay_rate, pos_indices=None,
learning_rate=None, num_warmup_steps=None):
"""Returns `model_fn` closure for TPUEstimator."""
def _model_fn(features, labels, mode, params):
do_log_information = (FLAGS.do_train and mode == tf.estimator.ModeKeys.TRAIN) or \
(not FLAGS.do_train and FLAGS.do_eval and mode == tf.estimator.ModeKeys.EVAL) or \
(FLAGS.do_predict and mode == tf.estimator.ModeKeys.PREDICT)
if do_log_information:
tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
positional_embeddings_start = features["positional_embeddings_start"]
positional_embeddings_end = features["positional_embeddings_end"]
label_positions = features["label_positions"]
label_ids = features["label_ids"]
label_weights = features["label_weights"]
seq_length = label_weights.shape[1]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
payload = create_model(
labert_config=labert_config, is_training=is_training,
input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids,
positional_embeddings_start=positional_embeddings_start,
positional_embeddings_end=positional_embeddings_end,
label_positions=label_positions, num_labels=num_labels,
do_return_model=FLAGS.do_adversarial_train and is_training)
if FLAGS.do_adversarial_train and is_training:
logits, model = payload
else:
logits, model = payload, None
seq_logits = tf.reshape(logits, (-1, seq_length, num_labels))
transition_matrix = tf.compat.v1.get_variable(
"transition_matrix", [num_labels, num_labels],
initializer=tf.truncated_normal_initializer(stddev=0.02))
sequence_lengths = tf.reduce_sum(label_weights, axis=-1)
sequence_lengths = tf.cast(sequence_lengths, tf.int64)
total_loss, _ = tf.contrib.crf.crf_log_likelihood(seq_logits, label_ids, sequence_lengths,
transition_matrix)
total_loss = tf.reduce_mean(-total_loss)
predictions, predicted_scores = tf.contrib.crf.crf_decode(seq_logits, transition_matrix, sequence_lengths)
if FLAGS.do_adversarial_train and is_training:
embedding_output = model.get_embedding_output()
adv_loss = compute_adv_loss(embedding_output=embedding_output,
labert_config=labert_config,
input_ids=input_ids, input_mask=input_mask,
start_positions=positional_embeddings_start,
end_positions=positional_embeddings_end,
num_labels=num_labels, label_positions=label_positions,
label_weights=label_weights, is_training=is_training,
target_logits=logits,
noise_epsilon=1e-5, step_size=1e-3)
total_loss = total_loss + adv_loss
total_loss = tf.identity(total_loss, name='total_loss')
tvars = tf.compat.v1.trainable_variables()
initialized_variable_names = {}
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)
if do_log_information:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
return total_loss, predictions, predicted_scores, label_ids, label_weights
def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator.

    Delegates graph construction to `_model_fn` (a CRF sequence tagger) and
    wraps the result in a TPUEstimatorSpec for TRAIN / EVAL / PREDICT.
    Hyper-parameters (learning_rate, beta1/2, num_train_steps, ...) come from
    the enclosing builder's closure.
    """
    total_loss, predictions, predicted_scores, label_ids, label_weights = _model_fn(features, labels, mode, params)
    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Adam with warmup and optional per-layer LR decay; no Horovod, fp32,
        # no gradient accumulation.
        train_op = optimization.create_optimizer(
            loss=total_loss, init_lr=learning_rate,
            beta1=beta1, beta2=beta2, epsilon=epsilon, num_train_steps=num_train_steps,
            num_warmup_steps=num_warmup_steps,
            hvd=None, use_fp16=False, num_accumulate_steps=1,
            optimizer_type="adam", allreduce_post_accumulation=False,
            lr_layer_decay_rate=lr_layer_decay_rate)
        # Log loss roughly 200 times over the whole training run.
        logging_hook = LossLoggingHook(params['batch_size'], every_n_iter=int(num_train_steps / 200 + 1))
        output_spec = tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=total_loss,
            train_op=train_op,
            scaffold_fn=None,
            training_hooks=[logging_hook])
    elif mode == tf.estimator.ModeKeys.EVAL:
        def metric_fn(per_example_loss, label_ids, predictions, label_weights):
            # Flatten to token level; `label_weights` masks padding positions.
            # NOTE(review): `per_example_loss` is unused here, and the caller
            # actually passes `total_loss` in its slot — confirm intended.
            label_weights = tf.reshape(label_weights, [-1])
            label_ids = tf.reshape(label_ids, [-1])
            predictions = tf.reshape(predictions, [-1])
            # `tf_metrics` and `pos_indices` come from enclosing scope
            # (not visible in this chunk) — presumably multi-class P/R/F.
            precision_micro = tf_metrics.precision(label_ids, predictions, num_labels,
                                                   pos_indices=pos_indices, weights=label_weights, average="micro")
            recall_micro = tf_metrics.recall(label_ids, predictions, num_labels,
                                             pos_indices=pos_indices, weights=label_weights, average="micro")
            f_micro = tf_metrics.f1(label_ids, predictions, num_labels,
                                    pos_indices=pos_indices, weights=label_weights, average="micro")
            accuracy = tf.compat.v1.metrics.accuracy(
                labels=label_ids, predictions=predictions, weights=label_weights)
            return {
                "eval_precision (micro)": precision_micro,
                "eval_recall (micro)": recall_micro,
                "eval_f (micro)": f_micro,
                "eval_acc": accuracy
            }
        eval_metrics = (metric_fn,
                        [total_loss, label_ids, predictions, label_weights])
        output_spec = tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=total_loss,
            eval_metrics=eval_metrics,
            scaffold_fn=None)
    else:
        # PREDICT: emit CRF decode results plus their scores and the mask.
        output_spec = tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode,
            predictions={"probabilities": predicted_scores,
                         "predictions": predictions,
                         "label_weights": label_weights},
            scaffold_fn=None)
    return output_spec
18,890 | import tensorflow as tf
import collections
import tokenization
def write_lattice_instance_to_example_file(
    instance, tokenizer, writer, max_seq_length,
    max_predictions_per_seq,
    position_embedding_names=('start', 'end'), do_dump_example=False):
  """Serialize a single lattice `TrainingInstance` into `writer`.

  Converts tokens and lattice position embeddings to ids, zero-pads every
  per-token list to `max_seq_length` and the masked-LM lists to
  `max_predictions_per_seq`, then writes one tf.train.Example. When
  `do_dump_example` is True the feature values are also logged.
  """
  input_ids = tokenizer.convert_tokens_to_ids(instance.encodings.tokens)
  positional_embeddings = instance.encodings.position_embedding(position_embedding_names)
  # Lattice positions must be non-negative before padding.
  for embedding in positional_embeddings:
    for value in embedding:
      assert value >= 0, f"{instance.encodings.tokens}"
  input_mask = [1] * len(input_ids)
  segment_ids = list(instance.segment_ids)

  assert len(input_ids) <= max_seq_length
  # Zero-pad every per-token sequence out to the fixed length.
  pad = max_seq_length - len(input_ids)
  input_ids.extend([0] * pad)
  input_mask.extend([0] * pad)
  segment_ids.extend([0] * pad)
  for embedding in positional_embeddings:
    embedding.extend([0] * pad)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  for embedding in positional_embeddings:
    assert len(embedding) == max_seq_length

  masked_lm_positions = list(instance.masked_lm_positions)
  masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
  masked_lm_weights = [1.0] * len(masked_lm_ids)
  # Pad the masked-LM slots; weight 0.0 marks the padded predictions.
  while len(masked_lm_positions) < max_predictions_per_seq:
    masked_lm_positions.append(0)
    masked_lm_ids.append(0)
    masked_lm_weights.append(0.0)

  next_sentence_label = instance.next_sentence_label

  features = collections.OrderedDict()
  features["input_ids"] = create_int_feature(input_ids)
  features["input_mask"] = create_int_feature(input_mask)
  features["segment_ids"] = create_int_feature(segment_ids)
  for name, embedding in zip(position_embedding_names, positional_embeddings):
    features[f"positional_embeddings_{name}"] = create_int_feature(embedding)
  features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
  features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
  features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
  features["next_sentence_labels"] = create_int_feature([next_sentence_label])
  assert all([len(t) == len(input_ids) for t in positional_embeddings])

  tf_example = tf.train.Example(features=tf.train.Features(feature=features))
  writer.write(tf_example.SerializeToString())

  if do_dump_example:
    tf.compat.v1.logging.info("*** Example ***")
    tf.compat.v1.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in instance.encodings.tokens]))
    for feature_name in features.keys():
      feature = features[feature_name]
      # A Feature populates exactly one of its typed value lists.
      values = []
      if feature.int64_list.value:
        values = feature.int64_list.value
      elif feature.float_list.value:
        values = feature.float_list.value
      tf.compat.v1.logging.info(
          "%s: %s" % (feature_name, " ".join([str(x) for x in values])))
The provided code snippet includes necessary dependencies for implementing the `write_lattice_instances_to_example_files` function. Write a Python function `def write_lattice_instances_to_example_files( instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files, position_embedding_names=('start', 'end'))` to solve the following problem:
Create TF example files from `TrainingInstance`s.
Here is the function:
def write_lattice_instances_to_example_files(
    instances, tokenizer, max_seq_length,
    max_predictions_per_seq, output_files,
    position_embedding_names=('start', 'end')):
  """Create TF example files from `TrainingInstance`s.

  Instances are distributed round-robin across one TFRecord writer per
  output file; the first 20 instances are also dumped to the log.
  """
  writers = [tf.io.TFRecordWriter(path) for path in output_files]

  total_written = 0
  for index, instance in enumerate(instances):
    write_lattice_instance_to_example_file(
        instance, tokenizer, writers[index % len(writers)],
        max_seq_length, max_predictions_per_seq,
        position_embedding_names, index < 20)
    total_written += 1

  for shard_writer in writers:
    shard_writer.close()

  tf.compat.v1.logging.info("Wrote %d total instances", total_written)
18,891 | import tensorflow as tf
import collections
import tokenization
def create_int_feature(values):
  """Wrap an iterable of ints in a tf.train.Feature holding an Int64List."""
  return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
def create_float_feature(values):
  """Wrap an iterable of floats in a tf.train.Feature holding a FloatList."""
  return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
The provided code snippet includes necessary dependencies for implementing the `write_instances_to_example_files` function. Write a Python function `def write_instances_to_example_files(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files)` to solve the following problem:
Create TF example files from `TrainingInstance`s.
Here is the function:
def write_instances_to_example_files(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_files):
"""Create TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.encodings)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = instance.next_sentence_label
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["segment_ids"] = create_int_feature(segment_ids)
features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
features["next_sentence_labels"] = create_int_feature([next_sentence_label])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 400:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in instance.encodings]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.compat.v1.logging.info(
"%s: %s" % (feature_name, " ".join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.compat.v1.logging.info("Wrote %d total instances", total_written) | Create TF example files from `TrainingInstance`s. |
18,892 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import pickle
import modeling
import modeling_labert
import optimization
import tokenization
import tokenization_labert
import tensorflow as tf
import numpy as np
import random
import shutil
from loss_logging_hook import LossLoggingHook
from best_checkpoint_copyer import BestCheckpointCopier
The provided code snippet includes necessary dependencies for implementing the `_read_tsv` function. Write a Python function `def _read_tsv(input_file, quotechar=None)` to solve the following problem:
Reads a tab separated value file.
Here is the function:
def _read_tsv(input_file, quotechar=None):
  """Reads a tab separated value file.

  Uses `tf.io.gfile` so the path may be local or on a remote filesystem
  (e.g. GCS). Returns a list of rows, each a list of column strings.
  """
  with tf.io.gfile.GFile(input_file, "r") as f:
    return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
18,893 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import pickle
import modeling
import modeling_labert
import optimization
import tokenization
import tokenization_labert
import tensorflow as tf
import numpy as np
import random
import shutil
from loss_logging_hook import LossLoggingHook
from best_checkpoint_copyer import BestCheckpointCopier
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer, rng):
  """Converts a single `InputExample` into a single `InputFeatures`.

  Args:
    ex_index: index of the example; the first 5 are dumped to the log.
    example: an `InputExample`, or a `PaddingInputExample` used to fill
      out the final batch.
    label_list: all label strings; list position defines the label id.
    max_seq_length: fixed output length; sequences are truncated/padded.
    tokenizer: lattice tokenizer exposing `tokenize`, `build_cls_encoding`,
      `build_sep_encoding`, `convert_tokens_to_ids`.
    rng: random generator forwarded to `_truncate_seq_pair`.

  Returns:
    An `InputFeatures` with ids, mask, segment ids and lattice start/end
    position embeddings, all of length `max_seq_length`.
  """
  # Padding examples yield all-zero features flagged `is_real_example=False`
  # so the batch size stays constant (TPU-friendly).
  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        positional_embeddings_start=[0] * max_seq_length,
        positional_embeddings_end=[0] * max_seq_length,
        label_id=0,
        is_real_example=False,)

  # Label string -> integer id, in `label_list` order.
  label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i

  encoding_a = tokenizer.tokenize(example.text_a, add_candidate_indices=False)
  encoding_b = None
  if example.text_b:
    encoding_b = tokenizer.tokenize(example.text_b, add_candidate_indices=False)

  if encoding_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(encoding_a, encoding_b, max_seq_length - 3, rng)
  else:
    # Account for [CLS] and [SEP] with "- 2"
    # Lazy pops defer the lattice bookkeeping until `finalize_lazy_pop_back`.
    while encoding_a.lazy_length() > max_seq_length - 2:
      encoding_a.lazy_pop_back()
    encoding_a.finalize_lazy_pop_back()

  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  encoding = tokenizer.build_cls_encoding(add_candidate_indices=False)
  segment_ids = [0]
  encoding.extend(encoding_a)
  segment_ids.extend([0] * len(encoding_a.tokens))
  encoding.extend(tokenizer.build_sep_encoding(add_candidate_indices=False))
  segment_ids.append(0)
  if encoding_b:
    encoding.extend(encoding_b)
    segment_ids.extend([1] * len(encoding_b.tokens))
    encoding.extend(tokenizer.build_sep_encoding(add_candidate_indices=False))
    segment_ids.append(1)

  input_ids = tokenizer.convert_tokens_to_ids(encoding.tokens)
  # Lattice positions: character start/end offsets per token — TODO confirm
  # exact semantics against the tokenizer implementation.
  positional_embeddings_start, positional_embeddings_end = encoding.position_embedding(['start', 'end'])

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
    positional_embeddings_start.append(0)
    positional_embeddings_end.append(0)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  assert len(positional_embeddings_start) == max_seq_length
  assert len(positional_embeddings_end) == max_seq_length

  # Unlabeled examples (e.g. test set) default to label id 0.
  if example.label:
    label_id = label_map[example.label]
  else:
    label_id = 0

  if ex_index < 5:
    tf.compat.v1.logging.info("*** Example ***")
    tf.compat.v1.logging.info("guid: %s" % (example.guid))
    tf.compat.v1.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in encoding.tokens]))
    tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.compat.v1.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.compat.v1.logging.info(
        "positional_embeddings_start: %s" % " ".join([str(x) for x in positional_embeddings_start]))
    tf.compat.v1.logging.info(
        "positional_embeddings_end: %s" % " ".join([str(x) for x in positional_embeddings_end]))
    tf.compat.v1.logging.info("label: %s (id = %d)" % (example.label, label_id))

  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      positional_embeddings_start=positional_embeddings_start,
      positional_embeddings_end=positional_embeddings_end,
      is_real_example=True,)
  return feature
The provided code snippet includes necessary dependencies for implementing the `file_based_convert_examples_to_features` function. Write a Python function `def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file, rng,)` to solve the following problem:
Convert a set of `InputExample`s to a TFRecord file.
Here is the function:
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file, rng,):
  """Convert a set of `InputExample`s to a TFRecord file.

  Each example is converted via `convert_single_example` and written as one
  tf.train.Example to `output_file`.

  Fixes over the previous version:
    * the `create_int_feature` helper was (re)defined on every loop
      iteration — hoisted out of the loop;
    * the `TFRecordWriter` leaked if conversion raised mid-loop — now
      closed in a `finally` block.
  """

  def create_int_feature(values):
    # Wrap an iterable of ints as an int64 Feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

  writer = tf.io.TFRecordWriter(output_file)
  try:
    for (ex_index, example) in enumerate(examples):
      if ex_index % 10000 == 0:
        tf.compat.v1.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

      feature = convert_single_example(ex_index, example, label_list,
                                       max_seq_length, tokenizer, rng)

      features = collections.OrderedDict()
      features["input_ids"] = create_int_feature(feature.input_ids)
      features["input_mask"] = create_int_feature(feature.input_mask)
      features["segment_ids"] = create_int_feature(feature.segment_ids)
      features["positional_embeddings_start"] = create_int_feature(feature.positional_embeddings_start)
      features["positional_embeddings_end"] = create_int_feature(feature.positional_embeddings_end)
      features["label_ids"] = create_int_feature([feature.label_id])
      features["is_real_example"] = create_int_feature(
          [int(feature.is_real_example)])

      tf_example = tf.train.Example(features=tf.train.Features(feature=features))
      writer.write(tf_example.SerializeToString())
  finally:
    writer.close()
18,894 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import pickle
import modeling
import modeling_labert
import optimization
import tokenization
import tokenization_labert
import tensorflow as tf
import numpy as np
import random
import shutil
from loss_logging_hook import LossLoggingHook
from best_checkpoint_copyer import BestCheckpointCopier
The provided code snippet includes necessary dependencies for implementing the `file_based_input_fn_builder` function. Write a Python function `def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder, add_vis_matrix=False)` to solve the following problem:
Creates an `input_fn` closure to be passed to TPUEstimator.
Here is the function:
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder, add_vis_matrix=False):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Args:
    input_file: TFRecord file produced by the feature-conversion step.
    seq_length: fixed length of every per-token feature.
    is_training: if True, repeat and shuffle the dataset.
    drop_remainder: drop the final partial batch (required on TPU).
    add_vis_matrix: also parse a sparse [seq_length, seq_length]
      visibility matrix feature.

  Returns:
    An `input_fn(params)` returning a `tf.data.Dataset` of feature dicts.
  """
  # Schema of the serialized examples; must match what the writer emitted.
  name_to_features = {
      "input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
      "positional_embeddings_start": tf.io.FixedLenFeature([seq_length], tf.int64),
      "positional_embeddings_end": tf.io.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.io.FixedLenFeature([], tf.int64),
      "is_real_example": tf.io.FixedLenFeature([], tf.int64),
  }
  if add_vis_matrix:
    # Stored sparsely as (i, j) index pairs plus values; reconstructed into
    # a [seq_length, seq_length] SparseTensor at parse time.
    name_to_features['visibility_matrix'] = tf.io.SparseFeature(
        index_key=['visibility_matrix_i', 'visibility_matrix_j'],
        value_key='visibility_matrix_values',
        dtype=tf.int64, size=[seq_length, seq_length])

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

    return d

  return input_fn
18,895 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import pickle
import modeling
import modeling_labert
import optimization
import tokenization
import tokenization_labert
import tensorflow as tf
import numpy as np
import random
import shutil
from loss_logging_hook import LossLoggingHook
from best_checkpoint_copyer import BestCheckpointCopier
FLAGS = flags.FLAGS
def create_model(labert_config, is_training, input_ids, input_mask, segment_ids,
                 positional_embeddings_start, positional_embeddings_end, labels,
                 num_labels, use_one_hot_embeddings, use_as_feature,):
  """Creates a classification model.

  Builds a LaBERT encoder, projects the pooled [CLS] output through a
  ReLU layer and a softmax classifier, and returns the cross-entropy loss.

  Args:
    labert_config: model configuration for `LaBertModel`.
    is_training: enables dropout when True.
    input_ids/input_mask/segment_ids: standard BERT-style inputs.
    positional_embeddings_start/end: lattice start/end positions per token.
    labels: int label ids, one per example.
    num_labels: size of the classification label set.
    use_one_hot_embeddings: passed through to the encoder.
    use_as_feature: if True, freeze the encoder (stop_gradient on its
      pooled output) and train only the heads.

  Returns:
    (total_loss, per_example_loss, logits, probabilities).
  """
  model = modeling_labert.LaBertModel(
      config=labert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      start_positions=positional_embeddings_start,
      end_positions=positional_embeddings_end,
      use_one_hot_embeddings=use_one_hot_embeddings,
      compute_type=tf.float32,)

  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value
  if use_as_feature:
    # Treat the encoder as a frozen feature extractor.
    output_layer = tf.stop_gradient(output_layer)

  # Intermediate projection head (hidden_size -> hidden_size, ReLU).
  project_weights = tf.get_variable(
      "project_weights", [hidden_size, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))
  project_bias = tf.get_variable(
      "project_bias", [hidden_size], initializer=tf.zeros_initializer())

  if is_training:
    # I.e., 0.1 dropout
    output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
  output_layer = tf.matmul(output_layer, project_weights, transpose_b=True)
  output_layer = tf.nn.bias_add(output_layer, project_bias)
  output_layer = tf.nn.relu(output_layer)

  # Final softmax classifier (hidden_size -> num_labels).
  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout (applied a second time, after the projection).
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    # Standard softmax cross-entropy against one-hot labels.
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    total_loss = tf.reduce_mean(per_example_loss)

    return (total_loss, per_example_loss, logits, probabilities)
class LossLoggingHook(tf.estimator.SessionRunHook):
  """Session hook that periodically logs loss, LR and throughput.

  Fetches the `step_update`, `total_loss` and `learning_rate` tensors by
  name on every run and logs them every `every_n_iter` global steps,
  together with examples/sec computed from wall-clock time.
  NOTE(review): relies on a module-level `time` import not visible here.
  """

  def __init__(self, batch_size, every_n_iter):
    # every_n_examples is used only for the throughput computation.
    self.every_n_iter = every_n_iter
    self.every_n_examples = every_n_iter * batch_size
    # Tensors fetched by graph name; the training graph must define them.
    self.fetches = tf.estimator.SessionRunArgs(
        fetches=[
            "step_update:0",
            "total_loss:0",
            "learning_rate:0"
        ])
    self.step_start_time = -1

  def begin(self):
    # Start the wall-clock timer when the session is created.
    self.step_start_time = time.time()

  def before_run(self, run_context):
    return self.fetches

  def after_run(self, run_context, run_values):
    global_step, total_loss, learning_rate = run_values.results
    if global_step % self.every_n_iter == 0:
      # ex/sec is averaged over the interval since the last log line.
      current_time = time.time()
      tf.compat.v1.logging.info(
        'global_step=%d (%.2f ex/sec) | total_loss=%2.5f | learning_rate=%.5e' % (
          global_step, self.every_n_examples / (current_time - self.step_start_time), total_loss, learning_rate))
      self.step_start_time = current_time
The provided code snippet includes necessary dependencies for implementing the `model_fn_builder` function. Write a Python function `def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, beta1, beta2, epsilon, num_train_steps, num_warmup_steps, use_one_hot_embeddings, use_as_feature,)` to solve the following problem:
Returns `model_fn` closure for TPUEstimator.
Here is the function:
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, beta1, beta2, epsilon,
                     num_train_steps, num_warmup_steps,
                     use_one_hot_embeddings, use_as_feature,):
  """Returns `model_fn` closure for TPUEstimator.

  The closure builds a LaBERT sequence classifier via `create_model`,
  optionally restores weights from `init_checkpoint`, and produces the
  TPUEstimatorSpec appropriate for the current mode.
  """

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    # Only log feature shapes / variable lists for the "primary" invocation
    # of each phase, to keep the logs readable.
    do_log_information = (FLAGS.do_train and mode == tf.estimator.ModeKeys.TRAIN) or \
                         (not FLAGS.do_train and FLAGS.do_eval and mode == tf.estimator.ModeKeys.EVAL) or \
                         (FLAGS.do_predict and mode == tf.estimator.ModeKeys.PREDICT)
    if do_log_information:
      tf.compat.v1.logging.info("*** Features ***")
      for name in sorted(features.keys()):
        tf.compat.v1.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    positional_embeddings_start = features["positional_embeddings_start"]
    positional_embeddings_end = features["positional_embeddings_end"]

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids,
        positional_embeddings_start, positional_embeddings_end, label_ids,
        num_labels, use_one_hot_embeddings, use_as_feature,)
    # Named tensor so LossLoggingHook can fetch "total_loss:0".
    total_loss = tf.identity(total_loss, name='total_loss')

    tvars = tf.compat.v1.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      # Warm-start overlapping variables from the pre-trained checkpoint.
      (assignment_map, initialized_variable_names
       ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)

    if do_log_information:
      tf.compat.v1.logging.info("**** Trainable Variables ****")
      for var in tvars:
        init_string = ""
        if var.name in initialized_variable_names:
          init_string = ", *INIT_FROM_CKPT*"
        tf.compat.v1.logging.info("  name = %s, shape = %s%s", var.name, var.shape, init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, beta1, beta2, epsilon, num_train_steps, num_warmup_steps,
          None, False)
      # Log loss roughly 200 times over the whole run.
      logging_hook = LossLoggingHook(params['batch_size'], every_n_iter=int(num_train_steps / 200 + 1))
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn,
          training_hooks=[logging_hook])
    elif mode == tf.estimator.ModeKeys.EVAL:
      def metric_fn(per_example_loss, label_ids, logits):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.compat.v1.metrics.accuracy(labels=label_ids, predictions=predictions)
        loss = tf.compat.v1.metrics.mean(values=per_example_loss)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      # PREDICT: emit class probabilities only.
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
18,896 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import pickle
import modeling
import modeling_labert
import optimization
import tokenization
import tokenization_labert
import tensorflow as tf
import numpy as np
import random
import shutil
from loss_logging_hook import LossLoggingHook
from best_checkpoint_copyer import BestCheckpointCopier
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer, rng):
  """Converts a single `InputExample` into a single `InputFeatures`.

  Args:
    ex_index: index of the example; the first 5 are dumped to the log.
    example: an `InputExample`, or a `PaddingInputExample` used to fill
      out the final batch.
    label_list: all label strings; list position defines the label id.
    max_seq_length: fixed output length; sequences are truncated/padded.
    tokenizer: lattice tokenizer exposing `tokenize`, `build_cls_encoding`,
      `build_sep_encoding`, `convert_tokens_to_ids`.
    rng: random generator forwarded to `_truncate_seq_pair`.

  Returns:
    An `InputFeatures` with ids, mask, segment ids and lattice start/end
    position embeddings, all of length `max_seq_length`.
  """
  # Padding examples yield all-zero features flagged `is_real_example=False`
  # so the batch size stays constant (TPU-friendly).
  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        positional_embeddings_start=[0] * max_seq_length,
        positional_embeddings_end=[0] * max_seq_length,
        label_id=0,
        is_real_example=False,)

  # Label string -> integer id, in `label_list` order.
  label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i

  encoding_a = tokenizer.tokenize(example.text_a, add_candidate_indices=False)
  encoding_b = None
  if example.text_b:
    encoding_b = tokenizer.tokenize(example.text_b, add_candidate_indices=False)

  if encoding_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(encoding_a, encoding_b, max_seq_length - 3, rng)
  else:
    # Account for [CLS] and [SEP] with "- 2"
    # Lazy pops defer the lattice bookkeeping until `finalize_lazy_pop_back`.
    while encoding_a.lazy_length() > max_seq_length - 2:
      encoding_a.lazy_pop_back()
    encoding_a.finalize_lazy_pop_back()

  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  encoding = tokenizer.build_cls_encoding(add_candidate_indices=False)
  segment_ids = [0]
  encoding.extend(encoding_a)
  segment_ids.extend([0] * len(encoding_a.tokens))
  encoding.extend(tokenizer.build_sep_encoding(add_candidate_indices=False))
  segment_ids.append(0)
  if encoding_b:
    encoding.extend(encoding_b)
    segment_ids.extend([1] * len(encoding_b.tokens))
    encoding.extend(tokenizer.build_sep_encoding(add_candidate_indices=False))
    segment_ids.append(1)

  input_ids = tokenizer.convert_tokens_to_ids(encoding.tokens)
  # Lattice positions: character start/end offsets per token — TODO confirm
  # exact semantics against the tokenizer implementation.
  positional_embeddings_start, positional_embeddings_end = encoding.position_embedding(['start', 'end'])

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
    positional_embeddings_start.append(0)
    positional_embeddings_end.append(0)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  assert len(positional_embeddings_start) == max_seq_length
  assert len(positional_embeddings_end) == max_seq_length

  # Unlabeled examples (e.g. test set) default to label id 0.
  if example.label:
    label_id = label_map[example.label]
  else:
    label_id = 0

  if ex_index < 5:
    tf.compat.v1.logging.info("*** Example ***")
    tf.compat.v1.logging.info("guid: %s" % (example.guid))
    tf.compat.v1.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in encoding.tokens]))
    tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.compat.v1.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.compat.v1.logging.info(
        "positional_embeddings_start: %s" % " ".join([str(x) for x in positional_embeddings_start]))
    tf.compat.v1.logging.info(
        "positional_embeddings_end: %s" % " ".join([str(x) for x in positional_embeddings_end]))
    tf.compat.v1.logging.info("label: %s (id = %d)" % (example.label, label_id))

  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      positional_embeddings_start=positional_embeddings_start,
      positional_embeddings_end=positional_embeddings_end,
      is_real_example=True,)
  return feature
The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, rng)` to solve the following problem:
Convert a set of `InputExample`s to a list of `InputFeatures`.
Here is the function:
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, rng):
    """Convert a set of `InputExample`s to a list of `InputFeatures`."""
    features = []
    for ex_index, example in enumerate(examples):
        # Progress log every 10k examples so long conversions stay visible.
        if ex_index % 10000 == 0:
            tf.compat.v1.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
        features.append(
            convert_single_example(ex_index, example, label_list,
                                   max_seq_length, tokenizer, rng))
    return features
18,897 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import tensorflow as tf
from modeling import (BertConfig,
reshape_to_matrix,
reshape_from_matrix,
get_shape_list,
get_activation,
gelu,
dropout,
layer_norm,
layer_norm_and_dropout,
create_attention_mask_from_input_mask,
create_initializer)
from gpu_environment import get_custom_getter
def create_initializer(initializer_range=0.02):
    """Create a truncated-normal initializer with the given stddev.

    Args:
        initializer_range: float. Stddev of the truncated normal
            distribution used to initialize weights.

    Returns:
        A TensorFlow initializer object.
    """
    # Use the tf.compat.v1 namespace for consistency with the rest of this
    # file (tf.compat.v1.get_variable, tf.compat.v1.logging, ...); the bare
    # tf.truncated_normal_initializer alias was removed in TF 2.x.
    return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
def get_shape_list(tensor, expected_rank=None, name=None):
    """Return the shape of `tensor` as a list, preferring static dimensions.

    Dimensions known at graph-construction time come back as Python ints;
    any dimension that is dynamic is returned as a scalar tf.Tensor.

    Args:
        tensor: A tf.Tensor object to find the shape of.
        expected_rank: (optional) int or list of ints. If given, the
            tensor's rank is validated and an exception is raised on
            mismatch.
        name: Optional name of the tensor for the error message.

    Returns:
        A list of dimensions (Python ints and/or scalar tf.Tensors).
    """
    if name is None:
        name = tensor.name

    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)

    static_shape = tensor.shape.as_list()
    dynamic_axes = [axis for axis, size in enumerate(static_shape)
                    if size is None]
    if not dynamic_axes:
        # Fully static shape: no graph ops needed.
        return static_shape

    runtime_shape = tf.shape(tensor)
    for axis in dynamic_axes:
        static_shape[axis] = runtime_shape[axis]
    return static_shape
def factorized_embedding_lookup(input_ids,
                                vocab_size,
                                hidden_size=128,
                                embedding_size=128,
                                initializer_range=0.02,
                                word_embedding_name="word_embeddings",
                                use_one_hot_embeddings=False):
    """Look up word embeddings through a factorized (two-matrix) table.

    Ids are embedded into `embedding_size` dimensions via a small lookup
    table and then projected up to `hidden_size` with a learned dense
    projection (ALBERT-style factorization -- presumably to shrink the
    vocabulary table; TODO confirm intent).

    Args:
        input_ids: int Tensor of shape [batch_size, seq_length] or
            [batch_size, seq_length, num_inputs] containing word ids.
        vocab_size: int. Size of the embedding vocabulary.
        hidden_size: int. Output width after the projection.
        embedding_size: int. Width of the factorized embedding table.
        initializer_range: float. Stddev for weight initialization.
        word_embedding_name: string. Variable name for the embedding table;
            the projection variable gets a "_projection" suffix.
        use_one_hot_embeddings: bool. If True, perform the lookup as a
            one-hot matmul (historically faster on TPU); otherwise use
            tf.gather.

    Returns:
        A tuple (output, factorized_embedding_table) where `output` has
        shape [batch_size, seq_length, num_inputs * hidden_size].
    """
    # This function assumes that the input is of shape [batch_size, seq_length,
    # num_inputs].
    #
    # If the input is a 2D tensor of shape [batch_size, seq_length], we
    # reshape to [batch_size, seq_length, 1].
    if input_ids.shape.ndims == 2:
        input_ids = tf.expand_dims(input_ids, axis=[-1])
    # The small factor: [vocab_size, embedding_size].
    factorized_embedding_table = tf.compat.v1.get_variable(
        name=word_embedding_name,
        shape=[vocab_size, embedding_size],
        initializer=create_initializer(initializer_range))
    # The second factor: [embedding_size, hidden_size].
    projection = tf.compat.v1.get_variable(
        name=word_embedding_name + "_projection",
        shape=[embedding_size, hidden_size],
        initializer=create_initializer(initializer_range))
    flat_input_ids = tf.reshape(input_ids, [-1])
    if use_one_hot_embeddings:
        one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
        output = tf.matmul(one_hot_input_ids, factorized_embedding_table)
    else:
        output = tf.gather(factorized_embedding_table, flat_input_ids)
    # Project from embedding_size up to hidden_size.
    output = tf.matmul(output, projection)
    input_shape = get_shape_list(input_ids)
    # Fold flat lookups back to [batch, seq, num_inputs * hidden_size].
    output = tf.reshape(output,
                        input_shape[0:-1] + [input_shape[-1] * hidden_size])
    return (output, factorized_embedding_table)
18,898 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import tensorflow as tf
from modeling import (BertConfig,
reshape_to_matrix,
reshape_from_matrix,
get_shape_list,
get_activation,
gelu,
dropout,
layer_norm,
layer_norm_and_dropout,
create_attention_mask_from_input_mask,
create_initializer)
from gpu_environment import get_custom_getter
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
    """Apply layer normalization, then dropout, to `input_tensor`."""
    normalized = layer_norm(input_tensor, name)
    return dropout(normalized, dropout_prob)
def create_initializer(initializer_range=0.02):
    """Create a truncated-normal initializer with the given stddev.

    Args:
        initializer_range: float. Stddev of the truncated normal
            distribution used to initialize weights.

    Returns:
        A TensorFlow initializer object.
    """
    # Use the tf.compat.v1 namespace for consistency with the rest of this
    # file (tf.compat.v1.get_variable, tf.compat.v1.logging, ...); the bare
    # tf.truncated_normal_initializer alias was removed in TF 2.x.
    return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
def get_shape_list(tensor, expected_rank=None, name=None):
    """Return the shape of `tensor` as a list, preferring static dimensions.

    Dimensions known at graph-construction time come back as Python ints;
    any dimension that is dynamic is returned as a scalar tf.Tensor.

    Args:
        tensor: A tf.Tensor object to find the shape of.
        expected_rank: (optional) int or list of ints. If given, the
            tensor's rank is validated and an exception is raised on
            mismatch.
        name: Optional name of the tensor for the error message.

    Returns:
        A list of dimensions (Python ints and/or scalar tf.Tensors).
    """
    if name is None:
        name = tensor.name

    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)

    static_shape = tensor.shape.as_list()
    dynamic_axes = [axis for axis, size in enumerate(static_shape)
                    if size is None]
    if not dynamic_axes:
        # Fully static shape: no graph ops needed.
        return static_shape

    runtime_shape = tf.shape(tensor)
    for axis in dynamic_axes:
        static_shape[axis] = runtime_shape[axis]
    return static_shape
The provided code snippet includes the necessary dependencies for implementing the `embedding_postprocessor` function. Write a Python function `def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", positions=None, use_position_embeddings=True, position_embedding_name=None, initializer_range=0.02, max_position_embeddings=1024, dropout_prob=0.1)` to solve the following problem:
Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid.
Here is the function:
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            positions=None,
                            use_position_embeddings=True,
                            position_embedding_name=None,
                            initializer_range=0.02,
                            max_position_embeddings=1024,
                            dropout_prob=0.1):
    """Performs various post-processing on a word embedding tensor.

    Adds (optional) token-type embeddings and (optional) position
    embeddings looked up by explicit position-id tensors, then applies
    layer norm and dropout.

    Args:
        input_tensor: float Tensor of shape [batch_size, seq_length,
            embedding_size].
        use_token_type: bool. Whether to add embeddings for `token_type_ids`.
        token_type_ids: (optional) int32 Tensor of shape [batch_size,
            seq_length]. Must be specified if `use_token_type` is True.
        token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
        token_type_embedding_name: string. The name of the embedding table
            variable for token type ids.
        positions: int Tensor of shape [batch_size, seq_length] with
            explicit position ids, or a list/tuple of such tensors (one
            per position-embedding table). Must match the type of
            `position_embedding_name`.
        use_position_embeddings: bool. Whether to add position embeddings
            for the position of each token in the sequence.
        position_embedding_name: string (or list/tuple of strings, parallel
            to `positions`). The name of the embedding table variable(s)
            for positional embeddings.
        initializer_range: float. Range of the weight initialization.
        max_position_embeddings: int. Maximum sequence length that might
            ever be used with this model. This can be longer than the
            sequence length of input_tensor, but cannot be shorter.
        dropout_prob: float. Dropout probability applied to the final
            output tensor.

    Returns:
        float tensor with same shape as `input_tensor`.

    Raises:
        ValueError: One of the tensor shapes or input values is invalid.
    """
    input_shape = get_shape_list(input_tensor, expected_rank=3)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    width = input_shape[2]
    output = input_tensor
    if use_token_type:
        if token_type_ids is None:
            raise ValueError("`token_type_ids` must be specified if"
                             "`use_token_type` is True.")
        token_type_table = tf.compat.v1.get_variable(
            name=token_type_embedding_name,
            shape=[token_type_vocab_size, width],
            initializer=create_initializer(initializer_range))
        # This vocab will be small so we always do one-hot here, since it is
        # always faster for a small vocabulary.
        flat_token_type_ids = tf.reshape(token_type_ids, [-1])
        one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
        token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
        token_type_embeddings = tf.reshape(token_type_embeddings,
                                           [batch_size, seq_length, width])
        output += token_type_embeddings
    if use_position_embeddings:
        # Normalize `positions`/`position_embedding_name` to parallel lists
        # so single- and multi-table cases share one code path below.
        if type(positions) in [list, tuple]:
            assert type(position_embedding_name) in [list, tuple]
        else:
            assert type(position_embedding_name) is str
            positions = [positions]
            position_embedding_name = [position_embedding_name]
        # Split `width` across the tables; the last table absorbs any
        # remainder so the concatenated result is exactly `width` wide.
        if len(positions) == 1:
            widths = [width]
        else:
            widths = [width // len(positions)] * len(positions)
            widths[-1] = width - sum(widths[:-1])
        # Fail fast at runtime if the sequence exceeds the table size.
        assert_op = tf.compat.v1.assert_less_equal(seq_length, max_position_embeddings)
        with tf.control_dependencies([assert_op]):
            full_position_embeddings = [tf.compat.v1.get_variable(
                name=position_embedding_name[i],
                shape=[max_position_embeddings, widths[i]],
                initializer=create_initializer(initializer_range)) for i in range(len(positions))]
            all_positional_embeddings = []
            for i in range(len(positions)):
                position_embeddings_id = positions[i]
                if position_embeddings_id.shape.ndims == 2:
                    position_embeddings_id = tf.expand_dims(position_embeddings_id, axis=[-1])
                # e.g. (B, L, 1) -> (B*L,) flat ids for the gather.
                flat_input_ids = tf.reshape(position_embeddings_id, [-1])
                position_embeddings = tf.gather(full_position_embeddings[i], flat_input_ids)
                # (B*L, widths[i]) -> (B, L, widths[i])
                input_shape = get_shape_list(position_embeddings_id)
                position_embeddings = tf.reshape(position_embeddings,
                                                 input_shape[0:-1] + [input_shape[-1] * widths[i]])
                all_positional_embeddings.append(position_embeddings)
            # Concatenate per-table slices back to full `width`.
            position_embeddings = tf.concat(all_positional_embeddings, -1)
            output += position_embeddings
    output = layer_norm_and_dropout(output, dropout_prob)
    return output
18,899 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import tensorflow as tf
from modeling import (BertConfig,
reshape_to_matrix,
reshape_from_matrix,
get_shape_list,
get_activation,
gelu,
dropout,
layer_norm,
layer_norm_and_dropout,
create_attention_mask_from_input_mask,
create_initializer)
from gpu_environment import get_custom_getter
def attention_layer_with_reset_attention_scores(from_tensor,
                                                to_tensor,
                                                reset_attention_scores,
                                                compute_type=tf.float32,
                                                attention_mask=None,
                                                num_attention_heads=1,
                                                size_per_head=512,
                                                query_act=None,
                                                key_act=None,
                                                value_act=None,
                                                attention_probs_dropout_prob=0.0,
                                                initializer_range=0.02,
                                                do_return_2d_tensor=False,
                                                batch_size=None,
                                                from_seq_length=None,
                                                to_seq_length=None,
                                                do_return_attention_maps=False, ):
    """Performs multi-headed attention from `from_tensor` to `to_tensor`.

    This is an implementation of multi-headed attention based on "Attention
    is all you Need". If `from_tensor` and `to_tensor` are the same, then
    this is self-attention. Each timestep in `from_tensor` attends to the
    corresponding sequence in `to_tensor`, and returns a fixed-width vector.

    This function first projects `from_tensor` into a "query" tensor and
    `to_tensor` into "key" and "value" tensors. These are (effectively) a list
    of tensors of length `num_attention_heads`, where each tensor is of shape
    [batch_size, seq_length, size_per_head].

    Then, the query and key tensors are dot-producted and scaled. These are
    softmaxed to obtain attention probabilities. The value tensors are then
    interpolated by these probabilities, then concatenated back to a single
    tensor and returned.

    In practice, the multi-headed attention are done with transposes and
    reshapes rather than actual separate tensors.

    Args:
      from_tensor: float Tensor of shape [batch_size, from_seq_length,
        from_width].
      to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
      reset_attention_scores: float Tensor broadcastable to [batch_size,
        num_attention_heads, from_seq_length, to_seq_length], added to the
        raw (pre-mask, pre-softmax) attention scores. Presumably produced by
        `compute_reset_attention_scores` -- TODO confirm with callers.
      compute_type: tf dtype the attention probabilities are saturate-cast
        to before the value matmul (e.g. tf.float16 for mixed precision).
      attention_mask: (optional) int32 Tensor of shape [batch_size,
        from_seq_length, to_seq_length]. The values should be 1 or 0. The
        attention scores will effectively be set to -infinity for any positions in
        the mask that are 0, and will be unchanged for positions that are 1.
      num_attention_heads: int. Number of attention heads.
      size_per_head: int. Size of each attention head.
      query_act: (optional) Activation function for the query transform.
      key_act: (optional) Activation function for the key transform.
      value_act: (optional) Activation function for the value transform.
      attention_probs_dropout_prob: (optional) float. Dropout probability of the
        attention probabilities.
      initializer_range: float. Range of the weight initializer.
      do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
        * from_seq_length, num_attention_heads * size_per_head]. If False, the
        output will be of shape [batch_size, from_seq_length, num_attention_heads
        * size_per_head].
      batch_size: (Optional) int. If the input is 2D, this might be the batch size
        of the 3D version of the `from_tensor` and `to_tensor`.
      from_seq_length: (Optional) If the input is 2D, this might be the seq length
        of the 3D version of the `from_tensor`.
      to_seq_length: (Optional) If the input is 2D, this might be the seq length
        of the 3D version of the `to_tensor`.
      do_return_attention_maps: Whether to also return attention map of all layers.

    Returns:
      float Tensor of shape [batch_size, from_seq_length,
      num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
      true, this will be of shape [batch_size * from_seq_length,
      num_attention_heads * size_per_head]).

    Raises:
      ValueError: Any of the arguments or tensor shapes are invalid.
    """

    def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
                             seq_length, width):
        # [B*S, N*H] -> [B, N, S, H] so heads become a batch-like axis.
        output_tensor = tf.reshape(
            input_tensor, [batch_size, seq_length, num_attention_heads, width])
        output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
        return output_tensor

    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
    if len(from_shape) != len(to_shape):
        raise ValueError(
            "The rank of `from_tensor` must match the rank of `to_tensor`.")
    if len(from_shape) == 3:
        batch_size = from_shape[0]
        from_seq_length = from_shape[1]
        to_seq_length = to_shape[1]
    elif len(from_shape) == 2:
        if (batch_size is None or from_seq_length is None or to_seq_length is None):
            raise ValueError(
                "When passing in rank 2 tensors to attention_layer, the values "
                "for `batch_size`, `from_seq_length`, and `to_seq_length` "
                "must all be specified.")
    # Scalar dimensions referenced here:
    #   B = batch size (number of sequences)
    #   F = `from_tensor` sequence length
    #   T = `to_tensor` sequence length
    #   N = `num_attention_heads`
    #   H = `size_per_head`
    from_tensor_2d = reshape_to_matrix(from_tensor)
    to_tensor_2d = reshape_to_matrix(to_tensor)
    # `query_layer` = [B*F, N*H]
    query_layer = tf.compat.v1.layers.dense(
        from_tensor_2d,
        num_attention_heads * size_per_head,
        activation=query_act,
        name="query",
        kernel_initializer=create_initializer(initializer_range))
    # `key_layer` = [B*T, N*H]
    key_layer = tf.compat.v1.layers.dense(
        to_tensor_2d,
        num_attention_heads * size_per_head,
        activation=key_act,
        name="key",
        kernel_initializer=create_initializer(initializer_range))
    # `value_layer` = [B*T, N*H]
    value_layer = tf.compat.v1.layers.dense(
        to_tensor_2d,
        num_attention_heads * size_per_head,
        activation=value_act,
        name="value",
        kernel_initializer=create_initializer(initializer_range))
    # `query_layer` = [B, N, F, H]
    query_layer = transpose_for_scores(query_layer, batch_size,
                                       num_attention_heads, from_seq_length,
                                       size_per_head)
    # `key_layer` = [B, N, T, H]
    key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
                                     to_seq_length, size_per_head)
    # Take the dot product between "query" and "key" to get the raw
    # attention scores.
    # `attention_scores` = [B, N, F, T]
    attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
    attention_scores = tf.multiply(attention_scores,
                                   1.0 / math.sqrt(float(size_per_head)))
    # Inject the externally computed positional/"reset" score term before
    # masking and softmax.
    attention_scores = attention_scores + reset_attention_scores
    if attention_mask is not None:
        # `attention_mask` = [B, 1, F, T]
        attention_mask = tf.expand_dims(attention_mask, axis=[1])
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        adder = (1.0 - tf.cast(attention_mask, attention_scores.dtype)) * -10000.0
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        attention_scores += adder
    # Normalize the attention scores to probabilities.
    # `attention_probs` = [B, N, F, T]
    attention_probs = tf.nn.softmax(attention_scores)
    # Cast down (e.g. to fp16) for the value matmul when using mixed precision.
    attention_probs = tf.saturate_cast(attention_probs, compute_type)
    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
    # `value_layer` = [B, T, N, H]
    value_layer = tf.reshape(
        value_layer,
        [batch_size, to_seq_length, num_attention_heads, size_per_head])
    # `value_layer` = [B, N, T, H]
    value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
    # `context_layer` = [B, N, F, H]
    context_layer = tf.matmul(attention_probs, value_layer)
    # `context_layer` = [B, F, N, H]
    context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
    if do_return_2d_tensor:
        # `context_layer` = [B*F, N*H]
        context_layer = tf.reshape(
            context_layer,
            [batch_size * from_seq_length, num_attention_heads * size_per_head])
    else:
        # `context_layer` = [B, F, N*H]
        context_layer = tf.reshape(
            context_layer,
            [batch_size, from_seq_length, num_attention_heads * size_per_head])
    if do_return_attention_maps:
        return context_layer, attention_probs
    return context_layer
def compute_reset_attention_scores(position_embeddings_ids, max_position_embeddings, embedding_size,
                                   num_attention_heads, size_per_head, initializer_range,
                                   max_relative_position=128):
    """Compute a per-head positional attention-score term from span positions.

    Combines three learned components into one [B, N, L, L] score tensor:
      1. a query/key dot-product over concatenated start/end position
         embeddings,
      2. four relative-distance score tables (start-start, start-end,
         end-start, end-end), and
      3. a table indexed by a bitmask encoding the ordering/overlap relation
         between the (start, end) spans of each token pair.
    The first row and column of the result are then replaced ("reset") by
    scores derived from two learned theta vectors -- presumably so a special
    leading token (e.g. [CLS]) gets dedicated scores; TODO confirm intent.

    Args:
        position_embeddings_ids: list/tuple of exactly two int Tensors
            [batch_size, seq_length]: start positions and end positions.
        max_position_embeddings: int. Size of the position-embedding tables.
        embedding_size: int. Width of each position-embedding table.
        num_attention_heads: int. Number of attention heads (N).
        size_per_head: int. Per-head width for the query/key projections.
        initializer_range: float. Stddev for weight initialization.
        max_relative_position: int. Relative distances are clipped to
            [-max_relative_position, max_relative_position].

    Returns:
        float Tensor of shape [batch_size, num_attention_heads, seq_length,
        seq_length] to be added to raw attention scores.
    """
    # Shape letters used below: B = batch size, E = embedding_size,
    # L = seq_length, N = num_attention_heads, H = size_per_head.
    def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
                             seq_length, width):
        # [B*L, N*H] -> [B, N, L, H]
        output_tensor = tf.reshape(
            input_tensor, [batch_size, seq_length, num_attention_heads, width])
        output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
        return output_tensor

    if position_embeddings_ids is not None:
        assert type(position_embeddings_ids) in (list, tuple)
        assert len(position_embeddings_ids) == 2
    start_position_embedding_table = tf.compat.v1.get_variable(
        name='start_position_embeddings',
        shape=[max_position_embeddings, embedding_size],
        initializer=create_initializer(initializer_range))
    end_position_embedding_table = tf.compat.v1.get_variable(
        name='end_position_embeddings',
        shape=[max_position_embeddings, embedding_size],
        initializer=create_initializer(initializer_range))
    position_embedding_tables = [start_position_embedding_table, end_position_embedding_table]
    position_embedding_outputs = []
    for i in range(len(position_embeddings_ids)):
        # position_embeddings_id: [B, L]
        position_embeddings_id = position_embeddings_ids[i]
        if position_embeddings_id.shape.ndims == 2:
            position_embeddings_id = tf.expand_dims(position_embeddings_id, axis=[-1])
        # flat_input_ids: [B*L]
        flat_input_ids = tf.reshape(position_embeddings_id, [-1])
        # position_embeddings: [B*L, E]
        position_embeddings = tf.gather(position_embedding_tables[i], flat_input_ids)
        # input_shape = (B, L, 1)
        input_shape = get_shape_list(position_embeddings_id)
        # position_embeddings: [B, L, E]
        position_embeddings = tf.reshape(position_embeddings,
                                         input_shape[0:-1] + [input_shape[-1] * embedding_size])
        position_embedding_outputs.append(position_embeddings)
    # position_embeddings: [B, L, 2*E] (start embedding concat end embedding)
    position_embeddings = tf.concat(position_embedding_outputs, axis=-1)
    from_tensor = to_tensor = position_embeddings
    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
    if len(from_shape) != len(to_shape):
        raise ValueError(
            "The rank of `from_tensor` must match the rank of `to_tensor`.")
    assert len(from_shape) == 3
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
    # `from_tensor_2d` = [B*L, 2*E]
    from_tensor_2d = reshape_to_matrix(position_embeddings)
    # `to_tensor_2d` = [B*L, 2*E]
    to_tensor_2d = reshape_to_matrix(position_embeddings)
    # `query_layer` = [B*L, N*H]
    query_layer = tf.compat.v1.layers.dense(
        from_tensor_2d,
        num_attention_heads * size_per_head,
        activation=None,
        name="query_position",
        kernel_initializer=create_initializer(initializer_range))
    # `key_layer` = [B*L, N*H]
    key_layer = tf.compat.v1.layers.dense(
        to_tensor_2d,
        num_attention_heads * size_per_head,
        activation=None,
        name="key_position",
        kernel_initializer=create_initializer(initializer_range))
    # `query_layer` = [B, N, L, H]
    query_layer = transpose_for_scores(query_layer, batch_size,
                                       num_attention_heads, from_seq_length,
                                       size_per_head)
    # `key_layer` = [B, N, L, H]
    key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
                                     to_seq_length, size_per_head)
    # Take the dot product between "query" and "key" to get the raw
    # attention scores.
    # `attention_scores` = [B, N, L, L]
    attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
    attention_scores = tf.multiply(attention_scores,
                                   1.0 / math.sqrt(float(size_per_head)))

    def build_relative_positions(range_vec_k, range_vec_q, max_relative_position):
        """Generate a matrix of clipped relative positions, shifted to be >= 0."""
        # range_vec_k, range_vec_q: [B, L]; distance_mat: [B, L, L]
        distance_mat = range_vec_k[:, None, :] - range_vec_q[:, :, None]
        distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position,
                                                max_relative_position)
        # Shift values to be >= 0. Each integer still uniquely identifies a
        # relative position difference.
        final_mat = distance_mat_clipped + max_relative_position
        return final_mat

    start_positions, end_positions = position_embeddings_ids
    # All four (from, to) combinations of span start/end positions.
    pairs = [[start_positions, start_positions],
             [start_positions, end_positions],
             [end_positions, start_positions],
             [end_positions, end_positions]]
    relative_position_scores = []
    # Generates scores for each relative position of dimension depth.
    for i, (from_position, to_position) in enumerate(pairs):
        relative_position_score_table = tf.compat.v1.get_variable(
            name=f"relative_position_scores_{i}",
            shape=[max_relative_position * 2 + 1, num_attention_heads],
            initializer=create_initializer(initializer_range))
        # `relative_positions_matrix` = [B, L, L]
        relative_positions_matrix = build_relative_positions(
            from_position, to_position,
            max_relative_position=max_relative_position)
        # `relative_position_score` = [B, L, L, N]
        relative_position_score = tf.gather(relative_position_score_table, relative_positions_matrix)
        # `relative_position_score` = [B, N, L, L]
        relative_position_score = tf.transpose(relative_position_score, [0, 3, 1, 2])
        relative_position_scores.append(relative_position_score)
    from_position_head = start_positions[:, None, :]  # (B, 1, L)
    from_position_tail = end_positions[:, None, :]  # (B, 1, L)
    to_position_head = start_positions[:, :, None]  # (B, L, 1)
    to_position_tail = end_positions[:, :, None]  # (B, L, 1)
    # Bitmask over five boolean ordering/overlap relations between the two
    # spans; each token pair maps to one of 32 relation codes.
    # `relative_position_encoding` = (B, L, L)
    relative_position_encoding = \
        1 * tf.cast(tf.greater_equal(from_position_head, to_position_head), tf.int64) + \
        2 * tf.cast(tf.greater(from_position_head, to_position_tail), tf.int64) + \
        4 * tf.cast(tf.greater_equal(from_position_tail, to_position_head), tf.int64) + \
        8 * tf.cast(tf.greater_equal(from_position_tail, to_position_tail), tf.int64) + \
        16 * (tf.cast(tf.equal(from_position_head, to_position_head), tf.int64) *
              tf.cast(tf.equal(from_position_tail, to_position_tail), tf.int64))
    # use 2^5 to encode relative position codes
    # 32 is the magic number
    relative_position_encoding_embedding_table = tf.compat.v1.get_variable(
        name="relative_type_embeddings_d1",
        shape=[32, num_attention_heads],
        initializer=create_initializer(initializer_range))
    # `relative_position_encoding_score` = (B, L, L, N)
    relative_position_encoding_score = tf.gather(relative_position_encoding_embedding_table,
                                                 relative_position_encoding)
    # `relative_position_encoding_score` = (B, N, L, L)
    relative_position_encoding_score = tf.transpose(relative_position_encoding_score, [0, 3, 1, 2])
    # `unreset_attention_scores` = (B, N, L, L)
    unreset_attention_scores = relative_position_encoding_score + sum(relative_position_scores) + attention_scores
    in_reset_shape = tf.shape(unreset_attention_scores)
    reset_theta_1_raw = tf.compat.v1.get_variable(
        name="relative_att_reset_theta_1",
        shape=[1, 1, 1, 2 * embedding_size],
        initializer=create_initializer(initializer_range))
    reset_theta_2_raw = tf.compat.v1.get_variable(
        name="relative_att_reset_theta_2",
        shape=[1, 1, 1, 2 * embedding_size],
        initializer=create_initializer(initializer_range))

    def f21(input_tensor):
        """Project a theta vector through the shared position query/key layers
        and return its scaled self-attention score of shape [1, N, 1, 1]."""
        # NOTE(review): uses tf.layers.dense (removed in TF 2.x) while the
        # rest of this function uses tf.compat.v1.layers.dense -- confirm the
        # target TF version; reuse=True shares weights with the layers above.
        # `q_l` = [1*1, N*H]
        q_l = tf.layers.dense(
            input_tensor,
            num_attention_heads * size_per_head,
            activation=None,
            name="query_position",
            kernel_initializer=create_initializer(initializer_range), reuse=True)
        # `k_l` = [1*1, N*H]
        k_l = tf.layers.dense(
            input_tensor,
            num_attention_heads * size_per_head,
            activation=None,
            name="key_position",
            kernel_initializer=create_initializer(initializer_range), reuse=True)
        q_l = transpose_for_scores(q_l, 1, num_attention_heads, 1, size_per_head)
        k_l = transpose_for_scores(k_l, 1, num_attention_heads, 1, size_per_head)
        # Take the dot product between "query" and "key" to get the raw
        # attention scores.
        attention_scores = tf.matmul(q_l, k_l, transpose_b=True)
        attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(size_per_head)))
        return attention_scores

    reset_theta_1 = f21(reset_theta_1_raw)
    reset_theta_2 = f21(reset_theta_2_raw)
    # Broadcast the theta scores across the batch and along one axis each:
    # theta_1 fills a row, theta_2 fills a column.
    reset_theta_1 = tf.tile(reset_theta_1, [in_reset_shape[0], 1, 1, in_reset_shape[-1] - 1])
    reset_theta_2 = tf.tile(reset_theta_2, [in_reset_shape[0], 1, in_reset_shape[-1], 1])
    # Drop the first row and column, then re-attach them from the thetas.
    unreset_attention_scores = unreset_attention_scores[:, :, 1:, 1:]
    # shapes: (B, N, L-1, L-1)
    reset_attention_scores = tf.concat([reset_theta_1, unreset_attention_scores], axis=-2)
    # shapes: (B, N, L, L-1)
    reset_attention_scores = tf.concat([reset_theta_2, reset_attention_scores], axis=-1)
    # shapes: (B, N, L, L)
    return reset_attention_scores  # , key_layer_rel
def gelu(x):
    """Gaussian Error Linear Unit (tanh approximation).

    This is a smoother version of the RELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
        x: float Tensor to perform activation.

    Returns:
        `x` with the GELU activation applied (same shape and dtype as `x`).
    """
    # Use `math` (imported at the top of this module) instead of `np`:
    # numpy is not imported here, so the original `np.sqrt(2 / np.pi)` would
    # raise NameError at call time. The argument is a plain Python float, so
    # math.sqrt is an exact drop-in replacement.
    cdf = 0.5 * (1.0 + tf.tanh(
        (math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))))
    return x * cdf
def dropout(input_tensor, dropout_prob):
    """Apply dropout to `input_tensor`.

    Args:
        input_tensor: float Tensor.
        dropout_prob: Python float. The probability of *dropping* a value
            (NOT of keeping a dimension as in the historical
            `tf.nn.dropout` keep_prob). May be None.

    Returns:
        `input_tensor` unchanged when `dropout_prob` is None or 0.0,
        otherwise a version of `input_tensor` with dropout applied.
    """
    # Fast path: no-op when dropout is disabled.
    if dropout_prob in (None, 0.0):
        return input_tensor
    return tf.nn.dropout(input_tensor, rate=dropout_prob)
def layer_norm(input_tensor, name=None):
    """Run layer normalization on the last dimension of the tensor.

    Args:
        input_tensor: float Tensor to normalize.
        name: Optional variable scope name for the norm parameters.

    Returns:
        A Tensor of the same shape as `input_tensor`, layer-normalized
        over its last dimension.
    """
    # For fp16 inputs, prefer an optional fused kernel when the
    # `fused_layer_norm` module is installed; otherwise fall back to the
    # standard contrib implementation.
    if input_tensor.dtype == tf.float16:
        try:
            from fused_layer_norm import fused_layer_norm
            return fused_layer_norm(
                inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name,
                use_fused_batch_norm=True)
        except ImportError:
            return tf.contrib.layers.layer_norm(
                inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
    else:
        # NOTE(review): tf.contrib exists only in TF 1.x, while other code in
        # this file uses the tf.compat.v1 namespace -- confirm target TF version.
        return tf.contrib.layers.layer_norm(
            inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def create_initializer(initializer_range=0.02):
    """Create a truncated-normal initializer with the given stddev.

    Args:
        initializer_range: float. Stddev of the truncated normal
            distribution used to initialize weights.

    Returns:
        A TensorFlow initializer object.
    """
    # Use the tf.compat.v1 namespace for consistency with the rest of this
    # file (tf.compat.v1.get_variable, tf.compat.v1.logging, ...); the bare
    # tf.truncated_normal_initializer alias was removed in TF 2.x.
    return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
def get_shape_list(tensor, expected_rank=None, name=None):
    """Return the shape of `tensor` as a list, preferring static dimensions.

    Dimensions known at graph-construction time come back as Python ints;
    any dimension that is dynamic is returned as a scalar tf.Tensor.

    Args:
        tensor: A tf.Tensor object to find the shape of.
        expected_rank: (optional) int or list of ints. If given, the
            tensor's rank is validated and an exception is raised on
            mismatch.
        name: Optional name of the tensor for the error message.

    Returns:
        A list of dimensions (Python ints and/or scalar tf.Tensors).
    """
    if name is None:
        name = tensor.name

    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)

    static_shape = tensor.shape.as_list()
    dynamic_axes = [axis for axis, size in enumerate(static_shape)
                    if size is None]
    if not dynamic_axes:
        # Fully static shape: no graph ops needed.
        return static_shape

    runtime_shape = tf.shape(tensor)
    for axis in dynamic_axes:
        static_shape[axis] = runtime_shape[axis]
    return static_shape
def reshape_to_matrix(input_tensor):
    """Reshape a tensor of rank >= 2 into a rank-2 matrix.

    The last dimension is preserved as the matrix width; all leading
    dimensions are flattened together.
    """
    rank = input_tensor.shape.ndims
    if rank < 2:
        raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                         (input_tensor.shape))
    if rank == 2:
        # Already a matrix; nothing to do.
        return input_tensor
    last_dim = input_tensor.shape[-1]
    return tf.reshape(input_tensor, [-1, last_dim])
def reshape_from_matrix(output_tensor, orig_shape_list):
    """Undo `reshape_to_matrix`: restore a rank-2 tensor to its original rank.

    The width of `output_tensor` replaces the last entry of
    `orig_shape_list`, so the projection layer may have changed it.
    """
    if len(orig_shape_list) == 2:
        # Original was already a matrix; nothing to undo.
        return output_tensor
    leading_dims = orig_shape_list[0:-1]
    width = get_shape_list(output_tensor)[-1]
    return tf.reshape(output_tensor, leading_dims + [width])
The provided code snippet includes the necessary dependencies for implementing the `transformer_model_preln` function. Write a Python function `def transformer_model_preln(input_tensor, compute_type, position_embeddings_ids=None, attention_mask=None, max_position_embeddings=None, hidden_size=768, embedding_size=128, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, intermediate_act_fn=gelu, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_share_parameter_across_layers=False, do_return_all_layers=False, do_return_attention_maps=False,)` to solve the following problem:
Transformer model from "On layer normalization in the transformer architecture". See the original paper: https://openreview.net/pdf?id=B1x8anVFPr Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_share_parameter_across_layers: do_return_all_layers: Whether to also return all layers or just the final layer. do_return_attention_maps: Whether to also return attention map of all layers. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid.
Here is the function:
def transformer_model_preln(input_tensor,
                            compute_type,
                            position_embeddings_ids=None,
                            attention_mask=None,
                            max_position_embeddings=None,
                            hidden_size=768,
                            embedding_size=128,
                            num_hidden_layers=12,
                            num_attention_heads=12,
                            intermediate_size=3072,
                            intermediate_act_fn=gelu,
                            hidden_dropout_prob=0.1,
                            attention_probs_dropout_prob=0.1,
                            initializer_range=0.02,
                            do_share_parameter_across_layers=False,
                            do_return_all_layers=False,
                            do_return_attention_maps=False,):
  """Transformer model from "On layer normalization in the transformer architecture".
  See the original paper:
  https://openreview.net/pdf?id=B1x8anVFPr
  Pre-LN variant: layer normalization is applied to the *input* of each
  sub-layer rather than after the residual add (post-LN, as in vanilla BERT).
  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
    compute_type: tf dtype the attention math runs in (e.g. tf.float16 for
      mixed precision); the reset attention scores are cast to it.
    position_embeddings_ids: (optional) position ids forwarded to
      `compute_reset_attention_scores`.
    attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
      seq_length], with 1 for positions that can be attended to and 0 in
      positions that should not be.
    max_position_embeddings: (optional) forwarded to
      `compute_reset_attention_scores`.
    hidden_size: int. Hidden size of the Transformer.
    embedding_size: int. Embedding size used by the reset attention scores.
    num_hidden_layers: int. Number of layers (blocks) in the Transformer.
    num_attention_heads: int. Number of attention heads in the Transformer.
    intermediate_size: int. The size of the "intermediate" (a.k.a., feed
      forward) layer.
    intermediate_act_fn: function. The non-linear activation function to apply
      to the output of the intermediate/feed-forward layer.
    hidden_dropout_prob: float. Dropout probability for the hidden layers.
    attention_probs_dropout_prob: float. Dropout probability of the attention
      probabilities.
    initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    do_share_parameter_across_layers: bool. If True, every layer re-enters the
      same variable scope (reuse after the first pass), ALBERT-style.
    do_return_all_layers: Whether to also return all layers or just the final
      layer.
    do_return_attention_maps: Whether to also return attention map of all layers.
  Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer.
  Raises:
    ValueError: A Tensor shape or parameter is invalid.
  """
  if hidden_size % num_attention_heads != 0:
    raise ValueError(
        "The hidden size (%d) is not a multiple of the number of attention "
        "heads (%d)" % (hidden_size, num_attention_heads))
  attention_head_size = int(hidden_size / num_attention_heads)
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  input_width = input_shape[2]
  # Position-dependent attention bias, built once and shared by every layer.
  reset_attention_scores = compute_reset_attention_scores(position_embeddings_ids,
                                                          max_position_embeddings, embedding_size,
                                                          num_attention_heads=num_attention_heads,
                                                          size_per_head=attention_head_size,
                                                          initializer_range=initializer_range)
  # The Transformer performs sum residuals on all layers so the input needs
  # to be the same as the hidden size.
  if input_width != hidden_size:
    raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
                     (input_width, hidden_size))
  # We keep the representation as a 2D tensor to avoid re-shaping it back and
  # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
  # the GPU/CPU but may not be free on the TPU, so we want to minimize them to
  # help the optimizer.
  prev_output = reshape_to_matrix(input_tensor)
  all_layer_outputs = []
  all_layer_attention_maps = []
  for layer_idx in range(num_hidden_layers):
    # When sharing parameters, all layers use one scope; reuse=True after the
    # first iteration makes tf.layers.dense / get_variable bind to the same
    # variables each time.
    if do_share_parameter_across_layers:
      name_variable_scope = "layer_shared"
    else:
      name_variable_scope = "layer_%d" % layer_idx
    with tf.compat.v1.variable_scope(name_variable_scope,
                                     reuse=do_share_parameter_across_layers and layer_idx > 0):
      layer_input = prev_output
      with tf.compat.v1.variable_scope("attention"):
        attention_heads = []
        with tf.compat.v1.variable_scope("self"):
          # Pre-LN: normalize before the attention sub-layer.
          # NOTE(review): `layer_input` is overwritten here, so the residual
          # add below uses the *normalized* input; standard pre-LN adds the
          # raw input — confirm this is intended.
          layer_input = layer_norm(layer_input)
          payload = attention_layer_with_reset_attention_scores(
              from_tensor=layer_input,
              to_tensor=layer_input,
              reset_attention_scores=tf.saturate_cast(reset_attention_scores, compute_type),
              attention_mask=attention_mask,
              num_attention_heads=num_attention_heads,
              size_per_head=attention_head_size,
              attention_probs_dropout_prob=attention_probs_dropout_prob,
              initializer_range=initializer_range,
              do_return_2d_tensor=True,
              batch_size=batch_size,
              from_seq_length=seq_length,
              to_seq_length=seq_length,
              do_return_attention_maps=do_return_attention_maps,)
          # The helper returns (context, probs) only when attention maps are
          # requested; unpack accordingly.
          if do_return_attention_maps:
            attention_head, attention_prob = payload
            attention_heads.append(attention_head)
            all_layer_attention_maps.append(attention_prob)
          else:
            attention_head = payload
            attention_heads.append(attention_head)
        attention_output = None
        if len(attention_heads) == 1:
          attention_output = attention_heads[0]
        else:
          # In the case where we have other sequences, we just concatenate
          # them to the self-attention head before the projection.
          attention_output = tf.concat(attention_heads, axis=-1)
        # Run a linear projection of `hidden_size` then add a residual
        # with `layer_input`.
        with tf.compat.v1.variable_scope("output"):
          attention_output = tf.layers.dense(
              attention_output,
              hidden_size,
              kernel_initializer=create_initializer(initializer_range))
          attention_output = dropout(attention_output, hidden_dropout_prob)
          # Residual add; no layer_norm after it (pre-LN scheme).
          attention_output = attention_output + layer_input
      # The activation is only applied to the "intermediate" hidden layer.
      with tf.compat.v1.variable_scope("intermediate"):
        attention_output = layer_norm(attention_output)
        intermediate_output = tf.layers.dense(
            attention_output,
            intermediate_size,
            activation=intermediate_act_fn,
            kernel_initializer=create_initializer(initializer_range))
      # Down-project back to `hidden_size` then add the residual.
      with tf.compat.v1.variable_scope("output"):
        layer_output = tf.layers.dense(
            intermediate_output,
            hidden_size,
            kernel_initializer=create_initializer(initializer_range))
        layer_output = dropout(layer_output, hidden_dropout_prob)
        layer_output = layer_output + attention_output
        prev_output = layer_output
        all_layer_outputs.append(layer_output)
  if do_return_all_layers:
    final_outputs = []
    for layer_output in all_layer_outputs:
      final_output = reshape_from_matrix(layer_output, input_shape)
      final_outputs.append(final_output)
    if do_return_attention_maps:
      return final_outputs, all_layer_attention_maps
    return final_outputs
  else:
    final_output = reshape_from_matrix(prev_output, input_shape)
    return final_output
18,900 | import tensorflow as tf
def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
                                    initializer=None, regularizer=None,
                                    trainable=True,
                                    *args, **kwargs):
  """Custom variable getter that stores trainable variables in float32.

  Used as the `custom_getter` of a variable scope during mixed-precision
  (float16) training: the master copy of every trainable variable is kept
  in float32 so optimizer updates don't lose precision, and the variable is
  cast back to the requested compute dtype when read by the graph.

  The signature follows the `custom_getter` contract of
  tf.compat.v1.get_variable; `getter` is the underlying variable getter.

  Returns:
    The (possibly cast) variable tensor in the requested `dtype`.
  """
  # The original body was missing here (syntax error); restored to the
  # standard fp32-master-weights getter used with mixed-precision TF1 BERT.
  storage_dtype = tf.float32 if trainable else dtype
  variable = getter(name, shape, dtype=storage_dtype,
                    initializer=initializer, regularizer=regularizer,
                    trainable=trainable, *args, **kwargs)
  # Cast back to the compute dtype (e.g. float16) for use in the graph.
  if trainable and dtype != tf.float32:
    variable = tf.cast(variable, dtype)
  return variable


def get_custom_getter(compute_type):
  """Return the fp32-storage getter when computing in float16, else None."""
  return float32_variable_storage_getter if compute_type == tf.float16 else None
18,901 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import modeling
import modeling_labert
import optimization
import tensorflow as tf
import shutil
import random
# tf.flags was removed from the TF2 top-level namespace; restore it from
# compat.v1 and bind a module-level `flags` alias so `FLAGS = flags.FLAGS`
# resolves (previously `flags` was an undefined name -> NameError on import).
tf.flags = tf.compat.v1.flags
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
def get_masked_lm_output(labert_config, input_tensor, output_weights, positions,
                         label_ids, label_weights):
  """Get loss and log probs for the masked LM.

  Args:
    labert_config: model config; `embedding_size`, `hidden_act`,
      `initializer_range` and `vocab_size` are read here.
    input_tensor: sequence output of the encoder.
    output_weights: embedding table reused as the (tied) softmax weights.
    positions: positions of the masked tokens within each sequence.
    label_ids: gold token ids at the masked positions.
    label_weights: 1.0 for real predictions, 0.0 for padding predictions.

  Returns:
    (loss, per_example_loss, log_probs) tuple.
  """
  # Keep only the hidden states at the masked positions.
  input_tensor = gather_indexes(input_tensor, positions)
  with tf.compat.v1.variable_scope("cls/predictions"):
    # We apply one more non-linear transformation before the output layer.
    # This matrix is not used after pre-training.
    with tf.compat.v1.variable_scope("transform"):
      input_tensor = tf.layers.dense(
          input_tensor,
          units=labert_config.embedding_size,
          activation=modeling.get_activation(labert_config.hidden_act),
          kernel_initializer=modeling.create_initializer(
              labert_config.initializer_range))
      input_tensor = modeling.layer_norm(input_tensor)
    # The output weights are the same as the input embeddings, but there is
    # an output-only bias for each token.
    output_bias = tf.compat.v1.get_variable(
        "output_bias",
        shape=[labert_config.vocab_size],
        initializer=tf.zeros_initializer())
    # Cast to float32 so logits/softmax/loss run in full precision even when
    # the encoder body computes in float16.
    logits = tf.matmul(tf.cast(input_tensor, tf.float32), output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    label_ids = tf.reshape(label_ids, [-1])
    label_weights = tf.reshape(label_weights, [-1])
    one_hot_labels = tf.one_hot(
        label_ids, depth=labert_config.vocab_size, dtype=tf.float32)
    # The `positions` tensor might be zero-padded (if the sequence is too
    # short to have the maximum number of predictions). The `label_weights`
    # tensor has a value of 1.0 for every real prediction and 0.0 for the
    # padding predictions.
    per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
    numerator = tf.reduce_sum(label_weights * per_example_loss)
    # Epsilon guards against division by zero when every weight is 0.
    denominator = tf.reduce_sum(label_weights) + 1e-5
    loss = numerator / denominator
  return (loss, per_example_loss, log_probs)
def get_next_sentence_output(bert_config, input_tensor, labels, num_classes):
  """Get loss and log probs for the next sentence prediction.

  Args:
    bert_config: model config; `hidden_size` and `initializer_range` are read.
    input_tensor: pooled [CLS] representation.
    labels: integer class labels in [0, num_classes).
    num_classes: number of sentence-relationship classes.

  Returns:
    (loss, per_example_loss, log_probs) tuple.
  """
  # Simple binary classification. Note that 0 is "next sentence" and 1 is
  # "random sentence". This weight matrix is not used after pre-training.
  with tf.compat.v1.variable_scope("cls/seq_relationship"):
    output_weights = tf.compat.v1.get_variable(
        "output_weights",
        shape=[num_classes, bert_config.hidden_size],
        initializer=modeling.create_initializer(bert_config.initializer_range))
    output_bias = tf.compat.v1.get_variable(
        "output_bias", shape=[num_classes], initializer=tf.zeros_initializer())
    # Logits computed in float32 regardless of the model's compute dtype.
    logits = tf.matmul(tf.cast(input_tensor, tf.float32), output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    labels = tf.reshape(labels, [-1])
    one_hot_labels = tf.one_hot(labels, depth=num_classes, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
  return (loss, per_example_loss, log_probs)
The provided code snippet includes necessary dependencies for implementing the `model_fn_builder` function. Write a Python function `def model_fn_builder(bert_config, init_checkpoint, learning_rate, beta1, beta2, epsilon, num_decay_steps, num_warmup_steps, num_accumulate_steps, use_horovod, use_fp16, use_one_hot_embeddings, do_return_all_attention_maps)` to solve the following problem:
Returns `model_fn` closure for TPUEstimator.
Here is the function:
def model_fn_builder(bert_config, init_checkpoint, learning_rate, beta1, beta2, epsilon,
                     num_decay_steps, num_warmup_steps, num_accumulate_steps, use_horovod, use_fp16,
                     use_one_hot_embeddings, do_return_all_attention_maps):
  """Returns `model_fn` closure for TPUEstimator.

  Args:
    bert_config: LaBERT model configuration object.
    init_checkpoint: optional checkpoint to warm-start variables from.
    learning_rate, beta1, beta2, epsilon: optimizer hyper-parameters.
    num_decay_steps: number of learning-rate decay steps.
    num_warmup_steps: number of linear warmup steps.
    num_accumulate_steps: gradient-accumulation steps.
    use_horovod: distribute training via Horovod (`hvd`) when True.
    use_fp16: run the encoder in float16 when True.
    use_one_hot_embeddings: use one-hot matmul embedding lookup (TPU-friendly).
    do_return_all_attention_maps: also fetch per-layer attention maps.

  Returns:
    A `model_fn(features, labels, mode, params)` for tf.estimator.

  NOTE(review): `hvd` is referenced when use_horovod is True but its import is
  not visible in this file fragment — confirm `import horovod.tensorflow as hvd`
  exists at module level.
  """
  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    # Only rank 0 logs under Horovod, to avoid duplicated output per worker.
    if not use_horovod or hvd.rank() == 0:
      tf.compat.v1.logging.info("*** Features ***")
      for name in sorted(features.keys()):
        tf.compat.v1.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    masked_lm_positions = features["masked_lm_positions"]
    masked_lm_ids = features["masked_lm_ids"]
    masked_lm_weights = features["masked_lm_weights"]
    next_sentence_labels = features["next_sentence_labels"]
    # Lattice position embeddings: span start/end ids per token.
    positional_embeddings_start = features['positional_embeddings_start']
    positional_embeddings_end = features['positional_embeddings_end']
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    model = modeling_labert.LaBertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        start_positions=positional_embeddings_start,
        end_positions=positional_embeddings_end,
        use_one_hot_embeddings=use_one_hot_embeddings,
        transformer_model_type=FLAGS.transformer_model_type,
        do_share_parameter_across_layers=FLAGS.do_share_parameter_across_layers,
        compute_type=tf.float16 if use_fp16 else tf.float32,
        do_return_all_attention_maps=do_return_all_attention_maps,)
    (masked_lm_loss,
     masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
         bert_config, model.get_sequence_output(), model.get_embedding_table(),
         masked_lm_positions, masked_lm_ids, masked_lm_weights)
    (next_sentence_loss, next_sentence_example_loss,
     next_sentence_log_probs) = get_next_sentence_output(
         bert_config, model.get_pooled_output(), next_sentence_labels, FLAGS.next_sentence_type)
    # NOTE(review): debug-only block — the '$$$start$$$' prints and shape dump
    # look like leftover instrumentation; consider removing for production.
    if do_return_all_attention_maps:
      for i in range(19):
        print('$$$start$$$')
      attention_maps = model.get_all_encoder_attention_maps()
      print(type(attention_maps), len(attention_maps))
      for t in attention_maps:
        print(t.shape)
    # Named identities so the losses can be fetched/logged by name.
    masked_lm_loss = tf.identity(masked_lm_loss, name="masked_lm_loss")
    next_sentence_loss = tf.identity(next_sentence_loss, name="next_sentence_loss")
    total_loss = masked_lm_loss + next_sentence_loss
    total_loss = tf.identity(total_loss, name='total_loss')
    tvars = tf.compat.v1.trainable_variables()
    initialized_variable_names = {}
    # Warm start from checkpoint only on rank 0; Horovod broadcasts afterwards.
    if init_checkpoint and (not use_horovod or hvd.rank() == 0):
      (assignment_map, initialized_variable_names
       ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)
    if not use_horovod or hvd.rank() == 0:
      tf.compat.v1.logging.info("**** Trainable Variables ****")
      for var in tvars:
        init_string = ""
        if var.name in initialized_variable_names:
          init_string = ", *INIT_FROM_CKPT*"
        tf.compat.v1.logging.info("  name = %s, shape = %s%s", var.name, var.shape, init_string)
    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, beta1, beta2, epsilon, num_decay_steps, num_warmup_steps,
          hvd if use_horovod else None, use_fp16, num_accumulate_steps,
          FLAGS.optimizer_type, FLAGS.allreduce_post_accumulation, ignore_pooler=FLAGS.ignore_pooler)
      output_spec = tf.estimator.EstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op)
    elif mode == tf.estimator.ModeKeys.EVAL:
      def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
                    masked_lm_weights, next_sentence_example_loss,
                    next_sentence_log_probs, next_sentence_labels):
        """Computes the loss and accuracy of the model."""
        masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                         [-1, masked_lm_log_probs.shape[-1]])
        masked_lm_predictions = tf.argmax(
            masked_lm_log_probs, axis=-1, output_type=tf.int32)
        masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
        masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
        masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
        # Padding predictions carry weight 0 and so don't affect the metrics.
        masked_lm_accuracy = tf.compat.v1.metrics.accuracy(
            labels=masked_lm_ids,
            predictions=masked_lm_predictions,
            weights=masked_lm_weights)
        masked_lm_mean_loss = tf.compat.v1.metrics.mean(
            values=masked_lm_example_loss, weights=masked_lm_weights)
        next_sentence_log_probs = tf.reshape(
            next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
        next_sentence_predictions = tf.argmax(
            next_sentence_log_probs, axis=-1, output_type=tf.int32)
        next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
        next_sentence_accuracy = tf.compat.v1.metrics.accuracy(
            labels=next_sentence_labels, predictions=next_sentence_predictions)
        next_sentence_mean_loss = tf.compat.v1.metrics.mean(
            values=next_sentence_example_loss)
        return {
            "masked_lm_accuracy": masked_lm_accuracy,
            "masked_lm_loss": masked_lm_mean_loss,
            "next_sentence_accuracy": next_sentence_accuracy,
            "next_sentence_loss": next_sentence_mean_loss,
        }
      eval_metrics = metric_fn(
          masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
          masked_lm_weights, next_sentence_example_loss,
          next_sentence_log_probs, next_sentence_labels)
      output_spec = tf.estimator.EstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metric_ops=eval_metrics)
    else:
      raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
    return output_spec
  return model_fn
18,902 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import modeling
import modeling_labert
import optimization
import tensorflow as tf
import shutil
import random
# tf.flags was removed from the TF2 top-level namespace; restore it from
# compat.v1 and bind a module-level `flags` alias so `FLAGS = flags.FLAGS`
# resolves (previously `flags` was an undefined name -> NameError on import).
tf.flags = tf.compat.v1.flags
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
def _decode_record(record, name_to_features):
  """Parse one serialized record into a feature dict of Tensors.

  tf.Example only stores tf.int64, but the TPU wants tf.int32, so every
  int64 feature is downcast after parsing.
  """
  parsed = tf.io.parse_single_example(record, name_to_features)
  for key in list(parsed.keys()):
    feature = parsed[key]
    parsed[key] = tf.cast(feature, tf.int32) if feature.dtype == tf.int64 else feature
  return parsed
The provided code snippet includes necessary dependencies for implementing the `input_fn_builder` function. Write a Python function `def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4)` to solve the following problem:
Creates an `input_fn` closure to be passed to TPUEstimator.
Here is the function:
def input_fn_builder(input_files,
                     max_seq_length,
                     max_predictions_per_seq,
                     is_training,
                     num_cpu_threads=4):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Args:
    input_files: list of TFRecord file paths.
    max_seq_length: padded length of every example sequence.
    max_predictions_per_seq: padded number of masked-LM predictions.
    is_training: enables shuffling, sharding and parallel interleave.
    num_cpu_threads: parallelism for interleave / batching.

  Returns:
    `input_fn(params) -> tf.data.Dataset` yielding batched feature dicts.
  """
  def input_fn(params):
    """The actual input function."""
    # Batch size comes from flags, not `params` (non-TPUEstimator path).
    batch_size = FLAGS.train_batch_size if is_training else FLAGS.eval_batch_size
    name_to_features = {
        "input_ids":
            tf.io.FixedLenFeature([max_seq_length], tf.int64),
        "input_mask":
            tf.io.FixedLenFeature([max_seq_length], tf.int64),
        "segment_ids":
            tf.io.FixedLenFeature([max_seq_length], tf.int64),
        "positional_embeddings_start":
            tf.io.FixedLenFeature([max_seq_length], tf.int64),
        "positional_embeddings_end":
            tf.io.FixedLenFeature([max_seq_length], tf.int64),
        "masked_lm_positions":
            tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
        "masked_lm_ids":
            tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
        "masked_lm_weights":
            tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),
        "next_sentence_labels":
            tf.io.FixedLenFeature([1], tf.int64),
    }
    # Optional sparse seq_length x seq_length visibility matrix for masking.
    if FLAGS.adopt_mask_matrix:
      name_to_features['visibility_matrix'] = tf.io.SparseFeature(
          index_key=['visibility_matrix_i', 'visibility_matrix_j'],
          value_key='visibility_matrix_values',
          dtype=tf.int64, size=[max_seq_length, max_seq_length])
    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    if is_training:
      d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
      # Each Horovod worker reads a disjoint shard of the file list.
      if FLAGS.use_horovod:
        d = d.shard(hvd.size(), hvd.rank())
      d = d.repeat()
      d = d.shuffle(buffer_size=len(input_files))
      # `cycle_length` is the number of parallel files that get read.
      cycle_length = min(num_cpu_threads, len(input_files))
      # `sloppy` mode means that the interleaving is not exact. This adds
      # even more randomness to the training pipeline.
      d = d.apply(
          tf.contrib.data.parallel_interleave(
              tf.data.TFRecordDataset,
              sloppy=is_training,
              cycle_length=cycle_length))
      d = d.shuffle(buffer_size=100)
    else:
      d = tf.data.TFRecordDataset(input_files)
      # Since we evaluate for a fixed number of steps we don't want to encounter
      # out-of-range exceptions.
      d = d.repeat()
    # We must `drop_remainder` on training because the TPU requires fixed
    # size dimensions. For eval, we assume we are evaluating on the CPU or GPU
    # and we *don't* want to drop the remainder, otherwise we wont cover
    # every sample.
    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            num_parallel_batches=num_cpu_threads,
            drop_remainder=True))
    return d
  return input_fn
18,903 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `validate_case_matches_checkpoint` function. Write a Python function `def validate_case_matches_checkpoint(do_lower_case, init_checkpoint)` to solve the following problem:
Checks whether the casing config is consistent with the checkpoint name.
Here is the function:
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
  """Checks whether the casing config is consistent with the checkpoint name."""
  # The casing has to be passed in by the user and there is no explicit check
  # as to whether it matches the checkpoint. The casing information probably
  # should have been stored in the bert_config.json file, but it's not, so
  # we have to heuristically detect it to validate.
  if not init_checkpoint:
    return

  match = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
  if match is None:
    return
  model_name = match.group(1)

  lower_models = {
      "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
      "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12",
  }
  cased_models = {
      "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
      "multi_cased_L-12_H-768_A-12",
  }

  # A model name can be in at most one of the two sets, so the two mismatch
  # cases are mutually exclusive.
  if model_name in lower_models and not do_lower_case:
    actual_flag, case_name, opposite_flag = "False", "lowercased", "True"
  elif model_name in cased_models and do_lower_case:
    actual_flag, case_name, opposite_flag = "True", "cased", "False"
  else:
    return

  raise ValueError(
      "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
      "However, `%s` seems to be a %s model, so you "
      "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
      "how the model was pre-training. If this error is wrong, please "
      "just comment out this check." % (actual_flag, init_checkpoint,
                                        model_name, case_name, opposite_flag))
18,904 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
def convert_to_unicode(text):
  """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

  Args:
    text: `str` or `bytes` on Python 3 (`str` or `unicode` on Python 2).

  Returns:
    The text as a unicode string; bytes are decoded as UTF-8 with undecodable
    sequences ignored.

  Raises:
    ValueError: if `text` is not a supported string type.
  """
  # The `six` third-party dependency is unnecessary here: six.PY3/PY2 are just
  # sys.version_info checks, so use the stdlib directly (behavior unchanged).
  import sys
  if sys.version_info[0] == 3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif sys.version_info[0] == 2:
    if isinstance(text, str):
      return text.decode("utf-8", "ignore")
    elif isinstance(text, unicode):  # noqa: F821 -- Python 2 only
      return text
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
  """Loads a vocabulary file into a dictionary mapping token -> index.

  Each line of the file is one token; a token's id is its 0-based line
  number. Insertion order is preserved via OrderedDict.

  Args:
    vocab_file: path to a plain-text vocabulary file, one token per line.

  Returns:
    collections.OrderedDict mapping token (unicode str) to integer id.
  """
  vocab = collections.OrderedDict()
  index = 0
  # tf.gfile was removed in TF2; tf.io.gfile.GFile matches the API already
  # used elsewhere in this codebase (e.g. create_training_instances).
  with tf.io.gfile.GFile(vocab_file, "r") as reader:
    while True:
      token = convert_to_unicode(reader.readline())
      if not token:
        break
      token = token.strip()
      vocab[token] = index
      index += 1
  return vocab
18,905 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
def convert_by_vocab(vocab, items):
  """Converts a sequence of [tokens|ids] using the vocab.

  Args:
    vocab: mapping applied to each item (token -> id or id -> token);
      a missing key raises KeyError, as before.
    items: iterable of keys to look up.

  Returns:
    List of mapped values, in input order.
  """
  return [vocab[item] for item in items]


def convert_ids_to_tokens(inv_vocab, ids):
  """Maps a sequence of ids back to tokens via the inverse vocab."""
  return convert_by_vocab(inv_vocab, ids)
18,906 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
  """Runs basic whitespace cleaning and splitting on a piece of text.

  Returns an empty list for empty or all-whitespace input; otherwise splits
  on runs of whitespace (str.split with no separator).
  """
  cleaned = text.strip()
  return cleaned.split() if cleaned else []
18,907 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem:
Checks whether `chars` is a whitespace character.
Here is the function:
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `chars` is a whitespace character. |
18,908 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `chars` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False | Checks whether `chars` is a control character. |
18,909 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem:
Checks whether `chars` is a punctuation character.
Here is the function:
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | Checks whether `chars` is a punctuation character. |
18,910 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tokenization
import tokenization_labert
from tokenization_labert import LatticeEncoding
import tensorflow as tf
import numpy as np
from create_pretraining_data_utils import (
write_lattice_instances_to_example_files,
write_lattice_instance_to_example_file,
TrainingInstance,
MaskedLmInstance)
FLAGS = flags.FLAGS
def create_training_instances(input_files, tokenizer, max_seq_length,
                              dupe_factor, short_seq_prob, masked_lm_prob,
                              max_predictions_per_seq, next_sentence_type, rng,):
  """Create `TrainingInstance`s from raw text.

  Args:
    input_files: list of plain-text corpus files (one sentence per line,
      blank lines separate documents).
    tokenizer: lattice tokenizer; `tokenize(span, add_candidate_indices=True)`
      returns a `LatticeEncoding`.
    max_seq_length: maximum tokens per instance.
    dupe_factor: how many times to iterate the corpus with different masks.
    short_seq_prob: probability of producing a shorter-than-max sequence.
    masked_lm_prob: fraction of tokens to mask.
    max_predictions_per_seq: cap on masked-LM predictions per instance.
    next_sentence_type: 2- or 3-way next-sentence task.
    rng: random.Random used for shuffling and sampling.

  Yields:
    TrainingInstance objects (lazily, via create_instances_from_document).
  """
  assert next_sentence_type in (2, 3), "next_sentence_type support 2 or 3 only"
  all_documents = [[]]
  # Input file format:
  # (1) One sentence per line. These should ideally be actual sentences, not
  # entire paragraphs or arbitrary spans of text. (Because we use the
  # sentence boundaries for the "next sentence prediction" task).
  # (2) Blank lines between documents. Document boundaries are needed so
  # that the "next sentence prediction" task doesn't span between documents.
  for input_file in input_files:
    with tf.io.gfile.GFile(input_file, "r") as reader:
      while True:
        line = tokenization.convert_to_unicode(reader.readline())
        if not line:
          break
        line = line.strip()
        # Empty lines are used as document delimiters
        if not line:
          all_documents.append([])
        # An empty line falls through harmlessly: line.split() is [] so the
        # resulting lattice has no tokens and is not appended.
        lattice_encoding = tokenization_labert.LatticeEncoding()
        for span in line.split():
          new_lattice_encoding = tokenizer.tokenize(span, add_candidate_indices=True)
          lattice_encoding.extend(new_lattice_encoding)
        if len(lattice_encoding.tokens) > 0:
          all_documents[-1].append(lattice_encoding)
  # Remove empty documents
  all_documents = [x for x in all_documents if x]
  rng.shuffle(all_documents)
  tf.compat.v1.logging.info(f'Finished load {len(all_documents)} documents.')
  vocab_words = list(tokenizer.vocab.keys())
  # Each duplication pass re-samples masks/segments for every document.
  for _ in range(dupe_factor):
    for document_index in range(len(all_documents)):
      for instance in create_instances_from_document(all_documents, document_index,
                                                     max_seq_length, short_seq_prob,
                                                     masked_lm_prob, max_predictions_per_seq, next_sentence_type,
                                                     vocab_words, rng, tokenizer):
        yield instance
def write_lattice_instance_to_example_file(
    instance, tokenizer, writer, max_seq_length,
    max_predictions_per_seq,
    position_embedding_names=('start', 'end'), do_dump_example=False):
  """Create TF example files from `TrainingInstance`s.

  Serializes a single `instance` into a `tf.train.Example` and writes it via
  `writer`. All sequence features are zero-padded to `max_seq_length` and all
  masked-LM features to `max_predictions_per_seq`.

  Args:
    instance: TrainingInstance with `encodings`, `segment_ids`,
      `masked_lm_positions`, `masked_lm_labels` and `next_sentence_label`.
    tokenizer: tokenizer providing `convert_tokens_to_ids`.
    writer: an open `tf.io.TFRecordWriter`.
    max_seq_length: fixed length to pad/validate token-level features to.
    max_predictions_per_seq: fixed length to pad masked-LM features to.
    position_embedding_names: which lattice position channels to emit; one
      `positional_embeddings_<name>` feature is written per entry.
    do_dump_example: if True, log the example's tokens and features.
  """
  input_ids = tokenizer.convert_tokens_to_ids(instance.encodings.tokens)
  # One parallel int sequence per requested position channel (e.g. start/end
  # offsets of each lattice token).
  positional_embeddings = instance.encodings.position_embedding(position_embedding_names)
  for positional_embedding in positional_embeddings:
    for i in positional_embedding:
      # Negative positions would corrupt the position-embedding lookup.
      assert i >= 0, f"{instance.encodings.tokens}"
  input_mask = [1] * len(input_ids)
  segment_ids = list(instance.segment_ids)
  assert len(input_ids) <= max_seq_length
  # Pad every token-aligned feature in lockstep up to max_seq_length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
    for t in positional_embeddings:
      t.append(0)
  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  for positional_embedding in positional_embeddings:
    assert len(positional_embedding) == max_seq_length
  masked_lm_positions = list(instance.masked_lm_positions)
  masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
  # Weight 1.0 marks real predictions; padded slots get weight 0.0 so the
  # loss ignores them.
  masked_lm_weights = [1.0] * len(masked_lm_ids)
  while len(masked_lm_positions) < max_predictions_per_seq:
    masked_lm_positions.append(0)
    masked_lm_ids.append(0)
    masked_lm_weights.append(0.0)
  next_sentence_label = instance.next_sentence_label
  features = collections.OrderedDict()
  features["input_ids"] = create_int_feature(input_ids)
  features["input_mask"] = create_int_feature(input_mask)
  features["segment_ids"] = create_int_feature(segment_ids)
  for name, positional_embedding in zip(position_embedding_names, positional_embeddings):
    features[f"positional_embeddings_{name}"] = create_int_feature(positional_embedding)
  features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
  features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
  features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
  features["next_sentence_labels"] = create_int_feature([next_sentence_label])
  assert all([len(t) == len(input_ids) for t in positional_embeddings])
  tf_example = tf.train.Example(features=tf.train.Features(feature=features))
  writer.write(tf_example.SerializeToString())
  if do_dump_example:
    # Debug dump of the first few examples so the record layout can be
    # eyeballed in the training logs.
    tf.compat.v1.logging.info("*** Example ***")
    tf.compat.v1.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in instance.encodings.tokens]))
    for feature_name in features.keys():
      feature = features[feature_name]
      values = []
      if feature.int64_list.value:
        values = feature.int64_list.value
      elif feature.float_list.value:
        values = feature.float_list.value
      tf.compat.v1.logging.info(
          "%s: %s" % (feature_name, " ".join([str(x) for x in values])))
def main_func(_):
  """Entry point: tokenize the input corpora and write TFRecord data.

  Reads corpus files matching `FLAGS.input_file` (comma-separated glob
  patterns), builds training instances, and streams them into the single
  TFRecord file at `FLAGS.output_file`.
  """
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
  # Both tokenizer flavours share the same constructor signature, so only
  # the class differs.
  tokenizer_cls = (tokenization_labert.LatticeTokenizerWithMapping
                   if FLAGS.use_named_lexicon
                   else tokenization_labert.LatticeTokenizer)
  tokenizer = tokenizer_cls(
      vocab_file=FLAGS.vocab_file,
      lexicon_file=FLAGS.lexicon_file,
      do_lower_case=FLAGS.do_lower_case)
  input_files = []
  for pattern in FLAGS.input_file.split(","):
    input_files.extend(tf.io.gfile.glob(pattern))
  tf.compat.v1.logging.info("*** Reading from input files ***")
  for path in input_files:
    tf.compat.v1.logging.info(" %s", path)
  # Seed both RNG sources so data generation is reproducible.
  np.random.seed(FLAGS.random_seed)
  rng = random.Random(FLAGS.random_seed)
  tf.compat.v1.logging.info(f"*** Writing to output {FLAGS.output_file} ***")
  record_writer = tf.io.TFRecordWriter(FLAGS.output_file)
  total_written = 0
  instance_stream = create_training_instances(
      input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
      FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
      FLAGS.next_sentence_type, rng,)
  for index, instance in enumerate(instance_stream):
    write_lattice_instance_to_example_file(
        instance, tokenizer, record_writer,
        FLAGS.max_seq_length, FLAGS.max_predictions_per_seq,
        position_embedding_names=('start', 'end'),
        do_dump_example=index < 20)  # only log the first 20 for inspection
    total_written += 1
  record_writer.close()
  tf.compat.v1.logging.info("Wrote %d total instances", total_written)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
from gpu_environment import get_custom_getter
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range.

  Args:
    initializer_range: float stddev of the truncated normal distribution.

  Returns:
    A TF initializer usable wherever a variable initializer is expected.
  """
  # Consistency/compat fix: the rest of this file uses the `tf.compat.v1`
  # namespace (e.g. `tf.compat.v1.get_variable`); the bare
  # `tf.truncated_normal_initializer` symbol does not exist in TF 2.x.
  return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
def get_shape_list(tensor, expected_rank=None, name=None):
  """Return the shape of `tensor` as a list, static where possible.

  Static dimensions are returned as Python ints; any dimension unknown at
  graph-construction time is returned as a scalar `tf.Tensor` instead.

  Args:
    tensor: a `tf.Tensor` whose shape is queried.
    expected_rank: optional int (or list of ints). If given, the tensor's
      rank is validated via `assert_rank` and an exception is raised on a
      mismatch.
    name: optional tensor name used in error messages.

  Returns:
    A list with one entry per dimension: ints for static sizes, scalar
    `tf.Tensor`s for dynamic ones.
  """
  if name is None:
    name = tensor.name
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)
  static_shape = tensor.shape.as_list()
  dynamic_indices = [i for i, dim in enumerate(static_shape) if dim is None]
  if not dynamic_indices:
    return static_shape
  # Fill the unknown entries from the runtime shape op.
  runtime_shape = tf.shape(tensor)
  for i in dynamic_indices:
    static_shape[i] = runtime_shape[i]
  return static_shape
The provided code snippet includes necessary dependencies for implementing the `embedding_lookup` function. Write a Python function `def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings", use_one_hot_embeddings=False)` to solve the following problem:
Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.gather()`. Returns: float Tensor of shape [batch_size, seq_length, embedding_size].
Here is the function:
def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
  """Looks up word embeddings for an id tensor.

  Args:
    input_ids: int32 Tensor of shape [batch_size, seq_length] (or
      [batch_size, seq_length, num_inputs]) containing word ids.
    vocab_size: int. Size of the embedding vocabulary.
    embedding_size: int. Width of the word embeddings.
    initializer_range: float. Embedding initialization range.
    word_embedding_name: string. Name of the embedding table variable.
    use_one_hot_embeddings: bool. If True, use the one-hot matmul method
      (typically faster on TPUs for small vocabularies); otherwise use
      `tf.gather()`.

  Returns:
    A (embeddings, embedding_table) pair, where `embeddings` is a float
    Tensor of shape [batch_size, seq_length, embedding_size].
  """
  # The lookup below treats input as [batch_size, seq_length, num_inputs];
  # promote a plain [batch_size, seq_length] tensor to rank 3 first.
  if input_ids.shape.ndims == 2:
    input_ids = tf.expand_dims(input_ids, axis=[-1])
  embedding_table = tf.compat.v1.get_variable(
      name=word_embedding_name,
      shape=[vocab_size, embedding_size],
      initializer=create_initializer(initializer_range))
  flat_ids = tf.reshape(input_ids, [-1])
  if use_one_hot_embeddings:
    one_hot_ids = tf.one_hot(flat_ids, depth=vocab_size)
    flat_output = tf.matmul(one_hot_ids, embedding_table)
  else:
    flat_output = tf.gather(embedding_table, flat_ids)
  input_shape = get_shape_list(input_ids)
  # Fold the trailing ids dimension into the embedding width.
  output = tf.reshape(
      flat_output, input_shape[0:-1] + [input_shape[-1] * embedding_size])
  return (output, embedding_table)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
from gpu_environment import get_custom_getter
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
  """Apply layer normalization to `input_tensor`, then dropout."""
  normalized = layer_norm(input_tensor, name)
  return dropout(normalized, dropout_prob)
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range.

  Args:
    initializer_range: float stddev of the truncated normal distribution.

  Returns:
    A TF initializer usable wherever a variable initializer is expected.
  """
  # Consistency/compat fix: the rest of this file uses the `tf.compat.v1`
  # namespace (e.g. `tf.compat.v1.get_variable`); the bare
  # `tf.truncated_normal_initializer` symbol does not exist in TF 2.x.
  return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
def get_shape_list(tensor, expected_rank=None, name=None):
  """Return the shape of `tensor` as a list, static where possible.

  Static dimensions are returned as Python ints; any dimension unknown at
  graph-construction time is returned as a scalar `tf.Tensor` instead.

  Args:
    tensor: a `tf.Tensor` whose shape is queried.
    expected_rank: optional int (or list of ints). If given, the tensor's
      rank is validated via `assert_rank` and an exception is raised on a
      mismatch.
    name: optional tensor name used in error messages.

  Returns:
    A list with one entry per dimension: ints for static sizes, scalar
    `tf.Tensor`s for dynamic ones.
  """
  if name is None:
    name = tensor.name
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)
  static_shape = tensor.shape.as_list()
  dynamic_indices = [i for i, dim in enumerate(static_shape) if dim is None]
  if not dynamic_indices:
    return static_shape
  # Fill the unknown entries from the runtime shape op.
  runtime_shape = tf.shape(tensor)
  for i in dynamic_indices:
    static_shape[i] = runtime_shape[i]
  return static_shape
The provided code snippet includes necessary dependencies for implementing the `embedding_postprocessor` function. Write a Python function `def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1)` to solve the following problem:
Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid.
Here is the function:
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
  """Performs various post-processing on a word embedding tensor.

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length,
      embedding_size].
    use_token_type: bool. Whether to add embeddings for `token_type_ids`.
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      Must be specified if `use_token_type` is True.
    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
    token_type_embedding_name: string. The name of the embedding table variable
      for token type ids.
    use_position_embeddings: bool. Whether to add position embeddings for the
      position of each token in the sequence.
    position_embedding_name: string. The name of the embedding table variable
      for positional embeddings.
    initializer_range: float. Range of the weight initialization.
    max_position_embeddings: int. Maximum sequence length that might ever be
      used with this model. This can be longer than the sequence length of
      input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.

  Returns:
    float tensor with same shape as `input_tensor`.

  Raises:
    ValueError: One of the tensor shapes or input values is invalid.
  """
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  width = input_shape[2]
  output = input_tensor
  if use_token_type:
    if token_type_ids is None:
      # Bug fix: the two adjacent string literals previously concatenated
      # without a separating space ("...specified if`use_token_type`...").
      raise ValueError("`token_type_ids` must be specified if "
                       "`use_token_type` is True.")
    token_type_table = tf.compat.v1.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings
  if use_position_embeddings:
    # Guard against sequences longer than the learned position table.
    assert_op = tf.compat.v1.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.compat.v1.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
      # Since the position embedding table is a learned variable, we create it
      # using a (long) sequence length `max_position_embeddings`. The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())
      # Only the last two dimensions are relevant (`seq_length` and `width`), so
      # we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings
  output = layer_norm_and_dropout(output, dropout_prob)
  return output
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
from gpu_environment import get_custom_getter
def gelu(x):
  """Gaussian Error Linear Unit (tanh approximation).

  A smoother alternative to ReLU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` with the GELU activation applied elementwise.
  """
  inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
  cdf = 0.5 * (1.0 + tf.tanh(inner))
  return x * cdf
def dropout(input_tensor, dropout_prob):
  """Perform dropout.

  Note `dropout_prob` is the probability of *dropping* a value -- the `rate`
  argument of `tf.nn.dropout` -- not the keep probability.

  Args:
    input_tensor: float Tensor.
    dropout_prob: Python float drop probability, or None to disable.

  Returns:
    `input_tensor` unchanged when no dropout is requested, otherwise a
    dropped-out version of it.
  """
  if dropout_prob is None or dropout_prob == 0.0:
    return input_tensor
  return tf.nn.dropout(input_tensor, rate=dropout_prob)
def layer_norm(input_tensor, name=None):
  """Run layer normalization on the last dimension of the tensor.

  For float16 inputs, prefers the optional fused kernel from the local
  `fused_layer_norm` module when it is importable; otherwise (and for all
  other dtypes) falls back to `tf.contrib.layers.layer_norm`.

  NOTE(review): `tf.contrib` exists only in TF 1.x, while other functions in
  this file use `tf.compat.v1` -- confirm the intended TF version.
  """
  if input_tensor.dtype == tf.float16:
    try:
      # Imported lazily so environments without the fused kernel still work.
      from fused_layer_norm import fused_layer_norm
      return fused_layer_norm(
          inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name,
          use_fused_batch_norm=True)
    except ImportError:
      return tf.contrib.layers.layer_norm(
          inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
  else:
    return tf.contrib.layers.layer_norm(
        inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range.

  Args:
    initializer_range: float stddev of the truncated normal distribution.

  Returns:
    A TF initializer usable wherever a variable initializer is expected.
  """
  # Consistency/compat fix: the rest of this file uses the `tf.compat.v1`
  # namespace (e.g. `tf.compat.v1.get_variable`); the bare
  # `tf.truncated_normal_initializer` symbol does not exist in TF 2.x.
  return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    size_per_head=512,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    do_return_2d_tensor=False,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None,
                    do_return_attention_maps=False):
  """Performs multi-headed attention from `from_tensor` to `to_tensor`.

  This is an implementation of multi-headed attention based on "Attention
  is all you Need". If `from_tensor` and `to_tensor` are the same, then
  this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.

  This function first projects `from_tensor` into a "query" tensor and
  `to_tensor` into "key" and "value" tensors. These are (effectively) a list
  of tensors of length `num_attention_heads`, where each tensor is of shape
  [batch_size, seq_length, size_per_head].

  Then, the query and key tensors are dot-producted and scaled. These are
  softmaxed to obtain attention probabilities. The value tensors are then
  interpolated by these probabilities, then concatenated back to a single
  tensor and returned.

  In practice, the multi-headed attention are done with transposes and
  reshapes rather than actual separate tensors.

  Args:
    from_tensor: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions in
      the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    size_per_head: int. Size of each attention head.
    query_act: (optional) Activation function for the query transform.
    key_act: (optional) Activation function for the key transform.
    value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
    initializer_range: float. Range of the weight initializer.
    do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
      * from_seq_length, num_attention_heads * size_per_head]. If False, the
      output will be of shape [batch_size, from_seq_length, num_attention_heads
      * size_per_head].
    batch_size: (Optional) int. If the input is 2D, this might be the batch size
      of the 3D version of the `from_tensor` and `to_tensor`.
    from_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `from_tensor`.
    to_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `to_tensor`.
    do_return_attention_maps: Whether to also return the attention probability
      tensor alongside the context layer.

  Returns:
    float Tensor of shape [batch_size, from_seq_length,
      num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
      true, this will be of shape [batch_size * from_seq_length,
      num_attention_heads * size_per_head]). When `do_return_attention_maps`
      is True, a (context_layer, attention_probs) pair is returned instead.

  Raises:
    ValueError: Any of the arguments or tensor shapes are invalid.
  """
  def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
                           seq_length, width):
    # [B*S, N*H] -> [B, N, S, H] so each head can be matmul'ed in parallel.
    output_tensor = tf.reshape(
        input_tensor, [batch_size, seq_length, num_attention_heads, width])
    output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
    return output_tensor
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
  if len(from_shape) != len(to_shape):
    raise ValueError(
        "The rank of `from_tensor` must match the rank of `to_tensor`.")
  if len(from_shape) == 3:
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
  elif len(from_shape) == 2:
    # 2D inputs carry no batch/sequence structure, so the caller must say.
    if (batch_size is None or from_seq_length is None or to_seq_length is None):
      raise ValueError(
          "When passing in rank 2 tensors to attention_layer, the values "
          "for `batch_size`, `from_seq_length`, and `to_seq_length` "
          "must all be specified.")
  # Scalar dimensions referenced here:
  #   B = batch size (number of sequences)
  #   F = `from_tensor` sequence length
  #   T = `to_tensor` sequence length
  #   N = `num_attention_heads`
  #   H = `size_per_head`
  from_tensor_2d = reshape_to_matrix(from_tensor)
  to_tensor_2d = reshape_to_matrix(to_tensor)
  # `query_layer` = [B*F, N*H]
  query_layer = tf.layers.dense(
      from_tensor_2d,
      num_attention_heads * size_per_head,
      activation=query_act,
      name="query",
      kernel_initializer=create_initializer(initializer_range))
  # `key_layer` = [B*T, N*H]
  key_layer = tf.layers.dense(
      to_tensor_2d,
      num_attention_heads * size_per_head,
      activation=key_act,
      name="key",
      kernel_initializer=create_initializer(initializer_range))
  # `value_layer` = [B*T, N*H]
  value_layer = tf.layers.dense(
      to_tensor_2d,
      num_attention_heads * size_per_head,
      activation=value_act,
      name="value",
      kernel_initializer=create_initializer(initializer_range))
  # `query_layer` = [B, N, F, H]
  query_layer = transpose_for_scores(query_layer, batch_size,
                                     num_attention_heads, from_seq_length,
                                     size_per_head)
  # `key_layer` = [B, N, T, H]
  key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
                                   to_seq_length, size_per_head)
  # Take the dot product between "query" and "key" to get the raw
  # attention scores.
  # `attention_scores` = [B, N, F, T]
  attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
  # Scale by 1/sqrt(H) as in "Attention is all you Need".
  attention_scores = tf.multiply(attention_scores,
                                 1.0 / math.sqrt(float(size_per_head)))
  if attention_mask is not None:
    # `attention_mask` = [B, 1, F, T]
    attention_mask = tf.expand_dims(attention_mask, axis=[1])
    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    adder = (1.0 - tf.cast(attention_mask, attention_scores.dtype)) * -10000.0
    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    attention_scores += adder
  # Normalize the attention scores to probabilities.
  # `attention_probs` = [B, N, F, T]
  attention_probs = tf.nn.softmax(attention_scores)
  # This is actually dropping out entire tokens to attend to, which might
  # seem a bit unusual, but is taken from the original Transformer paper.
  attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
  # `value_layer` = [B, T, N, H]
  value_layer = tf.reshape(
      value_layer,
      [batch_size, to_seq_length, num_attention_heads, size_per_head])
  # `value_layer` = [B, N, T, H]
  value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
  # `context_layer` = [B, N, F, H]
  context_layer = tf.matmul(attention_probs, value_layer)
  # `context_layer` = [B, F, N, H]
  context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
  if do_return_2d_tensor:
    # `context_layer` = [B*F, N*H]
    context_layer = tf.reshape(
        context_layer,
        [batch_size * from_seq_length, num_attention_heads * size_per_head])
  else:
    # `context_layer` = [B, F, N*H]
    context_layer = tf.reshape(
        context_layer,
        [batch_size, from_seq_length, num_attention_heads * size_per_head])
  if do_return_attention_maps:
    return context_layer, attention_probs
  return context_layer
def get_shape_list(tensor, expected_rank=None, name=None):
  """Return the shape of `tensor` as a list, static where possible.

  Static dimensions are returned as Python ints; any dimension unknown at
  graph-construction time is returned as a scalar `tf.Tensor` instead.

  Args:
    tensor: a `tf.Tensor` whose shape is queried.
    expected_rank: optional int (or list of ints). If given, the tensor's
      rank is validated via `assert_rank` and an exception is raised on a
      mismatch.
    name: optional tensor name used in error messages.

  Returns:
    A list with one entry per dimension: ints for static sizes, scalar
    `tf.Tensor`s for dynamic ones.
  """
  if name is None:
    name = tensor.name
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)
  static_shape = tensor.shape.as_list()
  dynamic_indices = [i for i, dim in enumerate(static_shape) if dim is None]
  if not dynamic_indices:
    return static_shape
  # Fill the unknown entries from the runtime shape op.
  runtime_shape = tf.shape(tensor)
  for i in dynamic_indices:
    static_shape[i] = runtime_shape[i]
  return static_shape
def reshape_to_matrix(input_tensor):
  """Collapse a rank >= 2 tensor to rank 2 by merging all leading dims."""
  rank = input_tensor.shape.ndims
  if rank < 2:
    raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                     (input_tensor.shape))
  if rank == 2:
    return input_tensor  # already a matrix; nothing to do
  last_dim = input_tensor.shape[-1]
  return tf.reshape(input_tensor, [-1, last_dim])
def reshape_from_matrix(output_tensor, orig_shape_list):
  """Undo `reshape_to_matrix`, restoring the original leading dimensions."""
  if len(orig_shape_list) == 2:
    return output_tensor  # was never flattened
  # Keep the (possibly projected) trailing width from the matrix itself.
  width = get_shape_list(output_tensor)[-1]
  return tf.reshape(output_tensor, orig_shape_list[0:-1] + [width])
The provided code snippet includes necessary dependencies for implementing the `transformer_model_v2` function. Write a Python function `def transformer_model_v2(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, intermediate_act_fn=gelu, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_share_parameter_across_layers=False, do_return_all_layers=False, do_return_attention_maps=False)` to solve the following problem:
Transformer model from "On layer normalization in the transformer architecture". See the original paper: https://openreview.net/pdf?id=B1x8anVFPr Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_share_parameter_across_layers: do_return_all_layers: Whether to also return all layers or just the final layer. do_return_attention_maps: Whether to also return attention map of all layers. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid.
Here is the function:
def transformer_model_v2(input_tensor,
                         attention_mask=None,
                         hidden_size=768,
                         num_hidden_layers=12,
                         num_attention_heads=12,
                         intermediate_size=3072,
                         intermediate_act_fn=gelu,
                         hidden_dropout_prob=0.1,
                         attention_probs_dropout_prob=0.1,
                         initializer_range=0.02,
                         do_share_parameter_across_layers=False,
                         do_return_all_layers=False,
                         do_return_attention_maps=False):
  """Transformer model from "On layer normalization in the transformer architecture".
  See the original paper:
  https://openreview.net/pdf?id=B1x8anVFPr
  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
    attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
      seq_length], with 1 for positions that can be attended to and 0 in
      positions that should not be.
    hidden_size: int. Hidden size of the Transformer.
    num_hidden_layers: int. Number of layers (blocks) in the Transformer.
    num_attention_heads: int. Number of attention heads in the Transformer.
    intermediate_size: int. The size of the "intermediate" (a.k.a., feed
      forward) layer.
    intermediate_act_fn: function. The non-linear activation function to apply
      to the output of the intermediate/feed-forward layer.
    hidden_dropout_prob: float. Dropout probability for the hidden layers.
    attention_probs_dropout_prob: float. Dropout probability of the attention
      probabilities.
    initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    do_share_parameter_across_layers: bool. If True, all layers reuse one set
      of weights (a single shared variable scope, ALBERT-style).
    do_return_all_layers: Whether to also return all layers or just the final
      layer.
    do_return_attention_maps: Whether to also return attention map of all layers.
  Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer.
  Raises:
    ValueError: A Tensor shape or parameter is invalid.
  """
  if hidden_size % num_attention_heads != 0:
    raise ValueError(
        "The hidden size (%d) is not a multiple of the number of attention "
        "heads (%d)" % (hidden_size, num_attention_heads))
  attention_head_size = int(hidden_size / num_attention_heads)
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  input_width = input_shape[2]
  # The Transformer performs sum residuals on all layers so the input needs
  # to be the same as the hidden size.
  if input_width != hidden_size:
    raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
                     (input_width, hidden_size))
  # We keep the representation as a 2D tensor to avoid re-shaping it back and
  # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
  # the GPU/CPU but may not be free on the TPU, so we want to minimize them to
  # help the optimizer.
  prev_output = reshape_to_matrix(input_tensor)
  all_layer_outputs = []
  all_layer_attention_maps = []
  for layer_idx in range(num_hidden_layers):
    if do_share_parameter_across_layers:
      name_variable_scope = "layer_shared"
    else:
      name_variable_scope = "layer_%d" % layer_idx
    # When sharing parameters, every layer after the first re-enters the same
    # scope with reuse=True so variables are created exactly once.
    with tf.compat.v1.variable_scope(name_variable_scope,
        reuse=do_share_parameter_across_layers and layer_idx > 0):
      layer_input = prev_output
      with tf.compat.v1.variable_scope("attention"):
        attention_heads = []
        with tf.compat.v1.variable_scope("self"):
          # Pre-LN: normalize *before* self-attention (the defining change of
          # the referenced paper versus the original post-LN Transformer).
          layer_input = layer_norm(layer_input)
          payload = attention_layer(
              from_tensor=layer_input,
              to_tensor=layer_input,
              attention_mask=attention_mask,
              num_attention_heads=num_attention_heads,
              size_per_head=attention_head_size,
              attention_probs_dropout_prob=attention_probs_dropout_prob,
              initializer_range=initializer_range,
              do_return_2d_tensor=True,
              batch_size=batch_size,
              from_seq_length=seq_length,
              to_seq_length=seq_length,
              do_return_attention_maps=do_return_attention_maps)
          if do_return_attention_maps:
            attention_head, attention_prob = payload
            attention_heads.append(attention_head)
            all_layer_attention_maps.append(attention_prob)
          else:
            attention_head = payload
            attention_heads.append(attention_head)
        attention_output = None
        if len(attention_heads) == 1:
          attention_output = attention_heads[0]
        else:
          # In the case where we have other sequences, we just concatenate
          # them to the self-attention head before the projection.
          attention_output = tf.concat(attention_heads, axis=-1)
        # Run a linear projection of `hidden_size` then add a residual
        # with `layer_input`.
        with tf.compat.v1.variable_scope("output"):
          # NOTE: use tf.compat.v1.layers.dense — tf.layers was removed in
          # TF2 while this block already uses tf.compat.v1 variable scopes.
          attention_output = tf.compat.v1.layers.dense(
              attention_output,
              hidden_size,
              kernel_initializer=create_initializer(initializer_range))
          attention_output = dropout(attention_output, hidden_dropout_prob)
          attention_output = attention_output + layer_input
      # The activation is only applied to the "intermediate" hidden layer.
      with tf.compat.v1.variable_scope("intermediate"):
        # Pre-LN again before the feed-forward sub-layer.
        attention_output = layer_norm(attention_output)
        intermediate_output = tf.compat.v1.layers.dense(
            attention_output,
            intermediate_size,
            activation=intermediate_act_fn,
            kernel_initializer=create_initializer(initializer_range))
      # Down-project back to `hidden_size` then add the residual.
      with tf.compat.v1.variable_scope("output"):
        layer_output = tf.compat.v1.layers.dense(
            intermediate_output,
            hidden_size,
            kernel_initializer=create_initializer(initializer_range))
        layer_output = dropout(layer_output, hidden_dropout_prob)
        layer_output = layer_output + attention_output
        prev_output = layer_output
        all_layer_outputs.append(layer_output)
  if do_return_all_layers:
    final_outputs = []
    for layer_output in all_layer_outputs:
      final_output = reshape_from_matrix(layer_output, input_shape)
      final_outputs.append(final_output)
    if do_return_attention_maps:
      return final_outputs, all_layer_attention_maps
    return final_outputs
  else:
    final_output = reshape_from_matrix(prev_output, input_shape)
    return final_output
import os
import random
import time
import numpy as np
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
import mpu
import model
The provided code snippet includes necessary dependencies for implementing the `print_args` function. Write a Python function `def print_args(args)` to solve the following problem:
Print arguments.
Here is the function:
def print_args(args):
    """Print every attribute of the argument namespace, one dotted line each."""
    print('arguments:', flush=True)
    for name, value in vars(args).items():
        padding = '.' * (29 - len(name))
        print(f' {name} {padding} {value}', flush=True)
import os
import random
import time
import numpy as np
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
import mpu
import model
def print_rank_0(message):
    """Print *message* once per job: rank 0 only when torch.distributed is
    initialized, otherwise unconditionally."""
    if not torch.distributed.is_initialized():
        print(message, flush=True)
    elif torch.distributed.get_rank() == 0:
        print(message, flush=True)
The provided code snippet includes necessary dependencies for implementing the `report_memory` function. Write a Python function `def report_memory(name)` to solve the following problem:
Simple GPU memory report.
Here is the function:
def report_memory(name):
    """Print a one-line GPU memory report in MB, tagged with *name*.

    Output goes through print_rank_0 so only one line appears per job.
    """
    mega_bytes = 1024.0 * 1024.0
    string = name + ' memory (MB)'
    string += ' | allocated: {}'.format(
        torch.cuda.memory_allocated() / mega_bytes)
    string += ' | max allocated: {}'.format(
        torch.cuda.max_memory_allocated() / mega_bytes)
    # memory_cached()/max_memory_cached() are deprecated aliases of the
    # *_reserved() calls — same values, without the DeprecationWarning.
    string += ' | cached: {}'.format(torch.cuda.memory_reserved() / mega_bytes)
    string += ' | max cached: {}'.format(
        torch.cuda.max_memory_reserved() / mega_bytes)
    print_rank_0(string)
import os
import random
import time
import numpy as np
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
import mpu
import model
def get_checkpoint_name(checkpoints_path, iteration, release=False, zero=False):
    """Build the checkpoint file path for an iteration / model-parallel rank.

    Layout: <root>/<iter or release>[_zero_dp_rank_N]/mp_rank_MM/model_optim_rng.pt
    """
    directory = 'release' if release else 'iter_{:07d}'.format(iteration)
    if zero:
        directory += '_zero_dp_rank_{}'.format(mpu.get_data_parallel_rank())
    rank_dir = 'mp_rank_{:02d}'.format(mpu.get_model_parallel_rank())
    return os.path.join(checkpoints_path, directory, rank_dir,
                        'model_optim_rng.pt')
def ensure_directory_exists(filename):
    """Create the parent directory of *filename* if it does not exist yet."""
    dirname = os.path.dirname(filename)
    # dirname is '' for a bare filename (cwd already exists, and makedirs('')
    # would raise). exist_ok avoids the race where another rank creates the
    # directory between an exists() check and mkdir.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
def save_zero_checkpoint(args, iteration, optimizer):
    """Save the ZeRO-partitioned optimizer state for this data-parallel rank."""
    state = {
        'iteration': iteration,
        'optimizer_state_dict': optimizer.state_dict(),
    }
    path = get_checkpoint_name(args.save, iteration, zero=True)
    ensure_directory_exists(path)
    torch.save(state, path)
    print(' successfully saved {}'.format(path))
import os
import random
import numpy as np
import torch
import torch.nn.functional as F
from arguments import get_args
import deepspeed
from data_utils import make_tokenizer
from configure_data import configure_data
import mpu
from fp16 import FP16_Module
from data_utils.wordpiece import BertTokenizer
from model import PalmModel
from model import DistributedDataParallel as DDP
from utils import print_rank_0
def get_batch(context_tokens, device, args):
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
def generate_samples(model, tokenizer, args, device, length, passage):
    """Autoregressively generate a continuation of *passage* with the PALM model.

    Runs on every model-parallel rank: rank 0 tokenizes the real prompt and
    broadcasts it; other ranks tokenize a placeholder and receive the
    broadcast tensors. Returns the generated string.
    """
    context_count=0
    model.eval()
    # Defaults; overwritten below from `length` / the broadcast tensor.
    seq_length = 128
    input_length = 512
    init = True
    with torch.no_grad():
        while True:
            torch.distributed.barrier(group=mpu.get_model_parallel_group())
            terminate_runs=0
            if mpu.get_model_parallel_rank() == 0:
                if init:
                    seq_length_tensor = torch.cuda.LongTensor([50])
                    init = False
                raw_text = passage #input("\nContext prompt (stop to exit, press enter to set output length) >>> ")
                # Normalize full-width punctuation the tokenizer may not handle.
                raw_text = raw_text.replace('‘', '\'').replace('“', '\"').replace('——', '--')
                seq_length = max(1, length)
                seq_length_tensor = torch.cuda.LongTensor([seq_length])
                context_tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(raw_text))
                # Keep the tail of an over-long prompt, leaving room for [CLS]/[SEP].
                if len(context_tokens) > input_length - 2:
                    context_tokens = context_tokens[len(context_tokens) - input_length + 2:]
                context_tokens = [tokenizer.vocab[args.cls_token]] + context_tokens + [tokenizer.vocab[args.sep_token]]
                context_length = len(context_tokens)
            else:
                # Non-source ranks feed a dummy prompt; real tokens arrive via broadcast.
                context_tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("空"))
                context_tokens = [tokenizer.vocab[args.cls_token]] + context_tokens + [tokenizer.vocab[args.sep_token]]
                context_length = len(context_tokens)
                seq_length_tensor = torch.cuda.LongTensor([50])
            terminate_runs_tensor = torch.cuda.LongTensor([terminate_runs])
            torch.distributed.broadcast(terminate_runs_tensor, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group())
            terminate_runs = terminate_runs_tensor[0].item()
            pad_id = 0
            if context_length < input_length:
                context_tokens.extend([pad_id] * (input_length - context_length))
            context_tokens_tensor = torch.cuda.LongTensor(context_tokens)
            # Broadcast order (tokens, then seq_length) must match on all ranks.
            torch.distributed.broadcast(context_tokens_tensor, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group())
            torch.distributed.broadcast(seq_length_tensor, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group())
            seq_length = seq_length_tensor[0].item()
            if terminate_runs == 1:
                return
            all_generate_tokens = []
            generate_tokens = []
            counter = 0
            past_key_values = None
            sequence_output = None
            # NOTE(review): hard-coded vocab size (looks like Chinese BERT's
            # 21128) — confirm it matches the tokenizer in use.
            vocab_size = 21128
            tokens, attention_mask, types, dec_input_ids = get_batch(context_tokens_tensor, device, args)
            while counter < seq_length:
                # Every 128 generated tokens, fold the generated text back into
                # the encoder context and restart the decoder input.
                if counter % 128 == 0 and counter != 0:
                    generate_tokens.append(tokenizer.vocab[args.sep_token])
                    # Token id 102 is presumably [SEP] — verify against the vocab.
                    start = (context_tokens_tensor == 102).nonzero(as_tuple=True)[-1]
                    if start + len(generate_tokens) >= 512:
                        context_tokens_tensor = torch.cat([context_tokens_tensor[:start], torch.cuda.LongTensor(generate_tokens)], -1)[-512:]
                    else:
                        context_tokens_tensor[start:start+len(generate_tokens)] = torch.cuda.LongTensor(generate_tokens)
                    tokens, attention_mask, types, dec_input_ids = get_batch(context_tokens_tensor, device, args)
                    generate_tokens = []
                    sequence_output = None
                # sequence_output, _ = model.module.module.module.model.bert(tokens, types, attention_mask)
                position_ids = torch.full([args.batch_size, 1], len(generate_tokens), dtype=torch.long, device=device)
                _, logits, sequence_output = model(tokens, types, attention_mask, dec_input_ids, attention_mask, position_ids, is_infer=True, sequence_output=sequence_output, parallel_output=False)
                partition_vocab_size = logits.size()[-1]
                # Sample the next token from the temperature/top-k/top-p
                # filtered distribution over the last position.
                logits = logits[:, -1, :]
                logits = logits / args.temperature
                logits = top_k_logits(logits, top_k=args.top_k, top_p=args.top_p)
                log_probs = F.softmax(logits, dim=-1)
                prev = torch.multinomial(log_probs, num_samples=1)
                prev_token = prev[0].item()
                # Out-of-vocab ids are mapped to 100 (presumably [UNK]).
                if prev_token >= vocab_size: #or prev_token == 102:
                    prev_token = 100
                    prev[0] = 100
                # Stop on [SEP] once at least 80% of the requested length exists.
                if prev_token == 102 and len(all_generate_tokens) > int(max(1, length) * 0.8):
                    break
                if prev_token == 102:
                    counter += 1
                    continue
                #if prev_token == 100:
                #    counter += 1
                #    continue
                dec_input_ids = torch.cat([dec_input_ids, prev], dim=1)
                generate_tokens.append(prev_token)
                all_generate_tokens.append(prev_token)
                counter += 1
            # Collapse runs of consecutive [UNK] (100) tokens to a single one.
            generate_context = []
            for token in all_generate_tokens:
                if generate_context and generate_context[-1] == 100 and token == 100:
                    continue
                else:
                    generate_context.append(token)
            generate_context = "".join(tokenizer.convert_ids_to_tokens(generate_context)).replace('[UNK]', '“').replace('##','')
            return generate_context
            # NOTE(review): everything below is unreachable (after the return).
            raw_text = None
            torch.distributed.barrier(group=mpu.get_model_parallel_group())
            context_count += 1
import os
import random
import numpy as np
import torch
import torch.nn.functional as F
from arguments import get_args
import deepspeed
from data_utils import make_tokenizer
from configure_data import configure_data
import mpu
from fp16 import FP16_Module
from data_utils.wordpiece import BertTokenizer
from model import PalmModel
from model import DistributedDataParallel as DDP
from utils import print_rank_0
def prepare_tokenizer(args):
    """Build the tokenizer, record its sizes on *args*, and pad args.vocab_size
    up to a multiple of the model-parallel world size."""
    tokenizer = make_tokenizer(
        tokenizer_type=args.tokenizer_type,
        corpus=None,
        model_path=args.tokenizer_path,
        vocab_size=args.vocab_size,
        model_type=args.tokenizer_model_type,
        cache_dir=args.cache_dir)
    args.tokenizer_num_tokens = tokenizer.num_tokens
    args.tokenizer_num_type_tokens = tokenizer.num_type_tokens
    args.eod_token = tokenizer.get_command('eos').Id
    # Round the vocab size up so every model-parallel rank owns an equal slice.
    world_size = mpu.get_model_parallel_world_size()
    padded = tokenizer.num_tokens
    remainder = padded % world_size
    if remainder:
        padded += world_size - remainder
    args.vocab_size = padded
    print("prepare tokenizer done", flush=True)
    return tokenizer
import os
import random
import numpy as np
import torch
import torch.nn.functional as F
from arguments import get_args
import deepspeed
from data_utils import make_tokenizer
from configure_data import configure_data
import mpu
from fp16 import FP16_Module
from data_utils.wordpiece import BertTokenizer
from model import PalmModel
from model import DistributedDataParallel as DDP
from utils import print_rank_0
def set_random_seed(seed):
    """Seed python, numpy, torch and the model-parallel CUDA RNGs.

    A None or non-positive seed is a no-op (leave RNGs unseeded).
    """
    if seed is None or seed <= 0:
        return
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    mpu.model_parallel_cuda_manual_seed(seed)
def initialize_distributed(args):
    """Initialize torch.distributed and the model-parallel communicators."""
    # Pick this process's GPU: an explicit local_rank wins, otherwise fall
    # back to global rank modulo the number of visible devices.
    device = args.rank % torch.cuda.device_count()
    if args.local_rank is not None:
        device = args.local_rank
    torch.cuda.set_device(device)
    # Rendezvous over TCP using the conventional MASTER_ADDR/MASTER_PORT env.
    master_ip = os.getenv('MASTER_ADDR', '127.0.0.1')
    master_port = os.getenv('MASTER_PORT', '12345')
    init_method = 'tcp://{}:{}'.format(master_ip, master_port)
    torch.distributed.init_process_group(
        backend=args.distributed_backend,
        world_size=args.world_size,
        rank=args.rank,
        init_method=init_method)
    # Build the model-parallel process groups on top of the global group.
    mpu.initialize_model_parallel(args.model_parallel_size)
def setup_model(args):
    """Build the model and optionally load pretrained weights.

    args.pre_load selects how deeply the model is wrapped (without pre_load
    there is one extra FP16 wrapper level). args.iteration is always set to 0.
    """
    model = get_model(args)
    # Always initialize iteration (the original left it unset when
    # args.pre_load was True but args.load was None).
    args.iteration = 0
    if args.load is not None:
        from load_checkpoint import pre_load
        # Read the checkpoint from disk once and reuse it for both the
        # key report and the state-dict load (it used to be read twice).
        load_model = pre_load(mpu, args.load, args.load_iteration)
        if args.pre_load:
            target = model.module.module.model
        else:
            # One additional wrapper level here: the FP16 module.
            target = model.module.module.module.model
        model_keys = target.state_dict().keys()
        for key in load_model:
            if key in model_keys:
                print_rank_0('Loading key: '+key)
            else:
                print_rank_0('Skip key: '+key)
        target.load_state_dict(load_model, strict=False)
    return model
def setup_tokenizer(args):
    """Build the StructBERT tokenizer and agree on vocab sizes across ranks.

    Rank 0 of each model-parallel group computes the padded vocab size and the
    do_train/do_valid/do_test flags, then broadcasts them so every rank uses
    identical values. Returns (tokenizer, num_tokens, num_type_tokens).
    """
    data_config = configure_data()
    data_config.set_defaults(data_set_type='BERT', transpose=False)
    tokenizer = data_config.setup_tokenizer_for_structbert(args)
    # NOTE(review): make_palm_loaders is not among the visible imports —
    # confirm where it is defined.
    make_palm_loaders(args)
    if mpu.get_model_parallel_rank() == 0:
        args.do_train = True
        args.do_valid = True
        args.do_test = False
        # Pad the vocab size up to a multiple of
        # make_vocab_size_divisible_by * model-parallel world size.
        before = tokenizer.num_tokens
        after = before
        multiple = args.make_vocab_size_divisible_by * \
                   mpu.get_model_parallel_world_size()
        while (after % multiple) != 0:
            after += 1
        print_rank_0('> padded vocab (size: {}) with {} dummy '
                     'tokens (new size: {})'.format(
                         before, after - before, after))
        # Need to broadcast num_tokens and num_type_tokens.
        token_counts = torch.cuda.LongTensor([after,
                                              tokenizer.num_type_tokens,
                                              int(args.do_train), int(args.do_valid), int(args.do_test)])
    else:
        # Placeholder; real values arrive via the broadcast below.
        token_counts = torch.cuda.LongTensor([0, 0, 0, 0, 0])
    # Broadcast num tokens.
    torch.distributed.broadcast(token_counts,
                                mpu.get_model_parallel_src_rank(),
                                group=mpu.get_model_parallel_group())
    num_tokens = token_counts[0].item()
    num_type_tokens = token_counts[1].item()
    args.do_train = token_counts[2].item()
    args.do_valid = token_counts[3].item()
    args.do_test = token_counts[4].item()
    return tokenizer, num_tokens, num_type_tokens
def get_args():
    """Parse all the args.

    Composes the parser from per-topic add_* helpers plus DeepSpeed's
    arguments, then derives rank/world-size from the environment (plain env
    vars, or OpenMPI/Slurm variables when launched via mpirun).
    """
    parser = argparse.ArgumentParser(description='PyTorch BERT Model')
    parser = add_model_config_args(parser)
    parser = add_fp16_config_args(parser)
    parser = add_training_args(parser)
    parser = add_evaluation_args(parser)
    parser = add_text_generate_args(parser)
    parser = add_struct_args(parser)
    parser = add_palm_args(parser)
    parser = add_downstream_args(parser)
    parser = add_data_args(parser)
    # Include DeepSpeed configuration arguments
    parser = deepspeed.add_config_arguments(parser)
    args = parser.parse_args()
    # DeepSpeed engine disabled here regardless of the CLI flag.
    args.deepspeed = False
    args.cuda = torch.cuda.is_available()
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))
    if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
        # We are using (OpenMPI) mpirun for launching distributed data parallel processes
        local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
        local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
        # Possibly running with Slurm
        num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))
        nodeid = int(os.getenv('SLURM_NODEID', '0'))
        args.local_rank = local_rank
        args.rank = nodeid*local_size + local_rank
        args.world_size = num_nodes*local_size
    # Model parallelism cannot exceed the total number of processes.
    args.model_parallel_size = min(args.model_parallel_size, args.world_size)
    if args.rank == 0:
        print('using world size: {} and model-parallel size: {} '.format(
            args.world_size, args.model_parallel_size))
    # No explicit loss scale means dynamic loss scaling.
    args.dynamic_loss_scale = False
    if args.loss_scale is None:
        args.dynamic_loss_scale = True
        if args.rank == 0:
            print(' > using dynamic loss scaling')
    # The args fp32_* or fp16_* meant to be active when the
    # args fp16 is set. So the default behaviour should all
    # be false.
    if not args.fp16:
        args.fp32_embedding = False
        args.fp32_tokentypes = False
        args.fp32_layernorm = False
    return args
def pre_load(mpu,
             load_dir,
             tag):
    """Load a checkpoint's 'module' state dict onto CPU and return it."""
    ckpt_path = _get_ckpt_name(mpu, load_dir, tag)
    # map_location keeps tensors on CPU regardless of where they were saved.
    state = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
    return state['module']
The provided code snippet includes necessary dependencies for implementing the `get_model_tokenizer` function. Write a Python function `def get_model_tokenizer(vocab, pretrain_model_path)` to solve the following problem:
Main training program.
Here is the function:
def get_model_tokenizer(vocab, pretrain_model_path):
    """Build the PALM model, tokenizer and generation args for inference.

    Overrides the parsed CLI args with a fixed large-model configuration,
    initializes distributed/model-parallel state, loads pretrained weights,
    and returns (model, tokenizer, args) with default sampling settings.
    """
    # Disable CuDNN.
    torch.backends.cudnn.enabled = False
    # Arguments.
    args = get_args()
    # Fixed configuration for the pretrained checkpoint being served.
    args.model_parallel_size = 8
    args.pre_load = True
    args.palm_dataset = True
    args.num_layers = 24
    args.dec_layers = 6
    args.hidden_size = 8192
    args.num_attention_heads = 128
    args.max_position_embeddings = 2048
    args.tokenizer_type = 'BertWordPieceTokenizer'
    args.tokenizer_model_type = vocab
    args.distributed_backend = 'nccl'
    args.fp16 = True
    args.fp32_layernorm = True
    args.checkpoint_activations = True
    args.deepspeed_activation_checkpointing = True
    args.load = pretrain_model_path
    args.load_iteration = ''
    # Pytorch distributed.
    initialize_distributed(args)
    # Random seeds for reproducability.
    set_random_seed(args.seed)
    # get the tokenizer
    tokenizer, args.tokenizer_num_tokens, args.tokenizer_num_type_tokens = setup_tokenizer(args)
    # Model, optimizer, and learning rate.
    model = setup_model(args)
    #setting default batch size to 1
    args.batch_size = 1
    # Default sampling hyper-parameters for generation.
    args.top_k = 20
    args.top_p = 0.0
    args.temperature = 0.9
    return model, tokenizer, args
import math
import torch
import torch.nn.init as init
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_world_size
from .layers import ColumnParallelLinear
from .layers import RowParallelLinear
from .mappings import gather_from_model_parallel_region
import deepspeed
from .random import checkpoint
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
def gelu_impl(x):
    """Tanh-approximation GELU (OpenAI's variant)."""
    inner = 0.7978845608028654 * x * (1.0 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + torch.tanh(inner))


def gelu(x):
    """Apply the GELU activation to *x*."""
    return gelu_impl(x)
import math
import torch
import torch.nn.init as init
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_world_size
from .layers import ColumnParallelLinear
from .layers import RowParallelLinear
from .mappings import gather_from_model_parallel_region
import deepspeed
from .random import checkpoint
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
The provided code snippet includes necessary dependencies for implementing the `unscaled_init_method` function. Write a Python function `def unscaled_init_method(sigma)` to solve the following problem:
Init method based on N(0, sigma).
Here is the function:
def unscaled_init_method(sigma):
    """Build an in-place initializer drawing values from N(0, sigma)."""
    def initialize(tensor):
        # normal_ fills in place and returns the same tensor.
        return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
    return initialize
import math
import torch
import torch.nn.init as init
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_world_size
from .layers import ColumnParallelLinear
from .layers import RowParallelLinear
from .mappings import gather_from_model_parallel_region
import deepspeed
from .random import checkpoint
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
The provided code snippet includes necessary dependencies for implementing the `scaled_init_method` function. Write a Python function `def scaled_init_method(sigma, num_layers)` to solve the following problem:
Init method based on N(0, sigma/sqrt(2*num_layers).
Here is the function:
def scaled_init_method(sigma, num_layers):
    """Build an initializer drawing from N(0, sigma/sqrt(2*num_layers))."""
    # Depth-scaled std stabilizes residual branches in deep transformers.
    scaled_std = sigma / math.sqrt(2.0 * num_layers)
    def initialize(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=scaled_std)
    return initialize
import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
class _ScatterToModelParallelRegion(torch.autograd.Function):
def forward(ctx, input_):
def backward(ctx, grad_output):
def scatter_to_model_parallel_region(input_):
    """Scatter *input_* across the model-parallel group (autograd-aware).

    Thin functional wrapper over _ScatterToModelParallelRegion so the custom
    forward/backward communication participates in autograd.
    """
    return _ScatterToModelParallelRegion.apply(input_)
import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
class _GatherFromModelParallelRegion(torch.autograd.Function):
def forward(ctx, input_):
def backward(ctx, grad_output):
def gather_from_model_parallel_region(input_):
    """Gather *input_* from the model-parallel group (autograd-aware).

    Thin functional wrapper over _GatherFromModelParallelRegion so the custom
    forward/backward communication participates in autograd.
    """
    return _GatherFromModelParallelRegion.apply(input_)
import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
def see_memory_usage(message, force=False):
    """Print current/max allocated and cached GPU memory (GB) on rank 0.

    No-op unless *force* is True. Calls dist.barrier(), so torch.distributed
    must already be initialized on every rank when force is set.
    """
    if not force:
        return
    dist.barrier()
    if dist.get_rank() == 0:
        print(message)
        print("Memory Allocated ", torch.cuda.memory_allocated()/(1024*1024*1024), "GigaBytes")
        print("Max Memory Allocated ", torch.cuda.max_memory_allocated()/(1024*1024*1024), "GigaBytes")
        # memory_cached()/max_memory_cached() are deprecated aliases of the
        # *_reserved() calls — same values, without the DeprecationWarning.
        print("Cache Allocated ", torch.cuda.memory_reserved()/(1024*1024*1024), "GigaBytes")
        print("Max cache Allocated ", torch.cuda.max_memory_reserved()/(1024*1024*1024), "GigaBytes")
        print(" ")
        #input("Press Any Key To Continue ..")
import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
def detach_variable(inputs, device=None):
    """Detach every tensor in a tuple, preserving each requires_grad flag.

    Non-tensor entries pass through unchanged; tensors are optionally moved
    to *device* first. Raises RuntimeError for non-tuple input.
    """
    if not isinstance(inputs, tuple):
        raise RuntimeError(
            "Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)
    detached = []
    for item in inputs:
        if not isinstance(item, torch.Tensor):
            detached.append(item)
            continue
        needs_grad = item.requires_grad
        tensor = item.to(device=device) if device is not None else item
        tensor = tensor.detach()
        # detach() clears requires_grad; restore the original flag.
        tensor.requires_grad = needs_grad
        detached.append(tensor)
    return tuple(detached)
import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
The provided code snippet includes necessary dependencies for implementing the `_set_cuda_rng_state` function. Write a Python function `def _set_cuda_rng_state(new_state, device=-1)` to solve the following problem:
Sets the random number generator state of the current GPU. Argumentss: new_state (torch.ByteTensor): The desired state This function is adapted from PyTorch repo (torch.cuda.set_rng_state) with a single change: the input state is not cloned. Cloning caused major performance issues for +4 GPU cases.
Here is the function:
def _set_cuda_rng_state(new_state, device=-1):
    """Sets the random number generator state of the current GPU.
    Arguments:
        new_state (torch.ByteTensor): The desired state
    This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for +4 GPU cases.
    """
    if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
        # older PyTorch
        def cb():
            with device_ctx_manager(device):
                _C._cuda_setRNGState(new_state)
    else:
        # newer PyTorch
        if device == -1:
            device = torch.device('cuda')
        elif isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device('cuda', device)
        def cb():
            idx = device.index
            if idx is None:
                idx = torch.cuda.current_device()
            default_generator = torch.cuda.default_generators[idx]
            default_generator.set_state(new_state)
    # _lazy_call defers cb until CUDA is initialized.
    _lazy_call(cb)
import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
The provided code snippet includes necessary dependencies for implementing the `get_cuda_rng_tracker` function. Write a Python function `def get_cuda_rng_tracker()` to solve the following problem:
Get cuda rng tracker.
Here is the function:
def get_cuda_rng_tracker():
    """Get cuda rng tracker."""
    # Returns the module-level singleton tracker shared by all callers.
    return _CUDA_RNG_STATE_TRACKER
import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_model_parallel_rank():
    """Return my rank for the model parallel group."""
    # Thin wrapper over torch.distributed.get_rank; requires the process
    # group to be initialized and the parallel groups to be built.
    return torch.distributed.get_rank(group=get_model_parallel_group())
def get_data_parallel_rank():
    """Return my rank for the data parallel group."""
    return torch.distributed.get_rank(group=get_data_parallel_group())
The provided code snippet includes necessary dependencies for implementing the `model_parallel_cuda_manual_seed` function. Write a Python function `def model_parallel_cuda_manual_seed(seed)` to solve the following problem:
Initialize model parallel cuda seed. This function should be called after the model parallel is initialized. Also, no torch.cuda.manual_seed should be called after this function. Basically, this is replacement for that function. Two set of RNG states are tracked: default state: This is for data parallelism and is the same among a set of model parallel GPUs but different across different model paralle groups. This is used for example for dropout in the non-model-parallel regions. model-parallel state: This state is different among a set of model parallel GPUs, but the same across data parallel groups. This is used for example for dropout in model parallel regions.
Here is the function:
def model_parallel_cuda_manual_seed(seed):
    """Initialize model parallel cuda seed.
    This function should be called after the model parallel is
    initialized. Also, no torch.cuda.manual_seed should be called
    after this function. Basically, this is replacement for that
    function.
    Two set of RNG states are tracked:
    default state: This is for data parallelism and is the same among a
                   set of model parallel GPUs but different across
                   different model parallel groups. This is used for
                   example for dropout in the non-model-parallel regions.
    model-parallel state: This state is different among a set of model
                          parallel GPUs, but the same across data parallel
                          groups. This is used for example for dropout in
                          model parallel regions.
    """
    # 2718 is just for fun and any POSITIVE value will work.
    offset = seed + 2718
    model_parallel_seed = offset + get_model_parallel_rank()
    # Data parallel gets the original seed.
    data_parallel_seed = seed
    if torch.distributed.get_rank() == 0:
        print('> initializing model parallel cuda seeds on global rank {}, '
              'model parallel rank {}, and data parallel rank {} with '
              'model parallel seed: {} and data parallel seed: {}'.format(
                  torch.distributed.get_rank(), get_model_parallel_rank(),
                  get_data_parallel_rank(), model_parallel_seed,
                  data_parallel_seed), flush=True)
    _CUDA_RNG_STATE_TRACKER.reset()
    # Set the default state.
    torch.cuda.manual_seed(data_parallel_seed)
    # and model parallel state.
    _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME,
                                model_parallel_seed)
import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
mp_rank = None
mp_size = None
mp_group = None
def get_partition_size(item, num_partitions=None):
    """Return the number of elements one model-parallel partition of *item* holds.

    Args:
        item: tensor-like object exposing ``numel()``.
        num_partitions: optional override for the partition count; defaults to
            the module-level ``mp_size`` (set lazily by ``CheckpointFunction``).

    Returns:
        int: elements per partition (floor of numel / partitions).
    """
    global mp_rank, mp_size, mp_group
    if num_partitions is None:
        num_partitions = mp_size
    # Floor division stays exact for any tensor size; the original
    # int(size / mp_size) routed through a float and can lose precision
    # for very large element counts (> 2**53).
    return item.numel() // num_partitions
def get_partition_start(item):
    """Offset (in elements) of this rank's partition within the flattened *item*.

    Reads the module-level model-parallel rank cached by CheckpointFunction.
    """
    global mp_rank, mp_size, mp_group
    chunk = get_partition_size(item)
    return int(chunk * mp_rank)
18,943 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
mp_rank = None
mp_size = None
mp_group = None
def get_full_inputs(tensors):
    """Reassemble full activation tensors from their model-parallel partitions.

    ``tensors`` is the flat sequence saved by ``CheckpointFunction.forward``
    when activation partitioning is enabled: (partitioned tensor, original
    size) pairs, followed by the final unpartitioned argument and its size.
    Each partition is all-gathered across the model-parallel group, the
    original shape is restored, and ``item.data`` is repointed at the
    gathered buffer (in-place update).

    Returns:
        tuple: the reconstructed input tensors plus the final argument.
    """
    inputs = []
    for i in range(len(tensors) // 2 - 1):
        item = tensors[2 * i]
        size = tensors[2 * i + 1]
        partition_size = item.numel()
        tensor_size = partition_size * mp_size
        flat_tensor = torch.zeros([tensor_size], dtype=item.dtype,
                                  device=item.device)
        # NOTE: use a distinct index for the inner loop -- the original
        # shadowed the outer loop variable ``i``, which is error-prone.
        partitions = []
        for rank in range(mp_size):
            part = flat_tensor.narrow(0, partition_size * rank, partition_size)
            if rank == mp_rank:
                part.copy_(item)
            partitions.append(part)
        dist.all_gather(partitions, partitions[mp_rank], group=mp_group)
        input_tensor = flat_tensor.view(list(size.numpy()))
        item.data = input_tensor.data
        inputs.append(item)
    # The final argument was saved unpartitioned; pass it through as-is.
    inputs.append(tensors[-2])
    return tuple(inputs)
18,944 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
class CheckpointFunction(torch.autograd.Function):
    """Checkpointing autograd function, adapted from torch.utils.checkpoint.

    Two main changes from the upstream version:
      1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
      2) the states in the model parallel tracker are also properly
         tracked/set/reset.
    When the module-level PARTITION_ACTIVATIONS flag is set, the first two
    input activations are additionally partitioned across the model-parallel
    group in forward and all-gathered back in backward (see get_full_inputs).
    """

    @staticmethod  # fix: new-style autograd.Function requires a static forward
    def forward(ctx, run_function, *args):
        ctx.run_function = run_function
        global mp_rank, mp_size, mp_group
        # Lazily cache the model-parallel topology on first use.
        if mp_rank is None:
            mp_rank = get_model_parallel_rank()
            mp_size = get_model_parallel_world_size()
            mp_group = get_model_parallel_group()

        global cuda_device, transport_stream, PARTITION_ACTIVATIONS
        if cuda_device is None:
            if dist.get_rank() == 0:
                print(f"Partition Activations {PARTITION_ACTIVATIONS} and Correctness Check {PA_CORRECTNESS_TEST}")

            cuda_device = torch.cuda.current_device()
            # The transport stream is used to overlap the allgather
            # communication for the activations with the computation in
            # the backward pass.
            transport_stream = torch.cuda.Stream(device=cuda_device)

        if PARTITION_ACTIVATIONS:
            # Keep only this rank's contiguous slice of the first two
            # activations; remaining args are kept whole.
            inputs = [item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), get_partition_size(item)).clone() for item in args[:2]]
            inputs.extend(args[2:])

        # Just in case something funky is happening such as reuse of inputs.
        inputs_cuda = [item.to(cuda_device) for item in args]

        # Copy the RNG states so backward can replay the forward pass exactly.
        ctx.fwd_cpu_rng_state = torch.get_rng_state()
        ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
        ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        with torch.no_grad():
            outputs = run_function(*inputs_cuda)
        del inputs_cuda

        if PARTITION_ACTIVATIONS:
            # Save each partitioned activation together with its original
            # size so backward can reconstruct the full tensor.
            new_args = []
            for arg, inp in zip(args, inputs):
                size = torch.tensor(arg.size())
                arg.data = inp.data
                new_args.append(arg)
                new_args.append(size)
            ctx.save_for_backward(*new_args)
        else:
            ctx.save_for_backward(*args)

        return outputs

    @staticmethod  # fix: backward must be static as well
    def backward(ctx, *args):
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError("Checkpointing is not compatible with .grad(), "
                               "please use .backward() if possible")
        global cuda_device, transport_stream, PARTITION_ACTIVATIONS

        if PARTITION_ACTIVATIONS:
            # All-gather the partitioned activations on the side stream so
            # communication overlaps with other backward work.
            with torch.cuda.stream(transport_stream):
                inputs = get_full_inputs(ctx.saved_tensors)
                detached_inputs = detach_variable(inputs)
        else:
            inputs = ctx.saved_tensors
            detached_inputs = detach_variable(inputs)

        # Store the current states.
        bwd_cpu_rng_state = torch.get_rng_state()
        bwd_cuda_rng_state = torch.cuda.get_rng_state()
        bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        # Set the states to what they were before the forward pass.
        torch.set_rng_state(ctx.fwd_cpu_rng_state)
        _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)

        if PARTITION_ACTIVATIONS:
            # Make sure the allgather on the transport stream has finished
            # before the recomputation consumes the gathered activations.
            current_stream = torch.cuda.current_stream()
            current_stream.wait_stream(transport_stream)

        # Re-run the forward pass with grad enabled to build the local graph.
        with torch.enable_grad():
            outputs = ctx.run_function(*detached_inputs)

        # Set the states back to what they were at the start of this function.
        torch.set_rng_state(bwd_cpu_rng_state)
        _set_cuda_rng_state(bwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)

        if isinstance(outputs, torch.Tensor):
            outputs = (outputs,)
        torch.autograd.backward(outputs, args)
        # No gradient flows to run_function itself.
        return (None,) + tuple(inp.grad for inp in detached_inputs)
The provided code snippet includes necessary dependencies for implementing the `checkpoint` function. Write a Python function `def checkpoint(function, *args)` to solve the following problem:
Checkpoint a model or part of the model. This has been directly copied from torch.utils.checkpoint.
Here is the function:
def checkpoint(function, *args):
    """Checkpoint a model or part of the model.

    Thin wrapper over CheckpointFunction; directly copied from
    torch.utils.checkpoint (with mpu RNG-state handling).
    """
    result = CheckpointFunction.apply(function, *args)
    return result
18,945 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
import torch.distributed as dist
PARTITION_ACTIVATIONS = False
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
def partition_activations_in_checkpoint(partition_activation):
    """Globally enable or disable partitioning of checkpointed activations.

    Updates the module-level PARTITION_ACTIVATIONS flag read by the
    checkpointing machinery; only global rank 0 logs the change.
    """
    global PARTITION_ACTIVATIONS
    PARTITION_ACTIVATIONS = partition_activation
    on_master = dist.get_rank() == 0
    if on_master:
        print(f"**************Partition Activations {PARTITION_ACTIVATIONS}************")
18,946 | import math
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .mappings import copy_to_model_parallel_region
from .mappings import gather_from_model_parallel_region
from .mappings import reduce_from_model_parallel_region
from .mappings import scatter_to_model_parallel_region
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from .utils import VocabUtility
from deepspeed.utils.timer import SynchronizedWallClockTimer
def get_model_parallel_world_size():
    """Return world size for the model parallel group."""
    group = get_model_parallel_group()
    return torch.distributed.get_world_size(group=group)
def get_model_parallel_rank():
    """Return my rank for the model parallel group."""
    group = get_model_parallel_group()
    return torch.distributed.get_rank(group=group)
def divide(numerator, denominator):
    """Integer-divide *numerator* by *denominator*.

    Asserts exact divisibility (via ensure_divisibility) before dividing,
    then returns the integer quotient.
    """
    ensure_divisibility(numerator, denominator)
    quotient = numerator // denominator
    return quotient
The provided code snippet includes necessary dependencies for implementing the `_initialize_affine_weight` function. Write a Python function `def _initialize_affine_weight(weight, output_size, input_size, per_partition_size, partition_dim, init_method, stride=1, return_master_weight=False)` to solve the following problem:
Initialize affine weight for model parallel. Build the master weight on all processes and scatter the relevant chunk.
Here is the function:
def _initialize_affine_weight(weight, output_size, input_size,
                              per_partition_size, partition_dim, init_method,
                              stride=1, return_master_weight=False):
    """Initialize an affine weight for model parallelism.

    The full ("master") weight is materialized identically on every rank,
    then each rank copies the strided slices it owns along ``partition_dim``
    into ``weight``.  Returns the master weight when requested, else None.
    """
    # With a single model-parallel rank there is nothing to scatter.
    world_size = get_model_parallel_world_size()
    if world_size == 1:
        init_method(weight)
        return weight if return_master_weight else None

    # Every rank builds the same master weight (no grad tracking needed).
    master_weight = torch.empty(output_size, input_size,
                                dtype=weight.dtype,
                                requires_grad=False)
    init_method(master_weight)

    # Carve the master weight into per-stride chunks and keep ours.
    chunk_size = divide(per_partition_size, stride)
    chunks = torch.split(master_weight, chunk_size, dim=partition_dim)
    rank = get_model_parallel_rank()
    my_chunks = chunks[rank::world_size]
    with torch.no_grad():
        torch.cat(my_chunks, dim=partition_dim, out=weight)
    return master_weight if return_master_weight else None
18,947 | import queue
import threading
import tensorflow as tf
import torch
import numpy as np
def convert_tf_example_to_torch_tensors(example):
    """Convert one TF BERT-pretraining example into torch tensors.

    Marks masked positions (those with nonzero masked_lm_weights) with 1 in
    ``mask`` and their target ids in ``mask_labels`` (-1 elsewhere), and
    flips ``input_mask`` into a pad mask.  Returns a dict of torch tensors
    keyed 'text', 'types', 'is_random', 'pad_mask', 'mask', 'mask_labels'.
    """
    item = {key: value.numpy() for key, value in example.items()}
    mask = np.zeros_like(item['input_ids'])
    mask_labels = np.ones_like(item['input_ids']) * -1
    positions = item['masked_lm_positions'].astype(int)
    rows = zip(positions, item['masked_lm_weights'], item['masked_lm_ids'])
    for b, (row, weights, ids) in enumerate(rows):
        for i, idx in enumerate(row):
            # Weight 0 marks padding in the masked-LM slots; skip it.
            if weights[i] != 0:
                mask[b, idx] = 1
                mask_labels[b, idx] = ids[i]
    output = {
        'text': item['input_ids'],
        'types': item['segment_ids'],
        'is_random': item['next_sentence_labels'],
        'pad_mask': 1 - item['input_mask'],
        'mask': mask,
        'mask_labels': mask_labels,
    }
    return {key: torch.from_numpy(value) for key, value in output.items()}
def _multiproc_iter(dl, output_queue):
    """Worker loop: convert every item yielded by *dl* and enqueue it.

    Blocks on the queue when it is full; returns once *dl* is exhausted.
    """
    for item in dl:
        tensors = convert_tf_example_to_torch_tensors(item)
        output_queue.put(tensors, block=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.