id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
184,614
import math import torch import torch.nn as nn from functools import partial from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias from timm.models.registry import register_model from timm.models.layers import trunc_normal_ as __call_trunc_normal_ class VisionTransformerForMaskedImageModeling(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, vocab_size=8192, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, init_values=None, attn_head_dim=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, init_std=0.02): def fix_init_weight(self): def rescale(param, layer_id): def _init_weights(self, m): def no_weight_decay(self): def get_num_layers(self): def forward_features(self, x, bool_masked_pos): def forward(self, x, bool_masked_pos=None, return_all_tokens=False, return_patch_tokens=False): def forward_return_qkv(self, x, bool_masked_pos=None, split_out_as_qkv=False): def forward_intermediate(self, x, bool_masked_pos=None, layer_id=12): def interpolate_pos_encoding(self, x, w, h): def get_last_selfattention(self, x): def _cfg(url='', **kwargs): def beit_24x544_patch16_224_8k_vocab(pretrained=False, **kwargs): if "num_classes" in kwargs: _ = kwargs.pop("num_classes") if 'vocab_size' in kwargs: vocab_size = kwargs['vocab_size'] _ = kwargs.pop("vocab_size") else: vocab_size = 8192 model = VisionTransformerForMaskedImageModeling( img_size=224, patch_size=16, embed_dim=544, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=vocab_size, **kwargs) model.default_cfg = _cfg() if pretrained: checkpoint = torch.load( kwargs["init_ckpt"], map_location="cpu" ) model.load_state_dict(checkpoint["model"]) return model
null
184,615
import math import torch import torch.nn as nn from functools import partial from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias from timm.models.registry import register_model from timm.models.layers import trunc_normal_ as __call_trunc_normal_ class VisionTransformerForMaskedImageModelingCLS(VisionTransformerForMaskedImageModeling): def __init__(self, img_size=224, patch_size=16, in_chans=3, vocab_size=8192, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, init_values=None, attn_head_dim=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, init_std=0.02, early_layers=6, head_layers=2, shared_lm_head=True): super().__init__(img_size=img_size, patch_size=patch_size, in_chans=in_chans, vocab_size=vocab_size, embed_dim=embed_dim, depth=depth, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate, norm_layer=norm_layer, init_values=init_values, attn_head_dim=attn_head_dim, use_abs_pos_emb=use_abs_pos_emb, use_rel_pos_bias=use_rel_pos_bias, use_shared_rel_pos_bias=use_shared_rel_pos_bias, init_std=init_std) self.early_layers = early_layers print(f'early layer {early_layers}, late layer {depth - early_layers}, condenser head layers {head_layers}, shared_lm_head {shared_lm_head}') dpr = [x.item() for x in torch.linspace(0, drop_path_rate, max(depth, early_layers + head_layers))] # stochastic depth decay rule self.cls_pt_layers = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None, attn_head_dim=attn_head_dim, ) for i in range(early_layers, early_layers + head_layers)]) 
self.fix_init_cls_pt_weight() self.shared_lm_head = shared_lm_head if not shared_lm_head: self.cls_pt_norm = norm_layer(embed_dim) self.cls_pt_lm_head = nn.Linear(embed_dim, vocab_size) self.cls_pt_norm.apply(self._init_weights) self.cls_pt_lm_head.apply(self._init_weights) def fix_init_cls_pt_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.cls_pt_layers): rescale(layer.attn.proj.weight.data, self.early_layers + layer_id + 1) rescale(layer.mlp.fc2.weight.data, self.early_layers + layer_id + 1) def forward_features(self, x, bool_masked_pos): x = self.patch_embed(x, bool_masked_pos=bool_masked_pos) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks mask_token = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_token w = bool_masked_pos.unsqueeze(-1).type_as(mask_token) x = x * (1 - w) + mask_token * w x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): x = blk(x, rel_pos_bias=rel_pos_bias) if i + 1 == self.early_layers: early_states = x[:, 1:] x_cls_pt = torch.cat([x[:, [0]], early_states], dim=1) for blk in self.cls_pt_layers: x_cls_pt = blk(x_cls_pt, rel_pos_bias=rel_pos_bias) return self.norm(x), self.norm(x_cls_pt) if self.shared_lm_head else self.cls_pt_norm(x_cls_pt) def forward(self, x, bool_masked_pos=None, return_all_tokens=False, return_patch_tokens=False): if bool_masked_pos is None: bool_masked_pos = torch.zeros((x.shape[0], self.patch_embed.num_patches), dtype=torch.bool).to(x.device) x, x_cls_pt = self.forward_features(x, bool_masked_pos=bool_masked_pos) x = x[:, 1:] x_cls_pt = x_cls_pt[:, 1:] if return_patch_tokens: return [x, x_cls_pt] if return_all_tokens: return [self.lm_head(x), 
self.lm_head(x_cls_pt) if self.shared_lm_head else self.cls_pt_lm_head(x_cls_pt)] else: # return the masked tokens return [self.lm_head(x[bool_masked_pos]), self.lm_head(x_cls_pt[bool_masked_pos]) if self.shared_lm_head else self.cls_pt_lm_head(x_cls_pt[bool_masked_pos])] def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } def beit_24x544_patch16_224_8k_vocab_cls_pt(pretrained=False, **kwargs): if "num_classes" in kwargs: _ = kwargs.pop("num_classes") if 'vocab_size' in kwargs: vocab_size = kwargs['vocab_size'] _ = kwargs.pop("vocab_size") else: vocab_size = 8192 model = VisionTransformerForMaskedImageModelingCLS( img_size=224, patch_size=16, embed_dim=544, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=vocab_size, **kwargs) model.default_cfg = _cfg() if pretrained: checkpoint = torch.load( kwargs["init_ckpt"], map_location="cpu" ) model.load_state_dict(checkpoint["model"]) return model
null
184,616
import math import torch import torch.nn as nn from functools import partial from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias from timm.models.registry import register_model from timm.models.layers import trunc_normal_ as __call_trunc_normal_ class VisionTransformerForMaskedImageModeling(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, vocab_size=8192, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, init_values=None, attn_head_dim=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, init_std=0.02): super().__init__() self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.num_heads = num_heads self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None, attn_head_dim=attn_head_dim, ) for i in range(depth)]) self.norm = norm_layer(embed_dim) self.init_std = init_std self.lm_head = nn.Linear(embed_dim, 
vocab_size) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=self.init_std) trunc_normal_(self.cls_token, std=self.init_std) trunc_normal_(self.mask_token, std=self.init_std) trunc_normal_(self.lm_head.weight, std=self.init_std) self.apply(self._init_weights) self.fix_init_weight() def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=self.init_std) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=self.init_std) if m.bias is not None: nn.init.constant_(m.bias, 0) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def get_num_layers(self): return len(self.blocks) def forward_features(self, x, bool_masked_pos): x = self.patch_embed(x, bool_masked_pos=bool_masked_pos) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks mask_token = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_token w = bool_masked_pos.unsqueeze(-1).type_as(mask_token) x = x * (1 - w) + mask_token * w x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: x = blk(x, rel_pos_bias=rel_pos_bias) return self.norm(x) def forward(self, x, bool_masked_pos=None, return_all_tokens=False, return_patch_tokens=False): if bool_masked_pos is None: bool_masked_pos = torch.zeros((x.shape[0], self.patch_embed.num_patches), dtype=torch.bool).to(x.device) x = 
self.forward_features(x, bool_masked_pos=bool_masked_pos) x = x[:, 1:] if return_patch_tokens: return x if return_all_tokens: return self.lm_head(x) else: # return the masked tokens return self.lm_head(x[bool_masked_pos]) def forward_return_qkv(self, x, bool_masked_pos=None, split_out_as_qkv=False): if bool_masked_pos is None: bool_masked_pos = torch.zeros((x.shape[0], self.patch_embed.num_patches), dtype=torch.bool).to(x.device) x = self.patch_embed(x, bool_masked_pos=bool_masked_pos) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks mask_token = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_token w = bool_masked_pos.unsqueeze(-1).type_as(mask_token) x = x * (1 - w) + mask_token * w x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): if i < len(self.blocks) - 1: x = blk(x, rel_pos_bias=rel_pos_bias) else: # with torch.cuda.amp.autocast(enabled=False): x, qkv = blk(x, rel_pos_bias=rel_pos_bias, return_qkv=True) if split_out_as_qkv: x = self.norm(x) x = self.lm_head(x) # [b, n+1, 3*c] q, k, v = x.chunk(3, dim=-1) # [b, n+1, c] b, n, c =q.shape q = q.reshape(b, n, self.num_heads, -1).permute(0, 2, 1, 3) k = k.reshape(b, n, self.num_heads, -1).permute(0, 2, 1, 3) v = v.reshape(b, n, self.num_heads, -1).permute(0, 2, 1, 3) return x, q, k, v else: x = self.norm(x) x = x[:, 1:] x = self.lm_head(x[bool_masked_pos]) q, k, v = qkv[0], qkv[1], qkv[2] return x, q, k, v def forward_intermediate(self, x, bool_masked_pos=None, layer_id=12): if bool_masked_pos is None: bool_masked_pos = torch.zeros((x.shape[0], self.patch_embed.num_patches), dtype=torch.bool).to(x.device) x = self.patch_embed(x, bool_masked_pos=bool_masked_pos) batch_size, seq_len, _ = x.size() cls_tokens = 
self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks mask_token = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_token w = bool_masked_pos.unsqueeze(-1).type_as(mask_token) x = x * (1 - w) + mask_token * w x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None if isinstance(layer_id, list): output_list = [] for l, blk in enumerate(self.blocks): x = blk(x, rel_pos_bias=rel_pos_bias) if l in layer_id: output_list.append(x[:, 1:]) return output_list elif isinstance(layer_id, int): for l, blk in enumerate(self.blocks): if l < layer_id: x = blk(x, rel_pos_bias=rel_pos_bias) elif l == layer_id: x = blk.norm1(x) else: break return x[:, 1:] else: raise NotImplementedError(f"Not support for layer id is {layer_id} now!") def interpolate_pos_encoding(self, x, w, h): npatch = x.shape[1] - 1 N = self.pos_embed.shape[1] - 1 if npatch == N and w == h: return self.pos_embed class_pos_embed = self.pos_embed[:, 0] patch_pos_embed = self.pos_embed[:, 1:] dim = x.shape[-1] w0 = w // self.patch_embed.patch_size[0] h0 = h // self.patch_embed.patch_size[0] # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 w0, h0 = w0 + 0.1, h0 + 0.1 patch_pos_embed = nn.functional.interpolate( patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), mode='bicubic', ) assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) def get_last_selfattention(self, x): B, nc, w, h = x.shape x = self.patch_embed(x) batch_size, seq_len, _ = x.size() 
cls_tokens = self.cls_token.expand(batch_size, -1, -1) x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: if x.shape[1] != self.pos_embed.shape[1]: x = x + self.interpolate_pos_encoding(x, w, h) else: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): if i < len(self.blocks) - 1: x = blk(x, rel_pos_bias=rel_pos_bias) else: # return attention of the last block return blk(x, rel_pos_bias=rel_pos_bias, return_attention=True) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } def beit_large_patch16_224_8k_vocab(pretrained=False, **kwargs): if "num_classes" in kwargs: _ = kwargs.pop("num_classes") if 'vocab_size' in kwargs: vocab_size = kwargs['vocab_size'] _ = kwargs.pop("vocab_size") else: vocab_size = 8192 model = VisionTransformerForMaskedImageModeling( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=vocab_size, **kwargs) model.default_cfg = _cfg() if pretrained: checkpoint = torch.load( kwargs["init_ckpt"], map_location="cpu" ) model.load_state_dict(checkpoint["model"]) return model
null
184,617
import math import torch import torch.nn as nn from functools import partial from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias from timm.models.registry import register_model from timm.models.layers import trunc_normal_ as __call_trunc_normal_ class VisionTransformerForMaskedImageModelingCLS(VisionTransformerForMaskedImageModeling): def __init__(self, img_size=224, patch_size=16, in_chans=3, vocab_size=8192, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, init_values=None, attn_head_dim=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, init_std=0.02, early_layers=6, head_layers=2, shared_lm_head=True): super().__init__(img_size=img_size, patch_size=patch_size, in_chans=in_chans, vocab_size=vocab_size, embed_dim=embed_dim, depth=depth, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate, norm_layer=norm_layer, init_values=init_values, attn_head_dim=attn_head_dim, use_abs_pos_emb=use_abs_pos_emb, use_rel_pos_bias=use_rel_pos_bias, use_shared_rel_pos_bias=use_shared_rel_pos_bias, init_std=init_std) self.early_layers = early_layers print(f'early layer {early_layers}, late layer {depth - early_layers}, condenser head layers {head_layers}, shared_lm_head {shared_lm_head}') dpr = [x.item() for x in torch.linspace(0, drop_path_rate, max(depth, early_layers + head_layers))] # stochastic depth decay rule self.cls_pt_layers = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None, attn_head_dim=attn_head_dim, ) for i in range(early_layers, early_layers + head_layers)]) 
self.fix_init_cls_pt_weight() self.shared_lm_head = shared_lm_head if not shared_lm_head: self.cls_pt_norm = norm_layer(embed_dim) self.cls_pt_lm_head = nn.Linear(embed_dim, vocab_size) self.cls_pt_norm.apply(self._init_weights) self.cls_pt_lm_head.apply(self._init_weights) def fix_init_cls_pt_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.cls_pt_layers): rescale(layer.attn.proj.weight.data, self.early_layers + layer_id + 1) rescale(layer.mlp.fc2.weight.data, self.early_layers + layer_id + 1) def forward_features(self, x, bool_masked_pos): x = self.patch_embed(x, bool_masked_pos=bool_masked_pos) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks mask_token = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_token w = bool_masked_pos.unsqueeze(-1).type_as(mask_token) x = x * (1 - w) + mask_token * w x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): x = blk(x, rel_pos_bias=rel_pos_bias) if i + 1 == self.early_layers: early_states = x[:, 1:] x_cls_pt = torch.cat([x[:, [0]], early_states], dim=1) for blk in self.cls_pt_layers: x_cls_pt = blk(x_cls_pt, rel_pos_bias=rel_pos_bias) return self.norm(x), self.norm(x_cls_pt) if self.shared_lm_head else self.cls_pt_norm(x_cls_pt) def forward(self, x, bool_masked_pos=None, return_all_tokens=False, return_patch_tokens=False): if bool_masked_pos is None: bool_masked_pos = torch.zeros((x.shape[0], self.patch_embed.num_patches), dtype=torch.bool).to(x.device) x, x_cls_pt = self.forward_features(x, bool_masked_pos=bool_masked_pos) x = x[:, 1:] x_cls_pt = x_cls_pt[:, 1:] if return_patch_tokens: return [x, x_cls_pt] if return_all_tokens: return [self.lm_head(x), 
self.lm_head(x_cls_pt) if self.shared_lm_head else self.cls_pt_lm_head(x_cls_pt)] else: # return the masked tokens return [self.lm_head(x[bool_masked_pos]), self.lm_head(x_cls_pt[bool_masked_pos]) if self.shared_lm_head else self.cls_pt_lm_head(x_cls_pt[bool_masked_pos])] def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } def beit_large_patch16_224_8k_vocab_cls_pt(pretrained=False, **kwargs): if "num_classes" in kwargs: _ = kwargs.pop("num_classes") if 'vocab_size' in kwargs: vocab_size = kwargs['vocab_size'] _ = kwargs.pop("vocab_size") else: vocab_size = 8192 model = VisionTransformerForMaskedImageModelingCLS( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=vocab_size, **kwargs) model.default_cfg = _cfg() if pretrained: checkpoint = torch.load( kwargs["init_ckpt"], map_location="cpu" ) model.load_state_dict(checkpoint["model"]) return model
null
184,618
import math import torch import torch.nn as nn from functools import partial from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias from timm.models.registry import register_model from timm.models.layers import trunc_normal_ as __call_trunc_normal_ class VisionTransformerForMaskedImageModeling(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, vocab_size=8192, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, init_values=None, attn_head_dim=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, init_std=0.02): super().__init__() self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.num_heads = num_heads self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None, attn_head_dim=attn_head_dim, ) for i in range(depth)]) self.norm = norm_layer(embed_dim) self.init_std = init_std self.lm_head = nn.Linear(embed_dim, 
vocab_size) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=self.init_std) trunc_normal_(self.cls_token, std=self.init_std) trunc_normal_(self.mask_token, std=self.init_std) trunc_normal_(self.lm_head.weight, std=self.init_std) self.apply(self._init_weights) self.fix_init_weight() def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=self.init_std) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=self.init_std) if m.bias is not None: nn.init.constant_(m.bias, 0) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def get_num_layers(self): return len(self.blocks) def forward_features(self, x, bool_masked_pos): x = self.patch_embed(x, bool_masked_pos=bool_masked_pos) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks mask_token = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_token w = bool_masked_pos.unsqueeze(-1).type_as(mask_token) x = x * (1 - w) + mask_token * w x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: x = blk(x, rel_pos_bias=rel_pos_bias) return self.norm(x) def forward(self, x, bool_masked_pos=None, return_all_tokens=False, return_patch_tokens=False): if bool_masked_pos is None: bool_masked_pos = torch.zeros((x.shape[0], self.patch_embed.num_patches), dtype=torch.bool).to(x.device) x = 
self.forward_features(x, bool_masked_pos=bool_masked_pos) x = x[:, 1:] if return_patch_tokens: return x if return_all_tokens: return self.lm_head(x) else: # return the masked tokens return self.lm_head(x[bool_masked_pos]) def forward_return_qkv(self, x, bool_masked_pos=None, split_out_as_qkv=False): if bool_masked_pos is None: bool_masked_pos = torch.zeros((x.shape[0], self.patch_embed.num_patches), dtype=torch.bool).to(x.device) x = self.patch_embed(x, bool_masked_pos=bool_masked_pos) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks mask_token = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_token w = bool_masked_pos.unsqueeze(-1).type_as(mask_token) x = x * (1 - w) + mask_token * w x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): if i < len(self.blocks) - 1: x = blk(x, rel_pos_bias=rel_pos_bias) else: # with torch.cuda.amp.autocast(enabled=False): x, qkv = blk(x, rel_pos_bias=rel_pos_bias, return_qkv=True) if split_out_as_qkv: x = self.norm(x) x = self.lm_head(x) # [b, n+1, 3*c] q, k, v = x.chunk(3, dim=-1) # [b, n+1, c] b, n, c =q.shape q = q.reshape(b, n, self.num_heads, -1).permute(0, 2, 1, 3) k = k.reshape(b, n, self.num_heads, -1).permute(0, 2, 1, 3) v = v.reshape(b, n, self.num_heads, -1).permute(0, 2, 1, 3) return x, q, k, v else: x = self.norm(x) x = x[:, 1:] x = self.lm_head(x[bool_masked_pos]) q, k, v = qkv[0], qkv[1], qkv[2] return x, q, k, v def forward_intermediate(self, x, bool_masked_pos=None, layer_id=12): if bool_masked_pos is None: bool_masked_pos = torch.zeros((x.shape[0], self.patch_embed.num_patches), dtype=torch.bool).to(x.device) x = self.patch_embed(x, bool_masked_pos=bool_masked_pos) batch_size, seq_len, _ = x.size() cls_tokens = 
self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks mask_token = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_token w = bool_masked_pos.unsqueeze(-1).type_as(mask_token) x = x * (1 - w) + mask_token * w x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None if isinstance(layer_id, list): output_list = [] for l, blk in enumerate(self.blocks): x = blk(x, rel_pos_bias=rel_pos_bias) if l in layer_id: output_list.append(x[:, 1:]) return output_list elif isinstance(layer_id, int): for l, blk in enumerate(self.blocks): if l < layer_id: x = blk(x, rel_pos_bias=rel_pos_bias) elif l == layer_id: x = blk.norm1(x) else: break return x[:, 1:] else: raise NotImplementedError(f"Not support for layer id is {layer_id} now!") def interpolate_pos_encoding(self, x, w, h): npatch = x.shape[1] - 1 N = self.pos_embed.shape[1] - 1 if npatch == N and w == h: return self.pos_embed class_pos_embed = self.pos_embed[:, 0] patch_pos_embed = self.pos_embed[:, 1:] dim = x.shape[-1] w0 = w // self.patch_embed.patch_size[0] h0 = h // self.patch_embed.patch_size[0] # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 w0, h0 = w0 + 0.1, h0 + 0.1 patch_pos_embed = nn.functional.interpolate( patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), mode='bicubic', ) assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) def get_last_selfattention(self, x): B, nc, w, h = x.shape x = self.patch_embed(x) batch_size, seq_len, _ = x.size() 
cls_tokens = self.cls_token.expand(batch_size, -1, -1) x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: if x.shape[1] != self.pos_embed.shape[1]: x = x + self.interpolate_pos_encoding(x, w, h) else: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): if i < len(self.blocks) - 1: x = blk(x, rel_pos_bias=rel_pos_bias) else: # return attention of the last block return blk(x, rel_pos_bias=rel_pos_bias, return_attention=True) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } def beit_huge_patch14_224_8k_vocab(pretrained=False, **kwargs): # patch_size=14, embed_dim=1280, depth=32, num_heads=16 if "num_classes" in kwargs: _ = kwargs.pop("num_classes") if 'vocab_size' in kwargs: vocab_size = kwargs['vocab_size'] _ = kwargs.pop("vocab_size") else: vocab_size = 8192 model = VisionTransformerForMaskedImageModeling( patch_size=14, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=8192, **kwargs) model.default_cfg = _cfg() if pretrained: checkpoint = torch.load( kwargs["init_ckpt"], map_location="cpu" ) model.load_state_dict(checkpoint["model"]) return model
null
184,619
import math
import sys
from typing import Iterable

import torch
import torch.nn as nn

import utils


def train_one_epoch(model: torch.nn.Module, data_loader: Iterable,
                    optimizer: torch.optim.Optimizer, device: torch.device,
                    epoch: int, loss_scaler, clip_grad: float = 0,
                    log_writer=None, lr_scheduler=None, start_steps=None,
                    lr_schedule_values=None, args=None,
                    ):
    """Train the visual tokenizer (VQ-KD) for one epoch.

    The model is expected to return ``(loss, log_loss)`` where ``log_loss`` is a
    dict of scalar metrics keyed like ``"train/rec_loss"``. Per-step learning
    rates are taken from ``lr_schedule_values`` indexed by the global step
    ``start_steps + step``. Returns a dict of epoch-averaged metrics; when the
    model carries a quantizer, the count of unused codebook entries is added
    under ``'Unused_code'``.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    # NOTE(review): `model.module` assumes a DDP-wrapped model — confirm this
    # entry point is only called after DistributedDataParallel wrapping.
    if hasattr(model.module, 'quantize'):
        try:
            model.module.quantize.reset_cluster_size(device)
            print("Reset the codebook statistic info in quantizer before each epoch")
        except:
            pass

    for step, (batch, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # assign learning rate & weight decay for each step
        it = start_steps + step  # global training iteration
        if lr_schedule_values is not None:
            for i, param_group in enumerate(optimizer.param_groups):
                if lr_schedule_values is not None:
                    # per-group lr_scale defaults to 1.0 when absent
                    param_group["lr"] = lr_schedule_values[it] * param_group.get("lr_scale", 1.0)

        images = batch.to(device, non_blocking=True)

        with torch.cuda.amp.autocast(enabled=True):
            loss, log_loss = model(images)

        loss_value = loss.item()

        # abort (and checkpoint for debugging) on NaN/Inf loss
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value), force=True)
            utils.save_nan_model(args, model)
            sys.exit(1)

        optimizer.zero_grad()
        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        # loss_scaler performs backward + (clipped) step under AMP grad scaling
        grad_norm = loss_scaler(loss, optimizer, clip_grad=clip_grad,
                                parameters=model.parameters(), create_graph=is_second_order)
        loss_scale_value = loss_scaler.state_dict()["scale"]

        torch.cuda.synchronize()

        metric_logger.update(loss=loss_value)

        # strip the "train/" prefix from metric names before logging
        # NOTE(review): the `k not in ['total_loss']` filter tests the *full*
        # key (e.g. "train/total_loss") before splitting, so total_loss is
        # probably not filtered out as intended — confirm against the logger.
        new_log_loss = {k.split('/')[-1]: v for k, v in log_loss.items() if k not in ['total_loss']}
        metric_logger.update(**new_log_loss)

        # track the min/max lr across param groups (layer-wise lr decay)
        min_lr = 10.
        max_lr = 0.
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group["lr"])
            max_lr = max(max_lr, group["lr"])

        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if group["weight_decay"] > 0:
                weight_decay_value = group["weight_decay"]
        metric_logger.update(weight_decay=weight_decay_value)
        metric_logger.update(grad_norm=grad_norm)

        if log_writer is not None:
            log_writer.update(**new_log_loss, head="train/loss")
            log_writer.update(lr=max_lr, head="opt")
            log_writer.update(min_lr=min_lr, head="opt")
            log_writer.update(weight_decay=weight_decay_value, head="opt")
            log_writer.update(grad_norm=grad_norm, head="opt")
            log_writer.update(loss_scale=loss_scale_value, head="opt")
            log_writer.set_step()

        if lr_scheduler is not None:
            lr_scheduler.step_update(start_steps + step)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)

    # stat the codebook usage information
    if hasattr(model.module, 'quantize'):
        try:
            codebook_cluster_size = model.module.quantize._codebook.cluster_size
        except:
            codebook_cluster_size = model.module.quantize.cluster_size
        zero_cnt = (codebook_cluster_size == 0).sum().item()
        train_stat = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
        train_stat['Unused_code'] = zero_cnt
        print(f"Unused code in codebook: {zero_cnt}")
        return train_stat
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
null
184,620
import math import sys from typing import Iterable import torch import torch.nn as nn import utils def evaluate(data_loader, model, device, log_writer=None, epoch=None, args=None): metric_logger = utils.MetricLogger(delimiter=" ") header = 'Validation:' # switch to evaluation mode model.eval() if hasattr(model.module, 'quantize'): try: model.module.quantize.reset_cluster_size(device) print("Reset the codebook statistic info in quantizer before testing") except: pass for step, (batch, extra_info) in enumerate(metric_logger.log_every(data_loader, 10, header)): images = batch.to(device, non_blocking=True) loss, log_loss = model(images) metric_logger.update(loss=loss.item()) new_log_loss = {k.split('/')[-1]:v for k, v in log_loss.items() if k not in ['total_loss']} metric_logger.update(**new_log_loss) # gather the stats from all processes metric_logger.synchronize_between_processes() print("Averaged stats:", metric_logger) # stat the codebook usage information if hasattr(model, 'module') and hasattr(model.module, 'quantize'): try: codebook_cluster_size = model.module.quantize._codebook.cluster_size except: codebook_cluster_size = model.module.quantize.cluster_size zero_cnt = (codebook_cluster_size == 0).sum().item() test_stat = {k: meter.global_avg for k, meter in metric_logger.meters.items()} test_stat['unused_code'] = zero_cnt print(f"Unused code in codebook: {zero_cnt}") return test_stat return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
null
184,621
import math import sys from typing import Iterable import torch import torch.nn as nn import utils def calculate_codebook_usage(data_loader, model, device, log_writer=None, epoch=None, args=None): metric_logger = utils.MetricLogger(delimiter=" ") header = 'Calculating codebook usage:' # switch to evaluation mode model.eval() codebook_num = args.codebook_n_emd codebook_cnt = torch.zeros(codebook_num, dtype=torch.float64).to(device) for step, (images, _) in enumerate(metric_logger.log_every(data_loader, 10, header)): images = images.to(device, non_blocking=True) outputs = utils.get_model(model).get_tokens(images)['token'].view(-1) outputs_gather_list = [torch.zeros_like(outputs) for _ in range(utils.get_world_size())] torch.distributed.all_gather(outputs_gather_list, outputs) all_tokens = torch.cat(outputs_gather_list, dim=0).view(-1) # [B * N * Ngpu, ] codebook_cnt += torch.bincount(all_tokens, minlength=codebook_num) # statistic zero_cnt = (codebook_cnt == 0).sum() # 0 print(f"STAT: {zero_cnt} tokens ({(zero_cnt / codebook_num) * 100}%) never are used in this codebook.")
null
184,622
from cgitb import enable  # NOTE(review): unused and deprecated import — candidate for removal
import math
import sys
from typing import Iterable

import torch
import torch.nn as nn
import torch.nn.functional as F

import utils


def train_one_epoch(model: torch.nn.Module, vqkd: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler,
                    max_norm: float = 0, log_writer=None, lr_scheduler=None,
                    start_steps=None, lr_schedule_values=None,
                    wd_schedule_values=None, args=None):
    """Train the BEiT masked-image-modeling backbone for one epoch.

    Each batch yields ``(samples, images, bool_masked_pos)``: ``samples`` feed
    the student, ``images`` feed the frozen ``vqkd`` tokenizer that produces
    the target token ids, and ``bool_masked_pos`` marks the masked patches.
    Cross-entropy is computed only over masked positions. The model may return
    either one logits tensor or a list of two (e.g. patch head + CLS head),
    in which case the two losses are summed. Returns epoch-averaged metrics.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    loss_fn = nn.CrossEntropyLoss()

    for step, (batch, extra_info) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # assign learning rate & weight decay for each step
        it = start_steps + step  # global training iteration
        if lr_schedule_values is not None or wd_schedule_values is not None:
            for i, param_group in enumerate(optimizer.param_groups):
                if lr_schedule_values is not None:
                    # NOTE(review): unlike the tokenizer loop, this indexes
                    # param_group["lr_scale"] directly — assumes every group
                    # defines lr_scale; confirm against the optimizer factory.
                    param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
                if wd_schedule_values is not None and param_group["weight_decay"] > 0:
                    param_group["weight_decay"] = wd_schedule_values[it]

        samples, images, bool_masked_pos = batch
        images = images.to(device, non_blocking=True)
        samples = samples.to(device, non_blocking=True)
        bool_masked_pos = bool_masked_pos.to(device, non_blocking=True)

        # tokenizer is frozen: derive target token ids without grad
        with torch.no_grad():
            with torch.cuda.amp.autocast():
                input_ids = vqkd.get_codebook_indices(images)

            bool_masked_pos = bool_masked_pos.flatten(1).to(torch.bool)
            # labels are the token ids at the masked positions only
            labels = input_ids[bool_masked_pos]

        with torch.cuda.amp.autocast():  # enabled=False
            outputs = model(samples, bool_masked_pos=bool_masked_pos)

            if isinstance(outputs, list):
                # two prediction heads: sum their cross-entropy losses
                loss_1 = loss_fn(input=outputs[0], target=labels)
                loss_2 = loss_fn(input=outputs[1], target=labels)
                loss = loss_1 + loss_2
            else:
                loss = loss_fn(input=outputs, target=labels)

        loss_value = loss.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training at rank {utils.get_rank()}", force=True)
            sys.exit(1)

        optimizer.zero_grad()
        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
                                parameters=model.parameters(), create_graph=is_second_order)
        loss_scale_value = loss_scaler.state_dict()["scale"]

        torch.cuda.synchronize()

        # masked-language-modeling accuracy: argmax over vocab vs target ids
        if isinstance(outputs, list):
            mlm_acc_1 = (outputs[0].max(-1)[1] == labels).float().mean().item()
            mlm_acc_2 = (outputs[1].max(-1)[1] == labels).float().mean().item()
            metric_logger.update(mlm_acc_1=mlm_acc_1)
            metric_logger.update(mlm_acc_2=mlm_acc_2)
            metric_logger.update(loss_1=loss_1.item())
            metric_logger.update(loss_2=loss_2.item())
            if log_writer is not None:
                log_writer.update(mlm_acc_1=mlm_acc_1, head="loss")
                log_writer.update(mlm_acc_2=mlm_acc_2, head="loss")
                log_writer.update(loss_1=loss_1.item(), head="loss")
                log_writer.update(loss_2=loss_2.item(), head="loss")
        else:
            mlm_acc = (outputs.max(-1)[1] == labels).float().mean().item()
            metric_logger.update(mlm_acc=mlm_acc)
            if log_writer is not None:
                log_writer.update(mlm_acc=mlm_acc, head="loss")

        metric_logger.update(loss=loss_value)
        metric_logger.update(loss_scale=loss_scale_value)
        # track min/max lr across param groups (layer-wise lr decay)
        min_lr = 10.
        max_lr = 0.
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group["lr"])
            max_lr = max(max_lr, group["lr"])

        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if group["weight_decay"] > 0:
                weight_decay_value = group["weight_decay"]
        metric_logger.update(weight_decay=weight_decay_value)
        metric_logger.update(grad_norm=grad_norm)

        if log_writer is not None:
            log_writer.update(loss=loss_value, head="loss")
            log_writer.update(loss_scale=loss_scale_value, head="opt")
            log_writer.update(lr=max_lr, head="opt")
            log_writer.update(min_lr=min_lr, head="opt")
            log_writer.update(weight_decay=weight_decay_value, head="opt")
            log_writer.update(grad_norm=grad_norm, head="opt")
            log_writer.set_step()

        if lr_scheduler is not None:
            lr_scheduler.step_update(start_steps + step)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
null
184,623
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os

from pathlib import Path

from timm.models import create_model
from optim_factory import create_optimizer

from datasets import build_beit_pretraining_dataset
from engine_for_pretraining import train_one_epoch
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
import modeling_pretrain
import modeling_vqkd


def get_args():
    """Build and parse the command-line arguments for BEiT pre-training.

    Returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser('BEiT pre-training script', add_help=False)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--epochs', default=300, type=int)
    parser.add_argument('--save_ckpt_freq', default=20, type=int)

    # tokenizer settings
    parser.add_argument("--tokenizer_weight", type=str)
    parser.add_argument("--tokenizer_model", type=str, default="vqkd_encoder_base_decoder_3x768x12_clip")

    # Model parameters
    parser.add_argument('--model', default='beit_base_patch16_224_8k_vocab', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--rel_pos_bias', action='store_true')
    parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
    parser.set_defaults(rel_pos_bias=True)
    parser.add_argument('--abs_pos_emb', action='store_true')
    parser.set_defaults(abs_pos_emb=False)
    parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
                        help="0.1 for base, 1e-5 for large. set 0 to disable layer scale")

    # block-wise masking configuration
    parser.add_argument('--num_mask_patches', default=75, type=int,
                        help='number of the visual tokens/patches need be masked')
    parser.add_argument('--max_mask_patches_per_block', type=int, default=None)
    parser.add_argument('--min_mask_patches_per_block', type=int, default=16)

    parser.add_argument('--input_size', default=224, type=int,
                        help='images input size for backbone')
    parser.add_argument('--second_input_size', default=224, type=int,
                        help='images input size for discrete vae')

    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')

    # cls-pretraining settings
    parser.add_argument('--early_layers', default=9, type=int,
                        help='early_layers, default 9 for base and 21 for large')
    parser.add_argument('--head_layers', default=2, type=int, help='head_layers')
    # NOTE(review): help text 'head_layers' looks copy-pasted from the
    # previous option — presumably meant to describe shared_lm_head.
    parser.add_argument('--shared_lm_head', default=True, type=utils.bool_flag, help='head_layers')

    # Tokenizer parameters
    parser.add_argument('--codebook_size', default=8192, type=int, help='number of codebook')
    # NOTE(review): help text duplicated from --codebook_size; this is the
    # codebook embedding dimension.
    parser.add_argument('--codebook_dim', default=32, type=int, help='number of codebook')

    # Optimizer parameters
    # NOTE(review): help string below is missing its closing ')'.
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw"')
    parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--weight_decay_end', type=float, default=None,
                        help="""Final value of the weight decay. We use a cosine schedule for WD. 
        (Set the same value with args.weight_decay to keep weight decay no change)""")

    parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min_lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')

    # warmup_steps takes precedence over warmup_epochs when >= 0 (see utils)
    parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')

    # Augmentation parameters
    parser.add_argument('--decoupling_aug', default=False, type=utils.bool_flag,
                        help="use decoupling aug for tokenizer and vit")
    parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    parser.add_argument('--train_interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--second_interpolation', type=str, default='lanczos',
                        help='Interpolation for discrete vae (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--min_crop_scale', type=float, default=0.08, metavar='PCT',
                        help='min_crop_scale (default: 0.08)')

    # Dataset parameters
    parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--eval_data_path', default='', type=str, help='dataset path')
    parser.add_argument('--data_set', default='image_folder', type=str, help='dataset path')
    parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')

    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None,
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--auto_resume', action='store_true')
    parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
    parser.set_defaults(auto_resume=True)

    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem', help='')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')

    return parser.parse_args()
null
184,624
import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from pathlib import Path from timm.models import create_model from optim_factory import create_optimizer from datasets import build_beit_pretraining_dataset from engine_for_pretraining import train_one_epoch from utils import NativeScalerWithGradNormCount as NativeScaler import utils import modeling_pretrain import modeling_vqkd def get_model(args): print(f"Creating model: {args.model}") if 'cls_pt' in args.model: model = create_model( args.model, pretrained=False, drop_path_rate=args.drop_path, drop_block_rate=None, use_shared_rel_pos_bias=args.rel_pos_bias, use_abs_pos_emb=args.abs_pos_emb, init_values=args.layer_scale_init_value, vocab_size=args.codebook_size, early_layers=args.early_layers, head_layers=args.head_layers, shared_lm_head=args.shared_lm_head, ) else: model = create_model( args.model, pretrained=False, drop_path_rate=args.drop_path, drop_block_rate=None, use_shared_rel_pos_bias=args.rel_pos_bias, use_abs_pos_emb=args.abs_pos_emb, init_values=args.layer_scale_init_value, vocab_size=args.codebook_size ) return model
null
184,625
import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from pathlib import Path from timm.models import create_model from optim_factory import create_optimizer from datasets import build_beit_pretraining_dataset from engine_for_pretraining import train_one_epoch from utils import NativeScalerWithGradNormCount as NativeScaler import utils import modeling_pretrain import modeling_vqkd def get_visual_tokenizer(args): print(f"Creating visual tokenizer: {args.tokenizer_model}") model = create_model( args.tokenizer_model, pretrained=True, pretrained_weight=args.tokenizer_weight, as_tokenzer=True, n_code=args.codebook_size, code_dim=args.codebook_dim, ).eval() return model
null
184,626
import torch
import numpy as np
from torch import nn, einsum
import torch.nn.functional as F
import math
from collections import OrderedDict
from functools import partial, reduce
from einops import rearrange

from timm.models.layers import trunc_normal_
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.registry import register_model

from modeling_finetune import VisionTransformer
from norm_ema_quantizer import NormEMAVectorQuantizer
import utils

from vqkd_teacher import clip, get_dino_vit_base


class VQKD(nn.Module):
    """Vector-Quantized Knowledge Distillation (VQ-KD) visual tokenizer.

    A ViT encoder maps an image to patch features, which are projected to a
    low-dimensional space and quantized against an EMA codebook; a ViT decoder
    then regresses the features of a frozen teacher (CLIP or DINO) from the
    quantized codes. Training minimizes the quantization loss plus a cosine
    reconstruction loss against the teacher features.
    """

    def __init__(self,
                 encoder_config,
                 decoder_config,
                 n_embed=8192,       # codebook size
                 embed_dim=32,       # codebook embedding dimension
                 decay=0.99,         # EMA decay for the codebook
                 process_type='default',
                 quantize_kmeans_init=True,
                 teacher_model_type='clip',
                 decoder_out_dim=512,
                 rec_loss_type='cosine',
                 **kwargs
                 ):
        super().__init__()
        print(kwargs)
        # decoder input channels must match the codebook embedding dim
        if decoder_config['in_chans'] != embed_dim:
            print(f"Rewrite the in_chans in decoder from {decoder_config['in_chans']} to {embed_dim}")
            decoder_config['in_chans'] = embed_dim

        # encoder & decode params
        print('Final encoder config', encoder_config)
        self.encoder = VisionTransformer(**encoder_config)

        print('Final decoder config', decoder_config)
        self.decoder = VisionTransformer(**decoder_config)

        self.quantize = NormEMAVectorQuantizer(
            n_embed=n_embed, embedding_dim=embed_dim, beta=1.0, kmeans_init=quantize_kmeans_init, decay=decay,
        )

        self.patch_size = encoder_config['patch_size']
        # spatial grid of tokens (H_tokens, W_tokens)
        self.token_shape = (encoder_config['img_size'] // self.patch_size,
                            encoder_config['img_size'] // self.patch_size)

        ## Teacher model setting
        # NOTE(review): ScalingLayerForClip / ScalingLayerForIM are referenced
        # here but not defined in this chunk — presumably defined elsewhere in
        # the module; confirm.
        self.teacher_model_type = teacher_model_type
        self.decoder_out_dim = decoder_out_dim

        if self.teacher_model_type == 'clip':
            self.scaling_layer = ScalingLayerForClip()
            self.teacher_model, _ = clip.load("ViT-B/16", device='cpu', jit=False)
            self.decoder_out_dim = 512
        elif self.teacher_model_type == 'dino':
            self.scaling_layer = ScalingLayerForIM()
            self.teacher_model = get_dino_vit_base()
            self.decoder_out_dim = 768
        else:
            # tokenizer-only mode: no distillation target needed
            self.teacher_model = None

        if self.teacher_model is not None:
            for param in self.teacher_model.parameters():
                param.requires_grad = False  # fix teacher_model model
            self.teacher_model.eval()
            self.teacher_input_size = kwargs.get('teacher_input_size', 224)

        # task layer
        self.encode_task_layer = nn.Sequential(
            nn.Linear(encoder_config['embed_dim'], encoder_config['embed_dim']),
            nn.Tanh(),
            nn.Linear(encoder_config['embed_dim'], embed_dim)  # for quantize
        )
        self.decode_task_layer = nn.Sequential(
            nn.Linear(decoder_config['embed_dim'], decoder_config['embed_dim']),
            nn.Tanh(),
            nn.Linear(decoder_config['embed_dim'], self.decoder_out_dim),
        )

        self.rec_loss_type = rec_loss_type

        print(f"process type for VQKD: {process_type}")
        self.process_type = process_type  # in ['default', 'dall-e']
        self.logit_laplace_eps = 0.1
        self.kwargs = kwargs

        self.encode_task_layer.apply(self._init_weights)
        self.decode_task_layer.apply(self._init_weights)

    def _init_weights(self, m):
        # timm-style init: trunc-normal linear weights, zero biases, unit LN
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def no_weight_decay(self):
        # parameters excluded from weight decay by the optimizer factory
        return {'quantize.embedding.weight', 'decoder.cls_token', 'decoder.pos_embed',
                'encoder.cls_token', 'encoder.pos_embed'}

    # NOTE(review): referenced below as ``self.device`` (attribute access, no
    # call) — presumably decorated with @property in the original source; the
    # decorator appears lost in extraction. Confirm against the upstream repo.
    def device(self):
        return self.decoder.cls_token.device

    def pre_process(self, data):
        """Map incoming images to the range/normalization the encoder expects."""
        if self.process_type == 'default':
            # TODO: modify for adapt
            data = data.to(self.device)
            if data.max() <= 1.:
                data = data * 255.
            data = data / 127.5 - 1.0  # rescale to [-1, 1]
        elif self.process_type == 'imagenet_norm':
            mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(self.device)[None, :, None, None]
            std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(self.device)[None, :, None, None]
            data = (data - mean) / std
        return data

    def get_number_of_tokens(self):
        return self.quantize.n_e

    def get_tokens(self, data, **kwargs):
        """Return the discrete token ids (and preprocessed input) for a batch."""
        data = self.pre_process(data)
        quantize, embed_ind, loss = self.encode(data)
        output = {}
        output['token'] = embed_ind.view(data.shape[0], -1)
        output['input_img'] = data

        return output

    def encode(self, x):
        """Encode images to (quantized features, codebook indices, quant loss)."""
        encoder_features = self.encoder(x, return_patch_tokens=True)

        # projection to codebook space runs in fp32 even under AMP
        with torch.cuda.amp.autocast(enabled=False):
            to_quantizer_features = self.encode_task_layer(
                encoder_features.type_as(self.encode_task_layer[-1].weight))

        N = to_quantizer_features.shape[1]
        h, w = int(math.sqrt(N)), int(math.sqrt(N))

        to_quantizer_features = rearrange(to_quantizer_features, 'b (h w) c -> b c h w', h=h, w=w)  # reshape for quantizer
        quantize, loss, embed_ind = self.quantize(to_quantizer_features)

        return quantize, embed_ind, loss

    def decode(self, quantize, **kwargs):
        # reshape tokens to feature maps for patch embed in decoder
        # quantize = rearrange(quantize, 'b (h w) c -> b c h w', h=self.token_shape[0], w=self.token_shape[1])
        decoder_features = self.decoder(quantize, return_patch_tokens=True)
        rec = self.decode_task_layer(decoder_features)
        return rec

    def get_codebook_indices(self, x, **kwargs):
        # for beit pre-training
        return self.get_tokens(x, **kwargs)['token']

    def get_regress_target(self, x, **kwargs):
        """Compute the frozen teacher's patch features (distillation target)."""
        norm_imgs = self.scaling_layer(x)
        if self.teacher_model_type == 'clip':
            # project CLIP patch tokens into its joint embedding space
            target = self.teacher_model.encode_image(norm_imgs, return_all_tokens=True) @ self.teacher_model.visual.proj
        elif self.teacher_model_type == 'dino':
            target = self.teacher_model.forward(norm_imgs, return_patch_tokens=True)
        else:
            raise NotImplementedError

        return target

    def calculate_rec_loss(self, rec, target):
        """Reconstruction loss between decoder output and teacher target."""
        if self.rec_loss_type == 'cosine':
            # 1 - cosine similarity, averaged over tokens and batch
            target = target / target.norm(dim=-1, keepdim=True)
            rec = rec / rec.norm(dim=-1, keepdim=True)
            rec_loss = (1 - (target * rec).sum(-1)).mean()
        else:
            raise NotImplementedError

        return rec_loss

    def forward(self, x, **kwargs):
        """
        x: shape [B, 3, H, W] in [0, 1]

        Returns (total loss, dict of detached scalar logs keyed by split).
        """
        x = self.pre_process(x)  # rescale to [-1, 1]

        target = self.get_regress_target(x, **kwargs)

        quantize, embed_ind, emb_loss = self.encode(x)
        xrec = self.decode(quantize)

        rec_loss = self.calculate_rec_loss(xrec, target)
        loss = emb_loss + rec_loss

        log = {}
        split = "train" if self.training else "val"
        log[f'{split}/quant_loss'] = emb_loss.detach().mean()
        log[f'{split}/rec_loss'] = rec_loss.detach().mean()
        log[f'{split}/total_loss'] = loss.detach().mean()

        return loss, log


def get_model_default_params():
    """Default ViT-Base kwargs shared by the VQ-KD encoder/decoder factories."""
    return dict(img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0.,
                attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
                init_values=0., use_abs_pos_emb=True, use_rel_pos_bias=False,
                use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001)


def vqkd_encoder_base_decoder_1x768x12_clip(pretrained=False, pretrained_weight=None, as_tokenzer=False,
                                            img_size=224, n_code=8192, code_dim=32, **kwargs):
    """VQ-KD with a ViT-Base encoder, a 1-layer decoder and a CLIP teacher.

    With ``as_tokenzer=True`` the teacher is skipped and pretrained weights
    (minus loss/teacher/scaling parameters) are loaded for inference.
    """
    encoder_config, decoder_config = get_model_default_params(), get_model_default_params()

    # encoder settings
    encoder_config['img_size'] = img_size
    encoder_config['num_classes'] = 0
    # decoder settings: operates on the token grid, one "pixel" per token
    decoder_config['img_size'] = img_size // decoder_config['patch_size']
    decoder_config['patch_size'] = 1
    decoder_config['in_chans'] = code_dim
    decoder_config['num_classes'] = 0
    decoder_config['depth'] = 1
    # teacher settings
    _ = kwargs.pop("teacher_model_type", "clip")
    # the string 'None' (not None) deliberately routes VQKD to its no-teacher branch
    teacher_model_type = 'clip' if not as_tokenzer else 'None'
    decoder_out_dim = 512

    model = VQKD(encoder_config, decoder_config, n_code, code_dim,
                 teacher_model_type=teacher_model_type, decoder_out_dim=decoder_out_dim, **kwargs)

    if as_tokenzer:
        assert pretrained
        assert pretrained_weight is not None

        if pretrained_weight.startswith('https'):
            weights = torch.hub.load_state_dict_from_url(pretrained_weight, map_location='cpu', check_hash=True)
        else:
            weights = torch.load(pretrained_weight, map_location='cpu')

        if 'model' in weights:
            weights = weights['model']
        else:
            weights = weights["state_dict"]

        # drop training-only parameters before loading
        keys = list(weights.keys())
        for k in keys:
            if k.startswith("loss") or k.startswith("teacher") or k.startswith("scaling"):
                del weights[k]
        model.load_state_dict(weights)
    return model
null
184,627
import torch
import numpy as np
from torch import nn, einsum
import torch.nn.functional as F
import math
from collections import OrderedDict
from functools import partial, reduce
from einops import rearrange

from timm.models.layers import trunc_normal_
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.registry import register_model

from modeling_finetune import VisionTransformer
from norm_ema_quantizer import NormEMAVectorQuantizer
import utils

from vqkd_teacher import clip, get_dino_vit_base


# NOTE(review): the method bodies of this VQKD class were stripped by the
# extraction that produced this chunk — only the signatures remain (not valid
# Python as-is). See the full VQKD definition earlier in the file.
class VQKD(nn.Module):
    def __init__(self,
                 encoder_config,
                 decoder_config,
                 n_embed=8192,
                 embed_dim=32,
                 decay=0.99,
                 process_type='default',
                 quantize_kmeans_init=True,
                 teacher_model_type='clip',
                 decoder_out_dim=512,
                 rec_loss_type='cosine',
                 **kwargs
                 ):
    def _init_weights(self, m):
    def no_weight_decay(self):
    def device(self):
    def pre_process(self, data):
    def get_number_of_tokens(self):
    def get_tokens(self, data, **kwargs):
    def encode(self, x):
    def decode(self, quantize, **kwargs):
    def get_codebook_indices(self, x, **kwargs):
    def get_regress_target(self, x, **kwargs):
    def calculate_rec_loss(self, rec, target):
    def forward(self, x, **kwargs):


def get_model_default_params():


def vqkd_encoder_base_decoder_3x768x12_clip(pretrained=False, pretrained_weight=None, as_tokenzer=False,
                                            img_size=224, n_code=8192, code_dim=32, **kwargs):
    """VQ-KD with a ViT-Base encoder, a 3-layer decoder and a CLIP teacher.

    Identical to the 1-layer variant except ``decoder_config['depth'] = 3``.
    With ``as_tokenzer=True`` the teacher is skipped and pretrained weights
    (minus loss/teacher/scaling parameters) are loaded for inference.
    """
    encoder_config, decoder_config = get_model_default_params(), get_model_default_params()

    # encoder settings
    encoder_config['img_size'] = img_size
    encoder_config['num_classes'] = 0
    # decoder settings: operates on the token grid, one "pixel" per token
    decoder_config['img_size'] = img_size // decoder_config['patch_size']
    decoder_config['patch_size'] = 1
    decoder_config['in_chans'] = code_dim
    decoder_config['num_classes'] = 0
    decoder_config['depth'] = 3
    # teacher settings
    _ = kwargs.pop("teacher_model_type", "clip")
    # the string 'None' (not None) deliberately routes VQKD to its no-teacher branch
    teacher_model_type = 'clip' if not as_tokenzer else 'None'
    decoder_out_dim = 512

    model = VQKD(encoder_config, decoder_config, n_code, code_dim,
                 teacher_model_type=teacher_model_type, decoder_out_dim=decoder_out_dim, **kwargs)

    if as_tokenzer:
        assert pretrained
        assert pretrained_weight is not None

        if pretrained_weight.startswith('https'):
            weights = torch.hub.load_state_dict_from_url(pretrained_weight, map_location='cpu', check_hash=True)
        else:
            weights = torch.load(pretrained_weight, map_location='cpu')

        if 'model' in weights:
            weights = weights['model']
        else:
            weights = weights["state_dict"]

        # drop training-only parameters before loading
        keys = list(weights.keys())
        for k in keys:
            if k.startswith("loss") or k.startswith("teacher") or k.startswith("scaling"):
                del weights[k]
        model.load_state_dict(weights)
    return model
null
184,628
import torch
import numpy as np
from torch import nn, einsum
import torch.nn.functional as F
import math
from collections import OrderedDict
from functools import partial, reduce
from einops import rearrange

from timm.models.layers import trunc_normal_
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.registry import register_model

from modeling_finetune import VisionTransformer
from norm_ema_quantizer import NormEMAVectorQuantizer
import utils
from vqkd_teacher import clip, get_dino_vit_base


class VQKD(nn.Module):
    """Vector-Quantized Knowledge Distillation tokenizer (BEiT v2 style).

    A ViT encoder maps images to patch features, which are projected to
    `embed_dim` and quantized by an EMA codebook; a ViT decoder regresses the
    quantized codes toward features from a frozen teacher (CLIP or DINO).
    """

    def __init__(self,
                 encoder_config,
                 decoder_config,
                 n_embed=8192,          # codebook size
                 embed_dim=32,          # codebook embedding dimension
                 decay=0.99,            # EMA decay for the quantizer
                 process_type='default',
                 quantize_kmeans_init=True,
                 teacher_model_type='clip',
                 decoder_out_dim=512,
                 rec_loss_type='cosine',
                 **kwargs
                 ):
        super().__init__()
        print(kwargs)
        # The decoder consumes quantized codes, so its input channels must
        # equal the codebook embedding dimension.
        if decoder_config['in_chans'] != embed_dim:
            print(f"Rewrite the in_chans in decoder from {decoder_config['in_chans']} to {embed_dim}")
            decoder_config['in_chans'] = embed_dim

        # encoder & decode params
        print('Final encoder config', encoder_config)
        self.encoder = VisionTransformer(**encoder_config)

        print('Final decoder config', decoder_config)
        self.decoder = VisionTransformer(**decoder_config)

        self.quantize = NormEMAVectorQuantizer(
            n_embed=n_embed, embedding_dim=embed_dim, beta=1.0, kmeans_init=quantize_kmeans_init, decay=decay,
        )

        self.patch_size = encoder_config['patch_size']
        # Spatial grid of tokens produced by the encoder (h, w).
        self.token_shape = (encoder_config['img_size'] // self.patch_size, encoder_config['img_size'] // self.patch_size)

        ## Teacher model setting
        self.teacher_model_type = teacher_model_type
        self.decoder_out_dim = decoder_out_dim

        # NOTE(review): ScalingLayerForClip / ScalingLayerForIM are referenced
        # but not defined in this excerpt — presumably per-teacher input
        # normalization layers; confirm against the full source.
        if self.teacher_model_type == 'clip':
            self.scaling_layer = ScalingLayerForClip()
            self.teacher_model, _ = clip.load("ViT-B/16", device='cpu', jit=False)
            self.decoder_out_dim = 512  # CLIP ViT-B/16 projection dim

        elif self.teacher_model_type == 'dino':
            self.scaling_layer = ScalingLayerForIM()
            self.teacher_model = get_dino_vit_base()
            self.decoder_out_dim = 768  # DINO ViT-B hidden dim

        else:
            self.teacher_model = None

        if self.teacher_model is not None:
            for param in self.teacher_model.parameters():
                param.requires_grad = False  # fix teacher_model model
            self.teacher_model.eval()
            self.teacher_input_size = kwargs.get('teacher_input_size', 224)

        # task layer
        self.encode_task_layer = nn.Sequential(
            nn.Linear(encoder_config['embed_dim'], encoder_config['embed_dim']),
            nn.Tanh(),
            nn.Linear(encoder_config['embed_dim'], embed_dim)  # for quantize
        )
        self.decode_task_layer = nn.Sequential(
            nn.Linear(decoder_config['embed_dim'], decoder_config['embed_dim']),
            nn.Tanh(),
            nn.Linear(decoder_config['embed_dim'], self.decoder_out_dim),
        )

        self.rec_loss_type = rec_loss_type

        print(f"process type for VQKD: {process_type}")
        self.process_type = process_type  # in ['default', 'dall-e']
        self.logit_laplace_eps = 0.1
        self.kwargs = kwargs

        self.encode_task_layer.apply(self._init_weights)
        self.decode_task_layer.apply(self._init_weights)

    def _init_weights(self, m):
        # Standard truncated-normal init for Linear, identity init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def no_weight_decay(self):
        # Parameters excluded from weight decay by the optimizer factory.
        return {'quantize.embedding.weight', 'decoder.cls_token', 'decoder.pos_embed',
                'encoder.cls_token', 'encoder.pos_embed'}

    # NOTE(review): `pre_process` accesses `self.device` as an attribute, which
    # only works if this is decorated with @property — the decorator was likely
    # lost in extraction; confirm against the full source.
    def device(self):
        return self.decoder.cls_token.device

    def pre_process(self, data):
        """Map input images into the value range the encoder expects."""
        if self.process_type == 'default':
            # TODO: modify for adapt
            data = data.to(self.device)
            if data.max() <= 1.:
                data = data * 255.
            # Rescale 0..255 to [-1, 1].
            data = data / 127.5 - 1.0
        elif self.process_type == 'imagenet_norm':
            mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(self.device)[None, :, None, None]
            std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(self.device)[None, :, None, None]
            data = (data - mean) / std
        return data

    def get_number_of_tokens(self):
        # Codebook size.
        return self.quantize.n_e

    def get_tokens(self, data, **kwargs):
        """Tokenize images: returns {'token': [B, h*w] code indices, 'input_img': preprocessed images}."""
        data = self.pre_process(data)
        quantize, embed_ind, loss = self.encode(data)
        output = {}
        output['token'] = embed_ind.view(data.shape[0], -1)
        output['input_img'] = data

        return output

    def encode(self, x):
        """Encode images to (quantized features, code indices, quantizer loss)."""
        encoder_features = self.encoder(x, return_patch_tokens=True)

        # Projection to codebook space is run in fp32 even under AMP.
        with torch.cuda.amp.autocast(enabled=False):
            to_quantizer_features = self.encode_task_layer(encoder_features.type_as(self.encode_task_layer[-1].weight))

        # Assumes a square token grid (h == w == sqrt(N)).
        N = to_quantizer_features.shape[1]
        h, w = int(math.sqrt(N)), int(math.sqrt(N))

        to_quantizer_features = rearrange(to_quantizer_features, 'b (h w) c -> b c h w', h=h, w=w)  # reshape for quantizer
        quantize, loss, embed_ind = self.quantize(to_quantizer_features)

        return quantize, embed_ind, loss

    def decode(self, quantize, **kwargs):
        """Decode quantized features to teacher-feature space."""
        # reshape tokens to feature maps for patch embed in decoder
        # quantize = rearrange(quantize, 'b (h w) c -> b c h w', h=self.token_shape[0], w=self.token_shape[1])
        decoder_features = self.decoder(quantize, return_patch_tokens=True)
        rec = self.decode_task_layer(decoder_features)

        return rec

    def get_codebook_indices(self, x, **kwargs):
        # for beit pre-training
        return self.get_tokens(x, **kwargs)['token']

    def get_regress_target(self, x, **kwargs):
        """Compute the frozen-teacher features the decoder must regress to."""
        norm_imgs = self.scaling_layer(x)
        if self.teacher_model_type == 'clip':
            target = self.teacher_model.encode_image(norm_imgs, return_all_tokens=True) @ self.teacher_model.visual.proj
        elif self.teacher_model_type == 'dino':
            target = self.teacher_model.forward(norm_imgs, return_patch_tokens=True)
        else:
            raise NotImplementedError

        return target

    def calculate_rec_loss(self, rec, target):
        """Reconstruction loss between decoder output and teacher target."""
        if self.rec_loss_type == 'cosine':
            # Cosine distance: 1 - cos(rec, target), averaged over tokens/batch.
            target = target / target.norm(dim=-1, keepdim=True)
            rec = rec / rec.norm(dim=-1, keepdim=True)
            rec_loss = (1 - (target * rec).sum(-1)).mean()
        else:
            raise NotImplementedError

        return rec_loss

    def forward(self, x, **kwargs):
        """
        x: shape [B, 3, H, W] in [0, 1]

        Returns (total loss, per-split log dict).
        """
        x = self.pre_process(x)  # rescale to [-1, 1]

        target = self.get_regress_target(x, **kwargs)

        quantize, embed_ind, emb_loss = self.encode(x)
        xrec = self.decode(quantize)

        rec_loss = self.calculate_rec_loss(xrec, target)
        loss = emb_loss + rec_loss

        log = {}
        split = "train" if self.training else "val"
        log[f'{split}/quant_loss'] = emb_loss.detach().mean()
        log[f'{split}/rec_loss'] = rec_loss.detach().mean()
        log[f'{split}/total_loss'] = loss.detach().mean()

        return loss, log


def get_model_default_params():
    """Default ViT-B/16 keyword arguments shared by encoder and decoder configs."""
    return dict(img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12,
                mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
                norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=0., use_abs_pos_emb=True,
                use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001)


# NOTE(review): this factory was presumably decorated with @register_model in
# the original source (timm registry is imported above); confirm.
def vqkd_encoder_base_decoder_1x768x12_dino(pretrained=False, pretrained_weight=None, as_tokenzer=False, img_size=224,
                                            n_code=8192, code_dim=32, **kwargs):
    """Build a VQ-KD model with a ViT-B encoder, 1-layer decoder and DINO teacher.

    When `as_tokenzer` is True, loads `pretrained_weight` (URL or path),
    strips loss/teacher/scaling entries, and skips instantiating the teacher.
    """
    encoder_config, decoder_config = get_model_default_params(), get_model_default_params()

    # encoder settings
    encoder_config['img_size'] = img_size
    encoder_config['num_classes'] = 0
    # decoder settings
    decoder_config['img_size'] = img_size // decoder_config['patch_size']
    decoder_config['patch_size'] = 1
    decoder_config['in_chans'] = code_dim
    decoder_config['num_classes'] = 0
    decoder_config['depth'] = 1
    # teacher settings
    _ = kwargs.pop("teacher_model_type", "dino")

    teacher_model_type = 'dino' if not as_tokenzer else 'None'
    decoder_out_dim = 768

    model = VQKD(encoder_config,
                 decoder_config,
                 n_code,
                 code_dim,
                 teacher_model_type=teacher_model_type,
                 decoder_out_dim=decoder_out_dim,
                 **kwargs)

    if as_tokenzer:
        assert pretrained
        assert pretrained_weight is not None

        if pretrained_weight.startswith('https'):
            weights = torch.hub.load_state_dict_from_url(pretrained_weight, map_location='cpu', check_hash=True)
        else:
            weights = torch.load(pretrained_weight, map_location='cpu')

        if 'model' in weights:
            weights = weights['model']
        else:
            weights = weights["state_dict"]

        keys = list(weights.keys())
        for k in keys:
            # Teacher / loss / scaling parameters are not part of the tokenizer.
            if k.startswith("loss") or k.startswith("teacher") or k.startswith("scaling"):
                del weights[k]
        model.load_state_dict(weights)
    return model
null
184,629
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import layoutlmft.data.datasets.funsd import transformers from layoutlmft.data import DataCollatorForKeyValueExtraction from layoutlmft.data.data_args import DataTrainingArguments from layoutlmft.models.model_args import ModelArguments from layoutlmft.trainers import FunsdTrainer as Trainer from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerFast, TrainingArguments, set_seed, RobertaConfig ) import torch from model import Layoutlmv1ForTokenClassification from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version def main(): def _mp_fn(index): # For xla_spawn (TPUs) main()
null
184,630
import torch
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList


def normalize_bbox(bbox, size):
    """Scale an (x0, y0, x1, y1) box from absolute pixels to the 0-1000 grid
    used by LayoutLM-style models; `size` is the page (width, height)."""
    width, height = size
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]
null
184,631
import torch
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList


def simplify_bbox(bbox):
    """Collapse a flattened polygon [x0, y0, x1, y1, x2, y2, ...] into a single
    rectangle [min_x, min_y, max_x, max_y].

    Mirrors the original slicing exactly: the minima scan every point, while
    the maxima scan bbox[2::2] / bbox[3::2], i.e. they skip the first point.
    """
    xs = bbox[0::2]
    ys = bbox[1::2]
    return [min(xs), min(ys), max(xs[1:]), max(ys[1:])]
null
184,632
import torch
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList


def merge_bbox(bbox_list):
    """Return the smallest (x0, y0, x1, y1) box enclosing every box in the list."""
    lefts, tops, rights, bottoms = zip(*bbox_list)
    return [min(lefts), min(tops), max(rights), max(bottoms)]
null
184,633
import torch
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList


def load_image(image_path):
    """Load an image as a 3x224x224 tensor (BGR channel order) and also return
    the original (width, height) of the image on disk."""
    raw = read_image(image_path, format="BGR")
    orig_h, orig_w = raw.shape[0], raw.shape[1]
    resize = TransformList([ResizeTransform(h=orig_h, w=orig_w, new_h=224, new_w=224)])
    resized = resize.apply_image(raw).copy()  # copy to make it writeable
    image = torch.tensor(resized).permute(2, 0, 1)  # HWC -> CHW
    return image, (orig_w, orig_h)
null
184,634
import os
import re
import numpy as np
from transformers.utils import logging

# NOTE(review): PREFIX_CHECKPOINT_DIR is not defined in this excerpt — it is
# presumably transformers' checkpoint-folder prefix ("checkpoint"); confirm.
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")


def get_last_checkpoint(folder):
    """Return the path of the checkpoint subdirectory of `folder` with the
    highest step number, or None when no checkpoint directory is present."""
    candidates = [
        entry
        for entry in os.listdir(folder)
        if _re_checkpoint.search(entry) is not None and os.path.isdir(os.path.join(folder, entry))
    ]
    if not candidates:
        return None
    newest = max(candidates, key=lambda name: int(_re_checkpoint.search(name).groups()[0]))
    return os.path.join(folder, newest)
null
184,635
import os
import re
import numpy as np
from transformers.utils import logging

logger = logging.get_logger(__name__)

# The provided code snippet includes necessary dependencies for implementing the
# `re_score` function. Write a Python function
# `def re_score(pred_relations, gt_relations, mode="strict")` to solve the
# following problem: Evaluate RE predictions ... Here is the function:


def re_score(pred_relations, gt_relations, mode="strict"):
    """Evaluate RE predictions

    Args:
        pred_relations (list) : list of list of predicted relations (several relations in each sentence)
        gt_relations (list) :   list of list of ground truth relations

            rel = { "head": (start_idx (inclusive), end_idx (exclusive)),
                    "tail": (start_idx (inclusive), end_idx (exclusive)),
                    "head_type": ent_type,
                    "tail_type": ent_type,
                    "type": rel_type}

        vocab (Vocab) :         dataset vocabulary
        mode (str) :            in 'strict' or 'boundaries'

    Returns:
        dict mapping each relation type (and "ALL") to tp/fp/fn/p/r/f1 counts,
        plus micro and Macro aggregates under "ALL".
    """
    assert mode in ["strict", "boundaries"]

    # Hard-coded binary label space {0, 1} with 0 as the "no relation" class,
    # so relation_types == [1].
    relation_types = [v for v in [0, 1] if not v == 0]
    scores = {rel: {"tp": 0, "fp": 0, "fn": 0} for rel in relation_types + ["ALL"]}

    # Count GT relations and Predicted relations
    n_sents = len(gt_relations)
    n_rels = sum([len([rel for rel in sent]) for sent in gt_relations])
    n_found = sum([len([rel for rel in sent]) for sent in pred_relations])

    # Count TP, FP and FN per type
    for pred_sent, gt_sent in zip(pred_relations, gt_relations):
        for rel_type in relation_types:
            # strict mode takes argument types into account
            if mode == "strict":
                pred_rels = {
                    (rel["head"], rel["head_type"], rel["tail"], rel["tail_type"])
                    for rel in pred_sent
                    if rel["type"] == rel_type
                }
                gt_rels = {
                    (rel["head"], rel["head_type"], rel["tail"], rel["tail_type"])
                    for rel in gt_sent
                    if rel["type"] == rel_type
                }

            # boundaries mode only takes argument spans into account
            elif mode == "boundaries":
                pred_rels = {(rel["head"], rel["tail"]) for rel in pred_sent if rel["type"] == rel_type}
                gt_rels = {(rel["head"], rel["tail"]) for rel in gt_sent if rel["type"] == rel_type}

            # Set algebra gives exact-match counts per sentence.
            scores[rel_type]["tp"] += len(pred_rels & gt_rels)
            scores[rel_type]["fp"] += len(pred_rels - gt_rels)
            scores[rel_type]["fn"] += len(gt_rels - pred_rels)

    # Compute per entity Precision / Recall / F1
    for rel_type in scores.keys():
        if scores[rel_type]["tp"]:
            scores[rel_type]["p"] = scores[rel_type]["tp"] / (scores[rel_type]["fp"] + scores[rel_type]["tp"])
            scores[rel_type]["r"] = scores[rel_type]["tp"] / (scores[rel_type]["fn"] + scores[rel_type]["tp"])
        else:
            scores[rel_type]["p"], scores[rel_type]["r"] = 0, 0

        if not scores[rel_type]["p"] + scores[rel_type]["r"] == 0:
            scores[rel_type]["f1"] = (
                2 * scores[rel_type]["p"] * scores[rel_type]["r"] / (scores[rel_type]["p"] + scores[rel_type]["r"])
            )
        else:
            scores[rel_type]["f1"] = 0

    # Compute micro F1 Scores
    tp = sum([scores[rel_type]["tp"] for rel_type in relation_types])
    fp = sum([scores[rel_type]["fp"] for rel_type in relation_types])
    fn = sum([scores[rel_type]["fn"] for rel_type in relation_types])

    if tp:
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = 2 * precision * recall / (precision + recall)
    else:
        precision, recall, f1 = 0, 0, 0

    scores["ALL"]["p"] = precision
    scores["ALL"]["r"] = recall
    scores["ALL"]["f1"] = f1
    scores["ALL"]["tp"] = tp
    scores["ALL"]["fp"] = fp
    scores["ALL"]["fn"] = fn

    # Compute Macro F1 Scores (unweighted mean over relation types)
    scores["ALL"]["Macro_f1"] = np.mean([scores[ent_type]["f1"] for ent_type in relation_types])
    scores["ALL"]["Macro_p"] = np.mean([scores[ent_type]["p"] for ent_type in relation_types])
    scores["ALL"]["Macro_r"] = np.mean([scores[ent_type]["r"] for ent_type in relation_types])

    logger.info(f"RE Evaluation in *** {mode.upper()} *** mode")

    logger.info(
        "processed {} sentences with {} relations; found: {} relations; correct: {}.".format(
            n_sents, n_rels, n_found, tp
        )
    )
    logger.info(
        "\tALL\t TP: {};\tFP: {};\tFN: {}".format(scores["ALL"]["tp"], scores["ALL"]["fp"], scores["ALL"]["fn"])
    )
    logger.info("\t\t(m avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (micro)".format(precision, recall, f1))
    logger.info(
        "\t\t(M avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (Macro)\n".format(
            scores["ALL"]["Macro_p"], scores["ALL"]["Macro_r"], scores["ALL"]["Macro_f1"]
        )
    )

    for rel_type in relation_types:
        logger.info(
            "\t{}: \tTP: {};\tFP: {};\tFN: {};\tprecision: {:.2f};\trecall: {:.2f};\tf1: {:.2f};\t{}".format(
                rel_type,
                scores[rel_type]["tp"],
                scores[rel_type]["fp"],
                scores[rel_type]["fn"],
                scores[rel_type]["p"],
                scores[rel_type]["r"],
                scores[rel_type]["f1"],
                # Last field: number of predictions made for this type (tp + fp).
                scores[rel_type]["tp"] + scores[rel_type]["fp"],
            )
        )

    return scores
Evaluate RE predictions Args: pred_relations (list) : list of list of predicted relations (several relations in each sentence) gt_relations (list) : list of list of ground truth relations rel = { "head": (start_idx (inclusive), end_idx (exclusive)), "tail": (start_idx (inclusive), end_idx (exclusive)), "head_type": ent_type, "tail_type": ent_type, "type": rel_type} vocab (Vocab) : dataset vocabulary mode (str) : in 'strict' or 'boundaries'
184,636
import math

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

import detectron2
from detectron2.modeling import META_ARCH_REGISTRY

from transformers import PreTrainedModel
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging

from ...modules.decoders.re import REDecoder
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config


def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Map integer relative positions to T5-style bucket ids.

    Small distances (below half the per-direction bucket count) get one bucket
    each; larger distances share logarithmically sized buckets up to
    `max_distance`. In bidirectional mode half the buckets encode the sign.
    """
    if bidirectional:
        num_buckets //= 2
        # Positive offsets land in the upper half of the bucket range.
        sign_offset = (relative_position > 0).long() * num_buckets
        distance = torch.abs(relative_position)
    else:
        sign_offset = 0
        # Unidirectional: clamp forward offsets to zero.
        distance = torch.max(-relative_position, torch.zeros_like(relative_position))
    # now `distance` is in the range [0, inf)

    # half of the buckets are for exact increments in positions
    exact_limit = num_buckets // 2

    # The other half of the buckets are for logarithmically bigger bins in
    # positions up to max_distance.
    log_bucket = exact_limit + (
        torch.log(distance.float() / exact_limit)
        / math.log(max_distance / exact_limit)
        * (num_buckets - exact_limit)
    ).to(torch.long)
    log_bucket = torch.min(log_bucket, torch.full_like(log_bucket, num_buckets - 1))

    return sign_offset + torch.where(distance < exact_limit, distance, log_bucket)
null
184,637
import math

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

import detectron2
from detectron2.modeling import META_ARCH_REGISTRY

from transformers import PreTrainedModel
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging

from ...modules.decoders.re import REDecoder
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config


def my_convert_sync_batchnorm(module, process_group=None):
    # same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d`
    #
    # Recursively replaces every BatchNorm (including detectron2's frozen
    # variant) inside `module` with torch.nn.SyncBatchNorm, copying weights and
    # running statistics. Returns the converted module; the input `module`
    # should not be used afterwards.
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        # Plain BatchNorm: defer to the stock torch converter (handles children too).
        return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
    module_output = module
    if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
        # Rebuild a trainable SyncBatchNorm and copy the frozen weights/stats.
        module_output = torch.nn.SyncBatchNorm(
            num_features=module.num_features,
            eps=module.eps,
            affine=True,
            track_running_stats=True,
            process_group=process_group,
        )
        module_output.weight = torch.nn.Parameter(module.weight)
        module_output.bias = torch.nn.Parameter(module.bias)
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
        # Statistics were frozen, so restart the batch counter at zero.
        module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
    for name, child in module.named_children():
        # Convert children and reattach them on the (possibly new) parent.
        module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
    del module
    return module_output
null
184,638
def add_layoutlmv2_config(cfg):
    """Populate a detectron2 config with the visual-backbone settings used by
    LayoutLMv2 (ResNeXt-101 FPN Mask R-CNN). Mutates `cfg` in place."""
    _C = cfg
    # -----------------------------------------------------------------------------
    # Config definition
    # -----------------------------------------------------------------------------
    _C.MODEL.MASK_ON = True

    # When using pre-trained models in Detectron1 or any MSRA models,
    # std has been absorbed into its conv1 weights, so the std needs to be set 1.
    # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
    _C.MODEL.PIXEL_STD = [57.375, 57.120, 58.395]

    # ---------------------------------------------------------------------------- #
    # Backbone options
    # ---------------------------------------------------------------------------- #
    _C.MODEL.BACKBONE.NAME = "build_resnet_fpn_backbone"

    # ---------------------------------------------------------------------------- #
    # FPN options
    # ---------------------------------------------------------------------------- #
    # Names of the input feature maps to be used by FPN
    # They must have contiguous power of 2 strides
    # e.g., ["res2", "res3", "res4", "res5"]
    _C.MODEL.FPN.IN_FEATURES = ["res2", "res3", "res4", "res5"]

    # ---------------------------------------------------------------------------- #
    # Anchor generator options
    # ---------------------------------------------------------------------------- #
    # Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
    # Format: list[list[float]]. SIZES[i] specifies the list of sizes
    # to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true,
    # or len(SIZES) == 1 is true and size list SIZES[0] is used for all
    # IN_FEATURES.
    _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32], [64], [128], [256], [512]]

    # ---------------------------------------------------------------------------- #
    # RPN options
    # ---------------------------------------------------------------------------- #
    # Names of the input feature maps to be used by RPN
    # e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
    _C.MODEL.RPN.IN_FEATURES = ["p2", "p3", "p4", "p5", "p6"]
    # Number of top scoring RPN proposals to keep before applying NMS
    # When FPN is used, this is *per FPN level* (not total)
    _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 2000
    _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 1000
    # Number of top scoring RPN proposals to keep after applying NMS
    # When FPN is used, this limit is applied per level and then again to the union
    # of proposals from all levels
    # NOTE: When FPN is used, the meaning of this config is different from Detectron1.
    # It means per-batch topk in Detectron1, but per-image topk here.
    # See the "find_top_rpn_proposals" function for details.
    _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 1000
    _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000

    # ---------------------------------------------------------------------------- #
    # ROI HEADS options
    # ---------------------------------------------------------------------------- #
    _C.MODEL.ROI_HEADS.NAME = "StandardROIHeads"
    # Number of foreground classes
    _C.MODEL.ROI_HEADS.NUM_CLASSES = 5
    # Names of the input feature maps to be used by ROI heads
    # Currently all heads (box, mask, ...) use the same input feature map list
    # e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
    _C.MODEL.ROI_HEADS.IN_FEATURES = ["p2", "p3", "p4", "p5"]

    # ---------------------------------------------------------------------------- #
    # Box Head
    # ---------------------------------------------------------------------------- #
    # C4 don't use head name option
    # Options for non-C4 models: FastRCNNConvFCHead,
    _C.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
    _C.MODEL.ROI_BOX_HEAD.NUM_FC = 2
    _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14

    # ---------------------------------------------------------------------------- #
    # Mask Head
    # ---------------------------------------------------------------------------- #
    _C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
    _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 4  # The number of convs in the mask head
    _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 7

    # ---------------------------------------------------------------------------- #
    # ResNe[X]t options (ResNets = {ResNet, ResNeXt}
    # Note that parts of a resnet may be used for both the backbone and the head
    # These options apply to both
    # ---------------------------------------------------------------------------- #
    _C.MODEL.RESNETS.DEPTH = 101
    _C.MODEL.RESNETS.SIZES = [[32], [64], [128], [256], [512]]
    _C.MODEL.RESNETS.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
    _C.MODEL.RESNETS.OUT_FEATURES = ["res2", "res3", "res4", "res5"]  # res4 for C4 backbone, res2..5 for FPN backbone

    # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
    _C.MODEL.RESNETS.NUM_GROUPS = 32

    # Baseline width of each group.
    # Scaling this parameters will scale the width of all bottleneck layers.
    _C.MODEL.RESNETS.WIDTH_PER_GROUP = 8

    # Place the stride 2 conv on the 1x1 filter
    # Use True only for the original MSRA ResNet; use False for C2 and Torch models
    _C.MODEL.RESNETS.STRIDE_IN_1X1 = False
null
184,639
import random
import numpy as np
import torch
import os
import shutil
import sys

# The provided code snippet includes necessary dependencies for implementing the
# `set_seed` function. Write a Python function `def set_seed(args)` to solve the
# following problem: Set seed for reproducibility
# Here is the function:


def set_seed(args):
    '''Seed the `random`, NumPy and torch RNGs from `args.seed` for reproducibility.'''
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(args.seed)
    # CUDA seeding was intentionally left disabled in the original code:
    # if args.n_gpu > 0:
    #     torch.cuda.manual_seed_all(args.seed)
Set seed for reproducibility
184,640
import random
import numpy as np
import torch
import os
import shutil
import sys

# The provided code snippet includes necessary dependencies for implementing the
# `set_exp_folder` function. Write a Python function `def set_exp_folder(args)`
# to solve the following problem: Create a folder to store experimental results
# e.g., checkpoints or log
# Here is the function:


def set_exp_folder(args):
    '''Create <args.output_dir>/output/<args.exp_name> to store experimental
    results, e.g., checkpoints or log.

    If the experiment folder already exists, it is removed first when
    args.overwrite_output_dir is set; otherwise a FileExistsError is raised.
    '''
    os.makedirs(os.path.join(args.output_dir, 'output'), exist_ok=True)
    exp_path = os.path.join(args.output_dir, 'output', args.exp_name)
    # Bug fix: the existence check and removal previously used the cwd-relative
    # path os.path.join('output', args.exp_name) while the folder itself was
    # created under args.output_dir, so stale experiments were never detected
    # (nor removed) when output_dir != '.'. Use the same path throughout.
    if os.path.exists(exp_path):
        if not args.overwrite_output_dir:
            # Raise instead of `assert False`: asserts are stripped under -O.
            raise FileExistsError(
                'The exp_name is already used. Please modify the experiment name '
                'or use --overwrite_output_dir')
        print('Remove original directories.')
        shutil.rmtree(exp_path)
        print('Remove successfully.')
    os.makedirs(exp_path, exist_ok=True)
    print(f'Path [{exp_path}] has been created')
Create a folder to store experimental results e.g., checkpoints or log
184,641
import random
import numpy as np
import torch
import os
import shutil
import sys

# The provided code snippet includes necessary dependencies for implementing the
# `check_screen` function. Write a Python function `def check_screen()` to solve
# the following problem: Check whether the experiment is in screen
# Here is the function:


def check_screen():
    '''Print whether the current process appears to run inside a GNU screen
    session, detected via the STY environment variable reported by the shell.'''
    session_name = ''.join(os.popen('echo $STY').readlines())
    if session_name.strip():
        print(f'**** Screen Name : {session_name} ****')
    else:
        print("**** Attention Please! The code is not executed in Screen! ****")
Check whether the experiment is in screen
184,642
from genericpath import exists import os import torch.nn as nn import torch import logging from tqdm import tqdm, trange import timeit import collections import json import math from bs4 import BeautifulSoup from copy import deepcopy import string import re from torch.utils.tensorboard import SummaryWriter from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler) from transformers import ( BasicTokenizer, ) from transformers import ( AdamW, get_linear_schedule_with_warmup, ) def reorganize_batch_web(args, batch_web): dic = {} dic['input_ids'] = batch_web[0].cuda() dic['attention_mask'] = batch_web[1].cuda() dic['token_type_ids'] = batch_web[2].cuda() dic['xpath_tags_seq'] = batch_web[3].cuda() dic['xpath_subs_seq'] = batch_web[4].cuda() dic['start_positions'] = batch_web[5].cuda() dic['end_positions'] = batch_web[6].cuda() if 'box' in args.embedding_mode: dic['bbox'] = batch_web[7].cuda() # new added dic['embedding_mode'] = args.embedding_mode return dic def evaluate(args, dataset_web, examples, features, model, tokenizer, step=0): gpu_nums = torch.cuda.device_count() batch = args.batch_per_gpu * gpu_nums eval_sampler = SequentialSampler(dataset_web) eval_dataloader = DataLoader(dataset_web, sampler=eval_sampler, batch_size=batch, num_workers=8) # Eval! 
logging.info("***** Running evaluation *****") logging.info(" Num examples = %d", len(dataset_web)) logging.info(" Batch size = %d", batch) model = model.cuda() all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.cuda() for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2], 'xpath_tags_seq': batch[4], 'xpath_subs_seq': batch[5], } feature_indices = batch[3] outputs = model(**inputs) for i, feature_index in enumerate(feature_indices): eval_feature = features[feature_index.item()] unique_id = int(eval_feature.unique_id) result = RawResult(unique_id=unique_id, start_logits=to_list(outputs[0][i]), end_logits=to_list(outputs[1][i])) all_results.append(result) eval_time = timeit.default_timer() - start_time logging.info(" Evaluation done in total %f secs (%f sec per example)", eval_time, eval_time / len(dataset_web)) # Compute predictions # output_dir = os.path.join(args.output_dir, 'output', args.exp_name, f'step-{global_step}') output_prediction_file = os.path.join(args.output_dir,"output", args.exp_name, f"predictions_{step}.json") output_tag_prediction_file = os.path.join(args.output_dir,"output", args.exp_name, f"tag_predictions_{step}.json") output_nbest_file = os.path.join(args.output_dir,"output", args.exp_name, f"nbest_predictions_{step}.json") output_result_file = os.path.join(args.output_dir,"output", args.exp_name, f"qas_eval_results_{step}.json") output_file = os.path.join(args.output_dir,"output", args.exp_name, f"eval_matrix_results_{step}") write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_tag_prediction_file, output_nbest_file, args.verbose_logging, tokenizer) # Evaluate evaluate_options = EvalOpts(data_file=args.web_eval_file, root_dir=args.root_dir, pred_file=output_prediction_file, 
tag_pred_file=output_tag_prediction_file, result_file=output_result_file, out_file=output_file) results = evaluate_on_squad(evaluate_options) return results def train(args, dataset_web, model, tokenizer): # torch.cuda.set_device(args.local_rank) # Log when executing on clusters try: from azureml.core.run import Run aml_run = Run.get_context() except: aml_run = None # Open tensorboard writer = SummaryWriter(f'{args.output_dir}/output/{args.exp_name}') # Count batch gpu_nums = torch.cuda.device_count() batch = args.batch_per_gpu * gpu_nums dataloader_web = DataLoader( dataset_web, batch_size=batch, num_workers=args.num_workers, pin_memory=False, shuffle=True, ) # Get warmup steps total_step = args.epoch * len(dataloader_web) warmup_steps = int(args.warmup_ratio * total_step) # Prepare optimizers no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_step ) # Transfer the parameters to cuda model = model.cuda() # Prepare fp16 if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." 
) model, optimizer = amp.initialize( model, optimizer, opt_level=args.fp16_opt_level ) logging.info('Successfully load fp16 mode') # Parallel or Distribute if gpu_nums > 1: model = torch.nn.DataParallel(model) # Record some training info logging.info("***** Running training *****") # logging.info(" Num examples in dataset_doc = %d", len(dataset_doc)) logging.info(" Num examples in dataset_web = %d", len(dataset_web)) # logging.info(" Num steps for each epoch for doc = %d", len(dataloader_doc)) logging.info(" Num steps for each epoch for web = %d", len(dataloader_web)) logging.info(" Num Epochs = %d", args.epoch) logging.info( " Instantaneous batch size per GPU = %d", args.batch_per_gpu ) logging.info(" Total optimization steps = %d", total_step) # Start training model.zero_grad() train_iterator = trange( 0, int(args.epoch), desc="Epoch", ) global_step = 0 for now_epoch, _ in enumerate(tqdm(train_iterator, desc="Iteration")): # tqdm for epoch # epoch_iterator_doc = iter(dataloader_doc) epoch_iterator_web = iter(dataloader_web) min_step = len(epoch_iterator_web) for now_step in tqdm(range(min_step), desc="Iteration"): # tqdm for step # batch_doc = epoch_iterator_doc.next() batch_web = epoch_iterator_web.next() batch_web = reorganize_batch_web(args, batch_web) model.train() # loss_doc = model(**batch_doc)[0] loss_web = model(**batch_web)[0] loss = loss_web if gpu_nums > 1: loss = loss.mean() # loss_doc = loss_doc.mean() loss_web = loss_web.mean() if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm ) else: torch.nn.utils.clip_grad_norm_( model.parameters(), args.max_grad_norm ) if global_step % args.accumulation == 0: optimizer.step() model.zero_grad() scheduler.step() global_step += 1 if global_step % args.log_step == 0: logging.info(f'epoch: {now_epoch} | step: {now_step+1} | total_step: {global_step} | loss: 
{loss} | lr: {scheduler.get_lr()[0]}') writer.add_scalar('loss', loss, global_step//args.log_step) # writer.add_scalar('loss_doc', loss_doc, global_step//args.log_step) writer.add_scalar('loss_web', loss_web, global_step//args.log_step) writer.add_scalar('lr', scheduler.get_lr()[0], global_step//args.log_step) if aml_run is not None: aml_run.log('loss', loss.item()) # aml_run.log('loss_doc', loss_doc.item()) aml_run.log('loss_web', loss_web.item()) aml_run.log('lr', scheduler.get_lr()[0]) if global_step % args.save_step == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'output', args.exp_name, f'step-{global_step}') os.makedirs(output_dir, exist_ok=True) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logging.info("Saving model checkpoint to %s", output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt") ) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logging.info( "Saving optimizer and scheduler states to %s", output_dir ) if global_step % 1000 == 0: # eval print('Start eval!') from data.datasets.websrc import get_websrc_dataset dataset_web, examples, features = get_websrc_dataset(args, tokenizer, evaluate=True, output_examples=True) evaluate(args, dataset_web, examples, features, model, tokenizer, global_step)
null
184,643
import os import torch import collections import logging from tqdm import tqdm, trange import json import bs4 from os import path as osp from bs4 import BeautifulSoup as bs from torch.utils.data import Dataset import networkx as nx from lxml import etree import pickle from transformers import BertTokenizer import argparse def get_xpath4tokens(html_fn: str, unique_tids: set): xpath_map = {} tree = etree.parse(html_fn, etree.HTMLParser()) nodes = tree.xpath('//*') for node in nodes: tid = node.attrib.get("tid") if int(tid) in unique_tids: xpath_map[int(tid)] = tree.getpath(node) xpath_map[len(nodes)] = "/html" xpath_map[len(nodes) + 1] = "/html" return xpath_map
null
184,644
import os import torch import collections import logging from tqdm import tqdm, trange import json import bs4 from os import path as osp from bs4 import BeautifulSoup as bs from torch.utils.data import Dataset import networkx as nx from lxml import etree import pickle from transformers import BertTokenizer import argparse def load_and_cache_examples(args, tokenizer, max_depth=50, evaluate=False, output_examples=False): r""" Load and process the raw data. """ if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, # and the others will use the cache # Load data features from cache or dataset file input_file = args.web_eval_file if evaluate else args.web_train_file cached_features_file = os.path.join(args.cache_dir, 'cached_{}_{}_{}_{}_{}_{}'.format( 'dev' if evaluate else 'train', "markuplm", str(args.max_seq_length), str(max_depth), args.web_num_features, args.model_type )) if not os.path.exists(os.path.dirname(cached_features_file)): os.makedirs(os.path.dirname(cached_features_file)) if os.path.exists(cached_features_file) and not args.overwrite_cache: print("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) if output_examples: examples, tag_list = read_squad_examples(args, input_file=input_file, root_dir=args.web_root_dir, is_training=not evaluate, tokenizer=tokenizer, simplify=True, max_depth=max_depth ) else: examples = None else: print("Creating features from dataset file at %s", input_file) examples, _ = read_squad_examples(args, input_file=input_file, root_dir=args.web_root_dir, is_training=not evaluate, tokenizer=tokenizer, simplify=False, max_depth=max_depth) features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, cls_token=tokenizer.cls_token, 
sep_token=tokenizer.sep_token, pad_token=tokenizer.pad_token_id, sequence_a_segment_id=0, sequence_b_segment_id=0, max_depth=max_depth) if args.local_rank in [-1, 0] and args.web_save_features: print("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, # and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) all_xpath_tags_seq = torch.tensor([f.xpath_tags_seq for f in features], dtype=torch.long) all_xpath_subs_seq = torch.tensor([f.xpath_subs_seq for f in features], dtype=torch.long) all_xpath_box_seq = torch.tensor([f.xpath_box_seq for f in features], dtype=torch.long) if evaluate: all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long) dataset = StrucDataset(all_input_ids, all_input_mask, all_segment_ids, all_feature_index, all_xpath_tags_seq, all_xpath_subs_seq, all_xpath_box_seq) else: all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) dataset = StrucDataset(all_input_ids, all_input_mask, all_segment_ids, all_xpath_tags_seq, all_xpath_subs_seq, all_start_positions, all_end_positions, all_xpath_box_seq) if output_examples: dataset = (dataset, examples, features) return dataset def get_websrc_dataset(args, tokenizer, evaluate=False, output_examples=False): if not evaluate: websrc_dataset = load_and_cache_examples(args, tokenizer, evaluate=evaluate, output_examples=False) return websrc_dataset else: dataset, examples, features = load_and_cache_examples(args, 
tokenizer, evaluate=evaluate, output_examples=True) return dataset, examples, features
null
184,645
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets from datasets import load_dataset, load_metric import transformers from trainer_qa import QuestionAnsweringTrainer from transformers import ( AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PreTrainedTokenizerFast, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version from utils_qa import postprocess_qa_predictions def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
# set_seed(training_args.seed) set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. Checkout the big table of models at" " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet" " this requirement" ) # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names else: column_names = raw_datasets["test"].column_names question_column_name = "question" if "question" in column_names else column_names[0] context_column_name = "context" if "context" in column_names else column_names[1] answer_column_name = "answers" if "answers" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
pad_on_right = tokenizer.padding_side == "right" if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) # Training preprocessing def prepare_train_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop("offset_mapping") # Let's label those examples! 
tokenized_examples["start_positions"] = [] tokenized_examples["end_positions"] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples["input_ids"][i] cls_index = input_ids.index(tokenizer.cls_token_id) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers["answer_start"]) == 0: tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Start/end character index of the answer in the text. start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != (1 if pad_on_right else 0): token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != (1 if pad_on_right else 0): token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). 
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples["start_positions"].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples["end_positions"].append(token_end_index + 1) return tokenized_examples if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: # We will select sample from whole data if argument is specified max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Create train feature from dataset with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if data_args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Validation preprocessing def prepare_validation_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. 
This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
tokenized_examples["offset_mapping"][i] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_examples = raw_datasets["validation"] if data_args.max_eval_samples is not None: # We will select sample from whole data max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) eval_examples = eval_examples.select(range(max_eval_samples)) # Validation Feature Creation with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if data_args.max_eval_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_examples = raw_datasets["test"] if data_args.max_predict_samples is not None: # We will select sample from whole data predict_examples = predict_examples.select(range(data_args.max_predict_samples)) # Predict Feature Creation with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) if data_args.max_predict_samples is not None: # During Feature creation dataset samples might 
increase, we will select required samples again max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Data collator # We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data # collator. data_collator = ( default_data_collator if data_args.pad_to_max_length else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) ) # Post-processing: def post_processing_function(examples, features, predictions, stage="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. predictions = postprocess_qa_predictions( examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, null_score_diff_threshold=data_args.null_score_diff_threshold, output_dir=training_args.output_dir, log_level=log_level, prefix=stage, ) # Format the result to the format the metric expects. 
if data_args.version_2_with_negative: formatted_predictions = [ {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() ] else: formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad") def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Initialize our Trainer trainer = QuestionAnsweringTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, eval_examples=eval_examples if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, post_process_function=post_processing_function, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) 
trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Prediction if training_args.do_predict: logger.info("*** Predict ***") results = trainer.predict(predict_dataset, predict_examples) metrics = results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main()
null
184,648
from __future__ import absolute_import, division, print_function import json import logging import math import collections from io import open from os import path as osp from tqdm import tqdm import bs4 from bs4 import BeautifulSoup as bs from transformers.models.bert.tokenization_bert import BasicTokenizer, whitespace_tokenize from torch.utils.data import Dataset from lxml import etree from markuplmft.data.tag_utils import tags_dict The provided code snippet includes necessary dependencies for implementing the `html_escape` function. Write a Python function `def html_escape(html)` to solve the following problem: r""" replace the special expressions in the html file for specific punctuation. Here is the function: def html_escape(html): r""" replace the special expressions in the html file for specific punctuation. """ html = html.replace('&quot;', '"') html = html.replace('&amp;', '&') html = html.replace('&lt;', '<') html = html.replace('&gt;', '>') html = html.replace('&nbsp;', ' ') return html
r""" replace the special expressions in the html file for specific punctuation.
184,649
from __future__ import absolute_import, division, print_function import json import logging import math import collections from io import open from os import path as osp from tqdm import tqdm import bs4 from bs4 import BeautifulSoup as bs from transformers.models.bert.tokenization_bert import BasicTokenizer, whitespace_tokenize from torch.utils.data import Dataset from lxml import etree from markuplmft.data.tag_utils import tags_dict def get_xpath4tokens(html_fn: str, unique_tids: set): xpath_map = {} tree = etree.parse(html_fn, etree.HTMLParser()) nodes = tree.xpath('//*') for node in nodes: tid = node.attrib.get("tid") if int(tid) in unique_tids: xpath_map[int(tid)] = tree.getpath(node) xpath_map[len(nodes)] = "/html" xpath_map[len(nodes) + 1] = "/html" return xpath_map
null
184,650
from __future__ import absolute_import, division, print_function import argparse import logging import os import random import glob import timeit import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler) from torch.utils.data.distributed import DistributedSampler from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import ( WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, ) from markuplmft.models.markuplm import MarkupLMConfig, MarkupLMTokenizer, MarkupLMTokenizerFast, MarkupLMForQuestionAnswering from utils import StrucDataset from utils import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions) from utils_evaluate import EvalOpts, main as evaluate_on_squad logger = logging.getLogger(__name__) def set_seed(args): r""" Fix the random seed for reproduction. """ random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def evaluate(args, model, tokenizer, max_depth, prefix=""): r""" Evaluate the model """ dataset, examples, features = load_and_cache_examples(args, tokenizer, max_depth=max_depth, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size , num_workers=args.dataloader_workers) # multi-gpu evaluate if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2], 'xpath_tags_seq': batch[4], 'xpath_subs_seq': batch[5], } feature_indices = batch[3] outputs = model(**inputs) for i, feature_index in enumerate(feature_indices): eval_feature = features[feature_index.item()] unique_id = int(eval_feature.unique_id) result = RawResult(unique_id=unique_id, start_logits=to_list(outputs[0][i]), end_logits=to_list(outputs[1][i])) all_results.append(result) eval_time = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", eval_time, eval_time / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_tag_prediction_file = os.path.join(args.output_dir, "tag_predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) output_result_file = os.path.join(args.output_dir, "qas_eval_results_{}.json".format(prefix)) output_file = os.path.join(args.output_dir, "eval_matrix_results_{}".format(prefix)) write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_tag_prediction_file, output_nbest_file, args.verbose_logging, tokenizer) # Evaluate evaluate_options = EvalOpts(data_file=args.predict_file, root_dir=args.root_dir, pred_file=output_prediction_file, tag_pred_file=output_tag_prediction_file, result_file=output_result_file, out_file=output_file) results = evaluate_on_squad(evaluate_options) return results The provided code 
snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_dataset, model, tokenizer)` to solve the following problem: r""" Train the model Here is the function: def train(args, train_dataset, model, tokenizer): r""" Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() else: tb_writer = None args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_ratio * t_total), num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 
initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * ( torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for epoch in train_iterator: if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2], 'xpath_tags_seq': batch[3], 'xpath_subs_seq': batch[4], 'start_positions': batch[5], 'end_positions': batch[6], } outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: 
scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer, prefix=str(global_step)) for key, value in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if 0 < args.max_steps < global_step: epoch_iterator.close() break if 0 < args.max_steps < global_step: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step
r""" Train the model
184,651
import argparse import collections import json import os import re import string import sys from copy import deepcopy from bs4 import BeautifulSoup import sys sys.path.append(os.getcwd()) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.') parser.add_argument('root_dir', metavar='./data', help='The root directory of the raw WebSRC dataset') parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.') parser.add_argument('tag_pred_file', metavar='tag_pred.json', help='Model predictions.') parser.add_argument('--result-file', '-r', metavar='qas_eval.json') parser.add_argument('--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).') if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args()
null
184,652
import argparse import collections import json import os import re import string import sys from copy import deepcopy from bs4 import BeautifulSoup The provided code snippet includes necessary dependencies for implementing the `make_pages_list` function. Write a Python function `def make_pages_list(dataset)` to solve the following problem: r""" Record all the pages which appears in the dataset and return the list. Here is the function: def make_pages_list(dataset): r""" Record all the pages which appears in the dataset and return the list. """ pages_list = [] last_page = None for domain in dataset: for w in domain['websites']: for qa in w['qas']: if last_page != qa['id'][:4]: last_page = qa['id'][:4] pages_list.append(last_page) return pages_list
r""" Record all the pages which appears in the dataset and return the list.
184,653
import argparse import collections import json import os import re import string import sys from copy import deepcopy from bs4 import BeautifulSoup The provided code snippet includes necessary dependencies for implementing the `make_qid_to_has_ans` function. Write a Python function `def make_qid_to_has_ans(dataset)` to solve the following problem: r""" Pick all the questions which has answer in the dataset and return the list. Here is the function: def make_qid_to_has_ans(dataset): r""" Pick all the questions which has answer in the dataset and return the list. """ qid_to_has_ans = {} for domain in dataset: for w in domain['websites']: for qa in w['qas']: qid_to_has_ans[qa['id']] = bool(qa['answers']) return qid_to_has_ans
r""" Pick all the questions which has answer in the dataset and return the list.
184,654
import argparse import collections import json import os import re import string import sys from copy import deepcopy from bs4 import BeautifulSoup def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) return re.sub(regex, ' ', text) def white_space_fix(text): return ' '.join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return ''.join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def compute_exact(a_gold, a_pred): r""" Calculate the exact match. """ if normalize_answer(a_gold) == normalize_answer(a_pred): return 1 return 0 def compute_f1(a_gold, a_pred): r""" Calculate the f1 score. """ gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = collections.Counter(gold_toks) & collections.Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def compute_pos(f, t_gold, addition, t_pred): r""" Calculate the POS score. Arguments: f (str): the html file on which the question is based. t_gold (int): the gold answer tag id provided by the dataset (the value correspond to the key element_id). addition (int): the addition information used for yes/no question provided by the dataset (the value corresponding to the key answer_start). t_pred (list[int]): the tag ids of the tags corresponding the each word in the predicted answer. Returns: float: the POS score. 
""" h = BeautifulSoup(open(f), "lxml") p_gold, e_gold = set(), h.find(tid=t_gold) if e_gold is None: if len(t_pred) != 1: return 0 else: t = t_pred[0] e_pred, e_prev = h.find(tid=t), h.find(tid=t-1) if (e_pred is not None) or (addition == 1 and e_prev is not None) or\ (addition == 0 and e_prev is None): return 0 else: return 1 else: p_gold.add(e_gold['tid']) for e in e_gold.parents: if int(e['tid']) < 2: break p_gold.add(e['tid']) p = None for t in t_pred: p_pred, e_pred = set(), h.find(tid=t) if e_pred is not None: p_pred.add(e_pred['tid']) if e_pred.name != 'html': for e in e_pred.parents: if int(e['tid']) < 2: break p_pred.add(e['tid']) else: p_pred.add(str(t)) if p is None: p = p_pred else: p = p & p_pred # 预测值的公共祖先序列,except html&body return len(p_gold & p) / len(p_gold | p) The provided code snippet includes necessary dependencies for implementing the `get_raw_scores` function. Write a Python function `def get_raw_scores(dataset, preds, tag_preds, root_dir)` to solve the following problem: r""" Calculate all the three matrix (exact match, f1, POS) for each question. Arguments: dataset (dict): the dataset in use. preds (dict): the answer text prediction for each question in the dataset. tag_preds (dict): the answer tags prediction for each question in the dataset. root_dir (str): the base directory for the html files. Returns: tuple(dict, dict, dict): exact match, f1, pos scores for each question. Here is the function: def get_raw_scores(dataset, preds, tag_preds, root_dir): r""" Calculate all the three matrix (exact match, f1, POS) for each question. Arguments: dataset (dict): the dataset in use. preds (dict): the answer text prediction for each question in the dataset. tag_preds (dict): the answer tags prediction for each question in the dataset. root_dir (str): the base directory for the html files. Returns: tuple(dict, dict, dict): exact match, f1, pos scores for each question. 
""" exact_scores = {} f1_scores = {} pos_scores = {} for websites in dataset: for w in websites['websites']: f = os.path.join(root_dir, websites['domain'], w['page_id'][0:2], 'processed_data', w['page_id'] + '.html') for qa in w['qas']: qid = qa['id'] gold_answers = [a['text'] for a in qa['answers'] if normalize_answer(a['text'])] gold_tag_answers = [a['element_id'] for a in qa['answers']] additional_tag_information = [a['answer_start'] for a in qa['answers']] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [''] if qid not in preds: print('Missing prediction for %s' % qid) continue a_pred, t_pred = preds[qid], tag_preds[qid] # Take max over all gold answers exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) pos_scores[qid] = max(compute_pos(f, t, a, t_pred) for t, a in zip(gold_tag_answers, additional_tag_information)) return exact_scores, f1_scores, pos_scores
r""" Calculate all the three matrix (exact match, f1, POS) for each question. Arguments: dataset (dict): the dataset in use. preds (dict): the answer text prediction for each question in the dataset. tag_preds (dict): the answer tags prediction for each question in the dataset. root_dir (str): the base directory for the html files. Returns: tuple(dict, dict, dict): exact match, f1, pos scores for each question.
184,655
import argparse import collections import json import os import re import string import sys from copy import deepcopy from bs4 import BeautifulSoup The provided code snippet includes necessary dependencies for implementing the `make_eval_dict` function. Write a Python function `def make_eval_dict(exact_scores, f1_scores, pos_scores, qid_list=None)` to solve the following problem: r""" Make the dictionary to show the evaluation results. Here is the function: def make_eval_dict(exact_scores, f1_scores, pos_scores, qid_list=None): r""" Make the dictionary to show the evaluation results. """ if qid_list is None: total = len(exact_scores) return collections.OrderedDict([ ('exact', 100.0 * sum(exact_scores.values()) / total), ('f1', 100.0 * sum(f1_scores.values()) / total), ('pos', 100.0 * sum(pos_scores.values()) / total), ('total', total), ]) else: total = len(qid_list) if total == 0: return collections.OrderedDict([ ('exact', 0), ('f1', 0), ('pos', 0), ('total', 0), ]) return collections.OrderedDict([ ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total), ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total), ('pos', 100.0 * sum(pos_scores[k] for k in qid_list) / total), ('total', total), ])
r""" Make the dictionary to show the evaluation results.
184,656
import argparse import collections import json import os import re import string import sys from copy import deepcopy from bs4 import BeautifulSoup def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval['%s_%s' % (prefix, k)] = new_eval[k]
null
184,657
import csv import json import argparse import os.path as osp import os from operator import itemgetter def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--root_dir", default=None, type=str, required=True, help="The root directory of the raw WebSRC dataset; The output SQuAD-style json file will also" "be placed here.") parser.add_argument("--version", default=None, type=str, required=True, help="The version of the generating dataset, which will also be the name of the json file.") parser.add_argument("--suffix", default="", type=str, help="Other suffix to distinguish different dataset.") return parser.parse_args()
null
184,658
import csv import json import argparse import os.path as osp import os from operator import itemgetter def convert_csv_to_dict(args): dir_list = os.walk(args.root_dir) print('Start Converting') data, websites, qas, answers = [], [], [], [] last_domain = None for d, _, fs in dir_list: for f in fs: if f != 'dataset.csv': continue print('Now converting', d + '/' + f) raw_data = list(csv.DictReader(open(osp.join(d, f)))) curr_domain = d.split('/')[-2] if last_domain != curr_domain and last_domain is not None: domain = {'domain': last_domain, 'websites': websites} data.append(domain) websites = [] last_domain = curr_domain raw_data.sort(key=itemgetter('id')) last = raw_data[0] for i in range(len(raw_data)): current = raw_data[i] if i != 0: qa = {'question': last['question'], 'id' : last['id'], 'answers' : answers} # , 'type': last['type']} qas.append(qa) answers = [] if last['id'][:-5] != current['id'][:-5]: website = {'qas': qas, 'page_id': last['id'][2:-5]} websites.append(website) qas = [] answer = {'text' : current['answer'], 'element_id' : int(current['element_id']), 'answer_start': int(current['answer_start'])} answers.append(answer) last = current if len(answers) > 0: qa = {'question': last['question'], 'id' : last['id'], 'answers' : answers} # , 'type' : last['type']} qas.append(qa) answers = [] if len(qas) > 0: website = {'qas': qas, 'page_id': last['id'][2:-5]} websites.append(website) qas = [] domain = {'domain': last_domain, 'websites': websites} data.append(domain) dataset = {'version': args.version, 'data': data} print('Converting Finished\n') return dataset
null
184,659
import csv import json import argparse import os.path as osp import os from operator import itemgetter def dataset_split(args, dataset): def count(last, curr): if last is None: return False if last != curr: return False return True split = json.load(open(osp.join(args.root_dir, 'dataset_split.json'))) data = dataset['data'] count_website = set() for domain in data: for website in domain['websites']: count_website.add(domain['domain'][0:2] + website['page_id'][0:2]) print('The number of total websites is', len(count_website)) train_list = [] dev_list, test_list = split['dev'], split['test'] for website in count_website: if website not in dev_list and website not in test_list: train_list.append(website) print('The train websites list is', train_list) print('The test websites list is', test_list) print('The dev websites list is', dev_list) train_data, test_data, dev_data = [], [], [] cnt = 0 for domain in data: train_websites, test_websites, dev_websites = [], [], [] last = None for website in domain['websites']: if not count(last, website['page_id'][0:2]): last = website['page_id'][0:2] cnt += 1 name = domain['domain'][0:2] + website['page_id'][0:2] if name in test_list: test_websites.append(website) continue if name in dev_list: dev_websites.append(website) continue if len(train_list) != 0 and name not in train_list: continue train_websites.append(website) if len(train_websites) != 0: train_data.append({'domain': domain['domain'], 'websites': train_websites}) if len(test_websites) != 0: test_data.append({'domain': domain['domain'], 'websites': test_websites}) if len(dev_websites) != 0: dev_data.append({'domain': domain['domain'], 'websites': dev_websites}) print('The number of processed websites is', cnt) train_dataset = {'version': dataset['version'], 'data': train_data} with open(osp.join(args.root_dir, dataset['version'] + '_train_' + args.suffix + '.json'), 'w') as f: f.write(json.dumps(train_dataset)) test_dataset = {'version': dataset['version'], 'data': 
test_data} with open(osp.join(args.root_dir, dataset['version'] + '_test_' + args.suffix + '.json'), 'w') as f: f.write(json.dumps(test_dataset)) dev_dataset = {'version': dataset['version'], 'data': dev_data} with open(osp.join(args.root_dir, dataset['version'] + '_dev_' + args.suffix + '.json'), 'w') as f: f.write(json.dumps(dev_dataset)) return
null
184,660
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import pickle import sys from absl import app from absl import flags import tqdm import constants The provided code snippet includes necessary dependencies for implementing the `pack_swde_data` function. Write a Python function `def pack_swde_data(swde_path, pack_path, cut_off)` to solve the following problem: Packs the swde dataset to a single file. Args: swde_path: The path to SWDE dataset pages (http://shortn/_g22KuARPAi). pack_path: The path to save packed SWDE dataset file. cut_off: To shorten the list for testing. Returns: None Here is the function: def pack_swde_data(swde_path, pack_path, cut_off): """Packs the swde dataset to a single file. Args: swde_path: The path to SWDE dataset pages (http://shortn/_g22KuARPAi). pack_path: The path to save packed SWDE dataset file. cut_off: To shorten the list for testing. Returns: None """ # Get all website names for each vertical. # The SWDE dataset fold is structured as follows: # - swde/ # The root folder. # - swde/auto/ # A certain vertical. # - swde/auto/auto-aol(2000)/ # A certain website. # - swde/auto/auto-aol(2000)/0000.htm # A page. # Get all vertical names. vertical_to_websites_map = constants.VERTICAL_WEBSITES """ for `auto`, that is ---> [ "msn", "aol", "kbb", "cars", "yahoo", "autoweb", "autobytel", "automotive", "carquotes", "motortrend" ] """ # The data dict initialized with the path of each html file of SWDE. 
swde_data = list() print("Start loading data...") for v in vertical_to_websites_map: for w in os.listdir(os.path.join(swde_path, v)): page_count = 0 filenames = os.listdir(os.path.join(swde_path, v, w)) filenames.sort() for filename in filenames: print(os.path.join(swde_path, v, w, filename)) page = dict(vertical=v, website=w, path=os.path.join(v, w, filename)) # path is something like `book/book-amazon(2000)/0000.htm` swde_data.append(page) page_count += 1 if cut_off > 0 and page_count == cut_off: break # Load the html data. with tqdm.tqdm(total=len(swde_data), file=sys.stdout) as progressbar: for page in swde_data: with open(os.path.join(swde_path, page["path"])) as webpage: page["html_str"] = webpage.read() progressbar.set_description("processed") progressbar.update(1) # now, the swde_data is a list # for each page in it # we have it as # {"vertical":'book',"website":'book-amazon(2000)',"path:'book/book-amazon(2000)/0000.htm',"html_str":xx} # and finally these info are dumped into the swde.pickle file # Save the html_str data. with open(pack_path, "wb") as gfo: pickle.dump(swde_data, gfo)
Packs the swde dataset to a single file. Args: swde_path: The path to SWDE dataset pages (http://shortn/_g22KuARPAi). pack_path: The path to save packed SWDE dataset file. cut_off: To shorten the list for testing. Returns: None
184,661
from __future__ import absolute_import, division, print_function import argparse import logging import os import random import glob import numpy as np from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler) from torch.utils.data.distributed import DistributedSampler from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import ( WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, ) from markuplmft.models.markuplm import MarkupLMConfig, MarkupLMTokenizer, MarkupLMForTokenClassification from utils import get_swde_features, SwdeDataset from eval_utils import page_level_constraint import constants import torch import copy def to_list(tensor): return tensor.detach().cpu().tolist()
null
184,662
from __future__ import absolute_import, division, print_function import argparse import logging import os import random import glob import numpy as np from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler) from torch.utils.data.distributed import DistributedSampler from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import ( WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, ) from markuplmft.models.markuplm import MarkupLMConfig, MarkupLMTokenizer, MarkupLMForTokenClassification from utils import get_swde_features, SwdeDataset from eval_utils import page_level_constraint import constants import torch import copy def load_and_cache_one_website(args, tokenizer, website): cached_features_file = os.path.join( args.root_dir, "cached", args.vertical, website, f"cached_markuplm_{str(args.max_seq_length)}_pages{args.n_pages}_prevnodes{args.prev_nodes_into_account}" ) if not os.path.exists(os.path.dirname(cached_features_file)): os.makedirs(os.path.dirname(cached_features_file)) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info( f"Creating features for {args.vertical}-{website}-pages{args.n_pages}_prevnodes{args.prev_nodes_into_account}") features = get_swde_features(root_dir=args.root_dir, vertical=args.vertical, website=website, tokenizer=tokenizer, doc_stride=args.doc_stride, max_length=args.max_seq_length, prev_nodes=args.prev_nodes_into_account, n_pages=args.n_pages) if args.local_rank in [-1, 0] and args.save_features: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) return features The provided code snippet includes necessary dependencies for implementing the `load_and_cache_examples` function. 
Write a Python function `def load_and_cache_examples(args, tokenizer, websites)` to solve the following problem: r""" Load and process the raw data. Here is the function: def load_and_cache_examples(args, tokenizer, websites): r""" Load and process the raw data. """ # if args.local_rank not in [-1, 0]: # torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, # and the others will use the cache feature_dicts = {} for website in websites: features_per_website = load_and_cache_one_website(args, tokenizer, website) feature_dicts[website] = features_per_website return feature_dicts
r""" Load and process the raw data.
184,663
from __future__ import absolute_import, division, print_function import argparse import logging import os import random import glob import numpy as np from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler) from torch.utils.data.distributed import DistributedSampler from tensorboardX import SummaryWriter from tqdm import tqdm, trange from transformers import ( WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, ) from markuplmft.models.markuplm import MarkupLMConfig, MarkupLMTokenizer, MarkupLMForTokenClassification from utils import get_swde_features, SwdeDataset from eval_utils import page_level_constraint import constants import torch import copy logger = logging.getLogger(__name__) def set_seed(args): r""" Fix the random seed for reproduction. """ random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer, sub_output_dir): r""" Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() else: tb_writer = None args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 
args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_ratio * t_total), num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * ( torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for epoch in train_iterator: if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2], 'xpath_tags_seq': batch[3], 'xpath_subs_seq': batch[4], 'labels': batch[5], } outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if 
args.local_rank == -1 and args.evaluate_during_training: raise ValueError("Shouldn't `evaluate_during_training` when ft SWDE!!") # results = evaluate(args, model, tokenizer, prefix=str(global_step)) # for key, value in results.items(): # tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(sub_output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) if 0 < args.max_steps < global_step: epoch_iterator.close() break if 0 < args.max_steps < global_step: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, test_websites, sub_output_dir, prefix=""): r""" Evaluate the model """ all_eval_res = {} all_precision = [] all_recall = [] all_f1 = [] for website in test_websites: res_on_one_website = eval_on_one_website(args, model, website, sub_output_dir, prefix) all_precision.append(res_on_one_website[0]) all_recall.append(res_on_one_website[1]) all_f1.append(res_on_one_website[2]) return {"precision": sum(all_precision) / len(all_precision), "recall": sum(all_recall) / len(all_recall), "f1": sum(all_f1) / len(all_f1), } def get_dataset_and_info_for_websites(websites, evaluate=False): """ Args: websites: a list of websites Returns: a dataset object """ all_features = [] for website in websites: features_per_website = 
global_feature_dicts[website] all_features += features_per_website # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in all_features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in all_features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in all_features], dtype=torch.long) all_xpath_tags_seq = torch.tensor([f.xpath_tags_seq for f in all_features], dtype=torch.long) all_xpath_subs_seq = torch.tensor([f.xpath_subs_seq for f in all_features], dtype=torch.long) if not evaluate: all_labels = torch.tensor([f.labels for f in all_features], dtype=torch.long) dataset = SwdeDataset(all_input_ids=all_input_ids, all_attention_mask=all_attention_mask, all_token_type_ids=all_token_type_ids, all_xpath_tags_seq=all_xpath_tags_seq, all_xpath_subs_seq=all_xpath_subs_seq, all_labels=all_labels) info = None else: # in evaluation, we do not add labels dataset = SwdeDataset(all_input_ids=all_input_ids, all_attention_mask=all_attention_mask, all_token_type_ids=all_token_type_ids, all_xpath_tags_seq=all_xpath_tags_seq, all_xpath_subs_seq=all_xpath_subs_seq) info = [(f.html_path, f.involved_first_tokens_pos, f.involved_first_tokens_xpaths, f.involved_first_tokens_types, f.involved_first_tokens_text) for f in all_features] return dataset, info def do_something(train_websites, test_websites, args, config, tokenizer): # before each run, we reset the seed set_seed(args) model = MarkupLMForTokenClassification.from_pretrained(args.model_name_or_path, config=config) model.resize_token_embeddings(len(tokenizer)) sub_output_dir = os.path.join(args.output_dir, args.vertical, f"seed-{args.n_seed}_pages-{args.n_pages}", "-".join(train_websites)) # if args.local_rank == 0: # torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, 
we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is # set. Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running # `--fp16_opt_level="O2"` will remove the need for this code, but it is still valid. if args.fp16: try: import apex apex.amp.register_half_function(torch, 'einsum') except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset, _ = get_dataset_and_info_for_websites(train_websites) tokenizer.save_pretrained(sub_output_dir) model.to(args.device) global_step, tr_loss = train(args, train_dataset, model, tokenizer, sub_output_dir) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(sub_output_dir) and args.local_rank in [-1, 0]: os.makedirs(sub_output_dir) logger.info("Saving model checkpoint to %s", sub_output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, 'module') else model model_to_save.save_pretrained(sub_output_dir) tokenizer.save_pretrained(sub_output_dir) torch.save(args, os.path.join(sub_output_dir, 'training_args.bin')) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [sub_output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(sub_output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs logger.info("Evaluate the following checkpoints: %s", checkpoints) config = MarkupLMConfig.from_pretrained(sub_output_dir) tokenizer = MarkupLMTokenizer.from_pretrained(sub_output_dir) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" try: int(global_step) except ValueError: global_step = "" if global_step and int(global_step) < args.eval_from_checkpoint: continue if global_step and args.eval_to_checkpoint is not None and int(global_step) >= args.eval_to_checkpoint: continue model = MarkupLMForTokenClassification.from_pretrained(checkpoint, config=config) model.to(args.device) # Evaluate result = evaluate(args, model, test_websites, sub_output_dir, prefix=global_step) result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) results.update(result) logger.info("Results: {}".format(results)) return results
null
184,664
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import pickle import random import re import sys import unicodedata from absl import app from absl import flags import lxml from lxml import etree from lxml.html.clean import Cleaner from tqdm import tqdm import constants import multiprocessing as mp FLAGS = flags.FLAGS def load_html_and_groundtruth(vertical_to_load, website_to_load): """ DONE READ! """ # example is `book` and `abebooks` """Loads and returns the html sting and ground turth data as a dictionary.""" all_data_dict = collections.defaultdict(dict) vertical_to_websites_map = constants.VERTICAL_WEBSITES gt_path = FLAGS.input_groundtruth_path """ First build groudtruth dict """ for v in vertical_to_websites_map: if v != vertical_to_load: continue for truthfile in os.listdir(os.path.join(gt_path, v)): # For example, a groundtruth file name can be "auto-yahoo-price.txt". vertical, website, field = truthfile.replace(".txt", "").split("-") # like book , amazon , isbn_13 if website != website_to_load: continue with open(os.path.join(gt_path, v, truthfile), "r") as gfo: lines = gfo.readlines() for line in lines[2:]: # Each line should contains more than 3 elements splitted by \t # which are: index, number of values, value1, value2, etc. item = line.strip().split("\t") index = item[0] # like 0123 num_values = int(item[1]) # Can be 0 (when item[2] is "<NULL>"). 
all_data_dict[index]["field-" + field] = dict(values=item[2:2 + num_values]) # {"0123": # {"field-engine": # {"values":["engine A","engine B"]}, # "field-price": # } # } """ this is an example for book-abebooks-0000.htm <-- all_data_dict["0000"] --> { 'field-publication_date': {'values': ['2008']}, 'field-author': {'values': ['Howard Zinn', 'Paul Buhle', 'Mike Konopacki']}, 'field-title': {'values': ["A People's History of American Empire"]}, 'field-publisher': {'values': ['Metropolitan Books']}, 'field-isbn_13': {'values': ['9780805087444']} } """ print("Reading the pickle of SWDE original dataset.....", file=sys.stderr) with open(FLAGS.input_pickle_path, "rb") as gfo: swde_html_data = pickle.load(gfo) # {"vertical":'book',"website":'book-amazon(2000)',"path:'book/book-amazon(2000)/0000.htm',"html_str":xx} here for page in tqdm(swde_html_data, desc="Loading HTML data"): vertical = page["vertical"] website = page["website"] website = website[website.find("-") + 1:website.find("(")] if vertical != vertical_to_load or website != website_to_load: continue path = page["path"] # For example, auto/auto-aol(2000)/0000.htm html_str = page["html_str"] _, _, index = path.split("/") # website be like auto-aol(2000) index = index.replace(".htm", "") all_data_dict[index]["html_str"] = html_str all_data_dict[index]["path"] = path """ this is an example for book-abebooks-0000.htm <-- all_data_dict["0000"] --> { 'field-publication_date': {'values': ['2008']}, 'field-author': {'values': ['Howard Zinn', 'Paul Buhle', 'Mike Konopacki']}, 'field-title': {'values': ["A People's History of American Empire"]}, 'field-publisher': {'values': ['Metropolitan Books']}, 'field-isbn_13': {'values': ['9780805087444']}, 'path': 'book/book-abebooks(2000)/0000.htm', 'html_str': omitted, } """ # all_data_dict here has all the pages # however, only those in swde.pickle has the newly-appended 'path' and 'html_str' return all_data_dict def get_field_xpaths(all_data_dict, vertical_to_process, 
website_to_process, n_pages, max_variable_nodes_per_website): """Gets xpaths data for each page in the data dictionary. Args: all_data_dict: the dictionary saving both the html content and the truth. vertical_to_process: the vertical that we are working on; website_to_process: the website that we are working on. n_pages: we will work on the first n_pages number of the all pages. max_variable_nodes_per_website: top N frequent variable nodes as the final set. """ # Saving the xpath info of the whole website, # - Key is a xpath. # - Value is a set of text appeared before inside the node. overall_xpath_dict = collections.defaultdict(set) # Update page data with groundtruth xpaths and the overall xpath-value dict. for index in tqdm(all_data_dict, desc="Processing %s" % website_to_process, total=n_pages): if int(index) >= n_pages: continue # We add dom-tree attributes for the first n_pages page_data = all_data_dict[index] html = page_data["html_str"] dom_tree = get_dom_tree(html, website=website_to_process) page_data["dom_tree"] = dom_tree # Match values of each field for the current page. for field in page_data: if not field.startswith("field-"): continue # Saving the xpaths of the values for each field. page_data[field]["groundtruth_xpaths"] = set() page_data[field]["is_truth_value_list"] = set() for value in page_data[field]["values"]: xpaths, current_xpath_data, current_page_nodes_in_order, is_truth_value_list = \ get_value_xpaths(dom_tree, value, overall_xpath_dict, website_to_process, field[6:]) # Assert each truth value can be founded in >=1 nodes. assert len(xpaths) >= 1, \ "%s;\t%s;\t%s;\t%s; is not found" % (website_to_process, field, index, value) # Update the page-level xpath information. 
page_data[field]["groundtruth_xpaths"].update(xpaths) page_data[field]["is_truth_value_list"].update(is_truth_value_list) # now for each page_data # an example # page_data["field-author"] = # { # 'values': ['Dave Kemper', 'Patrick Sebranek', 'Verne Meyer'], # 'groundtruth_xpaths': # {'/html/body/div[2]/div[2]/div[2]/div[1]/h3/a[3]', # '/html/body/div[2]/div[2]/div[2]/div[1]/h3/a[2]', # '/html/body/div[2]/div[2]/div[2]/div[1]/h3/a[1]', # '/html/body/div[2]/div[2]/div[3]/div[3]/p/a'} # } page_data["xpath_data"] = current_xpath_data # page_data["doc_strings"] = current_page_nodes_in_order # [(text, xpath)*N] # page_data["reversed_doc_strings_ids"] = {v[0]: i for i, v in enumerate(current_page_nodes_in_order)} # page_data["doc_strings"] is the basis of our transformers-based method!!! # Define the fixed-text nodes and variable nodes. fixed_nodes = set() variable_nodes = set() # 这里对这个网址上的所有xpath进行排序 # 以对应的不同文本数目倒序排列 node_variability = sorted( [(xpath, len(text_set)) for xpath, text_set in overall_xpath_dict.items()], key=lambda x: x[1], reverse=True ) for xpath, variability in node_variability: # variability 为xpath的可变性 if variability > 5 and len(variable_nodes) < max_variable_nodes_per_website: variable_nodes.add(xpath) else: fixed_nodes.add(xpath) print("Vertical: %s; Website: %s; fixed_nodes: %d; variable_nodes: %d" % ( vertical_to_process, website_to_process, len(fixed_nodes), len(variable_nodes) ) ) assure_value_variable(all_data_dict, variable_nodes, fixed_nodes, n_pages) all_data_dict["fixed_nodes"] = list(fixed_nodes) all_data_dict["variable_nodes"] = list(variable_nodes) # 总之到这为止 # fixed_nodes包含的就是固定的node # variable_nodes包含的就是值会变化的node # 并且我们保证truth_value必定在variable nodes中 # now page_data has the `doc_strings` attributes # and each field has the `is_truth_value_list` attributes # all_data_dict has the following attributes # "0000" ~ "1999" is the infomation for each page # "fixed_nodes" are the xpaths for nodes that cannot have truth-value # "variable_nodes" are 
the xpaths for nodes that might have truth-value return The provided code snippet includes necessary dependencies for implementing the `generate_nodes_seq_and_write_to_file` function. Write a Python function `def generate_nodes_seq_and_write_to_file(compressed_args)` to solve the following problem: Extracts all the xpaths and labels the nodes for all the pages. Here is the function: def generate_nodes_seq_and_write_to_file(compressed_args): """Extracts all the xpaths and labels the nodes for all the pages.""" vertical, website = compressed_args all_data_dict = load_html_and_groundtruth(vertical, website) get_field_xpaths( all_data_dict, vertical_to_process=vertical, website_to_process=website, n_pages=2000, max_variable_nodes_per_website=300 ) """ keys to the following example ---> example = all_data_dict["0000"] dict_keys([ 'field-publication_date', 'field-author', 'field-title', 'field-publisher', 'field-isbn_13', 'html_str', 'path', 'dom_tree', 'xpath_data' ]) """ variable_nodes = all_data_dict["variable_nodes"] cleaned_features_for_this_website = {} for index in all_data_dict: if not index.isdigit(): # Skip the cases when index is actually the "fixed/variable_nodes" keys. 
continue if int(index) >= FLAGS.n_pages: break page_data = all_data_dict[index] assert "xpath_data" in page_data doc_strings = page_data["doc_strings"] new_doc_strings = [] field_info = {} for field in page_data: if not field.startswith("field-"): continue for doc_string_id in page_data[field]["is_truth_value_list"]: field_info[doc_string_id] = field[6:] for id, doc_string in enumerate(doc_strings): text, xpath = doc_string is_variable = xpath in variable_nodes if not is_variable: new_doc_strings.append((text, xpath, "fixed-node")) else: # for variable nodes,we need to give them labels gt_field = field_info.get(id, "none") new_doc_strings.append((text, xpath, gt_field)) cleaned_features_for_this_website[index] = new_doc_strings output_file_path = os.path.join(FLAGS.output_data_path, f"{vertical}-{website}-{FLAGS.n_pages}.pickle") print(f"Writing the processed first {FLAGS.n_pages} pages of {vertical}-{website} into {output_file_path}") with open(output_file_path, "wb") as f: pickle.dump(cleaned_features_for_this_website, f)
Extracts all the xpaths and labels the nodes for all the pages.
184,665
import math import os import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from transformers.activations import ACT2FN from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, \ replace_return_docstrings from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, QuestionAnsweringModelOutput ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from .configuration_markuplm import MarkupLMConfig from typing import Optional, Union The provided code snippet includes necessary dependencies for implementing the `create_position_ids_from_input_ids` function. Write a Python function `def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0)` to solve the following problem: Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor Here is the function: def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor
184,666
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import json import glob import logging import argparse import math from tqdm import tqdm import numpy as np import torch import random import pickle from s2s_ft.modeling_decoding import BertForSeq2SeqDecoder, BertConfig from transformers.tokenization_bert import whitespace_tokenize import s2s_ft.s2s_loader as seq2seq_loader from s2s_ft.utils import load_and_cache_examples from transformers import \ BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer, ElectraTokenizer from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.tokenization_minilm import MinilmTokenizer def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list
null
184,667
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import json import glob import logging import argparse import math from tqdm import tqdm import numpy as np import torch import random import pickle from s2s_ft.modeling_decoding import BertForSeq2SeqDecoder, BertConfig from transformers.tokenization_bert import whitespace_tokenize import s2s_ft.s2s_loader as seq2seq_loader from s2s_ft.utils import load_and_cache_examples from transformers import \ BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer, ElectraTokenizer from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.tokenization_minilm import MinilmTokenizer def ascii_print(text): text = text.encode("ascii", "ignore") print(text)
null
184,668
from __future__ import absolute_import, division, print_function import argparse import logging import os import json import random import numpy as np import torch from torch.utils.data import (DataLoader, SequentialSampler) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter import tqdm from s2s_ft.modeling import BertForSequenceToSequenceWithPseudoMask, BertForSequenceToSequenceUniLMV1 from transformers import AdamW, get_linear_schedule_with_warmup from transformers import \ RobertaConfig, BertConfig, \ BertTokenizer, RobertaTokenizer, \ XLMRobertaConfig, XLMRobertaTokenizer, \ ElectraConfig, ElectraTokenizer from s2s_ft.configuration_unilm import UnilmConfig from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.configuration_minilm import MinilmConfig from s2s_ft.tokenization_minilm import MinilmTokenizer from s2s_ft import utils from s2s_ft.config import BertForSeq2SeqConfig logger = logging.getLogger(__name__) def prepare_for_training(args, model, checkpoint_state_dict, amp): no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) if checkpoint_state_dict: optimizer.load_state_dict(checkpoint_state_dict['optimizer']) model.load_state_dict(checkpoint_state_dict['model']) # then remove optimizer state to make amp happy # https://github.com/NVIDIA/apex/issues/480#issuecomment-587154020 if amp: optimizer.state = {} if amp: model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) if checkpoint_state_dict: amp.load_state_dict(checkpoint_state_dict['amp']) # Black Tech from 
https://github.com/NVIDIA/apex/issues/480#issuecomment-587154020 # forward, backward, optimizer step, zero_grad random_input = {'source_ids': torch.ones(size=(2, 2), device=args.device, dtype=torch.long), 'target_ids': torch.ones(size=(2, 2), device=args.device, dtype=torch.long), 'label_ids': torch.ones(size=(2, 2), device=args.device, dtype=torch.long), 'pseudo_ids': torch.ones(size=(2, 2), device=args.device, dtype=torch.long), 'num_source_tokens': torch.zeros(size=(2,), device=args.device, dtype=torch.long), 'num_target_tokens': torch.zeros(size=(2,), device=args.device, dtype=torch.long)} loss = model(**random_input) print("Loss = %f" % loss.cpu().item()) with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() optimizer.step() model.zero_grad() # then load optimizer state_dict again (this time without removing optimizer.state) optimizer.load_state_dict(checkpoint_state_dict['optimizer']) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) return model, optimizer The provided code snippet includes necessary dependencies for implementing the `train` function. 
Write a Python function `def train(args, training_features, model, tokenizer)` to solve the following problem: Train the model Here is the function: def train(args, training_features, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0] and args.log_dir: tb_writer = SummaryWriter(log_dir=args.log_dir) else: tb_writer = None if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") else: amp = None # model recover recover_step = utils.get_max_epoch_model(args.output_dir) if recover_step: checkpoint_state_dict = utils.get_checkpoint_state_dict(args.output_dir, recover_step) else: checkpoint_state_dict = None model.to(args.device) model, optimizer = prepare_for_training(args, model, checkpoint_state_dict, amp=amp) per_node_train_batch_size = args.per_gpu_train_batch_size * args.n_gpu * args.gradient_accumulation_steps train_batch_size = per_node_train_batch_size * (torch.distributed.get_world_size() if args.local_rank != -1 else 1) global_step = recover_step if recover_step else 0 if args.num_training_steps == -1: args.num_training_steps = args.num_training_epochs * len(training_features) / train_batch_size scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.num_training_steps, last_epoch=-1) if checkpoint_state_dict: scheduler.load_state_dict(checkpoint_state_dict["lr_scheduler"]) train_dataset = utils.Seq2seqDatasetForBert( features=training_features, max_source_len=args.max_source_seq_length, max_target_len=args.max_target_seq_length, vocab_size=tokenizer.vocab_size, cls_id=tokenizer.cls_token_id, sep_id=tokenizer.sep_token_id, pad_id=tokenizer.pad_token_id, mask_id=tokenizer.mask_token_id, random_prob=args.random_prob, keep_prob=args.keep_prob, offset=train_batch_size * global_step, num_training_instances=train_batch_size * args.num_training_steps, 
source_mask_prob=args.source_mask_prob, target_mask_prob=args.target_mask_prob, mask_way=args.mask_way, num_max_mask_token=args.num_max_mask_token, ) logger.info("Check dataset:") for i in range(5): source_ids, target_ids = train_dataset.__getitem__(i)[:2] logger.info("Instance-%d" % i) logger.info("Source tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(source_ids))) logger.info("Target tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(target_ids))) logger.info("Mode = %s" % str(model)) # Train! logger.info(" ***** Running training ***** *") logger.info(" Num examples = %d", len(training_features)) logger.info(" Num Epochs = %.2f", len(train_dataset) / len(training_features)) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Batch size per node = %d", per_node_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", train_batch_size) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", args.num_training_steps) if args.num_training_steps <= global_step: logger.info("Training is done. 
Please use a new dir or clean this dir!") else: # The training features are shuffled train_sampler = SequentialSampler(train_dataset) \ if args.local_rank == -1 else DistributedSampler(train_dataset, shuffle=False) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=per_node_train_batch_size // args.gradient_accumulation_steps, collate_fn=utils.batch_list_to_batch_tensors) train_iterator = tqdm.tqdm( train_dataloader, initial=global_step * args.gradient_accumulation_steps, desc="Iter (loss=X.XXX, lr=X.XXXXXXX)", disable=args.local_rank not in [-1, 0]) model.train() model.zero_grad() tr_loss, logging_loss = 0.0, 0.0 for step, batch in enumerate(train_iterator): if global_step > args.num_training_steps: break batch = tuple(t.to(args.device) for t in batch) if args.mask_way == 'v2': inputs = {'source_ids': batch[0], 'target_ids': batch[1], 'label_ids': batch[2], 'pseudo_ids': batch[3], 'num_source_tokens': batch[4], 'num_target_tokens': batch[5]} elif args.mask_way == 'v1' or args.mask_way == 'v0': inputs = {'source_ids': batch[0], 'target_ids': batch[1], 'masked_ids': batch[2], 'masked_pos': batch[3], 'masked_weight': batch[4], 'num_source_tokens': batch[5], 'num_target_tokens': batch[6]} loss = model(**inputs) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training train_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0])) if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() logging_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule 
model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logger.info("") logger.info(" Step [%d ~ %d]: %.2f", global_step - args.logging_steps, global_step, logging_loss) logging_loss = 0.0 if args.local_rank in [-1, 0] and args.save_steps > 0 and \ (global_step % args.save_steps == 0 or global_step == args.num_training_steps): save_path = os.path.join(args.output_dir, "ckpt-%d" % global_step) os.makedirs(save_path, exist_ok=True) model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(save_path) optim_to_save = { "optimizer": optimizer.state_dict(), "lr_scheduler": scheduler.state_dict(), } if args.fp16: optim_to_save["amp"] = amp.state_dict() torch.save(optim_to_save, os.path.join(save_path, utils.OPTIM_NAME)) logger.info("Saving model checkpoint %d into %s", global_step, save_path) if args.local_rank in [-1, 0] and tb_writer: tb_writer.close()
Train the model
184,669
from __future__ import absolute_import, division, print_function import argparse import logging import os import json import random import numpy as np import torch from torch.utils.data import (DataLoader, SequentialSampler) from torch.utils.data.distributed import DistributedSampler import tqdm from s2s_ft.modeling import BertForSequenceToSequenceWithPseudoMask, BertForSequenceToSequenceUniLMV1 from transformers import AdamW, get_linear_schedule_with_warmup from transformers import \ RobertaConfig, BertConfig, \ BertTokenizer, RobertaTokenizer, \ XLMRobertaConfig, XLMRobertaTokenizer, \ ElectraConfig, ElectraTokenizer from s2s_ft.configuration_unilm import UnilmConfig from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.configuration_minilm import MinilmConfig from s2s_ft.tokenization_minilm import MinilmTokenizer from s2s_ft import utils from s2s_ft.config import BertForSeq2SeqConfig MODEL_CLASSES = { 'bert': (BertConfig, BertTokenizer), 'minilm': (MinilmConfig, MinilmTokenizer), 'roberta': (RobertaConfig, RobertaTokenizer), 'xlm-roberta': (XLMRobertaConfig, XLMRobertaTokenizer), 'unilm': (UnilmConfig, UnilmTokenizer), 'electra': (ElectraConfig, ElectraTokenizer), } def get_args(): parser = argparse.ArgumentParser() # parser.add_argument("--train_source_file", default=None, type=str, required=True, # help="Training data contains source") # parser.add_argument("--train_target_file", default=None, type=str, required=True, # help="Training data contains target") parser.add_argument("--train_file", default=None, type=str, required=True, help="Training data (json format) for training. 
Keys: source and target") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list:") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") parser.add_argument("--log_dir", default=None, type=str, help="The output directory where the log will be written.") ## Other parameters parser.add_argument("--config_name", default=None, type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_source_seq_length", default=464, type=int, help="The maximum total source sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--max_target_seq_length", default=48, type=int, help="The maximum total target sequence length after WordPiece tokenization. 
Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--cached_train_features_file", default=None, type=str, help="Cached training features file") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.01, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--label_smoothing", default=0.1, type=float, help="Max gradient norm.") parser.add_argument("--num_training_steps", default=-1, type=int, help="set total number of training steps to perform") parser.add_argument("--num_training_epochs", default=10, type=int, help="set total number of training epochs to perform (--num_training_steps has higher priority)") parser.add_argument("--num_warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--random_prob", default=0.1, type=float, help="prob to random replace a masked token") parser.add_argument("--keep_prob", default=0.1, type=float, help="prob to keep no change for a masked token") parser.add_argument("--fix_word_embedding", action='store_true', help="Set word embedding no grad when finetuning.") parser.add_argument('--logging_steps', type=int, default=500, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=1500, help="Save 
checkpoint every X updates steps.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--source_mask_prob', type=float, default=-1.0, help="Probability to mask source sequence in fine-tuning") parser.add_argument('--target_mask_prob', type=float, default=0.5, help="Probability to mask target sequence in fine-tuning") parser.add_argument('--num_max_mask_token', type=int, default=0, help="The number of the max masked tokens in target sequence") parser.add_argument('--mask_way', type=str, default='v2', help="Fine-tuning method (v0: position shift, v1: masked LM, v2: pseudo-masking)") parser.add_argument("--lmdb_cache", action='store_true', help="Use LMDB to cache training features") parser.add_argument("--lmdb_dtype", type=str, default='h', help="Data type for cached data type for LMDB") parser.add_argument args = parser.parse_args() return args
null
184,670
from __future__ import absolute_import, division, print_function import argparse import logging import os import json import random import numpy as np import torch from torch.utils.data import (DataLoader, SequentialSampler) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter import tqdm from s2s_ft.modeling import BertForSequenceToSequenceWithPseudoMask, BertForSequenceToSequenceUniLMV1 from transformers import AdamW, get_linear_schedule_with_warmup from transformers import \ RobertaConfig, BertConfig, \ BertTokenizer, RobertaTokenizer, \ XLMRobertaConfig, XLMRobertaTokenizer, \ ElectraConfig, ElectraTokenizer from s2s_ft.configuration_unilm import UnilmConfig from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.configuration_minilm import MinilmConfig from s2s_ft.tokenization_minilm import MinilmTokenizer from s2s_ft import utils from s2s_ft.config import BertForSeq2SeqConfig logger = logging.getLogger(__name__) def prepare(args): # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() os.makedirs(args.output_dir, exist_ok=True) json.dump(args.__dict__, open(os.path.join( args.output_dir, 'train_opt.json'), 'w'), sort_keys=True, indent=2) # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) 
torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. if args.fp16: try: import apex apex.amp.register_half_function(torch, 'einsum') except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
null
184,671
from __future__ import absolute_import, division, print_function import argparse import logging import os import json import random import numpy as np import torch from torch.utils.data import (DataLoader, SequentialSampler) from torch.utils.data.distributed import DistributedSampler import tqdm from s2s_ft.modeling import BertForSequenceToSequenceWithPseudoMask, BertForSequenceToSequenceUniLMV1 from transformers import AdamW, get_linear_schedule_with_warmup from transformers import \ RobertaConfig, BertConfig, \ BertTokenizer, RobertaTokenizer, \ XLMRobertaConfig, XLMRobertaTokenizer, \ ElectraConfig, ElectraTokenizer from s2s_ft.configuration_unilm import UnilmConfig from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.configuration_minilm import MinilmConfig from s2s_ft.tokenization_minilm import MinilmTokenizer from s2s_ft import utils from s2s_ft.config import BertForSeq2SeqConfig logger = logging.getLogger(__name__) MODEL_CLASSES = { 'bert': (BertConfig, BertTokenizer), 'minilm': (MinilmConfig, MinilmTokenizer), 'roberta': (RobertaConfig, RobertaTokenizer), 'xlm-roberta': (XLMRobertaConfig, XLMRobertaTokenizer), 'unilm': (UnilmConfig, UnilmTokenizer), 'electra': (ElectraConfig, ElectraTokenizer), } class BertForSequenceToSequenceWithPseudoMask(BertForSequenceToSequence): MODEL_NAME = "BertForSequenceToSequenceWithPseudoMask" def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids): weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids, -target_span_ids), dim=1) from_weight = weight.unsqueeze(-1) to_weight = weight.unsqueeze(1) true_tokens = (0 <= to_weight) & (torch.cat((source_mask, target_mask, target_mask), dim=1) == 1).unsqueeze(1) true_tokens_mask = (from_weight >= 0) & true_tokens & (to_weight <= from_weight) pseudo_tokens_mask = (from_weight < 0) & true_tokens & (-to_weight > from_weight) pseudo_tokens_mask = pseudo_tokens_mask | ((from_weight < 0) & (to_weight == from_weight)) return 
(true_tokens_mask | pseudo_tokens_mask).type_as(source_mask) def forward( self, source_ids, target_ids, label_ids, pseudo_ids, num_source_tokens, num_target_tokens, target_span_ids=None, target_no_offset=None): source_len = source_ids.size(1) target_len = target_ids.size(1) pseudo_len = pseudo_ids.size(1) assert target_len == pseudo_len assert source_len > 0 and target_len > 0 split_lengths = (source_len, target_len, pseudo_len) input_ids = torch.cat((source_ids, target_ids, pseudo_ids), dim=1) token_type_ids = torch.cat( (torch.ones_like(source_ids) * self.source_type_id, torch.ones_like(target_ids) * self.target_type_id, torch.ones_like(pseudo_ids) * self.target_type_id), dim=1) source_mask, source_position_ids = \ create_mask_and_position_ids(num_source_tokens, source_len) target_mask, target_position_ids = \ create_mask_and_position_ids( num_target_tokens, target_len, offset=None if target_no_offset else num_source_tokens) position_ids = torch.cat((source_position_ids, target_position_ids, target_position_ids), dim=1) if target_span_ids is None: target_span_ids = target_position_ids attention_mask = self.create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, split_lengths=split_lengths) sequence_output = outputs[0] pseudo_sequence_output = sequence_output[:, source_len + target_len:, ] def loss_mask_and_normalize(loss, mask): mask = mask.type_as(loss) loss = loss * mask denominator = torch.sum(mask) + 1e-5 return (loss / denominator).sum() prediction_scores_masked = self.cls(pseudo_sequence_output) if self.crit_mask_lm_smoothed: masked_lm_loss = self.crit_mask_lm_smoothed( F.log_softmax(prediction_scores_masked.float(), dim=-1), label_ids) else: masked_lm_loss = self.crit_mask_lm( prediction_scores_masked.transpose(1, 2).float(), label_ids) pseudo_lm_loss = loss_mask_and_normalize( masked_lm_loss.float(), 
target_mask) return pseudo_lm_loss class BertForSequenceToSequenceUniLMV1(BertForSequenceToSequence): MODEL_NAME = "BertForSequenceToSequenceUniLMV1" def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids): weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids), dim=1) from_weight = weight.unsqueeze(-1) to_weight = weight.unsqueeze(1) true_tokens = torch.cat((source_mask, target_mask), dim=1).unsqueeze(1) return ((true_tokens == 1) & (to_weight <= from_weight)).type_as(source_mask) def forward(self, source_ids, target_ids, masked_ids, masked_pos, masked_weight, num_source_tokens, num_target_tokens): source_len = source_ids.size(1) target_len = target_ids.size(1) split_lengths = (source_len, target_len) input_ids = torch.cat((source_ids, target_ids), dim=1) token_type_ids = torch.cat( (torch.ones_like(source_ids) * self.source_type_id, torch.ones_like(target_ids) * self.target_type_id), dim=1) source_mask, source_position_ids = \ create_mask_and_position_ids(num_source_tokens, source_len) target_mask, target_position_ids = \ create_mask_and_position_ids( num_target_tokens, target_len, offset=num_source_tokens) position_ids = torch.cat((source_position_ids, target_position_ids), dim=1) attention_mask = self.create_attention_mask( source_mask, target_mask, source_position_ids, target_position_ids) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, split_lengths=split_lengths) def gather_seq_out_by_pos(seq, pos): return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1))) sequence_output = outputs[0] target_sequence_output = sequence_output[:, source_len:, ] masked_sequence_output = gather_seq_out_by_pos(target_sequence_output, masked_pos) def loss_mask_and_normalize(loss, mask): mask = mask.type_as(loss) loss = loss * mask denominator = torch.sum(mask) + 1e-5 return (loss / denominator).sum() prediction_scores_masked = 
self.cls(masked_sequence_output) if self.crit_mask_lm_smoothed: masked_lm_loss = self.crit_mask_lm_smoothed( F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_ids) else: masked_lm_loss = self.crit_mask_lm( prediction_scores_masked.transpose(1, 2).float(), masked_ids) pseudo_lm_loss = loss_mask_and_normalize( masked_lm_loss.float(), masked_weight) return pseudo_lm_loss class BertForSeq2SeqConfig(BertConfig): def __init__(self, label_smoothing=0.1, source_type_id=0, target_type_id=1, rel_pos_bins=0, max_rel_pos=0, fix_word_embedding=False, **kwargs): super(BertForSeq2SeqConfig, self).__init__(**kwargs) self.label_smoothing = label_smoothing self.source_type_id = source_type_id self.target_type_id = target_type_id self.max_rel_pos = max_rel_pos self.rel_pos_bins = rel_pos_bins self.fix_word_embedding = fix_word_embedding def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None, fix_word_embedding=False): required_keys = [ "vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads", "hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob", "max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps", ] kwargs = {} for key in required_keys: assert hasattr(config, key) kwargs[key] = getattr(config, key) kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"] if isinstance(config, RobertaConfig): kwargs["type_vocab_size"] = 0 kwargs["max_position_embeddings"] = kwargs["max_position_embeddings"] - 2 additional_keys = [ "source_type_id", "target_type_id", "rel_pos_bins", "max_rel_pos", ] for key in additional_keys: if hasattr(config, key): kwargs[key] = getattr(config, key) if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings: kwargs["max_position_embeddings"] = max_position_embeddings logger.info(" ** Change max position embeddings to %d ** " % max_position_embeddings) return cls(label_smoothing=label_smoothing, 
fix_word_embedding=fix_word_embedding, **kwargs) def get_model_and_tokenizer(args): config_class, tokenizer_class = MODEL_CLASSES[args.model_type] model_config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) config = BertForSeq2SeqConfig.from_exist_config( config=model_config, label_smoothing=args.label_smoothing, fix_word_embedding=args.fix_word_embedding, max_position_embeddings=args.max_source_seq_length + args.max_target_seq_length) logger.info("Model config for seq2seq: %s", str(config)) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model_class = \ BertForSequenceToSequenceWithPseudoMask if args.mask_way == 'v2' \ else BertForSequenceToSequenceUniLMV1 logger.info("Construct model %s" % model_class.MODEL_NAME) model = model_class.from_pretrained( args.model_name_or_path, config=config, model_type=args.model_type, reuse_position_embedding=True, cache_dir=args.cache_dir if args.cache_dir else None) return model, tokenizer
null
184,672
import torch import logging from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME logger = logging.getLogger(__name__) def get_checkpoint_from_transformer_cache( archive_file, pretrained_model_name_or_path, pretrained_model_archive_map, cache_dir, force_download, proxies, resume_download, ): try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download) except EnvironmentError: if pretrained_model_name_or_path in pretrained_model_archive_map: msg = "Couldn't reach server at '{}' to download pretrained weights.".format( archive_file) else: msg = "Model name '{}' was not found in model name list ({}). " \ "We assumed '{}' was a path or url to model weight files named one of {} but " \ "couldn't find any such file at this path or url.".format( pretrained_model_name_or_path, ', '.join(pretrained_model_archive_map.keys()), archive_file, [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME]) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info("loading weights file {}".format(archive_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) return torch.load(resolved_archive_file, map_location='cpu')
null
184,673
import torch import logging from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME logger = logging.getLogger(__name__) def hf_roberta_to_hf_bert(state_dict): logger.info(" * Convert Huggingface RoBERTa format to Huggingface BERT format * ") new_state_dict = {} for key in state_dict: value = state_dict[key] if key == 'roberta.embeddings.position_embeddings.weight': value = value[2:] if key == 'roberta.embeddings.token_type_embeddings.weight': continue if key.startswith('roberta'): key = 'bert.' + key[8:] elif key.startswith('lm_head'): if 'layer_norm' in key or 'dense' in key: key = 'cls.predictions.transform.' + key[8:] else: key = 'cls.predictions.' + key[8:] key = key.replace('layer_norm', 'LayerNorm') new_state_dict[key] = value return new_state_dict
null
184,674
import torch import logging from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME logger = logging.getLogger(__name__) def hf_electra_to_hf_bert(state_dict): logger.info(" * Convert Huggingface ELECTRA format to Huggingface BERT format * ") new_state_dict = {} for key in state_dict: value = state_dict[key] if key.startswith('electra'): key = 'bert.' + key[8:] new_state_dict[key] = value return new_state_dict
null
184,675
import torch import logging from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME def hf_bert_to_hf_bert(state_dict): # keep no change return state_dict
null
184,676
import torch import logging from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME logger = logging.getLogger(__name__) def unilm_to_hf_bert(state_dict): logger.info(" * Convert Fast QKV format to Huggingface BERT format * ") new_state_dict = {} for key in state_dict: value = state_dict[key] if key.endswith("attention.self.q_bias"): new_state_dict[key.replace("attention.self.q_bias", "attention.self.query.bias")] = value.view(-1) elif key.endswith("attention.self.v_bias"): new_state_dict[key.replace("attention.self.v_bias", "attention.self.value.bias")] = value.view(-1) new_state_dict[key.replace("attention.self.v_bias", "attention.self.key.bias")] = torch.zeros_like(value.view(-1)) elif key.endswith("attention.self.qkv_linear.weight"): l, _ = value.size() assert l % 3 == 0 l = l // 3 q, k, v = torch.split(value, split_size_or_sections=(l, l, l), dim=0) new_state_dict[key.replace("attention.self.qkv_linear.weight", "attention.self.query.weight")] = q new_state_dict[key.replace("attention.self.qkv_linear.weight", "attention.self.key.weight")] = k new_state_dict[key.replace("attention.self.qkv_linear.weight", "attention.self.value.weight")] = v elif key == "bert.encoder.rel_pos_bias.weight": new_state_dict["bert.rel_pos_bias.weight"] = value else: new_state_dict[key] = value del state_dict return new_state_dict
null
184,677
from __future__ import absolute_import, division, print_function import logging import os import json import random import glob import torch import tqdm import array import collections import torch.utils.data from transformers.file_utils import WEIGHTS_NAME def deserialize_str(x): return x.decode('ascii')
null
184,678
from __future__ import absolute_import, division, print_function import logging import os import json import random import glob import torch import tqdm import array import collections import torch.utils.data from transformers.file_utils import WEIGHTS_NAME try: import lmdb except: pass logger = logging.getLogger(__name__) class TrainingExample(object): def __init__(self, source_ids, target_ids, example_id): self.source_ids = source_ids self.target_ids = target_ids self.example_id = example_id def report_length(length_counter, total_count): max_len = max(length_counter.keys()) a = 0 tc = 0 while a < max_len: cc = 0 for i in range(16): cc += length_counter[a + i] tc += cc if cc > 0: logger.info("%d ~ %d = %d, %.2f%%" % (a, a + 16, cc, (tc * 100.0) / total_count)) a += 16 def serialize_str(x): return u"{}".format(x).encode('ascii') def serialize_array(x, dtype): data = array.array(dtype) data.fromlist(x) return data.tobytes() def write_to_lmdb(db, key, value): success = False while not success: txn = db.begin(write=True) try: txn.put(key, value) txn.commit() success = True except lmdb.MapFullError: txn.abort() # double the map_size curr_limit = db.info()['map_size'] new_limit = curr_limit*2 print('>>> Doubling LMDB map size to %sMB ...' 
% (new_limit >> 20,)) db.set_mapsize(new_limit) # double it class DocDB(object): def __init__(self, db_path): self.db_path = db_path self.env = lmdb.open(db_path, readonly=True, lock=False, readahead=False, meminit=False) with self.env.begin(write=False) as txn: self.start_key_index = int(deserialize_str(txn.get(b'__start__'))) self.size = int(deserialize_str(txn.get(b'__size__'))) self.dtype = deserialize_str(txn.get(b'__dtype__')) def _deserialize_array(self, x): data = array.array(self.dtype) data.frombytes(x) return data.tolist() def __getitem__(self, doc_id): with self.env.begin(write=False) as txn: # example = { # "source_ids": self._deserialize_array(txn.get(b"src_ids_%d" % doc_id)), # "target_ids": self._deserialize_array(txn.get(b"tgt_ids_%d" % doc_id)), # } example = TrainingExample( source_ids=self._deserialize_array(txn.get(b"src_ids_%d" % doc_id)), target_ids=self._deserialize_array(txn.get(b"tgt_ids_%d" % doc_id)), example_id=None, ) return example def __len__(self): return self.size def load_and_cache_examples( example_file, tokenizer, local_rank, cached_features_file, shuffle=True, lmdb_cache=None, lmdb_dtype='h', eval_mode=False): # Make sure only the first process in distributed training process the dataset, and the others will use the cache if local_rank not in [-1, 0]: torch.distributed.barrier() if cached_features_file is not None and os.path.isfile(cached_features_file): logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) elif cached_features_file is not None and os.path.isdir(cached_features_file) \ and os.path.exists(os.path.join(cached_features_file, 'lock.mdb')): logger.info("Loading features from cached LMDB %s", cached_features_file) features = DocDB(cached_features_file) else: logger.info("Creating features from dataset file at %s", example_file) examples = [] with open(example_file, mode="r", encoding="utf-8") as reader: for line in reader: 
examples.append(json.loads(line)) features = [] slc = collections.defaultdict(int) tlc = collections.defaultdict(int) for example in tqdm.tqdm(examples): if isinstance(example["src"], list): source_tokens = example["src"] target_tokens = [] if eval_mode else example["tgt"] else: source_tokens = tokenizer.tokenize(example["src"]) target_tokens = [] if eval_mode else tokenizer.tokenize(example["tgt"]) source_ids = tokenizer.convert_tokens_to_ids(source_tokens) target_ids = tokenizer.convert_tokens_to_ids(target_tokens) slc[len(source_ids)] += 1 tlc[len(target_ids)] += 1 # features.append({ # "source_ids": source_ids, # "target_ids": target_ids, # }) features.append( TrainingExample( source_ids=source_ids, target_ids=target_ids, example_id=len(features), ) ) if shuffle: random.shuffle(features) logger.info("Shuffle the features !") logger.info("Source length:") report_length(slc, total_count=len(examples)) logger.info("Target length:") report_length(tlc, total_count=len(examples)) if local_rank in [-1, 0] and cached_features_file is not None: if lmdb_cache: db = lmdb.open(cached_features_file, readonly=False, map_async=True) for idx, feature in enumerate(features): write_to_lmdb( db, b"src_ids_%d" % idx, serialize_array(feature.source_ids, dtype=lmdb_dtype)) write_to_lmdb( db, b"tgt_ids_%d" % idx, serialize_array(feature.target_ids, dtype=lmdb_dtype)) write_to_lmdb(db, b"__start__", serialize_str(0)) write_to_lmdb(db, b"__size__", serialize_str(len(features))) write_to_lmdb(db, b"__dtype__", serialize_str(lmdb_dtype)) db.sync() db.close() logger.info("db_key_idx = %d" % len(features)) del features features = cached_features_file logger.info("Saving features into cached lmdb dir %s", cached_features_file) else: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) # Make sure only the first process in distributed training process the dataset, and the others will use the cache if local_rank == 0: 
torch.distributed.barrier() return features
null
184,679
import numpy as np from random import randint, shuffle, choice from random import random as rand import math import logging import torch import torch.utils.data def get_random_word(vocab_words): i = randint(0, len(vocab_words)-1) return vocab_words[i]
null
184,680
import numpy as np from random import randint, shuffle, choice from random import random as rand import math import logging import torch import torch.utils.data def batch_list_to_batch_tensors(batch): batch_tensors = [] for x in zip(*batch): if x[0] is None: batch_tensors.append(None) elif isinstance(x[0], torch.Tensor): batch_tensors.append(torch.stack(x)) else: batch_tensors.append(torch.tensor(x, dtype=torch.long)) return batch_tensors
null
184,681
import numpy as np from random import randint, shuffle, choice from random import random as rand import math import logging import torch import torch.utils.data def _get_word_split_index(tokens, st, end): split_idx = [] i = st while i < end: if (not tokens[i].startswith('##')) or (i == st): split_idx.append(i) i += 1 split_idx.append(end) return split_idx
null
184,682
import numpy as np from random import randint, shuffle, choice from random import random as rand import math import logging import torch import torch.utils.data def _expand_whole_word(tokens, st, end): new_st, new_end = st, end while (new_st >= 0) and tokens[new_st].startswith('##'): new_st -= 1 while (new_end < len(tokens)) and tokens[new_end].startswith('##'): new_end += 1 return new_st, new_end
null
184,683
from __future__ import absolute_import, division, print_function, unicode_literals import logging import math import os import torch from torch import nn from torch.nn.modules.loss import _Loss import torch.nn.functional as F from transformers.modeling_bert import \ BertPreTrainedModel, BertSelfOutput, BertIntermediate, \ BertOutput, BertPredictionHeadTransform, BertPooler from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.modeling_distilbert import DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.modeling_xlm_roberta import XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.file_utils import WEIGHTS_NAME from s2s_ft.config import BertForSeq2SeqConfig from s2s_ft.convert_state_dict import get_checkpoint_from_transformer_cache, state_dict_convert The provided code snippet includes necessary dependencies for implementing the `relative_position_bucket` function. 
Write a Python function `def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128)` to solve the following problem: Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Here is the function: def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 """ ret = 0 if bidirectional: num_buckets //= 2 # mtf.to_int32(mtf.less(n, 0)) * num_buckets ret += (relative_position > 0).long() * num_buckets n = torch.abs(relative_position) else: n = torch.max(-relative_position, torch.zeros_like(relative_position)) # now n is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = n < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min( val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret
Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
184,684
from __future__ import absolute_import, division, print_function, unicode_literals import logging import math import os import torch from torch import nn from torch.nn.modules.loss import _Loss import torch.nn.functional as F from transformers.modeling_bert import \ BertPreTrainedModel, BertSelfOutput, BertIntermediate, \ BertOutput, BertPredictionHeadTransform, BertPooler from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.modeling_distilbert import DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.modeling_xlm_roberta import XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP from transformers.file_utils import WEIGHTS_NAME from s2s_ft.config import BertForSeq2SeqConfig from s2s_ft.convert_state_dict import get_checkpoint_from_transformer_cache, state_dict_convert def create_mask_and_position_ids(num_tokens, max_len, offset=None): base_position_matrix = torch.arange( 0, max_len, dtype=num_tokens.dtype, device=num_tokens.device).view(1, -1) mask = (base_position_matrix < num_tokens.view(-1, 1)).type_as(num_tokens) if offset is not None: base_position_matrix = base_position_matrix + offset.view(-1, 1) position_ids = base_position_matrix * mask return mask, position_ids
null
184,685
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np from functools import partial import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss import torch.nn.functional as F from transformers.file_utils import cached_path from torch.nn.modules.loss import _Loss The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem: Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Here is the function: def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
184,686
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np from functools import partial import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss import torch.nn.functional as F from transformers.file_utils import cached_path from torch.nn.modules.loss import _Loss def swish(x): return x * torch.sigmoid(x)
null
184,687
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np from functools import partial import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss import torch.nn.functional as F from transformers.file_utils import cached_path from torch.nn.modules.loss import _Loss The provided code snippet includes necessary dependencies for implementing the `relative_position_bucket` function. Write a Python function `def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128)` to solve the following problem: Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Here is the function: def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 """ ret = 0 if bidirectional: num_buckets //= 2 # mtf.to_int32(mtf.less(n, 0)) * num_buckets ret += (relative_position > 0).long() * num_buckets n = torch.abs(relative_position) else: n = torch.max(-relative_position, torch.zeros_like(relative_position)) # now n is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = n < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min( val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret
Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
184,688
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np from functools import partial import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss import torch.nn.functional as F from transformers.file_utils import cached_path from torch.nn.modules.loss import _Loss def get_div_func(): # a crude code fix floor div for multiple torch version # https://github.com/microsoft/unilm/issues/297 # Thanks github user @guijuzhejiang, @piskunow and @zengyan-97 x = torch.ones(size=(1,), dtype=torch.long) * 11 try: # for pytorch 1.8+ div_func = partial(torch.div, rounding_mode='floor') y = div_func(x, 4) return div_func except: pass try: # for pytorch 1.6 & 1.7 div_func = torch.floor_divide y = div_func(x, 4) return div_func except: pass div_func = torch.div y = div_func(x, 4) if y.dtype != torch.long: raise NotImplementedError("Can not found right floor div function !") return div_func
null
184,689
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np import rouge import time import tempfile import shutil from evaluations.bs_pyrouge import Rouge155 def rouge_results_to_str(results_dict): return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format( results_dict["rouge_1_f_score"] * 100, results_dict["rouge_2_f_score"] * 100, results_dict["rouge_l_f_score"] * 100, results_dict["rouge_1_recall"] * 100, results_dict["rouge_2_recall"] * 100, results_dict["rouge_l_recall"] * 100 )
null
184,690
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np import rouge import time import tempfile import shutil from evaluations.bs_pyrouge import Rouge155 def count_tokens(tokens): counter = {} for t in tokens: if t in counter.keys(): counter[t] += 1 else: counter[t] = 1 return counter def get_f1(text_a, text_b): tokens_a = text_a.lower().split() tokens_b = text_b.lower().split() if len(tokens_a) == 0 or len(tokens_b) == 0: return 1 if len(tokens_a) == len(tokens_b) else 0 set_a = count_tokens(tokens_a) set_b = count_tokens(tokens_b) match = 0 for token in set_a.keys(): if token in set_b.keys(): match += min(set_a[token], set_b[token]) p = match / len(tokens_a) r = match / len(tokens_b) return 2.0 * p * r / (p + r + 1e-5)
null
184,691
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np import rouge import time import tempfile import shutil from evaluations.bs_pyrouge import Rouge155 args = parser.parse_args() evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False, apply_avg=True, weight_factor=1.2) def test_rouge(cand, ref): temp_dir = tempfile.mkdtemp() candidates = cand references = ref assert len(candidates) == len(references) cnt = len(candidates) current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time)) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) os.mkdir(tmp_dir + "/candidate") os.mkdir(tmp_dir + "/reference") try: for i in range(cnt): if len(references[i]) < 1: continue with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(candidates[i]) with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(references[i]) r = Rouge155(temp_dir=temp_dir) r.model_dir = tmp_dir + "/reference/" r.system_dir = tmp_dir + "/candidate/" r.model_filename_pattern = 'ref.#ID#.txt' r.system_filename_pattern = r'cand.(\d+).txt' rouge_results = r.convert_and_evaluate() print(rouge_results) results_dict = r.output_to_dict(rouge_results) finally: if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) return results_dict def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") 
else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) def process_eval(eval_fn): gold_list = [] with open(args.gold, "r", encoding="utf-8") as f_in: for l in f_in: line = l.strip() gold_list.append(line) pred_list = [] with open(eval_fn, "r", encoding="utf-8") as f_in: for l in f_in: buf = [] sentence = fix_tokenization(l.strip()).replace('1', '#') buf.append(sentence) if args.trunc_len: num_left = args.trunc_len trunc_list = [] for bit in buf: tk_list = bit.split() n = min(len(tk_list), num_left) trunc_list.append(' '.join(tk_list[:n])) num_left -= n if num_left <= 0: break else: trunc_list = buf line = "\n".join(trunc_list) pred_list.append(line) with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out: for l in pred_list: f_out.write(l.strip()) f_out.write('\n') # rouge scores if len(pred_list) < len(gold_list): # evaluate subset gold_list = gold_list[:len(pred_list)] assert len(pred_list) == len(gold_list) if args.perl: scores = test_rouge(pred_list, gold_list) else: scores = evaluator.get_scores(pred_list, [[it] for it in gold_list]) return eval_fn, scores
null
184,694
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np import rouge import time import tempfile import shutil from evaluations.bs_pyrouge import Rouge155 args = parser.parse_args() evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False, apply_avg=True, weight_factor=1.2) def test_rouge(cand, ref): temp_dir = tempfile.mkdtemp() candidates = cand references = ref assert len(candidates) == len(references) cnt = len(candidates) current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time)) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) os.mkdir(tmp_dir + "/candidate") os.mkdir(tmp_dir + "/reference") try: for i in range(cnt): if len(references[i]) < 1: continue with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(candidates[i]) with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(references[i]) r = Rouge155(temp_dir=temp_dir) r.model_dir = tmp_dir + "/reference/" r.system_dir = tmp_dir + "/candidate/" r.model_filename_pattern = 'ref.#ID#.txt' r.system_filename_pattern = r'cand.(\d+).txt' rouge_results = r.convert_and_evaluate() print(rouge_results) results_dict = r.output_to_dict(rouge_results) finally: if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) return results_dict def get_f1(text_a, text_b): tokens_a = text_a.lower().split() tokens_b = text_b.lower().split() if len(tokens_a) == 0 or len(tokens_b) == 0: return 1 if len(tokens_a) == len(tokens_b) else 0 set_a = count_tokens(tokens_a) set_b = count_tokens(tokens_b) match = 0 for token in set_a.keys(): if token in set_b.keys(): match += min(set_a[token], set_b[token]) p = 
match / len(tokens_a) r = match / len(tokens_b) return 2.0 * p * r / (p + r + 1e-5) def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) def remove_duplicate(l_list, duplicate_rate): tk_list = [l.lower().split() for l in l_list] r_list = [] history_set = set() for i, w_list in enumerate(tk_list): w_set = set(w_list) if len(w_set & history_set)/len(w_set) <= duplicate_rate: r_list.append(l_list[i]) history_set |= w_set return r_list def process_eval(eval_fn): gold_list = [] with open(args.gold, "r", encoding="utf-8") as f_in: for l in f_in: line = l.strip().replace(" <S_SEP> ", '\n') gold_list.append(line) pred_list = [] with open(eval_fn, "r", encoding="utf-8") as f_in: for l in f_in: buf = [] for sentence in l.strip().split("[X_SEP]"): sentence = fix_tokenization(sentence) sentence = sentence.replace("(", " -LRB- ").replace(")", " -RRB- ") sentence = sentence.replace("[", " -LSB- ").replace("]", " -RSB- ") while " " in sentence: sentence = sentence.replace(" ", " ") if any(get_f1(sentence, s) > 1.0 for s in buf): continue s_len = len(sentence.split()) if s_len <= 4: continue buf.append(sentence) if args.duplicate_rate and args.duplicate_rate < 1: buf = remove_duplicate(buf, args.duplicate_rate) if args.trunc_len: num_left = args.trunc_len trunc_list = [] for bit in buf: tk_list = bit.split() n = 
min(len(tk_list), num_left) trunc_list.append(' '.join(tk_list[:n])) num_left -= n if num_left <= 0: break else: trunc_list = buf line = "\n".join(trunc_list) pred_list.append(line) with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out: for l in pred_list: f_out.write(l.replace('\n', ' [X_SEP] ').strip()) f_out.write('\n') # rouge scores if len(pred_list) < len(gold_list): # evaluate subset gold_list = gold_list[:len(pred_list)] assert len(pred_list) == len(gold_list) if args.perl: scores = test_rouge(pred_list, gold_list) else: scores = evaluator.get_scores(pred_list, [[it] for it in gold_list]) return eval_fn, scores
null
184,697
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np import rouge import time import tempfile import shutil from evaluations.bs_pyrouge import Rouge155 args = parser.parse_args() evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False, apply_avg=True, weight_factor=1.2) def test_rouge(cand, ref): temp_dir = tempfile.mkdtemp() candidates = cand references = ref assert len(candidates) == len(references) cnt = len(candidates) current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time)) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) os.mkdir(tmp_dir + "/candidate") os.mkdir(tmp_dir + "/reference") try: for i in range(cnt): if len(references[i]) < 1: continue with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(candidates[i]) with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(references[i]) r = Rouge155(temp_dir=temp_dir) r.model_dir = tmp_dir + "/reference/" r.system_dir = tmp_dir + "/candidate/" r.model_filename_pattern = 'ref.#ID#.txt' r.system_filename_pattern = r'cand.(\d+).txt' rouge_results = r.convert_and_evaluate() print(rouge_results) results_dict = r.output_to_dict(rouge_results) finally: if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) return results_dict def fix_tokenization(text): input_tokens = text.split() output_tokens = [] i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and 
input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) def process_eval(eval_fn): gold_list = [] with open(args.gold, "r", encoding="utf-8") as f_in: for l in f_in: line = l.strip() gold_list.append(line) pred_list = [] with open(eval_fn, "r", encoding="utf-8") as f_in: for l in f_in: buf = [] sentence = fix_tokenization(l.strip()).replace("(", " -LRB- ").replace(")", " -RRB- ") while " " in sentence: sentence = sentence.replace(" ", " ") buf.append(sentence) if args.trunc_len: num_left = args.trunc_len trunc_list = [] for bit in buf: tk_list = bit.split() n = min(len(tk_list), num_left) trunc_list.append(' '.join(tk_list[:n])) num_left -= n if num_left <= 0: break else: trunc_list = buf line = "\n".join(trunc_list) pred_list.append(line) with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out: for l in pred_list: f_out.write(l.strip()) f_out.write('\n') # rouge scores if len(pred_list) < len(gold_list): # evaluate subset gold_list = gold_list[:len(pred_list)] assert len(pred_list) == len(gold_list) if args.perl: scores = test_rouge(pred_list, gold_list) else: scores = evaluator.get_scores(pred_list, [[it] for it in gold_list]) return eval_fn, scores
null
184,698
import pickle import math import argparse import glob import logging from pathlib import Path from tqdm import tqdm import unicodedata from transformers import BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.tokenization_minilm import MinilmTokenizer def read_traces_from_file(file_name): with open(file_name, "rb") as fin: meta = pickle.load(fin) num_samples = meta["num_samples"] samples = [] for _ in range(num_samples): samples.append(pickle.load(fin)) return samples
null
184,699
import pickle import math import argparse import glob import logging from pathlib import Path from tqdm import tqdm import unicodedata from transformers import BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.tokenization_minilm import MinilmTokenizer def get_best_sequence(sample, eos_id, pad_id, length_penalty=None, alpha=None, expect=None, min_len=None): # if not any((length_penalty, alpha, expect, min_len)): # raise ValueError( # "You can only specify length penalty or alpha, but not both.") scores = sample["scores"] wids_list = sample["wids"] ptrs = sample["ptrs"] last_frame_id = len(scores) - 1 for i, wids in enumerate(wids_list): if all(wid in (eos_id, pad_id) for wid in wids): last_frame_id = i break while all(wid == pad_id for wid in wids_list[last_frame_id]): last_frame_id -= 1 max_score = -math.inf frame_id = -1 pos_in_frame = -1 for fid in range(last_frame_id + 1): for i, wid in enumerate(wids_list[fid]): if fid <= last_frame_id and scores[fid][i] >= 0: # skip paddings continue if (wid in (eos_id, pad_id)) or fid == last_frame_id: s = scores[fid][i] if length_penalty: if expect: s -= length_penalty * math.fabs(fid+1 - expect) else: s += length_penalty * (fid + 1) elif alpha: s = s / math.pow((5 + fid + 1) / 6.0, alpha) if s > max_score: # if (frame_id != -1) and min_len and (fid+1 < min_len): # continue max_score = s frame_id = fid pos_in_frame = i if frame_id == -1: seq = [] else: seq = [wids_list[frame_id][pos_in_frame]] for fid in range(frame_id, 0, -1): pos_in_frame = ptrs[fid][pos_in_frame] seq.append(wids_list[fid - 1][pos_in_frame]) seq.reverse() return seq
null
184,700
import pickle import math import argparse import glob import logging from pathlib import Path from tqdm import tqdm import unicodedata from transformers import BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.tokenization_minilm import MinilmTokenizer def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list
null
184,701
import pickle import math import argparse import glob import logging from pathlib import Path from tqdm import tqdm import unicodedata from transformers import BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.tokenization_minilm import MinilmTokenizer def simple_postprocess(tk_list): # truncate duplicate punctuations while tk_list and len(tk_list) > 4 and len(tk_list[-1]) == 1 and unicodedata.category(tk_list[-1]).startswith('P') and all(it == tk_list[-1] for it in tk_list[-4:]): tk_list = tk_list[:-3] return tk_list
null
184,702
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional def module_name_fordropout(module_name: str) -> str: if module_name == "TransformerEncoderBase": return "TransformerEncoder" else: return module_name
null
184,703
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional The provided code snippet includes necessary dependencies for implementing the `utils_make_positions` function. Write a Python function `def utils_make_positions(tensor, padding_idx: int, onnx_trace: bool = False)` to solve the following problem: Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. Here is the function: def utils_make_positions(tensor, padding_idx: int, onnx_trace: bool = False): """Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. """ # The series of casts and type-conversions here are carefully # balanced to both work with ONNX export and XLA. In particular XLA # prefers ints, cumsum defaults to output longs, and ONNX doesn't know # how to handle the dtype kwarg in cumsum. mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored.
184,704
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional def utils_item(tensor): # tpu-comment: making this a no-op for xla devices. if torch.is_tensor(tensor) and tensor.device.type == "xla": return tensor.detach() if hasattr(tensor, "item"): return tensor.item() if hasattr(tensor, "__getitem__"): return tensor[0] return tensor
null
184,705
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional The provided code snippet includes necessary dependencies for implementing the `fsdp_wrap` function. Write a Python function `def fsdp_wrap(module, min_num_params: Optional[int] = None, **kwargs)` to solve the following problem: Helper to wrap layers/modules in FSDP. This falls back to a no-op if fairscale is not available. Args: module (nn.Module): module to (maybe) wrap min_num_params (int, Optional): minimum number of layer params to wrap Here is the function: def fsdp_wrap(module, min_num_params: Optional[int] = None, **kwargs): """ Helper to wrap layers/modules in FSDP. This falls back to a no-op if fairscale is not available. Args: module (nn.Module): module to (maybe) wrap min_num_params (int, Optional): minimum number of layer params to wrap """ try: from fairscale.nn import wrap if min_num_params is not None: num_params = sum(p.numel() for p in module.parameters()) if num_params >= min_num_params: return wrap(module, **kwargs) else: return module else: return wrap(module, **kwargs) except ImportError: return module
Helper to wrap layers/modules in FSDP. This falls back to a no-op if fairscale is not available. Args: module (nn.Module): module to (maybe) wrap min_num_params (int, Optional): minimum number of layer params to wrap
184,706
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional The provided code snippet includes necessary dependencies for implementing the `quant_noise` function. Write a Python function `def quant_noise(module, p, block_size)` to solve the following problem: Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks Here is the function: def quant_noise(module, p, block_size): """ Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks """ # if no 
quantization noise, don't register hook if p <= 0: return module # supported modules assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)) # test whether module.weight has the right sizes wrt block_size is_conv = module.weight.ndim == 4 # 2D matrix if not is_conv: assert ( module.weight.size(1) % block_size == 0 ), "Input features must be a multiple of block sizes" # 4D matrix else: # 1x1 convolutions if module.kernel_size == (1, 1): assert ( module.in_channels % block_size == 0 ), "Input channels must be a multiple of block sizes" # regular convolutions else: k = module.kernel_size[0] * module.kernel_size[1] assert k % block_size == 0, "Kernel size must be a multiple of block size" def _forward_pre_hook(mod, input): # no noise for evaluation if mod.training: if not is_conv: # gather weight and sizes weight = mod.weight in_features = weight.size(1) out_features = weight.size(0) # split weight matrix into blocks and randomly drop selected blocks mask = torch.zeros( in_features // block_size * out_features, device=weight.device ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_features) else: # gather weight and sizes weight = mod.weight in_channels = mod.in_channels out_channels = mod.out_channels # split weight matrix into blocks and randomly drop selected blocks if mod.kernel_size == (1, 1): mask = torch.zeros( int(in_channels // block_size * out_channels), device=weight.device, ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels) else: mask = torch.zeros( weight.size(0), weight.size(1), device=weight.device ) mask.bernoulli_(p) mask = ( mask.unsqueeze(2) .unsqueeze(3) .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1]) ) # scale weights and apply mask mask = mask.to( torch.bool ) # x.bool() is not currently supported in TorchScript s = 1 / (1 - p) mod.weight.data = s * weight.masked_fill(mask, 0) module.register_forward_pre_hook(_forward_pre_hook) return module
Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks
184,707
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional logger = logging.getLogger(__name__) def relu_squared(x: torch.Tensor): return F.relu(x).pow(2) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x) def gelu_accurate(x): if not hasattr(gelu_accurate, "_a"): gelu_accurate._a = math.sqrt(2 / math.pi) return ( 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) ) The provided code snippet includes necessary dependencies for implementing the `get_activation_fn` function. Write a Python function `def get_activation_fn(activation: str) -> Callable` to solve the following problem: Returns the activation function corresponding to `activation` Here is the function: def get_activation_fn(activation: str) -> Callable: """Returns the activation function corresponding to `activation`""" if activation == "relu": return F.relu elif activation == "relu_squared": return relu_squared elif activation == "gelu": return gelu elif activation == "gelu_fast": logger.warn( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x elif activation == "swish": return torch.nn.SiLU else: raise RuntimeError("--activation-fn {} not supported".format(activation))
Returns the activation function corresponding to `activation`
184,708
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional def softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.softmax(x.float(), dim=dim) else: return F.softmax(x, dim=dim, dtype=torch.float32)
null
184,709
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional The provided code snippet includes necessary dependencies for implementing the `compute_mask_indices` function. Write a Python function `def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str = "static", mask_other: float = 0.0, min_masks: int = 0, no_overlap: bool = False, min_space: int = 0, require_same_masks: bool = True, mask_dropout: float = 0.0, ) -> np.ndarray` to solve the following problem: Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. 
mask is min 1 element poisson = sample from possion distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans require_same_masks: if true, will randomly drop out masks until same amount of masks remains in each sample mask_dropout: randomly dropout this percentage of masks in each example Here is the function: def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str = "static", mask_other: float = 0.0, min_masks: int = 0, no_overlap: bool = False, min_space: int = 0, require_same_masks: bool = True, mask_dropout: float = 0.0, ) -> np.ndarray: """ Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. 
mask is min 1 element poisson = sample from possion distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans require_same_masks: if true, will randomly drop out masks until same amount of masks remains in each sample mask_dropout: randomly dropout this percentage of masks in each example """ bsz, all_sz = shape mask = np.full((bsz, all_sz), False) all_num_mask = int( # add a random number for probabilistic rounding mask_prob * all_sz / float(mask_length) + np.random.rand() ) all_num_mask = max(min_masks, all_num_mask) mask_idcs = [] for i in range(bsz): if padding_mask is not None: sz = all_sz - padding_mask[i].long().sum().item() num_mask = int( # add a random number for probabilistic rounding mask_prob * sz / float(mask_length) + np.random.rand() ) num_mask = max(min_masks, num_mask) else: sz = all_sz num_mask = all_num_mask if mask_type == "static": lengths = np.full(num_mask, mask_length) elif mask_type == "uniform": lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask) elif mask_type == "normal": lengths = np.random.normal(mask_length, mask_other, size=num_mask) lengths = [max(1, int(round(x))) for x in lengths] elif mask_type == "poisson": lengths = np.random.poisson(mask_length, size=num_mask) lengths = [int(round(x)) for x in lengths] else: raise Exception("unknown mask selection " + mask_type) if sum(lengths) == 0: lengths[0] = min(mask_length, sz - 1) if no_overlap: mask_idc = [] def arrange(s, e, length, keep_length): span_start = np.random.randint(s, e - length) mask_idc.extend(span_start + i for i in range(length)) new_parts = [] if span_start - s - min_space >= keep_length: new_parts.append((s, span_start - min_space + 1)) if e - span_start - keep_length - min_space > keep_length: 
new_parts.append((span_start + length + min_space, e)) return new_parts parts = [(0, sz)] min_length = min(lengths) for length in sorted(lengths, reverse=True): lens = np.fromiter( (e - s if e - s >= length + min_space else 0 for s, e in parts), np.int, ) l_sum = np.sum(lens) if l_sum == 0: break probs = lens / np.sum(lens) c = np.random.choice(len(parts), p=probs) s, e = parts.pop(c) parts.extend(arrange(s, e, length, min_length)) mask_idc = np.asarray(mask_idc) else: min_len = min(lengths) if sz - min_len <= num_mask: min_len = sz - num_mask - 1 mask_idc = np.random.choice(sz - min_len, num_mask, replace=False) mask_idc = np.asarray( [ mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j]) ] ) mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) min_len = min([len(m) for m in mask_idcs]) for i, mask_idc in enumerate(mask_idcs): if len(mask_idc) > min_len and require_same_masks: mask_idc = np.random.choice(mask_idc, min_len, replace=False) if mask_dropout > 0: num_holes = np.rint(len(mask_idc) * mask_dropout).astype(int) mask_idc = np.random.choice( mask_idc, len(mask_idc) - num_holes, replace=False ) mask[i, mask_idc] = True return mask
Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element poisson = sample from possion distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans require_same_masks: if true, will randomly drop out masks until same amount of masks remains in each sample mask_dropout: randomly dropout this percentage of masks in each example
184,710
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, has_relative_attention_bias=False, scaling_for_att=1.0 ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.has_relative_attention_bias = has_relative_attention_bias self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.scaling_for_att = scaling_for_att self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise( nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.q_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v 
= None self.add_zero_attn = add_zero_attn self.reset_parameters() self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, position_bias: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. 
Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True is_tpu = query.device.type == "xla" tgt_len, bsz, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" assert list(query.size()) == [tgt_len, bsz, embed_dim] if key is not None: src_len, key_bsz, _ = key.size() if not torch.jit.is_scripting(): assert key_bsz == bsz assert value is not None assert src_len, bsz == value.shape[:2] if ( not self.onnx_trace and not is_tpu # don't use PyTorch version on TPUs and incremental_state is None and not static_kv # A workaround for quantization to work. Otherwise JIT compilation # treats bias in linear module as method. and not torch.jit.is_scripting() and not self.has_relative_attention_bias ): assert key is not None and value is not None return F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training or self.dropout_module.apply_during_inference, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = 
self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling q *= (1 / self.scaling_for_att) if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) src_len = k.size(1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = 
k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None assert k.size(1) == src_len # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) if position_bias is not None: ## first order ## position_bias: [241, 241, 64] #print ("attn_weights: ", attn_weights.size()) # [492, 241, 241] reshape_q = q.contiguous().view(bsz * self.num_heads, -1, self.head_dim).transpose(0,1) #[241, 492, 64] #print ("reshape_q: ", reshape_q.size()) B = torch.matmul(reshape_q, position_bias.transpose(-2, -1)) #print ("B: ", B.size()) ## [241, 492, 241] #B = B.transpose(0, 1).view(bsz, self.num_heads, position_bias.size(0), position_bias.size(1)) B = B.transpose(0, 1).view(bsz*self.num_heads, position_bias.size(0), position_bias.size(1)) #print ("B 2: ", B.size()) attn_weights += B attn_weights *= self.scaling_for_att assert list(attn_weights.size()) == [bsz * 
self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if self.scaling_for_att > 1.0: attn_weights = attn_weights - attn_weights.detach().max(dim=-1, keepdim=True)[0] if before_softmax: return attn_weights, v attn_weights_float = softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if self.onnx_trace and attn.size(1) == 1: # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have 
shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: if src_len > prev_key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask.float() elif key_padding_mask is not None: if src_len > key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = key_padding_mask.float() else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size( 0 ) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if 
result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value The provided code snippet includes necessary dependencies for implementing the `init_bert_params` function. Write a Python function `def init_bert_params(module)` to solve the following problem: Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. 
If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated). Here is the function: def init_bert_params(module): """ Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated). """ def normal_(data): # with FSDP, module params will be on CUDA, so we cast them back to CPU # so that the RNG is consistent with and without FSDP data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device)) if isinstance(module, nn.Linear): normal_(module.weight.data) if module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Embedding): normal_(module.weight.data) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, MultiheadAttention): normal_(module.q_proj.weight.data) normal_(module.k_proj.weight.data) normal_(module.v_proj.weight.data)
Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated).
184,711
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional def pad_to_multiple(x, multiple, dim=-1, value=0): # Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41 if x is None: return None, 0 tsz = x.size(dim) m = tsz / multiple remainder = math.ceil(m) * multiple - tsz if m.is_integer(): return x, 0 pad_offset = (0,) * (-1 - dim) * 2 return F.pad(x, (*pad_offset, 0, remainder), value=value), remainder
null
184,712
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional def is_xla_tensor(tensor): return torch.is_tensor(tensor) and tensor.device.type == "xla" def index_put(tensor, indices, value): if is_xla_tensor(tensor): for _ in range(indices.dim(), tensor.dim()): indices = indices.unsqueeze(-1) if indices.size(-1) < tensor.size(-1): indices = indices.expand_as(tensor) tensor = torch.mul(tensor, ~indices) + torch.mul(value, indices) else: tensor[indices] = value return tensor
null
184,713
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional class LearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): super().__init__(num_embeddings, embedding_dim, padding_idx) self.onnx_trace = False if self.padding_idx is not None: self.max_positions = self.num_embeddings - self.padding_idx - 1 else: self.max_positions = self.num_embeddings def forward( self, input: Tensor, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, positions: Optional[Tensor] = None, ): """Input is expected to be of size [bsz x seqlen].""" assert (positions is None) or ( self.padding_idx is None ), "If positions is pre-computed then padding_idx should not be set." if positions is None: if incremental_state is not None: # positions is the same for every token when decoding a single step # Without the int() cast, it doesn't work in some cases when exporting to ONNX positions = torch.zeros( (1, 1), device=input.device, dtype=input.dtype ).fill_(int(self.padding_idx + input.size(1))) else: positions = utils_make_positions( input, self.padding_idx, onnx_trace=self.onnx_trace ) positions = torch.clamp(positions, max=self.padding_idx + self.max_positions) return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) class SinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length. Padding symbols are ignored. 
""" def __init__(self, embedding_dim, padding_idx, init_size=1024): super().__init__() self.embedding_dim = embedding_dim self.padding_idx = padding_idx if padding_idx is not None else 0 self.weights = SinusoidalPositionalEmbedding.get_embedding( init_size, embedding_dim, padding_idx ) self.onnx_trace = False self.register_buffer("_float_tensor", torch.FloatTensor(1)) self.max_positions = int(1e5) def prepare_for_onnx_export_(self): self.onnx_trace = True def get_embedding( num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None ): """Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze( 1 ) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view( num_embeddings, -1 ) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb def forward( self, input, incremental_state: Optional[Any] = None, timestep: Optional[Tensor] = None, positions: Optional[Any] = None, ): """Input is expected to be of size [bsz x seqlen].""" bspair = torch.onnx.operators.shape_as_tensor(input) bsz, seq_len = bspair[0], bspair[1] max_pos = self.padding_idx + 1 + seq_len if self.weights is None or max_pos > self.weights.size(0): # recompute/expand embeddings if needed self.weights = SinusoidalPositionalEmbedding.get_embedding( max_pos, self.embedding_dim, self.padding_idx ) self.weights = self.weights.to(self._float_tensor) if incremental_state is not None: # positions is the same for every token when decoding a single step pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len if self.onnx_trace: return ( 
self.weights.index_select(index=self.padding_idx + pos, dim=0) .unsqueeze(1) .repeat(bsz, 1, 1) ) return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) positions = utils_make_positions( input, self.padding_idx, onnx_trace=self.onnx_trace ) if self.onnx_trace: flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) embedding_shape = torch.cat( (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long)) ) embeddings = torch.onnx.operators.reshape_from_tensor_shape( flat_embeddings, embedding_shape ) return embeddings return ( self.weights.index_select(0, positions.view(-1)) .view(bsz, seq_len, -1) .detach() ) def PositionalEmbedding( num_embeddings: int, embedding_dim: int, padding_idx: int, learned: bool = False, ): if learned: # if padding_idx is specified then offset the embedding ids by # this index and adjust num_embeddings appropriately # TODO: The right place for this offset would be inside # LearnedPositionalEmbedding. Move this there for a cleaner implementation. if padding_idx is not None: num_embeddings = num_embeddings + padding_idx + 1 m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5) if padding_idx is not None: nn.init.constant_(m.weight[padding_idx], 0) else: m = SinusoidalPositionalEmbedding( embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1, ) return m
null
184,714
import math import numpy as np import logging import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch import Tensor from typing import Any, Dict, List, Tuple, Callable, Optional try: from apex.normalization import FusedLayerNorm as _FusedLayerNorm has_fused_layernorm = True class FusedLayerNorm(_FusedLayerNorm): def forward(self, x): if not x.is_cuda: return super().forward(x) else: with torch.cuda.device(x.device): return super().forward(x) except ImportError: has_fused_layernorm = False def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): if torch.jit.is_scripting() or torch.jit.is_tracing(): export = True if not export and torch.cuda.is_available() and has_fused_layernorm: return FusedLayerNorm(normalized_shape, eps, elementwise_affine) return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
null
184,715
import math from typing import Any, Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqIncrementalDecoder from fairseq.models.transformer import TransformerConfig from fairseq.modules import ( AdaptiveSoftmax, BaseLayer, FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor from speechlm.modules import transformer_layer from speechlm.modules.relative_pos_enc import RelativePositionalEncoding def module_name_fordropout(module_name: str) -> str: if module_name == "TransformerDecoderBase": return "TransformerDecoder" else: return module_name
null
184,716
import math from typing import Any, Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqIncrementalDecoder from fairseq.models.transformer import TransformerConfig from fairseq.modules import ( AdaptiveSoftmax, BaseLayer, FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor from speechlm.modules import transformer_layer from speechlm.modules.relative_pos_enc import RelativePositionalEncoding def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
null
184,717
import math from typing import Dict, List, Optional import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqEncoder from fairseq.modules import ( FairseqDropout, LayerDropModuleList, LayerNorm, SinusoidalPositionalEmbedding, ) from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor from fairseq.models.transformer import ( TransformerConfig, ) from speechlm.modules import transformer_layer, LearnedPositionalEmbedding from speechlm.modules.relative_pos_enc import RelativePositionalEncoding def module_name_fordropout(module_name: str) -> str: if module_name == "TransformerEncoderBase": return "TransformerEncoder" else: return module_name
null
184,718
import math from typing import Dict, List, Optional import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqEncoder from fairseq.modules import ( FairseqDropout, LayerDropModuleList, LayerNorm, SinusoidalPositionalEmbedding, ) from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor from fairseq.models.transformer import ( TransformerConfig, ) from speechlm.modules import transformer_layer, LearnedPositionalEmbedding from speechlm.modules.relative_pos_enc import RelativePositionalEncoding class LearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): super().__init__(num_embeddings, embedding_dim, padding_idx) self.onnx_trace = False if self.padding_idx is not None: self.max_positions = self.num_embeddings - self.padding_idx - 1 else: self.max_positions = self.num_embeddings def forward( self, input: Tensor, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, positions: Optional[Tensor] = None, ): """Input is expected to be of size [bsz x seqlen].""" assert (positions is None) or ( self.padding_idx is None ), "If positions is pre-computed then padding_idx should not be set." 
if positions is None: if incremental_state is not None: # positions is the same for every token when decoding a single step # Without the int() cast, it doesn't work in some cases when exporting to ONNX positions = torch.zeros( (1, 1), device=input.device, dtype=input.dtype ).fill_(int(self.padding_idx + input.size(1))) else: positions = utils_make_positions( input, self.padding_idx, onnx_trace=self.onnx_trace ) positions = torch.clamp(positions, max=self.padding_idx + self.max_positions) return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) def PositionalEmbedding( num_embeddings: int, embedding_dim: int, padding_idx: int, learned: bool = False, ): if learned: # if padding_idx is specified then offset the embedding ids by # this index and adjust num_embeddings appropriately # TODO: The right place for this offset would be inside # LearnedPositionalEmbedding. Move this there for a cleaner implementation. if padding_idx is not None: num_embeddings = num_embeddings + padding_idx + 1 m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5) if padding_idx is not None: nn.init.constant_(m.weight[padding_idx], 0) else: m = SinusoidalPositionalEmbedding( embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1, ) return m
null
184,719
import logging
import numpy as np
import torch
import os
import itertools

from fairseq.data import FairseqDataset, data_utils
from fairseq.data import (
    AppendTokenDataset,
    ConcatDataset,
    PrependTokenDataset,
    data_utils,
    indexed_dataset,
)

logger = logging.getLogger(__name__)


class LanguageTripleDataset(FairseqDataset):
    """
    A triple of torch.utils.data.Datasets (source, reference, target).

    Args:
        src (torch.utils.data.Dataset): source dataset to wrap
        src_sizes (List[int]): source sentence lengths
        src_dict (~fairseq.data.Dictionary): source vocabulary
        ref (torch.utils.data.Dataset): reference dataset to wrap
        ref_sizes (List[int]): reference sentence lengths
        ref_dict (~fairseq.data.Dictionary): reference vocabulary
        tgt (torch.utils.data.Dataset, optional): target dataset to wrap
        tgt_sizes (List[int], optional): target sentence lengths
        tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary
        left_pad_source (bool, optional): pad source tensors on the left side
            (default: True).
        left_pad_target (bool, optional): pad target tensors on the left side
            (default: False).
        shuffle (bool, optional): shuffle dataset elements before batching
            (default: True).
        input_feeding (bool, optional): create a shifted version of the targets
            to be passed into the model for teacher forcing (default: True).
        remove_eos_from_source (bool, optional): if set, removes eos from end
            of source if it's present (default: False).
        append_eos_to_target (bool, optional): if set, appends eos to end of
            target if it's absent (default: False).
        align_dataset (torch.utils.data.Dataset, optional): dataset
            containing alignments.
        constraints (Tensor, optional): 2d tensor with a concatenated, zero-
            delimited list of constraints for each sentence.
        append_bos (bool, optional): if set, appends bos to the beginning of
            source/target sentence.
        num_buckets (int, optional): if set to a value greater than 0, then
            batches will be bucketed into the given number of batch shapes.
        src_lang_id (int, optional): source language ID, if set, the collated
            batch will contain a field 'src_lang_id' in 'net_input' which
            indicates the source language of the samples.
        tgt_lang_id (int, optional): target language ID, if set, the collated
            batch will contain a field 'tgt_lang_id' which indicates the
            target language of the samples.
    """

    def __init__(
        self,
        src,
        src_sizes,
        src_dict,
        ref,
        ref_sizes,
        ref_dict,
        tgt=None,
        tgt_sizes=None,
        tgt_dict=None,
        left_pad_source=True,
        left_pad_target=False,
        shuffle=True,
        input_feeding=True,
        remove_eos_from_source=False,
        append_eos_to_target=False,
        align_dataset=None,
        constraints=None,
        append_bos=False,
        eos=None,
        num_buckets=0,
        src_lang_id=None,
        tgt_lang_id=None,
        pad_to_multiple=1,
    ):
        if tgt_dict is not None:
            # Source/target vocabularies must agree on the special symbols,
            # since collation pads/appends with the source dictionary's ids.
            assert src_dict.pad() == tgt_dict.pad()
            assert src_dict.eos() == tgt_dict.eos()
            assert src_dict.unk() == tgt_dict.unk()
        if tgt is not None:
            assert len(src) == len(
                tgt
            ), "Source and target must contain the same number of examples"
        assert len(src) == len(
            ref
        ), "Source and reference must contain the same number of examples"
        self.src = src
        self.ref = ref
        self.tgt = tgt
        self.src_sizes = np.array(src_sizes)
        self.ref_sizes = np.array(ref_sizes)
        self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
        # NOTE(review): `sizes` (and num_tokens/size/filtering below) only
        # consider src/tgt lengths, never ref — confirm this is intentional.
        self.sizes = (
            np.vstack((self.src_sizes, self.tgt_sizes)).T
            if self.tgt_sizes is not None
            else self.src_sizes
        )
        self.src_dict = src_dict
        self.ref_dict = ref_dict
        self.tgt_dict = tgt_dict
        self.left_pad_source = left_pad_source
        self.left_pad_target = left_pad_target
        self.shuffle = shuffle
        self.input_feeding = input_feeding
        self.remove_eos_from_source = remove_eos_from_source
        self.append_eos_to_target = append_eos_to_target
        self.align_dataset = align_dataset
        if self.align_dataset is not None:
            assert (
                self.tgt_sizes is not None
            ), "Both source and target needed when alignments are provided"
        self.constraints = constraints
        self.append_bos = append_bos
        self.eos = eos if eos is not None else src_dict.eos()
        self.src_lang_id = src_lang_id
        self.tgt_lang_id = tgt_lang_id
        if num_buckets > 0:
            from fairseq.data import BucketPadLengthDataset

            self.src = BucketPadLengthDataset(
                self.src,
                sizes=self.src_sizes,
                num_buckets=num_buckets,
                pad_idx=self.src_dict.pad(),
                left_pad=self.left_pad_source,
            )
            self.src_sizes = self.src.sizes
            logger.info("bucketing source lengths: {}".format(list(self.src.buckets)))
            self.ref = BucketPadLengthDataset(
                self.ref,
                sizes=self.ref_sizes,
                num_buckets=num_buckets,
                pad_idx=self.ref_dict.pad(),
                left_pad=self.left_pad_source,
            )
            self.ref_sizes = self.ref.sizes
            # BUGFIX: previously logged self.src.buckets under the
            # "reference lengths" label; report the reference buckets.
            logger.info("bucketing reference lengths: {}".format(list(self.ref.buckets)))
            if self.tgt is not None:
                self.tgt = BucketPadLengthDataset(
                    self.tgt,
                    sizes=self.tgt_sizes,
                    num_buckets=num_buckets,
                    pad_idx=self.tgt_dict.pad(),
                    left_pad=self.left_pad_target,
                )
                self.tgt_sizes = self.tgt.sizes
                logger.info(
                    "bucketing target lengths: {}".format(list(self.tgt.buckets))
                )

            # determine bucket sizes using self.num_tokens, which will return
            # the padded lengths (thanks to BucketPadLengthDataset)
            # BUGFIX: np.compat was removed in NumPy 2.0; np.int64 is the
            # portable equivalent of the old np.compat.long otype.
            num_tokens = np.vectorize(self.num_tokens, otypes=[np.int64])
            self.bucketed_num_tokens = num_tokens(np.arange(len(self.src)))
            self.buckets = [
                (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens)
            ]
        else:
            self.buckets = None
        self.pad_to_multiple = pad_to_multiple

    def get_batch_shapes(self):
        return self.buckets

    def __getitem__(self, index):
        tgt_item = self.tgt[index] if self.tgt is not None else None
        src_item = self.src[index]
        ref_item = self.ref[index]
        # Append EOS to end of tgt sentence if it does not have an EOS and remove
        # EOS from end of src sentence if it exists. This is useful when we use
        # use existing datasets for opposite directions i.e., when we want to
        # use tgt_dataset as src_dataset and vice versa
        if self.append_eos_to_target:
            eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
            if self.tgt and self.tgt[index][-1] != eos:
                tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])

        if self.append_bos:
            bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
            if self.tgt and self.tgt[index][0] != bos:
                tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])

            bos = self.src_dict.bos()
            if self.src[index][0] != bos:
                src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
            if self.ref[index][0] != bos:
                ref_item = torch.cat([torch.LongTensor([bos]), self.ref[index]])

        if self.remove_eos_from_source:
            eos = self.src_dict.eos()
            if self.src[index][-1] == eos:
                src_item = self.src[index][:-1]
            if self.ref[index][-1] == eos:
                ref_item = self.ref[index][:-1]

        example = {
            "id": index,
            "source": src_item,
            "reference": ref_item,
            "target": tgt_item,
        }
        if self.align_dataset is not None:
            example["alignment"] = self.align_dataset[index]
        if self.constraints is not None:
            example["constraints"] = self.constraints[index]
        return example

    def __len__(self):
        return len(self.src)

    def collater(self, samples, pad_to_length=None):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate
            pad_to_length (dict, optional): a dictionary of
                {'source': source_pad_to_length, 'target': target_pad_to_length}
                to indicate the max length to pad to in source and target respectively.

        Returns:
            dict: a mini-batch with the following keys:

                - `id` (LongTensor): example IDs in the original input order
                - `ntokens` (int): total number of tokens in the batch
                - `net_input` (dict): the input to the Model, containing keys:

                  - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
                    the source sentence of shape `(bsz, src_len)`. Padding will
                    appear on the left if *left_pad_source* is ``True``.
                  - `src_lengths` (LongTensor): 1D Tensor of the unpadded
                    lengths of each source sentence of shape `(bsz)`
                  - `prev_output_tokens` (LongTensor): a padded 2D Tensor of
                    tokens in the target sentence, shifted right by one
                    position for teacher forcing, of shape `(bsz, tgt_len)`.
                    This key will not be present if *input_feeding* is
                    ``False``.  Padding will appear on the left if
                    *left_pad_target* is ``True``.
                  - `src_lang_id` (LongTensor): a long Tensor which contains source
                    language IDs of each sample in the batch

                - `target` (LongTensor): a padded 2D Tensor of tokens in the
                  target sentence of shape `(bsz, tgt_len)`. Padding will appear
                  on the left if *left_pad_target* is ``True``.
                - `tgt_lang_id` (LongTensor): a long Tensor which contains target language
                   IDs of each sample in the batch
        """
        # `collate` is the module-level helper defined elsewhere in this file.
        res = collate(
            samples,
            pad_idx=self.src_dict.pad(),
            eos_idx=self.eos,
            left_pad_source=self.left_pad_source,
            left_pad_target=self.left_pad_target,
            input_feeding=self.input_feeding,
            pad_to_length=pad_to_length,
            pad_to_multiple=self.pad_to_multiple,
        )
        if self.src_lang_id is not None or self.tgt_lang_id is not None:
            src_tokens = res["net_input"]["src_tokens"]
            bsz = src_tokens.size(0)
            if self.src_lang_id is not None:
                res["net_input"]["src_lang_id"] = (
                    torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens)
                )
            if self.tgt_lang_id is not None:
                res["tgt_lang_id"] = (
                    torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens)
                )
        return res

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        # NOTE(review): ref length is not considered here — confirm reference
        # sequences never dominate the batch size budget.
        return max(
            self.src_sizes[index],
            self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
        )

    def num_tokens_vec(self, indices):
        """Return the number of tokens for a set of positions defined by indices.
        This value is used to enforce ``--max-tokens`` during batching."""
        sizes = self.src_sizes[indices]
        if self.tgt_sizes is not None:
            sizes = np.maximum(sizes, self.tgt_sizes[indices])
        return sizes

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return (
            self.src_sizes[index],
            self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
        )

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            indices = np.random.permutation(len(self)).astype(np.int64)
        else:
            indices = np.arange(len(self), dtype=np.int64)
        if self.buckets is None:
            # sort by target length, then source length
            if self.tgt_sizes is not None:
                indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")]
            return indices[np.argsort(self.src_sizes[indices], kind="mergesort")]
        else:
            # sort by bucketed_num_tokens, which is:
            #   max(padded_src_len, padded_tgt_len)
            return indices[
                np.argsort(self.bucketed_num_tokens[indices], kind="mergesort")
            ]

    def supports_prefetch(self):
        # NOTE(review): only src/tgt are consulted — the ref dataset's
        # prefetch support is not required; prefetch() guards for it below.
        return getattr(self.src, "supports_prefetch", False) and (
            getattr(self.tgt, "supports_prefetch", False) or self.tgt is None
        )

    def prefetch(self, indices):
        self.src.prefetch(indices)
        # BUGFIX: the reference dataset was never prefetched even though
        # __getitem__ reads it; prefetch it when it supports prefetching.
        if getattr(self.ref, "supports_prefetch", False):
            self.ref.prefetch(indices)
        if self.tgt is not None:
            self.tgt.prefetch(indices)
        if self.align_dataset is not None:
            self.align_dataset.prefetch(indices)

    def filter_indices_by_size(self, indices, max_sizes):
        """Filter a list of sample indices. Remove those that are longer
        than specified in max_sizes.

        Args:
            indices (np.array): original array of sample indices
            max_sizes (int or list[int] or tuple[int]): max sample size,
                can be defined separately for src and tgt (then list or tuple)

        Returns:
            np.array: filtered sample array
            list: list of removed indices
        """
        # NOTE(review): filtering considers only src/tgt sizes, not ref.
        return data_utils.filter_paired_dataset_indices_by_size(
            self.src_sizes,
            self.tgt_sizes,
            indices,
            max_sizes,
        )


def load_langtriple_dataset(
    data_path,
    split,
    src,
    src_dict,
    ref,
    ref_dict,
    tgt,
    tgt_dict,
    combine,
    dataset_impl,
    upsample_primary,
    left_pad_source,
    left_pad_target,
    max_source_positions,
    max_target_positions,
    prepend_bos=False,
    load_alignments=False,
    truncate_source=False,
    append_source_id=False,
    num_buckets=0,
    shuffle=True,
    pad_to_multiple=1,
    prepend_bos_src=None,
    lang_format="[{}]",
):
    """Load a {split}.{src}-{ref}-{tgt}.* triple dataset from *data_path*,
    concatenating shards (split, split1, split2, ...) when *combine* is set,
    and wrap it in a :class:`LanguageTripleDataset`."""
    assert not truncate_source

    def split_exists(split, src, ref, tgt, lang, data_path):
        # e.g. "{data_path}/{split}.{src}-{ref}-{tgt}.{lang}"
        filename = os.path.join(data_path, "{}.{}-{}-{}.{}".format(split, src, ref, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)

    src_datasets = []
    ref_datasets = []
    tgt_datasets = []

    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else "")

        # infer langcode: try the forward direction first, then the reversed one
        if split_exists(split_k, src, ref, tgt, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}-{}.".format(split_k, src, ref, tgt))
        elif split_exists(split_k, tgt, ref, src, src, data_path):
            prefix = os.path.join(data_path, "{}.{}-{}-{}.".format(split_k, tgt, ref, src))
        else:
            if k > 0:
                # no more shards
                break
            else:
                raise FileNotFoundError(
                    "Dataset not found: {} ({})".format(split, data_path)
                )

        src_dataset = data_utils.load_indexed_dataset(
            prefix + src, src_dict, dataset_impl
        )
        src_datasets.append(src_dataset)

        ref_dataset = data_utils.load_indexed_dataset(
            prefix + ref, ref_dict, dataset_impl
        )
        ref_datasets.append(ref_dataset)

        tgt_dataset = data_utils.load_indexed_dataset(
            prefix + tgt, tgt_dict, dataset_impl
        )
        if tgt_dataset is not None:
            tgt_datasets.append(tgt_dataset)

        logger.info(
            "{} {} {}-{}-{} {} examples".format(
                data_path, split_k, src, ref, tgt, len(src_datasets[-1])
            )
        )

        if not combine:
            break

    assert len(src_datasets) == len(ref_datasets)
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0

    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        ref_dataset = ref_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        # upsample the first (primary) shard relative to the rest
        sample_ratios = [1] * len(src_datasets)
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        ref_dataset = ConcatDataset(ref_datasets, sample_ratios)
        if len(tgt_datasets) > 0:
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None

    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(ref_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        ref_dataset = PrependTokenDataset(ref_dataset, ref_dict.bos())
        if tgt_dataset is not None:
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
    elif prepend_bos_src is not None:
        logger.info(f"prepending src bos: {prepend_bos_src}")
        src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)
        ref_dataset = PrependTokenDataset(ref_dataset, prepend_bos_src)

    eos = None
    if append_source_id:
        # append a language-id token (e.g. "[en]") to each sequence
        src_dataset = AppendTokenDataset(
            src_dataset, src_dict.index(lang_format.format(src))
        )
        ref_dataset = AppendTokenDataset(
            ref_dataset, ref_dict.index(lang_format.format(ref))
        )
        if tgt_dataset is not None:
            tgt_dataset = AppendTokenDataset(
                tgt_dataset, tgt_dict.index(lang_format.format(tgt))
            )
        eos = tgt_dict.index(lang_format.format(tgt))

    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(
                align_path, None, dataset_impl
            )

    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return LanguageTripleDataset(
        src_dataset,
        src_dataset.sizes,
        src_dict,
        ref_dataset,
        ref_dataset.sizes,
        ref_dict,
        tgt_dataset,
        tgt_dataset_sizes,
        tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        align_dataset=align_dataset,
        eos=eos,
        num_buckets=num_buckets,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
    )
null