id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
19,368 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def gen_l_hpu(i_hds):
    """
    Flatten header-token spans into a flat list of span lengths.

    Columns are treated as a batch of natural-language utterances with
    batch size (# of columns) * (batch size), e.g.
    i_hds = [(17, 18), (19, 21), (22, 23), (24, 25), (26, 29), (30, 34)]
    """
    return [ed - st for spans in i_hds for (st, ed) in spans]
def generate_inputs_s2s(tokenizer, nlu1_tt, hds1, sql_vocab1):
    """
    Build BERT input as: [CLS] sql_vocab [SEP] question [SEP] headers,
    so that the SQL vocabulary always occupies a fixed leading position.

    Returns (tokens, segment_ids, i_sql_vocab, i_nlu, i_hds) where each
    i_* value is a (start, end) token-index span into `tokens`.
    """
    tokens = ["[CLS]"]
    segment_ids = []

    def append_block(items, spans):
        # WordPiece-tokenize each item, record its (start, end) span, and
        # separate items with [SEP]. The final [SEP] gets segment id 1,
        # intermediate ones get 0 (same scheme as the question/header mix).
        last = len(items) - 1
        for idx, item in enumerate(items):
            start = len(tokens)
            pieces = tokenizer.tokenize(item)
            tokens.extend(pieces)
            spans.append((start, len(tokens)))
            segment_ids.extend([1] * len(pieces))
            tokens.append("[SEP]")
            segment_ids.append(1 if idx == last else 0)

    # SQL pseudo-header vocabulary (fixed position right after [CLS])
    i_sql_vocab = []
    append_block(sql_vocab1, i_sql_vocab)

    # Natural-language question
    st_nlu = len(tokens)  # to use it later
    segment_ids.append(0)  # compensates for [CLS], which got no segment id above
    for tok in nlu1_tt:
        tokens.append(tok)
        segment_ids.append(0)
    ed_nlu = len(tokens)
    tokens.append("[SEP]")
    segment_ids.append(0)
    i_nlu = (st_nlu, ed_nlu)

    # Table headers
    i_hds = []
    append_block(hds1, i_hds)

    return tokens, segment_ids, i_sql_vocab, i_nlu, i_hds
The provided code snippet includes necessary dependencies for implementing the `get_bert_output_s2s` function. Write a Python function `def get_bert_output_s2s(model_bert, tokenizer, nlu_t, hds, sql_vocab, max_seq_length)` to solve the following problem:
s2s version. Treat SQL-tokens as pseudo-headers sql_vocab = ("sql select", "sql where", "sql and", "sql equal", "sql greater than", "sql less than") e.g.) Q: What is the name of the player with score greater than 15? H: Name of the player, score Input: [CLS], what, is, ..., [SEP], name, of, the, player, [SEP], score, [SEP] sql, select, [SEP], sql, where, [SEP], sql, and, [SEP], ... Here, input is tokenized further by WordPiece (WP) tokenizer and fed into BERT. INPUT :param model_bert: :param tokenizer: WordPiece toknizer :param nlu: Question :param nlu_t: CoreNLP tokenized nlu. :param hds: Headers :param hs_t: None or 1st-level tokenized headers :param max_seq_length: max input token length OUTPUT tokens: BERT input tokens nlu_tt: WP-tokenized input natural language questions orig_to_tok_index: map the index of 1st-level-token to the index of 2nd-level-token tok_to_orig_index: inverse map.
Here is the function:
def get_bert_output_s2s(model_bert, tokenizer, nlu_t, hds, sql_vocab, max_seq_length):
    """
    s2s version. Treat SQL-tokens as pseudo-headers
    sql_vocab = ("sql select", "sql where", "sql and", "sql equal", "sql greater than", "sql less than")
    e.g.)
    Q: What is the name of the player with score greater than 15?
    H: Name of the player, score
    Input: [CLS], what, is, ...,
    [SEP], name, of, the, player, [SEP], score,
    [SEP] sql, select, [SEP], sql, where, [SEP], sql, and, [SEP], ...
    Here, input is tokenized further by WordPiece (WP) tokenizer and fed into BERT.
    INPUT
    :param model_bert: BERT model; called as model_bert(input_ids, segment_ids, input_mask)
    :param tokenizer: WordPiece tokenizer (tokenize / convert_tokens_to_ids)
    :param nlu_t: CoreNLP tokenized questions, one token list per example
    :param hds: Headers, one list of column names per example
    :param sql_vocab: SQL pseudo-header phrases, shared across the batch
    :param max_seq_length: max input token length; inputs are zero-padded to it
    OUTPUT
    all_encoder_layer: BERT outputs from all layers
    pooled_output: output of the [CLS] vector
    tokens: BERT input tokens
    i_nlu / i_hds / i_sql_vocab: (start, end) spans of question / headers / sql vocab
    l_n, l_hpu, l_hs, l_input: lengths (question WP tokens, header pieces, header counts, unpadded input)
    nlu_tt: WP-tokenized input natural language questions
    t_to_tt_idx: map the index of 1st-level-token to the index of 2nd-level-token
    tt_to_t_idx: inverse map.
    """
    l_n = []
    l_hs = []  # The length of columns for each batch
    l_input = []
    input_ids = []
    tokens = []
    segment_ids = []
    input_mask = []
    i_nlu = []  # index to retrieve the position of contextual vector later.
    i_hds = []
    i_sql_vocab = []
    doc_tokens = []  # NOTE(review): never populated in this function — appears vestigial
    nlu_tt = []
    t_to_tt_idx = []
    tt_to_t_idx = []
    for b, nlu_t1 in enumerate(nlu_t):
        hds1 = hds[b]
        l_hs.append(len(hds1))
        # 1. 2nd tokenization using WordPiece
        tt_to_t_idx1 = []  # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP).
        t_to_tt_idx1 = []  # t_to_tt_idx1[i] = start index of i-th 1st-level token in nlu_tt1.
        nlu_tt1 = []  # nlu_tt1[ t_to_tt_idx1[i] ] is the first sub-token segment of the i-th 1st-level token
        for (i, token) in enumerate(nlu_t1):
            t_to_tt_idx1.append(
                len(nlu_tt1))  # record the WP start position of this original token
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tt_to_t_idx1.append(i)
                nlu_tt1.append(sub_token)  # tokens are further split by the WordPiece tokenizer
        nlu_tt.append(nlu_tt1)
        tt_to_t_idx.append(tt_to_t_idx1)
        t_to_tt_idx.append(t_to_tt_idx1)
        l_n.append(len(nlu_tt1))
        # [CLS] nlu [SEP] col1 [SEP] col2 [SEP] ...col-n [SEP]
        # 2. Generate BERT inputs & indices.
        # Combine hds1 and sql_vocab
        tokens1, segment_ids1, i_sql_vocab1, i_nlu1, i_hds1 = generate_inputs_s2s(tokenizer, nlu_tt1, hds1, sql_vocab)
        input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)
        # Input masks
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask1 = [1] * len(input_ids1)
        # 3. Zero-pad up to the sequence length.
        l_input.append(len(input_ids1))
        while len(input_ids1) < max_seq_length:
            input_ids1.append(0)
            input_mask1.append(0)
            segment_ids1.append(0)
        # These fail only if the unpadded input already exceeded max_seq_length.
        assert len(input_ids1) == max_seq_length
        assert len(input_mask1) == max_seq_length
        assert len(segment_ids1) == max_seq_length
        input_ids.append(input_ids1)
        tokens.append(tokens1)
        segment_ids.append(segment_ids1)
        input_mask.append(input_mask1)
        i_nlu.append(i_nlu1)
        i_hds.append(i_hds1)
        i_sql_vocab.append(i_sql_vocab1)
    # Convert to tensor
    all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)
    all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)
    all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)
    # 4. Generate BERT output.
    all_encoder_layer, pooled_output = model_bert(all_input_ids, all_segment_ids, all_input_mask)
    # 5. generate l_hpu from i_hds
    l_hpu = gen_l_hpu(i_hds)
    return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, i_sql_vocab, \
           l_n, l_hpu, l_hs, l_input, \
           nlu_tt, t_to_tt_idx, tt_to_t_idx
19,369 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length):
    """
    Build BERT inputs from pre-tokenized questions and headers, then run BERT.

    Unlike the s2s variant, the question tokens are used as-is (the second
    WordPiece pass is commented out below) and an extra "空列" ("null column")
    pseudo-header is appended to every table's header list.

    INPUT
    :param model_bert: BERT model; called here as model_bert(input_ids, segment_ids)
                       — NOTE(review): input_mask is built but never passed;
                       confirm the model tolerates attending over padding.
    :param tokenizer: WordPiece tokenizer (convert_tokens_to_ids)
    :param nlu_t: tokenized questions, one token list per example
    :param hds: Headers, one list of column names per example
    :param max_seq_length: max input token length; inputs are zero-padded to it
    OUTPUT
    all_encoder_layer: BERT outputs from all layers
    pooled_output: output of the [CLS] vector
    tokens: BERT input tokens
    i_nlu / i_hds: (start, end) spans of question / headers in tokens
    l_n, l_hpu, l_hs: lengths of question tokens / header pieces / header counts
    """
    l_n = []
    l_hs = []  # The length of columns for each batch
    input_ids = []
    tokens = []
    segment_ids = []
    input_mask = []
    i_nlu = []  # index to retrieve the position of contextual vector later.
    i_hds = []
    doc_tokens = []  # NOTE(review): never populated here — appears vestigial
    nlu_tt = []
    t_to_tt_idx = []
    tt_to_t_idx = []
    for b, nlu_t1 in enumerate(nlu_t):
        # deepcopy so appending the null column does not mutate the caller's list
        hds1 = deepcopy(hds[b])
        hds1.append("空列")
        l_hs.append(len(hds1))
        # 1. 2nd tokenization using WordPiece — intentionally disabled here;
        # the question tokens nlu_t1 are fed to BERT as-is.
        # tt_to_t_idx1 = []  # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP).
        # t_to_tt_idx1 = []  # orig_to_tok_idx[i] = start index of i-th-1st-level-token in all_tokens.
        # nlu_tt1 = []  # all_doc_tokens[ orig_to_tok_idx[i] ] returns first sub-token segement of i-th-1st-level-token
        # for (i, token) in enumerate(nlu_t1):
        #     t_to_tt_idx1.append(
        #         len(nlu_tt1))  # all_doc_tokens[ indicate the start position of original 'white-space' tokens.
        #     sub_tokens = tokenizer.tokenize(token)
        #     for sub_token in sub_tokens:
        #         tt_to_t_idx1.append(i)
        #         nlu_tt1.append(sub_token)  # all_doc_tokens are further tokenized using WordPiece tokenizer
        # nlu_tt.append(nlu_tt1)
        # tt_to_t_idx.append(tt_to_t_idx1)
        # t_to_tt_idx.append(t_to_tt_idx1)
        l_n.append(len(nlu_t1))
        # [CLS] nlu [SEP] col1 [SEP] col2 [SEP] ...col-n [SEP]
        # 2. Generate BERT inputs & indices.
        # NOTE(review): generate_inputs is defined elsewhere in this module.
        tokens1, segment_ids1, i_nlu1, i_hds1 = generate_inputs(tokenizer, nlu_t1, hds1)
        input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)
        # Input masks
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask1 = [1] * len(input_ids1)
        # 3. Zero-pad up to the sequence length.
        while len(input_ids1) < max_seq_length:
            input_ids1.append(0)
            input_mask1.append(0)
            segment_ids1.append(0)
        # Only possible when the unpadded input already exceeded max_seq_length.
        if len(input_ids1)!=max_seq_length:
            print("Error: ", nlu_t1, tokens1, len(input_ids1), max_seq_length)
        assert len(input_ids1) == max_seq_length
        assert len(input_mask1) == max_seq_length
        assert len(segment_ids1) == max_seq_length
        input_ids.append(input_ids1)
        tokens.append(tokens1)
        segment_ids.append(segment_ids1)
        input_mask.append(input_mask1)
        i_nlu.append(i_nlu1)
        i_hds.append(i_hds1)
    # Convert to tensor
    all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)
    all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)
    all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)
    # 4. Generate BERT output. input_mask is deliberately not passed here.
    all_encoder_layer, pooled_output = model_bert(all_input_ids, all_segment_ids)
    # 5. generate l_hpu from i_hds
    l_hpu = gen_l_hpu(i_hds)
    return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, \
           l_n, l_hpu, l_hs
def get_wemb_n(i_nlu, l_n, hS, num_hidden_layers, all_encoder_layer, num_out_layers_n):
    """
    Gather per-token question embeddings from the last `num_out_layers_n`
    BERT layers, concatenated along the hidden dimension.

    Positions past each question's span remain zero-filled.
    Returns a tensor of shape [bS, max(l_n), hS * num_out_layers_n].
    """
    bS = len(l_n)
    wemb_n = torch.zeros([bS, max(l_n), hS * num_out_layers_n]).to(device)
    for b, (st, ed) in enumerate(i_nlu):
        n_tok = ed - st
        for k in range(num_out_layers_n):
            # k-th output slice comes from the k-th layer counted from the top.
            layer = num_hidden_layers - 1 - k
            wemb_n[b, :n_tok, k * hS:(k + 1) * hS] = all_encoder_layer[layer][b, st:ed, :]
    return wemb_n
#
def get_wemb_h(i_hds, l_hpu, l_hs, hS, num_hidden_layers, all_encoder_layer, num_out_layers_h):
    """
    Stack header-token embeddings as if every header of every table were one
    row of a single batch:
    [ [t1-c1-tok1, t1-c1-tok2, ...],
      [t1-c2-tok1, ...],
      ...
      [t2-c1-tok1, ...] ]

    Returns a tensor of shape [sum(l_hs), max(l_hpu), hS * num_out_layers_h].
    """
    wemb_h = torch.zeros([sum(l_hs), max(l_hpu), hS * num_out_layers_h]).to(device)
    row = 0  # flat header index across the whole batch
    for b, spans in enumerate(i_hds):
        for st, ed in spans:
            for k in range(num_out_layers_h):
                # k-th output slice comes from the k-th layer counted from the top.
                layer = num_hidden_layers - 1 - k
                wemb_h[row, :ed - st, k * hS:(k + 1) * hS] = all_encoder_layer[layer][b, st:ed, :]
            row += 1
    return wemb_h
def get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n=1,
                  num_out_layers_h=1):
    """
    Run BERT over (question, headers) and split the contextual output into
    question embeddings (wemb_n) and header embeddings (wemb_h).
    """
    # Contextual output of all tokens from BERT, plus index bookkeeping:
    # i_nlu / i_hds are (start, end) spans of question / headers in `tokens`;
    # pooled_output is the [CLS] vector.
    (all_encoder_layer, pooled_output, tokens, i_nlu, i_hds,
     l_n, l_hpu, l_hs) = get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length)
    hS = bert_config.hidden_size
    nL = bert_config.num_hidden_layers
    wemb_n = get_wemb_n(i_nlu, l_n, hS, nL, all_encoder_layer, num_out_layers_n)
    wemb_h = get_wemb_h(i_hds, l_hpu, l_hs, hS, nL, all_encoder_layer, num_out_layers_h)
    return wemb_n, wemb_h, l_n, l_hpu, l_hs
19,370 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
The provided code snippet includes necessary dependencies for implementing the `gen_pnt_n` function. Write a Python function `def gen_pnt_n(g_wvi, mL_w, mL_nt)` to solve the following problem:
Generate one-hot idx indicating vectors with their lengths. :param g_wvi: e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]] where_val idx in nlu_t. 0 = <BEG>, -1 = <END>. :param mL_w: 4 :param mL_nt: 200 :return:
Here is the function:
def gen_pnt_n(g_wvi, mL_w, mL_nt):
    """
    Generate one-hot pointer vectors (and their lengths) for where-value
    token indices.

    :param g_wvi: per-example, per-condition token-index sequences in nlu_t,
        e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]],
              [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
        where index 0 = <BEG> and the final entry of each sequence = <END>.
    :param mL_w: max number of where-conditions (e.g. 4)
    :param mL_nt: max number of question tokens (e.g. 200)
    :return: (pnt_n, l_g_wvi)
        pnt_n:   [bS, mL_w, mL_g_wvi, mL_nt] one-hot pointer tensor
        l_g_wvi: [bS, mL_w] pointer-sequence length per slot (without <END>)
    """
    bS = len(g_wvi)
    # Longest pointer sequence over the batch; the leading [0] guards against
    # examples with zero conditions, and the -1 drops the trailing <END> slot.
    # (A dead loop that pre-computed unused per-sequence lengths was removed.)
    mL_g_wvi = max([max([0] + [len(tok) for tok in gwsi]) for gwsi in g_wvi]) - 1
    if mL_g_wvi < 1:
        mL_g_wvi = 1

    pnt_n = torch.zeros(bS, mL_w, mL_g_wvi, mL_nt).to(device)  # one hot
    l_g_wvi = torch.zeros(bS, mL_w).to(device)
    for b, g_wvi1 in enumerate(g_wvi):
        i_wn = 0  # guards the padding step when there are zero conditions
        for i_wn, g_wvi11 in enumerate(g_wvi1):
            # g_wvi11: [<BEG>, where-cond token positions in NLq, <END>]
            g_wvi11_n1 = g_wvi11[:-1]  # doesn't count the <END> idx
            l_g_wvi[b, i_wn] = len(g_wvi11_n1)
            for t, idx in enumerate(g_wvi11_n1):
                pnt_n[b, i_wn, t, idx] = 1
        # Pad unused condition slots with the [<BEG>, <END>]-only pattern.
        if i_wn < (mL_w - 1):
            pnt_n[b, i_wn + 1:, 0, 1] = 1
            l_g_wvi[b, i_wn + 1:] = 1  # only <BEG> is present
    return pnt_n, l_g_wvi
19,371 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_sc` function. Write a Python function `def pred_sc(s_sc)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_sc(s_sc):
    """
    Predict the select-column index for every example in the batch.
    return: [pr_sc1_i, pr_sc2_i, ...]
    """
    return [scores.argmax().item() for scores in s_sc]
19,372 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_sc_beam` function. Write a Python function `def pred_sc_beam(s_sc, beam_size)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_sc_beam(s_sc, beam_size):
    """
    Top-`beam_size` select-column candidates per example.
    return: [[idx1, idx2, ...], ...] sorted by descending score.
    """
    out = []
    for scores in s_sc:
        _, top_idx = scores.topk(k=beam_size)
        out.append(top_idx.tolist())
    return out
19,373 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_sa` function. Write a Python function `def pred_sa(s_sa)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_sa(s_sa):
    """
    Predict the aggregation-operator index for every example in the batch.
    return: [pr_sa1_i, pr_sa2_i, ...]
    """
    return [scores.argmax().item() for scores in s_sa]
19,374 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_scco` function. Write a Python function `def pred_scco(s_cco, wn)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_scco(s_cco, wn):
    """
    Predict the condition-connector class per example.

    With exactly one where-condition the connector is forced to class 0;
    otherwise the best among classes 1..end is chosen.
    """
    pr_scco = []
    for b, scores in enumerate(s_cco):
        if wn[b] == 1:
            # single condition -> connector must be 0
            pr_scco.append(scores[0].argmax().item())
        else:
            # multiple conditions -> connector must be 1 or 2
            pr_scco.append(scores[1:].argmax().item() + 1)
    return pr_scco
19,375 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_wn` function. Write a Python function `def pred_wn(s_wn)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_wn(s_wn):
    """
    Predict the number of where-conditions per example.
    return: [pr_wn1, pr_wn2, ...]
    """
    return [scores.argmax().item() for scores in s_wn]
19,376 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_slen` function. Write a Python function `def pred_slen(s_slen)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_slen(s_slen):
    """
    Predict the number of select columns per example.
    return: [pr_slen1, pr_slen2, ...]
    """
    return [scores.argmax().item() for scores in s_slen]
19,377 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_wc_old` function. Write a Python function `def pred_wc_old(sql_i, s_wc)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_wc_old(sql_i, s_wc):
    """
    Pick where-columns using the ground-truth condition count from `sql_i`.
    return: [pr_wc1_i, pr_wc2_i, ...], indices sorted ascending per example.
    """
    pr_wc = []
    for b, sql_i1 in enumerate(sql_i):
        wn = len(sql_i1['conds'])  # number of conditions for this example
        top = argsort(-s_wc[b].data.cpu().numpy())[:wn]  # wn highest-scoring columns
        top.sort()
        pr_wc.append(list(top))
    return pr_wc
19,378 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_wc` function. Write a Python function `def pred_wc(wn, s_wc)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...] ! Returned index is sorted!
Here is the function:
def pred_wc(wn, s_wc):
    """
    Predict where-columns: for each of the first wn[b] condition slots take
    the argmax header.
    return: [pr_wc1_i, pr_wc2_i, ...]
    ! Returned index is sorted ascending.
    """
    pr_wc = []
    for b, wn1 in enumerate(wn):
        # s_wc[b]: [hs, 4]; argmax over headers for each condition slot.
        best = np.argmax(s_wc[b].data.cpu().numpy(), axis=0)[:wn1]
        best.sort()
        pr_wc.append(list(best))
    return pr_wc
19,379 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_sc_multi` function. Write a Python function `def pred_sc_multi(slen, s_sc)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...] ! Returned index is sorted!
Here is the function:
def pred_sc_multi(slen, s_sc):
    """
    Pick the top-slen[b] select columns per example.
    return: [pr_sc1_i, pr_sc2_i, ...]
    ! Returned index is sorted ascending.
    """
    pr_sc = []
    for b, slen1 in enumerate(slen):
        top = argsort(-s_sc[b].data.cpu().numpy())[:slen1]
        top.sort()
        pr_sc.append(list(top))
    return pr_sc
19,380 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_sa_multi` function. Write a Python function `def pred_sa_multi(wn, s_wo)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...] ! Returned index is sorted!
Here is the function:
def pred_sa_multi(wn, s_wo):
    """
    Predict the aggregation operator for each of the first wn[b] slots.
    s_wo: [B, 4, n_op] scores.
    """
    best = s_wo.argmax(dim=2)  # [B, 4]
    return [list(best[b].data.cpu().numpy()[:wn1]) for b, wn1 in enumerate(wn)]
19,381 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_wc_sorted_by_prob` function. Write a Python function `def pred_wc_sorted_by_prob(s_wc)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...] ! Returned index is sorted by prob. All column indexes are returned here.
Here is the function:
def pred_wc_sorted_by_prob(s_wc):
    """
    Return ALL column indexes per example, ordered by descending probability.
    return: [pr_wc1_i, pr_wc2_i, ...]
    """
    return [list(argsort(-s_wc1.data.cpu().numpy())) for s_wc1 in s_wc]
19,382 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_wo` function. Write a Python function `def pred_wo(wn, s_wo)` to solve the following problem:
return: [ pr_wc1_i, pr_wc2_i, ...]
Here is the function:
def pred_wo(wn, s_wo):
    """
    Predict the where-operator for each of the first wn[b] condition slots.
    s_wo: [B, 4, n_op] scores.
    """
    best = s_wo.argmax(dim=2)  # [B, 4]
    return [list(best[b].data.cpu().numpy()[:wn1]) for b, wn1 in enumerate(wn)]
19,383 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_wvi_se` function. Write a Python function `def pred_wvi_se(wn, s_wv)` to solve the following problem:
s_wv: [B, 4, mL, 2] - predict best st-idx & ed-idx
Here is the function:
def pred_wvi_se(wn, s_wv):
    """
    s_wv: [B, 4, mL, 2] start/end scores.
    For each condition slot: pick the best start index, then the best end
    index at or after that start.
    """
    s_st, s_ed = s_wv.split(1, dim=3)  # two [B, 4, mL, 1] halves
    s_st = s_st.squeeze(3)  # [B, 4, mL]
    s_ed = s_ed.squeeze(3)
    st_best = s_st.argmax(dim=2)  # [B, 4]
    pr_wvi = []
    for b, wn1 in enumerate(wn):
        spans = []
        for i_wn in range(wn1):
            st = st_best[b][i_wn]
            # end is constrained to st..mL-1 so spans are never inverted
            ed = s_ed[b][i_wn][st:].argmax() + st
            spans.append([st.item(), ed.item()])
        pr_wvi.append(spans)
    return pr_wvi
19,384 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `pred_wvi_se_beam` function. Write a Python function `def pred_wvi_se_beam(max_wn, s_wv, beam_size)` to solve the following problem:
s_wv: [B, 4, mL, 2] - predict best st-idx & ed-idx output: pr_wvi_beam = [B, max_wn, n_pairs, 2]. 2 means [st, ed]. prob_wvi_beam = [B, max_wn, n_pairs]
Here is the function:
def pred_wvi_se_beam(max_wn, s_wv, beam_size):
    """
    s_wv: [B, 4, mL, 2] start/end scores.
    Enumerate the top-k start x top-k end index pairs per condition slot,
    with k = ceil(sqrt(beam_size)) so that k*k >= beam_size.
    output:
      pr_wvi_beam:   [B, max_wn, n_pairs, 2] (st, ed) index pairs
      prob_wvi_beam: [B, max_wn, n_pairs] joint start*end probabilities
    """
    bS = s_wv.shape[0]
    s_st, s_ed = s_wv.split(1, dim=3)  # two [B, 4, mL, 1] halves
    s_st = s_st.squeeze(3)  # [B, 4, mL]
    s_ed = s_ed.squeeze(3)
    prob_st = F.softmax(s_st, dim=-1).detach().to('cpu').numpy()
    prob_ed = F.softmax(s_ed, dim=-1).detach().to('cpu').numpy()

    k_logit = int(ceil(sqrt(beam_size)))
    n_pairs = k_logit ** 2
    assert n_pairs >= beam_size
    _, idxs_st = s_st.topk(k_logit)  # [B, 4, k_logit]
    _, idxs_ed = s_ed.topk(k_logit)

    pr_wvi_beam = []  # [B, max_wn, k_logit**2 [st, ed] pairs]
    prob_wvi_beam = zeros([bS, max_wn, n_pairs])
    for b in range(bS):
        beams_b = []
        for i_wn in range(max_wn):
            pairs = []
            pair_idx = -1
            for i_k in range(k_logit):
                for j_k in range(k_logit):
                    pair_idx += 1
                    st = idxs_st[b][i_wn][i_k].item()
                    ed = idxs_ed[b][i_wn][j_k].item()
                    pairs.append([st, ed])
                    prob_wvi_beam[b, i_wn, pair_idx] = prob_st[b, i_wn, st] * prob_ed[b, i_wn, ed]
            beams_b.append(pairs)
        pr_wvi_beam.append(beams_b)
    return pr_wvi_beam, prob_wvi_beam
19,385 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def is_whitespace_g_wvi(c):
    """Return True iff *c* is the plain ASCII space character."""
    # Other whitespace (tab, newline, NBSP) is deliberately NOT treated
    # as a separator here; only ' ' splits tokens for wvi indexing.
    return c == " "
19,386 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `convert_pr_wvi_to_string` function. Write a Python function `def convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu)` to solve the following problem:
- Convert to the string in white-space-separated tokens - Ad-hoc addition.
Here is the function:
def convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu):
    """
    Convert predicted (start, end) token indices into token lists.

    Returns two parallel per-batch / per-condition structures:
      - pr_wv_str:    tokens with the end index extended over trailing
                      '##' word-pieces (ad-hoc fix for WordPiece effects,
                      e.g. "butler cc (" -> "butler cc (ks)").
      - pr_wv_str_wp: the raw word-piece span, unextended.
    """
    pr_wv_str_wp = []  # raw word-piece spans
    pr_wv_str = []
    for b, wvi_list in enumerate(pr_wvi):
        toks = nlu_t[b]
        spans_wp = []
        spans = []
        for st_idx, ed_idx in wvi_list:
            spans_wp.append(toks[st_idx:ed_idx + 1])
            # Extend the end over any continuation word-pieces ('##...').
            while ed_idx + 1 < len(toks) and toks[ed_idx + 1].startswith('##'):
                ed_idx += 1
            spans.append(toks[st_idx:ed_idx + 1])
        pr_wv_str_wp.append(spans_wp)
        pr_wv_str.append(spans)
    return pr_wv_str, pr_wv_str_wp
19,387 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def pred_scco_value(s_cco, wn):
    """Predict the condition-connector operator per example, with its prob.

    s_cco : per-example connector logits; index 0 is the no-connector class.
    wn    : predicted number of where-conditions per example.
    Returns (pr_scco, pr_scco_value): chosen class index and its softmax
    probability (via get_tensor_value).
    """
    pr_scco = []
    pr_scco_value = []
    for b, s_cco1 in enumerate(s_cco):
        if wn[b] == 1:
            # A single condition needs no connector: class 0 is forced
            # (argmax of a scalar is always 0).
            pr_scco.append(s_cco1[0].argmax().item())
        else:
            # Choose among the real connectors (skip class 0); +1 restores
            # the original class index after the [1:] slice.
            pr_scco.append(s_cco1[1:].argmax().item() + 1)
        pr_scco_value.append(get_tensor_value(F.softmax(s_cco1, dim=0), [pr_scco[-1]]))
    return pr_scco, pr_scco_value
def pred_wn_value(s_wn):
    """Predict where-number (count of where-conditions) per example.

    Returns (pr_wn, pr_wn_value): argmax class and its softmax probability.
    """
    pr_wn = []
    pr_wn_value = []
    for s_wn1 in s_wn:
        pr_wn.append(s_wn1.argmax().item())
        pr_wn_value.append(get_tensor_value(F.softmax(s_wn1, dim=0), [pr_wn[-1]]))
    return pr_wn, pr_wn_value
def pred_slen_value(s_slen):
    """Predict select-length (number of select columns) per example.

    Returns (pr_slen, pr_slen_value): argmax class and its softmax probability.
    """
    pr_slen = []
    pr_slen_value = []
    for s_slen1 in s_slen:
        pr_slen.append(s_slen1.argmax().item())
        # pr_slen_value.append(F.softmax(s_slen1, dim=0).max().item())
        pr_slen_value.append(get_tensor_value(F.softmax(s_slen1, dim=0), [pr_slen[-1]]))
    return pr_slen, pr_slen_value
def pred_wc_value(wn, s_wc):
    """Predict where-columns (sorted ascending) and their probabilities.

    wn   : number of where-conditions per example.
    s_wc : per-example scores shaped [hs, 4] (columns x condition slots).
    Returns (pr_wc, pr_wc_value).
    """
    pr_wc = []
    pr_wc_value = []
    for b, wn1 in enumerate(wn):
        s_wc1 = s_wc[b]  # [hs, 4]
        s_wc1_t = torch.transpose(s_wc1, 1, 0)
        # Best column per condition slot, truncated to the predicted wn1.
        pr_wc1 = np.argmax(s_wc1.data.cpu().numpy(), axis=0)[:wn1]
        pr_wc1.sort()
        pr_wc.append(list(pr_wc1))
        values = []
        for j, pr_wc11 in enumerate(list(pr_wc1)):
            # NOTE(review): pr_wc1 is sorted before being paired with slot j
            # here, so the probability is read from slot j's distribution for
            # the j-th *sorted* column — verify this alignment is intended.
            sf_value = F.softmax(s_wc1_t[j], dim=0)
            pr_wc_value1 = get_tensor_value(sf_value, [pr_wc11])
            values.append(pr_wc_value1[0])
        pr_wc_value.append(values)
    return pr_wc, pr_wc_value
def pred_sc_multi_value(slen, s_sc):
    """Predict the select columns (multi-column) and their probabilities.

    slen : predicted number of select columns per example.
    s_sc : per-example column scores.
    Returns (pr_sc, pr_sc_value): top-slen column indices, sorted ascending,
    and their softmax probabilities.
    """
    pr_sc = []
    pr_sc_value = []
    for b, slen1 in enumerate(slen):
        s_sc1 = s_sc[b]
        # Take the slen1 highest-scoring columns, then sort by column index.
        pr_sc1 = argsort(-s_sc1.data.cpu().numpy())[:slen1]
        pr_sc1.sort()
        pr_sc.append(list(pr_sc1))
        sf_value = F.softmax(s_sc1, dim=0)
        # print("sf_value: ", sf_value)
        pr_sc_value1 = get_tensor_value(sf_value, list(pr_sc1))
        pr_sc_value.append(pr_sc_value1)
    return pr_sc, pr_sc_value
def pred_sa_multi_value(wn, s_wo):
    """Predict select-aggregation ops (one per select column) with probs.

    NOTE(review): body is identical to pred_wo_value; here the arguments are
    the select-length and select-agg scores despite the where-style names.
    """
    pr_wo_a = s_wo.argmax(dim=2)  # [B, 4]
    pr_wo = []
    pr_wo_value = []
    for b, pr_wo_a1 in enumerate(pr_wo_a):
        wn1 = wn[b]
        # Keep only the first wn1 slots' argmax ops.
        pr_wo1 = pr_wo_a1.data.cpu().numpy()[:wn1]
        pr_wo.append(list(pr_wo1))
        s_wo1 = s_wo[b]
        values = []
        for j, pr_wo11 in enumerate(list(pr_wo1)):
            sf_value = F.softmax(s_wo1[j], dim=0)
            pr_wo_value1 = get_tensor_value(sf_value, [pr_wo11])
            values.append(pr_wo_value1[0])
        pr_wo_value.append(values)
    return pr_wo, pr_wo_value
def pred_wo_value(wn, s_wo):
    """Predict where-operators for the first wn[b] conditions, with probs.

    NOTE(review): body is identical to pred_sa_multi_value.
    """
    pr_wo_a = s_wo.argmax(dim=2)  # [B, 4]
    pr_wo = []
    pr_wo_value = []
    for b, pr_wo_a1 in enumerate(pr_wo_a):
        wn1 = wn[b]
        # Keep only the first wn1 condition slots' argmax operators.
        pr_wo1 = pr_wo_a1.data.cpu().numpy()[:wn1]
        pr_wo.append(list(pr_wo1))
        s_wo1 = s_wo[b]
        values = []
        for j, pr_wo11 in enumerate(list(pr_wo1)):
            sf_value = F.softmax(s_wo1[j], dim=0)
            pr_wo_value1 = get_tensor_value(sf_value, [pr_wo11])
            values.append(pr_wo_value1[0])
        pr_wo_value.append(values)
    return pr_wo, pr_wo_value
def pred_wvi_se_value(wn, s_wv):
    """Predict where-value (start, end) token indices and their probabilities.

    s_wv : [B, 4, mL, 2] logits; the last dim is (start, end).
    The end index is constrained to lie at or after the chosen start index.
    """
    s_wv_st, s_wv_ed = s_wv.split(1, dim=3)  # [B, 4, mL, 2] -> [B, 4, mL, 1], [B, 4, mL, 1]
    s_wv_st = s_wv_st.squeeze(3)  # [B, 4, mL, 1] -> [B, 4, mL]
    s_wv_ed = s_wv_ed.squeeze(3)
    pr_wvi_st_idx = s_wv_st.argmax(dim=2)  # [B, 4, mL] -> [B, 4, 1]
    pr_wvi = []
    pr_wvi_value = []
    for b, wn1 in enumerate(wn):
        pr_wvi1 = []
        pr_wvi_value1 = []
        for i_wn in range(wn1):
            pr_wvi_st_idx11 = pr_wvi_st_idx[b][i_wn]
            # End = best-scoring position at or after the start.
            pr_wvi_ed_idx11 = s_wv_ed[b][i_wn][pr_wvi_st_idx11:].argmax() + pr_wvi_st_idx11
            pr_wvi1.append([pr_wvi_st_idx11.item(), pr_wvi_ed_idx11.item()])
            sf_st_value = F.softmax(s_wv_st[b, i_wn], dim=0)
            pr_st_value1 = get_tensor_value(sf_st_value, [pr_wvi_st_idx11.item()])
            sf_ed_value = F.softmax(s_wv_ed[b, i_wn], dim=0)
            pr_ed_value1 = get_tensor_value(sf_ed_value, [pr_wvi_ed_idx11.item()])
            pr_wvi_value1.append([pr_st_value1[0], pr_ed_value1[0]])
        pr_wvi.append(pr_wvi1)
        pr_wvi_value.append(pr_wvi_value1)
    return pr_wvi, pr_wvi_value
def pred_sw_se(s_sc, s_cco, s_sa, s_wn, s_wc, s_wo, s_wv, s_slen):
    """Run all sub-task decoders and return the predicted SQL components.

    Decoding order matters: select-length gates the select columns/aggs,
    and where-number gates the where-side predictions. The per-prediction
    probability values returned by the *_value helpers are discarded here.
    """
    pr_slen, _ = pred_slen_value(s_slen)
    pr_sc, _ = pred_sc_multi_value(pr_slen, s_sc)
    pr_sa, _ = pred_sa_multi_value(pr_slen, s_sa)
    pr_wn, _ = pred_wn_value(s_wn)
    pr_scco, _ = pred_scco_value(s_cco, pr_wn)
    pr_wc, _ = pred_wc_value(pr_wn, s_wc)
    pr_wo, _ = pred_wo_value(pr_wn, s_wo)
    pr_wvi, _ = pred_wvi_se_value(pr_wn, s_wv)
    return pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, pr_slen
19,388 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def find_sql_where_op(gt_sql_tokens_part):
    """Return the where-operator token found in a condition span.

    gt_sql_tokens_part: tokens between 'WHERE' and 'AND' (if present).
    Checks 'EQL', 'LT', 'GT' in that order (wv text itself sometimes
    contains literal =, < or >, so only the symbolic names are searched).

    Raises:
        ValueError: if no operator token is present. (The original code
        shadowed its candidate list with the loop variable and raised a
        confusing NameError on an unbound local in this case.)
    """
    for op in ('EQL', 'LT', 'GT'):
        if op in gt_sql_tokens_part:
            return op
    raise ValueError(
        'no SQL where-operator (EQL/LT/GT) found in: %r' % (gt_sql_tokens_part,))
def find_sub_list(sl, l):
    """Return all (start, end) inclusive index pairs where list *sl* occurs in *l*."""
    # from stack overflow.
    hits = []
    n = len(sl)
    for start, elem in enumerate(l):
        # Cheap first-element check before the full slice comparison.
        if elem == sl[0] and l[start:start + n] == sl:
            hits.append((start, start + n - 1))
    return hits
The provided code snippet includes necessary dependencies for implementing the `get_g_wvi_bert` function. Write a Python function `def get_g_wvi_bert(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t)` to solve the following problem:
Generate SQuAD-style start and end indices of wv in nlu. Indices refer to tokens after WordPiece tokenization. Assumption: where_str is always present in the nlu.
Here is the function:
def get_g_wvi_bert(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t):
    """
    Generate SQuAD-style start/end indices of the where-values in nlu.
    Indices refer to tokens after WordPiece tokenization.
    Assumption: the where-value string is always present in the nlu.
    """
    g_wvi = []
    for b, sql_i1 in enumerate(sql_i):
        nlu1 = nlu[b]
        nlu_t1 = nlu_t[b]
        nlu_wp_t1 = nlu_wp_t[b]
        sql_t1 = sql_t[b]
        wh_to_wp_index1 = wh_to_wp_index[b]
        # First token after 'WHERE' (or past-the-end when there is no WHERE,
        # which skips the loop entirely).
        st = sql_t1.index('WHERE') + 1 if 'WHERE' in sql_t1 else len(sql_t1)
        g_wvi1 = []
        while st < len(sql_t1):
            # Each condition spans up to the next 'AND' (or end of query).
            if 'AND' not in sql_t1[st:]:
                ed = len(sql_t1)
            else:
                ed = sql_t1[st:].index('AND') + st
            sql_wop = find_sql_where_op(sql_t1[st:ed])  # sql where operator
            st_wop = st + sql_t1[st:ed].index(sql_wop)
            # Tokens after the operator are the where-value.
            wv_str11_t = sql_t1[st_wop + 1:ed]
            results = find_sub_list(wv_str11_t, nlu_t1)
            # NOTE(review): assumes at least one occurrence in nlu_t1;
            # IndexError otherwise (see docstring assumption).
            st_idx, ed_idx = results[0]
            # Map whitespace-token indices to word-piece indices.
            st_wp_idx = wh_to_wp_index1[st_idx]
            ed_wp_idx = wh_to_wp_index1[ed_idx]
            g_wvi11 = [st_wp_idx, ed_wp_idx]
            g_wvi1.append(g_wvi11)
            st = ed + 1
        g_wvi.append(g_wvi1)
    return g_wvi
19,389 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_amr_infos(t, l_n, l_hs):
    """Build dependency head/label arrays and a question-part mask per example.

    t    : batch of examples carrying 'struct_question' (per-token head
           indices) and 'struct_label' (per-token dependency labels).
    l_n  : question token counts per example.
    l_hs : header token counts per example.
    Returns (heads, deps, part_masks). Each heads/deps array has length
    l_n[b] + l_hs[b]; only the question positions are populated and masked.
    """
    batch_size = len(l_n)
    maxlen = 0
    for i, ln in enumerate(l_n):
        if ln+ l_hs[i] > maxlen: maxlen = ln+ l_hs[i]
    part_masks = Variable(torch.Tensor(batch_size, maxlen).zero_(), requires_grad=False)
    heads = []
    deps = []
    # print('ln:',l_n)
    for b, t1 in enumerate(t):
        # print('s ques:', len(t1['struct_question']), t1['struct_question'])
        assert len(t1['struct_question']) == len(t1['struct_label'])
        assert len(t1['struct_question']) == l_n[b]
        head = np.zeros((l_n[b] + l_hs[b]), dtype=np.int32)
        dep = np.zeros((l_n[b] + l_hs[b]), dtype=np.int32)
        # print('headi:', head.size)
        for j in range(l_n[b]):
            # Head indices are offset past the question tokens, i.e. they
            # point into the header region of the concatenated sequence.
            head[j] = t1['struct_question'][j] + l_n[b]
            dep[j] = t1['struct_label'][j]
            part_masks[b, j] = 1
        heads.append(head)
        deps.append(dep)
    return heads, deps, part_masks
19,390 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `get_g_wvi_bert_from_g_wvi_corenlp` function. Write a Python function `def get_g_wvi_bert_from_g_wvi_corenlp(wh_to_wp_index, g_wvi_corenlp)` to solve the following problem:
Generate SQuAD style start and end index of wv in nlu. Index is for of after WordPiece tokenization. Assumption: where_str always presents in the nlu.
Here is the function:
def get_g_wvi_bert_from_g_wvi_corenlp(wh_to_wp_index, g_wvi_corenlp):
    """
    Map whitespace-token (start, end) where-value spans to word-piece
    indices (SQuAD-style, post-WordPiece).

    A span of (-100, -100) is a sentinel for "no span" and is passed
    through unchanged.
    """
    g_wvi = []
    for b, spans in enumerate(g_wvi_corenlp):
        wp_map = wh_to_wp_index[b]
        converted = []
        for st, ed in spans:
            if st == -100 and ed == -100:
                converted.append([-100, -100])
            else:
                converted.append([wp_map[st], wp_map[ed]])
        g_wvi.append(converted)
    return g_wvi
19,391 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def find_sql_where_op(gt_sql_tokens_part):
    """Return the where-operator token found in a condition span.

    gt_sql_tokens_part: tokens between 'WHERE' and 'AND' (if present).
    Checks 'EQL', 'LT', 'GT' in that order (wv text itself sometimes
    contains literal =, < or >, so only the symbolic names are searched).

    Raises:
        ValueError: if no operator token is present. (The original code
        shadowed its candidate list with the loop variable and raised a
        confusing NameError on an unbound local in this case.)
    """
    for op in ('EQL', 'LT', 'GT'):
        if op in gt_sql_tokens_part:
            return op
    raise ValueError(
        'no SQL where-operator (EQL/LT/GT) found in: %r' % (gt_sql_tokens_part,))
def find_sub_list(sl, l):
    """Return (start, end) inclusive index pairs of every occurrence of *sl* in *l*."""
    # from stack overflow.
    matches = []
    width = len(sl)
    for i in range(len(l)):
        if l[i] == sl[0] and l[i:i + width] == sl:
            matches.append((i, i + width - 1))
    return matches
The provided code snippet includes necessary dependencies for implementing the `get_g_wvi_bert_from_sql_i` function. Write a Python function `def get_g_wvi_bert_from_sql_i(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t)` to solve the following problem:
Generate SQuAD style start and end index of wv in nlu. Index is for of after WordPiece tokenization. Assumption: where_str always presents in the nlu.
Here is the function:
def get_g_wvi_bert_from_sql_i(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t):
    """
    Generate SQuAD-style start/end indices of the where-values in nlu.
    Indices refer to tokens after WordPiece tokenization.
    Assumption: the where-value string is always present in the nlu.

    NOTE(review): body is identical to get_g_wvi_bert elsewhere in this
    module — candidates for consolidation.
    """
    g_wvi = []
    for b, sql_i1 in enumerate(sql_i):
        nlu1 = nlu[b]
        nlu_t1 = nlu_t[b]
        nlu_wp_t1 = nlu_wp_t[b]
        sql_t1 = sql_t[b]
        wh_to_wp_index1 = wh_to_wp_index[b]
        # First token after 'WHERE' (or past-the-end when there is no WHERE).
        st = sql_t1.index('WHERE') + 1 if 'WHERE' in sql_t1 else len(sql_t1)
        g_wvi1 = []
        while st < len(sql_t1):
            # Each condition spans up to the next 'AND' (or end of query).
            if 'AND' not in sql_t1[st:]:
                ed = len(sql_t1)
            else:
                ed = sql_t1[st:].index('AND') + st
            sql_wop = find_sql_where_op(sql_t1[st:ed])  # sql where operator
            st_wop = st + sql_t1[st:ed].index(sql_wop)
            # Tokens after the operator are the where-value.
            wv_str11_t = sql_t1[st_wop + 1:ed]
            results = find_sub_list(wv_str11_t, nlu_t1)
            # NOTE(review): assumes at least one occurrence in nlu_t1.
            st_idx, ed_idx = results[0]
            # Map whitespace-token indices to word-piece indices.
            st_wp_idx = wh_to_wp_index1[st_idx]
            ed_wp_idx = wh_to_wp_index1[ed_idx]
            g_wvi11 = [st_wp_idx, ed_wp_idx]
            g_wvi1.append(g_wvi11)
            st = ed + 1
        g_wvi.append(g_wvi1)
    return g_wvi
19,392 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_cnt_sc(g_sc, pr_sc):
    """Count examples whose predicted select-column equals the ground truth."""
    return sum(1 for g, p in zip(g_sc, pr_sc) if p == g)
def get_cnt_sa(g_sa, pr_sa):
    """Count examples whose predicted select-aggregation equals the ground truth."""
    correct = 0
    for b in range(len(g_sa)):
        if pr_sa[b] == g_sa[b]:
            correct += 1
    return correct
def get_cnt_wn(g_wn, pr_wn):
    """Count examples whose predicted where-number equals the ground truth."""
    # sum of booleans yields the match count.
    return sum(p == g for g, p in zip(g_wn, pr_wn))
def get_cnt_wc(g_wc, pr_wc):
    """Count examples whose predicted where-column set matches the ground truth.

    Gold columns are compared order-insensitively (sorted ascending); the
    prediction is assumed to be already sorted ascending.
    """
    cnt = 0
    for gold, pred in zip(g_wc, pr_wc):
        if len(pred) != len(gold):
            continue
        if array_equal(pred, sorted(gold)):
            cnt += 1
    return cnt
def get_cnt_wo(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode):
    """Count examples whose where-operator sequence matches the ground truth.

    pr's are all sorted as pr_wc is sorted in increasing order (in column
    idx), whereas g's are not: in 'test' mode the gold ops are re-ordered to
    the gold columns' sorted order before comparing. In 'train' mode teacher
    forcing fixes the order, so no sort is needed.
    """
    cnt = 0
    for b, g_wo1 in enumerate(g_wo):
        g_wc1 = g_wc[b]
        pr_wc1 = pr_wc[b]
        pr_wo1 = pr_wo[b]
        pr_wn1 = len(pr_wo1)
        g_wn1 = g_wn[b]
        if g_wn1 != pr_wn1:
            continue
        else:
            # Sort based on wc sequence.
            if mode == 'test':
                idx = argsort(array(g_wc1))
                g_wo1_s = array(g_wo1)[idx]
                g_wo1_s = list(g_wo1_s)
            elif mode == 'train':
                # due to teacher forcing, no need to sort.
                g_wo1_s = g_wo1
            else:
                raise ValueError
            if type(pr_wo1) != list:
                raise TypeError
            if g_wo1_s == pr_wo1:
                cnt += 1
    return cnt
def get_cnt_wv(g_wn, g_wc, g_wvi, pr_wvi, mode):
    """Count examples whose where-value index spans all match the gold spans.

    Usable only when g_wc was used to find pr_wv. In 'test' mode gold spans
    are compared in the gold columns' sorted order (predictions are
    column-sorted); in 'train' mode order is as-is (teacher forcing).
    """
    cnt = 0
    for b, g_wvi1 in enumerate(g_wvi):
        pr_wvi1 = pr_wvi[b]
        g_wc1 = g_wc[b]
        pr_wn1 = len(pr_wvi1)
        g_wn1 = g_wn[b]
        # Now sorting.
        # Sort based wc sequence.
        if mode == 'test':
            idx1 = argsort(array(g_wc1))
        elif mode == 'train':
            idx1 = list(range(g_wn1))
        else:
            raise ValueError
        if g_wn1 != pr_wn1:
            continue
        else:
            flag = True
            for i_wn, idx11 in enumerate(idx1):
                g_wvi11 = g_wvi1[idx11]
                pr_wvi11 = pr_wvi1[i_wn]
                if g_wvi11 != pr_wvi11:
                    flag = False
                    break
            if flag:
                cnt += 1
    return cnt
The provided code snippet includes necessary dependencies for implementing the `get_cnt_sw` function. Write a Python function `def get_cnt_sw(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, mode)` to solve the following problem:
Usable only when g_wc was used to find pr_wv.
Here is the function:
def get_cnt_sw(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, mode):
    """Aggregate per-sub-task correct counts over the batch.

    Usable only when g_wc was used to find pr_wv.
    Returns (cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wv).
    """
    cnt_sc = get_cnt_sc(g_sc, pr_sc)
    cnt_sa = get_cnt_sa(g_sa, pr_sa)
    cnt_wn = get_cnt_wn(g_wn, pr_wn)
    cnt_wc = get_cnt_wc(g_wc, pr_wc)
    cnt_wo = get_cnt_wo(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode)
    cnt_wv = get_cnt_wv(g_wn, g_wc, g_wvi, pr_wvi, mode)
    return cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wv
19,393 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_cnt_sc_list(g_sc, pr_sc):
    """Per-example 0/1 list: 1 where the prediction equals the ground truth."""
    return [1 if pr_sc[b] == g else 0 for b, g in enumerate(g_sc)]
def get_cnt_wc_list(g_wc, pr_wc):
    """Per-example 0/1 list for where-column sets.

    Gold columns are sorted ascending before comparison; the prediction is
    assumed already sorted ascending. A length mismatch scores 0.
    """
    out = []
    for b, gold in enumerate(g_wc):
        pred = pr_wc[b]
        if len(pred) != len(gold):
            out.append(0)
        else:
            out.append(1 if array_equal(pred, sorted(gold)) else 0)
    return out
def get_cnt_wo_list(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode):
    """Per-example 0/1 list for where-operator sequences.

    pr's are all sorted as pr_wc is sorted in increasing order (in column
    idx), whereas g's are not: in 'test' mode gold ops are re-ordered to the
    gold columns' sorted order first; in 'train' mode (teacher forcing)
    order is kept as-is.
    """
    cnt_list = []
    for b, g_wo1 in enumerate(g_wo):
        g_wc1 = g_wc[b]
        pr_wc1 = pr_wc[b]
        pr_wo1 = pr_wo[b]
        pr_wn1 = len(pr_wo1)
        g_wn1 = g_wn[b]
        if g_wn1 != pr_wn1:
            cnt_list.append(0)
            continue
        else:
            # Sort based wc sequence.
            if mode == 'test':
                idx = argsort(array(g_wc1))
                g_wo1_s = array(g_wo1)[idx]
                g_wo1_s = list(g_wo1_s)
            elif mode == 'train':
                # due to teacher forcing, no need to sort.
                g_wo1_s = g_wo1
            else:
                raise ValueError
            if type(pr_wo1) != list:
                raise TypeError
            if g_wo1_s == pr_wo1:
                cnt_list.append(1)
            else:
                cnt_list.append(0)
    return cnt_list
def get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode):
    """Per-example 0/1 list for where-value (start, end) index spans.

    Usable only when g_wc was used to find pr_wv. In 'test' mode gold spans
    are compared in the gold columns' sorted order (predictions are
    column-sorted); in 'train' mode order is as-is (teacher forcing).
    """
    cnt_list = []
    for b, g_wvi1 in enumerate(g_wvi):
        g_wc1 = g_wc[b]
        pr_wvi1 = pr_wvi[b]
        pr_wn1 = len(pr_wvi1)
        g_wn1 = g_wn[b]
        # Now sorting.
        # Sort based wc sequence.
        if mode == 'test':
            idx1 = argsort(array(g_wc1))
        elif mode == 'train':
            idx1 = list(range(g_wn1))
        else:
            raise ValueError
        if g_wn1 != pr_wn1:
            cnt_list.append(0)
            continue
        else:
            flag = True
            for i_wn, idx11 in enumerate(idx1):
                g_wvi11 = g_wvi1[idx11]
                pr_wvi11 = pr_wvi1[i_wn]
                if g_wvi11 != pr_wvi11:
                    flag = False
                    break
            if flag:
                cnt_list.append(1)
            else:
                cnt_list.append(0)
    return cnt_list
def get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, mode):
    """Per-example 0/1 list comparing condition VALUE STRINGS (not indices).

    Usable only when g_wc was used to find pr_wv. Values are compared
    lower-cased; a gold condition carrying a 4th element uses that element
    as the reference string instead of the 3rd.
    """
    cnt_list = []
    for b, g_wc1 in enumerate(g_wc):
        pr_wn1 = len(pr_sql_i[b]["conds"])
        g_wn1 = g_wn[b]
        # Now sorting.
        # Sort based wc sequence.
        if mode == 'test':
            idx1 = argsort(array(g_wc1))
        elif mode == 'train':
            idx1 = list(range(g_wn1))
        else:
            raise ValueError
        if g_wn1 != pr_wn1:
            cnt_list.append(0)
            continue
        else:
            flag = True
            for i_wn, idx11 in enumerate(idx1):
                g_wvi_str11 = str(g_sql_i[b]["conds"][idx11][2]).lower()
                if len(g_sql_i[b]["conds"][idx11]) > 3:
                    # Optional 4th element overrides the comparison string.
                    g_wvi_str11 = str(g_sql_i[b]["conds"][idx11][3]).lower()
                pr_wvi_str11 = str(pr_sql_i[b]["conds"][i_wn][2]).lower()
                if g_wvi_str11 != pr_wvi_str11:
                    flag = False
                    break
            if flag:
                cnt_list.append(1)
            else:
                cnt_list.append(0)
    return cnt_list
The provided code snippet includes necessary dependencies for implementing the `get_cnt_sw_list` function. Write a Python function `def get_cnt_sw_list(g_sc, g_cond_conn_op, g_sa, g_wn, g_wc, g_wo, g_wvi, g_slen, pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, g_sql_i, pr_sql_i, mode)` to solve the following problem:
Usable only when g_wc was used to find pr_wv.
Here is the function:
def get_cnt_sw_list(g_sc, g_cond_conn_op, g_sa, g_wn, g_wc, g_wo, g_wvi, g_slen,
                    pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi,
                    g_sql_i, pr_sql_i,
                    mode):
    """Per-example 0/1 correctness lists for every sub-task.

    Usable only when g_wc was used to find pr_wv.
    Returns (cnt_sc, cnt_scco, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wvi, cnt_wv).
    """
    # Multi-column select: compared as a sorted column set, like where-columns.
    # cnt_sc = get_cnt_sc_list(g_sc, pr_sc)
    cnt_sc = get_cnt_wc_list(g_sc, pr_sc)
    cnt_scco = get_cnt_sc_list(g_cond_conn_op, pr_scco)
    # Select aggregations are order-aligned with the select columns.
    # cnt_sa = get_cnt_sc_list(g_sa, pr_sa)
    cnt_sa = get_cnt_wo_list(g_slen, g_sc, g_sa, pr_sc, pr_sa, mode)
    cnt_wn = get_cnt_sc_list(g_wn, pr_wn)
    cnt_wc = get_cnt_wc_list(g_wc, pr_wc)
    cnt_wo = get_cnt_wo_list(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode)
    if pr_wvi:
        cnt_wvi = get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode)
    else:
        # No span predictions available (e.g. string-based decoding): all 0.
        cnt_wvi = [0] * len(cnt_sc)
    cnt_wv = get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i,
                             mode)  # compare using wv-str which presented in original data.
    return cnt_sc, cnt_scco, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wvi, cnt_wv
19,394 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_cnt_lx_list(cnt_sc1, cnt_cco1, cnt_sa1, cnt_wn1, cnt_wc1, cnt_wo1, cnt_wv1):
    """Per-example logical-form accuracy: 1 iff every sub-task is correct.

    All inputs are parallel per-example 0/1 lists.
    (Dead accumulator `cnt_lx` from the original removed; it was never read.)
    """
    return [
        1 if (csc and ccco and csa and cwn and cwc and cwo and cwv) else 0
        for csc, ccco, csa, cwn, cwc, cwo, cwv
        in zip(cnt_sc1, cnt_cco1, cnt_sa1, cnt_wn1, cnt_wc1, cnt_wo1, cnt_wv1)
    ]
19,395 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_cnt_x_list(engine, tb, g_sc, g_sa, g_sql_i, pr_sc, pr_sa, pr_sql_i):
    """Execution accuracy: run gold and predicted SQL and compare results.

    Returns (cnt_x1_list, g_ans, pr_ans). A failed or empty predicted
    execution counts as 0 and records pr_ans as None.
    """
    cnt_x1_list = []
    g_ans = []
    pr_ans = []
    for b in range(len(g_sc)):
        g_ans1 = engine.execute(tb[b]['id'], g_sc[b], g_sa[b], g_sql_i[b]['conds'])
        try:
            pr_ans1 = engine.execute(tb[b]['id'], pr_sc[b], pr_sa[b], pr_sql_i[b]['conds'])
            if bool(pr_ans1):  # empty result: generated SQL matched no data
                cnt_x1 = 1 if g_ans1 == pr_ans1 else 0
            else:
                cnt_x1 = 0
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt /
            # SystemExit. Type errors etc. from incorrectly generated SQL land
            # here; execution-guided decoding could be used instead.
            pr_ans1 = None
            cnt_x1 = 0
        cnt_x1_list.append(cnt_x1)
        g_ans.append(g_ans1)
        pr_ans.append(pr_ans1)
    return cnt_x1_list, g_ans, pr_ans
19,396 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `get_mean_grad` function. Write a Python function `def get_mean_grad(named_parameters)` to solve the following problem:
Get list of mean, std of grad of each parameters Code based on web searched result..
Here is the function:
def get_mean_grad(named_parameters):
    """
    Get lists of mean and std of |grad| for each trainable parameter.
    Code based on web-searched result.

    named_parameters : iterable of (name, parameter) pairs, e.g. from
    nn.Module.named_parameters(); parameters must already hold .grad.
    Returns (mu_list, sig_list) of scalar tensors.
    """
    mu_list = []
    sig_list = []
    for name, param in named_parameters:
        if param.requires_grad:  # and ("bias" not in name) :
            # bias makes std = nan as it is of single parameters
            magnitude = param.grad.abs()
            mu_list.append(magnitude.mean())
            if len(magnitude) == 1:
                # std of a single element is nan; report 0 instead.
                sig_list.append(torch.tensor(0))
            else:
                sig_list.append(magnitude.std())
    # if "svp_se"
    return mu_list, sig_list
19,397 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def date(rows, nlu, idx):
    """Find a cell in column *idx* whose 'yy-m-d' date is mentioned in *nlu*.

    The question may omit the year or the day; a 0 component acts as a
    wildcard. Returns (cell, 1) on the first match, ("", 0) otherwise.

    (Fix: the original rebound local names `date` — shadowing this very
    function — and `nlu_date` for each regex; locals are now distinct.)
    """
    items = []
    for row in rows:
        # Guard against ragged rows by falling back to the last cell
        # (kept un-stringified, as in the original).
        items.append(str(row[idx]) if idx < len(row) else row[-1])

    nlu_date_norm = []
    # "yy年m月d日/号" — full date.
    pat_full = re.compile(r'(\d{2})[\年](\d{1,2})[\月](\d{1,2})[\日\号]')
    for y, m, d in pat_full.findall(nlu):
        nlu_date_norm.append((int(y), int(m), int(d)))
    # "m月d日/号" without a year (preceded by a non-year, non-digit char).
    pat_no_year = re.compile(r'[^\年^\d](\d{1,2})[\月](\d{1,2})[\日\号]')
    for m, d in pat_no_year.findall(nlu):
        nlu_date_norm.append((0, int(m), int(d)))
    # "yy年m月" without a day.
    pat_no_day = re.compile(r'(\d{2})[\年](\d{1,2})[\月][^\d]')
    for y, m in pat_no_day.findall(nlu):
        nlu_date_norm.append((int(y), int(m), 0))

    cell_pat = re.compile(r'(\d{2})[-](\d{1,2})[-](\d{1,2})')
    for item in items:
        item_date = cell_pat.findall(str(item))
        if len(item_date) != 1:
            continue
        iy, im, idd = (int(x) for x in item_date[0])
        for y, m, d in nlu_date_norm:
            # Each nonzero question component must match the cell's.
            if y != 0 and iy != y:
                continue
            if m != 0 and im != m:
                continue
            if d != 0 and idd != d:
                continue
            return item, 1
    return "", 0
19,398 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def sim_sort1(rows, nlu, wv, idx, used):
    """Pick the cell of column *idx* sharing the most characters with *nlu*.

    Cells already in *used* are skipped; cells of length >= 20 are ignored.
    On a ratio tie, a shorter candidate does not replace the current answer.
    Returns (best_cell, char_overlap_ratio, best_index).

    BUG FIX: the original returned the last loop index `j` instead of the
    tracked `ret_idx` of the best-matching cell.
    (wv is unused; kept for signature compatibility with sim_sort2.)
    """
    ret = ""
    best = -1
    ret_idx = 0
    items = []
    for row in rows:
        # Guard against ragged rows by falling back to the last cell.
        items.append(str(row[idx]) if idx < len(row) else row[-1])
    for j, cand in enumerate(items):
        if cand in used:
            continue
        overlap = 0.0
        for ch in str(cand):
            if ch in nlu:
                overlap += 1
        # NOTE(review): empty cell strings would divide by zero here —
        # preexisting behavior, unchanged.
        ratio = overlap / len(str(cand))
        if ratio >= best and len(str(cand)) < 20:
            if ratio == best and len(str(cand)) < len(str(ret)):
                continue
            ret = cand
            ret_idx = j
            best = ratio
    return ret, best, ret_idx
19,399 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def match_num(items, nlu):
    """Find a cell whose single integer value is mentioned in *nlu*.

    Matches either the digit form (after removing '-') or the Chinese
    numeral reading produced by num2char.
    Returns (item, index) on success, ("", 0) otherwise.
    """
    # Integers in the question; '@' sentinels let the boundary regex match
    # at string ends, and '-' is removed first (e.g. dates like "20-01").
    nlu_num = [int(u) for u in re.findall(r"[^\.\d](\d+)[^\.\d]", '@'+re.sub("[-]", "", nlu)+'@')]  # int
    if len(nlu_num) == 0:
        return "", 0
    for j, item in enumerate(items):
        # Only consider cells containing exactly one integer.
        tp = [int(u) for u in re.findall(r"[^\.\d](\d+)[^\.\d]", '@'+re.sub("[-]", "", str(item))+'@')]
        if len(tp) != 1:
            continue
        if tp[0] in nlu_num:
            return item, j
        if len(str(tp[0])) >= 10:
            # num2char supports at most 9 digits (up to the 亿 place).
            continue
        ss = num2char(str(tp[0]))
        if ss == "":
            continue
        if nlu.find(ss) != -1:
            return item, j
    return "", 0
def num2char(num):
    """Convert a decimal number string to its Chinese numeral reading.

    '123' -> '一百二十三', '1.5' -> '一点五'. A trailing '%' is stripped;
    integer parts up to 9 digits (the 亿 place) are supported.
    """
    digit = {'1': '一', '2': '二', '3': '三', '4': '四', '5': '五',
             '6': '六', '7': '七', '8': '八', '9': '九', '0': '零'}
    place = {1: '', 2: '十', 3: '百', 4: '千', 5: '万',
             6: '十', 7: '百', 8: '千', 9: '亿'}
    cleaned = re.sub('[%]', '', num.strip())
    parts = re.split('[.]', cleaned)
    int_part, frac_part = None, None
    if len(parts) == 1:
        int_part = parts[0]
    elif len(parts) == 2:
        int_part, frac_part = parts[0], parts[1]
    # Place values for each digit position, highest first
    # (e.g. a 3-digit number gets [百, 十, '']).
    places = list(range(1, len(int_part) + 1))[-1::-1]
    out = ''
    for pos, ch in enumerate(int_part):
        out = ''.join((out, digit[ch], place[places[pos]]))
    # Collapse runs of 零 and fix 万/亿 boundary artifacts.
    out = re.sub("零[十百千零]*", "零", out)
    out = re.sub("零万", "万", out)
    out = re.sub("亿万", "亿零", out)
    out = re.sub("零零", "零", out)
    out = re.sub("零\\b", "", out)
    if frac_part is not None:
        # Fractional digits are read one-by-one after 点.
        out = ''.join((out, "点"))
        for ch in frac_part:
            out = ''.join((out, digit[ch]))
    return out
def sim_sort2(rows, nlu, wv, idx, used):
    """Pick the cell of column *idx* best matching the predicted value *wv*.

    Order of attempts: numeric match via match_num (digits or Chinese
    reading against the question), then difflib similarity of *wv* against
    each cell (and against the cell's Chinese-numeral reading for
    non-string cells).
    Returns (cell_str, score, index); ("", score, last_index) when the
    best similarity is below 0.5.
    """
    ret = ""
    same = -1
    ret_idx = 0
    nlu = re.sub(r"[,]", "", nlu)
    nlu = re.sub(r"[ ]", "", nlu)
    # Re-join word-pieces into a plain string.
    wv = ''.join(wv).replace('##', '')
    items = []
    for i in rows:
        if idx < len(i):
            items.append(i[idx])
        else:
            # Ragged row: fall back to the last cell.
            items.append(i[-1])
    # for j, i in enumerate(items):
    #     if i in used:
    #         continue
    #     if nlu.find(str(i)) != -1:
    #         return i, 1, j
    # Domain-specific aliases (Chinese TV-platform dataset quirks).
    nlu = nlu.replace('湖南', '芒果TV湖南')
    nlu = re.sub(r"[\鹅]", r"腾讯", nlu)
    ret, ret_idx = match_num(items, nlu)
    if ret != "":
        return ret, 1, ret_idx
    for j, i in enumerate(items):
        if i in used:
            continue
        im = difflib.SequenceMatcher(None, wv, str(i)).quick_ratio()
        # print(im, wv, str(i), same, ret_idx)
        if im > same:
            same = im
            ret = str(i)
            ret_idx = j
        if type(i) is not str:
            try:
                # Also compare against the Chinese reading of numeric cells.
                im = difflib.SequenceMatcher(None, wv, num2char(str(i).replace('-', ''))).quick_ratio()
                if im > same:
                    same = im
                    ret = str(i)
                    ret_idx = j
            except:
                # NOTE(review): bare except hides real errors; kept as-is
                # to preserve behavior.
                pass
            # print(i)
    if same >= 0.5:
        return ret, same, ret_idx
    # NOTE(review): returns the last loop index `j`, not `ret_idx` —
    # likely unintended (sim_sort1 has the same pattern); confirm callers.
    return "", same, j
19,400 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def num2char(num):
    """Convert a decimal number string to its Chinese numeral reading.

    '123' -> '一百二十三', '1.5' -> '一点五'. A trailing '%' is stripped;
    integer parts up to 9 digits (the 亿 place) are supported.
    """
    num_dict = {'1':'一', '2':'二', '3':'三', '4':'四', '5':'五', '6':'六', '7':'七', '8':'八', '9':'九', '0':'零', }
    index_dict = {1:'', 2:'十', 3:'百', 4:'千', 5:'万', 6:'十', 7:'百', 8:'千', 9:'亿'}
    num = num.strip()
    num = re.sub('[%]', '', num)
    # nums = list(num)
    num = re.split('[.]', num)
    num_p1, num_p2 = None, None
    if len(num) == 1:
        num_p1 = num[0]
    elif len(num) == 2:
        num_p1, num_p2 = num[0], num[1]
    # for i in num:
    #     if i !=
    nums_1 = num_p1
    # Place values per digit position, highest first (len 3 -> [百, 十, '']).
    nums_index = [x for x in range(1, len(num_p1)+1)][-1::-1]
    str1 = ''
    for index, item in enumerate(num_p1):
        str1 = "".join((str1, num_dict[item], index_dict[nums_index[index]]))
    # Collapse runs of 零 and fix 万/亿 boundary artifacts.
    str1 = re.sub("零[十百千零]*", "零", str1)
    str1 = re.sub("零万", "万", str1)
    str1 = re.sub("亿万", "亿零", str1)
    str1 = re.sub("零零", "零", str1)
    str1 = re.sub("零\\b" , "", str1)
    if num_p2 is not None:
        # Fractional digits are read one-by-one after 点.
        str1 = "".join((str1, "点"))
        for index, item in enumerate(num_p2):
            str1 = "".join((str1, num_dict[item]))
    return str1
def sim_sort3(rows, nlu, idx, used):
    """Pick the table cell (column ``idx``) that best matches the question text.

    Args:
        rows: table rows; cells in column ``idx`` may be normalized IN PLACE
              (integer-valued floats like 3.0 are collapsed to int 3).
        nlu:  question as a list of sub-word tokens ('##' continuation marks
              are stripped; '两' is normalized to '二').
        idx:  column to read from each row; falls back to the last cell when
              a row is shorter than ``idx``.
        used: values already consumed by earlier conditions; skipped here.

    Returns:
        (value, score, row_index): score is 1 for an exact/numeric match,
        otherwise the best difflib quick_ratio seen; ("", -1, 0) when rows
        is empty.
    """
    best_val = ""
    best_score = -1
    best_idx = 0
    question = ''.join(nlu).replace('##', '').replace('两', '二')

    # Collect candidate cells, normalizing integer-valued floats in place so
    # their string form ("3" rather than "3.0") can match the question text.
    items = []
    for row in rows:
        cell_idx = idx if idx < len(row) else -1
        try:
            if abs(float(row[cell_idx]) - int(row[cell_idx])) < 1e-5:
                row[cell_idx] = int(row[cell_idx])
        except Exception:
            pass  # non-numeric cell: keep as-is
        items.append(row[cell_idx])

    # Pass 1: an exact substring match wins immediately.
    for j, cand in enumerate(items):
        if cand in used:
            continue
        if question.find(str(cand)) != -1:
            return cand, 1, j

    # Pass 2: fuzzy match; for numeric cells also try the Chinese reading and
    # direct numeric equality with the question.
    for j, cand in enumerate(items):
        if cand in used:
            continue
        score = difflib.SequenceMatcher(None, question, str(cand)).quick_ratio()
        if score > best_score:
            best_score = score
            best_val = str(cand)
            best_idx = j
        try:
            cand = float(cand)
            # BUG FIX: was `e-5` (a NameError silently swallowed by the bare
            # except), so this normalization never ran. Also use abs() for
            # consistency with the items loop above.
            if abs(cand - int(cand)) < 1e-5:
                cand = int(cand)
        except Exception:
            pass
        if type(cand) is not str:
            try:
                score = difflib.SequenceMatcher(
                    None, question,
                    num2char(str(cand).replace('-', ''))).quick_ratio()
                if score > best_score:
                    best_score = score
                    best_val = str(cand)
                    best_idx = j
            except Exception:
                pass
        try:
            # BUG FIX: was `e-5` here too — numeric equality with the
            # question never triggered before.
            if abs(float(cand) - float(question)) < 1e-5:
                return str(cand), 1, j
        except Exception:
            pass
    return best_val, best_score, best_idx
19,401 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def generate_sql_i(pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu, t, table):
    """Assemble predicted components into per-example SQL dicts.

    Mutates pr_scco in place: a single-condition query forces the condition
    connector to 0 (no connector). A lone condition on the table's last
    column is replaced with the Null placeholder condition.
    """
    queries = []
    for b in range(len(nlu)):
        tab = table[t[b]['table_id']]
        n_headers = len(tab['header'])
        conds = [
            [pr_wc[b][k], pr_wo[b][k],
             str(''.join(pr_wv_str[b][k]).replace('##', ''))]
            for k in range(pr_wn[b])
        ]
        if len(conds) == 1:
            pr_scco[b] = 0  # single condition: connector forced off
            if pr_wc[b][0] == n_headers - 1:
                # lone condition on the synthetic last column -> Null placeholder
                conds = [[n_headers - 1, 2, 'Null']]
        queries.append({'agg': pr_sa[b], 'cond_conn_op': pr_scco[b],
                        'sel': pr_sc[b], 'conds': conds})
    return queries
19,402 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def json_default_type_checker(o):
    """json.dumps ``default`` hook: convert numpy integer scalars to int.

    From https://stackoverflow.com/questions/11942364/typeerror-integer-is-not-json-serializable-when-serializing-json-in-python
    """
    # Local import: previously relied on `int64` leaking in via the
    # matplotlib.pylab star import; np.integer covers int64 and friends.
    import numpy as np
    if isinstance(o, np.integer):
        return int(o)
    raise TypeError


def save_for_evaluation(path_save, results, dset_name):
    """Write each result's 'query' dict as one JSON object per line.

    Args:
        path_save: output directory.
        results:   iterable of dicts each containing a 'query' entry.
        dset_name: used in the output filename results_<dset_name>.json.

    Returns:
        The path of the written file.
    """
    path_save_file = os.path.join(path_save, f'results_{dset_name}.json')
    with open(path_save_file, 'w', encoding='utf-8') as f:
        for r1 in results:
            json_str = json.dumps(r1['query'], ensure_ascii=False,
                                  default=json_default_type_checker)
            # f.write, not f.writelines: writelines on a str iterates chars.
            f.write(json_str + '\n')
    return path_save_file
19,403 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def json_default_type_checker(o):
    """json.dumps ``default`` hook (stub — body missing in this chunk; the
    sibling definition converts numpy int64 scalars to Python int)."""
def save_for_evaluation_aux(path_save, results, dset_name, ):
    """Write each full result dict as one JSON object per line (JSONL) to
    results_aux_<dset_name>.jsonl under ``path_save``. Returns None."""
    path_save_file = os.path.join(path_save, f'results_aux_{dset_name}.jsonl')
    with open(path_save_file, 'w', encoding='utf-8') as f:
        for i, r1 in enumerate(results):
            json_str = json.dumps(r1, ensure_ascii=False, default=json_default_type_checker)
            json_str += '\n'
            # NOTE(review): writelines on a str writes it char-by-char; it
            # works, but f.write(json_str) would be the conventional call.
            f.writelines(json_str)
19,404 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `check_sc_sa_pairs` function. Write a Python function `def check_sc_sa_pairs(tb, pr_sc, pr_sa, )` to solve the following problem:
Check whether pr_sc, pr_sa are allowed pairs or not. agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
Here is the function:
def check_sc_sa_pairs(tb, pr_sc, pr_sa, ):
    """
    Check whether pr_sc, pr_sa are allowed pairs or not.
    agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']

    A 'text' column only admits no-aggregation (0) or COUNT (3);
    a 'real' column admits any aggregation. Any other column type
    raises Exception.
    """
    check = []
    for b, col in enumerate(pr_sc):
        col_type = tb[b]['types'][col]
        if col_type == 'text':
            # only '' (0) or COUNT (3) make sense on text columns
            check.append(pr_sa[b] in (0, 3))
        elif col_type == 'real':
            check.append(True)
        else:
            raise Exception("New TYPE!!")
    return check
19,405 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def remap_sc_idx(idxs, pr_sc_beam):
    """Rewrite each beam's first element from a beam-local select-column
    index to the actual column index (in place); returns idxs."""
    for b, beams in enumerate(idxs):
        for beam in beams:
            beam[0] = pr_sc_beam[b][beam[0]]
    return idxs
19,406 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def sort_and_generate_pr_w(pr_sql_i):
    """Sort each query's conditions by where-column index.

    Returns (pr_wc, pr_wo, pr_wv, pr_sql_i); also rewrites each query's
    'conds' list in place in the sorted order.
    """
    pr_wc, pr_wo, pr_wv = [], [], []
    for sql1 in pr_sql_i:
        conds1 = sql1["conds"]
        wc1 = [cond[0] for cond in conds1]
        wo1 = [cond[1] for cond in conds1]
        wv1 = [cond[2] for cond in conds1]
        # permutation that orders conditions by column index
        order = argsort(wc1)
        pr_wc.append(array(wc1)[order].tolist())
        pr_wo.append(array(wo1)[order].tolist())
        pr_wv.append(array(wv1)[order].tolist())
        sql1['conds'] = [conds1[k] for k in order]
    return pr_wc, pr_wo, pr_wv, pr_sql_i
19,407 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def generate_sql_q1(sql_i1, tb1):
    """Render one SQL dict as a query string (stub — body missing in this
    chunk; see the full definition elsewhere in the project)."""
def generate_sql_q(sql_i, tb):
    """Render each predicted SQL dict in the batch via generate_sql_q1."""
    sql_q = []
    for b, sql_i1 in enumerate(sql_i):
        tb1 = tb[b]  # table metadata for example b
        sql_q1 = generate_sql_q1(sql_i1, tb1)
        sql_q.append(sql_q1)
    return sql_q
19,408 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_pnt_idx1(col_pool_type, st_ed):
    """Map a (start, end) token span to pointer index/indices.

    Args:
        col_pool_type: 'start_tok' -> start index, 'end_tok' -> end index,
                       'avg' -> array of all indices in [start, end).
        st_ed: (start, end) pair.

    Raises:
        ValueError: for an unknown pooling type. (Previously the function
        fell through and raised UnboundLocalError; an explicit error is
        consistent with get_wemb_h_FT_Scalar_1.)
    """
    st, ed = st_ed
    if col_pool_type == 'start_tok':
        return st
    elif col_pool_type == 'end_tok':
        return ed
    elif col_pool_type == 'avg':
        return arange(st, ed, 1)
    else:
        raise ValueError(f"Unknown col_pool_type: {col_pool_type}")
The provided code snippet includes necessary dependencies for implementing the `gen_g_pnt_idx` function. Write a Python function `def gen_g_pnt_idx(g_wvi, sql_i, i_hds, i_sql_vocab, col_pool_type)` to solve the following problem:
sql_vocab = ( 0.. "sql none", "sql max", "sql min", "sql count", "sql sum", "sql average", ..5 6.. "sql select", "sql where", "sql and", .. 8 9.. "sql equal", "sql greater than", "sql less than", .. 11 12.. "sql start", "sql end" .. 13 )
Here is the function:
def gen_g_pnt_idx(g_wvi, sql_i, i_hds, i_sql_vocab, col_pool_type):
    """
    Build gold pointer sequences for seq2seq decoding:
    start, select, agg, select-column, [where, (col, op, val-start, val-end,
    [and])...], end — each element resolved through get_pnt_idx1.

    sql_vocab = (
        0.. "sql none", "sql max", "sql min", "sql count", "sql sum", "sql average", ..5
        6.. "sql select", "sql where", "sql and", .. 8
        9.. "sql equal", "sql greater than", "sql less than", .. 11
        12.. "sql start", "sql end" .. 13
    )
    """
    g_pnt_idxs = []
    for b, sql_i1 in enumerate(sql_i):
        i_sql_vocab1 = i_sql_vocab[b]
        i_hds1 = i_hds[b]
        g_pnt_idxs1 = []
        # start token ("sql start" is second-to-last in the vocab spans)
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[-2])
        g_pnt_idxs1.append(pnt_idx1)
        # select token
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[6])
        g_pnt_idxs1.append(pnt_idx1)
        # select agg (agg index doubles as the vocab index 0..5)
        idx_agg = sql_i1["agg"]
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[idx_agg])
        g_pnt_idxs1.append(pnt_idx1)
        # select column
        idx_sc = sql_i1["sel"]
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_hds1[idx_sc])
        g_pnt_idxs1.append(pnt_idx1)
        conds = sql_i1["conds"]
        wn = len(conds)
        if wn <= 0:
            pass
        else:
            # select where
            pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[7])
            g_pnt_idxs1.append(pnt_idx1)
            for i_wn, conds1 in enumerate(conds):
                # where column
                idx_wc = conds1[0]
                pnt_idx1 = get_pnt_idx1(col_pool_type, i_hds1[idx_wc])
                g_pnt_idxs1.append(pnt_idx1)
                # where op (ops 0..2 map to vocab indices 9..11)
                idx_wo = conds1[1]
                pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[idx_wo + 9])
                g_pnt_idxs1.append(pnt_idx1)
                # where val: gold spans are question-relative, shift them past
                # the sql-vocab section of the concatenated input
                st, ed = g_wvi[b][i_wn]
                end_pos_of_sql_vocab = i_sql_vocab1[-1][-1]
                g_pnt_idxs1.append(st + 1 + end_pos_of_sql_vocab) # due to inital [CLS] token in BERT-input vector
                g_pnt_idxs1.append(ed + 1 + end_pos_of_sql_vocab) # due to inital [CLS] token in BERT-input vector
                # and token (between conditions, not after the last one)
                if i_wn < wn - 1:
                    pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[8])
                    g_pnt_idxs1.append(pnt_idx1)
        # end token
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[-1])
        g_pnt_idxs1.append(pnt_idx1)
        g_pnt_idxs.append(g_pnt_idxs1)
    return g_pnt_idxs
19,409 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def pred_pnt_idxs(score, pnt_start_tok, pnt_end_tok):
    """Greedy-decode pointer sequences: start token, then argmax per step,
    stopping as soon as the end token is emitted.

    score: per-example tensors of shape [T, max_seq_length].
    """
    preds = []
    for score1 in score:
        seq = [pnt_start_tok]
        for step_scores in score1:
            choice = step_scores.argmax().item()
            seq.append(choice)
            if choice == pnt_end_tok:
                break
        preds.append(seq)
    return preds
19,410 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def generate_sql_q1_s2s(pnt_idxs1, tokens1, tb1):
    """
    agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg']
    cond_ops = ['=', '>', '<', 'OP']

    Temporal as it can show only one-time conditioned case.
    sql_query: real sql_query
    sql_plus_query: More redable sql_query

    Renders one pointer sequence as a space-joined token string.
    ``tb1`` is accepted for interface compatibility but unused.
    """
    # Idiom: the original manual loop with a conditional trailing space is
    # exactly a str.join over the addressed tokens.
    return " ".join(tokens1[pnt_idxs11] for pnt_idxs11 in pnt_idxs1)


def generate_sql_q_s2s(pnt_idxs, tokens, tb):
    """Batch version of generate_sql_q1_s2s: one query string per example."""
    return [generate_sql_q1_s2s(pnt_idxs1, tokens[b], tb[b])
            for b, pnt_idxs1 in enumerate(pnt_idxs)]
19,411 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def gen_pnt_i_from_pnt(pnt, i_sql_vocab1, i_nlu1, i_hds1):
    """Classify a pointer into its vocab group and in-group index (stub —
    body missing in this chunk; see the full definition elsewhere)."""
def gen_i_vg_from_pnt_idxs(pnt_idxs, i_sql_vocab, i_nlu, i_hds):
    """For every pointer of every example, record (a) which vocab group it
    falls into (sql_vocab / question / headers) and (b) its index inside
    that group, via gen_pnt_i_from_pnt."""
    i_vg_list = []
    i_vg_sub_list = []
    for b, pnt_idxs1 in enumerate(pnt_idxs):
        # if properly generated,
        sql_q1_list = []  # NOTE(review): unused leftover from an older version
        i_vg_list1 = [] # index of (sql_vocab, nlu, hds)
        i_vg_sub_list1 = [] # index inside of each vocab group
        for t, pnt in enumerate(pnt_idxs1):
            i_vg, i_vg_sub = gen_pnt_i_from_pnt(pnt, i_sql_vocab[b], i_nlu[b], i_hds[b])
            i_vg_list1.append(i_vg)
            i_vg_sub_list1.append(i_vg_sub)
        # sql_q1 = sql_q1.join(' ')
        # sql_q.append(sql_q1)
        i_vg_list.append(i_vg_list1)
        i_vg_sub_list.append(i_vg_sub_list1)
    return i_vg_list, i_vg_sub_list
19,412 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def merge_wv_t1_eng(where_str_tokens, NLq):
    """Re-join tokenized where-value tokens into a single string, inserting
    a space only where the original (lowercased) question has one.

    Almost copied from SQLNet; the double-quote counter tracks whether we
    are inside an opening/closing quote pair.
    """
    nlq = NLq.lower()
    toks = [tok.lower() for tok in where_str_tokens]
    alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789$'
    # PTB-style escape tokens back to their literal characters.
    special = {'-LRB-': '(',
               '-RRB-': ')',
               '-LSB-': '[',
               '-RSB-': ']',
               '``': '"',
               '\'\'': '"',
               }
    # '--': '\u2013'} # this generate error for test 5661 case.
    ret = ''
    double_quote_appear = 0
    for raw_tok in toks:
        # skip '' (empty string) or None tokens
        if not raw_tok:
            continue
        w_token = special.get(raw_tok, raw_tok)
        if w_token == '"':
            # toggle: 1 after an opening quote, 0 after a closing one
            double_quote_appear = 1 - double_quote_appear
        if len(ret) == 0:
            pass  # first token: nothing to pad
        elif len(ret) > 0 and ret + ' ' + w_token in nlq:
            # the question itself has a space here
            ret = ret + ' '
        elif len(ret) > 0 and ret + w_token in nlq:
            pass  # already in good form; append without a space
        # Below for unnatural questions that the substring checks missed.
        elif w_token == '"':
            if double_quote_appear:
                # closing quote gets a space before it; opening one does not
                ret = ret + ' '
        elif w_token[0] not in alphabet:
            pass  # punctuation etc. attaches without a space
        elif (ret[-1] not in ['(', '/', '\u2013', '#', '$', '&']) and (ret[-1] != '"' or not double_quote_appear):
            ret = ret + ' '
        ret = ret + w_token
    return ret.strip()
The provided code snippet includes necessary dependencies for implementing the `gen_sql_q_from_i_vg` function. Write a Python function `def gen_sql_q_from_i_vg(tokens, nlu, nlu_t, hds, tt_to_t_idx, pnt_start_tok, pnt_end_tok, pnt_idxs, i_vg_list, i_vg_sub_list)` to solve the following problem:
( "none", "max", "min", "count", "sum", "average", "select", "where", "and", "equal", "greater than", "less than", "start", "end" ),
Here is the function:
def gen_sql_q_from_i_vg(tokens, nlu, nlu_t, hds, tt_to_t_idx, pnt_start_tok, pnt_end_tok, pnt_idxs, i_vg_list,
                        i_vg_sub_list):
    """
    Reconstruct a readable query string and a structured SQL dict from
    decoded pointer sequences. Each pointer was classified into a vocab
    group (0 = sql vocab, 1 = question tokens, 2 = headers):
    (
        "none", "max", "min", "count", "sum", "average",
        "select", "where", "and",
        "equal", "greater than", "less than",
        "start", "end"
    ),
    """
    sql_q = []
    sql_i = []
    for b, nlu_t1 in enumerate(nlu_t):
        sql_q1_list = []
        sql_i1 = {}
        tt_to_t_idx1 = tt_to_t_idx[b]
        # small state machine over the pointer stream
        nlu_st_observed = False  # a where-value start position has been seen
        agg_observed = False     # an agg keyword precedes the next header
        wc_obs = False           # a where-column has been opened
        wo_obs = False           # its operator has been seen
        conds = []
        for t, i_vg in enumerate(i_vg_list[b]):
            i_vg_sub = i_vg_sub_list[b][t]
            pnt = pnt_idxs[b][t]
            if i_vg == 0:
                # sql_vocab
                if pnt == pnt_start_tok or pnt == pnt_end_tok:
                    pass
                else:
                    tok = tokens[b][pnt]
                    if tok in ["none", "max", "min", "count", "sum", "average"]:
                        agg_observed = True
                        if tok == "none":
                            pass
                        sql_i1["agg"] = ["none", "max", "min", "count", "sum", "average"].index(tok)
                    else:
                        if tok in ["greater", "less", "equal"]:
                            if tok == 'greater':
                                tok = '>'
                            elif tok == 'less':
                                tok = '<'
                            elif tok == 'equal':
                                tok = '='
                            # gen conds1
                            # NOTE(review): conds1 is created in the i_vg == 2
                            # branch; an op pointer before any header would
                            # raise NameError here.
                            if wc_obs:
                                conds1.append(['=', '>', '<'].index(tok))
                                wo_obs = True
                        sql_q1_list.append(tok)
            elif i_vg == 1:
                # nlu case: first pointer marks the value start, second its end
                if not nlu_st_observed:
                    idx_nlu_st = pnt
                    nlu_st_observed = True
                else:
                    # now to wrap up
                    idx_nlu_ed = pnt
                    # map sub-token positions back to whitespace tokens
                    st_wh_idx = tt_to_t_idx1[idx_nlu_st - pnt_end_tok - 2]
                    ed_wh_idx = tt_to_t_idx1[idx_nlu_ed - pnt_end_tok - 2]
                    pr_wv_str11 = nlu_t1[st_wh_idx:ed_wh_idx + 1]
                    merged_wv11 = merge_wv_t1_eng(pr_wv_str11, nlu[b])
                    sql_q1_list.append(merged_wv11)
                    nlu_st_observed = False
                    if wc_obs and wo_obs:
                        conds1.append(merged_wv11)
                        conds.append(conds1)
                        wc_obs = False
                        wo_obs = False
            elif i_vg == 2:
                # headers: select column if an agg preceded, else a where column
                tok = hds[b][i_vg_sub]
                if agg_observed:
                    sql_q1_list.append(f"({tok})")
                    sql_i1["sel"] = i_vg_sub
                    agg_observed = False
                else:
                    wc_obs = True
                    conds1 = [i_vg_sub]
                    sql_q1_list.append(tok)
        # insert table name between.
        sql_i1["conds"] = conds
        sql_i.append(sql_i1)
        sql_q1 = ' '.join(sql_q1_list)
        sql_q.append(sql_q1)
    return sql_q, sql_i
19,413 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_cnt_lx_list_s2s(g_pnt_idxs, pr_pnt_idxs):
    """Per-example exact-match flags (1/0) of gold vs predicted pointer
    sequences; all counts are returned as a list here."""
    return [int(gold == pr_pnt_idxs[b]) for b, gold in enumerate(g_pnt_idxs)]
19,414 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
The provided code snippet includes necessary dependencies for implementing the `get_wemb_h_FT_Scalar_1` function. Write a Python function `def get_wemb_h_FT_Scalar_1(i_hds, l_hs, hS, all_encoder_layer, col_pool_type='start_tok')` to solve the following problem:
As if [ [table-1-col-1-tok1, t1-c1-t2, ...], [t1-c2-t1, t1-c2-t2, ...]. ... [t2-c1-t1, ...,] ] # i_hds = [ [ Batch 1 ] [ Batch 2 ] ] # [Batch 1] = [ (col1_st_idx, col1_ed_idx), (col2_st_idx, col2_ed_idx), ...] # i_hds = [[(11, 14), (15, 19), (20, 21), (22, 24), (25, 27), (28, 29)], # [(16, 19), (20, 24), (25, 26), (27, 29), (30, 32), (33, 34)]] pool_type = 'start_tok', 'end_tok', 'avg'
Here is the function:
def get_wemb_h_FT_Scalar_1(i_hds, l_hs, hS, all_encoder_layer, col_pool_type='start_tok'):
    """
    Pool last-layer encoder outputs into per-header embeddings [bS, max(l_hs), hS].

    As if
    [ [table-1-col-1-tok1, t1-c1-t2, ...],
       [t1-c2-t1, t1-c2-t2, ...].
       ...
       [t2-c1-t1, ...,]
    ]
    # i_hds = [ [ Batch 1 ] [ Batch 2 ] ]
    # [Batch 1] = [ (col1_st_idx, col1_ed_idx), (col2_st_idx, col2_ed_idx), ...]
    # i_hds = [[(11, 14), (15, 19), (20, 21), (22, 24), (25, 27), (28, 29)],
    #         [(16, 19), (20, 24), (25, 26), (27, 29), (30, 32), (33, 34)]]
    pool_type = 'start_tok', 'end_tok', 'avg'
    """
    bS = len(l_hs)
    l_hs_max = max(l_hs)
    last_layer = all_encoder_layer[-1]
    # Allocate on the same device as the encoder output so the assignments
    # below never cross devices (previously used a module-level `device`).
    wemb_h = torch.zeros([bS, l_hs_max, hS], device=last_layer.device)
    for b, i_hds1 in enumerate(i_hds):
        for i_hd, (st, ed) in enumerate(i_hds1):
            if col_pool_type == 'start_tok':
                vec = last_layer[b, st, :]
            elif col_pool_type == 'end_tok':
                vec = last_layer[b, ed, :]
            elif col_pool_type == 'avg':
                # BUG FIX: average over the token axis (dim=0 of the
                # [ed-st, hS] slice). The previous mean(dim=1, keepdim=True)
                # produced an [ed-st, 1] tensor that cannot be assigned into
                # wemb_h[b, i_hd, :].
                vec = last_layer[b, st:ed, :].mean(dim=0)
            else:
                raise ValueError
            wemb_h[b, i_hd, :] = vec
    return wemb_h
19,415 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def cal_prob_tot(p_select, p_where):
    """Total query probability per example: select prob * where prob."""
    return [p_sel1 * p_where[b] for b, p_sel1 in enumerate(p_select)]
def cal_prob_select(p_sc, p_sa):
    """Select-clause probability per example: column prob * agg prob."""
    return [p_sc1 * p_sa[b] for b, p_sc1 in enumerate(p_sc)]
def cal_prob_where(p_wn, p_wc, p_wo, p_wvi):
    """Where-clause probability per example: product of the condition-count
    prob and, for each condition, its column/op/value-start/value-end probs."""
    p_where = []
    for b, p_wn1 in enumerate(p_wn):
        prob = 1.0
        prob *= p_wn1
        for i_wn, p_wc11 in enumerate(p_wc[b]):
            p_st, p_ed = p_wvi[b][i_wn]
            prob *= p_wc11
            prob *= p_wo[b][i_wn]
            prob *= p_st
            prob *= p_ed
        p_where.append(prob)
    return p_where
def _pick_softmax_prob(s, pr):
    """Softmax scores [B, C] over dim=1 and return, per example, the
    probability of the chosen class index. Shared by the three wrappers
    below, which previously had three identical copies of this body."""
    ps = F.softmax(s, dim=1)
    return [ps[b][idx1].item() for b, idx1 in enumerate(pr)]
def cal_prob_sc(s_sc, pr_sc):
    """Probability of each predicted select-column."""
    return _pick_softmax_prob(s_sc, pr_sc)
def cal_prob_sa(s_sa, pr_sa):
    """Probability of each predicted select-aggregation."""
    return _pick_softmax_prob(s_sa, pr_sa)
def cal_prob_wn(s_wn, pr_wn):
    """Probability of each predicted where-condition count."""
    return _pick_softmax_prob(s_wn, pr_wn)
def cal_prob_wc(s_wc, pr_wc):
    """Sigmoid probability of each chosen where-column, per example."""
    probs = torch.sigmoid(s_wc)
    out = []
    for b, chosen_cols in enumerate(pr_wc):
        row = array(probs[b].cpu())
        out.append(list(row[chosen_cols]))
    return out
def cal_prob_wo(s_wo, pr_wo):
    """Softmax (over the op axis) probability of each chosen where-operator.
    Assumes there is always at least a single condition."""
    probs = F.softmax(s_wo, dim=2)
    return [[probs[b][n][op].item() for n, op in enumerate(ops)]
            for b, ops in enumerate(pr_wo)]
def cal_prob_wvi_se(s_wv, pr_wvi):
    """Start/end token probabilities for each predicted where-value span.

    s_wv: [B, n_conds, T, 2]; softmax over the token axis (dim=-2).
    Returns nested lists of [p_start, p_end] per condition per example.
    """
    prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy()
    p_wv = []
    for b, spans in enumerate(pr_wvi):
        p_wv.append([[prob_wv[b, n, st, 0], prob_wv[b, n, ed, 1]]
                     for n, (st, ed) in enumerate(spans)])
    return p_wv
The provided code snippet includes necessary dependencies for implementing the `cal_prob` function. Write a Python function `def cal_prob(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi)` to solve the following problem:
:param s_sc: [B, l_h] :param s_sa: [B, l_a] # 16 :param s_wn: [B, 5] :param s_wc: [B, l_h] :param s_wo: [B, 4, l_o] # :param s_wv: [B, 4, 22] :return:
Here is the function:
def cal_prob(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi):
    """
    Compute per-component, per-clause, and total probabilities of the
    predicted query from the raw score tensors and predicted indices.

    :param s_sc: [B, l_h]
    :param s_sa: [B, l_a] # 16
    :param s_wn: [B, 5]
    :param s_wc: [B, l_h]
    :param s_wo: [B, 4, l_o] #
    :param s_wv: [B, 4, 22]
    :return: (p_tot, p_select, p_where, p_sc, p_sa, p_wn, p_wc, p_wo, p_wvi)
    """
    # First get selected index
    #
    # Predict prob
    p_sc = cal_prob_sc(s_sc, pr_sc)
    p_sa = cal_prob_sa(s_sa, pr_sa)
    p_wn = cal_prob_wn(s_wn, pr_wn)
    p_wc = cal_prob_wc(s_wc, pr_wc)
    p_wo = cal_prob_wo(s_wo, pr_wo)
    p_wvi = cal_prob_wvi_se(s_wv, pr_wvi)
    # calculate select-clause probability
    p_select = cal_prob_select(p_sc, p_sa)
    # calculate where-clause probability
    p_where = cal_prob_where(p_wn, p_wc, p_wo, p_wvi)
    # calculate total probability
    p_tot = cal_prob_tot(p_select, p_where)
    return p_tot, p_select, p_where, p_sc, p_sa, p_wn, p_wc, p_wo, p_wvi
19,416 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
The provided code snippet includes necessary dependencies for implementing the `sort_pr_wc` function. Write a Python function `def sort_pr_wc(pr_wc, g_wc)` to solve the following problem:
Input: list pr_wc = [B, n_conds] g_wc = [B, n_conds] Return: list pr_wc_sorted = [B, n_conds]
Here is the function:
def sort_pr_wc(pr_wc, g_wc):
    """
    Input: list
        pr_wc = [B, n_conds]
        g_wc = [B, n_conds]
    Return: list
        pr_wc_sorted = [B, n_conds]

    When a prediction contains exactly the gold columns, reorder it to the
    gold order (so downstream comparisons align); otherwise keep the
    prediction as-is. Always returns deep copies.
    """
    result = []
    for b, pred_cols in enumerate(pr_wc):
        gold_cols = g_wc[b]
        # adopt gold order only when the column sets agree
        source = gold_cols if set(gold_cols) == set(pred_cols) else pred_cols
        result.append(deepcopy(source))
    return result
19,417 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
# Chinese numeral digit -> ASCII digit (includes formal/banker variants and 两).
cn_sum = {
    '〇': '0', '一': '1', '二': '2', '三': '3', '四': '4', '五': '5', '六': '6', '七': '7', '八': '8', '九': '9', '零': '0',
    '壹': '1', '贰': '2', '叁': '3', '肆': '4', '伍': '5', '陆': '6', '柒': '7', '捌': '8', '玖': '9', '貮': '2', '两': '2',
}
# Chinese unit character -> multiplier (角/分 are the decimal currency units).
cn_unit = {
    '十': 10,
    '拾': 10,
    '百': 100,
    '佰': 100,
    '千': 1000,
    '仟': 1000,
    '万': 10000,
    '萬': 10000,
    '亿': 100000000,
    '億': 100000000,
    '兆': 1000000000000,
    '角': 0.1,
    '分': 0.01
}
def chn_to_sum(chn):
    # Takes a string of Chinese numerals and returns its numeric value
    # (rounded to 2 decimals). '点' switches to digit-by-digit fractional mode.
    # NOTE(review): `sum` shadows the builtin; the lis[k-1] indexing assumes a
    # digit always precedes each unit — inputs like '十五' or '三十' appear to
    # be mis-handled (digits can be double-counted). Verify against callers.
    sum = 0
    lis = []       # mixed sequence of digit strings and unit multipliers
    flo = False    # True once '点' was seen (fractional mode)
    str_flo = ''   # accumulated fractional digits
    for i in chn:
        if flo:
            if i in cn_sum:
                str_flo += cn_sum[i]
            if i in cn_unit:
                lis.append(cn_unit[i])
        else:
            if i == '点':
                flo = True
            if i in cn_sum:
                lis.append(cn_sum[i])
            if i in cn_unit:
                lis.append(cn_unit[i])
    for k in range(len(lis)):
        if k == len(lis) - 1:
            if str_flo:
                # add the fractional part when processing the final element
                sum += float('.' + str_flo)
        if type(lis[k]) == str:
            sum = sum + int(lis[k])
        if type(lis[k]) in [int, float]:
            if lis[k] > sum:
                # larger unit than the running total: scale up (e.g. ...万)
                sum = (sum + int(lis[k - 1])) * lis[k]
            else:
                sum = sum + (int(lis[k - 1]) * lis[k])
    return round(sum, 2)
19,418 | import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from uer.model_loader import load_model
from uer.model_saver import save_model
from uer.model_builder import build_model
from uer.utils.optimizers import *
from uer.utils.data import *
from uer.utils.vocab import Vocab
from uer.utils.seed import set_seed
def worker(proc_id, gpu_ranks, args, model):
    """
    Per-process training entry point: resolve rank/GPU, build the data
    loader, optimizer and scheduler, optionally wrap for fp16/distributed,
    then dispatch to the target-specific train_<target> function.

    Args:
        proc_id: The id of GPU for single GPU mode;
                 The id of process (and GPU) for multiprocessing distributed mode.
        gpu_ranks: List of ranks of each process.
    """
    set_seed(args.seed)

    # Resolve the process rank and the GPU to pin (None means CPU).
    if args.dist_train:
        rank = gpu_ranks[proc_id]
        gpu_id = proc_id
    elif args.single_gpu:
        rank = None
        gpu_id = proc_id
    else:
        rank = None
        gpu_id = None

    # Data loader class is looked up by naming convention, e.g.
    # target 'bert' -> BertDataLoader (expected among the star imports).
    if args.dist_train:
        train_loader = globals()[args.target.capitalize() + "DataLoader"](args, args.dataset_path, args.batch_size, rank, args.world_size, True)
    else:
        train_loader = globals()[args.target.capitalize() + "DataLoader"](args, args.dataset_path, args.batch_size, 0, 1, True)

    if gpu_id is not None:
        torch.cuda.set_device(gpu_id)
        model.cuda(gpu_id)

    # Build optimizer.
    param_optimizer = list(model.named_parameters())
    # Bias/LayerNorm parameters are excluded from weight decay.
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}
    ]
    # AdamW / WarmupLinearSchedule come from uer.utils.optimizers (star import).
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, correct_bias=False)
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.total_steps*args.warmup, t_total=args.total_steps)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        args.amp = amp

    if args.dist_train:
        # Initialize multiprocessing distributed training environment.
        dist.init_process_group(backend=args.backend,
                                init_method=args.master_ip,
                                world_size=args.world_size,
                                rank=rank)
        model = DistributedDataParallel(model, device_ids=[gpu_id])
        print("Worker %d is training ... " % rank)
    else:
        print("Worker is training ...")
    # Dispatch to e.g. train_bert(...) by naming convention.
    globals().get("train_"+args.target)(args, gpu_id, rank, train_loader, model, optimizer, scheduler)
def load_model(model, model_path):
    """Load a checkpoint (CPU-mapped, non-strict) into model — or into
    model.module when the model is DataParallel-wrapped — and return it."""
    state = torch.load(model_path, map_location='cpu')
    target = model.module if hasattr(model, "module") else model
    target.load_state_dict(state, strict=False)
    return model
def build_model(args):
    """
    Build universial encoder representations models.
    The combinations of different embedding, encoder,
    and target layers yield pretrained models of different
    properties.
    We could select suitable one for downstream tasks.

    Component classes are resolved by naming convention, e.g.
    args.encoder='bert' -> BertEncoder.
    """
    vocab_size = len(args.vocab)
    subencoder = None
    if args.subword_type != "none":
        subencoder = globals()[args.subencoder.capitalize() + "Subencoder"](args, len(args.sub_vocab))
    embedding = globals()[args.embedding.capitalize() + "Embedding"](args, vocab_size)
    encoder = globals()[args.encoder.capitalize() + "Encoder"](args)
    target = globals()[args.target.capitalize() + "Target"](args, vocab_size)
    # print('emb:::', embedding, '\nEncoder:', encoder, '\nTarget:', target)
    return BertEncoderModel(args, embedding, encoder, target, subencoder)
class Vocab(object):
    """
    Word <-> index vocabulary with frequency counts. Can be loaded from /
    saved to a plain one-token-per-line file, or built from a raw corpus by
    multiple worker processes (reserved tokens are prepended when building).
    """
    def __init__(self):
        # w2i: token -> index; i2w: index -> token; w2c: token -> count
        self.w2i = {}
        self.i2w = []
        self.w2c = {}
        # Reserved-token file resolved relative to this module.
        self.reserved_vocab_path = \
            os.path.abspath(os.path.join(os.path.dirname(__file__), "../../models/reserved_vocab.txt"))
    def load(self, vocab_path, is_quiet=False):
        """Load a one-token-per-line vocab file; malformed (e.g. empty)
        lines get '???<index>' placeholder tokens."""
        with open(vocab_path, mode="r", encoding="utf-8") as reader:
            for index, line in enumerate(reader):
                try:
                    w = line.strip().split()[0]
                    self.w2i[w] = index
                    self.i2w.append(w)
                except:
                    self.w2i["???"+str(index)] = index
                    self.i2w.append("???"+str(index))
                    if not is_quiet:
                        print("Vocabulary file line " + str(index+1) + " has bad format token")
            assert len(self.w2i) == len(self.i2w)
        if not is_quiet:
            print("Vocabulary Size: ", len(self))
    def save(self, save_path):
        """Write the vocabulary, one token per line, in index order."""
        print("Vocabulary Size: ", len(self))
        with open(save_path, mode="w", encoding="utf-8") as writer:
            for w in self.i2w:
                writer.write(w + "\n")
        print("Vocabulary saving done.")
    def get(self, w):
        # UNK_ID is expected from a star import (uer.utils constants) —
        # TODO confirm where it is defined.
        return self.w2i.get(w, UNK_ID)
    def __len__(self):
        return len(self.i2w)
    def worker(self, corpus_path, tokenizer, start, end):
        """
        Worker that creates vocabulary from corpus[start:end].

        Returns a (w2i, i2w, w2c) triple for this slice.
        NOTE(review): if the file ends before line `end - 1`, the inner
        `while True` keeps reading empty strings — relies on line counts
        computed by the caller being accurate.
        """
        w2i, i2w, w2c = {}, [], {}
        pos = 0
        with open(corpus_path, mode="r", encoding="utf-8") as f:
            # Skip lines before `start`; pos advances in `finally` even when
            # a read fails.
            while pos < start:
                try:
                    f.readline()
                except:
                    continue
                finally:
                    pos += 1
            while True:
                try:
                    line = f.readline()
                except:
                    continue
                finally:
                    pos += 1
                tokens = tokenizer.tokenize(line)
                for t in tokens:
                    if t not in w2i:
                        w2i[t], w2c[t] = len(i2w), 1
                        i2w.append(t)
                    else:
                        w2c[t] += 1
                if pos >= end - 1:
                    return (w2i, i2w, w2c)
    def union(self, vocab_list):
        """ Union vocab in all workers. """
        w2i, i2w, w2c = {}, [], {}
        index = 0
        for v_p in vocab_list:
            # v_p is an AsyncResult from pool.apply_async; .get() yields the
            # worker's (w2i, i2w, w2c) triple.
            w2i_p, i2w_p, w2c_p = v_p.get()
            for w in i2w_p:
                if w not in w2i:
                    w2i[w], w2c[w] = len(i2w), w2c_p[w]
                    i2w.append(w)
                else:
                    w2c[w] += w2c_p[w]
        return (w2i, i2w, w2c)
    def build(self, corpus_path, tokenizer, workers_num=1, min_count=1):
        """ Build vocabulary from the given corpus. """
        # count_lines / Pool are expected from star imports — TODO confirm.
        print("Start %d workers for building vocabulary..." % workers_num)
        lines_num = count_lines(corpus_path)
        pool = Pool(workers_num)
        vocab_list = []
        for i in range(workers_num):
            start = i * lines_num // workers_num
            end = (i+1) * lines_num // workers_num
            vocab_list.append((pool.apply_async(func=self.worker, args=[corpus_path, tokenizer, start, end])))
        pool.close()
        pool.join()
        # Union vocab in all workers.
        w2i, i2w, w2c = self.union(vocab_list)
        # Sort w2c according to word count.
        sorted_w2c = sorted(w2c.items(), key=lambda item:item[1], reverse=True)
        # Add special symbols and remove low frequency words.
        with open(self.reserved_vocab_path, mode="r", encoding="utf-8") as reader:
            self.i2w = [line.strip().split()[0] for line in reader]
        for i, w in enumerate(self.i2w):
            self.w2i[w] = i
            self.w2c[w] = -1
        for w, c in sorted_w2c:
            if c < min_count:
                break
            if w not in self.w2i:
                self.w2i[w], self.w2c[w] = len(self.i2w), c
                self.i2w.append(w)
def set_seed(seed=7):
    """Seed every relevant RNG (Python, NumPy, PyTorch CPU/CUDA) for reproducibility.

    Also pins PYTHONHASHSEED and forces deterministic cuDNN kernels.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
def train_and_validate(args):
    """
    Pre-training entry point: seed RNGs, load the vocabulary, build the model,
    load or initialize weights, and dispatch to distributed / single-GPU / CPU
    training via the module-level worker() function.
    """
    set_seed(args.seed)
    # Load vocabulary.
    vocab = Vocab()
    vocab.load(args.vocab_path)
    args.vocab = vocab
    # Build model.
    model = build_model(args)
    # Load or initialize parameters.
    if args.pretrained_model_path is not None:
        # Initialize with pretrained model.
        model = load_model(model, args.pretrained_model_path)
    else:
        # Initialize with normal distribution (BERT-style init, std=0.02).
        # LayerNorm gain/bias ('gamma'/'beta') keep their constructor defaults.
        for n, p in list(model.named_parameters()):
            if 'gamma' not in n and 'beta' not in n:
                p.data.normal_(0, 0.02)
    if args.dist_train:
        # Multiprocessing distributed mode: one process per rank in args.gpu_ranks.
        mp.spawn(worker, nprocs=args.ranks_num, args=(args.gpu_ranks, args, model), daemon=False)
    elif args.single_gpu:
        # Single GPU mode.
        worker(args.gpu_id, None, args, model)
    else:
        # CPU mode.
        worker(None, None, args, model)
19,419 | import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from uer.model_loader import load_model
from uer.model_saver import save_model
from uer.model_builder import build_model
from uer.utils.optimizers import *
from uer.utils.data import *
from uer.utils.vocab import Vocab
from uer.utils.seed import set_seed
def save_model(model, model_path):
    """Persist model weights to model_path, unwrapping the ``.module`` attribute
    that DataParallel / DistributedDataParallel wrappers add."""
    target = model.module if hasattr(model, "module") else model
    torch.save(target.state_dict(), model_path)
def train_bert(args, gpu_id, rank, loader, model, optimizer, scheduler):
    """
    BERT pre-training loop: joint masked-LM + next-sentence-prediction objective.

    Runs for args.total_steps batches with gradient accumulation every
    args.accumulation_steps, logs throughput/loss/accuracy every
    args.report_steps (rank 0 only under distributed training), and
    checkpoints every args.save_checkpoint_steps.

    gpu_id of None means CPU; the model is assumed to return
    (loss_mlm, loss_nsp, correct_mlm, correct_nsp, denominator).
    """
    model.train()
    start_time = time.time()
    total_loss, total_loss_mlm, total_loss_nsp = 0., 0., 0.
    # Calculate MLM accuracy.
    total_correct_mlm, total_denominator = 0., 0.
    # Calculate NSP accuracy.
    total_correct_nsp, total_instances = 0., 0.
    steps = 1
    total_steps = args.total_steps
    done_tokens = 0
    loader_iter = iter(loader)
    while True:
        if steps == total_steps + 1:
            break
        src, tgt_mlm, tgt_nsp, seg = next(loader_iter)
        if gpu_id is not None:
            src = src.cuda(gpu_id)
            tgt_mlm = tgt_mlm.cuda(gpu_id)
            tgt_nsp = tgt_nsp.cuda(gpu_id)
            seg = seg.cuda(gpu_id)
        # Forward.
        loss_info = model(src, (tgt_mlm, tgt_nsp), seg)
        loss_mlm, loss_nsp, correct_mlm, correct_nsp, denominator = loss_info
        # Backward.
        loss = loss_mlm + loss_nsp
        total_loss += loss.item()
        total_loss_mlm += loss_mlm.item()
        total_loss_nsp += loss_nsp.item()
        total_correct_mlm += correct_mlm.item()
        total_correct_nsp += correct_nsp.item()
        total_denominator += denominator.item()
        total_instances += src.size(0)
        # Tokens processed since the last report (padding included).
        done_tokens += src.size(0) * src.size(1)
        # Scale loss so accumulated gradients average over accumulation_steps.
        loss = loss / args.accumulation_steps
        if args.fp16:
            # NOTE(review): args.amp presumably holds NVIDIA apex's amp module — confirm.
            with args.amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        if steps % args.accumulation_steps == 0:
            optimizer.step()
            scheduler.step()
            model.zero_grad()
        if steps % args.report_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            # Report running averages since the previous report, then reset them.
            loss = total_loss / args.report_steps
            loss_mlm = total_loss_mlm / args.report_steps
            loss_nsp = total_loss_nsp / args.report_steps
            elapsed = time.time() - start_time
            if args.dist_train:
                # Only rank 0 counted tokens; extrapolate to all ranks.
                done_tokens *= args.world_size
            print("| {:8d}/{:8d} steps"
                  "| {:8.2f} tokens/s"
                  "| loss {:7.2f}"
                  "| loss_mlm: {:3.3f}"
                  "| loss_nsp: {:3.3f}"
                  "| acc_mlm: {:3.3f}"
                  "| acc_nsp: {:3.3f}".format(
                      steps,
                      total_steps,
                      done_tokens / elapsed,
                      loss,
                      loss_mlm,
                      loss_nsp,
                      total_correct_mlm / total_denominator,
                      total_correct_nsp / total_instances))
            done_tokens = 0
            total_loss, total_loss_mlm, total_loss_nsp = 0., 0., 0.
            total_correct_mlm, total_denominator = 0., 0.
            total_correct_nsp, total_instances = 0., 0.
            start_time = time.time()
        if steps % args.save_checkpoint_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            save_model(model, args.output_model_path + "-" + str(steps))
        steps += 1
19,420 | import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from uer.model_loader import load_model
from uer.model_saver import save_model
from uer.model_builder import build_model
from uer.utils.optimizers import *
from uer.utils.data import *
from uer.utils.vocab import Vocab
from uer.utils.seed import set_seed
def save_model(model, model_path):
    """Persist model weights to model_path, unwrapping the ``.module`` attribute
    added by DataParallel / DistributedDataParallel wrappers.

    Bug fix: the original def had no body at all (a syntax error); restored to
    match the identical save_model defined alongside train_bert/train_mlm.
    """
    if hasattr(model, "module"):
        torch.save(model.module.state_dict(), model_path)
    else:
        torch.save(model.state_dict(), model_path)
def train_lm(args, gpu_id, rank, loader, model, optimizer, scheduler):
    """
    Causal language-model pre-training loop.

    Same driver skeleton as train_bert: args.total_steps batches, gradient
    accumulation, periodic reporting (rank 0 only when distributed) and
    checkpointing. The model is assumed to return (loss, correct, denominator).
    """
    model.train()
    start_time = time.time()
    total_loss = 0.
    # Calculate MLM accuracy.
    total_correct, total_denominator = 0., 0.
    # Calculate NSP accuracy.
    steps = 1
    total_steps = args.total_steps
    loader_iter = iter(loader)
    while True:
        if steps == total_steps + 1:
            break
        src, tgt, seg = next(loader_iter)
        if gpu_id is not None:
            src = src.cuda(gpu_id)
            tgt = tgt.cuda(gpu_id)
            seg = seg.cuda(gpu_id)
        # Forward.
        loss_info = model(src, tgt, seg)
        loss, correct, denominator = loss_info
        # Backward.
        total_loss += loss.item()
        total_correct += correct.item()
        total_denominator += denominator.item()
        # Average gradients over the accumulation window.
        loss = loss / args.accumulation_steps
        if args.fp16:
            # NOTE(review): args.amp presumably holds NVIDIA apex's amp module — confirm.
            with args.amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        if steps % args.accumulation_steps == 0:
            optimizer.step()
            scheduler.step()
            model.zero_grad()
        if steps % args.report_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            loss = total_loss / args.report_steps
            elapsed = time.time() - start_time
            # Throughput estimate assumes every batch has args.batch_size rows
            # and the current batch's sequence length.
            done_tokens = \
                args.batch_size * src.size(1) * args.report_steps * args.world_size \
                if args.dist_train \
                else args.batch_size * src.size(1) * args.report_steps
            print("| {:8d}/{:8d} steps"
                  "| {:8.2f} tokens/s"
                  "| loss {:7.2f}"
                  "| acc: {:3.3f}".format(
                      steps,
                      total_steps,
                      done_tokens / elapsed,
                      loss,
                      total_correct / total_denominator))
            total_loss = 0.
            total_correct, total_denominator = 0., 0.
            start_time = time.time()
        if steps % args.save_checkpoint_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            save_model(model, args.output_model_path + "-" + str(steps))
        steps += 1
19,421 | import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from uer.model_loader import load_model
from uer.model_saver import save_model
from uer.model_builder import build_model
from uer.utils.optimizers import *
from uer.utils.data import *
from uer.utils.vocab import Vocab
from uer.utils.seed import set_seed
def save_model(model, model_path):
    """Persist model weights to model_path, unwrapping the ``.module`` attribute
    added by DataParallel / DistributedDataParallel wrappers.

    Bug fix: the original def had no body at all (a syntax error); restored to
    match the identical save_model defined alongside train_bert/train_mlm.
    """
    if hasattr(model, "module"):
        torch.save(model.module.state_dict(), model_path)
    else:
        torch.save(model.state_dict(), model_path)
def train_bilm(args, gpu_id, rank, loader, model, optimizer, scheduler):
    """
    Bidirectional LM (ELMo-style) pre-training loop: the model predicts the
    sequence in both directions and returns
    (loss_forward, loss_backward, correct_forward, correct_backward, denominator).

    Same driver skeleton as train_bert: args.total_steps batches, gradient
    accumulation, periodic reporting (rank 0 only when distributed) and
    checkpointing.
    """
    model.train()
    start_time = time.time()
    total_loss, total_loss_forward, total_loss_backward = 0., 0., 0.
    # Calculate BiLM accuracy.
    total_correct_forward, total_correct_backward, total_denominator = 0., 0., 0.
    steps = 1
    total_steps = args.total_steps
    loader_iter = iter(loader)
    while True:
        if steps == total_steps + 1:
            break
        src, tgt_forward, tgt_backward, seg = next(loader_iter)
        if gpu_id is not None:
            src = src.cuda(gpu_id)
            tgt_forward = tgt_forward.cuda(gpu_id)
            tgt_backward = tgt_backward.cuda(gpu_id)
            seg = seg.cuda(gpu_id)
        # Forward.
        loss_info = model(src, (tgt_forward, tgt_backward), seg)
        loss_forward, loss_backward, correct_forward, correct_backward, denominator = loss_info
        # Backward.
        loss = loss_forward + loss_backward
        total_loss += loss.item()
        total_loss_forward += loss_forward.item()
        total_loss_backward += loss_backward.item()
        total_correct_forward += correct_forward.item()
        total_correct_backward += correct_backward.item()
        total_denominator += denominator.item()
        # Average gradients over the accumulation window.
        loss = loss / args.accumulation_steps
        if args.fp16:
            # NOTE(review): args.amp presumably holds NVIDIA apex's amp module — confirm.
            with args.amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        if steps % args.accumulation_steps == 0:
            optimizer.step()
            scheduler.step()
            model.zero_grad()
        if steps % args.report_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            loss = total_loss / args.report_steps
            elapsed = time.time() - start_time
            done_tokens = \
                args.batch_size * src.size(1) * args.report_steps * args.world_size \
                if args.dist_train \
                else args.batch_size * src.size(1) * args.report_steps
            # NOTE(review): loss_forward / loss_backward below are the *last batch's*
            # values, not window averages like `loss`.
            print("| {:8d}/{:8d} steps"
                  "| {:8.2f} tokens/s"
                  "| loss {:7.2f}"
                  "| loss_forward {:3.3f}"
                  "| loss_backward {:3.3f}"
                  "| acc_forward: {:3.3f}"
                  "| acc_backward: {:3.3f}".format(
                      steps,
                      total_steps,
                      done_tokens / elapsed,
                      loss,
                      loss_forward,
                      loss_backward,
                      total_correct_forward / total_denominator,
                      total_correct_backward / total_denominator))
            total_loss, total_loss_forward, total_loss_backward = 0., 0., 0.
            total_correct_forward, total_correct_backward, total_denominator = 0., 0., 0.
            start_time = time.time()
        if steps % args.save_checkpoint_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            save_model(model, args.output_model_path + "-" + str(steps))
        steps += 1
19,422 | import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from uer.model_loader import load_model
from uer.model_saver import save_model
from uer.model_builder import build_model
from uer.utils.optimizers import *
from uer.utils.data import *
from uer.utils.vocab import Vocab
from uer.utils.seed import set_seed
def save_model(model, model_path):
    """Persist model weights to model_path, unwrapping the ``.module`` attribute
    added by DataParallel / DistributedDataParallel wrappers.

    Bug fix: the original def had no body at all (a syntax error); restored to
    match the identical save_model defined alongside train_bert/train_mlm.
    """
    if hasattr(model, "module"):
        torch.save(model.module.state_dict(), model_path)
    else:
        torch.save(model.state_dict(), model_path)
def train_cls(args, gpu_id, rank, loader, model, optimizer, scheduler):
    """
    Sequence-classification training loop; the model returns (loss, correct).

    Same driver skeleton as train_bert: args.total_steps batches, gradient
    accumulation, periodic reporting (rank 0 only when distributed) and
    checkpointing.
    """
    model.train()
    start_time = time.time()
    total_loss = 0.
    total_correct, total_instances = 0., 0.
    steps = 1
    total_steps = args.total_steps
    loader_iter = iter(loader)
    while True:
        if steps == total_steps + 1:
            break
        src, tgt, seg = next(loader_iter)
        if gpu_id is not None:
            src = src.cuda(gpu_id)
            tgt = tgt.cuda(gpu_id)
            seg = seg.cuda(gpu_id)
        # Forward.
        loss_info = model(src, tgt, seg)
        loss, correct = loss_info
        # Backward.
        total_loss += loss.item()
        total_correct += correct.item()
        total_instances += src.size(0)
        # Average gradients over the accumulation window.
        loss = loss / args.accumulation_steps
        if args.fp16:
            # NOTE(review): args.amp presumably holds NVIDIA apex's amp module — confirm.
            with args.amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        if steps % args.accumulation_steps == 0:
            optimizer.step()
            scheduler.step()
            model.zero_grad()
        if steps % args.report_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            loss = total_loss / args.report_steps
            elapsed = time.time() - start_time
            done_tokens = \
                args.batch_size * src.size(1) * args.report_steps * args.world_size \
                if args.dist_train \
                else args.batch_size * src.size(1) * args.report_steps
            print("| {:8d}/{:8d} steps"
                  "| {:8.2f} tokens/s"
                  "| loss {:7.2f}"
                  "| acc: {:3.3f}".format(
                      steps,
                      total_steps,
                      done_tokens / elapsed,
                      loss,
                      total_correct / total_instances))
            total_loss = 0.
            total_correct = 0.
            total_instances = 0.
            start_time = time.time()
        if steps % args.save_checkpoint_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            save_model(model, args.output_model_path + "-" + str(steps))
        steps += 1
19,423 | import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from uer.model_loader import load_model
from uer.model_saver import save_model
from uer.model_builder import build_model
from uer.utils.optimizers import *
from uer.utils.data import *
from uer.utils.vocab import Vocab
from uer.utils.seed import set_seed
def save_model(model, model_path):
    """Save the model's state_dict to model_path; DataParallel/DDP wrappers
    expose the real network as ``.module``, so unwrap when present."""
    unwrapped = getattr(model, "module", model)
    torch.save(unwrapped.state_dict(), model_path)
def train_mlm(args, gpu_id, rank, loader, model, optimizer, scheduler):
    """
    Masked-LM-only pre-training loop (no next-sentence objective); the model
    returns (loss, correct, denominator).

    Same driver skeleton as train_bert: args.total_steps batches, gradient
    accumulation, periodic reporting (rank 0 only when distributed) and
    checkpointing.
    """
    model.train()
    start_time = time.time()
    # NOTE(review): total_loss_mlm / total_loss_nsp are never used below —
    # leftovers from the BERT (MLM+NSP) variant of this loop.
    total_loss, total_loss_mlm, total_loss_nsp = 0., 0., 0.
    # Calculate MLM accuracy.
    total_correct, total_denominator = 0., 0.
    # Calculate NSP accuracy.
    # NOTE(review): this assigns the tuple (0., 0.) to a single name; the
    # variable is unused below, so it is harmless dead code from the NSP variant.
    total_instances = 0., 0.
    steps = 1
    total_steps = args.total_steps
    loader_iter = iter(loader)
    while True:
        if steps == total_steps + 1:
            break
        src, tgt, seg = next(loader_iter)
        if gpu_id is not None:
            src = src.cuda(gpu_id)
            tgt = tgt.cuda(gpu_id)
            seg = seg.cuda(gpu_id)
        # Forward.
        loss_info = model(src, tgt, seg)
        loss, correct, denominator = loss_info
        # Backward.
        total_loss += loss.item()
        total_correct += correct.item()
        total_denominator += denominator.item()
        # Average gradients over the accumulation window.
        loss = loss / args.accumulation_steps
        if args.fp16:
            # NOTE(review): args.amp presumably holds NVIDIA apex's amp module — confirm.
            with args.amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        if steps % args.accumulation_steps == 0:
            optimizer.step()
            scheduler.step()
            model.zero_grad()
        if steps % args.report_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            loss = total_loss / args.report_steps
            elapsed = time.time() - start_time
            done_tokens = \
                args.batch_size * src.size(1) * args.report_steps * args.world_size \
                if args.dist_train \
                else args.batch_size * src.size(1) * args.report_steps
            print("| {:8d}/{:8d} steps"
                  "| {:8.2f} tokens/s"
                  "| loss {:7.2f}"
                  "| acc: {:3.3f}".format(
                      steps,
                      total_steps,
                      done_tokens / elapsed,
                      loss,
                      total_correct / total_denominator))
            total_loss = 0.
            total_correct, total_denominator = 0., 0.
            start_time = time.time()
        if steps % args.save_checkpoint_steps == 0 and \
                (not args.dist_train or (args.dist_train and rank == 0)):
            save_model(model, args.output_model_path + "-" + str(steps))
        steps += 1
19,424 | from __future__ import absolute_import, division, print_function, unicode_literals
from uer.utils.constants import *
from uer.utils.vocab import Vocab
import collections
import unicodedata
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    return stripped.split() if stripped else []
19,425 | from __future__ import absolute_import, division, print_function, unicode_literals
from uer.utils.constants import *
from uer.utils.vocab import Vocab
import collections
import unicodedata
The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem:
Checks whether `chars` is a whitespace character.
Here is the function:
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `chars` is a whitespace character. |
19,426 | from __future__ import absolute_import, division, print_function, unicode_literals
from uer.utils.constants import *
from uer.utils.vocab import Vocab
import collections
import unicodedata
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `chars` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False | Checks whether `chars` is a control character. |
19,427 | from __future__ import absolute_import, division, print_function, unicode_literals
from uer.utils.constants import *
from uer.utils.vocab import Vocab
import collections
import unicodedata
The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem:
Checks whether `chars` is a punctuation character.
Here is the function:
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | Checks whether `chars` is a punctuation character. |
19,428 | import os
import torch
import codecs
import random
import pickle
from multiprocessing import Pool
from uer.utils.constants import *
from uer.utils.misc import count_lines
from uer.utils.seed import set_seed
The provided code snippet includes necessary dependencies for implementing the `mask_seq` function. Write a Python function `def mask_seq(src, vocab_size)` to solve the following problem:
mask input sequence for MLM task args: src: a list of tokens vocab_size: the vocabulary size
Here is the function:
def mask_seq(src, vocab_size):
    """
    mask input sequence for MLM task
    args:
        src: a list of tokens
        vocab_size: the vocabulary size
    returns:
        (src, tgt_mlm): src is mutated IN PLACE; tgt_mlm is a list of
        (position, original_token) pairs for the masked positions.

    Standard BERT masking: each non-special token is selected with
    probability 0.15; of the selected tokens, 80% become [MASK], 10% become
    a random non-special token, and 10% are kept unchanged.
    """
    tgt_mlm = []
    for (i, token) in enumerate(src):
        # Never mask special tokens.
        if token == CLS_ID or token == SEP_ID:
            continue
        prob = random.random()
        if prob < 0.15:
            # Selected for masking: renormalize prob back into [0, 1) to
            # decide between mask / random replacement / keep-original.
            prob /= 0.15
            if prob < 0.8:
                src[i] = MASK_ID
            elif prob < 0.9:
                # Draw a random replacement, rejecting special token ids.
                while True:
                    rdi = random.randint(1, vocab_size-1)
                    if rdi not in [CLS_ID, SEP_ID, MASK_ID]:
                        break
                src[i] = rdi
            # prob in [0.9, 1): keep the original token but still predict it.
            tgt_mlm.append((i, token))
    return src, tgt_mlm
19,429 | import os
import torch
import codecs
import random
import pickle
from multiprocessing import Pool
from uer.utils.constants import *
from uer.utils.misc import count_lines
from uer.utils.seed import set_seed
def merge_dataset(dataset_path, workers_num):
    """Concatenate the per-worker temporary datasets into dataset_path.

    Reads each "dataset-tmp-<i>.pt" (for i in [0, workers_num)) in 1 MiB
    chunks and appends it to the output file.

    Bug fix: the original read size was ``2 ^ 20`` — bitwise XOR, i.e. 22
    bytes per read, not the intended 2**20 — making merging pathologically
    slow on large datasets. Files are now also closed via ``with`` even if a
    read/write raises.
    """
    chunk_size = 1 << 20  # 1 MiB
    with open(dataset_path, "wb") as f_writer:
        for i in range(workers_num):
            with open("dataset-tmp-" + str(i) + ".pt", "rb") as tmp_dataset_reader:
                while True:
                    tmp_data = tmp_dataset_reader.read(chunk_size)
                    if not tmp_data:
                        break
                    f_writer.write(tmp_data)
            # os.remove("dataset-tmp-"+str(i)+".pt")
19,430 | import json
def load_hyperparam(args):
    """Overlay model hyper-parameters from the JSON file at args.config_path
    onto ``args`` (in place) and return it.

    Any key missing from the config file falls back to the BERT-base default.
    """
    with open(args.config_path, mode="r", encoding="utf-8") as f:
        param = json.load(f)
    defaults = {
        "emb_size": 768,
        "hidden_size": 768,
        "kernel_size": 3,
        "block_size": 2,
        "feedforward_size": 3072,
        "heads_num": 12,
        "layers_num": 12,
        "dropout": 0.1,
    }
    for key, fallback in defaults.items():
        setattr(args, key, param.get(key, fallback))
    return args
19,431 | import torch
import torch.nn as nn
from uer.utils.constants import *
The provided code snippet includes necessary dependencies for implementing the `word2sub` function. Write a Python function `def word2sub(word_ids, vocab, sub_vocab, subword_type)` to solve the following problem:
word_ids: batch_size, seq_length
Here is the function:
def word2sub(word_ids, vocab, sub_vocab, subword_type):
    """Expand word ids into per-character subword ids.

    word_ids: LongTensor of shape (batch_size, seq_length).
    Returns a LongTensor of shape (batch_size * seq_length, longest_word),
    zero-padded on the right; unknown characters map to UNK_ID.
    subword_type is accepted for interface compatibility but not used here.
    """
    batch_size, seq_length = word_ids.size()
    device = word_ids.device
    flat_ids = word_ids.contiguous().view(-1).tolist()
    words = [vocab.i2w[idx] for idx in flat_ids]
    longest = max(len(w) for w in words)
    sub_ids = torch.zeros((len(words), longest), dtype=torch.long).to(device)
    for row, word in enumerate(words):
        for col, ch in enumerate(word):
            sub_ids[row, col] = sub_vocab.w2i.get(ch, UNK_ID)
    return sub_ids
19,432 | import torch
import torch.nn as nn
def count_lines(file_path):
    """Return the number of newline bytes in the file at file_path.

    Reads in binary 1 MiB chunks so arbitrarily large files stay cheap.

    Bug fix: the original chunk size was ``2 ^ 20`` — bitwise XOR, i.e. 22
    bytes per read, not the intended 2**20 — which made counting large
    corpora extremely slow (the result was still correct).
    """
    lines_num = 0
    chunk_size = 1 << 20  # 1 MiB
    with open(file_path, 'rb') as f:
        while True:
            data = f.read(chunk_size)
            if not data:
                break
            lines_num += data.count(b'\n')
    return lines_num
19,433 | import torch
import torch.nn as nn
def flip(x, dim):
    """Return a copy of tensor ``x`` reversed along dimension ``dim``
    (equivalent to torch.flip(x, [dim]))."""
    reversed_index = torch.arange(x.size(dim) - 1, -1, -1,
                                  dtype=torch.long, device=x.device)
    selector = [slice(None)] * x.dim()
    selector[dim] = reversed_index
    return x[tuple(selector)]
19,434 | import math
import torch
def gelu(x):
    """Exact GELU activation: x * Phi(x), with the Gaussian CDF expressed via erf."""
    return 0.5 * x * (torch.erf(x / math.sqrt(2.0)) + 1.0)
19,435 | import numpy as np
import os
import pandas as pd
import pickle
import copy
from collections import Counter
def com_sels(gold_sql, pr_sql, tablei, table_words):
    """Compare gold vs. predicted SELECT clauses (column, aggregation) as sets.

    gold_sql['sel'] / gold_sql['agg'] are pandas Series; pr_sql['sel'] /
    pr_sql['agg'] are lists. An index equal to len(tablei['header']) encodes
    the "no column" placeholder. table_words is unused here (kept for
    interface parity with com_conds_final).

    Bug fix: the original bound ``pre_sel_aggs = zip(...)`` — a one-shot
    iterator — and then ran ``in`` membership tests against it inside a loop.
    The first test consumed the iterator, so later gold pairs were compared
    against an empty iterator and the function wrongly returned False.
    Materializing the pairs with list() fixes this (as the sibling
    com_sels_with_split_final already does).
    """
    gold_sels = gold_sql['sel'].tolist()
    gold_aggs = gold_sql['agg'].tolist()
    pre_sels = pr_sql['sel']
    pre_aggs = pr_sql['agg']
    if len(gold_sels) != len(pre_sels):
        return False
    # "No column" placeholder case.
    if len(gold_sels) == 1 and gold_sels[0] == len(tablei['header']):
        if len(pre_sels) == 1 and pre_sels[0] == len(tablei['header']):
            return True
        else:
            return False
    else:
        # Materialize so repeated membership tests are safe.
        pre_sel_aggs = list(zip(pre_sels, pre_aggs))
        for gold_sel_aggi in zip(gold_sels, gold_aggs):
            if gold_sel_aggi not in pre_sel_aggs:
                return False
        return True
19,436 | import numpy as np
import os
import pandas as pd
import pickle
import copy
from collections import Counter
def col_val_syn(table, table_words, col_index, val):
    """Collect candidate surface forms for a condition value.

    Always includes the value itself, its lowercase form, and the value with
    the possessive suffix '的'. When a table_words DataFrame is supplied, also
    appends the value combined with each '|'-separated unit of the column and
    any '|'-separated synonyms recorded for (column, normalized value).
    """
    val = str(val)
    column_name = table['header'][col_index]
    synonyms = [val, val.lower(), val + '的']
    if table_words is None:
        return synonyms
    # Unit suffixes, e.g. "5" -> "5元".
    unit_cell = table['unit'][col_index]
    if unit_cell != 'Null' and unit_cell != "":
        for unit in str(unit_cell).split('|'):
            synonyms.append(val + unit)
    # Curated synonyms from the table_words sheet.
    for index, row in table_words.iterrows():
        if row['列名'] == column_name and str(row['归一化列值']) == val and (pd.isnull(row['同义词']) == False):
            synonyms += row['同义词'].split('|')
    return synonyms
def com_conds_final(gold_sql, pr_sql, tablei, table_words):
    """Compare gold vs. predicted WHERE conditions, tolerating synonym and
    unit variants of condition values.

    gold conds are (col_index, op, value, value_synonym, ...) tuples; predicted
    conds are [col_index, op, value, ...] lists. col_index equal to
    len(tablei['header']) encodes the "no condition" placeholder.
    Returns True only when every gold condition has a matching prediction.
    """
    gold_conds = gold_sql['conds']
    # Deep-copy so normalization below cannot mutate the caller's prediction.
    pre_conds1 = copy.deepcopy(pr_sql['conds'])
    pre_conds = []
    for x in pre_conds1:
        pre_idx, pre_op, pre_val = x[:3]
        # For text/bool columns only ops 2/3 are meaningful; coerce others to 2,
        # then deduplicate the predicted conditions.
        if pre_idx < len(tablei['header']) and tablei['types'][pre_idx] in ['text', 'bool']:
            if pre_op not in [2, 3]:
                x[1] = 2
        if x not in pre_conds:
            pre_conds.append(x)
    if len(gold_conds) != len(pre_conds):
        return False
    ## step 1: the gold "empty condition" placeholder must be matched exactly.
    if len(gold_conds) == 1 and int(gold_conds[0][0]) == len(tablei['header']):
        if len(pre_conds) == 1 and pre_conds[0][0] == len(tablei['header']):
            return True
        else:
            return False
    else:
        ## step 2: no empty condition — match values up to synonyms and units.
        for gold_condsi in gold_conds:
            cond_idx, cond_op, val, val_syn = gold_condsi[:4]
            cond_idx = int(cond_idx)
            if cond_idx < len(tablei['header']):
                val = str(val)
                val_syn = str(val_syn)
                cond_idx, cond_op = int(cond_idx), int(cond_op)
                # Expand the gold value with table-derived synonyms/units plus
                # the annotated synonym, in both original and lowercase forms.
                table_syns = col_val_syn(tablei, table_words, int(gold_condsi[0]), gold_condsi[2])
                all_cond_syns = table_syns + [val_syn]
                all_cond_syns_low = [x.lower() for x in all_cond_syns]
                all_cond_syns += all_cond_syns_low
                find_flag = False
                for pre_condi in pre_conds:
                    pre_idx, pre_op, pre_val = pre_condi[:3]
                    pre_val = str(pre_val)
                    pre_idx, pre_op = int(pre_idx), int(pre_op)
                    # Value matches if it is a substring / superstring of, or
                    # equal to, any synonym form.
                    val_right = False
                    for val_syni in all_cond_syns:
                        if val_syni in pre_val or pre_val in val_syni or pre_val == val_syni:
                            val_right = True
                    if pre_idx == cond_idx and pre_op == cond_op and (pre_val in all_cond_syns or val in pre_val or val_right):
                        find_flag = True
                if not find_flag:
                    return False
        return True
19,437 | import numpy as np
import os
import pandas as pd
import pickle
import copy
from collections import Counter
def com_sels_final(gold_sql, pr_sql, tablei, table_words):
    """Compare gold vs. predicted SELECT clauses (column, aggregation) as sets.

    Identical contract to com_sels: gold_sql['sel']/['agg'] are pandas Series,
    pr_sql['sel']/['agg'] are lists, and len(tablei['header']) encodes the
    "no column" placeholder. table_words is unused (interface parity).

    Bug fix: ``pre_sel_aggs`` was a one-shot ``zip`` iterator, so the first
    ``in`` membership test consumed it and subsequent gold pairs were compared
    against nothing, wrongly yielding False. Materialize with list() — exactly
    as the already-correct com_sels_with_split_final does.
    """
    gold_sels = gold_sql['sel'].tolist()
    gold_aggs = gold_sql['agg'].tolist()
    pre_sels = pr_sql['sel']
    pre_aggs = pr_sql['agg']
    if len(gold_sels) != len(pre_sels):
        return False
    # "No column" placeholder case.
    if len(gold_sels) == 1 and gold_sels[0] == len(tablei['header']):
        if len(pre_sels) == 1 and pre_sels[0] == len(tablei['header']):
            return True
        else:
            return False
    else:
        # Materialize so repeated membership tests are safe.
        pre_sel_aggs = list(zip(pre_sels, pre_aggs))
        for gold_sel_aggi in zip(gold_sels, gold_aggs):
            if gold_sel_aggi not in pre_sel_aggs:
                return False
        return True
19,438 | import numpy as np
import os
import pandas as pd
import pickle
import copy
from collections import Counter
def com_sels_with_split_final(gold_sql_sc, gold_sql_sa, pr_sql, tablei, table_words):
    """Compare gold vs. predicted SELECT clauses when the gold columns and
    aggregations arrive as two separate lists (gold_sql_sc / gold_sql_sa).

    An index equal to len(tablei['header']) encodes the "no column"
    placeholder. table_words is unused (interface parity with the other
    comparison helpers). Returns True when every gold (col, agg) pair appears
    among the predicted pairs and the clause lengths agree.
    """
    pre_sels = pr_sql['sel']
    pre_aggs = pr_sql['agg']
    if len(gold_sql_sc) != len(pre_sels):
        return False
    n_headers = len(tablei['header'])
    # "No column" placeholder case.
    if len(gold_sql_sc) == 1 and gold_sql_sc[0] == n_headers:
        return len(pre_sels) == 1 and pre_sels[0] == n_headers
    predicted_pairs = list(zip(pre_sels, pre_aggs))
    return all(pair in predicted_pairs for pair in zip(gold_sql_sc, gold_sql_sa))
19,439 | import torch.nn.functional as F
import torch.optim.lr_scheduler
import numpy as np
from uer.models.model import Model
from uer.model_builder import build_model
from uer.layers.layer_norm import LayerNorm
from uer.utils.act_fun import gelu
import torch.nn as nn
from torch.autograd import Variable
from matplotlib.pylab import *
The provided code snippet includes necessary dependencies for implementing the `orthonormal_initializer` function. Write a Python function `def orthonormal_initializer(output_size, input_size)` to solve the following problem:
adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/linalg.py
Here is the function:
def orthonormal_initializer(output_size, input_size):
    """
    adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/linalg.py

    Iteratively refine a random (input_size, output_size) Gaussian matrix Q
    toward orthonormality by gradient descent on ||Q^T Q - I||^2 / 2, halving
    the learning rate and restarting (up to 10 tries) whenever the iteration
    diverges. Falls back to a plain scaled Gaussian if no try converges.

    Returns the transposed matrix as float32, shape (output_size, input_size).
    """
    print(output_size, input_size)
    I = np.eye(output_size)
    lr = .1
    # Small damping term keeps the elementwise division below stable.
    eps = .05 / (output_size + input_size)
    success = False
    tries = 0
    while not success and tries < 10:
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
        for i in range(100):
            QTQmI = Q.T.dot(Q) - I
            loss = np.sum(QTQmI ** 2 / 2)
            Q2 = Q ** 2
            # Normalized gradient step; the denominator rescales per-entry.
            Q -= lr * Q.dot(QTQmI) / (
                np.abs(Q2 + Q2.sum(axis=0, keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)
            if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):
                # Diverged: halve the step size and restart with a fresh Q.
                tries += 1
                lr /= 2
                break
        success = True
        # NOTE(review): success is set True even when the inner loop broke on
        # divergence in its final allowed try — the while condition still
        # terminates, but the "failed" message below may then be skipped.
    if success:
        print('Orthogonal pretrainer loss: %.2e' % loss)
    else:
        print('Orthogonal pretrainer failed, using non-orthogonal random matrix')
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
    return np.transpose(Q.astype(np.float32))
19,440 | import torch.nn.functional as F
import torch.optim.lr_scheduler
import numpy as np
from uer.models.model import Model
from uer.model_builder import build_model
from uer.layers.layer_norm import LayerNorm
from uer.utils.act_fun import gelu
import torch.nn as nn
from torch.autograd import Variable
from matplotlib.pylab import *
def pad_sequence(xs, length=None, padding=-1, dtype=np.float64):
    """Right-pad each 1-D numpy array in ``xs`` to a common length and stack
    them into a torch tensor of shape (len(xs), length).

    When ``length`` is None the longest array's length is used; shorter arrays
    are filled with ``padding``. Each array is cast to ``dtype`` first.
    """
    lens = [len(x) for x in xs]
    target = max(lens) if length is None else length
    padded = np.array([
        np.pad(x.astype(dtype), (0, target - n),
               mode="constant", constant_values=padding)
        for x, n in zip(xs, lens)
    ])
    return torch.from_numpy(padded)
19,441 | import torch.nn.functional as F
import torch.optim.lr_scheduler
import numpy as np
from uer.models.model import Model
from uer.model_builder import build_model
from uer.layers.layer_norm import LayerNorm
from uer.utils.act_fun import gelu
import torch.nn as nn
from torch.autograd import Variable
from matplotlib.pylab import *
def _model_var(model, x):
p = next(filter(lambda p: p.requires_grad, model.parameters()))
if p.is_cuda:
x = x.cuda()
return torch.autograd.Variable(x) | null |
19,442 | import torch.nn.functional as F
import torch.optim.lr_scheduler
import numpy as np
from uer.models.model import Model
from uer.model_builder import build_model
from uer.layers.layer_norm import LayerNorm
from uer.utils.act_fun import gelu
import torch.nn as nn
from torch.autograd import Variable
from matplotlib.pylab import *
def generate_perm_inv(perm):
    """Return the inverse of the permutation ``perm`` as an int32 numpy array:
    result[perm[i]] == i for every position i."""
    inv = zeros(len(perm), dtype=int32)
    for position, target in enumerate(perm):
        inv[int(target)] = position
    return inv
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
The provided code snippet includes necessary dependencies for implementing the `encode` function. Write a Python function `def encode(lstm, wemb_l, l, return_hidden=False, hc0=None, last_only=False, U=None, V=None, ctx=None, l_hs=None)` to solve the following problem:
[batch_size, max token length, dim_emb]
Here is the function:
def encode(lstm, wemb_l, l, return_hidden=False, hc0=None, last_only=False, U=None, V=None, ctx=None, l_hs=None):
    """Run ``lstm`` over a padded batch of embeddings using pack/pad utilities.

    wemb_l: padded embeddings, [batch_size, max_token_length, dim_emb].
    l: per-example true lengths (list/array of ints).
    hc0: optional initial (h0, c0) hidden state.
    last_only: keep only each sequence's final output (or, when ``ctx`` is
        given, an attention-weighted combination conditioned on ctx).
    U, V, ctx, l_hs: only used in the last_only+ctx attention branch —
        presumably question-context attention over header encodings (TODO confirm).

    Returns wenc (and hout, cout when return_hidden=True), restored to the
    caller's original batch order.
    """
    bS, mL, eS = wemb_l.shape
    # Sort by decreasing length before packing (required by pack_padded_sequence),
    # remembering the inverse permutation to restore order afterwards.
    l = array(l)
    perm_idx = argsort(-l)
    perm_idx_inv = generate_perm_inv(perm_idx)
    # pack sequence
    packed_wemb_l = nn.utils.rnn.pack_padded_sequence(wemb_l[perm_idx, :, :],
                                                      l[perm_idx],
                                                      batch_first=True)
    # Time to encode
    if hc0 is not None:
        # Hidden state must be permuted consistently with the inputs.
        hc0 = (hc0[0][:, perm_idx], hc0[1][:, perm_idx])
    # Cast defensively; upstream embeddings may arrive in a different dtype.
    packed_wemb_l = packed_wemb_l.float()
    packed_wenc, hc_out = lstm(packed_wemb_l, hc0)
    hout, cout = hc_out
    # unpack
    wenc, _l = nn.utils.rnn.pad_packed_sequence(packed_wenc, batch_first=True)
    if last_only:
        if ctx is None:
            # Take only final outputs for each columns.
            wenc = wenc[tuple(range(bS)), l[perm_idx] - 1]  # [batch_size, dim_emb]
            wenc.unsqueeze_(1)  # [batch_size, 1, dim_emb]
        else:
            ctx = ctx.unsqueeze(1)
            # [batch_size, 1, dim_emb] -> [batch_size, 1, hS]
            wenc_u = U(ctx)
            # [batch_size, seq_len, dim_emb] -> [batch_size, seq_len, hS]
            wenc_v = V(wenc)
            start = 0
            # [batch_size, 1, dim_emb]
            wenc2 = torch.zeros(wenc.shape[0], 1, wenc.shape[2])
            # Each ctx row attends over its own slice of l_hs[b] rows of wenc.
            for b in range(ctx.shape[0]):
                # [1, hS] * [batch_size, seq_len, hS] -> [batch_size, seq_len, hS]
                attn = torch.mul(wenc_u[b], wenc_v[start:start + l_hs[b]])
                # [batch_size, seq_len]
                attn = F.softmax(attn.sum(2), dim=1)
                wenc1 = torch.bmm(attn.unsqueeze(1), wenc[start:start + l_hs[b]])
                # Residual connection with the context vector.
                wenc1 += ctx[b]
                wenc2[start:start + l_hs[b]] = wenc1
                start += l_hs[b]
            wenc = wenc2
    # Restore original (pre-sort) batch order.
    wenc = wenc[perm_idx_inv]
    if return_hidden:
        # hout/cout: [num_directions * num_layers, batch, hidden]; restore batch order.
        hout = hout[:, perm_idx_inv].to(device)
        cout = cout[:, perm_idx_inv].to(device)
        return wenc, hout, cout
    else:
        return wenc
19,443 | import torch.nn.functional as F
import torch.optim.lr_scheduler
import numpy as np
from uer.models.model import Model
from uer.model_builder import build_model
from uer.layers.layer_norm import LayerNorm
from uer.utils.act_fun import gelu
import torch.nn as nn
from torch.autograd import Variable
from matplotlib.pylab import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def encode_hpu(lstm, wemb_hpu, l_hpu, l_hs, U=None, V=None, ctx=None):
    """Encode header tokens by mean-pooling, then re-pack per question.

    `lstm`, `l_hpu`, `U`, `V` and `ctx` are unused -- they are kept only
    for interface compatibility with the previous LSTM-based encoder.

    Args:
        wemb_hpu: [total_num_headers, max_header_token_len, dim] embeddings,
            one row per header across the whole batch.
        l_hpu: per-header token lengths (unused; the mean also averages
            padded positions -- NOTE(review): confirm padding is zero).
        l_hs: number of headers belonging to each question.

    Returns:
        [len(l_hs), max(l_hs), dim] tensor on `device`; row i holds
        question i's header encodings, zero-padded past l_hs[i].
    """
    # [total_headers, dim]: collapse the token axis by averaging.
    wenc_hpu = torch.mean(wemb_hpu, dim=1)
    hS = wenc_hpu.size(-1)
    # Re-pack the flat header rows into a per-question padded tensor.
    wenc_hs = wenc_hpu.new_zeros(len(l_hs), max(l_hs), hS).to(device)
    st = 0
    for i, l_hs1 in enumerate(l_hs):
        wenc_hs[i, :l_hs1] = wenc_hpu[st:(st + l_hs1)]
        st += l_hs1
    return wenc_hs
19,444 | import torch.nn.functional as F
import torch.optim.lr_scheduler
import numpy as np
from uer.models.model import Model
from uer.model_builder import build_model
from uer.layers.layer_norm import LayerNorm
from uer.utils.act_fun import gelu
import torch.nn as nn
from torch.autograd import Variable
from matplotlib.pylab import *
def drop_sequence_sharedmask(inputs, dropout, batch_first=True):
    """Apply dropout with a single mask shared across all time steps.

    One Bernoulli keep-mask of shape [batch, hidden] is drawn, rescaled by
    1/(1 - dropout) (inverted dropout), and broadcast over the sequence
    dimension, so a dropped unit is dropped at every position.

    Args:
        inputs: [batch, seq, hidden] if batch_first else [seq, batch, hidden].
        dropout: drop probability in [0, 1).
        batch_first: layout of `inputs`; the output keeps the same layout.

    Returns:
        Tensor with the same shape and layout as `inputs`.
    """
    if batch_first:
        inputs = inputs.transpose(0, 1)
    seq_length, batch_size, hidden_size = inputs.size()
    # Keep-probability per (batch, hidden) unit; `.data.new` keeps dtype/device.
    drop_masks = inputs.data.new(batch_size, hidden_size).fill_(1 - dropout)
    drop_masks = Variable(torch.bernoulli(drop_masks), requires_grad=False)
    drop_masks = drop_masks / (1 - dropout)  # inverted-dropout rescaling
    # [batch, hidden] -> [seq, batch, hidden]: identical mask at every step.
    drop_masks = torch.unsqueeze(drop_masks, dim=2).expand(-1, -1, seq_length).permute(2, 0, 1)
    inputs = inputs * drop_masks
    # Bug fix: only transpose back when we transposed on the way in; the
    # original unconditionally returned batch-first output even for
    # batch_first=False inputs, silently changing their layout.
    return inputs.transpose(1, 0) if batch_first else inputs
19,445 | import tensorflow as tf
import numpy as np
def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
                                    initializer=None, regularizer=None,
                                    trainable=True,
                                    *args, **kwargs):
    """Custom variable getter that keeps trainable variables in float32
    storage and casts them to the requested compute precision (the standard
    mixed-precision recipe).
    """
    # Non-trainable variables are stored in whatever dtype was requested.
    if trainable:
        storage_dtype = tf.float32
    else:
        storage_dtype = dtype
    variable = getter(name, shape,
                      dtype=storage_dtype,
                      initializer=initializer,
                      regularizer=regularizer,
                      trainable=trainable,
                      *args, **kwargs)
    needs_cast = trainable and dtype != tf.float32
    return tf.cast(variable, dtype) if needs_cast else variable
def get_custom_getter(compute_type):
    """Return the float32-storage variable getter when computing in
    float16, otherwise None (use the default getter)."""
    if compute_type == tf.float16:
        return float32_variable_storage_getter
    return None
19,446 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import json
import math
import os
import random
from transformers import RobertaTokenizer
import modeling
import optimization
import numpy as np
from os.path import join as pjoin
from seqeval.metrics import (
precision_score,
recall_score,
f1_score,
classification_report,
)
import tokenization
import six
import tensorflow as tf
class InputExample(object):
    """A single training/test example for simple sequence classification.

    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 qas_id,
                 words,
                 labels,
                 boxes,
                 pos_boxes,
                 file_name):
        self.qas_id = qas_id        # unique example id
        self.words = words          # word strings, one per token
        self.labels = labels        # per-word tag strings (e.g. "B-ORG")
        self.boxes = boxes          # per-word [x0, y0, x1, y1] boxes
        self.pos_boxes = pos_boxes  # per-word positional box index
        self.file_name = file_name  # source document name

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Bug fixes: `labels` was referenced without `self.` (NameError)
        # and the joined label string was formatted with %d (TypeError).
        s = ""
        s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
        s += ", words: %s" % (
            tokenization.printable_text(" ".join(self.words)))
        if self.labels:
            s += ", labels: %s" % (" ".join(self.labels))
        return s
The provided code snippet includes necessary dependencies for implementing the `read_funsd_examples` function. Write a Python function `def read_funsd_examples(input_file, is_training)` to solve the following problem:
Read a SQuAD json file into a list of SquadExample.
Here is the function:
def read_funsd_examples(input_file, is_training):
    """Read FUNSD-style IOB files into a list of InputExample.

    Reads two aligned files: "<input_file>.txt" ("word<TAB>label" lines) and
    "<input_file>_box.txt" ("word<TAB>box<TAB>pos_box" lines). Documents are
    separated by blank lines and/or "-DOCSTART- <file_name>" markers.

    Bug fix: the document name parsed from a "-DOCSTART-" line is now
    attached to the document that FOLLOWS the marker; previously it was
    attached to the preceding document, and the name on the very first
    marker was dropped entirely.

    Args:
        input_file: path prefix of the data files (without extension).
        is_training: unused; kept for interface compatibility (the original
            computed a train/test mode string from it but never used it).

    Returns:
        list of InputExample, one per document.
    """
    file_path = input_file + ".txt"
    box_file_path = input_file + "_box.txt"
    guid_index = 1
    examples = []
    with open(file_path, encoding="utf-8") as f, open(
        box_file_path, encoding="utf-8"
    ) as fb:
        words = []
        labels = []
        boxes = []
        pos_boxes = []
        file_name = ""
        for line, bline in zip(f, fb):
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                # Document boundary: flush anything accumulated so far.
                if words:
                    examples.append(
                        InputExample(
                            qas_id=guid_index,
                            words=words,
                            labels=labels,
                            boxes=boxes,
                            pos_boxes=pos_boxes,
                            file_name=file_name,
                        )
                    )
                    guid_index += 1
                    words = []
                    boxes = []
                    labels = []
                    pos_boxes = []
                if line.startswith("-DOCSTART-"):
                    # The name applies to the upcoming document.
                    file_name = line.strip().split()[-1]
            else:
                splits = line.split("\t")
                bsplits = bline.split("\t")
                assert len(splits) == 2
                assert len(bsplits) == 3
                assert splits[0] == bsplits[0]
                words.append(splits[0])
                if len(splits) > 1:
                    labels.append(splits[-1].replace("\n", ""))
                    # box column is "x0 y0 x1 y1" -> [x0, y0, x1, y1]
                    box = bsplits[-2].replace("\n", "")
                    box = [int(b) for b in box.split()]
                    boxes.append(box)
                    pos_box = int(bsplits[-1].strip("\n"))
                    pos_boxes.append(pos_box)
                else:
                    # Examples could have no label for mode = "test"
                    labels.append("O")
        # Flush the trailing document (no terminating separator line).
        if words:
            examples.append(
                InputExample(
                    qas_id=guid_index,
                    words=words,
                    labels=labels,
                    boxes=boxes,
                    pos_boxes=pos_boxes,
                    file_name=file_name)
            )
    return examples
19,447 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import json
import math
import os
import random
from transformers import RobertaTokenizer
import modeling
import optimization
import numpy as np
from os.path import join as pjoin
from seqeval.metrics import (
precision_score,
recall_score,
f1_score,
classification_report,
)
import tokenization
import six
import tensorflow as tf
tf.set_random_seed(SEED)
tf.random.set_random_seed(SEED)
FLAGS = flags.FLAGS
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
class InputFeatures(object):
    """A single set of features of data.

    One fully padded model input produced from an InputExample by
    convert_examples_to_features; all sequence fields have length
    max_seq_length.
    """

    def __init__(self,
                 example_index,
                 input_ids,
                 input_mask,
                 segment_ids,
                 label_ids,
                 pos_x0,
                 pos_y0,
                 pos_x1,
                 pos_y1,
                 pos_boxes,
                 file_name):
        self.example_index = example_index  # index of the source example
        self.input_ids = input_ids  # wordpiece ids, padded
        self.input_mask = input_mask  # mask over real tokens vs. padding
        self.segment_ids = segment_ids  # token type ids
        self.label_ids = label_ids  # per-token label ids
        self.pos_x0 = pos_x0  # per-token bounding-box coordinate channels
        self.pos_y0 = pos_y0
        self.pos_x1 = pos_x1
        self.pos_y1 = pos_y1
        # NOTE(review): parameter is `pos_boxes` but the attribute is
        # `pos_bbox` -- presumably serialized under "pos_bbox" (the model_fn
        # reads that key); verify against the feature writer before renaming.
        self.pos_bbox = pos_boxes
        self.file_name = file_name  # source document name
The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, output_fn, label_list, cls_token_box=[0,0,0,0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label_id=-1)` to solve the following problem:
Loads a data file into a list of `InputBatch`s.
Here is the function:
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training,
                                 output_fn, label_list, cls_token_box=[0,0,0,0],
                                 sep_token_box=[1000, 1000, 1000, 1000],
                                 pad_token_box=[0, 0, 0, 0], pad_token_label_id=-1):
    """Converts InputExamples into padded InputFeatures and streams them out.

    For each example: tokenize word by word (the first sub-token keeps the
    word's label id, the rest get `pad_token_label_id`), truncate to
    max_seq_length - 2, wrap with [CLS]/[SEP] (or <s>/</s> for RoBERTa),
    pad everything to `max_seq_length`, and hand each InputFeatures to
    `output_fn`.

    Notes:
        * `doc_stride`, `max_query_length` and `is_training` are unused;
          kept for interface compatibility with the SQuAD pipeline this was
          derived from. The mutable default boxes are read-only, so they
          are harmless.
        * Bug fix: the padding loop previously appended `pad_index` to
          `input_mask` and `segment_ids`, which is 1 for RoBERTa (its
          <pad> token id). Padding positions must always be masked with 0.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    #pad_token_label_id = label_map[label_list[-1]]
    cnt_bad = 0  # running count of examples that required truncation
    for (example_index, example) in enumerate(examples):
        if example_index % 10000 == 0:
            tf.logging.info("Writing example %d of %d", example_index, len(examples))
        token_boxes = []
        tokens = []
        label_ids = []
        token_pos_boxes = []
        for word, label, box, pos_box in zip(example.words, example.labels, example.boxes, example.pos_boxes):
            if FLAGS.use_roberta:
                # Prefix a dummy word so this word gets the mid-sentence
                # (space-prefixed) BPE segmentation, then drop the dummy.
                word_tokens = tokenizer.tokenize("pad "+word)[1:]
            else:
                word_tokens = tokenizer.tokenize(word)
            tokens.extend(word_tokens)
            # Every sub-token shares the word's box / positional box index.
            token_boxes.extend([box] * len(word_tokens))
            token_pos_boxes.extend([pos_box] * len(word_tokens))
            # Only the first sub-token carries the word's real label.
            label_ids.extend(
                [label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)
            )
        # Reserve room for the two special tokens, truncating if needed.
        special_tokens_count = 2
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[: (max_seq_length - special_tokens_count)]
            token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
            token_pos_boxes = token_pos_boxes[: (max_seq_length - special_tokens_count)]
            label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            cnt_bad += 1
            tf.logging.info("bad case %d" % cnt_bad)
        # Trailing separator token.
        if FLAGS.use_roberta:
            tokens += ["</s>"]
        else:
            tokens += ["[SEP]"]
        token_boxes += [sep_token_box]
        token_pos_boxes += [0]
        label_ids += [pad_token_label_id]
        segment_ids = [0] * len(tokens)
        # Leading classifier token.
        if FLAGS.use_roberta:
            tokens = ["<s>"] + tokens
        else:
            tokens = ["[CLS]"] + tokens
        token_boxes = [cls_token_box] + token_boxes
        label_ids = [pad_token_label_id] + label_ids
        segment_ids = [0] + segment_ids
        token_pos_boxes = [0] + token_pos_boxes
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        # RoBERTa's <pad> token id is 1; BERT's [PAD] id is 0.
        pad_index = 1 if FLAGS.use_roberta else 0
        while len(input_ids) < max_seq_length:
            input_ids.append(pad_index)
            input_mask.append(0)
            segment_ids.append(0)
            label_ids.append(pad_token_label_id)
            token_boxes.append(pad_token_box)
            token_pos_boxes.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        assert len(token_boxes) == max_seq_length
        assert len(token_pos_boxes) == max_seq_length
        # Split the boxes into coordinate channels for the feature dict.
        pos_x0 = []
        pos_y0 = []
        pos_x1 = []
        pos_y1 = []
        for each in token_boxes:
            pos_x0.append(each[0])
            pos_y0.append(each[1])
            pos_x1.append(each[2])
            pos_y1.append(each[3])
        if example_index < 5:
            tf.logging.info("*** Example ***")
            tf.logging.info("example_index: %s" % (example_index))
            tf.logging.info("tokens: %s" % " ".join(
                [tokenization.printable_text(x) for x in tokens]))
            tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            tf.logging.info(
                "input_mask: %s" % " ".join([str(x) for x in input_mask]))
            tf.logging.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            tf.logging.info(
                "label_ids: %s" % " ".join([str(x) for x in label_ids]))
            tf.logging.info(
                "boxes: %s" % " ".join([str(x) for x in token_boxes]))
        feature = InputFeatures(
            example_index=example_index,
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            label_ids=label_ids,
            pos_x0=pos_x0,
            pos_y0=pos_y0,
            pos_x1=pos_x1,
            pos_y1=pos_y1,
            pos_boxes=token_pos_boxes,
            file_name=example.file_name)
        # Run callback
        output_fn(feature)
19,448 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import json
import math
import os
import random
from transformers import RobertaTokenizer
import modeling
import optimization
import numpy as np
from os.path import join as pjoin
from seqeval.metrics import (
precision_score,
recall_score,
f1_score,
classification_report,
)
import tokenization
import six
import tensorflow as tf
tf.set_random_seed(SEED)
tf.random.set_random_seed(SEED)
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 use_one_hot_embeddings, pos_x0, pos_y0, pos_x1, pos_y1, pos_bbox, num_labels):
    """Builds the layout-aware BERT tagger and returns per-token label
    logits of shape [batch_size, seq_length, num_labels]."""
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        pos_x0=pos_x0,
        pos_y0=pos_y0,
        pos_x1=pos_x1,
        pos_y1=pos_y1,
        pos_bbox=pos_bbox if FLAGS.use_bbox_pos else None,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        compute_type=tf.float16 if FLAGS.use_fp16 else tf.float32,
        use_roberta=FLAGS.use_roberta,
        use_one_hot_embeddings=use_one_hot_embeddings,
        use_position_embeddings=FLAGS.use_position_embeddings)
    sequence_output = model.get_sequence_output()
    batch_size, seq_length, hidden_size = modeling.get_shape_list(
        sequence_output, expected_rank=3)
    # Token-classification head (variable names kept for checkpoints).
    output_weights = tf.get_variable(
        "cls/squad/output_weights", [num_labels, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02, seed=2222))
    output_bias = tf.get_variable(
        "cls/squad/output_bias", [num_labels], initializer=tf.zeros_initializer())
    # Flatten to 2-D for the projection, then restore the sequence shape.
    flat_output = tf.reshape(sequence_output,
                             [batch_size * seq_length, hidden_size])
    logits = tf.matmul(flat_output, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    return tf.reshape(logits, [batch_size, seq_length, num_labels])
The provided code snippet includes necessary dependencies for implementing the `model_fn_builder` function. Write a Python function `def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, num_labels)` to solve the following problem:
Returns `model_fn` closure for TPUEstimator.
Here is the function:
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings, num_labels):
    """Returns `model_fn` closure for TPUEstimator.

    The closure captures the model/optimizer hyper-parameters; TPUEstimator
    calls it once per mode with the batched `features` to build the graph.
    Only TRAIN and PREDICT modes are supported.
    """
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
        example_index = features["example_index"]
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        # Per-token layout (bounding-box) coordinate channels.
        pos_x0 = features["pos_x0"]
        pos_y0 = features["pos_y0"]
        pos_x1 = features["pos_x1"]
        pos_y1 = features["pos_y1"]
        pos_bbox = features["pos_bbox"]
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        # Per-token label logits: [batch, seq_len, num_labels].
        label_logits = create_model(
            bert_config=bert_config,
            is_training=is_training,
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            use_one_hot_embeddings=use_one_hot_embeddings,
            pos_x0=pos_x0,
            pos_y0=pos_y0,
            pos_x1=pos_x1,
            pos_y1=pos_y1,
            pos_bbox=pos_bbox,
            num_labels=num_labels)
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            # Warm-start matching variables from the pretrained checkpoint.
            (assignment_map, initialized_variable_names
            ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)
        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            # NOTE(review): seq_length is computed but never used below.
            seq_length = modeling.get_shape_list(input_ids)[1]
            label_ids = features["label_ids"]
            def compute_loss(logits, positions, num_labels):
                # Mean negative log-likelihood over all token positions.
                # NOTE(review): pad labels are -1, whose one-hot row is all
                # zeros, so pads add 0 to the sum but still count in the
                # mean's denominator -- confirm this is intended.
                one_hot_positions = tf.one_hot(
                    positions, depth=num_labels, dtype=tf.float32)
                log_probs = tf.nn.log_softmax(logits, axis=-1)
                loss = -tf.reduce_mean(
                    tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
                return loss
            total_loss = compute_loss(label_logits, label_ids, num_labels)
            #train_op = optimization.create_optimizer(
            #    total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            train_op = optimization.create_optimizer(
                total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            logging_hook = tf.train.LoggingTensorHook({"loss": total_loss}, every_n_iter=10)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                training_hooks=[logging_hook],
                scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.PREDICT:
            label_ids = features["label_ids"]
            probabilities = tf.nn.softmax(label_logits, axis=-1)
            predicts = tf.argmax(probabilities, axis=-1)
            predictions = {
                "example_index": example_index,
                "label_logits": label_logits,
                "predicts": predicts,
                "label_ids": label_ids
            }
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
        else:
            raise ValueError(
                "Only TRAIN and PREDICT modes are supported: %s" % (mode))
        return output_spec
    return model_fn
19,449 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import json
import math
import os
import random
from transformers import RobertaTokenizer
import modeling
import optimization
import numpy as np
from os.path import join as pjoin
from seqeval.metrics import (
precision_score,
recall_score,
f1_score,
classification_report,
)
import tokenization
import six
import tensorflow as tf
tf.set_random_seed(SEED)
tf.random.set_random_seed(SEED)
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
The provided code snippet includes necessary dependencies for implementing the `input_fn_builder` function. Write a Python function `def input_fn_builder(input_file, seq_length, is_training, drop_remainder)` to solve the following problem:
Creates an `input_fn` closure to be passed to TPUEstimator.
Here is the function:
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
    """Return an `input_fn` closure for TPUEstimator over a TFRecord file.

    Each record carries int64 sequence features of length `seq_length`
    (token ids, masks, layout coordinates, labels) plus a scalar example
    index; all ints are downcast to int32 for TPU compatibility.
    """
    sequence_keys = [
        "input_ids",
        "input_mask",
        "segment_ids",
        "pos_x0",
        "pos_y0",
        "pos_x1",
        "pos_y1",
        "pos_bbox",
        "label_ids",
    ]
    name_to_features = {"example_index": tf.FixedLenFeature([], tf.int64)}
    for key in sequence_keys:
        name_to_features[key] = tf.FixedLenFeature([seq_length], tf.int64)

    def _decode_record(record):
        """Decodes a record to a TensorFlow example."""
        example = tf.parse_single_example(record, name_to_features)
        # tf.Example only supports tf.int64, but the TPU only supports
        # tf.int32, so cast every int64 tensor down.
        for name in list(example.keys()):
            tensor = example[name]
            if tensor.dtype == tf.int64:
                example[name] = tf.to_int32(tensor)
        return example

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]
        dataset = tf.data.TFRecordDataset(input_file)
        # Training repeats forever; evaluation reads the file once in order.
        if is_training:
            dataset = dataset.repeat()
        dataset = dataset.apply(
            tf.contrib.data.map_and_batch(
                _decode_record,
                batch_size=batch_size,
                drop_remainder=drop_remainder))
        return dataset

    return input_fn
19,450 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import json
import math
import os
import random
from transformers import RobertaTokenizer
import modeling
import optimization
import numpy as np
from os.path import join as pjoin
from seqeval.metrics import (
precision_score,
recall_score,
f1_score,
classification_report,
)
import tokenization
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `_compute_softmax` function. Write a Python function `def _compute_softmax(scores)` to solve the following problem:
Compute softmax probability over raw logits.
Here is the function:
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs | Compute softmax probability over raw logits. |
19,451 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import json
import math
import os
import random
from transformers import RobertaTokenizer
import modeling
import optimization
import numpy as np
from os.path import join as pjoin
from seqeval.metrics import (
precision_score,
recall_score,
f1_score,
classification_report,
)
import tokenization
import six
import tensorflow as tf
FLAGS = flags.FLAGS
The provided code snippet includes necessary dependencies for implementing the `validate_flags_or_throw` function. Write a Python function `def validate_flags_or_throw(bert_config)` to solve the following problem:
Validate the input FLAGS or throw an exception.
Here is the function:
def validate_flags_or_throw(bert_config):
    """Validate the input FLAGS or throw an exception.

    Checks: tokenizer casing is consistent with the checkpoint name, at
    least one of train/predict is requested, the required file flags are
    present, and the sequence-length flags fit the model configuration.
    """
    # Mismatched do_lower_case vs. checkpoint is a common silent error.
    tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                  FLAGS.init_checkpoint)
    if not FLAGS.do_train and not FLAGS.do_predict:
        raise ValueError("At least one of `do_train` or `do_predict` must be True.")
    if FLAGS.do_train:
        if not FLAGS.train_file:
            raise ValueError(
                "If `do_train` is True, then `train_file` must be specified.")
    if FLAGS.do_predict:
        if not FLAGS.predict_file:
            raise ValueError(
                "If `do_predict` is True, then `predict_file` must be specified.")
    # The model's position-embedding table bounds the usable length.
    if FLAGS.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, bert_config.max_position_embeddings))
    # Room must remain for the document tokens and the special tokens.
    if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
        raise ValueError(
            "The max_seq_length (%d) must be greater than max_query_length "
            "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
19,452 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import json
import math
import os
import random
from transformers import RobertaTokenizer
import modeling
import optimization
import numpy as np
from os.path import join as pjoin
from seqeval.metrics import (
precision_score,
recall_score,
f1_score,
classification_report,
)
import tokenization
import six
import tensorflow as tf
def get_labels(path):
    """Load the label list for sequence tagging.

    Args:
        path: path to a text file with one label per line, or falsy to use
            the default CoNLL-style label set.

    Returns:
        list of label strings; "O" is guaranteed present (prepended when
        the file does not contain it).
    """
    if not path:
        return [
            "O",
            "B-MISC",
            "I-MISC",
            "B-PER",
            "I-PER",
            "B-ORG",
            "I-ORG",
            "B-LOC",
            "I-LOC",
        ]
    # Explicit encoding so label files read identically on every platform.
    with open(path, "r", encoding="utf-8") as f:
        labels = f.read().splitlines()
    # "O" (outside) must always be available as label id 0.
    if "O" not in labels:
        labels = ["O"] + labels
    return labels
19,457 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `chars` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False | Checks whether `chars` is a control character. |
19,459 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import copy
import json
import math
import re
import os
import numpy as np
import six
import tensorflow as tf
tf.set_random_seed(SEED)
tf.random.set_random_seed(SEED)
from gpu_environment import get_custom_getter
import my_layers
def gelu(input_tensor):
    """Gaussian Error Linear Unit (exact erf form).

    This is a smoother version of the RELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
        input_tensor: float Tensor to perform activation.

    Returns:
        `input_tensor` with the GELU activation applied.
    """
    # Exact GELU: x * Phi(x), with Phi the standard normal CDF via erf.
    # (The tanh approximation previously sat after the return as an
    # unreachable string literal; that dead code has been removed.)
    cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * cdf
def geluerf(x):
    """Gaussian Error Linear Unit (exact erf form).

    This is a smoother version of the RELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
        x: float Tensor to perform activation.

    Returns:
        `x` with the GELU activation applied.
    """
    erf_term = tf.math.erf(x / math.sqrt(2.0))
    return x * 0.5 * (1.0 + erf_term)
The provided code snippet includes necessary dependencies for implementing the `get_activation` function. Write a Python function `def get_activation(activation_string)` to solve the following problem:
Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation.
Here is the function:
def get_activation(activation_string):
    """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.

    Args:
        activation_string: String name of the activation function.

    Returns:
        A Python function corresponding to the activation function. If
        `activation_string` is None, empty, or "linear", this will return
        None. If `activation_string` is not a string, it will return
        `activation_string` unchanged.

    Raises:
        ValueError: The `activation_string` does not correspond to a known
            activation.
    """
    # Anything that's not a string is assumed to already be an activation
    # function and is passed through untouched.
    if not isinstance(activation_string, six.string_types):
        return activation_string
    if not activation_string:
        return None
    act = activation_string.lower()
    dispatch = {
        "linear": None,
        "relu": tf.nn.relu,
        "gelu": gelu,
        "geluerf": geluerf,
        "tanh": tf.tanh,
    }
    if act not in dispatch:
        raise ValueError("Unsupported activation: %s" % act)
    return dispatch[act]
19,460 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import copy
import json
import math
import re
import os
import numpy as np
import six
import tensorflow as tf
tf.set_random_seed(SEED)
tf.random.set_random_seed(SEED)
from gpu_environment import get_custom_getter
import my_layers
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None, use_roberta=False):
    """Runs layer normalization followed by dropout."""
    normed = layer_norm(input_tensor, name=name, use_roberta=use_roberta)
    return dropout(normed, dropout_prob)
def create_initializer(initializer_range=0.02):
    """Creates a `truncated_normal_initializer` with the given range.

    The fixed seed (2222) makes variable initialization deterministic
    across runs.
    """
    return tf.truncated_normal_initializer(stddev=initializer_range, seed=2222)
def position_embedding_lookup(input_ids,
                              max_position_length,
                              embedding_size=128,
                              initializer_range=0.02,
                              word_embedding_name="",
                              use_one_hot_embeddings=False):
    """Looks up position embeddings for an id tensor.

    Args:
        input_ids: int32 Tensor of shape [batch_size, seq_length] containing
            position ids.
        max_position_length: int. Number of rows in the embedding table.
        embedding_size: int. Width of the embeddings.
        initializer_range: float. Embedding initialization range.
        word_embedding_name: string. Name of the embedding table.
        use_one_hot_embeddings: bool. If True, use the one-hot matmul lookup
            (better on TPUs); otherwise use `tf.nn.embedding_lookup()`.

    Returns:
        (float Tensor of shape [batch_size, seq_length, embedding_size],
         the embedding table variable).
    """
    # This function assumes that the input is of shape [batch_size,
    # seq_length, num_inputs]; a 2D [batch_size, seq_length] input is
    # reshaped to [batch_size, seq_length, 1].
    if input_ids.shape.ndims == 2:
        input_ids = tf.expand_dims(input_ids, axis=[-1])
    embedding_table = tf.get_variable(
        name=word_embedding_name,
        shape=[max_position_length, embedding_size],
        initializer=create_initializer(initializer_range))
    if use_one_hot_embeddings:
        flat_input_ids = tf.reshape(input_ids, [-1])
        # Bug fix: the one-hot depth must match the table size; the original
        # referenced an undefined `vocab_size` (NameError on this branch).
        one_hot_input_ids = tf.one_hot(flat_input_ids, depth=max_position_length)
        output = tf.matmul(one_hot_input_ids, embedding_table)
    else:
        output = tf.nn.embedding_lookup(embedding_table, input_ids)
    input_shape = get_shape_list(input_ids)
    output = tf.reshape(output,
                        input_shape[0:-1] + [input_shape[-1] * embedding_size])
    return (output, embedding_table)
def get_shape_list(tensor, expected_rank=None, name=None):
    """Returns the shape of `tensor` as a list, preferring static dimensions.

    Static dimensions come back as Python ints; unknown (None) dimensions
    are replaced with the matching `tf.shape` scalar Tensor.

    Args:
        tensor: a tf.Tensor whose shape is wanted.
        expected_rank: (optional) int or list of ints. If given, the
            tensor's rank is validated via `assert_rank` first.
        name: optional tensor name used in error messages.

    Returns:
        A list mixing Python ints (static dims) and scalar Tensors
        (dynamic dims).
    """
    if name is None:
        name = tensor.name
    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)

    static_shape = tensor.shape.as_list()
    dynamic_axes = [axis for axis, size in enumerate(static_shape)
                    if size is None]
    if not dynamic_axes:
        return static_shape

    runtime_shape = tf.shape(tensor)
    for axis in dynamic_axes:
        static_shape[axis] = runtime_shape[axis]
    return static_shape
The provided code snippet includes necessary dependencies for implementing the `embedding_postprocessor` function. Write a Python function `def embedding_postprocessor(input_tensor, pos_x0, pos_y0, pos_x1, pos_y1, pos_bbox, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=False, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1, use_roberta=False, max_position_length=1001, max_bbox_length=201, structure_position_embedding_name="structure_position_embeddings")` to solve the following problem:
Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid.
Here is the function:
def embedding_postprocessor(input_tensor,
                            pos_x0,
                            pos_y0,
                            pos_x1,
                            pos_y1,
                            pos_bbox,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=False,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1,
                            use_roberta=False,
                            max_position_length=1001,
                            max_bbox_length=201,
                            structure_position_embedding_name="structure_position_embeddings"):
    """Performs various post-processing on a word embedding tensor.

    Adds (1) optional token-type embeddings, (2) four structural position
    embeddings (x0/y0/x1/y1 coordinate ids) plus an optional bbox embedding,
    and (3) an optional 1-D learned position embedding; then layer-norm +
    dropout.

    Args:
        input_tensor: float Tensor of shape [batch_size, seq_length,
            embedding_size].
        pos_x0, pos_y0, pos_x1, pos_y1: int32 id Tensors of shape
            [batch_size, seq_length]; each is embedded via
            `position_embedding_lookup` and added to the output.
            # presumably quantized box-corner coordinates — TODO confirm
            # against the caller.
        pos_bbox: optional int32 id Tensor embedded against a table of
            `max_bbox_length` rows; skipped when None.
        use_token_type: bool. Whether to add embeddings for `token_type_ids`.
        token_type_ids: (optional) int32 Tensor of shape
            [batch_size, seq_length]. Must be specified if `use_token_type`
            is True.
        token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
        token_type_embedding_name: string. The name of the embedding table
            variable for token type ids.
        use_position_embeddings: bool. Whether to additionally add classic
            1-D position embeddings.
        position_embedding_name: string. The name of the embedding table
            variable for positional embeddings.
        initializer_range: float. Range of the weight initialization.
        max_position_embeddings: int. Maximum sequence length that might
            ever be used with this model. Can be longer than the sequence
            length of input_tensor, but cannot be shorter.
        dropout_prob: float. Dropout probability for the final output tensor.
        use_roberta: bool. RoBERTa mode: skips adding token-type embeddings
            and offsets position ids by 2.
        max_position_length: int. Table size for the x/y coordinate
            embeddings.
        max_bbox_length: int. Table size for the bbox embedding.
        structure_position_embedding_name: currently unused by this function.

    Returns:
        Tuple of (output tensor with same shape as `input_tensor`,
        pos_x0/pos_y0/pos_x1/pos_y1 embedding tables, bbox embedding table
        or None).

    Raises:
        ValueError: One of the tensor shapes or input values is invalid.
    """
    input_shape = get_shape_list(input_tensor, expected_rank=3)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    width = input_shape[2]

    output = input_tensor

    if use_token_type:
        if token_type_ids is None:
            raise ValueError("`token_type_ids` must be specified if"
                             "`use_token_type` is True.")
        token_type_table = tf.get_variable(
            name=token_type_embedding_name,
            shape=[token_type_vocab_size, width],
            initializer=create_initializer(initializer_range))
        # This vocab will be small so we always do one-hot here, since it is
        # always faster for a small vocabulary.
        flat_token_type_ids = tf.reshape(token_type_ids, [-1])
        one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
        token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
        token_type_embeddings = tf.reshape(token_type_embeddings,
                                           [batch_size, seq_length, width])
        # RoBERTa has no segment embeddings: the table is still created
        # (keeping variable creation stable) but the addition is skipped.
        if not use_roberta:
            output += token_type_embeddings

    # NOTE(review): `if False:` is a deliberately disabled branch preserving
    # the original BERT 1-D position embedding; only the `else` (structural
    # position embedding) branch ever runs.
    if False:
        # original position embedding
        pos_x0_embedding_table, pos_y0_embedding_table, pos_x1_embedding_table, pos_y1_embedding_table, bbox_embedding_table = [None] * 5
        assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
        with tf.control_dependencies([assert_op]):
            full_position_embeddings = tf.get_variable(
                name=position_embedding_name,
                shape=[max_position_embeddings, width],
                initializer=create_initializer(initializer_range))
            # Since the position embedding table is a learned variable, we
            # create it using a (long) sequence length
            # `max_position_embeddings`. The actual sequence length might be
            # shorter than this, for faster training of tasks that do not
            # have long sequences.
            #
            # So `full_position_embeddings` is effectively an embedding table
            # for position [0, 1, 2, ..., max_position_embeddings-1], and the
            # current sequence has positions [0, 1, 2, ... seq_length-1], so
            # we can just perform a slice.
            pos_start = 2 if use_roberta else 0  # RoBERTa reserves ids 0-1
            position_embeddings = tf.slice(full_position_embeddings, [pos_start, 0],
                                           [seq_length, -1])
            num_dims = len(output.shape.as_list())

            # Only the last two dimensions are relevant (`seq_length` and
            # `width`), so we broadcast among the first dimensions, which is
            # typically just the batch size.
            position_broadcast_shape = []
            for _ in range(num_dims - 2):
                position_broadcast_shape.append(1)
            position_broadcast_shape.extend([seq_length, width])
            position_embeddings = tf.reshape(position_embeddings,
                                             position_broadcast_shape)
            output += position_embeddings
    else:
        # structure position embedding: one learned table per coordinate.
        assert_op = tf.assert_less_equal(seq_length, max_position_length)
        with tf.control_dependencies([assert_op]):
            (pos_x0_embedding_output, pos_x0_embedding_table) = position_embedding_lookup(
                input_ids=pos_x0,
                max_position_length=max_position_length,
                embedding_size=width,
                initializer_range=initializer_range,
                word_embedding_name="pos_x0_embeddings",
                use_one_hot_embeddings=False)
            output += pos_x0_embedding_output
            (pos_y0_embedding_output, pos_y0_embedding_table) = position_embedding_lookup(
                input_ids=pos_y0,
                max_position_length=max_position_length,
                embedding_size=width,
                initializer_range=initializer_range,
                word_embedding_name="pos_y0_embeddings",
                use_one_hot_embeddings=False)
            output += pos_y0_embedding_output
            (pos_x1_embedding_output, pos_x1_embedding_table) = position_embedding_lookup(
                input_ids=pos_x1,
                max_position_length=max_position_length,
                embedding_size=width,
                initializer_range=initializer_range,
                word_embedding_name="pos_x1_embeddings",
                use_one_hot_embeddings=False)
            output += pos_x1_embedding_output
            (pos_y1_embedding_output, pos_y1_embedding_table) = position_embedding_lookup(
                input_ids=pos_y1,
                max_position_length=max_position_length,
                embedding_size=width,
                initializer_range=initializer_range,
                word_embedding_name="pos_y1_embeddings",
                use_one_hot_embeddings=False)
            output += pos_y1_embedding_output
            # bbox embedding is optional; its table is None when absent so
            # the return signature stays uniform.
            if pos_bbox is not None:
                (bbox_embedding_output, bbox_embedding_table) = position_embedding_lookup(
                    input_ids=pos_bbox,
                    max_position_length=max_bbox_length,
                    embedding_size=width,
                    initializer_range=initializer_range,
                    word_embedding_name="bbox_embeddings",
                    use_one_hot_embeddings=False)
                output += bbox_embedding_output
            else:
                bbox_embedding_table = None

            # Classic 1-D position embedding can still be layered on top of
            # the structural ones.
            if use_position_embeddings:
                full_position_embeddings = tf.get_variable(
                    name=position_embedding_name,
                    shape=[max_position_embeddings, width],
                    initializer=create_initializer(initializer_range))
                pos_start = 2 if use_roberta else 0  # RoBERTa reserves ids 0-1
                position_embeddings = tf.slice(full_position_embeddings, [pos_start, 0],
                                               [seq_length, -1])
                num_dims = len(output.shape.as_list())
                position_broadcast_shape = []
                for _ in range(num_dims - 2):
                    position_broadcast_shape.append(1)
                position_broadcast_shape.extend([seq_length, width])
                position_embeddings = tf.reshape(position_embeddings,
                                                 position_broadcast_shape)
                output += position_embeddings

    output = layer_norm_and_dropout(output, dropout_prob, use_roberta=use_roberta)
    return output, pos_x0_embedding_table, pos_y0_embedding_table, pos_x1_embedding_table, pos_y1_embedding_table, bbox_embedding_table
19,461 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import copy
import json
import math
import re
import os
import numpy as np
import six
import tensorflow as tf
tf.set_random_seed(SEED)
tf.random.set_random_seed(SEED)
from gpu_environment import get_custom_getter
import my_layers
def get_shape_list(tensor, expected_rank=None, name=None):
    """Returns the shape of `tensor` as a list, preferring static dimensions.

    Static dimensions come back as Python ints; unknown (None) dimensions
    are replaced with the matching `tf.shape` scalar Tensor.

    Args:
        tensor: a tf.Tensor whose shape is wanted.
        expected_rank: (optional) int or list of ints. If given, the
            tensor's rank is validated via `assert_rank` first.
        name: optional tensor name used in error messages.

    Returns:
        A list mixing Python ints (static dims) and scalar Tensors
        (dynamic dims).
    """
    if name is None:
        name = tensor.name
    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)

    static_shape = tensor.shape.as_list()
    dynamic_axes = [axis for axis, size in enumerate(static_shape)
                    if size is None]
    if not dynamic_axes:
        return static_shape

    runtime_shape = tf.shape(tensor)
    for axis in dynamic_axes:
        static_shape[axis] = runtime_shape[axis]
    return static_shape
The provided code snippet includes necessary dependencies for implementing the `create_attention_mask_from_input_mask` function. Write a Python function `def create_attention_mask_from_input_mask(from_tensor, to_mask)` to solve the following problem:
Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length].
Here is the function:
def create_attention_mask_from_input_mask(from_tensor, to_mask):
    """Builds a 3D attention mask from a 2D padding mask.

    Args:
        from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
        to_mask: int32 Tensor of shape [batch_size, to_seq_length]; 1 marks
            real tokens, 0 marks padding.

    Returns:
        float Tensor of shape [batch_size, from_seq_length, to_seq_length].
    """
    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    batch_size, from_seq_length = from_shape[0], from_shape[1]
    to_seq_length = get_shape_list(to_mask, expected_rank=2)[1]

    to_mask = tf.cast(
        tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

    # We don't assume `from_tensor` is a mask. Attending *from* padding
    # positions is harmless (only attending *to* padding must be blocked),
    # so the from-side factor is all ones:
    # `broadcast_ones` = [batch_size, from_seq_length, 1].
    broadcast_ones = tf.ones(
        shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

    # Broadcasting the two factors yields the full [B, F, T] mask.
    return broadcast_ones * to_mask
19,462 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfdeterminism import patch
import collections
import copy
import json
import math
import re
import os
import numpy as np
import six
import tensorflow as tf
tf.set_random_seed(SEED)
tf.random.set_random_seed(SEED)
from gpu_environment import get_custom_getter
import my_layers
def gelu(input_tensor):
    """Gaussian Error Linear Unit (GELU) activation.

    A smoother version of the ReLU; see the original paper:
    https://arxiv.org/abs/1606.08415

    Args:
        input_tensor: float Tensor to perform activation on.

    Returns:
        `input_tensor` with the exact (erf-based) GELU applied.
    """
    # An unreachable string literal holding the tanh approximation
    #   cdf = 0.5 * (1.0 + tf.tanh(
    #       (np.sqrt(2 / np.pi) * (input_tensor + 0.044715 * tf.pow(input_tensor, 3)))))
    # previously dangled after the return statement; it was dead code and is
    # preserved here as a comment for reference only.
    cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * cdf
def dropout(input_tensor, dropout_prob):
    """Applies dropout with drop probability `dropout_prob`.

    Note that `dropout_prob` is the probability of *dropping* a value,
    unlike `tf.nn.dropout`, whose argument is the keep probability.

    Args:
        input_tensor: float Tensor.
        dropout_prob: Python float drop probability; None or 0.0 disables
            dropout entirely.

    Returns:
        `input_tensor` unchanged when dropout is disabled, otherwise a
        dropped-out version of it.
    """
    if dropout_prob is None or dropout_prob == 0.0:
        return input_tensor
    keep_prob = 1.0 - dropout_prob
    return tf.nn.dropout(input_tensor, keep_prob)
def layer_norm(input_tensor, use_roberta, name=None):
    """Layer-normalizes the last dimension of `input_tensor`.

    Dispatches to `my_layers.layer_norm` for RoBERTa checkpoints and to
    `tf.contrib.layers.layer_norm` otherwise; both receive identical
    arguments.
    """
    if use_roberta:
        norm_fn = my_layers.layer_norm
    else:
        norm_fn = tf.contrib.layers.layer_norm
    return norm_fn(
        inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def create_initializer(initializer_range=0.02):
    """Creates a `truncated_normal_initializer` with the given range.

    The random seed is fixed (2222) so weight initialization is
    reproducible across runs.

    Args:
        initializer_range: float. Stddev of the truncated normal.
    """
    return tf.truncated_normal_initializer(stddev=initializer_range, seed=2222)
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    size_per_head=512,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    do_return_2d_tensor=False,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
    """Performs multi-headed attention from `from_tensor` to `to_tensor`.

    This is an implementation of multi-headed attention based on "Attention
    is all you Need". If `from_tensor` and `to_tensor` are the same, then
    this is self-attention. Each timestep in `from_tensor` attends to the
    corresponding sequence in `to_tensor`, and returns a fixed-width vector.

    This function first projects `from_tensor` into a "query" tensor and
    `to_tensor` into "key" and "value" tensors. These are (effectively) a
    list of tensors of length `num_attention_heads`, where each tensor is of
    shape [batch_size, seq_length, size_per_head].

    Then, the query and key tensors are dot-producted and scaled. These are
    softmaxed to obtain attention probabilities. The value tensors are then
    interpolated by these probabilities, then concatenated back to a single
    tensor and returned.

    In practice, the multi-headed attention are done with transposes and
    reshapes rather than actual separate tensors.

    Args:
        from_tensor: float Tensor of shape [batch_size, from_seq_length,
            from_width].
        to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
        attention_mask: (optional) int32 Tensor of shape [batch_size,
            from_seq_length, to_seq_length]. The values should be 1 or 0. The
            attention scores will effectively be set to -infinity for any
            positions in the mask that are 0, and will be unchanged for
            positions that are 1.
        num_attention_heads: int. Number of attention heads.
        size_per_head: int. Size of each attention head.
        query_act: (optional) Activation function for the query transform.
        key_act: (optional) Activation function for the key transform.
        value_act: (optional) Activation function for the value transform.
        attention_probs_dropout_prob: (optional) float. Dropout probability
            of the attention probabilities.
        initializer_range: float. Range of the weight initializer.
        do_return_2d_tensor: bool. If True, the output will be of shape
            [batch_size * from_seq_length, num_attention_heads * size_per_head].
            If False, the output will be of shape [batch_size, from_seq_length,
            num_attention_heads * size_per_head].
        batch_size: (Optional) int. If the input is 2D, this might be the
            batch size of the 3D version of the `from_tensor` and `to_tensor`.
        from_seq_length: (Optional) If the input is 2D, this might be the seq
            length of the 3D version of the `from_tensor`.
        to_seq_length: (Optional) If the input is 2D, this might be the seq
            length of the 3D version of the `to_tensor`.

    Returns:
        float Tensor of shape [batch_size, from_seq_length,
        num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
        true, this will be of shape [batch_size * from_seq_length,
        num_attention_heads * size_per_head]).

    Raises:
        ValueError: Any of the arguments or tensor shapes are invalid.
    """

    def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
                             seq_length, width):
        # [B*S, N*H] -> [B, N, S, H]: brings the head axis forward so each
        # head's dot products run as one batched matmul.
        output_tensor = tf.reshape(
            input_tensor, [batch_size, seq_length, num_attention_heads, width])
        output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
        return output_tensor

    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])

    if len(from_shape) != len(to_shape):
        raise ValueError(
            "The rank of `from_tensor` must match the rank of `to_tensor`.")

    if len(from_shape) == 3:
        batch_size = from_shape[0]
        from_seq_length = from_shape[1]
        to_seq_length = to_shape[1]
    elif len(from_shape) == 2:
        if (batch_size is None or from_seq_length is None or to_seq_length is None):
            raise ValueError(
                "When passing in rank 2 tensors to attention_layer, the values "
                "for `batch_size`, `from_seq_length`, and `to_seq_length` "
                "must all be specified.")

    # Scalar dimensions referenced here:
    #   B = batch size (number of sequences)
    #   F = `from_tensor` sequence length
    #   T = `to_tensor` sequence length
    #   N = `num_attention_heads`
    #   H = `size_per_head`

    from_tensor_2d = reshape_to_matrix(from_tensor)
    to_tensor_2d = reshape_to_matrix(to_tensor)

    # `query_layer` = [B*F, N*H]
    query_layer = tf.layers.dense(
        from_tensor_2d,
        num_attention_heads * size_per_head,
        activation=query_act,
        name="query",
        kernel_initializer=create_initializer(initializer_range))

    # `key_layer` = [B*T, N*H]
    key_layer = tf.layers.dense(
        to_tensor_2d,
        num_attention_heads * size_per_head,
        activation=key_act,
        name="key",
        kernel_initializer=create_initializer(initializer_range))

    # `value_layer` = [B*T, N*H]
    value_layer = tf.layers.dense(
        to_tensor_2d,
        num_attention_heads * size_per_head,
        activation=value_act,
        name="value",
        kernel_initializer=create_initializer(initializer_range))

    # `query_layer` = [B, N, F, H]
    query_layer = transpose_for_scores(query_layer, batch_size,
                                       num_attention_heads, from_seq_length,
                                       size_per_head)

    # `key_layer` = [B, N, T, H]
    key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
                                     to_seq_length, size_per_head)

    # Take the dot product between "query" and "key" to get the raw
    # attention scores, scaled by 1/sqrt(H) for numerical stability.
    # `attention_scores` = [B, N, F, T]
    attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
    attention_scores = tf.multiply(attention_scores,
                                   1.0 / math.sqrt(float(size_per_head)))

    if attention_mask is not None:
        # `attention_mask` = [B, 1, F, T]
        attention_mask = tf.expand_dims(attention_mask, axis=[1])

        # Since attention_mask is 1.0 for positions we want to attend and 0.0
        # for masked positions, this operation will create a tensor which is
        # 0.0 for positions we want to attend and -10000.0 for masked
        # positions.
        adder = (1.0 - tf.cast(attention_mask, attention_scores.dtype)) * -10000.0

        # Since we are adding it to the raw scores before the softmax, this
        # is effectively the same as removing these entirely.
        attention_scores += adder

    # Normalize the attention scores to probabilities.
    # `attention_probs` = [B, N, F, T]
    attention_probs = tf.nn.softmax(attention_scores)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attention_probs = dropout(attention_probs, attention_probs_dropout_prob)

    # `value_layer` = [B, T, N, H]
    value_layer = tf.reshape(
        value_layer,
        [batch_size, to_seq_length, num_attention_heads, size_per_head])

    # `value_layer` = [B, N, T, H]
    value_layer = tf.transpose(value_layer, [0, 2, 1, 3])

    # `context_layer` = [B, N, F, H]
    context_layer = tf.matmul(attention_probs, value_layer)

    # `context_layer` = [B, F, N, H]
    context_layer = tf.transpose(context_layer, [0, 2, 1, 3])

    if do_return_2d_tensor:
        # `context_layer` = [B*F, N*H]
        context_layer = tf.reshape(
            context_layer,
            [batch_size * from_seq_length, num_attention_heads * size_per_head])
    else:
        # `context_layer` = [B, F, N*H]
        context_layer = tf.reshape(
            context_layer,
            [batch_size, from_seq_length, num_attention_heads * size_per_head])

    return context_layer
def get_shape_list(tensor, expected_rank=None, name=None):
    """Returns the shape of `tensor` as a list, preferring static dimensions.

    Static dimensions come back as Python ints; unknown (None) dimensions
    are replaced with the matching `tf.shape` scalar Tensor.

    Args:
        tensor: a tf.Tensor whose shape is wanted.
        expected_rank: (optional) int or list of ints. If given, the
            tensor's rank is validated via `assert_rank` first.
        name: optional tensor name used in error messages.

    Returns:
        A list mixing Python ints (static dims) and scalar Tensors
        (dynamic dims).
    """
    if name is None:
        name = tensor.name
    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)

    static_shape = tensor.shape.as_list()
    dynamic_axes = [axis for axis, size in enumerate(static_shape)
                    if size is None]
    if not dynamic_axes:
        return static_shape

    runtime_shape = tf.shape(tensor)
    for axis in dynamic_axes:
        static_shape[axis] = runtime_shape[axis]
    return static_shape
def reshape_to_matrix(input_tensor):
    """Collapses a rank >= 2 tensor into rank 2 (i.e., a matrix).

    All leading dimensions are flattened into the row dimension; the last
    dimension becomes the column dimension. Rank-2 input is returned as-is.

    Raises:
        ValueError: if the tensor has rank < 2.
    """
    ndims = input_tensor.shape.ndims
    if ndims < 2:
        raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                         (input_tensor.shape))
    if ndims == 2:
        return input_tensor

    last_dim = input_tensor.shape[-1]
    return tf.reshape(input_tensor, [-1, last_dim])
def reshape_from_matrix(output_tensor, orig_shape_list):
    """Restores a rank-2 tensor to its original rank >= 2 shape.

    The trailing (width) dimension is read back from `output_tensor` itself;
    the leading dimensions come from `orig_shape_list`. Rank-2 originals are
    returned unchanged.
    """
    if len(orig_shape_list) == 2:
        return output_tensor

    leading_dims = orig_shape_list[0:-1]
    width = get_shape_list(output_tensor)[-1]
    return tf.reshape(output_tensor, leading_dims + [width])
The provided code snippet includes necessary dependencies for implementing the `transformer_model` function. Write a Python function `def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, intermediate_act_fn=gelu, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False, use_roberta=False)` to solve the following problem:
Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid.
Here is the function:
def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      intermediate_act_fn=gelu,
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False,
                      use_roberta=False):
    """Multi-headed, multi-layer Transformer from "Attention is All You Need".

    This is almost an exact implementation of the original Transformer
    encoder.

    See the original paper:
    https://arxiv.org/abs/1706.03762

    Also see:
    https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

    Args:
        input_tensor: float Tensor of shape [batch_size, seq_length,
            hidden_size].
        attention_mask: (optional) int32 Tensor of shape [batch_size,
            seq_length, seq_length], with 1 for positions that can be
            attended to and 0 in positions that should not be.
        hidden_size: int. Hidden size of the Transformer.
        num_hidden_layers: int. Number of layers (blocks) in the Transformer.
        num_attention_heads: int. Number of attention heads in the
            Transformer.
        intermediate_size: int. The size of the "intermediate" (a.k.a., feed
            forward) layer.
        intermediate_act_fn: function. The non-linear activation function to
            apply to the output of the intermediate/feed-forward layer.
        hidden_dropout_prob: float. Dropout probability for the hidden layers.
        attention_probs_dropout_prob: float. Dropout probability of the
            attention probabilities.
        initializer_range: float. Range of the initializer (stddev of
            truncated normal).
        do_return_all_layers: Whether to also return all layers or just the
            final layer.
        use_roberta: bool. Selects the RoBERTa-style layer-norm in the
            residual connections.

    Returns:
        float Tensor of shape [batch_size, seq_length, hidden_size], the
        final hidden layer of the Transformer (or a list of one such tensor
        per layer when `do_return_all_layers` is True).

    Raises:
        ValueError: A Tensor shape or parameter is invalid.
    """
    if hidden_size % num_attention_heads != 0:
        raise ValueError(
            "The hidden size (%d) is not a multiple of the number of attention "
            "heads (%d)" % (hidden_size, num_attention_heads))

    attention_head_size = int(hidden_size / num_attention_heads)
    input_shape = get_shape_list(input_tensor, expected_rank=3)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    input_width = input_shape[2]

    # The Transformer performs sum residuals on all layers so the input needs
    # to be the same as the hidden size.
    if input_width != hidden_size:
        raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
                         (input_width, hidden_size))

    # We keep the representation as a 2D tensor to avoid re-shaping it back
    # and forth from a 3D tensor to a 2D tensor. Re-shapes are normally free
    # on the GPU/CPU but may not be free on the TPU, so we want to minimize
    # them to help the optimizer.
    prev_output = reshape_to_matrix(input_tensor)

    all_layer_outputs = []
    for layer_idx in range(num_hidden_layers):
        with tf.variable_scope("layer_%d" % layer_idx, reuse=False):
            layer_input = prev_output

            with tf.variable_scope("attention"):
                attention_heads = []
                with tf.variable_scope("self"):
                    attention_head = attention_layer(
                        from_tensor=layer_input,
                        to_tensor=layer_input,
                        attention_mask=attention_mask,
                        num_attention_heads=num_attention_heads,
                        size_per_head=attention_head_size,
                        attention_probs_dropout_prob=attention_probs_dropout_prob,
                        initializer_range=initializer_range,
                        do_return_2d_tensor=True,
                        batch_size=batch_size,
                        from_seq_length=seq_length,
                        to_seq_length=seq_length)
                    attention_heads.append(attention_head)

                attention_output = None
                if len(attention_heads) == 1:
                    attention_output = attention_heads[0]
                else:
                    # In the case where we have other sequences, we just
                    # concatenate them to the self-attention head before the
                    # projection.
                    attention_output = tf.concat(attention_heads, axis=-1)

                # Run a linear projection of `hidden_size` then add a residual
                # with `layer_input`.
                with tf.variable_scope("output"):
                    attention_output = tf.layers.dense(
                        attention_output,
                        hidden_size,
                        kernel_initializer=create_initializer(initializer_range))
                    attention_output = dropout(attention_output, hidden_dropout_prob)
                    attention_output = layer_norm(attention_output + layer_input, use_roberta=use_roberta)

            # The activation is only applied to the "intermediate" hidden
            # layer.
            with tf.variable_scope("intermediate"):
                intermediate_output = tf.layers.dense(
                    attention_output,
                    intermediate_size,
                    activation=intermediate_act_fn,
                    kernel_initializer=create_initializer(initializer_range))

            # Down-project back to `hidden_size` then add the residual.
            with tf.variable_scope("output"):
                layer_output = tf.layers.dense(
                    intermediate_output,
                    hidden_size,
                    kernel_initializer=create_initializer(initializer_range))
                layer_output = dropout(layer_output, hidden_dropout_prob)
                layer_output = layer_norm(layer_output + attention_output, use_roberta=use_roberta)
                prev_output = layer_output
                all_layer_outputs.append(layer_output)

    if do_return_all_layers:
        final_outputs = []
        for layer_output in all_layer_outputs:
            final_output = reshape_from_matrix(layer_output, input_shape)
            final_outputs.append(final_output)
        return final_outputs
    else:
        final_output = reshape_from_matrix(prev_output, input_shape)
        return final_output
19,463 | import argparse
import time
import math
import os, sys
import json
import itertools
from typing import Callable, Dict, Iterable, List, Optional, Tuple
import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from torch.utils.data import DataLoader
import torch.nn.functional as F
import numpy as np
from gpu import (
add_gpu_params,
parse_gpu,
distributed_opt,
distributed_gather,
distributed_sync,
cleanup
)
from exp_utils import create_exp_dir
from data_utils import FT_Dataset
from model import GPT2Config, GPT2LMModel
def print_args(args):
    """Print every parsed argument on rank 0, framed by '=' separator lines."""
    if args.rank != 0:
        return
    bar = '=' * 100
    print(bar)
    for name, value in vars(args).items():
        print(' - {} : {}'.format(name, value))
    print(bar)
19,464 | import argparse
import time
import math
import os, sys
import json
import itertools
from typing import Callable, Dict, Iterable, List, Optional, Tuple
import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from torch.utils.data import DataLoader
import torch.nn.functional as F
torch.set_printoptions(threshold=100000)
import numpy as np
from gpu import (
add_gpu_params,
parse_gpu,
distributed_opt,
distributed_gather,
distributed_sync,
cleanup
)
from exp_utils import create_exp_dir
from data_utils import FT_Dataset
from model import GPT2Config, GPT2LMModel
def _reorder_cache(past: Tuple, beam_idx: Tensor) -> Tuple[Tensor]:
return tuple(layer_past.index_select(1, beam_idx).contiguous().detach() for layer_past in past)
def _postprocess_next_token_scores(
    scores,
    history,
    cur_len,
    batch_size,
    num_beams,
    repetition_penalty=1.0,
    no_repeat_ngram_size=4,
    bad_words_ids=None,
    min_length=0,
    max_length=100,
    eos_token_id=None,
):
    """Apply repetition penalty, min-length EOS masking, and n-gram blocking.

    `scores` is mutated in place and also returned. History-dependent
    adjustments are skipped when `history` is None (first decoding step).
    """
    has_history = history is not None
    # CTRL-style repetition penalty (https://arxiv.org/abs/1909.05858).
    if has_history and repetition_penalty != 1.0:
        _enforce_repetition_penalty_(scores, batch_size, num_beams, history, repetition_penalty)
    # scores: (batch_size * num_beams, vocab)
    # Forbid EOS until the minimum generation length has been reached.
    if eos_token_id is not None and cur_len < min_length:
        for eos in eos_token_id:
            scores[:, eos] = -float("inf")
    # Ban tokens that would repeat an n-gram already present in the history
    # (from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).
    if has_history and no_repeat_ngram_size > 0:
        num_hypotheses = batch_size * num_beams
        banned_per_hyp = _calc_banned_ngram_tokens(
            history, num_hypotheses, no_repeat_ngram_size, cur_len
        )
        for hyp_idx, banned_tokens in enumerate(banned_per_hyp):
            scores[hyp_idx, banned_tokens] = -float("inf")
    return scores
def _add_beam_candidate(
    best_score,
    best_sequence,
    batch_size,
    num_beams,
    beam_scores,
    history,
    eos_token_id=None
):
    """Harvest finished hypotheses into `best_score` / `best_sequence`.

    A hypothesis is finished when its last token is in `eos_token_id`
    (or unconditionally when `eos_token_id` is None, used as a final flush).
    Kept hypotheses get their beam score set to -inf so they are never
    expanded again.

    NOTE(review): reads the module-level `args` for `length_penalty`.
    """
    last_tokens = history[:, -1]
    cur_len = history.shape[-1]
    flat_scores = beam_scores.view(-1)  # view: writes go through to beam_scores
    for hyp in range(batch_size * num_beams):
        if eos_token_id is not None and last_tokens[hyp] not in eos_token_id:
            continue
        # Length-normalized hypothesis score.
        score = flat_scores[hyp] / cur_len ** args.length_penalty
        sample = hyp // num_beams
        if sample not in best_score or best_score[sample] < score:
            best_score[sample] = score
            best_sequence[sample][:cur_len] = history[hyp]
        # Kill the finished hypothesis in the live beam set.
        flat_scores[hyp] = -float("inf")
def distributed_gather(args, tensor):
    """All-gather `tensor` from every rank; returns a [world_size, *shape] stack."""
    slots = []
    for _ in range(args.world_size):
        slots.append(torch.zeros_like(tensor))
    torch.distributed.all_gather(slots, tensor, async_op=False)
    return torch.stack(slots)
def distributed_sync(args):
    """Barrier across workers; the Azure platform uses a Horovod allreduce as its barrier."""
    if args.platform != 'azure':
        args.dist.barrier()
    else:
        args.hvd.allreduce(torch.tensor(0), name='barrier')
def beam(model, data_iter, args):
    """Run batched beam-search decoding over `data_iter` and write predictions.

    Each query is tiled `args.beam` times, decoded token-by-token for
    `args.eval_len` steps while re-ordering the KV cache to follow surviving
    beams; finished (EOS) hypotheses are collected via `_add_beam_candidate`.
    Results are gathered across workers and rank 0 writes one JSON line per
    sample to `args.work_dir/args.output_file`.
    """
    model.eval()
    total_loss = 0.
    start_time = time.time()
    all_predictions = {}
    with torch.no_grad():
        for idx, data in enumerate(data_iter):
            data = {key: value for key, value in data.items()}
            _id = data['id'].to(args.device)
            _query = data['query'].to(args.device)
            _query_len = data['query_len'].to(args.device)
            ## local adaptation start.
            ## local adaptation end.
            output = None
            score = None
            batch_size = _id.size(0)
            num_beams = args.beam
            length_penalty = args.length_penalty
            _batch = torch.arange(0, _id.size(0), device=args.device, dtype=torch.long)
            past = None
            len_past = None
            # Tile each query num_beams times: shapes become (batch*beam, seq).
            _query = _query.repeat(1, num_beams).view(batch_size * num_beams, -1)
            _query_len = _query_len.unsqueeze(-1).repeat(1, num_beams).view(-1)
            # Flat batch index per hypothesis, used to index logits below.
            _bbatch = _batch.unsqueeze(-1).repeat(1, num_beams).view(-1)
            # scores for each sentence in the beam
            beam_scores = torch.zeros(
                (batch_size, num_beams), dtype=torch.float, device=_query.device
            )
            best_sequence = torch.zeros(
                (batch_size, args.eval_len), dtype=torch.long, device=_query.device
            )
            best_score = {}
            history = None
            with torch.no_grad():
                for i in range(0, args.eval_len):
                    if i == 0:
                        # First step: full forward pass; pick logits at each query's
                        # last real token (assumes right-padded queries — TODO confirm).
                        logits, past = model(_query)
                        logits = logits[_bbatch, (_query_len-1).long(), :] # batch_size * beam, vocab
                    else:
                        #print('token_id.shape', token_id.shape, token_id)
                        #print('past.shape', past[0].shape)
                        #print('len_past.shape', len_past.shape, len_past)
                        # Later steps: feed only the last chosen token plus the cache.
                        logits, past = model(token_id, past=past, len_past=len_past)
                        logits = logits[:, -1, :] # batch_size * beam, vocab
                    logits = _postprocess_next_token_scores(
                        logits,
                        history,
                        i,
                        batch_size,
                        num_beams,
                        repetition_penalty=args.repetition_penalty,
                        no_repeat_ngram_size=args.no_repeat_ngram_size,
                        min_length=args.min_length,
                        eos_token_id=args.eos_token_id,
                    )
                    softmax_probs = F.softmax(logits, dim=-1)
                    ##_prob, _w_idx = torch.topk(softmax_probs, num_beams) # batch_size, beam
                    vocab_size = softmax_probs.shape[-1]
                    _logprob = torch.log(softmax_probs) # batch_size * beam, vocab
                    if i == 0:
                        # All beams start identical: score only beam 0 to avoid duplicates.
                        next_scores = _logprob.view(batch_size, num_beams, -1)[:, 0, :] # batch_size, vocab
                    else:
                        next_scores = beam_scores.unsqueeze(-1) + _logprob.view(batch_size, num_beams, -1)
                        next_scores = next_scores.view(batch_size, -1) # batch_size, beam * vocab
                    next_scores, next_tokens = torch.topk(
                        next_scores, num_beams, dim=1, largest=True, sorted=True
                    ) # batch_size, num_beams
                    # Decompose flat topk indices into (source beam, token id).
                    beam_id = (next_tokens // vocab_size).view(-1) # batch_size * num_beams
                    token_id = (next_tokens % vocab_size).view(-1).unsqueeze(-1) # batch_size, num_beams
                    # Global hypothesis indices; reorder the KV cache to follow survivors.
                    beam_idx = beam_id.view(batch_size, num_beams) + (_batch * num_beams).unsqueeze(-1)
                    past = _reorder_cache(past, beam_idx.view(-1))
                    beam_scores = next_scores # batch_size, num_beams
                    len_past = (_query_len + i).long()
                    if history is None:
                        history = token_id.detach()
                    else:
                        history = torch.cat((history[beam_idx.view(-1)], token_id.detach()), dim=1).detach()
                    # Harvest hypotheses that just emitted EOS.
                    _add_beam_candidate(
                        best_score, best_sequence, batch_size, num_beams, beam_scores, history,
                        eos_token_id=args.eos_token_id
                    )
                # Final flush: accept remaining hypotheses regardless of EOS.
                _add_beam_candidate(
                    best_score, best_sequence, batch_size, num_beams, beam_scores, history
                )
            with torch.no_grad():
                _id = distributed_gather(args, _id)
                output = distributed_gather(args, best_sequence)
                #score = distributed_gather(args, score)
                distributed_sync(args)
            if args.rank == 0:
                _id = _id.view(-1).cpu()
                output = output.view(-1, output.shape[-1]).cpu()
                #score = score.view(-1, score.shape[-1]).cpu()
                for _b in range(0, _id.shape[-1]):
                    _i = int(_id[_b].item())
                    all_predictions[_i] = {}
                    all_predictions[_i]['id'] = _i
                    all_predictions[_i]['predict'] = output[_b].tolist()
                    #all_predictions[_i]['score'] = score[_b].tolist()
                if idx % 10 == 0:
                    print('inference samples', idx)
    if args.rank == 0:
        pred_file = os.path.join(args.work_dir, args.output_file)
        print('saving prediction file', pred_file)
        with open(pred_file, 'w') as writer:
            for _i in all_predictions:
                writer.write(json.dumps(all_predictions[_i]) + '\n')
19,465 | import json
import numpy as np
import argparse
import os
import sys
import re
import json
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import encoder
def stardard_tokenize(sent):
def post_process(sent, is_tokenize, is_lower):
    """Optionally lowercase and then tokenize a sentence string."""
    out = sent.lower() if is_lower else sent
    return stardard_tokenize(out) if is_tokenize else out
19,466 | import os, sys
import glob
import random
from collections import Counter, OrderedDict
import numpy as np
import torch
import json
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
class Corpus(object):
def __init__(self, path):
def get_lm_corpus(data):
    """Announce and build a Corpus from the dataset path/name `data`."""
    print('Producing dataset {}...'.format(data))
    return Corpus(data)
19,467 | import os, sys
import glob
import random
from collections import Counter, OrderedDict
import numpy as np
import torch
import json
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
def padding_tokens(tokens, max_seq_length, pad_token, direct, max_context_length=0):
    """Truncate and right-pad a token-id list to a fixed length.

    Args:
        tokens: list of token ids.
        max_seq_length: length of the returned padded list.
        pad_token: id used to pad on the right.
        direct: truncation side — > 0 keeps the head of `tokens`,
            otherwise keeps the tail.
        max_context_length: truncation cap; 0 means use max_seq_length.

    Returns:
        (padded_tokens, token_len) where token_len is the number of real
        (non-pad) tokens kept.
    """
    if max_context_length == 0:
        max_context_length = max_seq_length

    if len(tokens) > max_context_length:
        kept = tokens[:max_context_length] if direct > 0 else tokens[-max_context_length:]
    else:
        kept = list(tokens)

    token_len = len(kept)
    # Idiomatic padding: [pad]*n instead of a list comprehension over range(n).
    return kept + [pad_token] * (max_seq_length - token_len), token_len
19,468 | import argparse
import time
import math
import os, sys
import numpy as np
import itertools
import torch
import random
from torch.utils.data import DataLoader
from gpu import (
add_gpu_params,
parse_gpu,
distributed_opt,
distributed_gather,
distributed_sync,
cleanup
)
from optimizer import (
create_adam_optimizer,
create_optimizer_scheduler,
add_optimizer_params,
create_adam_optimizer_from_args
)
from data_utils import FT_Dataset
from model import GPT2Config, GPT2LMModel, Conv1D
from exp_utils import create_exp_dir
from pst.utils import convert_sparse_network, schedule_sparsity_ratio, update_network_sparsity, save_sparse_model
def print_args(args):
    """Dump all parsed arguments (rank 0 only), framed by '=' separator lines."""
    if args.rank != 0:
        return
    bar = '=' * 100
    print(bar)
    for key, val in args.__dict__.items():
        print(f' - {key} : {val}')
    print(bar)
19,469 | import argparse
import time
import math
import os, sys
import numpy as np
import itertools
import torch
import random
from torch.utils.data import DataLoader
torch.set_printoptions(threshold=100000)
from gpu import (
add_gpu_params,
parse_gpu,
distributed_opt,
distributed_gather,
distributed_sync,
cleanup
)
from optimizer import (
create_adam_optimizer,
create_optimizer_scheduler,
add_optimizer_params,
create_adam_optimizer_from_args
)
from data_utils import FT_Dataset
from model import GPT2Config, GPT2LMModel, Conv1D
from exp_utils import create_exp_dir
from pst.utils import convert_sparse_network, schedule_sparsity_ratio, update_network_sparsity, save_sparse_model
class AverageMeter(object):
def __init__(self):
def reset(self):
def update(self, val, n=1):
def optimizer_step(_loss, _optimizer, _model, _schedule, args, is_update=True):
def evaluate(model, valid_loader, args):
def distributed_sync(args):
def update_network_sparsity(model, sparsity):
def schedule_sparsity_ratio(
step,
total_step,
initial_warmup,
final_warmup,
initial_sparsity,
final_sparsity,
):
def save_sparse_model(model, save_path, logger=None):
def train_validate(
    model,
    optimizer,
    scheduler,
    train_loader,
    valid_loader,
    args,
    train_step=0,
    epoch=0
):
    """Train `model` for one epoch with gradient accumulation, periodic logging,
    checkpointing, sparsity-ratio scheduling (PST), and interval validation.

    Returns the updated global `train_step`.
    """
    model.train()
    avg_lm_loss = AverageMeter()
    print('start to train the model................', epoch)
    log_start_time = time.time()
    best_val_ppl = None
    # Re-seed the distributed sampler so shuffling differs per epoch.
    train_loader.sampler.set_epoch(epoch)
    for idx, data in enumerate(train_loader):
        data = {key: value for key, value in data.items()}
        _input = data['input'].to(args.device)
        _target = data['target'].to(args.device)
        _msk = data['mask'].to(args.device)
        _lm_logits, _lm_loss = model(
            _input, lm_labels=_target, lm_mask=_msk, label_smooth=args.label_smooth
        )
        _lm_loss = _lm_loss.mean()
        train_step += 1
        # Only apply an optimizer update every grad_acc micro-batches;
        # the loss is scaled by grad_acc to keep gradient magnitudes stable.
        is_update = True if train_step % args.grad_acc == 0 else False
        avg_lm_loss.update(_lm_loss.item())
        optimizer_step(
            _lm_loss/(args.grad_acc), optimizer, model, scheduler, args, is_update=is_update
        )
        if train_step % args.log_interval == 0:
            elapsed = time.time() - log_start_time
            lr = optimizer.param_groups[0]['lr']
            log_str = f'| epoch {epoch:3d} step {train_step:>8d} | { idx + 1:>6d} batches | ' \
                      f'lr {lr:.3g} | ms/batch {elapsed * 1000 / args.log_interval:5.2f} | ' \
                      f'loss {avg_lm_loss.val:5.2f} | avg loss {avg_lm_loss.avg:5.2f} | ' \
                      f'ppl {math.exp(avg_lm_loss.avg):5.2f}'
            if args.rank == 0:
                print(log_str)
            log_start_time = time.time()
            avg_lm_loss.reset()
        if train_step % args.save_interval == 0:
            # Rank 0 writes the checkpoint; all ranks synchronize afterwards.
            if args.rank == 0:
                model_path = os.path.join(args.work_dir, f'model.{train_step}.pt')
                print('saving checkpoint', model_path)
                torch.save({'model_state_dict': model.state_dict()}, model_path)
            distributed_sync(args)
        # update network sparsity ratio
        cur_sparsity = schedule_sparsity_ratio(train_step, args.max_step, args.initial_warmup,
                                               args.final_warmup, args.initial_sparsity, args.sparsity)
        update_network_sparsity(model, cur_sparsity)
        # evaluation interval
        if train_step % args.eval_interval == 0:
            eval_start_time = time.time()
            valid_loss, valid_ppl = evaluate(model, valid_loader, args)
            if best_val_ppl is None or valid_ppl < best_val_ppl:
                best_val_ppl = valid_ppl
            log_str = f'| Eval {train_step // args.eval_interval:3d} at step {train_step:>8d} | ' \
                      f'time: {time.time() - eval_start_time:5.2f}s | valid loss {valid_loss:5.2f} | ' \
                      f'valid ppl {valid_ppl:5.2f} | best ppl {best_val_ppl:5.2f} '
            if args.rank == 0:
                print('-' * 100)
                print(log_str)
                print('-' * 100)
            # evaluate() leaves the model in eval mode; switch back for training.
            model.train()
            distributed_sync(args)
        if train_step == args.max_step:
            break
    # End-of-epoch checkpoint plus the sparse-model export (rank 0 only).
    if args.rank == 0:
        model_path = os.path.join(args.work_dir, f'model.{train_step}.pt')
        print('saving checkpoint', model_path)
        torch.save({'model_state_dict': model.state_dict()}, model_path)
        save_sparse_model(model, os.path.join(args.work_dir, 'sparse_model.pt'))
    distributed_sync(args)
    return train_step
19,470 | import logging
import math
import os
from collections import OrderedDict
import copy
import math
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.parameter import Parameter
def gelu(x):
    """Tanh-approximation GELU activation (https://arxiv.org/abs/1606.08415)."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
19,471 | import logging
import math
import os
from collections import OrderedDict
import copy
import math
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.parameter import Parameter
def gelu_fast(x):
    """Fast tanh-approximation GELU with sqrt(2/pi) hard-coded as 0.7978845608."""
    u = x * 0.7978845608 * (1.0 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + torch.tanh(u))
19,472 | import logging
import math
import os
from collections import OrderedDict
import copy
import math
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.parameter import Parameter
The provided code snippet includes the necessary dependencies for implementing the `gelu_new` function. Write a Python function `def gelu_new(x)` that solves the following problem:
Implement the GELU activation function currently used in the Google BERT repo (identical to OpenAI GPT). Also see https://arxiv.org/abs/1606.08415
Here is the function:
def gelu_new(x):
    """GELU activation as used in the Google BERT repo (identical to OpenAI GPT).

    Tanh approximation; see https://arxiv.org/abs/1606.08415
    """
    c = math.sqrt(2.0 / math.pi)
    return 0.5 * x * (1.0 + torch.tanh(c * (x + 0.044715 * torch.pow(x, 3.0))))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.