id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
163,107 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_cnt_lx_list(cnt_sc1, cnt_sa1, cnt_wn1, cnt_wc1, cnt_wo1, cnt_wv1):
    """Per-example logical-form accuracy flags.

    Each argument is a per-example list of 0/1 correctness flags for one
    sub-task (select-col, select-agg, where-num, where-col, where-op,
    where-val).  An example scores 1 only when every sub-task is correct.
    """
    # The original kept an unused `cnt_lx` accumulator; removed.
    return [
        1 if (csc and csa and cwn and cwc and cwo and cwv) else 0
        for csc, csa, cwn, cwc, cwo, cwv
        in zip(cnt_sc1, cnt_sa1, cnt_wn1, cnt_wc1, cnt_wo1, cnt_wv1)
    ]
163,108 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_cnt_x_list(engine, tb, g_sc, g_sa, g_sql_i, pr_sc, pr_sa, pr_sql_i):
    """Execution accuracy: run gold and predicted SQL and compare answers.

    engine : DB engine exposing ``execute(table_id, sel, agg, conds)``.
    tb     : list of table dicts; ``tb[b]['id']`` is the table id.
    g_* / pr_* : gold / predicted select-column, aggregate, and sql dicts.

    Returns (cnt_x1_list, g_ans, pr_ans); cnt_x1_list[b] is 1 iff the
    predicted query executes, returns a non-empty answer, and that answer
    equals the gold answer.
    """
    cnt_x1_list = []
    g_ans = []
    pr_ans = []
    for b in range(len(g_sc)):
        g_ans1 = engine.execute(tb[b]['id'], g_sc[b], g_sa[b], g_sql_i[b]['conds'])
        try:
            pr_ans1 = engine.execute(tb[b]['id'], pr_sc[b], pr_sa[b], pr_sql_i[b]['conds'])
            # Empty result from incorrectly generated SQL counts as wrong.
            if bool(pr_ans1):
                cnt_x1 = 1 if g_ans1 == pr_ans1 else 0
            else:
                cnt_x1 = 0
        except Exception:
            # Malformed predicted SQL (type error etc.).  Was a bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit.
            # Execution-guided decoding may be used here.
            pr_ans1 = None
            cnt_x1 = 0
        cnt_x1_list.append(cnt_x1)
        g_ans.append(g_ans1)
        pr_ans.append(pr_ans1)
    return cnt_x1_list, g_ans, pr_ans
163,109 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
The provided code snippet includes necessary dependencies for implementing the `get_mean_grad` function. Write a Python function `def get_mean_grad(named_parameters)` to solve the following problem:
Get list of mean, std of grad of each parameters Code based on web searched result..
Here is the function:
def get_mean_grad(named_parameters):
    """
    Get lists of (mean, std) of the gradient magnitude of each parameter.
    Code based on web searched result..

    Parameters that do not require grad, or whose ``.grad`` is still None
    (e.g. before the first backward pass), are skipped.
    """
    mu_list = []
    sig_list = []
    for name, param in named_parameters:
        # The original crashed with AttributeError when grad was None.
        if not param.requires_grad or param.grad is None:
            continue
        magnitude = param.grad.abs()
        mu_list.append(magnitude.mean())
        # numel() also covers 0-dim scalar tensors, where len() raises.
        if magnitude.numel() == 1:
            # std of a single value is NaN; report 0.0 (float, for dtype
            # consistency with the other entries) instead.
            sig_list.append(torch.tensor(0.0))
        else:
            sig_list.append(magnitude.std())
    return mu_list, sig_list
163,110 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def generate_sql_i(pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu):
    """Assemble WikiSQL-style sql dicts from per-clause predictions.

    Returns one {'agg', 'sel', 'conds'} dict per example; each condition is
    [where-col, where-op, where-value-string].
    """
    def _strip_wordpiece(tok):
        # Remove a BERT WordPiece continuation marker.  The original used
        # tok.lstrip('##'), which strips *any* leading '#' characters and
        # therefore corrupted tokens that legitimately start with '#'.
        return tok[2:] if tok.startswith('##') else tok

    pr_sql_i = []
    for b, nlu1 in enumerate(nlu):
        conds = []
        for i_wn in range(pr_wn[b]):
            merged_wv11 = ''.join(_strip_wordpiece(t) for t in pr_wv_str[b][i_wn])
            conds.append([pr_wc[b][i_wn], pr_wo[b][i_wn], merged_wv11])
        pr_sql_i.append({'agg': pr_sa[b], 'sel': pr_sc[b], 'conds': conds})
    return pr_sql_i
163,111 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def json_default_type_checker(o):
    """json.dumps `default` hook: convert a numpy int64 to a plain int.

    From https://stackoverflow.com/questions/11942364/typeerror-integer-is-not-json-serializable-when-serializing-json-in-python
    """
    if isinstance(o, int64):
        return int(o)
    # Give json a useful message instead of a bare TypeError.
    raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
def save_for_evaluation(path_save, results, dset_name, ):
    """Write `results` to {path_save}/results_{dset_name}.jsonl, one JSON object per line."""
    path_save_file = os.path.join(path_save, f'results_{dset_name}.jsonl')
    with open(path_save_file, 'w', encoding='utf-8') as f:
        for r1 in results:
            json_str = json.dumps(r1, ensure_ascii=False, default=json_default_type_checker)
            # f.write, not f.writelines: writelines expects an iterable of
            # strings, so passing a str wrote it one character at a time.
            f.write(json_str + '\n')
163,112 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def json_default_type_checker(o):
    """json.dumps `default` hook: convert a numpy int64 to a plain int.

    From https://stackoverflow.com/questions/11942364/typeerror-integer-is-not-json-serializable-when-serializing-json-in-python
    """
    if isinstance(o, int64):
        return int(o)
    # Give json a useful message instead of a bare TypeError.
    raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
def save_for_evaluation_aux(path_save, results, dset_name, ):
    """Write auxiliary `results` to {path_save}/results_aux_{dset_name}.jsonl, one JSON object per line."""
    path_save_file = os.path.join(path_save, f'results_aux_{dset_name}.jsonl')
    with open(path_save_file, 'w', encoding='utf-8') as f:
        for r1 in results:
            json_str = json.dumps(r1, ensure_ascii=False, default=json_default_type_checker)
            # f.write, not f.writelines: writelines expects an iterable of
            # strings, so passing a str wrote it one character at a time.
            f.write(json_str + '\n')
163,113 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
The provided code snippet includes necessary dependencies for implementing the `check_sc_sa_pairs` function. Write a Python function `def check_sc_sa_pairs(tb, pr_sc, pr_sa, )` to solve the following problem:
Check whether pr_sc, pr_sa are allowed pairs or not. agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
Here is the function:
def check_sc_sa_pairs(tb, pr_sc, pr_sa, ):
    """
    Check whether each predicted (select-column, aggregate) pair is allowed.
    agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']

    Text columns only admit no-aggregate (0) or COUNT (3); real columns
    admit every aggregate.  Any other column type is an error.
    """
    check = []
    for b in range(len(pr_sc)):
        col_type = tb[b]['types'][pr_sc[b]]
        if col_type == 'text':
            check.append(pr_sa[b] in (0, 3))  # '' or COUNT
        elif col_type == 'real':
            check.append(True)
        else:
            raise Exception("New TYPE!!")
    return check
163,114 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def remap_sc_idx(idxs, pr_sc_beam):
    """Replace each beam's select-column *beam index* (slot 0) with the
    actual column index taken from pr_sc_beam.  Mutates and returns `idxs`.
    """
    # The original bound loop variables (idxs1, idxs11) it never used.
    for b in range(len(idxs)):
        for i_beam in range(len(idxs[b])):
            sc_beam_idx = idxs[b][i_beam][0]
            idxs[b][i_beam][0] = pr_sc_beam[b][sc_beam_idx]
    return idxs
163,115 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def sort_and_generate_pr_w(pr_sql_i):
    """Extract where-clause columns/ops/values from predicted sql dicts,
    sorted by where-column index.

    Also rewrites each dict's 'conds' in the same sorted order (in place).
    Returns (pr_wc, pr_wo, pr_wv, pr_sql_i).
    """
    pr_wc, pr_wo, pr_wv = [], [], []
    for pr_sql_i1 in pr_sql_i:
        conds1 = pr_sql_i1["conds"]
        wc1 = [cond[0] for cond in conds1]
        wo1 = [cond[1] for cond in conds1]
        wv1 = [cond[2] for cond in conds1]
        # Order everything by the where-column index.
        order = argsort(wc1)
        pr_wc.append(array(wc1)[order].tolist())
        pr_wo.append(array(wo1)[order].tolist())
        pr_wv.append(array(wv1)[order].tolist())
        pr_sql_i1['conds'] = [conds1[i] for i in order]
    return pr_wc, pr_wo, pr_wv, pr_sql_i
163,116 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def generate_sql_q1(sql_i1, tb1):
    """
    Render one sql dict as a SQL query string.

    sql = {'sel': 5, 'agg': 4, 'conds': [[3, 0, '59']]}
    agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg']

    Temporal as it can show only one-time conditioned case.
    Note the cond_ops list here intentionally differs from the WikiSQL
    ['=', '>', '<', 'OP'] set.
    """
    agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg']
    cond_ops = [">", "<", "==", "!=", "LIKE", "DESC"]
    headers = tb1["header"]
    table_name = tb1["id"]

    select_agg = agg_ops[sql_i1['agg']]
    select_header = headers[sql_i1['sel']]
    select_clause = f'SELECT {select_agg}({select_header}) '

    conds = sql_i1['conds']
    if not conds:
        rest = f'FROM {table_name}'
    else:
        # WHERE conditions joined with AND, in prediction order.
        pieces = []
        for cond in conds:
            hdr_idx, op_idx, value = cond[0:3]
            pieces.append(f" {headers[hdr_idx]} {cond_ops[op_idx]} {value}")
        rest = f'FROM {table_name} WHERE' + ' AND'.join(pieces)
    return select_clause + rest
def generate_sql_q(sql_i, tb):
    """Batch version of generate_sql_q1: one query string per example."""
    return [generate_sql_q1(sql_i1, tb[b]) for b, sql_i1 in enumerate(sql_i)]
163,117 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_pnt_idx1(col_pool_type, st_ed):
    """Map a (start, end) token span to pointer index/indices per pooling type.

    'start_tok' -> start index, 'end_tok' -> end index,
    'avg' -> arange(st, ed) covering the whole span.

    Raises ValueError for an unknown pooling type; the original silently
    fell through and crashed later with an UnboundLocalError.
    """
    st, ed = st_ed
    if col_pool_type == 'start_tok':
        return st
    elif col_pool_type == 'end_tok':
        return ed
    elif col_pool_type == 'avg':
        return arange(st, ed, 1)
    raise ValueError(f"Unknown col_pool_type: {col_pool_type}")
The provided code snippet includes necessary dependencies for implementing the `gen_g_pnt_idx` function. Write a Python function `def gen_g_pnt_idx(g_wvi, sql_i, i_hds, i_sql_vocab, col_pool_type)` to solve the following problem:
sql_vocab = ( 0.. "sql none", "sql max", "sql min", "sql count", "sql sum", "sql average", ..5 6.. "sql select", "sql where", "sql and", .. 8 9.. "sql equal", "sql greater than", "sql less than", .. 11 12.. "sql start", "sql end" .. 13 )
Here is the function:
def gen_g_pnt_idx(g_wvi, sql_i, i_hds, i_sql_vocab, col_pool_type):
    """
    Build the gold pointer sequence for seq2seq decoding, one list per example.

    Pointer order: start, SELECT, aggregate, select-column,
    [WHERE, (col, op, value-start, value-end[, AND])*], end.

    sql_vocab = (
        0.. "sql none", "sql max", "sql min", "sql count", "sql sum", "sql average", ..5
        6.. "sql select", "sql where", "sql and", .. 8
        9.. "sql equal", "sql greater than", "sql less than", .. 11
        12.. "sql start", "sql end" .. 13
    )

    g_wvi: gold where-value (start, end) token spans per condition.
    i_hds / i_sql_vocab: (st, ed) spans of each header / sql-vocab entry.
    col_pool_type: how a span becomes pointer indices (see get_pnt_idx1).
    """
    g_pnt_idxs = []
    for b, sql_i1 in enumerate(sql_i):
        i_sql_vocab1 = i_sql_vocab[b]
        i_hds1 = i_hds[b]
        g_pnt_idxs1 = []
        # start token ("sql start" is second-to-last in the vocab)
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[-2])
        g_pnt_idxs1.append(pnt_idx1)
        # select token
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[6])
        g_pnt_idxs1.append(pnt_idx1)
        # select agg (vocab entries 0..5 are the aggregate names)
        idx_agg = sql_i1["agg"]
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[idx_agg])
        g_pnt_idxs1.append(pnt_idx1)
        # select column
        idx_sc = sql_i1["sel"]
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_hds1[idx_sc])
        g_pnt_idxs1.append(pnt_idx1)
        conds = sql_i1["conds"]
        wn = len(conds)
        if wn <= 0:
            pass  # no WHERE clause at all
        else:
            # select where
            pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[7])
            g_pnt_idxs1.append(pnt_idx1)
            for i_wn, conds1 in enumerate(conds):
                # where column
                idx_wc = conds1[0]
                pnt_idx1 = get_pnt_idx1(col_pool_type, i_hds1[idx_wc])
                g_pnt_idxs1.append(pnt_idx1)
                # where op (operators occupy vocab entries 9..11)
                idx_wo = conds1[1]
                pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[idx_wo + 9])
                g_pnt_idxs1.append(pnt_idx1)
                # where val: point directly into the question tokens,
                # offset past the sql-vocab segment.
                st, ed = g_wvi[b][i_wn]
                end_pos_of_sql_vocab = i_sql_vocab1[-1][-1]
                g_pnt_idxs1.append(st + 1 + end_pos_of_sql_vocab)  # due to inital [CLS] token in BERT-input vector
                g_pnt_idxs1.append(ed + 1 + end_pos_of_sql_vocab)  # due to inital [CLS] token in BERT-input vector
                # and token (only between conditions, not after the last one)
                if i_wn < wn - 1:
                    pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[8])
                    g_pnt_idxs1.append(pnt_idx1)
        # end token ("sql end" is last in the vocab)
        pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[-1])
        g_pnt_idxs1.append(pnt_idx1)
        g_pnt_idxs.append(g_pnt_idxs1)
    return g_pnt_idxs
163,118 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def pred_pnt_idxs(score, pnt_start_tok, pnt_end_tok):
    """Greedy-decode pointer indices from per-step scores.

    Each decoded sequence starts with `pnt_start_tok` and stops right after
    the first predicted `pnt_end_tok` (or when the score rows run out).
    """
    pr_pnt_idxs = []
    for score1 in score:
        # score1 = [T, max_seq_length]
        decoded = [pnt_start_tok]
        for step_scores in score1:
            pnt = step_scores.argmax().item()
            decoded.append(pnt)
            if pnt == pnt_end_tok:
                break
        pr_pnt_idxs.append(decoded)
    return pr_pnt_idxs
163,119 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def generate_sql_q1_s2s(pnt_idxs1, tokens1, tb1):
    """
    Render one seq2seq pointer sequence as a space-joined token string.

    `tb1` is accepted for interface parity with generate_sql_q1 but is
    not used here.
    """
    return " ".join(tokens1[idx] for idx in pnt_idxs1)
def generate_sql_q_s2s(pnt_idxs, tokens, tb):
    """Batch version of generate_sql_q1_s2s."""
    return [
        generate_sql_q1_s2s(pnt_idxs1, tokens[b], tb[b])
        for b, pnt_idxs1 in enumerate(pnt_idxs)
    ]
163,120 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def gen_pnt_i_from_pnt(pnt, i_sql_vocab1, i_nlu1, i_hds1):
    """Locate which vocab group a pointer falls in.

    Groups: 0 = sql vocab, 1 = NL question, 2 = headers.
    Returns (group index, index within group), or (-1, -1) if not found.
    """
    # i_nlu1 is a single (st, ed) span, so wrap it to look like a group list.
    vg_list = [i_sql_vocab1, [i_nlu1], i_hds1]
    for i_vg, vg in enumerate(vg_list):
        idx_sub = find_where_pnt_belong(pnt, vg)
        if idx_sub > -1:
            return i_vg, idx_sub
    return -1, -1
def gen_i_vg_from_pnt_idxs(pnt_idxs, i_sql_vocab, i_nlu, i_hds):
    """Classify every decoded pointer of every example into its vocab group.

    See gen_pnt_i_from_pnt for the group semantics.
    Returns (i_vg_list, i_vg_sub_list), each shaped [B, T].
    """
    # The original also built an unused `sql_q1_list` per example and carried
    # dead commented-out code; both removed.
    i_vg_list = []
    i_vg_sub_list = []
    for b, pnt_idxs1 in enumerate(pnt_idxs):
        i_vg_list1 = []      # index of (sql_vocab, nlu, hds)
        i_vg_sub_list1 = []  # index inside of each vocab group
        for pnt in pnt_idxs1:
            i_vg, i_vg_sub = gen_pnt_i_from_pnt(pnt, i_sql_vocab[b], i_nlu[b], i_hds[b])
            i_vg_list1.append(i_vg)
            i_vg_sub_list1.append(i_vg_sub)
        i_vg_list.append(i_vg_list1)
        i_vg_sub_list.append(i_vg_sub_list1)
    return i_vg_list, i_vg_sub_list
163,121 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def merge_wv_t1_eng(where_str_tokens, NLq):
    """
    Almost copied of SQLNet.
    The main purpose is pad blank line while combining tokens.

    Reassembles a list of where-value tokens into a substring of the
    natural-language question `NLq`, re-inserting spaces only where the
    question actually has them.  Matching is done on lowercased text;
    returns the merged, stripped string.
    """
    nlq = NLq.lower()
    where_str_tokens = [tok.lower() for tok in where_str_tokens]
    alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789$'
    # PTB-style bracket/quote escapes mapped back to their literal forms.
    special = {'-LRB-': '(',
               '-RRB-': ')',
               '-LSB-': '[',
               '-RSB-': ']',
               '``': '"',
               '\'\'': '"',
               }
    # '--': '\u2013'} # this generate error for test 5661 case.
    ret = ''
    double_quote_appear = 0
    for raw_w_token in where_str_tokens:
        # if '' (empty string) of None, continue
        if not raw_w_token:
            continue
        # Change the special characters
        w_token = special.get(raw_w_token, raw_w_token) # maybe necessary for some case?
        # check the double quote; toggles between "inside" and "outside" a quoted span
        if w_token == '"':
            double_quote_appear = 1 - double_quote_appear
        # Check whether ret is empty. ret is selected where condition.
        if len(ret) == 0:
            pass
        # Check blank character: prefer whichever spacing actually occurs in nlq.
        elif len(ret) > 0 and ret + ' ' + w_token in nlq:
            # Pad ' ' if ret + ' ' is part of nlq.
            ret = ret + ' '
        elif len(ret) > 0 and ret + w_token in nlq:
            pass # already in good form. Later, ret + w_token will performed.
        # Below for unnatural question I guess. Is it likely to appear?
        elif w_token == '"':
            if double_quote_appear:
                ret = ret + ' ' # pad blank line between next token when " because in this case, it is of closing apperas
                # for the case of opening, no blank line.
        elif w_token[0] not in alphabet:
            pass # non alphabet one does not pad blank line.
        # when previous character is the special case.
        elif (ret[-1] not in ['(', '/', '\u2013', '#', '$', '&']) and (ret[-1] != '"' or not double_quote_appear):
            ret = ret + ' '
        ret = ret + w_token
    return ret.strip()
The provided code snippet includes necessary dependencies for implementing the `gen_sql_q_from_i_vg` function. Write a Python function `def gen_sql_q_from_i_vg(tokens, nlu, nlu_t, hds, tt_to_t_idx, pnt_start_tok, pnt_end_tok, pnt_idxs, i_vg_list, i_vg_sub_list)` to solve the following problem:
( "none", "max", "min", "count", "sum", "average", "select", "where", "and", "equal", "greater than", "less than", "start", "end" ),
Here is the function:
def gen_sql_q_from_i_vg(tokens, nlu, nlu_t, hds, tt_to_t_idx, pnt_start_tok, pnt_end_tok, pnt_idxs, i_vg_list, i_vg_sub_list):
    """
    Reconstruct SQL query strings and sql dicts from decoded pointer sequences.

    Each pointer was classified (via gen_i_vg_from_pnt_idxs) into a vocab
    group: 0 = sql vocab, 1 = question tokens, 2 = headers.

    sql vocab layout:
    (
        "none", "max", "min", "count", "sum", "average",
        "select", "where", "and",
        "equal", "greater than", "less than",
        "start", "end"
    ),

    Returns (sql_q, sql_i): rendered query strings plus their
    {'agg', 'sel', 'conds'} dict equivalents.
    """
    sql_q = []
    sql_i = []
    for b, nlu_t1 in enumerate(nlu_t):
        sql_q1_list = []
        sql_i1 = {}
        tt_to_t_idx1 = tt_to_t_idx[b]
        # Small state machine over the pointer stream.
        nlu_st_observed = False   # a where-value start pointer has been seen
        agg_observed = False      # an aggregate token has been seen
        wc_obs = False            # a where-column has been seen (open condition)
        wo_obs = False            # a where-operator has been seen
        conds = []
        for t, i_vg in enumerate(i_vg_list[b]):
            i_vg_sub = i_vg_sub_list[b][t]
            pnt = pnt_idxs[b][t]
            if i_vg == 0:
                # sql_vocab
                if pnt == pnt_start_tok or pnt == pnt_end_tok:
                    pass  # structural start/end markers produce no output
                else:
                    tok = tokens[b][pnt]
                    if tok in ["none", "max", "min", "count", "sum", "average"]:
                        agg_observed = True
                        if tok == "none":
                            pass
                        sql_i1["agg"] = ["none", "max", "min", "count", "sum", "average"].index(tok)
                    else:
                        if tok in ["greater", "less", "equal"]:
                            if tok == 'greater':
                                tok = '>'
                            elif tok == 'less':
                                tok = '<'
                            elif tok == 'equal':
                                tok = '='
                            # gen conds1
                            # NOTE(review): conds1 is only bound in the
                            # i_vg == 2 branch below; this relies on a
                            # where-column pointer preceding the operator.
                            if wc_obs:
                                conds1.append( ['=','>','<'].index(tok) )
                                wo_obs = True
                        sql_q1_list.append(tok)
            elif i_vg == 1:
                # nlu case: where-value tokens come as a (start, end) pointer pair
                if not nlu_st_observed:
                    idx_nlu_st = pnt
                    nlu_st_observed = True
                else:
                    # now to wrap up
                    idx_nlu_ed = pnt
                    # Map BERT sub-token positions back to whole-word indices;
                    # the offset undoes the sql-vocab segment and [CLS] shift.
                    st_wh_idx = tt_to_t_idx1[idx_nlu_st - pnt_end_tok - 2]
                    ed_wh_idx = tt_to_t_idx1[idx_nlu_ed - pnt_end_tok - 2]
                    pr_wv_str11 = nlu_t1[st_wh_idx:ed_wh_idx + 1]
                    merged_wv11 = merge_wv_t1_eng(pr_wv_str11, nlu[b])
                    sql_q1_list.append(merged_wv11)
                    nlu_st_observed = False
                    if wc_obs and wo_obs:
                        # Condition complete: [col, op, value]
                        conds1.append(merged_wv11)
                        conds.append(conds1)
                        wc_obs = False
                        wo_obs = False
            elif i_vg == 2:
                # headers: either the select column (right after an aggregate)
                # or the start of a new where condition.
                tok = hds[b][i_vg_sub]
                if agg_observed:
                    sql_q1_list.append(f"({tok})")
                    sql_i1["sel"] = i_vg_sub
                    agg_observed = False
                else:
                    wc_obs = True
                    conds1 = [i_vg_sub]
                    sql_q1_list.append(tok)
        # insert table name between.
        sql_i1["conds"] = conds
        sql_i.append(sql_i1)
        sql_q1 = ' '.join(sql_q1_list)
        sql_q.append(sql_q1)
    return sql_q, sql_i
163,122 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def get_cnt_lx_list_s2s(g_pnt_idxs, pr_pnt_idxs):
    """Per-example exact-match flags between gold and predicted pointer sequences."""
    return [
        int(g_pnt_idxs1 == pr_pnt_idxs[b])
        for b, g_pnt_idxs1 in enumerate(g_pnt_idxs)
    ]
163,123 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
The provided code snippet includes necessary dependencies for implementing the `get_wemb_h_FT_Scalar_1` function. Write a Python function `def get_wemb_h_FT_Scalar_1(i_hds, l_hs, hS, all_encoder_layer, col_pool_type='start_tok')` to solve the following problem:
As if [ [table-1-col-1-tok1, t1-c1-t2, ...], [t1-c2-t1, t1-c2-t2, ...]. ... [t2-c1-t1, ...,] ] # i_hds = [ [ Batch 1 ] [ Batch 2 ] ] # [Batch 1] = [ (col1_st_idx, col1_ed_idx), (col2_st_idx, col2_ed_idx), ...] # i_hds = [[(11, 14), (15, 19), (20, 21), (22, 24), (25, 27), (28, 29)], # [(16, 19), (20, 24), (25, 26), (27, 29), (30, 32), (33, 34)]] pool_type = 'start_tok', 'end_tok', 'avg'
Here is the function:
def get_wemb_h_FT_Scalar_1(i_hds, l_hs, hS, all_encoder_layer, col_pool_type='start_tok'):
    """
    Pool header-token encodings from the last encoder layer into one vector
    per header.

    # i_hds = [ [ Batch 1 ] [ Batch 2 ] ]
    # [Batch 1] = [ (col1_st_idx, col1_ed_idx), (col2_st_idx, col2_ed_idx), ...]
    # i_hds = [[(11, 14), (15, 19), (20, 21), (22, 24), (25, 27), (28, 29)],
    #          [(16, 19), (20, 24), (25, 26), (27, 29), (30, 32), (33, 34)]]
    col_pool_type = 'start_tok', 'end_tok', 'avg'

    Returns wemb_h: [bS, max(l_hs), hS], allocated on the same device as the
    encoder output (instead of a module-global `device`), so it works
    wherever its inputs live.
    """
    last_layer = all_encoder_layer[-1]
    bS = len(l_hs)
    l_hs_max = max(l_hs)
    wemb_h = torch.zeros([bS, l_hs_max, hS], device=last_layer.device)
    for b, i_hds1 in enumerate(i_hds):
        for i_hd, (st, ed) in enumerate(i_hds1):
            if col_pool_type == 'start_tok':
                vec = last_layer[b, st, :]
            elif col_pool_type == 'end_tok':
                vec = last_layer[b, ed, :]
            elif col_pool_type == 'avg':
                # Bug fix: the original did mean(dim=1, keepdim=True) on the
                # [n_tok, hS] slice, producing a [n_tok, 1] column instead of
                # the intended [hS] pooled vector.
                vec = last_layer[b, st:ed, :].mean(dim=0)
            else:
                raise ValueError
            wemb_h[b, i_hd, :] = vec
    return wemb_h
163,124 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
def cal_prob_tot(p_select, p_where):
    """Total query probability = select-clause prob * where-clause prob."""
    return [p_sel1 * p_where[b] for b, p_sel1 in enumerate(p_select)]


def cal_prob_select(p_sc, p_sa):
    """Select-clause probability = P(select column) * P(aggregate)."""
    return [1.0 * p_sc1 * p_sa[b] for b, p_sc1 in enumerate(p_sc)]


def cal_prob_where(p_wn, p_wc, p_wo, p_wvi):
    """Where-clause probability.

    P(#conds) times, per condition, P(col) * P(op) * P(value start) * P(value end).
    """
    p_where = []
    for b, p_wn1 in enumerate(p_wn):
        prob = 1.0 * p_wn1
        for i_wn, p_wc11 in enumerate(p_wc[b]):
            p_wv11_st, p_wv11_ed = p_wvi[b][i_wn]
            prob *= p_wc11
            prob *= p_wo[b][i_wn]
            prob *= p_wv11_st
            prob *= p_wv11_ed
        p_where.append(prob)
    return p_where
def _softmax_pick_prob(scores, picks):
    """Softmax `scores` over dim 1 and return the probability of the picked
    index for each batch element, as plain floats."""
    ps = F.softmax(scores, dim=1)
    return [ps[b, pick].item() for b, pick in enumerate(picks)]


def cal_prob_sc(s_sc, pr_sc):
    """P(predicted select column) per example."""
    return _softmax_pick_prob(s_sc, pr_sc)


def cal_prob_sa(s_sa, pr_sa):
    """P(predicted aggregate) per example."""
    return _softmax_pick_prob(s_sa, pr_sa)


def cal_prob_wn(s_wn, pr_wn):
    """P(predicted number of where conditions) per example."""
    # The three cal_prob_{sc,sa,wn} bodies were byte-identical; deduplicated
    # into _softmax_pick_prob.
    return _softmax_pick_prob(s_wn, pr_wn)
def cal_prob_wc(s_wc, pr_wc):
    """Sigmoid probability of each predicted where-column, per example."""
    ps = torch.sigmoid(s_wc)
    ps_out = []
    for b, pr_wc1 in enumerate(pr_wc):
        scores_np = array(ps[b].cpu())
        ps_out.append(list(scores_np[pr_wc1]))
    return ps_out


def cal_prob_wo(s_wo, pr_wo):
    """Softmax probability of each predicted where-operator.

    Assumes there is always at least a single condition.
    """
    ps = F.softmax(s_wo, dim=2)
    return [
        [ps[b][n][op_idx].item() for n, op_idx in enumerate(pr_wo1)]
        for b, pr_wo1 in enumerate(pr_wo)
    ]


def cal_prob_wvi_se(s_wv, pr_wvi):
    """[P(start), P(end)] of each predicted where-value span."""
    prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy()
    return [
        [[prob_wv[b, i_wn, st, 0], prob_wv[b, i_wn, ed, 1]]
         for i_wn, (st, ed) in enumerate(pr_wvi1)]
        for b, pr_wvi1 in enumerate(pr_wvi)
    ]
The provided code snippet includes necessary dependencies for implementing the `cal_prob` function. Write a Python function `def cal_prob(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi)` to solve the following problem:
:param s_sc: [B, l_h] :param s_sa: [B, l_a] # 16 :param s_wn: [B, 5] :param s_wc: [B, l_h] :param s_wo: [B, 4, l_o] # :param s_wv: [B, 4, 22] :return:
Here is the function:
def cal_prob(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi):
    """
    Turn raw clause scores into probabilities of the *predicted* decisions.

    :param s_sc: [B, l_h]
    :param s_sa: [B, l_a] # 16
    :param s_wn: [B, 5]
    :param s_wc: [B, l_h]
    :param s_wo: [B, 4, l_o] #
    :param s_wv: [B, 4, 22]
    :return: (p_tot, p_select, p_where, p_sc, p_sa, p_wn, p_wc, p_wo, p_wvi)
    """
    # Per-decision probabilities of the chosen (pr_*) indices.
    p_sc = cal_prob_sc(s_sc, pr_sc)
    p_sa = cal_prob_sa(s_sa, pr_sa)
    p_wn = cal_prob_wn(s_wn, pr_wn)
    p_wc = cal_prob_wc(s_wc, pr_wc)
    p_wo = cal_prob_wo(s_wo, pr_wo)
    p_wvi = cal_prob_wvi_se(s_wv, pr_wvi)
    # calculate select-clause probability
    p_select = cal_prob_select(p_sc, p_sa)
    # calculate where-clause probability
    p_where = cal_prob_where(p_wn, p_wc, p_wo, p_wvi)
    # calculate total probability
    p_tot = cal_prob_tot(p_select, p_where)
    return p_tot, p_select, p_where, p_sc, p_sa, p_wn, p_wc, p_wo, p_wvi
163,125 | import json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from .utils import generate_perm_inv
from .utils import json_default_type_checker
from sqlova.args import device
The provided code snippet includes necessary dependencies for implementing the `sort_pr_wc` function. Write a Python function `def sort_pr_wc(pr_wc, g_wc)` to solve the following problem:
Input: list pr_wc = [B, n_conds] g_wc = [B, n_conds] Return: list pr_wc_sorted = [B, n_conds]
Here is the function:
def sort_pr_wc(pr_wc, g_wc):
    """
    Align predicted where-columns with the gold order when they match as sets.

    Input: list
        pr_wc = [B, n_conds]
        g_wc = [B, n_conds]
    Return: list
        pr_wc_sorted = [B, n_conds]
    """
    pr_wc_sorted = []
    for b, pr_wc1 in enumerate(pr_wc):
        g_wc1 = g_wc[b]
        # Same columns (any order): adopt the gold ordering; otherwise keep
        # the prediction untouched.  deepcopy so callers can mutate safely.
        source = g_wc1 if set(g_wc1) == set(pr_wc1) else pr_wc1
        pr_wc_sorted.append(deepcopy(source))
    return pr_wc_sorted
class QAInput:
    """Prompt builders for the four unified QA formats.

    Every builder returns one source string of the form
    '[TASK] [<FORMAT>] [QUESTION] ... [CONTEXT] ... the answer is: <hint>'.
    """

    @classmethod
    def qg_input_abstrativeqa(cls, context, question, hint, options=None):
        # Missing @classmethod in the original shifted every argument by one
        # when called as QAInput.qg_input_*(...); same fix on all builders.
        return f'[TASK] [ABSTRACTIVE] [QUESTION] {question}. [CONTEXT] {context} the answer is: {hint}'

    @classmethod
    def qg_input_boolqa(cls, context, question, hint, options=None):
        return f'[TASK] [BOOL] [QUESTION] {question}. [CONTEXT] {context} the answer is: {hint}'

    @classmethod
    def qg_input_extractive_qa(cls, context, question, hint, options=None):
        # The question is normalized to sentence case for this format only.
        return f'[TASK] [EXTRACTIVE] [QUESTION] {question.lower().capitalize()}. [CONTEXT] {context} the answer is: {hint}'

    @classmethod
    def qg_input_multirc(cls, context, question, hint, options=None):
        return f'[TASK] [MultiChoice] [QUESTION] {question} [OPTIONS] {options} [CONTEXT] {context} the answer is: {hint}'
def preprocess_proqa_eval(
        examples,
        question_column: str,
        answer_column: str,
        hint_column: str,
        format_name: str):
    """Build (inputs, targets, hints) for evaluation from a batch dict.

    Each entry of examples[question_column] is a '\\n'-joined record
    (literal backslash-n, not a newline): question, context and, for
    multi-choice, options.  Hints are intentionally blanked at eval time.
    Raises ValueError for an unknown format_name (the original fell through
    and crashed with an UnboundLocalError on `inputs`).
    """
    questions = examples[question_column]
    answers = examples[answer_column]
    hints = examples[hint_column]
    if format_name == "extractive":
        inputs = [QAInput.qg_input_extractive_qa(q.split("\\n")[1], q.split("\\n")[0], hint='')
                  for q in questions]
    elif format_name == "abstractive":
        inputs = [QAInput.qg_input_abstrativeqa(q.split("\\n")[1], q.split("\\n")[0], hint='')
                  for q in questions]
    elif format_name == "multichoice":
        # Two fields means no context was provided, only question + options.
        if len(questions[0].split("\\n")) == 2:
            inputs = [QAInput.qg_input_multirc(context='', question=q.split("\\n")[0],
                                               options=q.split("\\n")[1], hint='')
                      for q in questions]
        else:
            inputs = [QAInput.qg_input_multirc(context=q.split("\\n")[2], question=q.split("\\n")[0],
                                               options=q.split("\\n")[1], hint='')
                      for q in questions]
    elif format_name == "bool":
        inputs = [QAInput.qg_input_boolqa(context=q.split("\\n")[1], question=q.split("\\n")[0], hint='')
                  for q in questions]
    else:
        raise ValueError(f"Unknown format_name: {format_name}")
    targets = answers
    return inputs, targets, hints
class QAInput:
    """Prompt builders for the four unified QA formats.

    Every builder returns one source string of the form
    '[TASK] [<FORMAT>] [QUESTION] ... [CONTEXT] ... the answer is: <hint>'.
    """

    @classmethod
    def qg_input_abstrativeqa(cls, context, question, hint, options=None):
        # Missing @classmethod in the original shifted every argument by one
        # when called as QAInput.qg_input_*(...); same fix on all builders.
        return f'[TASK] [ABSTRACTIVE] [QUESTION] {question}. [CONTEXT] {context} the answer is: {hint}'

    @classmethod
    def qg_input_boolqa(cls, context, question, hint, options=None):
        return f'[TASK] [BOOL] [QUESTION] {question}. [CONTEXT] {context} the answer is: {hint}'

    @classmethod
    def qg_input_extractive_qa(cls, context, question, hint, options=None):
        # The question is normalized to sentence case for this format only.
        return f'[TASK] [EXTRACTIVE] [QUESTION] {question.lower().capitalize()}. [CONTEXT] {context} the answer is: {hint}'

    @classmethod
    def qg_input_multirc(cls, context, question, hint, options=None):
        return f'[TASK] [MultiChoice] [QUESTION] {question} [OPTIONS] {options} [CONTEXT] {context} the answer is: {hint}'
def preprocess_proqa(
examples,
question_column: str,
answer_column:str,
hint_column:str,
format_name:str):
questions = examples[question_column]
answers = examples[answer_column]
hints = examples[hint_column]
# single_inputs = []
if format_name=="extractive":
inputs = [QAInput.qg_input_extractive_qa(question.split("\\n")[1], question.split("\\n")[0],"") for hint,question in zip(hints,questions)]
if format_name=="abstractive":
inputs = [QAInput.qg_input_abstrativeqa(question.split("\\n")[1], question.split("\\n")[0],"") for hint,question in zip(hints,questions)]
if format_name=="multichoice":
if len(questions[0].split("\\n"))==2:
inputs = [QAInput.qg_input_multirc(context = '',question = question.split("\\n")[0], options=question.split("\\n")[1],hint="") for hint,question in zip(hints,questions)]
else:
inputs = [QAInput.qg_input_multirc(context = question.split("\\n")[2],question = question.split("\\n")[0], options=question.split("\\n")[1],hint="") for hint,question in zip(hints,questions)]
if format_name =="bool":
inputs = [QAInput.qg_input_boolqa(context = question.split("\\n")[1],question = question.split("\\n")[0],hint="") for hint,question in zip(hints,questions)]
targets = answers
return inputs, targets, examples[hint_column] | null |
163,128 | class QAInput:
def qg_input_abstrativeqa(cls, context, question,hint, options=None):
def qg_input_boolqa(cls, context, question, hint,options=None):
def qg_input_extractive_qa(cls, context, question, hint,options=None):
def qg_input_multirc(cls, context, question,hint, options=None):
def preprocess_simple(
examples,
question_column: str,
answer_column:str,
format_name:str):
questions = examples[question_column]
answers = examples[answer_column]
# single_inputs = []
if format_name=="extractive":
inputs = [QAInput.qg_input_extractive_qa(question.split("\\n")[1], question.split("\\n")[0],"") for question in questions]
if format_name=="abstractive":
inputs = [QAInput.qg_input_abstrativeqa(question.split("\\n")[1], question.split("\\n")[0],"") for question in questions]
if format_name=="multichoice":
if len(questions[0].split("\\n"))==2:
inputs = [QAInput.qg_input_multirc(context = '',question = question.split("\\n")[0], options=question.split("\\n")[1],hint="") for question in questions]
else:
inputs = [QAInput.qg_input_multirc(context = question.split("\\n")[2],question = question.split("\\n")[0], options=question.split("\\n")[1],hint="") for question in questions]
if format_name =="bool":
inputs = [QAInput.qg_input_boolqa(context = question.split("\\n")[1],question = question.split("\\n")[0],hint="") for question in questions]
targets = answers
return inputs, targets | null |
163,129 | from transformers import Seq2SeqTrainer, is_torch_tpu_available, EvalPrediction
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import nltk
import datasets
import re
import os
import numpy as np
import torch
import random
from pathlib import Path
import time
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
HPSearchBackend,
HubStrategy,
IntervalStrategy,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
number_of_arguments,
set_seed,
speed_metrics,
)
import warnings
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers.file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
get_full_repo_name,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
)
from transformers.trainer_utils import PredictionOutput,EvalLoopOutput
def fix_buggy_characters(str):
return re.sub("[{}^\\\\`\u2047<]", " ", str)
def replace_punctuation(str):
return str.replace("\"", "").replace("'", "")
def score_string_similarity(str1, str2):
if str1 == str2:
return 3.0 # Better than perfect token match
str1 = fix_buggy_characters(replace_punctuation(str1))
str2 = fix_buggy_characters(replace_punctuation(str2))
if str1 == str2:
return 2.0
if " " in str1 or " " in str2:
str1_split = str1.split(" ")
str2_split = str2.split(" ")
overlap = list(set(str1_split) & set(str2_split))
return len(overlap) / max(len(str1_split), len(str2_split))
else:
if str1 == str2:
return 1.0
else:
return 0.0 | null |
163,139 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_break():
return load_dataset("break_data","QDMR") | null |
163,140 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_mtop():
return load_dataset("iohadrubin/mtop",name="mtop") | null |
163,141 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_mtop():
return load_dataset("iohadrubin/smcalflow",name="smcalflow") | null |
163,142 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_bool():
# assert False
return load_from_disk("../../../bool-keys.hf")#load_dataset("json", data_files="../../../.jsonl") | null |
163,143 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_extractive():
# assert False
return load_from_disk("../../../extractive-keys.hf")#load_dataset("json", data_files="../../../.jsonl") | null |
163,144 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_abstractive():
# assert False
return load_from_disk("../../../abstractive-keys.hf")#load_dataset("json", data_files="../../../.jsonl") | null |
163,145 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_abstractive():
# assert False
return load_from_disk("../../../multichoice-keys.hf")#load_dataset("json", data_files="../../../.jsonl") | null |
163,146 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_extractivequery():
# assert False
return load_from_disk("../../../extractive-querystest.hf")#load_dataset("json", data_files="../../../.jsonl") | null |
163,147 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_boolquery():
# assert False
return load_from_disk("../../../bool-querystest.hf")#load_dataset("json", data_files="../../../.jsonl") | null |
163,148 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_abstractivequery():
# assert False
return load_from_disk("../../../abstractive-querystest.hf")#load_dataset("json", data_files="../../../.jsonl") | null |
163,149 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_abstractivequery():
# assert False
return load_from_disk("../../../multichoice-querystest.hf")#load_dataset("json", | null |
163,150 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_break_question(entry):
if "question" in entry:
question = entry['question']
else:
question = entry['question_text']
return question | null |
163,151 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def reformat(text):
def get_break_question_decomp(entry):
if "question" in entry:
question = entry['question']
else:
question = entry['question_text']
return f"{question}\t{reformat(entry['decomposition'])}" | null |
163,152 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def reformat(text):
return " ".join([f"{i+1}#) {x.strip()}" for i,x in enumerate(text.split(";"))])
def get_break_decomp(entry):
return reformat(entry['decomposition']) | null |
163,153 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_mtop_question(entry):
return entry['question'] | null |
163,154 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_mtop_question_decomp(entry):
return f"{entry['question']}\t{entry['logical_form']}" | null |
163,155 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_mtop_decomp(entry):
return entry['logical_form'] | null |
163,156 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_smcalflow_question(entry):
return entry['user_utterance'] | null |
163,157 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_smcalflow_question_decomp(entry):
return f"{entry['user_utterance']}\t{entry['lispress']}" | null |
163,158 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_smcalflow_decomp(entry):
return entry['lispress'] | null |
163,159 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_boolqa(entry):
line = entry["input"]
a = entry["output"]
questions = line.split("\\n")[0]
if line.split("\\n")[-1]!=line.split("\\n")[0]:
contexts = line.split("\\n")[-1]
else:
contexts = ""
q,c = questions, contexts
if c=="":
demo = a+' \n '+q
else:
demo = a+' \n '+q+" \\n "+c
return demo | null |
163,160 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_boolq(entry):
line = entry["input"]
questions = line.split("\\n")[0]
if line.split("\\n")[-1]!=line.split("\\n")[0]:
contexts = line.split("\\n")[-1]
else:
contexts = ""
q,c = questions, contexts
if c=="":
demo = q
else:
demo = q+" \\n "+c
return demo | null |
163,161 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_extractiveqa(entry):
line = entry["input"]
a = entry["output"]
questions = line.split("\\n")[0]
if line.split("\\n")[-1]!=line.split("\\n")[0]:
contexts = line.split("\\n")[-1]
else:
contexts = ""
q,c = questions, contexts
if c=="":
demo = a+' \n '+q
else:
demo = a+' \n '+q+" \\n "+c
return demo | null |
163,162 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_extractiveq(entry):
line = entry["input"]
questions = line.split("\\n")[0]
if line.split("\\n")[-1]!=line.split("\\n")[0]:
contexts = line.split("\\n")[-1]
else:
contexts = ""
q,c = questions, contexts
if c=="":
demo = q
else:
demo = q+" \\n "+c
return demo | null |
163,163 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_abstractiveqa(entry):
line = entry["input"]
a = entry["output"]
questions = line.split("\\n")[0]
if line.split("\\n")[-1]!=line.split("\\n")[0]:
contexts = line.split("\\n")[-1]
else:
contexts = ""
q,c = questions, contexts
if c=="":
demo = a+' \n '+q
else:
demo = a+' \n '+q+" \\n "+c
return demo | null |
163,164 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_abstractiveq(entry):
line = entry["input"]
questions = line.split("\\n")[0]
if line.split("\\n")[-1]!=line.split("\\n")[0]:
contexts = line.split("\\n")[-1]
else:
contexts = ""
q,c = questions, contexts
if c=="":
demo = q
else:
demo = q+" \\n "+c
return demo | null |
163,165 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_multichoiceqa(entry):
line = entry["input"]
a = entry["output"]
try:
options_ = line.split("\\n")[1]
except:
print(line)
assert False
options = options_
questions = line.split("\\n")[0]
if line.split("\\n")[-1]!=line.split("\\n")[1]:
contexts = line.split("\\n")[-1]
else:
contexts = ""
q,c,o = questions, contexts, options
if c=="":
demo =a+" \n "+q+" \\n "+o
else:
demo =a+" \n "+q+" \\n "+o+" \\n "+c
return demo | null |
163,166 | import collections
import csv
import json
import logging
import pickle
from typing import Dict
import jsonlines
import torch
from omegaconf import DictConfig
from dpr.utils.data_utils import App
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
from dpr.data.biencoder_data import (
BiEncoderPassage,
normalize_passage,
normalize_question,
get_dpr_files,
read_nq_tables_jsonl,
split_tables_to_chunks,
)
def get_multichoiceq(entry):
line = entry["input"]
try:
options_ = line.split("\\n")[1]
except:
print(line)
assert False
options = options_
questions = line.split("\\n")[0]
if line.split("\\n")[-1]!=line.split("\\n")[1]:
contexts = line.split("\\n")[-1]
else:
contexts = ""
q,c,o = questions, contexts, options
if c=="":
demo =q+" \\n "+o
else:
demo =q+" \\n "+o+" \\n "+c
return demo | null |
163,167 | import collections
import logging
import string
import unicodedata
from multiprocessing import Pool as ProcessPool
import regex as re
from functools import partial
from typing import Tuple, List, Dict
from dpr.data.retriever_data import TableChunk
from dpr.utils.tokenizers import SimpleTokenizer
logger = logging.getLogger(__name__)
QAMatchStats = collections.namedtuple(
"QAMatchStats", ["top_k_hits", "questions_doc_hits"]
)
def check_answer(questions_answers_docs, tokenizer, match_type) -> List[bool]:
"""Search through all the top docs to see if they have any of the answers."""
answers, (doc_ids, doc_scores) = questions_answers_docs
global dpr_all_documents
hits = []
for i, doc_id in enumerate(doc_ids):
doc = dpr_all_documents[doc_id]
text = doc[0]
answer_found = False
if text is None: # cannot find the document for some reason
logger.warning("no doc in db")
hits.append(False)
continue
if has_answer(answers, text, tokenizer, match_type):
answer_found = True
hits.append(answer_found)
return hits
class SimpleTokenizer(Tokenizer):
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
The provided code snippet includes necessary dependencies for implementing the `calculate_matches` function. Write a Python function `def calculate_matches( all_docs: Dict[object, Tuple[str, str]], answers: List[List[str]], closest_docs: List[Tuple[List[object], List[float]]], workers_num: int, match_type: str, ) -> QAMatchStats` to solve the following problem:
Evaluates answers presence in the set of documents. This function is supposed to be used with a large collection of documents and results. It internally forks multiple sub-processes for evaluation and then merges results :param all_docs: dictionary of the entire documents database. doc_id -> (doc_text, title) :param answers: list of answers's list. One list per question :param closest_docs: document ids of the top results along with their scores :param workers_num: amount of parallel threads to process data :param match_type: type of answer matching. Refer to has_answer code for available options :return: matching information tuple. top_k_hits - a list where the index is the amount of top documents retrieved and the value is the total amount of valid matches across an entire dataset. questions_doc_hits - more detailed info with answer matches for every question and every retrieved document
Here is the function:
def calculate_matches(
all_docs: Dict[object, Tuple[str, str]],
answers: List[List[str]],
closest_docs: List[Tuple[List[object], List[float]]],
workers_num: int,
match_type: str,
) -> QAMatchStats:
"""
Evaluates answers presence in the set of documents. This function is supposed to be used with a large collection of
documents and results. It internally forks multiple sub-processes for evaluation and then merges results
:param all_docs: dictionary of the entire documents database. doc_id -> (doc_text, title)
:param answers: list of answers's list. One list per question
:param closest_docs: document ids of the top results along with their scores
:param workers_num: amount of parallel threads to process data
:param match_type: type of answer matching. Refer to has_answer code for available options
:return: matching information tuple.
top_k_hits - a list where the index is the amount of top documents retrieved and the value is the total amount of
valid matches across an entire dataset.
questions_doc_hits - more detailed info with answer matches for every question and every retrieved document
"""
global dpr_all_documents
dpr_all_documents = all_docs
logger.info("dpr_all_documents size %d", len(dpr_all_documents))
tok_opts = {}
tokenizer = SimpleTokenizer(**tok_opts)
processes = ProcessPool(processes=workers_num)
logger.info("Matching answers in top docs...")
get_score_partial = partial(
check_answer, match_type=match_type, tokenizer=tokenizer
)
questions_answers_docs = zip(answers, closest_docs)
scores = processes.map(get_score_partial, questions_answers_docs)
logger.info("Per question validation results len=%d", len(scores))
n_docs = len(closest_docs[0][0])
top_k_hits = [0] * n_docs
for question_hits in scores:
best_hit = next((i for i, x in enumerate(question_hits) if x), None)
if best_hit is not None:
top_k_hits[best_hit:] = [v + 1 for v in top_k_hits[best_hit:]]
return QAMatchStats(top_k_hits, scores) | Evaluates answers presence in the set of documents. This function is supposed to be used with a large collection of documents and results. It internally forks multiple sub-processes for evaluation and then merges results :param all_docs: dictionary of the entire documents database. doc_id -> (doc_text, title) :param answers: list of answers's list. One list per question :param closest_docs: document ids of the top results along with their scores :param workers_num: amount of parallel threads to process data :param match_type: type of answer matching. Refer to has_answer code for available options :return: matching information tuple. top_k_hits - a list where the index is the amount of top documents retrieved and the value is the total amount of valid matches across an entire dataset. questions_doc_hits - more detailed info with answer matches for every question and every retrieved document |
163,168 | import collections
import logging
import string
import unicodedata
from multiprocessing import Pool as ProcessPool
import regex as re
from functools import partial
from typing import Tuple, List, Dict
from dpr.data.retriever_data import TableChunk
from dpr.utils.tokenizers import SimpleTokenizer
def _normalize_answer(s):
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def exact_match_score(prediction, ground_truth):
return _normalize_answer(prediction) == _normalize_answer(ground_truth) | null |
163,169 | import collections
import logging
import string
import unicodedata
from multiprocessing import Pool as ProcessPool
import regex as re
from functools import partial
from typing import Tuple, List, Dict
from dpr.data.retriever_data import TableChunk
from dpr.utils.tokenizers import SimpleTokenizer
logger = logging.getLogger(__name__)
QATableMatchStats = collections.namedtuple(
"QAMatchStats", ["top_k_chunk_hits", "top_k_table_hits", "questions_doc_hits"]
)
TableChunk = collections.namedtuple("TableChunk", ["text", "title", "table_id"])
class SimpleTokenizer(Tokenizer):
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
def calculate_chunked_matches(
all_docs: Dict[object, TableChunk],
answers: List[List[str]],
closest_docs: List[Tuple[List[object], List[float]]],
workers_num: int,
match_type: str,
) -> QATableMatchStats:
global dpr_all_documents
dpr_all_documents = all_docs
global dpr_all_tables
dpr_all_tables = {}
for key, table_chunk in all_docs.items():
table_str, title, table_id = table_chunk
table_chunks = dpr_all_tables.get(table_id, [])
table_chunks.append((table_str, title))
dpr_all_tables[table_id] = table_chunks
tok_opts = {}
tokenizer = SimpleTokenizer(**tok_opts)
processes = ProcessPool(processes=workers_num)
logger.info("Matching answers in top docs...")
get_score_partial = partial(
check_chunked_docs_answer, match_type=match_type, tokenizer=tokenizer
)
questions_answers_docs = zip(answers, closest_docs)
scores = processes.map(get_score_partial, questions_answers_docs)
logger.info("Per question validation results len=%d", len(scores))
n_docs = len(closest_docs[0][0])
top_k_hits = [0] * n_docs
top_k_orig_hits = [0] * n_docs
for s in scores:
question_hits, question_orig_doc_hits = s
best_hit = next((i for i, x in enumerate(question_hits) if x), None)
if best_hit is not None:
top_k_hits[best_hit:] = [v + 1 for v in top_k_hits[best_hit:]]
best_hit = next((i for i, x in enumerate(question_orig_doc_hits) if x), None)
if best_hit is not None:
top_k_orig_hits[best_hit:] = [v + 1 for v in top_k_orig_hits[best_hit:]]
return QATableMatchStats(top_k_hits, top_k_orig_hits, scores) | null |
163,170 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_dpr_files(source_name) -> List[str]:
    """Resolve *source_name* (a path or glob pattern) to local files.

    Falls back to the DPR data downloader when nothing matches locally.
    """
    matched = glob.glob(source_name)
    if matched or os.path.exists(source_name):
        return matched
    # Resource is not available locally — try the data downloader.
    from dpr.data.download_data import download
    return download(source_name)
163,171 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_break_question(entry):
    """Return the question text of a BREAK example (the key varies by source file)."""
    return entry["question"] if "question" in entry else entry["question_text"]
163,172 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def reformat(text):
    """Number the ';'-separated decomposition steps as '1#) step 2#) step ...'."""
    steps = (step.strip() for step in text.split(";"))
    return " ".join("{}#) {}".format(num, step) for num, step in enumerate(steps, start=1))

def get_break_question_decomp(entry):
    """Return 'question<TAB>numbered decomposition' for a BREAK example."""
    try:
        question = entry["question"]
    except KeyError:
        question = entry["question_text"]
    return "{}\t{}".format(question, reformat(entry["decomposition"]))
163,173 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_bool(entry):
    """Format a boolean-QA example as a demo string: answer, newline, question,
    and — when present — a literal '\\n'-separated context."""
    fields = entry["input"].split("\\n")
    answer = entry["output"]
    question = fields[0]
    # The last field is the context unless it equals the question (no context case).
    context = "" if fields[-1] == fields[0] else fields[-1]
    demo = answer + ' \n ' + question
    if context:
        demo += " \\n " + context
    return demo
163,174 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_extractive(entry):
    """Format an extractive-QA example as a demo string: answer, newline,
    question, and optionally a literal '\\n'-separated context."""
    fields = entry["input"].split("\\n")
    answer = entry["output"]
    question, last = fields[0], fields[-1]
    # No separate context when the input has a single field.
    context = "" if last == question else last
    if not context:
        return answer + ' \n ' + question
    return answer + ' \n ' + question + " \\n " + context
163,175 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_abstractive(entry):
    """Format an abstractive-QA example as a demo string: answer, newline,
    question, and optionally a literal '\\n'-separated context."""
    parts = entry["input"].split("\\n")
    answer = entry["output"]
    question = parts[0]
    has_context = parts[-1] != parts[0]
    if has_context:
        return answer + ' \n ' + question + " \\n " + parts[-1]
    return answer + ' \n ' + question
163,176 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_multichoice(entry):
    """Format a multi-choice example as a demo string.

    The input is literal-'\\n' separated: question, options, and optionally a
    context. Output shape: 'answer <newline> question \\n options[ \\n context]'.

    Raises:
        AssertionError: if the input has no options field (fewer than two
            '\\n'-separated parts). The malformed line is printed first.
    """
    line = entry["input"]
    answer = entry["output"]
    parts = line.split("\\n")
    # BUGFIX: was a bare `except:` around the index plus `assert False`, which
    # swallowed unrelated errors and is stripped under `python -O`. Raise
    # AssertionError explicitly so callers see the same exception type.
    if len(parts) < 2:
        print(line)
        raise AssertionError("multichoice input has no options field: %r" % line)
    options = parts[1]
    question = parts[0]
    # The last part is the context unless it coincides with the options field.
    contexts = parts[-1] if parts[-1] != parts[1] else ""
    if contexts == "":
        return answer + " \n " + question + " \\n " + options
    return answer + " \n " + question + " \\n " + options + " \\n " + contexts
163,177 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def reformat(text):
    """Number the ';'-separated decomposition steps as '1#) step 2#) step ...'."""
    pieces = text.split(";")
    return " ".join("%d#) %s" % (idx + 1, piece.strip()) for idx, piece in enumerate(pieces))

def get_break_decomp(entry):
    """Return the numbered decomposition string for a BREAK example."""
    return reformat(entry["decomposition"])
163,178 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_mtop_question(entry):
    """Extract the natural-language question from an MTOP example."""
    question = entry["question"]
    return question
163,179 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_mtop_question_decomp(entry):
    """Return 'question<TAB>logical_form' for an MTOP example."""
    return "\t".join((entry["question"], entry["logical_form"]))
163,180 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_mtop_decomp(entry):
    """Extract the logical form (target parse) from an MTOP example."""
    parse = entry["logical_form"]
    return parse
163,181 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_smcalflow_question(entry):
    """Extract the raw user utterance from an SMCalFlow example."""
    utterance = entry["user_utterance"]
    return utterance
163,182 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_smcalflow_question_decomp(entry):
    """Return 'user_utterance<TAB>lispress' for an SMCalFlow example."""
    return "\t".join((entry["user_utterance"], entry["lispress"]))
163,183 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_smcalflow_decomp(entry):
    """Extract the lispress program (target parse) from an SMCalFlow example."""
    program = entry["lispress"]
    return program
163,184 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_break():
    """Load the BREAK dataset (QDMR config) from the HuggingFace hub."""
    return load_dataset("break_data","QDMR")
163,185 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_bool():
    """Load the pre-built boolean-QA bag from disk (HuggingFace `load_from_disk`)."""
    # assert False
    return load_from_disk("../../../bool-bag.hf")#load_dataset("json", data_files="../../../.jsonl")
163,186 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_bool():
    """Load the abstractive-QA bag from disk.

    NOTE(review): named get_bool but loads the abstractive bag — looks like a
    copy-paste name; confirm the intended dataset/function name.
    """
    # assert False
    return load_from_disk("../../../abstractive-bag.hf")#load_dataset("json", data_files="../../../.jsonl")
163,187 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_bool():
    """Load the extractive-QA bag from disk.

    NOTE(review): named get_bool but loads the extractive bag — looks like a
    copy-paste name; confirm the intended dataset/function name.
    """
    # assert False
    return load_from_disk("../../../extractive-bag.hf")#load_dataset("json", data_files="../../../.jsonl")
163,188 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_bool():
    """Load the multichoice-QA bag from disk.

    NOTE(review): named get_bool but loads the multichoice bag — looks like a
    copy-paste name; confirm the intended dataset/function name.
    """
    # assert False
    return load_from_disk("../../../multichoice-bag.hf")#load_dataset("json", data_files="../../../.jsonl")
163,189 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_mtop():
    """Load the SMCalFlow dataset from the HuggingFace hub.

    NOTE(review): named get_mtop but loads smcalflow — likely a copy-paste
    name; confirm the intended dataset.
    """
    return load_dataset("iohadrubin/smcalflow",name="smcalflow")
163,190 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def get_mtop():
    """Load the MTOP semantic-parsing dataset from the HuggingFace hub."""
    return load_dataset("iohadrubin/mtop",name="mtop")
163,191 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def normalize_passage(ctx_text: str):
    """Collapse newlines to spaces and replace curly apostrophes with ASCII ones."""
    return ctx_text.replace("\n", " ").replace("’", "'")
163,192 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
def normalize_question(question: str) -> str:
    """Replace curly apostrophes with ASCII apostrophes in a question string."""
    return question.replace("’", "'")
163,193 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
logger = logging.getLogger(__name__)
class Table(object):
def __init__(self, caption=""):
def __str__(self):
def get_key(self) -> str:
def visit(self, tokens_function, include_caption: bool = False) -> bool:
def to_dpr_json(self):
class NQTableParser(object):
def __init__(self, tokens, is_html_mask, title):
def parse(self) -> List[Table]:
def _on_table_start(self):
def _on_table_end(self):
def _onRowStart(self):
def _onRowEnd(self):
def _onCellStart(self):
def _on_cell_end(self):
def _on_content(self, token):
def read_nq_tables_jsonl(path: str) -> Dict[str, Table]:
    """Parse an NQ tables jsonl file into a dict of unique Table objects.

    Pages flagged with the Wikipedia 'multiple issues' banner are skipped;
    tables with at most one non-empty row are ignored; tables are deduplicated
    by their full string rendering (Table.get_key). Per-category counts are
    logged at the end.
    """
    tables_with_issues = 0
    single_row_tables = 0
    nested_tables = 0
    regular_tables = 0
    total_tables = 0
    total_rows = 0
    tables_dict = {}
    with jsonlines.open(path, mode="r") as jsonl_reader:
        for jline in jsonl_reader:
            tokens = jline["tokens"]
            if "( hide ) This section has multiple issues" in " ".join(tokens):
                tables_with_issues += 1
                continue
            mask = jline["html_mask"]
            # page_url = jline["doc_url"]
            title = jline["title"]
            p = NQTableParser(tokens, mask, title)
            tables = p.parse()
            # table = parse_table(tokens, mask)
            # every table beyond the first parsed one is a nested table
            nested_tables += len(tables[1:])
            for t in tables:
                total_tables += 1
                # calc amount of non empty rows
                non_empty_rows = sum(
                    [
                        1
                        for r in t.body
                        if r.cells and any([True for c in r.cells if c.value_tokens])
                    ]
                )
                if non_empty_rows <= 1:
                    single_row_tables += 1
                else:
                    regular_tables += 1
                    total_rows += len(t.body)
                    # deduplicate by the table's string key
                    if t.get_key() not in tables_dict:
                        tables_dict[t.get_key()] = t
            if len(tables_dict) % 1000 == 0:
                logger.info("tables_dict %d", len(tables_dict))
    logger.info("regular tables %d", regular_tables)
    logger.info("tables_with_issues %d", tables_with_issues)
    logger.info("single_row_tables %d", single_row_tables)
    logger.info("nested_tables %d", nested_tables)
    return tables_dict
163,194 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
class Table(object):
def __init__(self, caption=""):
def __str__(self):
def get_key(self) -> str:
def visit(self, tokens_function, include_caption: bool = False) -> bool:
def to_dpr_json(self):
def get_table_string_for_answer_check(table: Table):  # this doesn't use caption
    """Linearize all body rows to one string (' . '-joined cells, rows separated
    by ' . '); the caption is deliberately excluded."""
    pieces = []
    for row in table.body:
        pieces.append(" . ".join(" ".join(cell.value_tokens) for cell in row.cells))
        pieces.append(" . ")
    return "".join(pieces)
163,195 | import collections
import csv
import glob
import logging
import os
import random
from typing import Dict, List, Tuple
from datasets import load_dataset,load_from_disk
from dpr.utils.data_utils import load_train_dataset
import jsonlines
import numpy as np
import torch
from omegaconf import DictConfig
from torch import Tensor as T
import random
from dpr.utils.data_utils import read_data_from_json_files, Tensorizer,App
logger = logging.getLogger(__name__)
class Table(object):
    """A parsed table: a caption plus a list of Row objects (type defined elsewhere)."""

    def __init__(self, caption=""):
        self.caption = caption
        self.body: List[Row] = []
        # lazily computed string identity, see get_key()
        self.key = None
        # set externally; serialized as gold_match=1 in to_dpr_json()
        self.gold_match = False

    def __str__(self):
        table_str = "<T>: {}\n".format(self.caption)
        table_str += " rows:\n"
        for i, r in enumerate(self.body):
            table_str += " row #{}: {}\n".format(i, str(r))
        return table_str

    def get_key(self) -> str:
        """Return (and cache) the full string rendering, used as a dedup key."""
        if not self.key:
            self.key = str(self)
        return self.key

    def visit(self, tokens_function, include_caption: bool = False) -> bool:
        """Apply tokens_function to the caption (optional) and every row.

        NOTE(review): annotated -> bool but nothing is returned; callers
        presumably rely only on tokens_function side effects — confirm.
        """
        if include_caption:
            tokens_function(self.caption, -1, -1)
        for i, r in enumerate(self.body):
            r.visit(tokens_function, i)

    def to_dpr_json(self):
        """Serialize to the DPR json format: caption + rows (+ gold_match flag)."""
        r = {
            "caption": self.caption,
            "rows": [r.to_dpr_json(i) for i, r in enumerate(self.body)],
        }
        if self.gold_match:
            r["gold_match"] = 1
        return r
class JsonLTablesQADataset(Dataset):
    """Tables-QA dataset read from jsonlines files, serving BiEncoderSample items.

    Each json line holds a question plus positive / hard-negative table
    contexts; tables are linearized to plain text via the split_type function.
    """

    def __init__(
        self,
        file: str,
        is_train_set: bool,
        selector: DictConfig = None,
        shuffle_positives: bool = False,
        max_negatives: int = 1,
        seed: int = 0,
        max_len=100,
        split_type: str = "type1",
    ):
        super().__init__(selector, shuffle_positives=shuffle_positives)
        self.data_files = glob.glob(file)
        self.data = []
        self.is_train_set = is_train_set
        self.max_negatives = max_negatives
        # dedicated RNG so negative/positive shuffling is reproducible per seed
        self.rnd = random.Random(seed)
        # max number of words in a linearized table
        self.max_len = max_len
        self.linearize_func = JsonLTablesQADataset.get_lin_func(split_type)

    def load_data(self):
        """Read every matched jsonl file, keeping only samples with a positive ctx."""
        data = []
        for path in self.data_files:
            with jsonlines.open(path, mode="r") as jsonl_reader:
                data += [jline for jline in jsonl_reader]
        # filter those without positive ctx
        self.data = [r for r in data if len(r["positive_ctxs"]) > 0]
        logger.info("Total cleaned data size: {}".format(len(self.data)))

    def __getitem__(self, index) -> BiEncoderSample:
        """Build a BiEncoderSample: 1 positive and up to max_negatives hard negatives."""
        json_sample = self.data[index]
        r = BiEncoderSample()
        r.query = json_sample["question"]
        positive_ctxs = json_sample["positive_ctxs"]
        hard_negative_ctxs = json_sample["hard_negative_ctxs"]
        if self.shuffle_positives:
            self.rnd.shuffle(positive_ctxs)
        if self.is_train_set:
            self.rnd.shuffle(hard_negative_ctxs)
        positive_ctxs = positive_ctxs[0:1]
        hard_negative_ctxs = hard_negative_ctxs[0 : self.max_negatives]
        r.positive_passages = [
            BiEncoderPassage(self.linearize_func(self, ctx, True), ctx["caption"])
            for ctx in positive_ctxs
        ]
        r.negative_passages = []
        r.hard_negative_passages = [
            BiEncoderPassage(self.linearize_func(self, ctx, False), ctx["caption"])
            for ctx in hard_negative_ctxs
        ]
        return r

    def __len__(self):
        return len(self.data)

    @classmethod
    def get_lin_func(cls, split_type: str):
        # BUGFIX: restored @classmethod — __init__ calls this as
        # JsonLTablesQADataset.get_lin_func(split_type) with a single argument,
        # which raises TypeError without the decorator.
        f = {
            "type1": JsonLTablesQADataset._linearize_table,
        }
        return f[split_type]

    @classmethod
    def split_table(cls, t: dict, max_length: int):
        """Split a table dict into linearized chunks of roughly max_length words.

        The first non-empty row acts as a header and is repeated in every chunk.
        BUGFIX: restored @classmethod — split_tables_to_chunks calls it on the
        class as JsonLTablesQADataset.split_table(t, max_table_len).
        """
        rows = t["rows"]
        header = None
        header_len = 0
        start_row = 0
        # get the first non empty row as the "header"
        for i, r in enumerate(rows):
            row_lin, row_len = JsonLTablesQADataset._linearize_row(r)
            if len(row_lin) > 1:  # TODO: change to checking cell value tokens
                header = row_lin
                header_len += row_len
                start_row = i
                break
        chunks = []
        current_rows = [header]
        current_len = header_len
        for i in range(start_row + 1, len(rows)):
            row_lin, row_len = JsonLTablesQADataset._linearize_row(rows[i])
            if len(row_lin) > 1:  # TODO: change to checking cell value tokens
                current_rows.append(row_lin)
                current_len += row_len
            if current_len >= max_length:
                # linearize chunk
                linearized_str = "\n".join(current_rows) + "\n"
                chunks.append(linearized_str)
                current_rows = [header]
                current_len = header_len
        if len(current_rows) > 1:
            linearized_str = "\n".join(current_rows) + "\n"
            chunks.append(linearized_str)
        return chunks

    def _linearize_table(self, t: dict, is_positive: bool) -> str:
        """Linearize a table: header row, then answer rows (positives only),
        then random/sequential filler rows up to self.max_len words."""
        rows = t["rows"]
        selected_rows = set()
        rows_linearized = []
        total_words_len = 0
        # get the first non empty row as the "header"
        for i, r in enumerate(rows):
            row_lin, row_len = JsonLTablesQADataset._linearize_row(r)
            if len(row_lin) > 1:  # TODO: change to checking cell value tokens
                selected_rows.add(i)
                rows_linearized.append(row_lin)
                total_words_len += row_len
                break
        # split to chunks
        if is_positive:
            row_idx_with_answers = [ap[0] for ap in t["answer_pos"]]
            if self.shuffle_positives:
                self.rnd.shuffle(row_idx_with_answers)
            for i in row_idx_with_answers:
                if i not in selected_rows:
                    row_lin, row_len = JsonLTablesQADataset._linearize_row(rows[i])
                    selected_rows.add(i)
                    rows_linearized.append(row_lin)
                    total_words_len += row_len
                if total_words_len >= self.max_len:
                    break
        if total_words_len < self.max_len:  # append random rows
            if self.is_train_set:
                rows_indexes = np.random.permutation(range(len(rows)))
            else:
                rows_indexes = [*range(len(rows))]
            for i in rows_indexes:
                if i not in selected_rows:
                    row_lin, row_len = JsonLTablesQADataset._linearize_row(rows[i])
                    if len(row_lin) > 1:  # TODO: change to checking cell value tokens
                        selected_rows.add(i)
                        rows_linearized.append(row_lin)
                        total_words_len += row_len
                    if total_words_len >= self.max_len:
                        break
        linearized_str = ""
        for r in rows_linearized:
            linearized_str += r + "\n"
        return linearized_str

    @classmethod
    def _linearize_row(cls, row: dict) -> Tuple[str, int]:
        """Return (comma-joined cell values, total word count) for one row.

        BUGFIX: restored @classmethod — called on the class with a single arg.
        """
        cell_values = [c["value"] for c in row["columns"]]
        total_words = sum(len(c.split(" ")) for c in cell_values)
        return ", ".join([c["value"] for c in row["columns"]]), total_words
def split_tables_to_chunks(
    tables_dict: Dict[str, Table], max_table_len: int, split_type: str = "type1"
) -> List[Tuple[int, str, str, int]]:
    """Split every table into linearized text chunks of up to max_table_len words.

    Returns a flat list of (chunk_id, chunk_text, table_caption, table_index)
    tuples; chunk_id is globally sequential, table_index refers to the table's
    position in the input dict's iteration order.
    """
    tables_as_dicts = [t.to_dpr_json() for k, t in tables_dict.items()]
    chunks = []
    chunk_id = 0
    for i, t in enumerate(tables_as_dicts):
        # TODO: support other types
        assert split_type == "type1"
        table_chunks = JsonLTablesQADataset.split_table(t, max_table_len)
        title = t["caption"]
        for c in table_chunks:
            # chunk id , text, title, external_id
            chunks.append((chunk_id, c, title, i))
            chunk_id += 1
        if i % 1000 == 0:
            logger.info("Splitted %d tables to %d chunks", i, len(chunks))
    return chunks
163,201 | import logging
from typing import Tuple
import torch
from torch import Tensor as T
from torch import nn
from transformers.models.bert import BertConfig, BertModel
from transformers.optimization import AdamW
from transformers.models.bert import BertTokenizer
from transformers.models.roberta import RobertaTokenizer
from dpr.models.biencoder import BiEncoder
from dpr.utils.data_utils import Tensorizer
from .reader import Reader
def get_bert_tensorizer(cfg, tokenizer=None):
def get_optimizer(
model: nn.Module,
learning_rate: float = 1e-5,
adam_eps: float = 1e-8,
weight_decay: float = 0.0,
) -> torch.optim.Optimizer:
class HFBertEncoder(BertModel):
def __init__(self, config, project_dim: int = 0):
def init_encoder(
cls,
cfg_name: str,
projection_dim: int = 0,
dropout: float = 0.1,
pretrained: bool = True,
**kwargs
) -> BertModel:
def forward(
self,
input_ids: T,
token_type_ids: T,
attention_mask: T,
representation_token_pos=0,
) -> Tuple[T, ...]:
def get_out_size(self):
class BiEncoder(nn.Module):
def __init__(
self,
question_model: nn.Module,
ctx_model: nn.Module,
fix_q_encoder: bool = False,
fix_ctx_encoder: bool = False,
):
def get_representation(
sub_model: nn.Module,
ids: T,
segments: T,
attn_mask: T,
fix_encoder: bool = False,
representation_token_pos=0,
) -> (T, T, T):
def forward(
self,
question_ids: T,
question_segments: T,
question_attn_mask: T,
context_ids: T,
ctx_segments: T,
ctx_attn_mask: T,
encoder_type: str = None,
representation_token_pos=0,
) -> Tuple[T, T]:
def create_biencoder_input(
cls,
samples: List,
tensorizer: Tensorizer,
insert_title: bool,
num_hard_negatives: int = 0,
num_other_negatives: int = 0,
shuffle: bool = True,
shuffle_positives: bool = True,
hard_neg_fallback: bool = True,
) -> BiEncoderBatch:
def create_biencoder_input2(
cls,
samples: List[BiEncoderSample],
tensorizer: Tensorizer,
insert_title: bool,
num_hard_negatives: int = 0,
num_other_negatives: int = 0,
shuffle: bool = True,
shuffle_positives: bool = False,
hard_neg_fallback: bool = True,
query_token: str = None,
) -> BiEncoderBatch:
def load_state(self, saved_state: CheckpointState):
def get_state_dict(self):
def get_bert_biencoder_components(cfg, inference_only: bool = False, **kwargs):
    """Build (tensorizer, biencoder, optimizer) from an omegaconf/hydra config.

    Question and context encoders are initialized from the same pretrained
    model config. The optimizer is None when inference_only is True.
    """
    dropout = cfg.encoder.dropout if hasattr(cfg.encoder, "dropout") else 0.0
    question_encoder = HFBertEncoder.init_encoder(
        cfg.encoder.pretrained_model_cfg,
        projection_dim=cfg.encoder.projection_dim,
        dropout=dropout,
        pretrained=cfg.encoder.pretrained,
        **kwargs
    )
    ctx_encoder = HFBertEncoder.init_encoder(
        cfg.encoder.pretrained_model_cfg,
        projection_dim=cfg.encoder.projection_dim,
        dropout=dropout,
        pretrained=cfg.encoder.pretrained,
        **kwargs
    )
    # optionally freeze the context encoder (e.g. for query-only fine-tuning)
    fix_ctx_encoder = cfg.fix_ctx_encoder if hasattr(cfg, "fix_ctx_encoder") else False
    biencoder = BiEncoder(
        question_encoder, ctx_encoder, fix_ctx_encoder=fix_ctx_encoder
    )
    optimizer = (
        get_optimizer(
            biencoder,
            learning_rate=cfg.train.learning_rate,
            adam_eps=cfg.train.adam_eps,
            weight_decay=cfg.train.weight_decay,
        )
        if not inference_only
        else None
    )
    tensorizer = get_bert_tensorizer(cfg)#dw
    return tensorizer, biencoder, optimizer
163,202 | import logging
from typing import Tuple
import torch
from torch import Tensor as T
from torch import nn
from transformers.models.bert import BertConfig, BertModel
from transformers.optimization import AdamW
from transformers.models.bert import BertTokenizer
from transformers.models.roberta import RobertaTokenizer
from dpr.models.biencoder import BiEncoder
from dpr.utils.data_utils import Tensorizer
from .reader import Reader
def get_bert_tensorizer(cfg, tokenizer=None):
    """Create a BertTensorizer from config.

    Builds a BERT tokenizer (and registers any configured special tokens)
    only when no tokenizer is supplied by the caller.
    """
    sequence_length = cfg.encoder.sequence_length
    pretrained_model_cfg = cfg.encoder.pretrained_model_cfg
    if not tokenizer:
        tokenizer = get_bert_tokenizer(
            pretrained_model_cfg, do_lower_case=cfg.do_lower_case
        )
        if cfg.special_tokens:
            _add_special_tokens(tokenizer, cfg.special_tokens)
    return BertTensorizer(tokenizer, sequence_length)
def get_optimizer(
    model: nn.Module,
    learning_rate: float = 1e-5,
    adam_eps: float = 1e-8,
    weight_decay: float = 0.0,
) -> torch.optim.Optimizer:
    """Build an AdamW optimizer with weight decay disabled for biases and
    LayerNorm weights (standard BERT fine-tuning recipe)."""
    no_decay_markers = ("bias", "LayerNorm.weight")
    decay_params = []
    no_decay_params = []
    for name, param in model.named_parameters():
        if any(marker in name for marker in no_decay_markers):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    grouped_parameters = [
        {"params": decay_params, "weight_decay": weight_decay},
        {"params": no_decay_params, "weight_decay": 0.0},
    ]
    return AdamW(grouped_parameters, lr=learning_rate, eps=adam_eps)
class HFBertEncoder(BertModel):
    """BERT encoder with an optional linear projection applied to the pooled output."""

    def __init__(self, config, project_dim: int = 0):
        BertModel.__init__(self, config)
        assert config.hidden_size > 0, "Encoder hidden_size can't be zero"
        # optional projection of the representation down/up to project_dim
        self.encode_proj = (
            nn.Linear(config.hidden_size, project_dim) if project_dim != 0 else None
        )
        self.init_weights()

    def init_encoder(
        cls,
        cfg_name: str,
        projection_dim: int = 0,
        dropout: float = 0.1,
        pretrained: bool = True,
        **kwargs
    ) -> BertModel:
        """Construct an encoder from a pretrained HF checkpoint (or fresh config).

        NOTE(review): takes `cls` and calls cls.from_pretrained — reads like a
        @classmethod whose decorator was lost in this copy; confirm upstream.
        """
        cfg = BertConfig.from_pretrained(cfg_name if cfg_name else "bert-base-uncased")
        if dropout != 0:
            cfg.attention_probs_dropout_prob = dropout
            cfg.hidden_dropout_prob = dropout
        if pretrained:
            return cls.from_pretrained(
                cfg_name, config=cfg, project_dim=projection_dim, **kwargs
            )
        else:
            return HFBertEncoder(cfg, project_dim=projection_dim)

    def forward(
        self,
        input_ids: T,
        token_type_ids: T,
        attention_mask: T,
        representation_token_pos=0,
    ) -> Tuple[T, ...]:
        """Run BERT and return (sequence_output, pooled_output, hidden_states).

        pooled_output is the hidden state at representation_token_pos — either
        a fixed int position (default 0, i.e. [CLS]) or a per-sample position
        tensor whose column 1 holds the token index for each batch row.
        """
        if self.config.output_hidden_states:
            sequence_output, pooled_output, hidden_states = super().forward(
                input_ids=input_ids,
                token_type_ids=token_type_ids,
                attention_mask=attention_mask,
                return_dict=False,
            )
        else:
            hidden_states = None
            sequence_output, pooled_output = super().forward(
                input_ids=input_ids,
                token_type_ids=token_type_ids,
                attention_mask=attention_mask,
                return_dict=False,
            )
        if isinstance(representation_token_pos, int):
            pooled_output = sequence_output[:, representation_token_pos, :]
        else:  # treat as a tensor
            bsz = sequence_output.size(0)
            assert (
                representation_token_pos.size(0) == bsz
            ), "query bsz={} while representation_token_pos bsz={}".format(
                bsz, representation_token_pos.size(0)
            )
            pooled_output = torch.stack(
                [
                    sequence_output[i, representation_token_pos[i, 1], :]
                    for i in range(bsz)
                ]
            )
        if self.encode_proj:
            pooled_output = self.encode_proj(pooled_output)
        return sequence_output, pooled_output, hidden_states

    def get_out_size(self):
        """Output embedding size: projection dim when set, else BERT hidden size."""
        if self.encode_proj:
            return self.encode_proj.out_features
        return self.config.hidden_size
class Reader(nn.Module):
    """Extractive reader: span start/end scores plus a passage relevance score."""

    def __init__(self, encoder: nn.Module, hidden_size):
        super(Reader, self).__init__()
        self.encoder = encoder
        # two logits per token: span start / span end
        self.qa_outputs = nn.Linear(hidden_size, 2)
        # one relevance logit per passage (from the [CLS] representation)
        self.qa_classifier = nn.Linear(hidden_size, 1)
        init_weights([self.qa_outputs, self.qa_classifier])

    def forward(self, input_ids: T, attention_mask: T, start_positions=None, end_positions=None, answer_mask=None):
        # notations: N - number of questions in a batch, M - number of passages per questions, L - sequence length
        N, M, L = input_ids.size()
        start_logits, end_logits, relevance_logits = self._forward(input_ids.view(N * M, L),
                                                                   attention_mask.view(N * M, L))
        # training returns the loss; eval returns per-passage logits reshaped to (N, M, ...)
        if self.training:
            return compute_loss(start_positions, end_positions, answer_mask, start_logits, end_logits, relevance_logits,
                                N, M)
        return start_logits.view(N, M, L), end_logits.view(N, M, L), relevance_logits.view(N, M)

    def _forward(self, input_ids, attention_mask):
        """Encode flattened (N*M, L) passages and project to span/rank logits."""
        # TODO: provide segment values
        sequence_output, _pooled_output, _hidden_states = self.encoder(input_ids, None, attention_mask)
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        rank_logits = self.qa_classifier(sequence_output[:, 0, :])
        return start_logits, end_logits, rank_logits
def get_bert_reader_components(cfg, inference_only: bool = False, **kwargs):
    """Build (tensorizer, reader, optimizer) from an omegaconf/hydra config.

    The optimizer is None when inference_only is True.
    """
    dropout = cfg.encoder.dropout if hasattr(cfg.encoder, "dropout") else 0.0
    encoder = HFBertEncoder.init_encoder(
        cfg.encoder.pretrained_model_cfg,
        projection_dim=cfg.encoder.projection_dim,
        dropout=dropout,
        pretrained=cfg.encoder.pretrained,
        **kwargs
    )
    hidden_size = encoder.config.hidden_size
    reader = Reader(encoder, hidden_size)
    optimizer = (
        get_optimizer(
            reader,
            learning_rate=cfg.train.learning_rate,
            adam_eps=cfg.train.adam_eps,
            weight_decay=cfg.train.weight_decay,
        )
        if not inference_only
        else None
    )
    tensorizer = get_bert_tensorizer(cfg)
    return tensorizer, reader, optimizer
163,203 | import logging
from typing import Tuple
import torch
from pytext.models.representations.transformer_sentence_encoder import TransformerSentenceEncoder
from pytext.optimizer.optimizers import AdamW
from torch import Tensor as T
from torch import nn
from .biencoder import BiEncoder
def get_optimizer(model: nn.Module, learning_rate: float = 1e-5, adam_eps: float = 1e-8,
                  weight_decay: float = 0.0) -> torch.optim.Optimizer:
    """Create a pytext AdamW optimizer for `model` from the given hyperparameters."""
    cfg = AdamW.Config()
    cfg.lr = learning_rate
    cfg.weight_decay = weight_decay
    cfg.eps = adam_eps
    optimizer = AdamW.from_config(cfg, model)
    return optimizer
class PytextBertEncoder(TransformerSentenceEncoder):
    """Pytext transformer sentence encoder with an optional output projection."""

    def __init__(self, config: TransformerSentenceEncoder.Config,
                 padding_idx: int,
                 vocab_size: int,
                 projection_dim: int = 0,
                 *args,
                 **kwarg
                 ):
        TransformerSentenceEncoder.__init__(self, config, False, padding_idx, vocab_size, *args, **kwarg)
        assert config.embedding_dim > 0, 'Encoder hidden_size can\'t be zero'
        # optional projection of the pooled representation to projection_dim
        self.encode_proj = nn.Linear(config.embedding_dim, projection_dim) if projection_dim != 0 else None

    def init_encoder(cls, pretrained_file: str = None, projection_dim: int = 0, dropout: float = 0.1,
                     vocab_size: int = 0,
                     padding_idx: int = 0, **kwargs):
        """Construct the encoder, optionally loading a pretrained state dict.

        NOTE(review): takes `cls` — reads like a @classmethod whose decorator
        was lost in this copy; confirm upstream. Relies on module-level
        get_pytext_bert_base_cfg() and logger defined elsewhere in the file.
        """
        cfg = get_pytext_bert_base_cfg()
        if dropout != 0:
            cfg.dropout = dropout
            cfg.attention_dropout = dropout
            cfg.activation_dropout = dropout
        encoder = cls(cfg, padding_idx, vocab_size, projection_dim, **kwargs)
        if pretrained_file:
            logger.info('Loading pre-trained pytext encoder state from %s', pretrained_file)
            state = torch.load(pretrained_file)
            encoder.load_state_dict(state)
        return encoder

    def forward(self, input_ids: T, token_type_ids: T, attention_mask: T) -> Tuple[T, ...]:
        """Return (None, pooled_output, None) to mirror the HF encoder interface."""
        pooled_output = super().forward((input_ids, attention_mask, token_type_ids, None))[0]
        if self.encode_proj:
            pooled_output = self.encode_proj(pooled_output)
        return None, pooled_output, None

    def get_out_size(self):
        """Output embedding size: projection dim when set, else the encoder dim."""
        if self.encode_proj:
            return self.encode_proj.out_features
        return self.representation_dim
class BiEncoder(nn.Module):
    """Bi-Encoder model component. Encapsulates query/question and context/passage encoders."""

    def __init__(
        self,
        question_model: nn.Module,
        ctx_model: nn.Module,
        fix_q_encoder: bool = False,
        fix_ctx_encoder: bool = False,
    ):
        super(BiEncoder, self).__init__()
        self.question_model = question_model
        self.ctx_model = ctx_model
        # a "fixed" encoder is run under no_grad (see get_representation)
        self.fix_q_encoder = fix_q_encoder
        self.fix_ctx_encoder = fix_ctx_encoder

    @staticmethod
    def get_representation(
        sub_model: nn.Module,
        ids: T,
        segments: T,
        attn_mask: T,
        fix_encoder: bool = False,
        representation_token_pos=0,
    ) -> (T, T, T):
        """Run *sub_model* and return (sequence_output, pooled_output, hidden_states).

        Returns (None, None, None) when *ids* is None.

        fix: declared @staticmethod — the original had no decorator and no
        ``self``, so the ``self.get_representation(q_encoder, ...)`` calls in
        forward() bound the instance to ``sub_model`` and shifted every
        argument by one position.
        """
        sequence_output = None
        pooled_output = None
        hidden_states = None
        if ids is not None:
            if fix_encoder:
                # frozen encoder: no gradients flow into its weights...
                with torch.no_grad():
                    sequence_output, pooled_output, hidden_states = sub_model(
                        ids,
                        segments,
                        attn_mask,
                        representation_token_pos=representation_token_pos,
                    )

                if sub_model.training:
                    # ...but downstream layers still need grads w.r.t. the outputs
                    sequence_output.requires_grad_(requires_grad=True)
                    pooled_output.requires_grad_(requires_grad=True)
            else:
                sequence_output, pooled_output, hidden_states = sub_model(
                    ids,
                    segments,
                    attn_mask,
                    representation_token_pos=representation_token_pos,
                )

        return sequence_output, pooled_output, hidden_states

    def forward(
        self,
        question_ids: T,
        question_segments: T,
        question_attn_mask: T,
        context_ids: T,
        ctx_segments: T,
        ctx_attn_mask: T,
        encoder_type: str = None,
        representation_token_pos=0,
    ) -> Tuple[T, T]:
        """Encode questions and contexts; return their pooled representations.

        :param encoder_type: "question"/"ctx"/None selects which sub-encoder
            handles which input (None keeps the default assignment)
        """
        # fix: the original wrapped this body in `with autocase():` — an
        # undefined name (NameError at runtime); presumed typo for
        # torch.cuda.amp.autocast (mixed-precision) — TODO confirm intent.
        with torch.cuda.amp.autocast():
            q_encoder = (
                self.question_model
                if encoder_type is None or encoder_type == "question"
                else self.ctx_model
            )
            _q_seq, q_pooled_out, _q_hidden = self.get_representation(
                q_encoder,
                question_ids,
                question_segments,
                question_attn_mask,
                self.fix_q_encoder,
                representation_token_pos=representation_token_pos,
            )

            ctx_encoder = (
                self.ctx_model
                if encoder_type is None or encoder_type == "ctx"
                else self.question_model
            )
            _ctx_seq, ctx_pooled_out, _ctx_hidden = self.get_representation(
                ctx_encoder, context_ids, ctx_segments, ctx_attn_mask, self.fix_ctx_encoder
            )

        return q_pooled_out, ctx_pooled_out

    # TODO delete once moved to the new method
    @classmethod
    def create_biencoder_input(
        cls,
        samples: List,
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = True,
        hard_neg_fallback: bool = True,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of data items (from json) to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []

        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample["positive_ctxs"]
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample["positive_ctxs"][0]

            neg_ctxs = sample["negative_ctxs"]
            hard_neg_ctxs = sample["hard_negative_ctxs"]
            # fix: `question` was never assigned in the original loop, raising
            # NameError on the append below; read it from the sample dict
            question = sample["question"]

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx["text"],
                    title=ctx["title"] if (insert_title and "title" in ctx) else None,
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )
            question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    @classmethod
    def create_biencoder_input2(
        cls,
        samples: List[BiEncoderSample],
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
        query_token: str = None,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of BiEncoderSample-s to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :param query_token: optional special token to prepend to (or anchor inside) the query
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []

        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample.positive_passages
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample.positive_passages[0]

            neg_ctxs = sample.negative_passages
            hard_neg_ctxs = sample.hard_negative_passages
            question = sample.query
            # question = normalize_question(sample.query)

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx.text, title=ctx.title if (insert_title and ctx.title) else None
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )

            if query_token:
                # TODO: tmp workaround for EL, remove or revise
                if query_token == "[START_ENT]":
                    query_span = _select_span_with_token(
                        question, tensorizer, token_str=query_token
                    )
                    question_tensors.append(query_span)
                else:
                    question_tensors.append(
                        tensorizer.text_to_tensor(" ".join([query_token, question]))
                    )
            else:
                question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    def load_state(self, saved_state: CheckpointState):
        """Load model weights from a checkpoint, tolerating missing keys."""
        # TODO: make a long term HF compatibility fix
        # newer HF checkpoints carry position_ids buffers this model does not
        # define; drop each independently (fix: the original deleted the
        # ctx_model key unconditionally whenever the question_model key was
        # present, raising KeyError if only one of them existed)
        if "question_model.embeddings.position_ids" in saved_state.model_dict:
            del saved_state.model_dict["question_model.embeddings.position_ids"]
        if "ctx_model.embeddings.position_ids" in saved_state.model_dict:
            del saved_state.model_dict["ctx_model.embeddings.position_ids"]
        self.load_state_dict(saved_state.model_dict, strict=False)

    def get_state_dict(self):
        """Return the full model state dict."""
        return self.state_dict()
class BertTensorizer(Tensorizer):
    """Tensorizer that converts text (optionally prefixed by a title) into
    BERT token-id tensors, with manual padding/truncation to ``max_length``.
    """

    def __init__(
        self, tokenizer: BertTokenizer, max_length: int, pad_to_max: bool = True
    ):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.pad_to_max = pad_to_max

    def text_to_tensor(
        self,
        text: str,
        title: str = None,
        add_special_tokens: bool = True,
        apply_max_len: bool = True,
    ):
        """Tokenize *text* (pairing it with *title* when given) into a 1-D id tensor."""
        stripped = text.strip()

        # the tokenizer's automatic padding is disabled on purpose — its
        # behavior has been inconsistent across versions, so padding is done
        # manually below
        # TODO: move max len to methods params?
        limit = self.max_length if apply_max_len else 10000
        encode_kwargs = dict(
            add_special_tokens=add_special_tokens,
            max_length=limit,
            pad_to_max_length=False,
            truncation=True,
        )
        if title:
            token_ids = self.tokenizer.encode(title, text_pair=stripped, **encode_kwargs)
        else:
            token_ids = self.tokenizer.encode(stripped, **encode_kwargs)

        target_len = self.max_length
        missing = target_len - len(token_ids)
        if self.pad_to_max and missing > 0:
            token_ids = token_ids + [self.tokenizer.pad_token_id] * missing
        if len(token_ids) >= target_len:
            if apply_max_len:
                token_ids = token_ids[0:target_len]
            # force the sequence to end with [SEP] (this also overwrites a
            # trailing pad token when the sequence was just padded)
            token_ids[-1] = self.tokenizer.sep_token_id

        return torch.tensor(token_ids)

    def get_pair_separator_ids(self) -> T:
        """[SEP] id as a 1-element tensor, used to join text pairs."""
        return torch.tensor([self.tokenizer.sep_token_id])

    def get_pad_id(self) -> int:
        """Padding token id."""
        return self.tokenizer.pad_token_id

    def get_attn_mask(self, tokens_tensor: T) -> T:
        """Boolean mask: True for real tokens, False for padding."""
        return tokens_tensor != self.get_pad_id()

    def is_sub_word_id(self, token_id: int):
        """True if *token_id* is a WordPiece continuation ('##...') token."""
        token = self.tokenizer.convert_ids_to_tokens([token_id])[0]
        return token.startswith("##") or token.startswith(" ##")

    def to_string(self, token_ids, skip_special_tokens=True):
        """Decode ids back to text."""
        return self.tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)

    def set_pad_to_max(self, do_pad: bool):
        """Toggle padding to ``max_length``."""
        self.pad_to_max = do_pad

    def get_token_id(self, token: str) -> int:
        """Vocabulary id of *token* (raises KeyError if unknown)."""
        return self.tokenizer.vocab[token]
def get_bert_biencoder_components(args, inference_only: bool = False):
    """Build (tensorizer, biencoder, optimizer) using pytext encoders.

    :param inference_only: when True, the optimizer slot is None
    """
    # bert tokenization is the same in HF and pytext/fairseq, so HF's
    # tokenizer implementation is reused here for now
    from .hf_models import get_tokenizer, BertTensorizer

    tokenizer = get_tokenizer(args.pretrained_model_cfg, do_lower_case=args.do_lower_case)

    # NOTE(review): padding_idx uses pad_token_type_id — verify that
    # pad_token_id was not intended here
    encoder_kwargs = dict(
        projection_dim=args.projection_dim,
        dropout=args.dropout,
        vocab_size=tokenizer.vocab_size,
        padding_idx=tokenizer.pad_token_type_id,
    )
    question_encoder = PytextBertEncoder.init_encoder(args.pretrained_file, **encoder_kwargs)
    ctx_encoder = PytextBertEncoder.init_encoder(args.pretrained_file, **encoder_kwargs)

    biencoder = BiEncoder(question_encoder, ctx_encoder)

    if inference_only:
        optimizer = None
    else:
        optimizer = get_optimizer(
            biencoder,
            learning_rate=args.learning_rate,
            adam_eps=args.adam_eps,
            weight_decay=args.weight_decay,
        )

    return BertTensorizer(tokenizer, args.sequence_length), biencoder, optimizer
import collections
import logging
import random
from typing import Tuple, List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import BiEncoderSample
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import CheckpointState
rnd = random.Random(0)
class Tensorizer(object):
    """
    Component for all text to model input data conversions and related utility methods
    """

    # Note: title, if present, is supposed to be put before text (i.e. optional title + document body)
    def text_to_tensor(
        self,
        text: str,
        title: str = None,
        add_special_tokens: bool = True,
        apply_max_len: bool = True,
    ):
        """Convert *text* (optionally prefixed by *title*) into a token-id tensor."""
        raise NotImplementedError

    def get_pair_separator_ids(self) -> T:
        """Return the separator token id(s) used between text pairs, as a tensor."""
        raise NotImplementedError

    def get_pad_id(self) -> int:
        """Return the padding token id."""
        raise NotImplementedError

    def get_attn_mask(self, tokens_tensor: T):
        """Return an attention mask distinguishing real tokens from padding."""
        raise NotImplementedError

    def is_sub_word_id(self, token_id: int):
        """Return True when *token_id* is a sub-word continuation token."""
        raise NotImplementedError

    def to_string(self, token_ids, skip_special_tokens=True):
        """Decode *token_ids* back to a text string."""
        raise NotImplementedError

    def set_pad_to_max(self, pad: bool):
        """Enable/disable padding sequences to the maximum length."""
        raise NotImplementedError

    def get_token_id(self, token: str) -> int:
        """Return the vocabulary id of *token*."""
        raise NotImplementedError
def _pad_to_len(seq: T, pad_id: int, max_len: int):
s_len = seq.size(0)
if s_len > max_len:
return seq[0: max_len]
return torch.cat([seq, torch.Tensor().new_full((max_len - s_len,), pad_id, dtype=torch.long)], dim=0)
def _select_span_with_token(
    text: str, tensorizer: Tensorizer, token_str: str = "[START_ENT]"
) -> T:
    """Return a token tensor for *text* guaranteed to contain *token_str*.

    If the default (max-length-truncated) encoding drops the special token,
    the text is re-encoded without the length cap and a window around the
    token is selected instead; the window start is randomly shifted to avoid
    overfitting to a fixed token position.

    :raises RuntimeError: if *token_str* never occurs in the encoded text
    """
    token_id = tensorizer.get_token_id(token_str)  # renamed from `id` (shadowed builtin)
    query_tensor = tensorizer.text_to_tensor(text)

    if token_id in query_tensor:
        return query_tensor

    # the token was truncated away: re-encode without the max-length cap
    query_tensor_full = tensorizer.text_to_tensor(text, apply_max_len=False)
    token_indexes = (query_tensor_full == token_id).nonzero()
    if token_indexes.size(0) == 0:
        # fix: error message said "toke not found"
        raise RuntimeError(
            "[START_ENT] token not found for Entity Linking sample query={}".format(text)
        )

    start_pos = token_indexes[0, 0].item()
    # add some randomization to avoid overfitting to a specific token position
    left_shift = int(tensorizer.max_length / 2)
    left_shift += int((rnd.random() - 0.5) * left_shift / 2)
    # fix: guard against a negative start index — a negative value would make
    # the slice wrap around and take a window from the END of the sequence
    query_tensor = query_tensor_full[max(0, start_pos - left_shift):]
    cls_id = tensorizer.tokenizer.cls_token_id
    if query_tensor[0] != cls_id:
        query_tensor = torch.cat([torch.tensor([cls_id]), query_tensor], dim=0)

    from dpr.models.reader import _pad_to_len

    query_tensor = _pad_to_len(
        query_tensor, tensorizer.get_pad_id(), tensorizer.max_length
    )
    query_tensor[-1] = tensorizer.tokenizer.sep_token_id
    # logger.info('aligned query_tensor %s', query_tensor)

    assert token_id in query_tensor, "query_tensor={}".format(query_tensor)
    return query_tensor
import logging
from typing import Tuple
from fairseq.models.roberta.hub_interface import RobertaHubInterface
from fairseq.models.roberta.model import RobertaModel as FaiseqRobertaModel
from fairseq.optim.adam import FairseqAdam
from torch import Tensor as T
from torch import nn
from dpr.models.hf_models import get_roberta_tensorizer
from .biencoder import BiEncoder
def get_fairseq_adamw_optimizer(model: nn.Module, args):
    """Wrap *model*'s parameters in fairseq's AdamW and return the raw torch optimizer."""
    args.lr = [args.learning_rate]  # FairseqAdam expects `lr` as a list
    return FairseqAdam(args, model.parameters()).optimizer
class RobertaEncoder(nn.Module):
    """Thin wrapper exposing a fairseq RoBERTa model through the DPR encoder API."""

    def __init__(self, fairseq_roberta_hub: RobertaHubInterface):
        super(RobertaEncoder, self).__init__()
        self.fairseq_roberta = fairseq_roberta_hub

    @classmethod
    def from_pretrained(cls, pretrained_dir_path: str):
        """Alternate constructor: load fairseq RoBERTa weights from a directory.

        fix: declared @classmethod — the original had `cls` as a plain first
        parameter, so the existing ``RobertaEncoder.from_pretrained(path)``
        call bound the path to ``cls`` and then failed on ``cls(model)``.
        """
        model = FaiseqRobertaModel.from_pretrained(pretrained_dir_path)
        return cls(model)

    def forward(self, input_ids: T, token_type_ids: T, attention_mask: T) -> Tuple[T, ...]:
        """Return (sequence output, pooled [CLS] output, None).

        NOTE(review): token_type_ids and attention_mask are accepted for API
        compatibility but not forwarded — extract_features only takes ids.
        """
        roberta_out = self.fairseq_roberta.extract_features(input_ids)
        cls_out = roberta_out[:, 0, :]
        return roberta_out, cls_out, None

    def get_out_size(self):
        raise NotImplementedError
def get_roberta_tensorizer(args, tokenizer=None):
    """Return a RobertaTensorizer, building the tokenizer from *args* when absent."""
    if not tokenizer:
        tokenizer = get_roberta_tokenizer(
            args.pretrained_model_cfg, do_lower_case=args.do_lower_case
        )
    tensorizer = RobertaTensorizer(tokenizer, args.sequence_length)
    return tensorizer
class BiEncoder(nn.Module):
    """Bi-Encoder model component. Encapsulates query/question and context/passage encoders."""

    def __init__(
        self,
        question_model: nn.Module,
        ctx_model: nn.Module,
        fix_q_encoder: bool = False,
        fix_ctx_encoder: bool = False,
    ):
        super(BiEncoder, self).__init__()
        self.question_model = question_model
        self.ctx_model = ctx_model
        # a "fixed" encoder is run under no_grad (see get_representation)
        self.fix_q_encoder = fix_q_encoder
        self.fix_ctx_encoder = fix_ctx_encoder

    @staticmethod
    def get_representation(
        sub_model: nn.Module,
        ids: T,
        segments: T,
        attn_mask: T,
        fix_encoder: bool = False,
        representation_token_pos=0,
    ) -> (T, T, T):
        """Run *sub_model* and return (sequence_output, pooled_output, hidden_states).

        Returns (None, None, None) when *ids* is None.

        fix: declared @staticmethod — the original had no decorator and no
        ``self``, so the ``self.get_representation(q_encoder, ...)`` calls in
        forward() bound the instance to ``sub_model`` and shifted every
        argument by one position.
        """
        sequence_output = None
        pooled_output = None
        hidden_states = None
        if ids is not None:
            if fix_encoder:
                # frozen encoder: no gradients flow into its weights...
                with torch.no_grad():
                    sequence_output, pooled_output, hidden_states = sub_model(
                        ids,
                        segments,
                        attn_mask,
                        representation_token_pos=representation_token_pos,
                    )

                if sub_model.training:
                    # ...but downstream layers still need grads w.r.t. the outputs
                    sequence_output.requires_grad_(requires_grad=True)
                    pooled_output.requires_grad_(requires_grad=True)
            else:
                sequence_output, pooled_output, hidden_states = sub_model(
                    ids,
                    segments,
                    attn_mask,
                    representation_token_pos=representation_token_pos,
                )

        return sequence_output, pooled_output, hidden_states

    def forward(
        self,
        question_ids: T,
        question_segments: T,
        question_attn_mask: T,
        context_ids: T,
        ctx_segments: T,
        ctx_attn_mask: T,
        encoder_type: str = None,
        representation_token_pos=0,
    ) -> Tuple[T, T]:
        """Encode questions and contexts; return their pooled representations.

        :param encoder_type: "question"/"ctx"/None selects which sub-encoder
            handles which input (None keeps the default assignment)
        """
        # fix: the original wrapped this body in `with autocase():` — an
        # undefined name (NameError at runtime); presumed typo for
        # torch.cuda.amp.autocast (mixed-precision) — TODO confirm intent.
        with torch.cuda.amp.autocast():
            q_encoder = (
                self.question_model
                if encoder_type is None or encoder_type == "question"
                else self.ctx_model
            )
            _q_seq, q_pooled_out, _q_hidden = self.get_representation(
                q_encoder,
                question_ids,
                question_segments,
                question_attn_mask,
                self.fix_q_encoder,
                representation_token_pos=representation_token_pos,
            )

            ctx_encoder = (
                self.ctx_model
                if encoder_type is None or encoder_type == "ctx"
                else self.question_model
            )
            _ctx_seq, ctx_pooled_out, _ctx_hidden = self.get_representation(
                ctx_encoder, context_ids, ctx_segments, ctx_attn_mask, self.fix_ctx_encoder
            )

        return q_pooled_out, ctx_pooled_out

    # TODO delete once moved to the new method
    @classmethod
    def create_biencoder_input(
        cls,
        samples: List,
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = True,
        hard_neg_fallback: bool = True,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of data items (from json) to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []

        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample["positive_ctxs"]
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample["positive_ctxs"][0]

            neg_ctxs = sample["negative_ctxs"]
            hard_neg_ctxs = sample["hard_negative_ctxs"]
            # fix: `question` was never assigned in the original loop, raising
            # NameError on the append below; read it from the sample dict
            question = sample["question"]

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx["text"],
                    title=ctx["title"] if (insert_title and "title" in ctx) else None,
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )
            question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    @classmethod
    def create_biencoder_input2(
        cls,
        samples: List[BiEncoderSample],
        tensorizer: Tensorizer,
        insert_title: bool,
        num_hard_negatives: int = 0,
        num_other_negatives: int = 0,
        shuffle: bool = True,
        shuffle_positives: bool = False,
        hard_neg_fallback: bool = True,
        query_token: str = None,
    ) -> BiEncoderBatch:
        """
        Creates a batch of the biencoder training tuple.
        :param samples: list of BiEncoderSample-s to create the batch for
        :param tensorizer: components to create model input tensors from a text sequence
        :param insert_title: enables title insertion at the beginning of the context sequences
        :param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
        :param num_other_negatives: amount of other negatives per question (taken from samples' pools)
        :param shuffle: shuffles negative passages pools
        :param shuffle_positives: shuffles positive passages pools
        :param query_token: optional special token to prepend to (or anchor inside) the query
        :return: BiEncoderBatch tuple
        """
        question_tensors = []
        ctx_tensors = []
        positive_ctx_indices = []
        hard_neg_ctx_indices = []

        for sample in samples:
            # ctx+ & [ctx-] composition
            # as of now, take the first(gold) ctx+ only
            if shuffle and shuffle_positives:
                positive_ctxs = sample.positive_passages
                positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
            else:
                positive_ctx = sample.positive_passages[0]

            neg_ctxs = sample.negative_passages
            hard_neg_ctxs = sample.hard_negative_passages
            question = sample.query
            # question = normalize_question(sample.query)

            if shuffle:
                random.shuffle(neg_ctxs)
                random.shuffle(hard_neg_ctxs)

            if hard_neg_fallback and len(hard_neg_ctxs) == 0:
                hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]

            neg_ctxs = neg_ctxs[0:num_other_negatives]
            hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]

            all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
            hard_negatives_start_idx = 1
            hard_negatives_end_idx = 1 + len(hard_neg_ctxs)

            current_ctxs_len = len(ctx_tensors)

            sample_ctxs_tensors = [
                tensorizer.text_to_tensor(
                    ctx.text, title=ctx.title if (insert_title and ctx.title) else None
                )
                for ctx in all_ctxs
            ]

            ctx_tensors.extend(sample_ctxs_tensors)
            positive_ctx_indices.append(current_ctxs_len)
            hard_neg_ctx_indices.append(
                [
                    i
                    for i in range(
                        current_ctxs_len + hard_negatives_start_idx,
                        current_ctxs_len + hard_negatives_end_idx,
                    )
                ]
            )

            if query_token:
                # TODO: tmp workaround for EL, remove or revise
                if query_token == "[START_ENT]":
                    query_span = _select_span_with_token(
                        question, tensorizer, token_str=query_token
                    )
                    question_tensors.append(query_span)
                else:
                    question_tensors.append(
                        tensorizer.text_to_tensor(" ".join([query_token, question]))
                    )
            else:
                question_tensors.append(tensorizer.text_to_tensor(question))

        ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
        questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)

        ctx_segments = torch.zeros_like(ctxs_tensor)
        question_segments = torch.zeros_like(questions_tensor)

        return BiEncoderBatch(
            questions_tensor,
            question_segments,
            ctxs_tensor,
            ctx_segments,
            positive_ctx_indices,
            hard_neg_ctx_indices,
            "question",
        )

    def load_state(self, saved_state: CheckpointState):
        """Load model weights from a checkpoint, tolerating missing keys."""
        # TODO: make a long term HF compatibility fix
        # newer HF checkpoints carry position_ids buffers this model does not
        # define; drop each independently (fix: the original deleted the
        # ctx_model key unconditionally whenever the question_model key was
        # present, raising KeyError if only one of them existed)
        if "question_model.embeddings.position_ids" in saved_state.model_dict:
            del saved_state.model_dict["question_model.embeddings.position_ids"]
        if "ctx_model.embeddings.position_ids" in saved_state.model_dict:
            del saved_state.model_dict["ctx_model.embeddings.position_ids"]
        self.load_state_dict(saved_state.model_dict, strict=False)

    def get_state_dict(self):
        """Return the full model state dict."""
        return self.state_dict()
def get_roberta_biencoder_components(args, inference_only: bool = False, **kwargs):
    """Build (tensorizer, biencoder, optimizer) from pretrained fairseq RoBERTa weights."""
    question_encoder = RobertaEncoder.from_pretrained(args.pretrained_file)
    ctx_encoder = RobertaEncoder.from_pretrained(args.pretrained_file)
    biencoder = BiEncoder(question_encoder, ctx_encoder)
    optimizer = None if inference_only else get_fairseq_adamw_optimizer(biencoder, args)
    return get_roberta_tensorizer(args), biencoder, optimizer
import json
import logging
import pickle
import random
import itertools
import math
import torch
from torch import Tensor as T
from typing import List, Iterator, Callable, Tuple
import random
import random
random.seed(33)
def load_train_dataset(dataset, size=None, listify=True):
    """Return the train split of *dataset*, optionally subsampled.

    :param dataset: a datasets-style mapping with a 'train' split, or the
        dataset/list itself (used as-is when no 'train' key exists)
    :param size: fraction in (0, 1] of the train split to sample; None keeps all
    :param listify: when True, return a plain list; otherwise the raw object
    """
    if size is not None:
        p = size
        data = dataset['train']
        total_size = len(data)
        # deterministic subsample: the RNG seed is derived from the sample size
        rand = random.Random(x=int(p * total_size))
        index_list = list(range(total_size))
        rand.shuffle(index_list)
        x = data.select(index_list[:int(p * total_size)])
    else:
        try:
            x = dataset['train']
        except (KeyError, TypeError, IndexError):
            # fix: was a bare `except:`, which also swallowed SystemExit /
            # KeyboardInterrupt; only missing/invalid 'train' lookups fall back
            x = dataset
    return list(x) if listify else x
import json
import logging
import pickle
import random
import itertools
import math
import torch
from torch import Tensor as T
from typing import List, Iterator, Callable, Tuple
logger = logging.getLogger()
import random
def read_serialized_data_from_files(paths: List[str]) -> List:
    """Load pickled lists from each file in *paths* and concatenate them."""
    results = []
    for path in paths:
        with open(path, "rb") as reader:
            logger.info("Reading file %s", path)
            chunk = pickle.load(reader)
            results.extend(chunk)
            logger.info("Aggregated data size: {}".format(len(results)))
    logger.info("Total data size: {}".format(len(results)))
    return results
import json
import logging
import pickle
import random
import itertools
import math
import torch
from torch import Tensor as T
from typing import List, Iterator, Callable, Tuple
logger = logging.getLogger()
import random
def read_data_from_json_files(paths: List[str]) -> List:
    """Read JSON-lines files and return all records concatenated.

    Each input file is expected to contain one JSON object per line.
    """
    results = []
    for path in paths:
        with open(path, "r", encoding="utf-8") as f:
            logger.info("Reading file %s" % path)
            data = [json.loads(line) for line in f]
        # fix: was `results = data`, which discarded every previously read file
        results.extend(data)
    logger.info("Aggregated data size: {}".format(len(results)))
    return results
import pickle
import torch
import torch.distributed as dist
def get_rank():
    """Return this process's rank in the default torch.distributed group."""
    return dist.get_rank()
def get_world_size():
    """Return the number of processes in the default torch.distributed group."""
    return dist.get_world_size()
def all_reduce(tensor, group=None):
    """All-reduce *tensor* in place across *group* (sum by default).

    NOTE(review): relies on a module-level ``get_default_group`` helper that
    is defined elsewhere — confirm it is in scope in this module.
    """
    if group is None:
        group = get_default_group()
    return dist.all_reduce(tensor, group=group)
The provided code snippet includes necessary dependencies for implementing the `all_gather_list` function. Write a Python function `def all_gather_list(data, group=None, max_size=16384)` to solve the following problem:
Gathers arbitrary data from all nodes into a list. Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python data. Note that *data* must be picklable. Args: data (Any): data from the local worker to be gathered on other workers group (optional): group of the collective
Here is the function:
def all_gather_list(data, group=None, max_size=16384):
    """Gathers arbitrary data from all nodes into a list.

    Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
    data. Note that *data* must be picklable.

    Args:
        data (Any): data from the local worker to be gathered on other workers
        group (optional): group of the collective
    """
    SIZE_STORAGE_BYTES = 4  # int32 to encode the payload size

    enc = pickle.dumps(data)
    enc_size = len(enc)

    if enc_size + SIZE_STORAGE_BYTES > max_size:
        raise ValueError(
            'encoded data exceeds max_size, this can be fixed by increasing buffer size: {}'.format(enc_size))

    rank = get_rank()
    world_size = get_world_size()
    buffer_size = max_size * world_size

    # one CUDA buffer (plus a pinned CPU staging buffer) is cached as function
    # attributes and reused across calls; re-allocated only if too small
    if not hasattr(all_gather_list, '_buffer') or \
            all_gather_list._buffer.numel() < buffer_size:
        all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
        all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()

    buffer = all_gather_list._buffer
    buffer.zero_()
    cpu_buffer = all_gather_list._cpu_buffer

    assert enc_size < 256 ** SIZE_STORAGE_BYTES, 'Encoded object size should be less than {} bytes'.format(
        256 ** SIZE_STORAGE_BYTES)

    size_bytes = enc_size.to_bytes(SIZE_STORAGE_BYTES, byteorder='big')

    # per-rank slot layout: [4-byte big-endian payload size][pickled payload]
    cpu_buffer[0:SIZE_STORAGE_BYTES] = torch.ByteTensor(list(size_bytes))
    cpu_buffer[SIZE_STORAGE_BYTES: enc_size + SIZE_STORAGE_BYTES] = torch.ByteTensor(list(enc))

    start = rank * max_size
    size = enc_size + SIZE_STORAGE_BYTES

    # each rank writes only its own slot of the zeroed buffer, so summing via
    # all_reduce effectively performs an all-gather
    buffer[start: start + size].copy_(cpu_buffer[:size])

    all_reduce(buffer, group=group)

    try:
        result = []
        for i in range(world_size):
            out_buffer = buffer[i * max_size: (i + 1) * max_size]
            # a zero size means that rank contributed nothing (slot left zeroed)
            size = int.from_bytes(out_buffer[0:SIZE_STORAGE_BYTES], byteorder='big')
            if size > 0:
                result.append(pickle.loads(bytes(out_buffer[SIZE_STORAGE_BYTES: size + SIZE_STORAGE_BYTES].tolist())))
        return result
    except pickle.UnpicklingError:
        raise Exception(
            'Unable to unpickle data from other workers. all_gather_list requires all '
            'workers to enter the function together, so this error usually indicates '
            'that the workers have fallen out of sync somehow. Workers can fall out of '
            'sync if one of them runs out of memory, or if there are other conditions '
            'in your training script that can cause one worker to finish an epoch '
            'while other workers are still iterating over their portions of the data.'
        )
import collections
import glob
import logging
import os
from typing import List
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.serialization import default_restore_location
def move_to_device(sample, device):
    """Recursively move every tensor inside *sample* (nested dicts/lists/tuples)
    onto *device*.

    Note: tuples come back as lists, and an empty top-level container yields {}
    (both preserved from the original behavior).
    """
    if len(sample) == 0:
        return {}

    def _transfer(obj, dev):
        if torch.is_tensor(obj):
            return obj.to(dev)
        if isinstance(obj, dict):
            return {k: _transfer(v, dev) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [_transfer(item, dev) for item in obj]
        return obj

    return _transfer(sample, device)
import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
from torch.cuda import amp
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
def pack(plm_score):
    """Group a flat list of PLM scores into rows of 64 candidates each.

    `plm_score` must hold a multiple of 64 entries; the result is a list
    of 64-element lists (one row per query).
    """
    per_query = 64  # candidate contexts scored per query
    grouped = np.reshape(np.asarray(plm_score), (-1, per_query))
    return grouped.tolist()
163,224 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
from torch.cuda import amp
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
def get_model_obj(model: nn.Module) -> nn.Module:
    """Unwrap a (Distributed)DataParallel-wrapped model to its inner module.

    Used before ``state_dict()`` so checkpoints are saved without the
    ``module.`` prefix that DDP/DataParallel wrappers add. Returns `model`
    unchanged when it is not wrapped.
    """
    # NOTE(review): the original definition had no body (a syntax error);
    # this is the standard DPR-style implementation consistent with the
    # call sites `get_model_obj(encoder).state_dict()` below — confirm.
    return model.module if hasattr(model, "module") else model
def trainbc(bencoder,cencoder,dataset,training_args):
    """Jointly train a bi-encoder retriever and a cross-encoder reranker.

    Both models are distilled against soft teacher scores (the batch key
    ``"plm"``) via KL divergence, optimized with AdamW under mixed
    precision (torch.cuda.amp), then saved to hard-coded checkpoint paths.

    Args:
        bencoder: bi-encoder returning (query_vectors, context_vectors).
        cencoder: cross-encoder scoring (query, context) token sequences.
        dataset: HF dataset of pre-tokenized tensor columns (keys used:
            query_ids/query_attentions, ctx_ids/ctx_attentions,
            cross_ids/cross_attentions/cross_ctxs, sub_ids, plm).
        training_args: HF TrainingArguments-like object; reads
            local_rank, per_device_train_batch_size, seed.
    """
    # Standard no-weight-decay split for transformer parameters.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in bencoder.named_parameters() if not any(
            nd in n for nd in no_decay)], "weight_decay": 0.01},
        {"params": [p for n, p in bencoder.named_parameters() if any(
            nd in n for nd in no_decay)], "weight_decay": 0.0},
        {"params": [p for n, p in cencoder.named_parameters() if not any(
            nd in n for nd in no_decay)], "weight_decay": 0.01},
        {"params": [p for n, p in cencoder.named_parameters() if any(
            nd in n for nd in no_decay)], "weight_decay": 0.0}
    ]
    # One optimizer over BOTH models, so a single step updates them jointly.
    optimizer = AdamW(
        optimizer_grouped_parameters,
        lr=2e-5,
    )
    device_id = training_args.local_rank
    # KLDivLoss with default reduction='mean'; expects log-probabilities as
    # input and probabilities as target (matched below).
    loss_fct = torch.nn.KLDivLoss()
    if device_id==-1:
        device_id=0
    batch_sz = training_args.per_device_train_batch_size
    # NOTE(review): offset/offset2/offset3 are built (magic sizes 64/16/4,
    # presumably 64 candidate contexts per query) but never read anywhere
    # in this function — looks like dead code; confirm before removing.
    offset = []
    offset2 = []
    offset3 = []
    scaler = amp.GradScaler(enabled=True)
    for itf in range(batch_sz):
        offset.extend([itf*64]*16)
        offset2.extend([itf*16]*4)
        offset3.extend([itf*64]*4)
    offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
    offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
    offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
    if training_args.local_rank == -1:
        # NOTE(review): non-distributed path trains on only the first 100
        # examples — presumably a leftover debug/smoke-test subset; confirm.
        data_loader_train = DataLoader(dataset=dataset.select(range(100)), batch_size=batch_sz,collate_fn=default_data_collator)
    else:
        sampler = DistributedSampler(
            dataset,
            num_replicas=torch.distributed.get_world_size(),
            rank=training_args.local_rank,
            seed=training_args.seed,
        )
        data_loader_train = DataLoader(dataset=dataset,batch_size=batch_sz,sampler=sampler,drop_last=False,collate_fn=default_data_collator)
    for i, data in tqdm(enumerate(data_loader_train)):
        # Last (short) batch: rebuild the offset tensors for the new size.
        if data["query_ids"].size(0)!=batch_sz:
            batch_sz = data["query_ids"].size(0)
            offset = []
            offset2 = []
            offset3 = []
            for itf in range(batch_sz):
                offset.extend([itf*64]*16)
                offset2.extend([itf*16]*4)
                offset3.extend([itf*64]*4)
            offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
            offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
            offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
        # Keep only the first query variant per example, then move the
        # whole batch to the training GPU.
        data["query_ids"] = data["query_ids"][:,0,:].squeeze(1)
        data["query_attentions"] = data["query_attentions"][:,0,:].squeeze(1)
        data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
        # Bi-encoder forward; sequences are flattened to length-112 rows.
        bmodel_out = bencoder(
            data["query_ids"].view(-1,112),#.squeeze(),
            data["query_attentions"].view(-1,112),#.squeeze(),
            data["ctx_ids"].view(-1,112),#.squeeze(),
            data["ctx_attentions"].view(-1,112),#.squeeze(),
        )
        # NOTE(review): score_mask is computed (sub_ids >= 999 appears to
        # flag padding candidates) but never applied to any score — confirm
        # whether masking was intended before the softmaxes below.
        score_mask = (data["sub_ids"]>=999).int().mul(-999)
        local_q_vector, local_ctx_vectors = bmodel_out
        # 768-dim embeddings; 64 candidate contexts per query.
        local_q_vector = local_q_vector.view(-1,768)
        local_ctx_vectors = local_ctx_vectors.view(-1,64,768)
        # Dot-product similarity of each query against its 64 candidates.
        sim_scores = torch.bmm(
            local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
        ).squeeze(1)
        # Cross-encoder forward on length-144 joint sequences -> one score
        # per (query, candidate) pair, reshaped to (batch, 64).
        cmodel_out = cencoder(
            data["cross_ids"].view(-1,144),
            data["cross_attentions"].view(-1,144),
            data["cross_ctxs"].view(-1,144),
        ).squeeze(-1).view(-1,64)
        # Distillation: both student distributions (log-probs) are pulled
        # toward the teacher "plm" distribution (probs) via KL divergence.
        if "plm" in data.keys():
            plm_scores = torch.softmax(data["plm"],dim=-1)
            bi_score = torch.nn.functional.log_softmax(sim_scores, dim=-1)
            c_score = torch.nn.functional.log_softmax(cmodel_out, dim=-1)
            kl_loss = loss_fct(bi_score,plm_scores)+loss_fct(c_score,plm_scores)
        # NOTE(review): kl_loss is only assigned inside the branch above —
        # a batch without "plm" raises NameError here (or reuses the
        # previous batch's loss). Presumably "plm" is always present.
        scaler.scale(kl_loss).backward()
        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad()
    # Save unwrapped weights to fixed paths (directories must exist).
    torch.save(get_model_obj(bencoder).state_dict(),"./retsave/biencoder-0.pt")
    torch.save(get_model_obj(cencoder).state_dict(),"./rersave/cencoder-0.pt")
163,225 | import sys
from typing import List, Optional, Tuple
def preprocess_all(
    examples,
    question_column: str,
    answer_column: str,
)-> Tuple[List[str], List[str]]:
    """Return (inputs, targets): questions pass through unchanged as model
    inputs and answers pass through unchanged as generation targets."""
    return examples[question_column], examples[answer_column]
163,226 | import sys
from typing import List, Optional, Tuple
def preprocess_sqaud_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Build extractive-QA (inputs, targets) from a SQuAD-style batch.

    Each input is formatted by ``QAInput.qg_input_extractive_qa``; each
    target is the first answer text, or "" when no answer is given.
    """
    inputs = [
        QAInput.qg_input_extractive_qa(ctx, q)
        for q, ctx in zip(examples[question_column], examples[context_column])
    ]
    targets = [
        ans["text"][0] if len(ans["text"]) > 0 else ""
        for ans in examples[answer_column]
    ]
    return inputs, targets
163,227 | import sys
from typing import List, Optional, Tuple
def preprocess_sqaud_abstractive_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Build abstractive-QA (inputs, targets) from a SQuAD-style batch.

    Each input is formatted by ``QAInput.qg_input_abstrativeqa``; each
    target is the first answer text, or "" when no answer is given.
    """
    inputs = [
        QAInput.qg_input_abstrativeqa(ctx, q)
        for q, ctx in zip(examples[question_column], examples[context_column])
    ]
    targets = [
        ans["text"][0] if len(ans["text"]) > 0 else ""
        for ans in examples[answer_column]
    ]
    return inputs, targets
163,228 | import sys
from typing import List, Optional, Tuple
def preprocess_boolq_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Build boolean-QA (inputs, targets) from a BoolQ batch.

    NOTE(review): the column-name parameters are ignored and replaced with
    BoolQ's fixed schema ('question'/'passage'/'answer') — presumably
    intentional; confirm. Targets are the stringified boolean answers
    ("True"/"False").
    """
    questions = examples['question']
    contexts = examples['passage']
    answers = examples['answer']
    inputs = [
        QAInput.qg_input_boolqa(ctx, q)
        for q, ctx in zip(questions, contexts)
    ]
    targets = [str(a) for a in answers]
    return inputs, targets
163,229 | import sys
from typing import List, Optional, Tuple
def preprocess_boolq_batch_pretrain(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Build boolean-QA (inputs, targets) for pretraining.

    Inputs use ``QAInput.qg_input_boolqa``; targets take the first answer
    text capitalized (e.g. "true" -> "True"), or "" when no answer exists.
    """
    inputs = [
        QAInput.qg_input_boolqa(ctx, q)
        for q, ctx in zip(examples[question_column], examples[context_column])
    ]
    targets = [
        ans["text"][0].capitalize() if len(ans["text"]) > 0 else ""
        for ans in examples[answer_column]
    ]
    return inputs, targets
163,230 | import sys
from typing import List, Optional, Tuple
def preprocess_narrativeqa_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Build abstractive-QA (inputs, targets) from a NarrativeQA batch.

    NOTE(review): the column-name parameters are ignored; NarrativeQA's
    nested schema is accessed directly (document.summary.text,
    question.text, answers[0].text) — confirm this matches the caller.
    """
    contexts = [doc['summary']['text'] for doc in examples['document']]
    questions = [q['text'] for q in examples['question']]
    targets = [ans[0]['text'] for ans in examples['answers']]
    inputs = [
        QAInput.qg_input_abstrativeqa(ctx, q)
        for q, ctx in zip(questions, contexts)
    ]
    return inputs, targets
163,231 | import sys
from typing import List, Optional, Tuple
def preprocess_narrativeqa_batch_pretrain(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Build abstractive-QA (inputs, targets) for NarrativeQA pretraining.

    Unlike ``preprocess_narrativeqa_batch``, this variant reads the flat
    column names passed in; targets take the first answer text, or ""
    when no answer is given.
    """
    inputs = [
        QAInput.qg_input_abstrativeqa(ctx, q)
        for q, ctx in zip(examples[question_column], examples[context_column])
    ]
    targets = [
        ans["text"][0] if len(ans["text"]) > 0 else ""
        for ans in examples[answer_column]
    ]
    return inputs, targets
163,232 | import sys
from typing import List, Optional, Tuple
def preprocess_drop_batch(
    examples,
    question_column: str,
    context_column: str,
    answer_column: str,
) -> Tuple[List[str], List[str]]:
    """Build abstractive-QA (inputs, targets) from a DROP batch.

    NOTE(review): the column-name parameters are ignored in favor of
    DROP's fixed 'passage'/'question'/'answers' columns — confirm.
    Targets take the first answer text, or "" when none is given.
    """
    questions = examples['question']
    contexts = examples['passage']
    answers = examples['answers']
    inputs = [
        QAInput.qg_input_abstrativeqa(ctx, q)
        for q, ctx in zip(questions, contexts)
    ]
    targets = [
        ans["text"][0] if len(ans["text"]) > 0 else ""
        for ans in answers
    ]
    return inputs, targets
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.