| id (int64) | prompt (string) | docstring (string) |
|---|---|---|
163,233 | import sys
from typing import List, Optional, Tuple
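# QAInput (used throughout these preprocessors) is assumed to be provided by the project's dataset_processors module.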
def preprocess_race_batch(
examples,
question_column: str,
context_column: str,
answer_column: str,
) -> Tuple[List[str], List[str]]:
contexts = examples['article']
questions = examples['question']
all_options = examples['options']
answers = examples['answer']
options_texts = [f'options: A. {options[0]}; B. {options[1]}; C. {options[2]}; D. {options[3]}' for options in all_options]
inputs = [QAInput.qg_input_multirc(context, question, ops) for question, context, ops in zip(questions, contexts, options_texts)]
ans_map = {'A': 0, 'B': 1, 'C': 2, 'D': 3 }
targets = [options[ans_map[answer]] for options, answer in zip(all_options, answers)]
return inputs, targets | null |
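A minimal usage sketch for the multiple-choice preprocessors above, with a hypothetical stand-in for the project's QAInput prompt builder (only the option/answer wiring is exercised here):

class QAInput:  # hypothetical stand-in; the real builder lives in dataset_processors
    @staticmethod
    def qg_input_multirc(context, question, options):
        return f"question: {question} {options} context: {context}"

batch = {
    "article": ["Tom went to the market."],
    "question": ["Where did Tom go?"],
    "options": [["home", "the market", "school", "work"]],
    "answer": ["B"],
}
inputs, targets = preprocess_race_batch(batch, "question", "article", "answer")
print(inputs[0])   # question: Where did Tom go? options: A. home; ... context: Tom went to the market.
print(targets[0])  # the market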
163,234 | import sys
from typing import List, Optional, Tuple
def preprocess_newsqa_batch(
examples,
question_column: str,
context_column: str,
answer_column: str,
) -> Tuple[List[str], List[str]]:
questions = examples[question_column]
contexts = examples[context_column]
answers = examples[answer_column]
inputs = [QAInput.qg_input_extractive_qa(context, question) for question, context in zip(questions, contexts)]
# inputs = [QAInput.qg_input_abstrativeqa(context, question) for question, context in zip(questions, contexts)]
targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
return inputs, targets | null |
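The extractive preprocessors (here and in the ROPES variant below) assume SQuAD-style answer dicts, falling back to an empty string for unanswerable questions; a quick shape check:

answers = [{"text": ["Paris"], "answer_start": [10]}, {"text": [], "answer_start": []}]
targets = [a["text"][0] if len(a["text"]) > 0 else "" for a in answers]
print(targets)  # ['Paris', '']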
163,235 | import sys
from typing import List, Optional, Tuple
def preprocess_ropes_batch(
examples,
question_column: str,
context_column: str,
answer_column: str,
) -> Tuple[List[str], List[str]]:
questions = examples[question_column]
backgrounds = examples["background"]
situations = examples["situation"]
answers = examples[answer_column]
inputs = [QAInput.qg_input_extractive_qa(" ".join([background, situation]), question) for question, background, situation in zip(questions, backgrounds, situations)]
targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
return inputs, targets | null |
163,236 | import sys
from typing import List, Optional, Tuple
def preprocess_openbookqa_batch(
examples,
question_column: str,
context_column: str,
answer_column: str,
) -> Tuple[List[str], List[str]]:
questions = examples['question_stem']
all_options = examples['choices']
answers = examples['answerKey']
options_texts = [f"options: A. {options['text'][0]}; B. {options['text'][1]}; C. {options['text'][2]}; D. {options['text'][3]}" for options in all_options]
inputs = [QAInput.qg_input_multirc("", question, ops) for question, ops in zip(questions, options_texts)]
ans_map = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
targets = [options['text'][ans_map[answer]] for options, answer in zip(all_options, answers)]
return inputs, targets | null |
163,237 | import sys
from typing import List, Optional, Tuple
def preprocess_social_iqa_batch(
examples,
question_column: str,
context_column: str,
answer_column: str,
) -> Tuple[List[str], List[str]]:
contexts = examples['article']
questions = examples['question']
all_options = examples['options']
answers = examples['answer']
options_texts = [f'options: A. {options[0]}; B. {options[1]}; C. {options[2]}' for options in all_options]
inputs = [QAInput.qg_input_multirc(context, question, ops) for question, context, ops in zip(questions, contexts, options_texts)]
ans_map = {'A': 0, 'B': 1, 'C': 2,}
targets = [options[ans_map[answer]] for options, answer in zip(all_options, answers)]
return inputs, targets | null |
163,238 | import sys
from typing import List, Optional, Tuple
def preprocess_dream_batch(
examples,
question_column: str,
context_column: str,
answer_column: str,
) -> Tuple[List[str], List[str]]:
contexts = [" ".join(dialogue) for dialogue in examples['dialogue']]
questions = examples['question']
all_options = examples['choice']
answers = examples['answer']
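# answer_idxs recovers each answer's option index; it is unused below because DREAM targets are the answer strings themselves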
answer_idxs = [options.index(answer) for answer, options in zip(answers, all_options)]
options_texts = [f'options: A. {options[0]}; B. {options[1]}; C. {options[2]}' for options in all_options]
inputs = [QAInput.qg_input_multirc(context, question, ops) for question, context, ops in zip(questions, contexts, options_texts)]
targets = answers
return inputs, targets | null |
163,239 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorenometa import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from metatrainernometa import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def get_model_obj(model: nn.Module):
return model.module if hasattr(model, "module") else model | null |
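get_model_obj is the usual DDP/DataParallel unwrapper: both wrappers expose the underlying network as .module, while a bare module is returned unchanged. A small CPU-safe check using the helper defined above:

import torch.nn as nn

lin = nn.Linear(4, 2)
wrapped = nn.DataParallel(lin)       # DistributedDataParallel exposes .module the same way
assert get_model_obj(lin) is lin
assert get_model_obj(wrapped) is lin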
163,240 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorenometa import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from metatrainernometa import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def train_qascore(model,trainset,training_args,data_collator):
optimizer = AdamW(
model.parameters(),
lr=1e-4,
)
qa_scores={}
device_id = training_args.local_rank
if device_id == -1:
device_id = 0
if training_args.local_rank == -1:
data_loader_qa = DataLoader(dataset=trainset, shuffle=False,batch_size=training_args.per_device_train_batch_size//2,collate_fn=data_collator)#, sampler=train_sampler)
else:
sampler = DistributedSampler(
trainset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_qa = DataLoader(dataset=trainset, shuffle=False,batch_size=training_args.per_device_train_batch_size//2,collate_fn=data_collator,sampler=sampler,drop_last=False)
for data in data_loader_qa:
# labels = data.pop("labels")
sample_id = data["sample_id"].numpy().tolist()
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
kl_loss,qa_probs = model.forward_single(**data)
if model.activate_rer or model.activate_ret:
kl_loss.backward()
optimizer.step()
optimizer.zero_grad()  # reset gradients so they do not accumulate across batches
scores_id = qa_probs.detach().cpu().numpy().tolist()
for s_id,score in zip(sample_id,scores_id):
qa_scores[int(s_id)]=score
return qa_scores | null |
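Because the distributed branch of train_qascore uses drop_last=False, DistributedSampler pads the index list so every rank draws the same number of samples; the duplicated sample_ids are simply overwritten in the qa_scores dict. A self-contained check of that padding behavior (explicit num_replicas/rank avoids needing init_process_group):

import torch
from torch.utils.data import TensorDataset
from torch.utils.data.distributed import DistributedSampler

ds = TensorDataset(torch.arange(10))
samplers = [DistributedSampler(ds, num_replicas=4, rank=r, shuffle=False) for r in range(4)]
indices = [i for s in samplers for i in iter(s)]
print(len(indices))     # 12: padded from 10 so each of the 4 ranks gets 3 samples
print(sorted(indices))  # [0, 0, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9] -> ids 0 and 1 are seen twice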
163,241 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorenometa import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from metatrainernometa import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
format2dataset = {
'extractive':['squad1_1','squad2','extractive','newsqa','quoref','ropes','adversarialqa_dbert_dev','adversarialqa_dbidaf_dev','adversarialqa_droberta_dev','record_extractive'],
'abstractive':['narrativeqa_dev','abstractive','natural_questions_with_dpr_para','drop','qaconv','tweetqa'],
'multichoice':['race_string','multichoice','openbookqa','mctest_corrected_the_separator','social_iqa','commonsenseqa','qasc','physical_iqa','winogrande_xl','onestopqa_advanced','onestopqa_elementry','onestopqa_intermediate','prost_multiple_choice_with_no_context','dream','processbank_test','cosmosqa','mcscript','mcscript2','quail','reclor','measuring_massive_multitask_language_understanding','head_qa_en_test','race_c','arc_hard','arc_easy'],
'bool':['boolq','bool','boolq_np','multirc','strategyqa','pubmedqa_pqal_short_ans']
}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
def evaluate_model(eval_model, priority_level,format_name,training_args,data_collator,epoch_id):
device_id = training_args.local_rank
if device_id == -1:
device_id = 0
for item in dataset_files:
if not item in format2dataset[format_name]:
continue
dataset_ret = load_from_disk("./epoch_data{}/{}-reteval.hf".format(str(epoch_id),item))
lmax = min(64, len(dataset_ret))  # evaluate on at most 64 samples per dataset
dataset_ret = dataset_ret.select(range(lmax))
dataset_rer = load_from_disk("./epoch_data{}/{}-rereval.hf".format(str(epoch_id),item)).select(range(lmax))
dataset_first = load_from_disk("./epoch_data{}/{}-firsteval.hf".format(str(epoch_id),item)).select(range(lmax))
for (eval_ds,source) in [(dataset_first,"first"),(dataset_ret,"ret"),(dataset_rer,"rer")]:
if training_args.local_rank == -1:
data_loader_eval = DataLoader(dataset=eval_ds, shuffle=False,batch_size=training_args.per_device_train_batch_size*4,collate_fn=data_collator)#, sampler=train_sampler)
else:
sampler = DistributedSampler(
eval_ds,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_eval = DataLoader(dataset=eval_ds, shuffle=False,batch_size=training_args.per_device_train_batch_size*4,collate_fn=data_collator,sampler=sampler,drop_last=False)
tnum = 0.0
for data in data_loader_eval:
labels = data.pop("labels")
data["eval_labels"]=labels
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
if source!="first":
out = get_model_obj(eval_model).eval_mode(**data)  # get_model_obj handles both DDP-wrapped and bare models
priority_level[format_name][source]+=out.mean().item()
else:
out = get_model_obj(eval_model).eval_golden(**data)
priority_level[format_name]["qa"]+=out.mean().item()
tnum +=1.0
# average the accumulated batch means over the number of batches
if source!="first":
priority_level[format_name][source]=priority_level[format_name][source]/max(tnum,1.0)
else:
priority_level[format_name]["qa"]=priority_level[format_name]["qa"]/max(tnum,1.0)
return priority_level | null |
163,242 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorenometa import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from metatrainernometa import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
Batch_Size = 2
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def trainbc(bencoder,cencoder,dataset,training_args,fwd,qaret,qarer):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in bencoder.named_parameters() if not any(
nd in n for nd in no_decay)], "weight_decay": 0.01},
{"params": [p for n, p in bencoder.named_parameters() if any(
nd in n for nd in no_decay)], "weight_decay": 0.0},
{"params": [p for n, p in cencoder.named_parameters() if not any(
nd in n for nd in no_decay)], "weight_decay": 0.01},
{"params": [p for n, p in cencoder.named_parameters() if any(
nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=1e-5,
)
device_id = training_args.local_rank
loss_fct = torch.nn.KLDivLoss()
if device_id==-1:
device_id=0
bs_size = Batch_Size
offset = []
offset2 = []
offset3 = []
scaler = amp.GradScaler(enabled=True)
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
if training_args.local_rank == -1:
data_loader_train = DataLoader(dataset=dataset.select(range(100)), batch_size=bs_size,collate_fn=default_data_collator)
else:
sampler = DistributedSampler(
dataset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_train = DataLoader(dataset=dataset,batch_size=bs_size,sampler=sampler,drop_last=False,collate_fn=default_data_collator)
for i, data in enumerate(data_loader_train):
if data["query_ids"].size(0)!=bs_size:
bs_size = data["query_ids"].size(0)
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
data["query_ids"] = data["query_ids"][:,0,:].squeeze(1)
data["query_attentions"] = data["query_attentions"][:,0,:].squeeze(1)
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
bmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = bmodel_out
local_q_vector = local_q_vector.view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,64,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
cmodel_out = cencoder(
data["cross_ids"].view(-1,144),
data["cross_attentions"].view(-1,144),
data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,64)
if fwd:
bi_score = torch.softmax(sim_scores, dim=-1)
c_score = torch.nn.functional.log_softmax(cmodel_out, dim=-1)
kl_loss = loss_fct(c_score, bi_score)
else:
bi_score = torch.nn.functional.log_softmax(sim_scores, dim=-1)
c_score = torch.softmax(cmodel_out, dim=-1)
kl_loss = loss_fct(bi_score,c_score)
scaler.scale(kl_loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
# sim_scores = sim_scores.add(score_mask)
#(bs*768)*(768*64)=(bs*64)
kl_loss_qa1 = 0.0
kl_loss_qa2 = 0.0
if "qa_scores" in data.keys() and (qaret or qarer):
with torch.no_grad():
previous_ids = []
sids = data["sample_id"].cpu().numpy().tolist()
for sid in sids:
try:
previous_ids.extend(format_hints_ids[int(sid)])
except:
previous_ids.extend([0,1,2,3])
previous_ids = torch.tensor(previous_ids).long().to(torch.device("cuda", device_id)).add(offset3)
previous_data = {}
for k_ in data.keys():
if len(data[k_].size())==3:
previous_data[k_] = torch.index_select(data[k_].view(bs_size*64,-1),dim=0,index=previous_ids).view(bs_size,4,-1).clone()
elif len(data[k_].size())==2:
if k_=="query_ids" or k_=="query_attentions":
previous_data[k_]=data[k_].clone()
elif k_!="qa_scores":
previous_data[k_] = torch.index_select(data[k_].view(-1),dim=0,index=previous_ids).view(bs_size,4).clone()
else:
previous_data[k_]=data[k_].clone()
else:
if k_=="sample_id":
previous_data[k_]=data[k_].clone()
cmodel_previous = cencoder(
previous_data["cross_ids"].view(-1,144),
previous_data["cross_attentions"].view(-1,144),
previous_data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,4)
dmodel_previous = bencoder(
previous_data["query_ids"].view(-1,112),#.squeeze(),
previous_data["query_attentions"].view(-1,112),#.squeeze(),
previous_data["ctx_ids"].view(-1,112),#.squeeze(),
previous_data["ctx_attentions"].view(-1,112),#.squeeze(),
)
local_q_vector, local_ctx_vectors = dmodel_previous
local_q_vector = local_q_vector.view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,4,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
bi_score_previous = torch.nn.functional.log_softmax(sim_scores, dim=-1)
c_score_previous = torch.nn.functional.log_softmax(cmodel_previous, dim=-1)
qa_scores = torch.softmax(data["qa_scores"], dim=-1)
if qarer:
kl_loss_qa1 = loss_fct(c_score_previous, qa_scores)
if qaret:
kl_loss_qa2 = loss_fct(bi_score_previous,qa_scores)
kl_qa_all = kl_loss_qa1+kl_loss_qa2
scaler.scale(kl_qa_all).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad() | null |
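The loss wiring in trainbc follows torch.nn.KLDivLoss's convention: the first argument must be log-probabilities and the second probabilities, which is why the distribution being fit gets log_softmax and the reference gets softmax in both the fwd and reverse branches (note the default reduction='mean' averages over elements; 'batchmean' matches the mathematical KL). A minimal check of that convention:

import torch

loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
log_p = torch.nn.functional.log_softmax(torch.randn(2, 16), dim=-1)  # input: log-probs
q = torch.softmax(torch.randn(2, 16), dim=-1)                        # target: probs
print(loss_fct(log_p, q))
assert loss_fct(torch.log(q), q).abs() < 1e-6  # KL(q || q) = 0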
163,243 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorenometa import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from metatrainernometa import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def filthints(bencoder,cencoder,dataset,training_args,fwd,qaret,qarer):
device_id = training_args.local_rank
if device_id==-1:
device_id=0
bencoder.eval()
cencoder.eval()
bs_size = 48
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
if training_args.local_rank == -1:
data_loader_train = DataLoader(dataset, shuffle=True,batch_size=bs_size,collate_fn=default_data_collator)
else:
sampler = DistributedSampler(
dataset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_train = DataLoader(dataset=dataset,batch_size=bs_size,sampler=sampler,collate_fn=default_data_collator,drop_last=False)
for i, data in enumerate(data_loader_train):
if data["query_ids"].size(0)!=bs_size:
bs_size = data["query_ids"].size(0)
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
torch.cuda.synchronize()
start = time.time()
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
with torch.no_grad():
bmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = bmodel_out
local_q_vector = local_q_vector.view(-1,64,768)[:,0,:].view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,64,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
# print(sim_scores)
sim_scores = sim_scores.add(score_mask)
#(1*768)*(768*64)=(1*64)
sort_result, sort_idxs = sim_scores.topk(16)#sort(dot_prod_scores, dim=0, descending=True)
sort_idxs = sort_idxs.view(-1).add(offset)
for k_ in data.keys():
if len(data[k_].size())==3:
data[k_] = torch.index_select(data[k_].view(bs_size*64,-1),dim=0,index=sort_idxs).view(bs_size,16,-1)
elif len(data[k_].size())==2:
if k_!="qa_scores":
data[k_] = torch.index_select(data[k_].view(-1),dim=0,index=sort_idxs).view(bs_size,16)
else:
pass
else:
if k_=="sample_id":
pass
else:
print(k_)
print(data[k_])
assert False
# torch.cuda.synchronize()
# print(time.time()-start,"select16_time")
cmodel_out = cencoder(
data["cross_ids"].view(-1,144),
data["cross_attentions"].view(-1,144),
data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,16)
dmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
# score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = dmodel_out
local_q_vector = local_q_vector.view(-1,16,768)[:,0,:].view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,16,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
bi_score = sim_scores
c_score = cmodel_out
_,rerank_idxs = cmodel_out.topk(4)
# torch.cuda.synchronize()
# print(time.time()-start,"cmodel_time")
for bat in range(rerank_idxs.size(0)):
sel = torch.index_select(data["sub_ids"][bat],dim=0,index=rerank_idxs[bat])
ids = data["sample_id"][bat]
format_hints_ids[ids.item()]=sel.cpu().numpy().tolist()
for ix in range(rerank_idxs.size(0)):
ids = data["sample_id"][ix].item()
bscore = []
cscore = []
for item in rerank_idxs[ix]:
bscore.append(bi_score[ix][item].item())
cscore.append(c_score[ix][item].item())
format_ret_scores[ids]=bscore
format_rer_scores[ids]=cscore
# torch.cuda.synchronize()
# print(time.time()-start,"final_time") | null |
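filthints (and rank_rr below) converts per-row topk indices into indices over a flattened [batch*64, ...] view by adding a per-row offset of row*64, then gathers with torch.index_select; this is exactly what the offset lists built at the top of the function encode. A self-contained sketch of that index arithmetic, assuming 64 candidates per query and a top-16 cut:

import torch

bs, n_cand, k, dim = 3, 64, 16, 8
scores = torch.randn(bs, n_cand)
cand_feats = torch.randn(bs, n_cand, dim)

_, topk_idx = scores.topk(k)                               # [bs, k], indices local to each row
offset = (torch.arange(bs) * n_cand).repeat_interleave(k)  # row r contributes r*64, k times
flat_idx = topk_idx.view(-1) + offset                      # local -> flat indices
kept = torch.index_select(cand_feats.view(bs * n_cand, dim), 0, flat_idx).view(bs, k, dim)
# equivalent to a batched gather over dim=1
assert torch.equal(kept, torch.gather(cand_feats, 1, topk_idx.unsqueeze(-1).expand(-1, -1, dim)))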
163,244 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorenometa import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from metatrainernometa import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def rank_rr(bencoder,cencoder,dataset,training_args):
device_id = training_args.local_rank
bencoder.eval()
cencoder.eval()
if device_id==-1:
device_id=0
bs_size = 64
ngpus = torch.distributed.get_world_size() if training_args.local_rank != -1 else 1  # get_world_size() needs an initialized process group
if len(dataset)//ngpus<bs_size:
bs_size = len(dataset)//ngpus
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
if training_args.local_rank == -1:
data_loader_dev = DataLoader(dataset=dataset, shuffle=True,batch_size=bs_size,collate_fn=default_data_collator)
else:
sampler = DistributedSampler(
dataset,
shuffle=False,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_dev = DataLoader(dataset=dataset,batch_size=bs_size,shuffle=False,sampler=sampler,collate_fn=default_data_collator,drop_last=False)
for i, data in enumerate(data_loader_dev):
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
if True:
if data["query_ids"].size(0)!=bs_size:
bs_size = data["query_ids"].size(0)
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
bmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = bmodel_out
local_q_vector = local_q_vector.view(-1,64,768)[:,0,:].view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,64,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
sim_scores = sim_scores.add(score_mask)#.cpu()
#(1*768)*(768*64)=(1*64)
sort_result, sort_idxs = sim_scores.topk(16)#sort(dot_prod_scores, dim=0, descending=True)
for bat in range(sort_idxs.size(0)):
sel = torch.index_select(data["sub_ids"][bat],dim=0,index=sort_idxs[bat])
ids = data["sample_id"][bat]
first_select_ids[int(ids.item())]=sort_idxs[bat].cpu().numpy().tolist()
sort_idxs = sort_idxs.view(-1).add(offset)
_,sort_idxs4 = sim_scores.topk(4)
for bat in range(sort_idxs4.size(0)):
sel = torch.index_select(data["sub_ids"][bat],dim=0,index=sort_idxs4[bat])
ids = data["sample_id"][bat]
ret_select_ids[int(ids.item())]=sel.cpu().numpy().tolist()
for k_ in data.keys():
if len(data[k_].size())==3:
data[k_] = torch.index_select(data[k_].view(bs_size*64,-1),dim=0,index=sort_idxs).view(bs_size,16,-1)
elif len(data[k_].size())==2:
if k_!="qa_scores":
data[k_] = torch.index_select(data[k_].view(-1),dim=0,index=sort_idxs).view(bs_size,16)
else:
pass
else:
pass
cmodel_out = cencoder(
data["cross_ids"].view(-1,144),
data["cross_attentions"].view(-1,144),
data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,16)
_,rerank_idxs = cmodel_out.topk(4)
for bat in range(rerank_idxs.size(0)):
sel = torch.index_select(data["sub_ids"][bat],dim=0,index=rerank_idxs[bat])
ids = data["sample_id"][bat]
rer_select_ids[int(ids.item())]=sel.cpu().numpy().tolist() | null |
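rank_rr is a standard retrieve-then-rerank cascade: a cheap dot product between each query vector and its 64 candidate vectors (the torch.bmm above) keeps a top-16 shortlist, which the cross-encoder rescores to pick the final 4. A compact sketch of the scoring shapes:

import torch

bs, n_cand, dim = 2, 64, 768
q = torch.randn(bs, dim)              # one query vector per example
ctx = torch.randn(bs, n_cand, dim)    # 64 candidate vectors per example

sim = torch.bmm(q.unsqueeze(1), ctx.transpose(1, 2)).squeeze(1)  # (bs,1,dim)x(bs,dim,64) -> (bs,64)
_, top16 = sim.topk(16)               # retriever shortlist

rerank_scores = torch.randn(bs, 16)   # stand-in for the cross-encoder's scores on the shortlist
_, top4_local = rerank_scores.topk(4)
top4 = torch.gather(top16, 1, top4_local)  # map shortlist positions back to original candidate ids
print(top4.shape)                     # torch.Size([2, 4])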
163,245 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorenometa import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from metatrainernometa import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def preprocess_function_eval(examples,format_name):
global format2id
format2id = {'extractive':0,'abstractive':1,'multichoice':2,'bool':3}
# each dataset name -> its format; format2dataset is assumed to come from the companion snippets / wildcard imports
dataset2format = {ds: fmt for fmt, ds_list in format2dataset.items() for ds in ds_list}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
def generate_samples_validation(format_name,epoch_id):
start = 0
fhint = open("./plmresource/{}-hints.txt".format(format_name),'r')
hintlines = fhint.readlines()
hint_cands = [_.strip().split("<@#>") for _ in hintlines]
base_id = -format2id[format_name]*2e5
for item in dataset_files:
fm = dataset2format[item]
global tsname
tsname = item
if fm==format_name:
data_path = "./raw_data/{}-val.json".format(item)
dataset = load_dataset("json", data_files=data_path)["train"]
end = start+len(dataset)
sample_ids = [(base_id-i) for i in range(start,end)]
first_selection = []
ret_selection = []
rer_selection = []
for sid, cands in zip(sample_ids,hint_cands[start:end]):
try:
first_selection.append(" ; ".join([cands[_] for _ in first_sel[sid]]))
ret_selection.append(" ; ".join([cands[_] for _ in ret_sel[sid]]))
rer_selection.append(" ; ".join([cands[_] for _ in rer_sel[sid]]))
except:
print(sid)
print(first_sel[sid])
print(ret_sel[sid])
print(rer_sel[sid])
print(len(cands))
assert False
dataset = dataset.add_column("hintret",ret_selection)
dataset_ret = dataset.map(
lambda x:preprocess_function_eval(x,format_name),
batched=True,
remove_columns=["input","output","hintret"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
dataset = dataset.remove_columns("hintret")
dataset = dataset.add_column("hintrer",rer_selection)
dataset_rer = dataset.map(
lambda x:preprocess_function_eval(x,format_name),
batched=True,
remove_columns=["input","output","hintrer"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
dataset = dataset.remove_columns("hintrer")
dataset = dataset.add_column("hintfirst",first_selection)
dataset_first = dataset.map(
lambda x:preprocess_function_eval(x,format_name),
batched=True,
remove_columns=["input","output","hintfirst"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
dataset_ret.save_to_disk("./epoch_data{}/{}-reteval.hf".format(str(epoch_id),item))
dataset_rer.save_to_disk("./epoch_data{}/{}-rereval.hf".format(str(epoch_id),item))
dataset_first.save_to_disk("./epoch_data{}/{}-firsteval.hf".format(str(epoch_id),item))
start = end | null |
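generate_samples_validation leans on a small datasets idiom repeated three times above: add_column attaches the selected hints, map tokenizes and strips the raw columns, save_to_disk materializes the per-epoch file. A toy version of that pipeline with a trivial stand-in for preprocess_function_eval, so it runs on its own:

from datasets import Dataset

ds = Dataset.from_dict({"input": ["q1", "q2"], "output": ["a1", "a2"]})
ds = ds.add_column("hintret", ["h1 ; h2", "h3 ; h4"])

def fake_preprocess(batch):  # hypothetical stand-in for preprocess_function_eval
    return {"model_input": [f"{x} [HINT] {h}" for x, h in zip(batch["input"], batch["hintret"])]}

ds = ds.map(fake_preprocess, batched=True, remove_columns=["input", "output", "hintret"])
ds.save_to_disk("./toy-reteval.hf")  # mirrors the per-epoch .hf layout used above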
163,246 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorenometa import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from metatrainernometa import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def preprocess_function(examples):
if True:
preprocess_fn = preprocess_proqa
inputs, targets,hints = preprocess_fn(examples, "input","output","hint",format_name=format_name)
model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True)
hints_inputs = tokenizer(hints, max_length=max_source_length, padding=padding, truncation=True)
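# splice the tokenized hint after the input: drop each sequence's EOS, join with special id 32108 (presumably the added [HINT] token, given the additions in the main() snippet), then re-terminate with T5's EOS id 1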
model_inputs["input_ids"] = [xx[:-1]+[32108]+yy[:-1]+[1] for xx,yy in zip(model_inputs["input_ids"],hints_inputs["input_ids"])]
model_inputs["attention_mask"] = [xx[:-1]+[1]+yy[:-1]+[xx[-1]] for xx,yy in zip(model_inputs["attention_mask"],hints_inputs["attention_mask"])]
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=128, padding=padding, truncation=True)
if padding == "max_length":
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
# the spliced input_ids/attention_mask built above are used as-is (format-prompt prepending is disabled)
return model_inputs
global format2id
format2id = {'extractive':0,'abstractive':1,'multichoice':2,'bool':3}
format2dataset = {
'extractive':['squad1_1','squad2','extractive','newsqa','quoref','ropes','adversarialqa_dbert_dev','adversarialqa_dbidaf_dev','adversarialqa_droberta_dev','record_extractive'],
'abstractive':['narrativeqa_dev','abstractive','natural_questions_with_dpr_para','drop','qaconv','tweetqa'],
'multichoice':['race_string','multichoice','openbookqa','mctest_corrected_the_separator','social_iqa','commonsenseqa','qasc','physical_iqa','winogrande_xl','onestopqa_advanced','onestopqa_elementry','onestopqa_intermediate','prost_multiple_choice_with_no_context','dream','processbank_test','cosmosqa','mcscript','mcscript2','quail','reclor','measuring_massive_multitask_language_understanding','head_qa_en_test','race_c','arc_hard','arc_easy'],
'bool':['boolq','bool','boolq_np','multirc','strategyqa','pubmedqa_pqal_short_ans']
}
dataset2format = {ds: fmt for fmt, ds_list in format2dataset.items() for ds in ds_list}  # each dataset name -> its format
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
def generate_samples_with_hints(formats_hints_ids,format_name,epoch_id):
format_ids_seq = []
po = open("./json2select.json",'r')
po = json.load(po)
all_format_size = 0
for item in dataset_files:
if item in format2dataset[format_name]:
all_format_size+=len(po[item])
base_id = 2e5*format2id[format_name]
format_ids_seq=[(i+base_id) for i in range(all_format_size)]
pdz = open("textinput/{}-glminput.jsonl".format(format_name),'r',encoding='utf-8')
pdz_lines = pdz.readlines()
pdz_lines = [json.loads(item) for item in pdz_lines]
pdzs = [item['id'] for item in pdz_lines]
assert pdzs[-1]+1==all_format_size
splits= {"extractive":10,"abstractive":10,"multichoice":4,"bool":2}
splitn =splits[format_name]
total_json = []
for idx in range(int(splitn)):
a = open("./plmresource/{}-glmout.json".format(format_name+str(idx)),'r',encoding='utf-8')
b = json.load(a)["data"]
total_json.extend(b)
top_line = []
single_hints = []
current_saved = []
clk = 1
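# total_json holds one generated hint candidate per line, and pdzs[i] is the sample id of line i;
# when the running id reaches clk, every candidate of sample clk-1 has been buffered in current_saved,
# so the subset chosen in format_hints_ids is joined and flushed to top_line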
for left,right in zip(total_json,pdzs):
if right == clk:
read_ = []
idss = format_hints_ids[format_ids_seq[clk-1]]
for i_ in idss:
read_.append(current_saved[i_])
clk+=1
top_line.append(" ; ".join(read_))
current_saved = []
current_saved.append(left)
read_ = []
idss = format_hints_ids[format_ids_seq[clk-1]]
for i_ in idss:
read_.append(current_saved[i_])
top_line.append(" ; ".join(read_))
start = 0
end = 0
for item in dataset_files:
fm = dataset2format[item]
global tsname
tsname = item
if fm==format_name:
sequence = po[item]
len_seq = len(po[item])
data_path = "./data_process/data/{}/train.json".format(item)
dataset = load_dataset("json", data_files=data_path)["train"].select(sequence)
assert len_seq==len(dataset)
end=start+len_seq
add_line = format_ids_seq[start:end]
hints = top_line[start:end]
dataset = dataset.add_column("hint",hints)
train_dataset = dataset.map(
preprocess_function,
batched=True,
remove_columns=["input","output","hint"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
train_dataset = train_dataset.add_column("sample_id",add_line)
train_dataset.save_to_disk("./epoch_data{}/{}-train.hf".format(str(epoch_id),item))
start = end | null |
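The ±2e5·format_id offsets carve out a distinct sample-id band per format, counting upward for the training ids built here and downward for the validation ids built in generate_samples_validation; a small sketch of that layout:

format2id = {'extractive': 0, 'abstractive': 1, 'multichoice': 2, 'bool': 3}

def train_sample_id(fmt, i):   # as in generate_samples_with_hints
    return int(2e5 * format2id[fmt] + i)

def val_sample_id(fmt, i):     # as in generate_samples_validation
    return int(-2e5 * format2id[fmt] - i)

print(train_sample_id('multichoice', 7))  # 400007
print(val_sample_id('multichoice', 7))    # -400007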
163,247 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorenometa import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from metatrainernometa import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if 'same' in model_args.model_name_or_path:
task2id = {'squad': 0, 'extractive': 0, 'narrativeqa': 1, 'abstractive': 1, 'race': 2, 'multichoice': 2,
'boolq': 3, 'bool': 3, 'newsqa': 8, 'quoref': 9, 'ropes': 10, 'drop': 11, 'nqopen': 12,
'boolq_np': 13, 'openbookqa': 14, 'mctest': 15, 'social_iqa': 16, 'dream': 17}
else:
task2id = {'squad': 0, 'extractive': 1, 'narrativeqa': 2, 'abstractive': 3, 'race': 4, 'multichoice': 5,
'boolq': 6, 'bool': 7, 'newsqa': 8, 'quoref': 9, 'ropes': 10, 'drop': 11, 'nqopen': 12,
'boolq_np': 13, 'openbookqa': 14, 'mctest': 15, 'social_iqa': 16, 'dream': 17}
dataset_name_to_metric = {
'squad1_1': 'metric/squad_v1_local/squad_v1_local.py',
'squad2': 'metric/squad_v2_local/squad_v2_local.py',
'newsqa': 'metric/squad_v1_local/squad_v1_local.py',
'boolq': 'metric/squad_v1_local/squad_v1_local.py',
'narrativeqa_dev': 'metric/rouge_local/rouge_metric.py',
'race_string': 'metric/accuracy.py',
'quoref': 'metric/squad_v1_local/squad_v1_local.py',
'ropes': 'metric/squad_v1_local/squad_v1_local.py',
'drop': 'metric/squad_v1_local/squad_v1_local.py',
'natural_questions_with_dpr_para': 'metric/squad_v1_local/squad_v1_local.py',
'boolq_np': 'metric/squad_v1_local/squad_v1_local.py',
'openbookqa': 'metric/accuracy.py',
'arc_hard': 'metric/accuracy.py',
'arc_easy': 'metric/accuracy.py',
'mctest_corrected_the_separator': 'metric/accuracy.py',
'social_iqa': 'metric/accuracy.py',
'dream': 'metric/accuracy.py',
'commonsenseqa':'metric/accuracy.py',
'qasc':'metric/accuracy.py',
'physical_iqa':'metric/accuracy.py',
'winogrande_xl':'metric/accuracy.py',
'multirc':'metric/squad_v1_local/squad_v1_local.py',
'onestopqa_advanced':'metric/accuracy.py',
'onestopqa_elementry':'metric/accuracy.py',
'onestopqa_intermediate':'metric/accuracy.py',
'prost_multiple_choice_with_no_context':'metric/accuracy.py',
'processbank_test':'metric/accuracy.py',
'cosmosqa':'metric/accuracy.py',
'mcscript':'metric/accuracy.py',
'mcscript2':'metric/accuracy.py',
'quail':'metric/accuracy.py',
'reclor':'metric/accuracy.py',
'measuring_massive_multitask_language_understanding':'metric/accuracy.py',
'head_qa_en_test':'metric/accuracy.py',
'race_c':'metric/accuracy.py',
'pubmedqa_pqal_short_ans':'metric/squad_v1_local/squad_v1_local.py',#
'strategyqa':'metric/squad_v1_local/squad_v1_local.py',#
'tweetqa':'metric/bleu_local/bleu.py',
'qaconv':'metric/squad_v1_local/squad_v1_local.py',
'record_extractive':'metric/squad_v1_local/squad_v1_local.py',
'adversarialqa_dbert_dev':'metric/squad_v1_local/squad_v1_local.py',
'adversarialqa_dbidaf_dev':'metric/squad_v1_local/squad_v1_local.py',
'adversarialqa_droberta_dev':'metric/squad_v1_local/squad_v1_local.py'
}
# Set seed before initializing model.
set_seed(training_args.seed)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
global tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
'[OPTIONS]','[HINT]']}
tokenizer.add_tokens(tokens_to_add)
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
added_tokens = tokenizer.get_added_vocab()
model = PromptT5.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model.resize_token_embeddings(len(tokenizer))
global max_source_length
max_source_length = 1024
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
if training_args.local_rank == -1 or training_args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not training_args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
question_column = data_args.question_column
context_column = data_args.context_column
answer_column = data_args.answer_column
# import random
data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
os.makedirs("./mem_scores", exist_ok=True)
#start
train_dataloaders = {}
eval_dataloaders = {}
replay_dataloaders = {}
to_be_train = ["squad1_1","squad2","narrativeqa_dev","boolq","arc_hard","arc_easy","openbookqa","race_string","mctest_corrected_the_separator","newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
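# note: the extend below repeats several dataset names; the membership checks later are unaffected by duplicates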
to_be_train.extend(["narrativeqa_dev","boolq","arc_hard","arc_easy","openbookqa","mctest_corrected_the_separator","newsqa","quoref","ropes","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"])
max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
if training_args.local_rank!=-1:
world_size = torch.distributed.get_world_size()
else:
world_size = 1
device_id = training_args.local_rank if training_args.local_rank!=-1 else 0
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-4,eps=1e-8)
# retriever_data =
tensorizer, bi_encoder, _ = init_biencoder_components(
"hf_bert", {}, inference_only=True #hf_bert
)
c_encoder = BertEncoder_For_CrossEncoder.from_pretrained(
"bert-base-uncased"
)
# move both encoders to the local device; wrap in DDP only when running distributed
bi_encoder = bi_encoder.to(torch.device("cuda", device_id))
c_encoder = c_encoder.to(torch.device("cuda", device_id))
if training_args.local_rank != -1:
bi_encoder = nn.parallel.DistributedDataParallel(
bi_encoder,
device_ids=[training_args.local_rank] if training_args.n_gpu != 0 else None,
output_device=training_args.local_rank if training_args.n_gpu else None,
find_unused_parameters=True,
)
c_encoder = nn.parallel.DistributedDataParallel(
c_encoder,
device_ids=[training_args.local_rank] if training_args.n_gpu != 0 else None,
output_device=training_args.local_rank if training_args.n_gpu else None,
)
global all_gpus
if training_args.local_rank==-1:
all_gpus = [-1]
else:
all_gpus = list(range(world_size))
global format_name
global priority_level
priority_level = {}
format2size={"bool":1,"multichoice":5,"extractive":29,"abstractive":9}
skip_evaluate = True
skip_selection = True
global format_train_sel
format_train_sel = {"bool":[],"extractive":[],"abstractive":[],"multichoice":[]}
for epoch_id in range(0, int(training_args.num_train_epochs)):
gc.collect()
torch.cuda.empty_cache()
for k_ in format2size.keys():
priority_level[k_]={"ret":0,"rer":0,"qa":0}
map_location = torch.device("cuda", device_id)  # local_rank is -1 in single-process runs, so use the resolved device_id
global format_hints_ids
global format_ret_scores
global format_rer_scores
format_ret_scores = {}
format_rer_scores = {}
format_hints_ids = {}
global ret_select_ids
global rer_select_ids
global first_select_ids
first_select_ids = {}
ret_select_ids = {}
rer_select_ids = {}
tlm = get_model_obj(c_encoder)
if epoch_id>0:
tlm.load_state_dict(torch.load("./rersave/cencoder-{}.pt".format(str(epoch_id)),map_location=map_location))
else:
tlm.load_state_dict(torch.load("./cmodel13500.pt",map_location=map_location))
for format_name in ["bool","extractive","abstractive","multichoice"]:
sub_to_be_train = [_ for _ in format2dataset[format_name] if _ in to_be_train]
if skip_evaluate:
break
if epoch_id>0:
qa_scores = load_from_qa(all_gpus,sub_to_be_train)
to_load_model = get_model_obj(bi_encoder)
if epoch_id==0:
to_load_model.load_state_dict(torch.load("./retsave/biencoder-{}.pt".format(str(epoch_id)))["model_dict"])
else:
to_load_model.load_state_dict(torch.load("./retsave/biencoder-{}.pt".format(str(epoch_id)),map_location=map_location))
rr_dev_dataset = load_from_disk("./sel_file/{}val-retrak.hf".format(format_name))
if training_args.local_rank<=0:
print("Start ranking {}......".format(format_name))
with torch.no_grad(),amp.autocast(enabled=True):
rank_rr(bi_encoder,c_encoder,rr_dev_dataset,training_args)
if skip_evaluate==False:
fout = open("./mem_scores/rt_ids-{}.json".format(str(training_args.local_rank)),'w')
json.dump(ret_select_ids,fout)
fout.close()
fout = open("./mem_scores/rr_ids-{}.json".format(str(training_args.local_rank)),'w')
json.dump(rer_select_ids,fout)
fout.close()
fout = open("./mem_scores/qa_ids-{}.json".format(str(training_args.local_rank)),'w')
json.dump(first_select_ids,fout)
fout.close()
global first_sel
global ret_sel
global rer_sel
if training_args.local_rank!=-1:
torch.distributed.barrier()
first_sel,ret_sel,rer_sel = load_all_select_ids(all_gpus,to_be_train)
if epoch_id==0:
eval_model = PromptT5.from_pretrained("./t5-base")
else:
eval_model = PromptT5.from_pretrained("./epoch_ckpt{}".format(str(epoch_id)))
eval_model = eval_model.to(torch.device("cuda", device_id))
if training_args.local_rank != -1:
eval_model = nn.parallel.DistributedDataParallel(
eval_model,
device_ids=[training_args.local_rank] if training_args.n_gpu != 0 else None,
output_device=training_args.local_rank if training_args.n_gpu else None,
)
for format_name in ["bool","extractive","abstractive","multichoice"]:
if skip_evaluate:
break
if training_args.local_rank<=0:
print("generating...",format_name)
generate_samples_validation(format_name,epoch_id)
if training_args.local_rank!=-1:
torch.distributed.barrier()
if training_args.local_rank<=0:
print("evaluating.....",format_name)
with torch.no_grad():
priority_level = evaluate_model(eval_model,priority_level,format_name,training_args,data_collator,epoch_id)
if skip_evaluate==False:
fout = open("./mem_scores/priority_level-{}.json".format(str(training_args.local_rank)),'w')
json.dump(priority_level,fout)
fout.close()
if training_args.local_rank!=-1:
torch.distributed.barrier()
priority_level = load_level(all_gpus)
format_hints_ids = {}
if skip_selection or epoch_id>0:
fin = open("./mem_scores/format_train_sel.json",'r')
format_train_sel = json.load(fin)
if training_args.local_rank<=0:
print(format_train_sel)
else:
format_train_sel = {"bool":[],"extractive":[],"abstractive":[],"multichoice":[]}
for format_name in ["bool","extractive","abstractive","multichoice"]:
sub_to_be_train = [_ for _ in format2dataset[format_name] if _ in to_be_train]
if epoch_id>0:
qa_scores = load_from_qa(all_gpus,sub_to_be_train)
start_line = 0
end_line = 0
to_load_model = get_model_obj(bi_encoder)
if epoch_id==0:
to_load_model.load_state_dict(torch.load("./retsave/biencoder-{}.pt".format(str(epoch_id)))["model_dict"])
else:
to_load_model.load_state_dict(torch.load("./retsave/biencoder-{}.pt".format(str(epoch_id)),map_location=map_location))
forward_train = True
qa2ret = False
qa2rer = False
if priority_level[format_name]["ret"]<priority_level[format_name]["rer"]:
forward_train = False
if priority_level[format_name]["ret"]<priority_level[format_name]["qa"]:
qa2ret = True
if priority_level[format_name]["rer"]<priority_level[format_name]["qa"]:
qa2rer = True
shard_offset = 0
qascore_offset = 0
for sub_set in range(format2size[format_name]):
if skip_selection==True:
break
rr_filt_dataset = load_from_disk("./sel_file/{}-retrak{}.hf".format(format_name,str(sub_set)))
if epoch_id==0:
rand_idxs = random.sample(range(len(rr_filt_dataset)),len(rr_filt_dataset)//20)
format_rand_idxs = [(_+shard_offset) for _ in rand_idxs]
shard_offset+=len(rr_filt_dataset)
format_train_sel[format_name].append(format_rand_idxs)
else:
format_rand_idxs = format_train_sel[format_name][sub_set]
rand_idxs = [(_-shard_offset) for _ in format_rand_idxs]
shard_offset+=len(rr_filt_dataset)
rr_train_dataset = rr_filt_dataset.select(rand_idxs)
if epoch_id>0:
additional_lines = qa_scores[qascore_offset:qascore_offset+len(rand_idxs)]
qascore_offset+=len(rand_idxs)
rr_train_dataset = rr_train_dataset.add_column("qa_scores",additional_lines)
with amp.autocast(enabled=True):
trainbc(bi_encoder,c_encoder,rr_train_dataset,training_args,forward_train,qa2ret,qa2rer)
filthints(bi_encoder,c_encoder,rr_filt_dataset,training_args,forward_train,qa2ret,qa2rer)
if training_args.local_rank<=0:
torch.save(get_model_obj(bi_encoder).state_dict(),"./retsave/biencoder-{}.pt".format(str(epoch_id+1)))
if training_args.local_rank!=-1:
torch.distributed.barrier()
if training_args.local_rank<=0 and skip_selection==False:
torch.save(get_model_obj(c_encoder).state_dict(),"./rersave/cencoder-{}.pt".format(str(epoch_id+1)))
if skip_selection==False:
fout = open("./mem_scores/format_hints-{}.json".format(str(training_args.local_rank)),'w')
json.dump(format_hints_ids,fout)
fout.close()
if training_args.local_rank<=0 and skip_selection==False:
fout = open("./mem_scores/format_train_sel.json",'w')
json.dump(format_train_sel,fout)
fout.close()
if training_args.local_rank!=-1:
torch.distributed.barrier()
format_hints_ids = load_hints(all_gpus)
if training_args.local_rank<=0:
print("\n=====================generate trainset on epoch{}================\n".format(str(epoch_id)))
for format_name in ["bool","extractive","abstractive","multichoice"]:
generate_samples_with_hints(format_hints_ids,format_name,epoch_id)
if skip_selection==False:
fout = open("./mem_scores/ret-{}.json".format(str(training_args.local_rank)),'w')
json.dump(format_ret_scores,fout)
fout.close()
fout = open("./mem_scores/rer-{}.json".format(str(training_args.local_rank)),'w')
json.dump(format_rer_scores,fout)
fout.close()
if training_args.local_rank!=-1:
torch.distributed.barrier()
eval_ds = {}
eval_exp = {}
if training_args.local_rank<=0:
print("\n=====================Load trainset on epoch{}================\n".format(str(epoch_id)))
for ds_name in to_be_train:
train_dataloaders[ds_name]= load_from_disk("./epoch_data{}/{}-train.hf".format(str(epoch_id),ds_name))
train_dataset = None
for item in to_be_train:
if train_dataset is None:
train_dataset = train_dataloaders[item]
else:
train_dataset = concatenate_datasets([train_dataset, train_dataloaders[item]])
format_offset = [0,0,0,0]
all_idxs = []
all_mkd_dataset = None
for fix,format_name in enumerate(["extractive","abstractive","multichoice","bool"]):
format_mkd_dataset = None
if fix>0:
format_offset[fix] = format_offset[fix-1]
sub_to_be_train = [_ for _ in dataset_files if _ in format2dataset[format_name]]
for tix,tname in enumerate(sub_to_be_train):
tmp_ds = load_from_disk("./epoch_data{}/{}-train.hf".format(str(epoch_id),tname))
format_offset[fix]+=len(tmp_ds)
if format_mkd_dataset is None:
format_mkd_dataset = tmp_ds
else:
format_mkd_dataset = concatenate_datasets([format_mkd_dataset,tmp_ds])
rand_idxs = format_train_sel[format_name][0]
for shard in format_train_sel[format_name][1:]:
rand_idxs.extend(shard)
format_mkd_dataset = format_mkd_dataset.select(rand_idxs)
if all_mkd_dataset is None:
all_mkd_dataset = format_mkd_dataset
else:
all_mkd_dataset = concatenate_datasets([all_mkd_dataset,format_mkd_dataset])
offset = 0 if fix==0 else format_offset[fix-1]
rand_idxs = [(_+offset) for _ in rand_idxs]
all_idxs.extend(rand_idxs)
ret_scores = load_from_ret(all_gpus,to_be_train)
all_idxs_set = set(all_idxs)  # set membership avoids O(n*m) list lookups while preserving order
ret_scores = [_ for idx,_ in enumerate(ret_scores) if idx in all_idxs_set]
rer_scores = load_from_rer(all_gpus,to_be_train)
rer_scores = [_ for idx,_ in enumerate(rer_scores) if idx in all_idxs_set]
all_mkd_dataset = all_mkd_dataset.add_column("ret_scores",ret_scores)
all_mkd_dataset = all_mkd_dataset.add_column("rer_scores",rer_scores)
pp = train_dataset["sample_id"]
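# Average the per-format priority levels; the component with the highest average
# (retriever vs. reranker), if it also beats the QA score, is activated this epoch.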
activate_ret = False
activate_rer = False
avg_ret,avg_rer,avg_qa = 0.0, 0.0, 0.0
for k_ in priority_level.keys():
avg_ret+=priority_level[k_]["ret"]
avg_rer+=priority_level[k_]["rer"]
avg_qa+=priority_level[k_]["qa"]
if avg_ret>avg_rer:
if avg_ret>avg_qa:
activate_ret = True
else:
if avg_rer>avg_qa:
activate_rer = True
model.activate_ret = activate_ret
model.activate_rer = activate_rer
if training_args.local_rank == -1:
data_loader_train = DataLoader(dataset=train_dataset, shuffle=False, batch_size=training_args.per_device_train_batch_size, collate_fn=data_collator)
else:
sampler = DistributedSampler(
train_dataset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_train = DataLoader(dataset=train_dataset, shuffle=False,batch_size=training_args.per_device_train_batch_size,collate_fn=data_collator,sampler=sampler,drop_last=False)
total_steps = (len(train_dataset) // training_args.per_device_train_batch_size // world_size) * training_args.num_train_epochs // training_args.gradient_accumulation_steps
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=train_dataset.select(range(1000)),#change
eval_dataset=None,
eval_examples=None,
answer_column_name=answer_column,
dataset_name="squad1_1",
tokenizer=None,
data_collator=data_collator,
compute_metrics= compute_metrics if training_args.predict_with_generate else None,
callbacks=[callbacker],
)
if training_args.local_rank<=0:
print("\n=====================MKD training on epoch{}================\n".format(str(epoch_id)))
to_save_mem_scores = train_qascore(model,all_mkd_dataset,training_args,data_collator)
fout = open("./mem_scores/{}.json".format(str(training_args.local_rank)),'w')
json.dump(to_save_mem_scores,fout)
fout.close()
if epoch_id>0:
model = PromptT5.from_pretrained("./epoch_ckpt{}".format(str(epoch_id)))
output_dir = "./epoch_ckpt{}".format(str(epoch_id+1))
if epoch_id>0:
train_result = trainer.train(resume_from_checkpoint="./epoch_ckpt{}".format(str(epoch_id)))
else:
train_result = trainer.train()
trainer.save_model(output_dir="./epoch_ckpt{}".format(str(epoch_id+1)))
trainer.save_score(training_args.local_rank)
trainer.args.output_dir = output_dir
trainer.save_state()
torch.save(trainer.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(trainer.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
gc.collect()
torch.cuda.empty_cache()
return None
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
163,248 | dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
format2id = {"extractive":0,"abstractive":1,"multichoice":2,"bool":3}
dataset2format= {}
for k,vs in format2dataset.items():
for v in vs:
dataset2format[v] = k
import glob
import json
import logging
import pickle
import time
from typing import List, Tuple, Dict, Iterator
from datasets import load_dataset
import numpy as np
import torch
from torch import Tensor as T
from torch import nn
from datasets import load_from_disk
from transformers import (
AutoTokenizer,
AdamW,
get_linear_schedule_with_warmup,
TrainingArguments,
)
import json
tokenizer = AutoTokenizer.from_pretrained("./bert-base-uncased")
def read_hints(path):
if path=="extractive":
num_total = 4 #shards
elif path=="abstractive":
num_total = 4
elif path=="multichoice":
num_total = 4
else:
num_total = 2
all_hints = []
all_demons = []
total_pathin = "./textinput/"+path+"dev"+"-glminput.jsonl"
finput = open(total_pathin,'r',encoding='utf-8')
for num_id in range(num_total):
total_path = "./plmresource/"+path+"dev"+str(num_id)+"-glmout.json"
fin = open(total_path,'r',encoding='utf-8')
b = json.load(fin)["data"]
all_hints.extend(b)
for item in finput.readlines():
all_demons.append(json.loads(item)["text"])
assert len(all_demons)==len(all_hints)
return all_hints,all_demons
import random
from tqdm import tqdm
def gen_rr_corpus(name):
base_id = 2e5+format2id[name]  # note: a float; downstream loaders cast ids back via int(float(...))
start_id = 0
for item in dataset_files:
fm = dataset2format[item]
if fm==name:
try:
    data_path = "./data_process/data/{}/dev.json".format(item)
    dataset = load_dataset("json", data_files=data_path)["train"]
except FileNotFoundError:
    # fall back to the test split when this dataset has no dev split
    data_path = "./data_process/data/{}/test.json".format(item)
    dataset = load_dataset("json", data_files=data_path)["train"]
demonstrations = []
format_hints = []
all_hints,all_demons = read_hints(name)
pdz = open("./textinput/{}dev-glminput.jsonl".format(name),'r',encoding='utf-8')
pdz_lines = pdz.readlines()
pdz_lines = [json.loads(item) for item in pdz_lines]
pdzs = [item['id'] for item in pdz_lines]
assert len(pdzs)==len(all_hints)
all_cases = []
case_prev = []
case_lens = []
idx = 0
pl = False
total_count = 0
for itemd,itemh,itemp in tqdm(zip(all_demons,all_hints,pdzs)):
if pl==True and itemp!=idx:
assert False
if itemp==idx:
idx+=1
if case_prev != []:
case_lens.append(len(case_prev))
# print(case_lens)
sub_ids = list(range(len(case_prev)))
assert len(case_prev)<=64, "each query group holds at most 64 candidate cases"
lacked = 64-len(case_prev)
# pad to exactly 64 by resampling existing cases; random.choices samples with
# replacement, so it also works when fewer than 32 cases are available
# (random.sample would raise ValueError there); padded slots are tagged 999
pp = random.choices(case_prev,k=lacked)
sub_ids.extend([999]*lacked)
case_prev.extend(pp)
all_keys = case_prev[0].keys()
case_prev_reshape={}
for k in all_keys:
case_prev_reshape[k]=[kit[k] for kit in case_prev]
case_prev_reshape["sub_ids"]=sub_ids
case_prev_reshape["sample_id"]=base_id+start_id
start_id+=1
all_cases.append(case_prev_reshape)
case_prev=[]
if idx!=1 and (idx-1)%5000==0:
pout = open("./sel_file/{}dev-retrak{}.json".format(name,str(total_count)),'w',encoding='utf-8')
for atom_line in all_cases:
print(json.dumps(atom_line),file=pout)
pout.close()
ppp = load_dataset("json", data_files="./sel_file/{}dev-retrak{}.json".format(name,str(total_count)))["train"]
ppp.save_to_disk("./sel_file/{}dev-retrak{}.hf".format(name,str(total_count)))
all_cases = []
total_count+=1
try:
ctx,query = itemd.split("\n\nQ:")
ctx = "Q:".join(ctx.split("Q:")[1:]).strip()
pl=False
except:
pl=True
continue
ctx_q,ctx_a = "\nA:".join(ctx.split("\nA:")[:-1]),ctx.split("\nA:")[-1].strip()  # the answer is the final "\nA:" segment, matching the join over everything before it
ctx_q_parts = ctx_q.split("\\n")
demon_view = ""
query_parts = (query.split("\nA:")[0]).split("\\n")
query_parts = [_.strip() for _ in query_parts]
ctx_q_parts =[_.strip() for _ in ctx_q_parts]
hint_view = ""
query_view = ""
if name=="multichoice":
if len(ctx_q_parts)==3:
ctx_qa = " \\n ".join([ctx_a,ctx_q_parts[2],ctx_q_parts[1],ctx_q_parts[0]])
ctx_qa_h = " \\n ".join([itemh,ctx_qa])
else:
assert len(ctx_q_parts)==2
ctx_qa = " \\n ".join([ctx_a,ctx_q_parts[1],ctx_q_parts[0]])
ctx_qa_h = " \\n ".join([itemh,ctx_qa])
if (len(query_parts[0].split(" "))>64):
query_parts[0] = " ".join(query_parts[0].split(" ")[:64])
if len(query_parts)==3:
query = " \\n ".join([query_parts[2],query_parts[1],query_parts[0]])
else:
assert len(query_parts)==2
query = " \\n ".join([query_parts[1],query_parts[0]])
else:
assert len(ctx_q_parts)==2
assert len(query_parts)==2
ctx_qa = " \\n ".join([ctx_a,ctx_q_parts[1],ctx_q_parts[0]])
ctx_qa_h = " \\n ".join([itemh,ctx_qa])
if (len(query_parts[0].split(" "))>64):
query_parts[0] = " ".join(query_parts[0].split(" ")[:64])
query = " \\n ".join([query_parts[1],query_parts[0]])
query_out = tokenizer(query,return_token_type_ids=True, return_attention_mask=True,max_length=112, truncation=True,padding='max_length')
query_ids,query_attentions,query_ctxs = query_out["input_ids"],query_out["attention_mask"],query_out["token_type_ids"]
ctx_out = tokenizer(ctx_qa,return_token_type_ids=True, return_attention_mask=True,max_length=112, truncation=True,padding='max_length')
ctx_ids,ctx_attentions,ctx_ctxs = ctx_out["input_ids"],ctx_out["attention_mask"],ctx_out["token_type_ids"]
cross_out = tokenizer(query,ctx_qa_h,return_token_type_ids=True, return_attention_mask=True,max_length=144, truncation=True,padding='max_length')
cross_ids,cross_attentions,cross_ctxs = cross_out["input_ids"],cross_out["attention_mask"],cross_out["token_type_ids"]
tp ={"query_ids":query_ids,"query_attentions":query_attentions,
"ctx_ids":ctx_ids,"ctx_attentions":ctx_attentions,
"cross_ids":cross_ids,"cross_attentions":cross_attentions,"cross_ctxs":cross_ctxs,"id":idx}
case_prev.append(tp)
if case_prev != []:
case_lens.append(len(case_prev))
# print(case_lens)
sub_ids = list(range(len(case_prev)))
assert len(case_prev)<=64, "each query group holds at most 64 candidate cases"
lacked = 64-len(case_prev)
# same padding scheme as above: resample with replacement, tag padded slots with 999
pp = random.choices(case_prev,k=lacked)
sub_ids.extend([999]*lacked)
case_prev.extend(pp)
all_keys = case_prev[0].keys()
case_prev_reshape={}
for k in all_keys:
case_prev_reshape[k]=[kit[k] for kit in case_prev]
case_prev_reshape["sub_ids"]=sub_ids
case_prev_reshape["sample_id"]=base_id+start_id
start_id+=1
all_cases.append(case_prev_reshape)
case_prev=[]
pout = open("./sel_file/{}dev-retrak{}.json".format(name,str(total_count)),'w',encoding='utf-8')
for atom_line in all_cases:
print(json.dumps(atom_line),file=pout)
pout.close()
ppp = load_dataset("json", data_files="./sel_file/{}dev-retrak{}.json".format(name,str(total_count)))["train"]
ppp.save_to_disk("./sel_file/{}dev-retrak{}.hf".format(name,str(total_count)))
all_cases = [] | null |
163,250 | import json
def load_from_rer_(devices,datasets):
pp_total = {}
dataset2id = {}
selected = {"squad1_1":8164,"squad2":130319,"narrativeqa_dev":3567,"mctest_corrected_the_separator":342,"race_string":14536,"arc_hard":317,"arc_easy":395,"boolq":765,"openbookqa":580,"newsqa":445,"quoref":1574,"ropes":1272,"drop":5214,"natural_questions_with_dpr_para":32590,"commonsenseqa":1034,"qasc":653,"physical_iqa":494,"social_iqa":2077,"winogrande_xl":2634,"multirc":290,"boolq_np":923}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
for d_ix,item in enumerate(dataset_files):
dataset2id[item]=d_ix
for item in devices:
fin = open("./mem_scores/rer-{}.json".format(item),'r')
pp = json.load(fin)
for i in pp.keys():
pp_total[int(i)]=pp[i]
all_rer_scores_ranked = []
for dataname in datasets:
sample_ids =list(range(selected[dataname]))
sample_ids = [_+dataset2id[dataname]*1000000 for _ in sample_ids]
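# NOTE: pp_total is populated above but never consulted here; every sample
# receives the fixed placeholder ranking [0, 1, 2, 3]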
sample_scores = [[0,1,2,3] for _ in sample_ids]
all_rer_scores_ranked.extend(sample_scores)
return all_rer_scores_ranked | null |
163,251 | import json
def load_from_ret_(devices,datasets):
pp_total = {}
dataset2id = {}
selected = {"squad1_1":8164,"squad2":130319,"narrativeqa_dev":3567,"mctest_corrected_the_separator":342,"race_string":14536,"arc_hard":317,"arc_easy":395,"boolq":765,"openbookqa":580,"newsqa":445,"quoref":1574,"ropes":1272,"drop":5214,"natural_questions_with_dpr_para":32590,"commonsenseqa":1034,"qasc":653,"physical_iqa":494,"social_iqa":2077,"winogrande_xl":2634,"multirc":290,"boolq_np":923}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
for d_ix,item in enumerate(dataset_files):
dataset2id[item]=d_ix
for item in devices:
fin = open("./mem_scores/ret-{}.json".format(item),'r')
pp = json.load(fin)
for i in pp.keys():
pp_total[int(i)]=pp[i]
all_ret_scores_ranked = []
for dataname in datasets:
sample_ids =list(range(selected[dataname]))
sample_ids = [_+dataset2id[dataname]*1000000 for _ in sample_ids]
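# NOTE: pp_total is populated above but never consulted here; every sample
# receives the fixed placeholder ranking [0, 1, 2, 3]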
sample_scores = [[0,1,2,3] for _ in sample_ids]
all_ret_scores_ranked.extend(sample_scores)
return all_ret_scores_ranked | null |
163,252 | import json
def load_from_ret(devices,datasets):
pp_total = {}
dataset2id = {}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
for d_ix,item in enumerate(dataset_files):
dataset2id[item]=d_ix
for item in devices:
fin = open("./mem_scores/ret-{}.json".format(item),'r')
pp = json.load(fin)
for i in pp.keys():
pp_total[int(float(i))]=pp[i]
all_ret_scores_ranked = pp_total.items()
all_ret_scores_ranked = sorted(all_ret_scores_ranked, key = lambda k:k[0])
all_ret_scores_ranked = [_[1] for _ in all_ret_scores_ranked]
return all_ret_scores_ranked | null |
163,253 | import json
def load_from_rer(devices,datasets):
pp_total = {}
dataset2id = {}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
for d_ix,item in enumerate(dataset_files):
dataset2id[item]=d_ix
for item in devices:
fin = open("./mem_scores/rer-{}.json".format(item),'r')
pp = json.load(fin)
for i in pp.keys():
pp_total[int(float(i))]=pp[i]
all_rer_scores_ranked = []
all_rer_scores_ranked = pp_total.items()
all_rer_scores_ranked = sorted(all_rer_scores_ranked, key = lambda k:k[0])
all_rer_scores_ranked = [_[1] for _ in all_rer_scores_ranked]
return all_rer_scores_ranked | null |
163,254 | import json
def load_from_qa(devices,datasets):
pp_total = {}
dataset2id = {}
selected = {"squad1_1":8164,"squad2":130319,"narrativeqa_dev":3567,"mctest_corrected_the_separator":342,"race_string":14536,"arc_hard":317,"arc_easy":395,"boolq":765,"openbookqa":580,"newsqa":445,"quoref":1574,"ropes":1272,"drop":5214,"natural_questions_with_dpr_para":32590,"commonsenseqa":1034,"qasc":653,"physical_iqa":494,"social_iqa":2077,"winogrande_xl":2634,"multirc":290,"boolq_np":923}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
for d_ix,item in enumerate(dataset_files):
dataset2id[item]=d_ix
for item in devices:
fin = open("./mem_scores/{}.json".format(item),'r')
pp = json.load(fin)
for i in pp.keys():
pp_total[int(float(i))]=pp[i]
all_qa_scores_ranked = []
all_qa_scores_ranked = pp_total.items()
all_qa_scores_ranked = sorted(all_qa_scores_ranked, key = lambda k:k[0])
all_qa_scores_ranked = [_[1] for _ in all_qa_scores_ranked]
return all_qa_scores_ranked | null |
163,255 | import json
def load_all_select_ids(devices,datasets):
qa_total = {}
rt_total = {}
rr_total = {}
dataset2id = {}
selected = {"squad1_1":8164,"squad2":130319,"narrativeqa_dev":3567,"mctest_corrected_the_separator":342,"race_string":14536,"arc_hard":317,"arc_easy":395,"boolq":765,"openbookqa":580,"newsqa":445,"quoref":1574,"ropes":1272,"drop":5214,"natural_questions_with_dpr_para":32590,"commonsenseqa":1034,"qasc":653,"physical_iqa":494,"social_iqa":2077,"winogrande_xl":2634,"multirc":290,"boolq_np":923}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
for d_ix,item in enumerate(dataset_files):
dataset2id[item]=d_ix
for item in devices:
fid = open("./mem_scores/qa_ids-{}.json".format(item),'r')
# print("./mem_scores/qa_ids-{}.json".format(item))
pp = json.load(fid)
for i in pp.keys():
qa_total[int(i)]=pp[i]
rtid = open("./mem_scores/rt_ids-{}.json".format(item),'r')
pp = json.load(rtid)
for i in pp.keys():
rt_total[int(i)]=pp[i]
rrid = open("./mem_scores/rr_ids-{}.json".format(item),'r')
pp = json.load(rrid)
for i in pp.keys():
rr_total[int(i)]=pp[i]
# all_qa_scores_ranked = []
# for dataname in datasets:
# sample_ids =list(range(selected[dataname]))
# sample_ids = [_+dataset2id[dataname]*1000000 for _ in sample_ids]
# sample_scores = [pp_total[_] for _ in sample_ids]
# all_qa_scores_ranked.extend(sample_scores)
return qa_total, rt_total, rr_total
163,256 | import json
def load_level(devices):
format2size={"bool":1,"multichoice":5,"extractive":29,"abstractive":9}
priority_level = {}
for k_ in format2size.keys():
priority_level[k_]={"ret":0,"rer":0,"qa":0}
for device in devices:
# print("device:",device)
fid = open("./mem_scores/priority_level-{}.json".format(device),'r',encoding='utf-8')
pp = json.load(fid)
for k_ in format2size.keys():
for item in ["ret","rer","qa"]:
priority_level[k_][item]+=pp[k_][item]
return priority_level | null |
163,257 | import json
def load_hints(devices):
total = {}
for device in devices:
fid = open("./mem_scores/format_hints-{}.json".format(device),'r',encoding='utf-8')
pp = json.load(fid)
for k_ in pp.keys():
total[int(float(k_))]=pp[k_]
return total | null |
163,258 | import json
def load_hints_dev(devices,epoch_id):
total = {}
for device in devices:
fid = open("./mem_scores/format_hintsdev-{}{}.json".format(device,str(epoch_id)),'r',encoding='utf-8')
pp = json.load(fid)
for k_ in pp.keys():
total[int(float(k_))]=pp[k_]
return total | null |
163,259 | import json
def load_hints_test(devices,epoch_id):
total = {}
for device in devices:
fid = open("./mem_scores/format_hintstest-{}{}.json".format(device,str(epoch_id)),'r',encoding='utf-8')
pp = json.load(fid)
for k_ in pp.keys():
total[int(float(k_))]=pp[k_]
return total | null |
163,260 | import logging
from typing import List, Tuple
import torch
from torch import Tensor as T
from torch import nn
from torch.cuda import amp
import random
import numpy as np
from transformers.models.bert import BertConfig, BertModel
from transformers.optimization import AdamW
from transformers.models.bert import BertTokenizer
from transformers.models.roberta import RobertaTokenizer
from .biencoder import BiEncoder, BiEncoderBatch
from dpr.utils.data_utils import Tensorizer
from dpr.data.biencoder_data import BiEncoderSample
from dpr.utils.model_utils import CheckpointState
from .reader import Reader
def get_bert_tensorizer(cfg, tokenizer=None):
sequence_length = 512  # hard-coded; upstream DPR uses cfg.encoder.sequence_length (originally 256)
pretrained_model_cfg = "bert-base-uncased"  # hard-coded; upstream DPR uses cfg.encoder.pretrained_model_cfg
if not tokenizer:
tokenizer = get_bert_tokenizer(
"bert-base-uncased", True
)
# special-token registration from upstream DPR is disabled here:
# if cfg.special_tokens:
#     _add_special_tokens(tokenizer, cfg.special_tokens)
return BertTensorizer(tokenizer, sequence_length)
def get_optimizer(
model: nn.Module,
learning_rate: float = 1e-5,
adam_eps: float = 1e-8,
weight_decay: float = 0.0,
) -> torch.optim.Optimizer:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=adam_eps)
return optimizer
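For orientation, a minimal sketch of how get_optimizer might be invoked; the stand-in module and hyper-parameters are illustrative assumptions, not values taken from this project. Parameters whose names contain "bias" or "LayerNorm.weight" land in the zero-weight-decay group.
encoder = nn.Linear(768, 768)  # stand-in module (assumption for illustration); any nn.Module works
optimizer = get_optimizer(encoder, learning_rate=2e-5, weight_decay=0.01)
optimizer.zero_grad()  # ready for a training step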
class HFBertEncoder(BertModel):
def __init__(self, config, project_dim: int = 0):
BertModel.__init__(self, config)
assert config.hidden_size > 0, "Encoder hidden_size can't be zero"
self.encode_proj = (
nn.Linear(config.hidden_size, project_dim) if project_dim != 0 else None
)
self.init_weights()
@classmethod
def init_encoder(
cls,
cfg_name: str,
projection_dim: int = 0,
dropout: float = 0.1,
pretrained: bool = True,
**kwargs
) -> BertModel:
cfg = BertConfig.from_pretrained(cfg_name if cfg_name else "bert-base-uncased")
if dropout != 0:
cfg.attention_probs_dropout_prob = dropout
cfg.hidden_dropout_prob = dropout
if pretrained:
return cls.from_pretrained(
cfg_name, config=cfg, project_dim=projection_dim, **kwargs
)
else:
return HFBertEncoder(cfg, project_dim=projection_dim)
def forward(
self,
input_ids: T,
attention_mask: T,
representation_token_pos=0,
) -> Tuple[T, ...]:
if self.config.output_hidden_states:
sequence_output, pooled_output, hidden_states = super().forward(
input_ids=input_ids,
# token_type_ids=token_type_ids,
attention_mask=attention_mask,
return_dict=False,
)
else:
hidden_states = None
sequence_output, pooled_output = super().forward(
input_ids=input_ids,
# token_type_ids=token_type_ids,
attention_mask=attention_mask,
return_dict=False,
)
if isinstance(representation_token_pos, int):
pooled_output = sequence_output[:, representation_token_pos, :]
else: # treat as a tensor
bsz = sequence_output.size(0)
assert (
representation_token_pos.size(0) == bsz
), "query bsz={} while representation_token_pos bsz={}".format(
bsz, representation_token_pos.size(0)
)
pooled_output = torch.stack(
[
sequence_output[i, representation_token_pos[i, 1], :]
for i in range(bsz)
]
)
if self.encode_proj:
pooled_output = self.encode_proj(pooled_output)
return sequence_output, pooled_output, hidden_states
def get_out_size(self):
if self.encode_proj:
return self.encode_proj.out_features
return self.config.hidden_size
class BiEncoder(nn.Module):
"""Bi-Encoder model component. Encapsulates query/question and context/passage encoders."""
def __init__(
self,
question_model: nn.Module,
ctx_model: nn.Module,
fix_q_encoder: bool = False,
fix_ctx_encoder: bool = False,
):
super(BiEncoder, self).__init__()
self.question_model = question_model
self.ctx_model = ctx_model
self.fix_q_encoder = fix_q_encoder
self.fix_ctx_encoder = fix_ctx_encoder
@staticmethod
def get_representation(
sub_model: nn.Module,
ids: T,
attn_mask: T,
fix_encoder: bool = False,
representation_token_pos=0,
) -> (T, T, T):
sequence_output = None
pooled_output = None
hidden_states = None
if ids is not None:
if fix_encoder:
with torch.no_grad():
sequence_output, pooled_output, hidden_states = sub_model(
ids,
attn_mask,
representation_token_pos=representation_token_pos,
)
if sub_model.training:
sequence_output.requires_grad_(requires_grad=True)
pooled_output.requires_grad_(requires_grad=True)
else:
sequence_output, pooled_output, hidden_states = sub_model(
ids,
attn_mask,
representation_token_pos=representation_token_pos,
)
return sequence_output, pooled_output, hidden_states
def forward(
self,
question_ids: T,
question_attn_mask: T,
context_ids: T,
ctx_attn_mask: T,
encoder_type: str = None,
representation_token_pos=0,
) -> Tuple[T, T]:
with amp.autocast(enabled=True):
q_encoder = (
self.question_model
if encoder_type is None or encoder_type == "question"
else self.ctx_model
)
_q_seq, q_pooled_out, _q_hidden = self.get_representation(
q_encoder,
question_ids,
question_attn_mask,
self.fix_q_encoder,
representation_token_pos=representation_token_pos,
)
ctx_encoder = (
self.ctx_model
if encoder_type is None or encoder_type == "ctx"
else self.question_model
)
_ctx_seq, ctx_pooled_out, _ctx_hidden = self.get_representation(
ctx_encoder, context_ids, ctx_attn_mask, self.fix_ctx_encoder
)
return q_pooled_out, ctx_pooled_out
# TODO delete once moved to the new method
@classmethod
def create_biencoder_input(
cls,
samples: List,
tensorizer: Tensorizer,
insert_title: bool,
num_hard_negatives: int = 0,
num_other_negatives: int = 0,
shuffle: bool = True,
shuffle_positives: bool = True,
hard_neg_fallback: bool = True,
) -> BiEncoderBatch:
"""
Creates a batch of the biencoder training tuple.
:param samples: list of data items (from json) to create the batch for
:param tensorizer: components to create model input tensors from a text sequence
:param insert_title: enables title insertion at the beginning of the context sequences
:param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
:param num_other_negatives: amount of other negatives per question (taken from samples' pools)
:param shuffle: shuffles negative passages pools
:param shuffle_positives: shuffles positive passages pools
:return: BiEncoderBatch tuple
"""
question_tensors = []
ctx_tensors = []
positive_ctx_indices = []
hard_neg_ctx_indices = []
for sample in samples:
# ctx+ & [ctx-] composition
# as of now, take the first(gold) ctx+ only
if shuffle and shuffle_positives:
positive_ctxs = sample["positive_ctxs"]
positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
else:
positive_ctx = sample["positive_ctxs"][0]
neg_ctxs = sample["negative_ctxs"]
hard_neg_ctxs = sample["hard_negative_ctxs"]
if shuffle:
random.shuffle(neg_ctxs)
random.shuffle(hard_neg_ctxs)
if hard_neg_fallback and len(hard_neg_ctxs) == 0:
hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]
neg_ctxs = neg_ctxs[0:num_other_negatives]
hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]
all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
hard_negatives_start_idx = 1
hard_negatives_end_idx = 1 + len(hard_neg_ctxs)
current_ctxs_len = len(ctx_tensors)
sample_ctxs_tensors = [
tensorizer.text_to_tensor(
ctx["text"],
title=ctx["title"] if (insert_title and "title" in ctx) else None,
)
for ctx in all_ctxs
]
ctx_tensors.extend(sample_ctxs_tensors)
positive_ctx_indices.append(current_ctxs_len)
hard_neg_ctx_indices.append(
[
i
for i in range(
current_ctxs_len + hard_negatives_start_idx,
current_ctxs_len + hard_negatives_end_idx,
)
]
)
question_tensors.append(tensorizer.text_to_tensor(question))
ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)
ctx_segments = torch.zeros_like(ctxs_tensor)
question_segments = torch.zeros_like(questions_tensor)
return BiEncoderBatch(
questions_tensor,
question_segments,
ctxs_tensor,
ctx_segments,
positive_ctx_indices,
hard_neg_ctx_indices,
"question",
)
@classmethod
def create_biencoder_input2(
cls,
samples: List[BiEncoderSample],
tensorizer: Tensorizer,
insert_title: bool,
num_hard_negatives: int = 0,
num_other_negatives: int = 0,
shuffle: bool = True,
shuffle_positives: bool = False,
hard_neg_fallback: bool = True,
query_token: str = None,
) -> BiEncoderBatch:
"""
Creates a batch of the biencoder training tuple.
:param samples: list of BiEncoderSample-s to create the batch for
:param tensorizer: components to create model input tensors from a text sequence
:param insert_title: enables title insertion at the beginning of the context sequences
:param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
:param num_other_negatives: amount of other negatives per question (taken from samples' pools)
:param shuffle: shuffles negative passages pools
:param shuffle_positives: shuffles positive passages pools
:return: BiEncoderBatch tuple
"""
question_tensors = []
ctx_tensors = []
positive_ctx_indices = []
hard_neg_ctx_indices = []
for sample in samples:
# ctx+ & [ctx-] composition
# as of now, take the first(gold) ctx+ only
if shuffle and shuffle_positives:
positive_ctxs = sample.positive_passages
positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
else:
positive_ctx = sample.positive_passages[0]
neg_ctxs = sample.negative_passages
hard_neg_ctxs = sample.hard_negative_passages
question = sample.query
# question = normalize_question(sample.query)
if shuffle:
random.shuffle(neg_ctxs)
random.shuffle(hard_neg_ctxs)
if hard_neg_fallback and len(hard_neg_ctxs) == 0:
hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]
neg_ctxs = neg_ctxs[0:num_other_negatives]
hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]
all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
hard_negatives_start_idx = 1
hard_negatives_end_idx = 1 + len(hard_neg_ctxs)
current_ctxs_len = len(ctx_tensors)
sample_ctxs_tensors = [
tensorizer.text_to_tensor(
ctx.text, title=ctx.title if (insert_title and ctx.title) else None
)
for ctx in all_ctxs
]
ctx_tensors.extend(sample_ctxs_tensors)
positive_ctx_indices.append(current_ctxs_len)
hard_neg_ctx_indices.append(
[
i
for i in range(
current_ctxs_len + hard_negatives_start_idx,
current_ctxs_len + hard_negatives_end_idx,
)
]
)
if query_token:
# TODO: tmp workaround for EL, remove or revise
if query_token == "[START_ENT]":
query_span = _select_span_with_token(
question, tensorizer, token_str=query_token
)
question_tensors.append(query_span)
else:
question_tensors.append(
tensorizer.text_to_tensor(" ".join([query_token, question]))
)
else:
question_tensors.append(tensorizer.text_to_tensor(question))
ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)
ctx_segments = torch.zeros_like(ctxs_tensor)
question_segments = torch.zeros_like(questions_tensor)
return BiEncoderBatch(
questions_tensor,
question_segments,
ctxs_tensor,
ctx_segments,
positive_ctx_indices,
hard_neg_ctx_indices,
"question",
)
def load_state(self, saved_state: CheckpointState):
# TODO: make a long term HF compatibility fix
if "question_model.embeddings.position_ids" in saved_state.model_dict:
del saved_state.model_dict["question_model.embeddings.position_ids"]
del saved_state.model_dict["ctx_model.embeddings.position_ids"]
self.load_state_dict(saved_state.model_dict,strict=False)
def get_state_dict(self):
return self.state_dict()
def get_bert_biencoder_components(cfg, inference_only: bool = False, **kwargs):#here!ours!
dropout = 0.1 #cfg.encoder.dropout if hasattr(cfg.encoder, "dropout") else 0.0
question_encoder = HFBertEncoder.init_encoder(
"bert-base-uncased",#cfg.encoder.pretrained_model_cfg,
projection_dim=0,#cfg.encoder.projection_dim,
dropout=dropout,
pretrained=True,#cfg.encoder.pretrained,
**kwargs
)
ctx_encoder = HFBertEncoder.init_encoder(
"bert-base-uncased",#cfg.encoder.pretrained_model_cfg,
projection_dim=0,#cfg.encoder.projection_dim,
dropout=dropout,
pretrained=True,#cfg.encoder.pretrained,
**kwargs
)
fix_ctx_encoder = False#cfg.fix_ctx_encoder if hasattr(cfg, "fix_ctx_encoder") else False
#!!!!!!!!!
biencoder = BiEncoder(
question_encoder, ctx_encoder, fix_ctx_encoder=fix_ctx_encoder
)
optimizer = (
get_optimizer(
biencoder,
learning_rate=1e-5,#cfg.train.learning_rate,
adam_eps=1e-8,#cfg.train.adam_eps,
weight_decay=0.0#cfg.train.weight_decay,
)
if not inference_only
else None
)
tensorizer = get_bert_tensorizer(cfg)#dw
return tensorizer, biencoder, optimizer | null |
163,261 | import logging
from typing import Tuple
import torch
from torch import Tensor as T
from torch import nn
from transformers.models.bert import BertConfig, BertModel
from transformers.optimization import AdamW
from transformers.models.bert import BertTokenizer
from transformers.models.roberta import RobertaTokenizer
from .biencoder import BiEncoder
from dpr.utils.data_utils import Tensorizer
from .reader import Reader
def get_bert_tensorizer(cfg, tokenizer=None):
def get_optimizer(
model: nn.Module,
learning_rate: float = 1e-5,
adam_eps: float = 1e-8,
weight_decay: float = 0.0,
) -> torch.optim.Optimizer:
class HFBertEncoder(BertModel):
def __init__(self, config, project_dim: int = 0):
def init_encoder(
cls,
cfg_name: str,
projection_dim: int = 0,
dropout: float = 0.1,
pretrained: bool = True,
**kwargs
) -> BertModel:
def forward(
self,
input_ids: T,
attention_mask: T,
representation_token_pos=0,
) -> Tuple[T, ...]:
def get_out_size(self):
class Reader(nn.Module):
def __init__(self, encoder: nn.Module, hidden_size):
def forward(self, input_ids: T, attention_mask: T, start_positions=None, end_positions=None, answer_mask=None):
def _forward(self, input_ids, attention_mask):
def get_bert_reader_components(cfg, inference_only: bool = False, **kwargs):
dropout = cfg.encoder.dropout if hasattr(cfg.encoder, "dropout") else 0.0
encoder = HFBertEncoder.init_encoder(
cfg.encoder.pretrained_model_cfg,
projection_dim=cfg.encoder.projection_dim,
dropout=dropout,
pretrained=cfg.encoder.pretrained,
**kwargs
)
hidden_size = encoder.config.hidden_size
reader = Reader(encoder, hidden_size)
optimizer = (
get_optimizer(
reader,
learning_rate=cfg.train.learning_rate,
adam_eps=cfg.train.adam_eps,
weight_decay=cfg.train.weight_decay,
)
if not inference_only
else None
)
tensorizer = get_bert_tensorizer(cfg)
return tensorizer, reader, optimizer | null |
163,262 | import logging
from typing import Tuple
import torch
from torch import Tensor as T
from torch import nn
from transformers.models.bert import BertConfig, BertModel
from transformers.optimization import AdamW
from transformers.models.bert import BertTokenizer
from transformers.models.roberta import RobertaTokenizer
from .biencoder import BiEncoder
from dpr.utils.data_utils import Tensorizer
from .reader import Reader
logger = logging.getLogger(__name__)
def _add_special_tokens(tokenizer, special_tokens):
logger.info("Adding special tokens %s", special_tokens)
special_tokens_num = len(special_tokens)
# TODO: this is a hack-y logic that uses some private tokenizer structure which can be changed in HF code
assert special_tokens_num < 50
unused_ids = [
tokenizer.vocab["[unused{}]".format(i)] for i in range(special_tokens_num)
]
logger.info("Utilizing the following unused token ids %s", unused_ids)
for idx, id in enumerate(unused_ids):
del tokenizer.vocab["[unused{}]".format(idx)]
tokenizer.vocab[special_tokens[idx]] = id
tokenizer.ids_to_tokens[id] = special_tokens[idx]
tokenizer._additional_special_tokens = list(special_tokens)
logger.info(
"Added special tokenizer.additional_special_tokens %s",
tokenizer.additional_special_tokens,
)
logger.info("Tokenizer's all_special_tokens %s", tokenizer.all_special_tokens) | null |
163,263 | import logging
from typing import Tuple
import torch
from torch import Tensor as T
from torch import nn
from transformers.models.bert import BertConfig, BertModel
from transformers.optimization import AdamW
from transformers.models.bert import BertTokenizer
from transformers.models.roberta import RobertaTokenizer
from .biencoder import BiEncoder
from dpr.utils.data_utils import Tensorizer
from .reader import Reader
def get_roberta_tokenizer(pretrained_cfg_name: str, do_lower_case: bool = True):
class RobertaTensorizer(BertTensorizer):
def __init__(self, tokenizer, max_length: int, pad_to_max: bool = True):
def get_roberta_tensorizer(args, tokenizer=None):
if not tokenizer:
tokenizer = get_roberta_tokenizer(
args.pretrained_model_cfg, do_lower_case=args.do_lower_case
)
return RobertaTensorizer(tokenizer, args.sequence_length) | null |
163,264 | import logging
from typing import Tuple
import torch
from pytext.models.representations.transformer_sentence_encoder import TransformerSentenceEncoder
from pytext.optimizer.optimizers import AdamW
from torch import Tensor as T
from torch import nn
from .biencoder import BiEncoder
def get_optimizer(model: nn.Module, learning_rate: float = 1e-5, adam_eps: float = 1e-8,
weight_decay: float = 0.0) -> torch.optim.Optimizer:
class PytextBertEncoder(TransformerSentenceEncoder):
def __init__(self, config: TransformerSentenceEncoder.Config,
padding_idx: int,
vocab_size: int,
projection_dim: int = 0,
*args,
**kwarg
):
def init_encoder(cls, pretrained_file: str = None, projection_dim: int = 0, dropout: float = 0.1,
vocab_size: int = 0,
padding_idx: int = 0, **kwargs):
def forward(self, input_ids: T, token_type_ids: T, attention_mask: T) -> Tuple[T, ...]:
def get_out_size(self):
class BiEncoder(nn.Module):
def __init__(
self,
question_model: nn.Module,
ctx_model: nn.Module,
fix_q_encoder: bool = False,
fix_ctx_encoder: bool = False,
):
def get_representation(
sub_model: nn.Module,
ids: T,
attn_mask: T,
fix_encoder: bool = False,
representation_token_pos=0,
) -> (T, T, T):
def forward(
self,
question_ids: T,
question_attn_mask: T,
context_ids: T,
ctx_attn_mask: T,
encoder_type: str = None,
representation_token_pos=0,
) -> Tuple[T, T]:
def create_biencoder_input(
cls,
samples: List,
tensorizer: Tensorizer,
insert_title: bool,
num_hard_negatives: int = 0,
num_other_negatives: int = 0,
shuffle: bool = True,
shuffle_positives: bool = True,
hard_neg_fallback: bool = True,
) -> BiEncoderBatch:
def create_biencoder_input2(
cls,
samples: List[BiEncoderSample],
tensorizer: Tensorizer,
insert_title: bool,
num_hard_negatives: int = 0,
num_other_negatives: int = 0,
shuffle: bool = True,
shuffle_positives: bool = False,
hard_neg_fallback: bool = True,
query_token: str = None,
) -> BiEncoderBatch:
def load_state(self, saved_state: CheckpointState):
def get_state_dict(self):
class BertTensorizer(Tensorizer):
def __init__(
self, tokenizer: BertTokenizer, max_length: int, pad_to_max: bool = True
):
def text_to_tensor(
self,
text: str,
title: str = None,
add_special_tokens: bool = True,
apply_max_len: bool = True,
):
def get_pair_separator_ids(self) -> T:
def get_pad_id(self) -> int:
def get_attn_mask(self, tokens_tensor: T) -> T:
def is_sub_word_id(self, token_id: int):
def to_string(self, token_ids, skip_special_tokens=True):
def set_pad_to_max(self, do_pad: bool):
def get_token_id(self, token: str) -> int:
def get_bert_biencoder_components(args, inference_only: bool = False):
# since bert tokenizer is the same in HF and pytext/fairseq, just use HF's implementation here for now
from .hf_models import get_tokenizer, BertTensorizer
tokenizer = get_tokenizer(args.pretrained_model_cfg, do_lower_case=args.do_lower_case)
question_encoder = PytextBertEncoder.init_encoder(args.pretrained_file,
projection_dim=args.projection_dim, dropout=args.dropout,
vocab_size=tokenizer.vocab_size,
padding_idx=tokenizer.pad_token_type_id
)
ctx_encoder = PytextBertEncoder.init_encoder(args.pretrained_file,
projection_dim=args.projection_dim, dropout=args.dropout,
vocab_size=tokenizer.vocab_size,
padding_idx=tokenizer.pad_token_type_id
)
biencoder = BiEncoder(question_encoder, ctx_encoder)
optimizer = get_optimizer(biencoder,
learning_rate=args.learning_rate,
adam_eps=args.adam_eps, weight_decay=args.weight_decay,
) if not inference_only else None
tensorizer = BertTensorizer(tokenizer, args.sequence_length)
return tensorizer, biencoder, optimizer | null |
163,266 | from torch.cuda import amp
import collections
import logging
import random
from typing import Tuple, List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import BiEncoderSample
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import CheckpointState
The provided code snippet includes the necessary dependencies for implementing the `dot_product_scores` function. Write a Python function `def dot_product_scores(q_vectors: T, ctx_vectors: T) -> T` to solve the following problem:
calculates q->ctx scores for every row in ctx_vectors :param q_vectors: :param ctx_vectors: :return:
Here is the function:
def dot_product_scores(q_vectors: T, ctx_vectors: T) -> T:
"""
calculates q->ctx scores for every row in ctx_vector
:param q_vector:
:param ctx_vector:
:return:
"""
# q_vector: n1 x D, ctx_vectors: n2 x D, result n1 x n2
r = torch.matmul(q_vectors, torch.transpose(ctx_vectors, 0, 1))
return r | calculates q->ctx scores for every row in ctx_vectors :param q_vectors: :param ctx_vectors: :return:
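For orientation, a brief usage sketch of dot_product_scores; the batch sizes and vector dimensionality below are illustrative assumptions:
q = torch.randn(4, 768)     # n1 x D question vectors (illustrative shapes)
ctx = torch.randn(16, 768)  # n2 x D context vectors
scores = dot_product_scores(q, ctx)
print(scores.shape)         # torch.Size([4, 16]): one score per (question, context) pair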
163,267 | from torch.cuda import amp
import collections
import logging
import random
from typing import Tuple, List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import BiEncoderSample
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import CheckpointState
def cosine_scores(q_vector: T, ctx_vectors: T):
# row-wise cosine similarity: q_vector and ctx_vectors must be broadcastable along dim 0
# (e.g. 1 x D against n2 x D yields n2 scores); unlike dot_product_scores this does
# NOT form an n1 x n2 score matrix
return F.cosine_similarity(q_vector, ctx_vectors, dim=1) | null |
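A small sketch under that broadcasting constraint (shapes are illustrative assumptions):
q = torch.randn(1, 768)    # a single query vector, broadcast across all contexts
ctx = torch.randn(8, 768)  # 8 context vectors
sims = cosine_scores(q, ctx)
print(sims.shape)          # torch.Size([8])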
163,268 | from torch.cuda import amp
import collections
import logging
import random
from typing import Tuple, List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor as T
from torch import nn
from dpr.data.biencoder_data import BiEncoderSample
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import CheckpointState
rnd = random.Random(0)
class Tensorizer(object):
"""
Component for all text to model input data conversions and related utility methods
"""
# Note: title, if present, is supposed to be put before text (i.e. optional title + document body)
def text_to_tensor(
self,
text: str,
title: str = None,
add_special_tokens: bool = True,
apply_max_len: bool = True,
):
raise NotImplementedError
def get_pair_separator_ids(self) -> T:
raise NotImplementedError
def get_pad_id(self) -> int:
raise NotImplementedError
def get_attn_mask(self, tokens_tensor: T):
raise NotImplementedError
def is_sub_word_id(self, token_id: int):
raise NotImplementedError
def to_string(self, token_ids, skip_special_tokens=True):
raise NotImplementedError
def set_pad_to_max(self, pad: bool):
raise NotImplementedError
def get_token_id(self, token: str) -> int:
raise NotImplementedError
def _pad_to_len(seq: T, pad_id: int, max_len: int):
s_len = seq.size(0)
if s_len > max_len:
return seq[0: max_len]
return torch.cat([seq, torch.Tensor().new_full((max_len - s_len,), pad_id, dtype=torch.long)], dim=0)
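A brief sketch of _pad_to_len's two branches (the token ids are illustrative assumptions):
seq = torch.tensor([101, 2023, 102], dtype=torch.long)
print(_pad_to_len(seq, pad_id=0, max_len=5))  # tensor([ 101, 2023,  102,    0,    0])
print(_pad_to_len(seq, pad_id=0, max_len=2))  # tensor([ 101, 2023]) -- truncated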
def _select_span_with_token(
text: str, tensorizer: Tensorizer, token_str: str = "[START_ENT]"
) -> T:
token_id = tensorizer.get_token_id(token_str)  # renamed from `id` to avoid shadowing the builtin
query_tensor = tensorizer.text_to_tensor(text)
if token_id not in query_tensor:
    query_tensor_full = tensorizer.text_to_tensor(text, apply_max_len=False)
    token_indexes = (query_tensor_full == token_id).nonzero()
if token_indexes.size(0) > 0:
start_pos = token_indexes[0, 0].item()
# add some randomization to avoid overfitting to a specific token position
left_shift = int(tensorizer.max_length / 2)
rnd_shift = int((rnd.random() - 0.5) * left_shift / 2)
left_shift += rnd_shift
query_tensor = query_tensor_full[start_pos - left_shift :]
cls_id = tensorizer.tokenizer.cls_token_id
if query_tensor[0] != cls_id:
query_tensor = torch.cat([torch.tensor([cls_id]), query_tensor], dim=0)
from dpr.models.reader import _pad_to_len
query_tensor = _pad_to_len(
query_tensor, tensorizer.get_pad_id(), tensorizer.max_length
)
query_tensor[-1] = tensorizer.tokenizer.sep_token_id
# logger.info('aligned query_tensor %s', query_tensor)
assert id in query_tensor, "query_tensor={}".format(query_tensor)
return query_tensor
else:
raise RuntimeError(
"[START_ENT] toke not found for Entity Linking sample query={}".format(
text
)
)
else:
return query_tensor | null |
163,269 | import logging
from typing import Tuple
from fairseq.models.roberta.hub_interface import RobertaHubInterface
from fairseq.models.roberta.model import RobertaModel as FaiseqRobertaModel
from fairseq.optim.adam import FairseqAdam
from torch import Tensor as T
from torch import nn
import random
import numpy as np
import torch
from torch.cuda import amp
from typing import List
from dpr.data.biencoder_data import BiEncoderSample
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import CheckpointState
from dpr.models.hf_models import get_roberta_tensorizer, get_roberta_tokenizer, RobertaTensorizer
from .biencoder import BiEncoder, BiEncoderBatch
def get_fairseq_adamw_optimizer(model: nn.Module, args):
setattr(args, 'lr', [args.learning_rate])
return FairseqAdam(args, model.parameters()).optimizer
class RobertaEncoder(nn.Module):
def __init__(self, fairseq_roberta_hub: RobertaHubInterface):
super(RobertaEncoder, self).__init__()
self.fairseq_roberta = fairseq_roberta_hub
@classmethod
def from_pretrained(cls, pretrained_dir_path: str):
model = FaiseqRobertaModel.from_pretrained(pretrained_dir_path)
return cls(model)
def forward(self, input_ids: T, token_type_ids: T, attention_mask: T) -> Tuple[T, ...]:
roberta_out = self.fairseq_roberta.extract_features(input_ids)
cls_out = roberta_out[:, 0, :]
return roberta_out, cls_out, None
def get_out_size(self):
raise NotImplementedError
def get_roberta_tensorizer(args, tokenizer=None):
if not tokenizer:
tokenizer = get_roberta_tokenizer(
args.pretrained_model_cfg, do_lower_case=args.do_lower_case
)
return RobertaTensorizer(tokenizer, args.sequence_length)
class BiEncoder(nn.Module):
"""Bi-Encoder model component. Encapsulates query/question and context/passage encoders."""
def __init__(
self,
question_model: nn.Module,
ctx_model: nn.Module,
fix_q_encoder: bool = False,
fix_ctx_encoder: bool = False,
):
super(BiEncoder, self).__init__()
self.question_model = question_model
self.ctx_model = ctx_model
self.fix_q_encoder = fix_q_encoder
self.fix_ctx_encoder = fix_ctx_encoder
@staticmethod
def get_representation(
sub_model: nn.Module,
ids: T,
attn_mask: T,
fix_encoder: bool = False,
representation_token_pos=0,
) -> (T, T, T):
sequence_output = None
pooled_output = None
hidden_states = None
if ids is not None:
if fix_encoder:
with torch.no_grad():
sequence_output, pooled_output, hidden_states = sub_model(
ids,
attn_mask,
representation_token_pos=representation_token_pos,
)
if sub_model.training:
sequence_output.requires_grad_(requires_grad=True)
pooled_output.requires_grad_(requires_grad=True)
else:
sequence_output, pooled_output, hidden_states = sub_model(
ids,
attn_mask,
representation_token_pos=representation_token_pos,
)
return sequence_output, pooled_output, hidden_states
def forward(
self,
question_ids: T,
question_attn_mask: T,
context_ids: T,
ctx_attn_mask: T,
encoder_type: str = None,
representation_token_pos=0,
) -> Tuple[T, T]:
with amp.autocast(enabled=True):
q_encoder = (
self.question_model
if encoder_type is None or encoder_type == "question"
else self.ctx_model
)
_q_seq, q_pooled_out, _q_hidden = self.get_representation(
q_encoder,
question_ids,
question_attn_mask,
self.fix_q_encoder,
representation_token_pos=representation_token_pos,
)
ctx_encoder = (
self.ctx_model
if encoder_type is None or encoder_type == "ctx"
else self.question_model
)
_ctx_seq, ctx_pooled_out, _ctx_hidden = self.get_representation(
ctx_encoder, context_ids, ctx_attn_mask, self.fix_ctx_encoder
)
return q_pooled_out, ctx_pooled_out
# TODO delete once moved to the new method
@classmethod
def create_biencoder_input(
cls,
samples: List,
tensorizer: Tensorizer,
insert_title: bool,
num_hard_negatives: int = 0,
num_other_negatives: int = 0,
shuffle: bool = True,
shuffle_positives: bool = True,
hard_neg_fallback: bool = True,
) -> BiEncoderBatch:
"""
Creates a batch of the biencoder training tuple.
:param samples: list of data items (from json) to create the batch for
:param tensorizer: components to create model input tensors from a text sequence
:param insert_title: enables title insertion at the beginning of the context sequences
:param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
:param num_other_negatives: amount of other negatives per question (taken from samples' pools)
:param shuffle: shuffles negative passages pools
:param shuffle_positives: shuffles positive passages pools
:return: BiEncoderBatch tuple
"""
question_tensors = []
ctx_tensors = []
positive_ctx_indices = []
hard_neg_ctx_indices = []
for sample in samples:
# ctx+ & [ctx-] composition
# as of now, take the first(gold) ctx+ only
if shuffle and shuffle_positives:
positive_ctxs = sample["positive_ctxs"]
positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
else:
positive_ctx = sample["positive_ctxs"][0]
neg_ctxs = sample["negative_ctxs"]
hard_neg_ctxs = sample["hard_negative_ctxs"]
if shuffle:
random.shuffle(neg_ctxs)
random.shuffle(hard_neg_ctxs)
if hard_neg_fallback and len(hard_neg_ctxs) == 0:
hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]
neg_ctxs = neg_ctxs[0:num_other_negatives]
hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]
all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
hard_negatives_start_idx = 1
hard_negatives_end_idx = 1 + len(hard_neg_ctxs)
current_ctxs_len = len(ctx_tensors)
sample_ctxs_tensors = [
tensorizer.text_to_tensor(
ctx["text"],
title=ctx["title"] if (insert_title and "title" in ctx) else None,
)
for ctx in all_ctxs
]
ctx_tensors.extend(sample_ctxs_tensors)
positive_ctx_indices.append(current_ctxs_len)
hard_neg_ctx_indices.append(
[
i
for i in range(
current_ctxs_len + hard_negatives_start_idx,
current_ctxs_len + hard_negatives_end_idx,
)
]
)
question_tensors.append(tensorizer.text_to_tensor(question))
ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)
ctx_segments = torch.zeros_like(ctxs_tensor)
question_segments = torch.zeros_like(questions_tensor)
return BiEncoderBatch(
questions_tensor,
question_segments,
ctxs_tensor,
ctx_segments,
positive_ctx_indices,
hard_neg_ctx_indices,
"question",
)
@classmethod
def create_biencoder_input2(
cls,
samples: List[BiEncoderSample],
tensorizer: Tensorizer,
insert_title: bool,
num_hard_negatives: int = 0,
num_other_negatives: int = 0,
shuffle: bool = True,
shuffle_positives: bool = False,
hard_neg_fallback: bool = True,
query_token: str = None,
) -> BiEncoderBatch:
"""
Creates a batch of the biencoder training tuple.
:param samples: list of BiEncoderSample-s to create the batch for
:param tensorizer: components to create model input tensors from a text sequence
:param insert_title: enables title insertion at the beginning of the context sequences
:param num_hard_negatives: amount of hard negatives per question (taken from samples' pools)
:param num_other_negatives: amount of other negatives per question (taken from samples' pools)
:param shuffle: shuffles negative passages pools
:param shuffle_positives: shuffles positive passages pools
:return: BiEncoderBatch tuple
"""
question_tensors = []
ctx_tensors = []
positive_ctx_indices = []
hard_neg_ctx_indices = []
for sample in samples:
# ctx+ & [ctx-] composition
# as of now, take the first(gold) ctx+ only
if shuffle and shuffle_positives:
positive_ctxs = sample.positive_passages
positive_ctx = positive_ctxs[np.random.choice(len(positive_ctxs))]
else:
positive_ctx = sample.positive_passages[0]
neg_ctxs = sample.negative_passages
hard_neg_ctxs = sample.hard_negative_passages
question = sample.query
# question = normalize_question(sample.query)
if shuffle:
random.shuffle(neg_ctxs)
random.shuffle(hard_neg_ctxs)
if hard_neg_fallback and len(hard_neg_ctxs) == 0:
hard_neg_ctxs = neg_ctxs[0:num_hard_negatives]
neg_ctxs = neg_ctxs[0:num_other_negatives]
hard_neg_ctxs = hard_neg_ctxs[0:num_hard_negatives]
all_ctxs = [positive_ctx] + neg_ctxs + hard_neg_ctxs
hard_negatives_start_idx = 1
hard_negatives_end_idx = 1 + len(hard_neg_ctxs)
current_ctxs_len = len(ctx_tensors)
sample_ctxs_tensors = [
tensorizer.text_to_tensor(
ctx.text, title=ctx.title if (insert_title and ctx.title) else None
)
for ctx in all_ctxs
]
ctx_tensors.extend(sample_ctxs_tensors)
positive_ctx_indices.append(current_ctxs_len)
hard_neg_ctx_indices.append(
[
i
for i in range(
current_ctxs_len + hard_negatives_start_idx,
current_ctxs_len + hard_negatives_end_idx,
)
]
)
if query_token:
# TODO: tmp workaround for EL, remove or revise
if query_token == "[START_ENT]":
query_span = _select_span_with_token(
question, tensorizer, token_str=query_token
)
question_tensors.append(query_span)
else:
question_tensors.append(
tensorizer.text_to_tensor(" ".join([query_token, question]))
)
else:
question_tensors.append(tensorizer.text_to_tensor(question))
ctxs_tensor = torch.cat([ctx.view(1, -1) for ctx in ctx_tensors], dim=0)
questions_tensor = torch.cat([q.view(1, -1) for q in question_tensors], dim=0)
ctx_segments = torch.zeros_like(ctxs_tensor)
question_segments = torch.zeros_like(questions_tensor)
return BiEncoderBatch(
questions_tensor,
question_segments,
ctxs_tensor,
ctx_segments,
positive_ctx_indices,
hard_neg_ctx_indices,
"question",
)
def load_state(self, saved_state: CheckpointState):
# TODO: make a long term HF compatibility fix
if "question_model.embeddings.position_ids" in saved_state.model_dict:
del saved_state.model_dict["question_model.embeddings.position_ids"]
del saved_state.model_dict["ctx_model.embeddings.position_ids"]
self.load_state_dict(saved_state.model_dict,strict=False)
def get_state_dict(self):
return self.state_dict()
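A minimal usage sketch for the batching method above, assuming it is exposed as a classmethod on the DPR-style BiEncoder class (the construction of the samples and tensorizer is hypothetical):
# Hypothetical: `samples` is a List[BiEncoderSample] and `tensorizer` a
# Tensorizer produced by the surrounding data-loading code.
batch = BiEncoder.create_biencoder_input2(
    samples,
    tensorizer,
    insert_title=True,       # prepend passage titles to context sequences
    num_hard_negatives=1,    # keep one hard negative per question
    num_other_negatives=0,
    shuffle=True,
)
# `batch` is a BiEncoderBatch; its question/context tensors feed the two
# encoder towers, and positive_ctx_indices marks the gold rows for the loss.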
def get_roberta_biencoder_components(args, inference_only: bool = False, **kwargs):
question_encoder = RobertaEncoder.from_pretrained(args.pretrained_file)
ctx_encoder = RobertaEncoder.from_pretrained(args.pretrained_file)
biencoder = BiEncoder(question_encoder, ctx_encoder)
optimizer = get_fairseq_adamw_optimizer(biencoder, args) if not inference_only else None
tensorizer = get_roberta_tensorizer(args)
return tensorizer, biencoder, optimizer | null |
163,272 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metat5 import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from simpletrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def get_model_obj(model: nn.Module):
return model.module if hasattr(model, "module") else model | null |
163,273 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metat5 import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from simpletrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def train_qascore(model,trainset,training_args,data_collator):
optimizer = AdamW(
model.parameters(),
lr=1e-4,
)
qa_scores={}
device_id = training_args.local_rank
if device_id == -1:
device_id = 0
if training_args.local_rank == -1:
data_loader_qa = DataLoader(dataset=trainset, shuffle=False,batch_size=training_args.per_device_train_batch_size//2,collate_fn=data_collator)#, sampler=train_sampler)
else:
sampler = DistributedSampler(
trainset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_qa = DataLoader(dataset=trainset, shuffle=False,batch_size=training_args.per_device_train_batch_size//2,collate_fn=data_collator,sampler=sampler,drop_last=False)
for data in data_loader_qa:
# labels = data.pop("labels")
sample_id = data["sample_id"].numpy().tolist()
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
kl_loss,qa_probs = model.forward_single(**data)
if model.activate_rer or model.activate_ret:
kl_loss.backward()
optimizer.step()
scores_id = qa_probs.detach().cpu().numpy().tolist()
for s_id,score in zip(sample_id,scores_id):
qa_scores[int(s_id)]=score
return qa_scores | null |
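A hedged follow-up sketch: the per-sample QA probabilities returned above could be persisted for the retriever/reranker distillation stage (the output path is an assumption):
# Hypothetical usage; json is imported at the top of this file.
qa_scores = train_qascore(model, trainset, training_args, data_collator)
with open("./mem_scorescores/qa_scores_rank{}.json".format(max(training_args.local_rank, 0)), "w") as f:
    json.dump(qa_scores, f)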
163,274 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metat5 import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from simpletrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
format2dataset = {
'extractive':['squad1_1','squad2','extractive','newsqa','quoref','ropes','adversarialqa_dbert_dev','adversarialqa_dbidaf_dev','adversarialqa_droberta_dev','record_extractive'],
'abstractive':['narrativeqa_dev','abstractive','natural_questions_with_dpr_para','drop','qaconv','tweetqa'],
'multichoice':['race_string','multichoice','openbookqa','mctest_corrected_the_separator','social_iqa','commonsenseqa','qasc','physical_iqa','winogrande_xl','onestopqa_advanced','onestopqa_elementry','onestopqa_intermediate','prost_multiple_choice_with_no_context','dream','processbank_test','cosmosqa','mcscript','mcscript2','quail','reclor','measuring_massive_multitask_language_understanding','head_qa_en_test','race_c','arc_hard','arc_easy'],
'bool':['boolq','bool','boolq_np','multirc','strategyqa','pubmedqa_pqal_short_ans']
}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
def evaluate_model(eval_model, priority_level,format_name,training_args,data_collator,epoch_id):
device_id = training_args.local_rank
if device_id == -1:
device_id = 0
for item in dataset_files:
if not item in format2dataset[format_name]:
continue
        dataset_ret = load_from_disk("./epoch_data{}/{}-reteval.hf".format(str(epoch_id), item))
        # cap each eval set at 64 examples
        lmax = min(64, len(dataset_ret))
        dataset_ret = dataset_ret.select(range(lmax))
        dataset_rer = load_from_disk("./epoch_data{}/{}-rereval.hf".format(str(epoch_id), item)).select(range(lmax))
        dataset_first = load_from_disk("./epoch_data{}/{}-firsteval.hf".format(str(epoch_id), item)).select(range(lmax))
for (eval_ds,source) in [(dataset_first,"first"),(dataset_ret,"ret"),(dataset_rer,"rer")]:
if training_args.local_rank == -1:
data_loader_eval = DataLoader(dataset=eval_ds, shuffle=False,batch_size=training_args.per_device_train_batch_size*4,collate_fn=data_collator)#, sampler=train_sampler)
else:
sampler = DistributedSampler(
eval_ds,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_eval = DataLoader(dataset=eval_ds, shuffle=False,batch_size=training_args.per_device_train_batch_size*4,collate_fn=data_collator,sampler=sampler,drop_last=False)
tnum = 0.0
for data in data_loader_eval:
labels = data.pop("labels")
data["eval_labels"]=labels
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
if source!="first":
out = eval_model.module.eval_mode(**data)
priority_level[format_name][source]+=out.mean().item()
else:
out = eval_model.module.eval_golden(**data)
priority_level[format_name]["qa"]+=out.mean().item()
tnum +=1.0
            # scores are accumulated as raw sums across batches; tnum counts
            # the batches but no averaging is applied here
return priority_level | null |
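The accumulator consumed above is assumed to be a nested dict keyed by format and by source; a minimal sketch of its shape and one call:
# Hypothetical initialization mirroring the "qa"/"ret"/"rer" keys read above.
priority_level = {
    fmt: {"qa": 0.0, "ret": 0.0, "rer": 0.0}
    for fmt in ("extractive", "abstractive", "multichoice", "bool")
}
priority_level = evaluate_model(eval_model, priority_level, "extractive",
                                training_args, data_collator, epoch_id=0)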
163,275 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metat5 import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from simpletrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
Batch_Size = 2
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def trainbc(bencoder,cencoder,dataset,training_args,fwd,qaret,qarer):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in bencoder.named_parameters() if not any(
nd in n for nd in no_decay)], "weight_decay": 0.01},
{"params": [p for n, p in bencoder.named_parameters() if any(
nd in n for nd in no_decay)], "weight_decay": 0.0},
{"params": [p for n, p in cencoder.named_parameters() if not any(
nd in n for nd in no_decay)], "weight_decay": 0.01},
{"params": [p for n, p in cencoder.named_parameters() if any(
nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=1e-5,
)
device_id = training_args.local_rank
loss_fct = torch.nn.KLDivLoss()
if device_id==-1:
device_id=0
bs_size = Batch_Size
offset = []
offset2 = []
offset3 = []
scaler = amp.GradScaler(enabled=True)
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
    if training_args.local_rank == -1:
        # single-process branch trains on a 100-sample subset
        data_loader_train = DataLoader(dataset=dataset.select(range(100)), batch_size=bs_size, collate_fn=default_data_collator)
else:
sampler = DistributedSampler(
dataset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_train = DataLoader(dataset=dataset,batch_size=bs_size,sampler=sampler,drop_last=False,collate_fn=default_data_collator)
for i, data in enumerate(data_loader_train):
if data["query_ids"].size(0)!=bs_size:
bs_size = data["query_ids"].size(0)
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
data["query_ids"] = data["query_ids"][:,0,:].squeeze(1)
data["query_attentions"] = data["query_attentions"][:,0,:].squeeze(1)
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
bmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = bmodel_out
local_q_vector = local_q_vector.view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,64,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
cmodel_out = cencoder(
data["cross_ids"].view(-1,144),
data["cross_attentions"].view(-1,144),
data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,64)
if fwd:
bi_score = torch.softmax(sim_scores, dim=-1)
c_score = torch.nn.functional.log_softmax(cmodel_out, dim=-1)
kl_loss = loss_fct(c_score, bi_score)
else:
bi_score = torch.nn.functional.log_softmax(sim_scores, dim=-1)
c_score = torch.softmax(cmodel_out, dim=-1)
kl_loss = loss_fct(bi_score,c_score)
scaler.scale(kl_loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
# sim_scores = sim_scores.add(score_mask)
#(bs*768)*(768*64)=(bs*64)
kl_loss_qa1 = 0.0
kl_loss_qa2 = 0.0
if "qa_scores" in data.keys() and (qaret or qarer):
with torch.no_grad():
previous_ids = []
sids = data["sample_id"].cpu().numpy().tolist()
for sid in sids:
                    try:
                        previous_ids.extend(format_hints_ids[int(sid)])
                    except KeyError:
                        # no cached hint ids for this sample yet; fall back
                        # to the first four candidates
                        previous_ids.extend([0, 1, 2, 3])
previous_ids = torch.tensor(previous_ids).long().to(torch.device("cuda", device_id)).add(offset3)
previous_data = {}
for k_ in data.keys():
if len(data[k_].size())==3:
previous_data[k_] = torch.index_select(data[k_].view(bs_size*64,-1),dim=0,index=previous_ids).view(bs_size,4,-1).clone()
elif len(data[k_].size())==2:
if k_=="query_ids" or k_=="query_attentions":
previous_data[k_]=data[k_].clone()
elif k_!="qa_scores":
previous_data[k_] = torch.index_select(data[k_].view(-1),dim=0,index=previous_ids).view(bs_size,4).clone()
else:
previous_data[k_]=data[k_].clone()
else:
if k_=="sample_id":
previous_data[k_]=data[k_].clone()
            # the forward passes below must run with gradients enabled so the
            # distillation losses can backpropagate; only the index gathering
            # above stays under no_grad
            cmodel_previous = cencoder(
                previous_data["cross_ids"].view(-1, 144),
                previous_data["cross_attentions"].view(-1, 144),
                previous_data["cross_ctxs"].view(-1, 144),
            ).squeeze(-1).view(-1, 4)
            dmodel_previous = bencoder(
                previous_data["query_ids"].view(-1, 112),
                previous_data["query_attentions"].view(-1, 112),
                previous_data["ctx_ids"].view(-1, 112),
                previous_data["ctx_attentions"].view(-1, 112),
            )
            local_q_vector, local_ctx_vectors = dmodel_previous
            local_q_vector = local_q_vector.view(-1, 768)
            local_ctx_vectors = local_ctx_vectors.view(-1, 4, 768)
            sim_scores = torch.bmm(
                local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
            ).squeeze(1)
            bi_score_previous = torch.nn.functional.log_softmax(sim_scores, dim=-1)
            c_score_previous = torch.nn.functional.log_softmax(cmodel_previous, dim=-1)
            qa_scores = torch.softmax(data["qa_scores"], dim=-1)
            if qarer:
                kl_loss_qa1 = loss_fct(c_score_previous, qa_scores)
            if qaret:
                kl_loss_qa2 = loss_fct(bi_score_previous, qa_scores)
            kl_qa_all = kl_loss_qa1 + kl_loss_qa2
            scaler.scale(kl_qa_all).backward()
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad() | null
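A standalone check of the distillation direction toggled by fwd, using dummy scores only: KLDivLoss expects log-probabilities as input and probabilities as target, so the student side gets log_softmax.
import torch
loss_fct = torch.nn.KLDivLoss()
ret_scores = torch.randn(2, 64)   # stand-in bi-encoder similarities
rer_scores = torch.randn(2, 64)   # stand-in cross-encoder logits
# fwd=True: the retriever distribution teaches the reranker
loss_fwd = loss_fct(torch.log_softmax(rer_scores, dim=-1),
                    torch.softmax(ret_scores, dim=-1))
# fwd=False: the reranker distribution teaches the retriever
loss_bwd = loss_fct(torch.log_softmax(ret_scores, dim=-1),
                    torch.softmax(rer_scores, dim=-1))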
163,276 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metat5 import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from simpletrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def filthints(bencoder,cencoder,dataset,training_args,fwd,qaret,qarer):
device_id = training_args.local_rank
if device_id==-1:
device_id=0
bencoder.eval()
cencoder.eval()
bs_size = 48
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
if training_args.local_rank == -1:
data_loader_train = DataLoader(dataset, shuffle=True,batch_size=bs_size,collate_fn=default_data_collator)
else:
sampler = DistributedSampler(
dataset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_train = DataLoader(dataset=dataset,batch_size=bs_size,sampler=sampler,collate_fn=default_data_collator,drop_last=False)
for i, data in enumerate(data_loader_train):
if data["query_ids"].size(0)!=bs_size:
bs_size = data["query_ids"].size(0)
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
torch.cuda.synchronize()
start = time.time()
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
with torch.no_grad():
bmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = bmodel_out
local_q_vector = local_q_vector.view(-1,64,768)[:,0,:].view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,64,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
# print(sim_scores)
sim_scores = sim_scores.add(score_mask)
#(1*768)*(768*64)=(1*64)
sort_result, sort_idxs = sim_scores.topk(16)#sort(dot_prod_scores, dim=0, descending=True)
sort_idxs = sort_idxs.view(-1).add(offset)
for k_ in data.keys():
if len(data[k_].size())==3:
data[k_] = torch.index_select(data[k_].view(bs_size*64,-1),dim=0,index=sort_idxs).view(bs_size,16,-1)
elif len(data[k_].size())==2:
if k_!="qa_scores":
data[k_] = torch.index_select(data[k_].view(-1),dim=0,index=sort_idxs).view(bs_size,16)
else:
pass
                else:
                    # only the scalar sample_id field is expected here
                    if k_ != "sample_id":
                        raise ValueError("unexpected batch field: {}".format(k_))
# torch.cuda.synchronize()
# print(time.time()-start,"select16_time")
cmodel_out = cencoder(
data["cross_ids"].view(-1,144),
data["cross_attentions"].view(-1,144),
data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,16)
dmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
# score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = dmodel_out
local_q_vector = local_q_vector.view(-1,16,768)[:,0,:].view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,16,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
bi_score = sim_scores
c_score = cmodel_out
_,rerank_idxs = cmodel_out.topk(4)
# torch.cuda.synchronize()
# print(time.time()-start,"cmodel_time")
for bat in range(rerank_idxs.size(0)):
sel = torch.index_select(data["sub_ids"][bat],dim=0,index=rerank_idxs[bat])
ids = data["sample_id"][bat]
format_hints_ids[ids.item()]=sel.cpu().numpy().tolist()
for ix in range(rerank_idxs.size(0)):
ids = data["sample_id"][ix].item()
bscore = []
cscore = []
for item in rerank_idxs[ix]:
bscore.append(bi_score[ix][item].item())
cscore.append(c_score[ix][item].item())
format_ret_scores[ids]=bscore
format_rer_scores[ids]=cscore
# torch.cuda.synchronize()
# print(time.time()-start,"final_time") | null |
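The index arithmetic above relies on shifting row-local top-k indices by row*64 before a flat index_select; a self-contained illustration:
import torch
bs, n, k = 2, 64, 16
scores = torch.randn(bs, n)
flat = torch.arange(bs * n)                  # stand-in for a flattened field
_, topk = scores.topk(k)                     # (bs, k) row-local indices
offset = (torch.arange(bs) * n).repeat_interleave(k)
gathered = torch.index_select(flat, 0, topk.view(-1) + offset).view(bs, k)
assert gathered.shape == (bs, k)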
163,277 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metat5 import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from simpletrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def rank_rr(bencoder,cencoder,dataset,training_args):
device_id = training_args.local_rank
bencoder.eval()
cencoder.eval()
if device_id==-1:
device_id=0
bs_size = 64
    ngpus = torch.distributed.get_world_size() if training_args.local_rank != -1 else 1
if len(dataset)//ngpus<bs_size:
bs_size = len(dataset)//ngpus
offset = []
offset2 = []
offset3 = []
ppt = []
for itf in range(bs_size):
offset.extend([itf*64]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*64]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
if training_args.local_rank == -1:
data_loader_dev = DataLoader(dataset=dataset, shuffle=True,batch_size=bs_size,collate_fn=default_data_collator)
else:
sampler = DistributedSampler(
dataset,
shuffle=False,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_dev = DataLoader(dataset=dataset,batch_size=bs_size,shuffle=False,sampler=sampler,collate_fn=default_data_collator,drop_last=False)
    for i, data in enumerate(data_loader_dev):
        data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
        if data["query_ids"].size(0) != bs_size:
            bs_size = data["query_ids"].size(0)
            offset = []
            offset2 = []
            offset3 = []
            for itf in range(bs_size):
                offset.extend([itf * 64] * 16)
                offset2.extend([itf * 16] * 4)
                offset3.extend([itf * 64] * 4)
            offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
            offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
            offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
        bmodel_out = bencoder(
            data["query_ids"].view(-1, 112),
            data["query_attentions"].view(-1, 112),
            data["ctx_ids"].view(-1, 112),
            data["ctx_attentions"].view(-1, 112),
        )
        score_mask = (data["sub_ids"] >= 999).int().mul(-999)
        local_q_vector, local_ctx_vectors = bmodel_out
        local_q_vector = local_q_vector.view(-1, 64, 768)[:, 0, :].view(-1, 768)
        local_ctx_vectors = local_ctx_vectors.view(-1, 64, 768)
        sim_scores = torch.bmm(
            local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
        ).squeeze(1)
        # mask padded candidates, then (1x768)*(768x64) -> (1x64) scores
        sim_scores = sim_scores.add(score_mask)
        sort_result, sort_idxs = sim_scores.topk(16)
        for bat in range(sort_idxs.size(0)):
            ids = data["sample_id"][bat]
            first_select_ids[int(ids.item())] = sort_idxs[bat].cpu().numpy().tolist()
        sort_idxs = sort_idxs.view(-1).add(offset)
        _, sort_idxs4 = sim_scores.topk(4)
        for bat in range(sort_idxs4.size(0)):
            sel = torch.index_select(data["sub_ids"][bat], dim=0, index=sort_idxs4[bat])
            ids = data["sample_id"][bat]
            ret_select_ids[int(ids.item())] = sel.cpu().numpy().tolist()
        for k_ in data.keys():
            if len(data[k_].size()) == 3:
                data[k_] = torch.index_select(data[k_].view(bs_size * 64, -1), dim=0, index=sort_idxs).view(bs_size, 16, -1)
            elif len(data[k_].size()) == 2 and k_ != "qa_scores":
                data[k_] = torch.index_select(data[k_].view(-1), dim=0, index=sort_idxs).view(bs_size, 16)
        cmodel_out = cencoder(
            data["cross_ids"].view(-1, 144),
            data["cross_attentions"].view(-1, 144),
            data["cross_ctxs"].view(-1, 144),
        ).squeeze(-1).view(-1, 16)
        _, rerank_idxs = cmodel_out.topk(4)
        for bat in range(rerank_idxs.size(0)):
            sel = torch.index_select(data["sub_ids"][bat], dim=0, index=rerank_idxs[bat])
            ids = data["sample_id"][bat]
            rer_select_ids[int(ids.item())] = sel.cpu().numpy().tolist() | null
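After rank_rr fills the three selection maps (module-level dicts assumed to come from hintpreprocess), they could be serialized for the next stage; a hedged sketch with an invented path:
import json
with open("rank_selections_rank{}.json".format(max(training_args.local_rank, 0)), "w") as f:
    json.dump({"first": first_select_ids, "ret": ret_select_ids,
               "rer": rer_select_ids}, f)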
163,278 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metat5 import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from simpletrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def preprocess_function_eval(examples, format_name):
    preprocess_fn = preprocess_simple
    inputs, targets = preprocess_fn(examples, "input", "output", format_name=format_name)
    model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True)
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=128, padding=padding, truncation=True)
    if padding == "max_length":
        # replace pad ids in the labels with -100 so they are ignored by the loss
        labels["input_ids"] = [
            [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
        ]
    model_inputs["labels"] = labels["input_ids"]
    # 50 negative placeholder ids form the meta-prompt prefix
    meta_ids = [-(i + 1) for i in range(10)] * 5
    model_inputs["input_ids"] = [meta_ids + input_ids for input_ids in model_inputs["input_ids"]]
    model_inputs["attention_mask"] = [[1] * 50 + attention_mask for attention_mask in
                                      model_inputs["attention_mask"]]
    return model_inputs
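The meta-prompt prefix and the hard-coded attention-mask prefix must agree in length; a quick standalone check of that arithmetic:
meta_ids = [-(i + 1) for i in range(10)] * 5  # 10 placeholder ids, repeated 5x
assert len(meta_ids) == 50                    # matches the [1] * 50 mask prefix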
# invert format2dataset so each dataset name maps back to its answer format
dataset2format = {ds: fmt for fmt, ds_list in format2dataset.items() for ds in ds_list}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
unseen_files = ("onestopqa_advanced,onestopqa_elementry,onestopqa_intermediate,prost_multiple_choice_with_no_context,dream,processbank_test,cosmosqa,mcscript,mcscript2,quail,reclor,measuring_massive_multitask_language_understanding,head_qa_en_test,race_c,pubmedqa_pqal_short_ans,strategyqa,tweetqa,qaconv,record_extractive,adversarialqa_dbert_dev,adversarialqa_dbidaf_dev,adversarialqa_droberta_dev".split(","))
def generate_samples_eval(format_name):
tott2 = 0
for item in dataset_files+unseen_files:
fm = dataset2format[item]
global tsname
tsname = item
if fm==format_name:
            try:
                data_path = "./data_process/data/{}/dev.json".format(item)
                dataset = load_dataset("json", data_files=data_path)["train"]
            except Exception:
                # fall back to the test split when no dev split exists
                data_path = "./data_process/data/{}/test.json".format(item)
                dataset = load_dataset("json", data_files=data_path)["train"]
dataset = dataset.map(
lambda x:preprocess_function_eval(x,format_name),
batched=True,
remove_columns=["input","output"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
dataset.save_to_disk("./onlymeta/{}-eval.hf".format(item)) | null |
163,279 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metat5 import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from simpletrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def preprocess_function(examples, format_name):
    preprocess_fn = preprocess_simple
    inputs, targets = preprocess_fn(examples, "input", "output", format_name=format_name)
    model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True)
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=128, padding=padding, truncation=True)
    if padding == "max_length":
        # replace pad ids in the labels with -100 so they are ignored by the loss
        labels["input_ids"] = [
            [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
        ]
    model_inputs["labels"] = labels["input_ids"]
    # 50 negative placeholder ids form the meta-prompt prefix
    meta_ids = [-(i + 1) for i in range(10)] * 5
    model_inputs["input_ids"] = [meta_ids + input_ids for input_ids in model_inputs["input_ids"]]
    model_inputs["attention_mask"] = [[1] * 50 + attention_mask for attention_mask in
                                      model_inputs["attention_mask"]]
    return model_inputs
# invert format2dataset so each dataset name maps back to its answer format
dataset2format = {ds: fmt for fmt, ds_list in format2dataset.items() for ds in ds_list}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
def generate_samples(format_name):
format_ids_seq = []
po = open("./json2select.json",'r')
po = json.load(po)
start = 0
end = 0
for item in dataset_files:
fm = dataset2format[item]
global tsname
tsname = item
if fm==format_name:
sequence = po[item]
len_seq = len(po[item])
data_path = "./data_process/data/{}/train.json".format(item)
dataset = load_dataset("json", data_files=data_path)["train"].select(sequence)
assert len_seq==len(dataset)
train_dataset = dataset.map(
lambda x:preprocess_function(x,format_name),
batched=True,
remove_columns=["input","output"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
# train_dataset = train_dataset.add_column("sample_id",add_line)
train_dataset.save_to_disk("./onlymeta/{}-train.hf".format(item))
# start = end | null |
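For clarity, json2select.json read above is assumed to map each dataset name to the list of training-row indices to keep; the values below are invented:
example_selection = {"squad1_1": [0, 5, 42], "boolq": [1, 2, 3]}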
163,280 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metat5 import T5ForConditionalGeneration as PromptT5
from dataset_processors import *
from simpletrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if 'same' in model_args.model_name_or_path:
task2id = {'squad': 0, 'extractive': 0, 'narrativeqa': 1, 'abstractive': 1, 'race': 2, 'multichoice': 2,
'boolq': 3, 'bool': 3, 'newsqa': 8, 'quoref': 9, 'ropes': 10, 'drop': 11, 'nqopen': 12,
'boolq_np': 13, 'openbookqa': 14, 'mctest': 15, 'social_iqa': 16, 'dream': 17}
else:
task2id = {'squad': 0, 'extractive': 1, 'narrativeqa': 2, 'abstractive': 3, 'race': 4, 'multichoice': 5,
'boolq': 6, 'bool': 7, 'newsqa': 8, 'quoref': 9, 'ropes': 10, 'drop': 11, 'nqopen': 12,
'boolq_np': 13, 'openbookqa': 14, 'mctest': 15, 'social_iqa': 16, 'dream': 17}
dataset_name_to_metric = {
'squad1_1': 'metric/squad_v1_local/squad_v1_local.py',
'squad2': 'metric/squad_v2_local/squad_v2_local.py',
'newsqa': 'metric/squad_v1_local/squad_v1_local.py',
'boolq': 'metric/squad_v1_local/squad_v1_local.py',
'narrativeqa_dev': 'metric/rouge_local/rouge_metric.py',
'race_string': 'metric/accuracy.py',
'quoref': 'metric/squad_v1_local/squad_v1_local.py',
'ropes': 'metric/squad_v1_local/squad_v1_local.py',
'drop': 'metric/squad_v1_local/squad_v1_local.py',
'natural_questions_with_dpr_para': 'metric/squad_v1_local/squad_v1_local.py',
'boolq_np': 'metric/squad_v1_local/squad_v1_local.py',
'openbookqa': 'metric/accuracy.py',
'arc_hard': 'metric/accuracy.py',
'arc_easy': 'metric/accuracy.py',
'mctest_corrected_the_separator': 'metric/accuracy.py',
'social_iqa': 'metric/accuracy.py',
'dream': 'metric/accuracy.py',
'commonsenseqa':'metric/accuracy.py',
'qasc':'metric/accuracy.py',
'physical_iqa':'metric/accuracy.py',
'winogrande_xl':'metric/accuracy.py',
'multirc':'metric/squad_v1_local/squad_v1_local.py',
'onestopqa_advanced':'metric/accuracy.py',
'onestopqa_elementry':'metric/accuracy.py',
'onestopqa_intermediate':'metric/accuracy.py',
'prost_multiple_choice_with_no_context':'metric/accuracy.py',
'processbank_test':'metric/accuracy.py',
'cosmosqa':'metric/accuracy.py',
'mcscript':'metric/accuracy.py',
'mcscript2':'metric/accuracy.py',
'quail':'metric/accuracy.py',
'reclor':'metric/accuracy.py',
'measuring_massive_multitask_language_understanding':'metric/accuracy.py',
'head_qa_en_test':'metric/accuracy.py',
'race_c':'metric/accuracy.py',
'pubmedqa_pqal_short_ans':'metric/squad_v1_local/squad_v1_local.py',#
'strategyqa':'metric/squad_v1_local/squad_v1_local.py',#
'tweetqa':'metric/bleu_local/bleu.py',
'qaconv':'metric/squad_v1_local/squad_v1_local.py',
'record_extractive':'metric/squad_v1_local/squad_v1_local.py',
'adversarialqa_dbert_dev':'metric/squad_v1_local/squad_v1_local.py',
'adversarialqa_dbidaf_dev':'metric/squad_v1_local/squad_v1_local.py',
'adversarialqa_droberta_dev':'metric/squad_v1_local/squad_v1_local.py'
}
# Set seed before initializing model.
set_seed(training_args.seed)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
global tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
'[OPTIONS]','[HINT]']}
tokenizer.add_tokens(tokens_to_add)
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
added_tokens = tokenizer.get_added_vocab()
model = PromptT5.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model.copy_encoder()
model.resize_token_embeddings(len(tokenizer))
global max_source_length
max_source_length = 1024
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
    if training_args.local_rank == -1 or training_args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not training_args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
question_column = data_args.question_column
context_column = data_args.context_column
answer_column = data_args.answer_column
# import random
data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
    os.makedirs("./mem_scorescores", exist_ok=True)
#start
train_dataloaders = {}
eval_dataloaders = {}
replay_dataloaders = {}
to_be_train = ["squad1_1","squad2","narrativeqa_dev","boolq","arc_hard","arc_easy","openbookqa","race_string","mctest_corrected_the_separator","newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
to_be_train.extend(["narrativeqa_dev","boolq","arc_hard","arc_easy","openbookqa","mctest_corrected_the_separator","newsqa","quoref","ropes","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"])
max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
if training_args.local_rank!=-1:
world_size = torch.distributed.get_world_size()
else:
world_size = 1
device_id = training_args.local_rank if training_args.local_rank!=-1 else 0
for format_name in ["bool","extractive","abstractive","multichoice"]:
generate_samples(format_name)
generate_samples_eval(format_name)
if training_args.local_rank!=-1:
torch.distributed.barrier()
eval_ds = {}
eval_exp = {}
if training_args.local_rank<=0:
print("\n=====================Load trainset==========\n")
for ds_name in dataset_files:
train_dataloaders[ds_name]= load_from_disk("./onlymeta/{}-train.hf".format(ds_name))
for ds_name in dataset_files+unseen_files:
eval_ds[ds_name] = load_from_disk("./onlymeta/{}-eval.hf".format(ds_name))
eval_exp[ds_name] = load_from_disk("./processed/{}-evalex.hf".format(ds_name))
train_dataset = None
for item in dataset_files:
if train_dataset is None:
train_dataset = train_dataloaders[item]
else:
train_dataset = concatenate_datasets([train_dataset, train_dataloaders[item]])
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
        train_dataset=train_dataset.select(range(1000)),  # trains on a 1000-sample subset
eval_dataset=None,
eval_examples=None,
answer_column_name=answer_column,
dataset_name="squad1_1",
tokenizer=None,
data_collator=data_collator,
compute_metrics= compute_metrics if training_args.predict_with_generate else None,
)
train_result = trainer.train()
output_dir = "./only_meta_ckpt"
trainer.save_model(output_dir="./only_meta_ckpt")
trainer.args.output_dir = output_dir
trainer.save_state()
gc.collect()
torch.cuda.empty_cache()
model.set_test()
for item in dataset_files+unseen_files:
print("current_test:",item)
eval_dataset,eval_examples = eval_ds[item],eval_exp[item]
len_ds = len(eval_dataset)
range_ds = list(range(len_ds))
eval_dataset = eval_dataset.add_column("id",range_ds).add_column("example_id",range_ds)
# assert False
metric = load_metric(dataset_name_to_metric[item])
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
tester = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=None,
eval_dataset=eval_dataset,
eval_examples=eval_examples,
answer_column_name=answer_column,
dataset_name=item,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
        # reset generation arguments so tester.evaluate falls back to the model defaults
        max_length, num_beams, ignore_keys_for_eval = None, None, None
metrics = tester.evaluate(max_length=max_length, num_beams=num_beams, ignore_keys=ignore_keys_for_eval,
metric_key_prefix="eval")
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
tester.log_metrics("eval", metrics)
return None
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
163,281 | import datasets
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
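A quick example of the counter this returns:
# Unigram and bigram counts for a four-token segment.
counts = _get_ngrams(["the", "cat", "sat", "the"], max_order=2)
assert counts[("the",)] == 2 and counts[("the", "cat")] == 1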
The provided code snippet includes necessary dependencies for implementing the `compute_bleu` function. Write a Python function `def compute_bleu(reference_corpus, translation_corpus, max_order=1, smooth=False)` to solve the following problem:
Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 6-tuple of (BLEU score, n-gram precisions, brevity penalty, length ratio, translation length, reference length).
Here is the function:
def compute_bleu(reference_corpus, translation_corpus, max_order=1,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
    Returns:
        6-tuple of (bleu, n-gram precisions, brevity penalty, length ratio,
        translation_length, reference_length).
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
# translation = references[0]
translation = [item.lower() for item in translation]
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
# print(merged_ref_ngram_counts)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
    return (bleu, precisions, bp, ratio, translation_length, reference_length) | Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 6-tuple of (BLEU score, n-gram precisions, brevity penalty, length ratio, translation length, reference length).
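A toy check of the function above: an identical reference and hypothesis score BLEU 1.0 at max_order=1, since every unigram matches and the brevity penalty exp(1 - 1/1) is 1.
refs = [[["the", "cat", "sat"]]]
hyps = [["the", "cat", "sat"]]
bleu, precisions, bp, ratio, tlen, rlen = compute_bleu(refs, hyps, max_order=1)
assert abs(bleu - 1.0) < 1e-9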
163,284 | import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def get_tokens(s):
    # missing in the snippet as extracted; standard SQuAD-eval helper
    if not s:
        return []
    return normalize_answer(s).split()
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
qid = qa["id"]
if qid not in preds:
print(f"Missing prediction for {qid}")
continue
a_pred = preds[qid]
gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = [""]
if a_pred != "":
exact_scores[qid] = 0
f1_scores[qid] = 0
else:
exact_scores[qid] = 1
f1_scores[qid] = 1
else:
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores | null |
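A minimal usage sketch with one invented SQuAD-style record:
dataset = [{"paragraphs": [{"qas": [
    {"id": "q1", "answers": {"text": ["Paris"]}},
]}]}]
preds = {"q1": "paris"}
exact_scores, f1_scores = get_raw_scores(dataset, preds)
assert exact_scores["q1"] == 1 and f1_scores["q1"] == 1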
163,290 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from absl import logging
import nltk
import numpy as np
import six
from six.moves import map
from six.moves import range
from .scoring import *
from .rouge_tokenizers import *
The provided code snippet includes necessary dependencies for implementing the `_create_ngrams` function. Write a Python function `def _create_ngrams(tokens, n)` to solve the following problem:
Creates ngrams from the given list of tokens. Args: tokens: A list of tokens from which ngrams are created. n: Number of tokens to use, e.g. 2 for bigrams. Returns: A dictionary mapping each n-gram to the number of occurrences.
Here is the function:
def _create_ngrams(tokens, n):
"""Creates ngrams from the given list of tokens.
Args:
tokens: A list of tokens from which ngrams are created.
n: Number of tokens to use, e.g. 2 for bigrams.
Returns:
        A dictionary mapping each n-gram to the number of occurrences.
"""
ngrams = collections.Counter()
for ngram in (tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)):
ngrams[ngram] += 1
    return ngrams | Creates ngrams from the given list of tokens. Args: tokens: A list of tokens from which ngrams are created. n: Number of tokens to use, e.g. 2 for bigrams. Returns: A dictionary mapping each n-gram to the number of occurrences.
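A quick example:
# Bigram counts over a repeated-token sequence.
assert _create_ngrams(["a", "b", "a", "b"], 2) == collections.Counter(
    {("a", "b"): 2, ("b", "a"): 1})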
163,291 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from absl import logging
import nltk
import numpy as np
import six
from six.moves import map
from six.moves import range
from .scoring import *
from .rouge_tokenizers import *
def _lcs_table(ref, can):
"""Create 2-d LCS score table."""
rows = len(ref)
cols = len(can)
lcs_table = [[0] * (cols + 1) for _ in range(rows + 1)]
for i in range(1, rows + 1):
for j in range(1, cols + 1):
if ref[i - 1] == can[j - 1]:
lcs_table[i][j] = lcs_table[i - 1][j - 1] + 1
else:
lcs_table[i][j] = max(lcs_table[i - 1][j], lcs_table[i][j - 1])
return lcs_table
class Score(
collections.namedtuple("Score", ["precision", "recall", "fmeasure"])):
"""Tuple containing precision, recall, and f-measure values."""
def func_fmeasure(precision, recall):
"""Computes f-measure given precision and recall values."""
if precision + recall > 0:
return 2 * precision * recall / (precision + recall)
else:
return 0.0
The provided code snippet includes necessary dependencies for implementing the `_score_lcs` function. Write a Python function `def _score_lcs(target_tokens, prediction_tokens)` to solve the following problem:
Computes LCS (Longest Common Subsequence) rouge scores. Args: target_tokens: Tokens from the target text. prediction_tokens: Tokens from the predicted text. Returns: A Score object containing computed scores.
Here is the function:
def _score_lcs(target_tokens, prediction_tokens):
"""Computes LCS (Longest Common Subsequence) rouge scores.
Args:
target_tokens: Tokens from the target text.
prediction_tokens: Tokens from the predicted text.
Returns:
A Score object containing computed scores.
"""
if not target_tokens or not prediction_tokens:
return Score(precision=0, recall=0, fmeasure=0)
    # Compute length of LCS from the bottom up in a table (DP approach).
lcs_table = _lcs_table(target_tokens, prediction_tokens)
lcs_length = lcs_table[-1][-1]
precision = lcs_length / len(prediction_tokens)
recall = lcs_length / len(target_tokens)
fmeasure = func_fmeasure(precision, recall)
return Score(precision=precision, recall=recall, fmeasure=fmeasure) | Computes LCS (Longest Common Subsequence) rouge scores. Args: target_tokens: Tokens from the target text. prediction_tokens: Tokens from the predicted text. Returns: A Score object containing computed scores. |
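A small worked example for `_score_lcs` on toy token lists:

target = ["police", "killed", "the", "gunman"]
prediction = ["police", "kill", "the", "gunman"]
score = _score_lcs(target, prediction)
# the LCS is ["police", "the", "gunman"], so precision == recall == fmeasure == 0.75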
163,292 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from absl import logging
import nltk
import numpy as np
import six
from six.moves import map
from six.moves import range
from .scoring import *
from .rouge_tokenizers import *
def _union_lcs(ref, c_list):
"""Find union LCS between a ref sentence and list of candidate sentences.
Args:
ref: list of tokens
c_list: list of list of indices for LCS into reference summary
Returns:
List of tokens in ref representing union LCS.
"""
lcs_list = [lcs_ind(ref, c) for c in c_list]
return [ref[i] for i in _find_union(lcs_list)]
import collections
class Score(
collections.namedtuple("Score", ["precision", "recall", "fmeasure"])):
"""Tuple containing precision, recall, and f-measure values."""
def func_fmeasure(precision, recall):
"""Computes f-measure given precision and recall values."""
if precision + recall > 0:
return 2 * precision * recall / (precision + recall)
else:
return 0.0
The provided code snippet includes necessary dependencies for implementing the `_summary_level_lcs` function. Write a Python function `def _summary_level_lcs(ref_sent, can_sent)` to solve the following problem:
ROUGE: Summary-level LCS, section 3.2 in ROUGE paper. Args: ref_sent: list of tokenized reference sentences can_sent: list of tokenized candidate sentences Returns: summary level ROUGE score
Here is the function:
def _summary_level_lcs(ref_sent, can_sent):
"""ROUGE: Summary-level LCS, section 3.2 in ROUGE paper.
Args:
ref_sent: list of tokenized reference sentences
can_sent: list of tokenized candidate sentences
Returns:
summary level ROUGE score
"""
if not ref_sent or not can_sent:
return Score(precision=0, recall=0, fmeasure=0)
m = sum(map(len, ref_sent))
n = sum(map(len, can_sent))
if not n or not m:
return Score(precision=0, recall=0, fmeasure=0)
# get token counts to prevent double counting
token_cnts_r = collections.Counter()
token_cnts_c = collections.Counter()
for s in ref_sent:
# s is a list of tokens
token_cnts_r.update(s)
for s in can_sent:
token_cnts_c.update(s)
hits = 0
for r in ref_sent:
lcs = _union_lcs(r, can_sent)
# Prevent double-counting:
# The paper describes just computing hits += len(_union_lcs()),
# but the implementation prevents double counting. We also
# implement this as in version 1.5.5.
for t in lcs:
if token_cnts_c[t] > 0 and token_cnts_r[t] > 0:
hits += 1
token_cnts_c[t] -= 1
token_cnts_r[t] -= 1
recall = hits / m
precision = hits / n
fmeasure = func_fmeasure(precision, recall)
return Score(precision=precision, recall=recall, fmeasure=fmeasure) | ROUGE: Summary-level LCS, section 3.2 in ROUGE paper. Args: ref_sent: list of tokenized reference sentences can_sent: list of tokenized candidate sentences Returns: summary level ROUGE score |
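A worked example for `_summary_level_lcs` on single-sentence summaries (this assumes `lcs_ind` and `_find_union`, which `_union_lcs` calls, are available from the full rouge module):

ref = [["the", "cat", "sat"]]
can = [["the", "cat", "ran"]]
score = _summary_level_lcs(ref, can)
# the union LCS for the one reference sentence is ["the", "cat"],
# so hits == 2 and precision == recall == 2/3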
163,293 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from absl import logging
import nltk
import numpy as np
import six
from six.moves import map
from six.moves import range
from .scoring import *
from .rouge_tokenizers import *
import six
from six.moves import range
class Score(
collections.namedtuple("Score", ["precision", "recall", "fmeasure"])):
"""Tuple containing precision, recall, and f-measure values."""
def func_fmeasure(precision, recall):
"""Computes f-measure given precision and recall values."""
if precision + recall > 0:
return 2 * precision * recall / (precision + recall)
else:
return 0.0
import six
The provided code snippet includes necessary dependencies for implementing the `_score_ngrams` function. Write a Python function `def _score_ngrams(target_ngrams, prediction_ngrams)` to solve the following problem:
Compute n-gram based rouge scores. Args: target_ngrams: A Counter object mapping each ngram to number of occurrences for the target text. prediction_ngrams: A Counter object mapping each ngram to number of occurrences for the prediction text. Returns: A Score object containing computed scores.
Here is the function:
def _score_ngrams(target_ngrams, prediction_ngrams):
"""Compute n-gram based rouge scores.
Args:
target_ngrams: A Counter object mapping each ngram to number of
occurrences for the target text.
prediction_ngrams: A Counter object mapping each ngram to number of
occurrences for the prediction text.
Returns:
A Score object containing computed scores.
"""
intersection_ngrams_count = 0
for ngram in six.iterkeys(target_ngrams):
intersection_ngrams_count += min(target_ngrams[ngram],
prediction_ngrams[ngram])
target_ngrams_count = sum(target_ngrams.values())
prediction_ngrams_count = sum(prediction_ngrams.values())
precision = intersection_ngrams_count / max(prediction_ngrams_count, 1)
recall = intersection_ngrams_count / max(target_ngrams_count, 1)
fmeasure = func_fmeasure(precision, recall)
return Score(precision=precision, recall=recall, fmeasure=fmeasure) | Compute n-gram based rouge scores. Args: target_ngrams: A Counter object mapping each ngram to number of occurrences for the target text. prediction_ngrams: A Counter object mapping each ngram to number of occurrences for the prediction text. Returns: A Score object containing computed scores. |
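Combining `_create_ngrams` with `_score_ngrams` yields a ROUGE-N style score; a toy ROUGE-2 check:

target = _create_ngrams(["the", "cat", "sat"], 2)
prediction = _create_ngrams(["the", "cat", "ran"], 2)
score = _score_ngrams(target, prediction)
# one overlapping bigram ("the", "cat") out of two on each side,
# so precision == recall == fmeasure == 0.5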
163,294 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from nltk.stem import porter
import re
import six
# Pattern constants as in the reference ROUGE tokenizer (filled in here so the
# snippet is self-contained): drop non-alphanumerics, split on whitespace, and
# keep only lowercase alphanumeric tokens.
NON_ALPHANUM_PATTERN = r"[^a-z0-9]+"
SPACES_PATTERN = r"\s+"
VALID_TOKEN_PATTERN = r"^[a-z0-9]+$"
NON_ALPHANUM_RE = re.compile(NON_ALPHANUM_PATTERN)
SPACES_RE = re.compile(SPACES_PATTERN)
VALID_TOKEN_RE = re.compile(VALID_TOKEN_PATTERN)
The provided code snippet includes necessary dependencies for implementing the `_tokenize` function. Write a Python function `def _tokenize(text, stemmer)` to solve the following problem:
Tokenize input text into a list of tokens. This approach aims to replicate the approach taken by Chin-Yew Lin in the original ROUGE implementation. Args: text: A text blob to tokenize. stemmer: An optional stemmer. Returns: A list of string tokens extracted from input text.
Here is the function:
def _tokenize(text, stemmer):
"""Tokenize input text into a list of tokens.
This approach aims to replicate the approach taken by Chin-Yew Lin in
the original ROUGE implementation.
Args:
text: A text blob to tokenize.
stemmer: An optional stemmer.
Returns:
A list of string tokens extracted from input text.
"""
# Convert everything to lowercase.
text = text.lower()
# Replace any non-alpha-numeric characters with spaces.
text = NON_ALPHANUM_RE.sub(" ", six.ensure_str(text))
tokens = SPACES_RE.split(text)
if stemmer:
# Only stem words more than 3 characters long.
tokens = [six.ensure_str(stemmer.stem(x)) if len(x) > 3 else x
for x in tokens]
# One final check to drop any empty or invalid tokens.
tokens = [x for x in tokens if VALID_TOKEN_RE.match(x)]
return tokens | Tokenize input text into a list of tokens. This approach aims to replicate the approach taken by Chin-Yew Lin in the original ROUGE implementation. Args: text: A text blob to tokenize. stemmer: An optional stemmer. Returns: A list of string tokens extracted from input text. |
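A usage sketch for `_tokenize` with the Porter stemmer imported above (the expected output follows from the rules in the function: words longer than three characters are stemmed):

stemmer = porter.PorterStemmer()
print(_tokenize("Cats were RUNNING fast!", stemmer))
# -> ['cat', 'were', 'run', 'fast']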
163,295 | import argparse
import json
import re
import string
import sys
from collections import Counter
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x["text"], qa["answers"]))
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1} | null |
163,296 | import logging
import t5
import os
import json
import functools
import tensorflow as tf
import tensorflow_datasets as tfds
def dataset_preprocessor(ds):
def normalize_text(text):
"""Lowercase and remove quotes from a TensorFlow string."""
text = tf.strings.lower(text)
text = tf.strings.regex_replace(text, "'(.*)'", r"\1")
return text
def to_inputs_and_targets(ex):
return {
"inputs": normalize_text(ex["inputs"]),
"targets": normalize_text(ex["targets"])
}
return ds.map(to_inputs_and_targets,
num_parallel_calls=tf.data.experimental.AUTOTUNE) | null |
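A quick eager-mode check of `dataset_preprocessor` on an in-memory dataset (toy strings):

ds = tf.data.Dataset.from_tensor_slices(
    {"inputs": ["What is 'X'?"], "targets": ["'Y'"]})
for ex in dataset_preprocessor(ds):
    print(ex["inputs"].numpy(), ex["targets"].numpy())
# b'what is x?' b'y'  (lowercased, quoted span unwrapped)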
163,297 | import logging
import t5
import os
import json
import functools
import tensorflow as tf
import tensorflow_datasets as tfds
DATA_DIR = f"gs://unifiedqa/data/"
def get_path(data_dir1, split):
tsv_path = {
"train": os.path.join(data_dir1, "train.tsv"),
"dev": os.path.join(data_dir1, "dev.tsv"),
"test": os.path.join(data_dir1, "test.tsv")
}
return tsv_path[split]
print(f" >>>> adding one mixture for `union_mixture`")
def dataset_fn(split, shuffle_files=False, dataset=""):
# We only have one file for each split.
del shuffle_files
# Load lines from the text file as examples.
ds = tf.data.TextLineDataset(get_path(DATA_DIR + dataset, split))
# Split each "<question>\t<answer>" example into (question, answer) tuple.
print(" >>>> about to read csv . . . ")
ds = ds.map(
functools.partial(tf.io.decode_csv, record_defaults=["", ""],
field_delim="\t", use_quote_delim=False),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# print(" >>>> after reading csv . . . ")
# Map each tuple to a {"question": ... "answer": ...} dict.
ds = ds.map(lambda *ex: dict(zip(["inputs", "targets"], ex)))
# print(" >>>> after mapping . . . ")
return ds | null |
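A sketch of how `dataset_fn` would typically be registered as a t5 task (the task name and empty metric list here are hypothetical, and the exact registry signature depends on the installed t5 version):

t5.data.TaskRegistry.add(
    "union_mixture_task",
    dataset_fn=functools.partial(dataset_fn, dataset="narrativeqa_dev"),
    splits=["train", "dev", "test"],
    text_preprocessor=[dataset_preprocessor],
    metric_fns=[])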
163,298 | import glob
import json
import logging
import pickle
import time
from typing import List, Tuple, Dict, Iterator
import numpy as np
import torch
from torch import Tensor as T
from torch import nn
from datasets import load_dataset, load_from_disk
from transformers import (
AutoTokenizer,
AdamW,
get_linear_schedule_with_warmup,
TrainingArguments,
)
import json
tokenizer = AutoTokenizer.from_pretrained("./bert-base-uncased")
format2id = {"extractive":0,"abstractive":1,"multichoice":2,"bool":3}
def read_hints(path):
if path=="extractive":
num_total = 10
elif path=="abstractive":
num_total = 10
elif path=="multichoice":
num_total = 4
else:
num_total = 2
all_hints = []
all_demons = []
total_pathin = "./textinput/"+path+"-glminput.jsonl"
finput = open(total_pathin,'r',encoding='utf-8')
for num_id in range(num_total):
total_path = "./plmresource/"+path+str(num_id)+"-glmout.json"
fin = open(total_path,'r',encoding='utf-8')
b = json.load(fin)["data"]
all_hints.extend(b)
for item in finput.readlines():
all_demons.append(json.loads(item)["text"])
assert len(all_demons)==len(all_hints)
return all_hints,all_demons
import random
from tqdm import tqdm
def gen_rr_corpus(name):
po = json.load(open("./json2select.json",'r'))
base_id = 2e5*format2id[name]
start_id = 0
demonstrations = []
format_hints = []
all_hints,all_demons = read_hints(name)
pdz = open("./textinput/{}-glminput.jsonl".format(name),'r',encoding='utf-8')
pdz_lines = pdz.readlines()
pdz_lines = [json.loads(item) for item in pdz_lines]
pdzs = [item['id'] for item in pdz_lines]
assert len(pdzs)==len(all_hints)
all_cases = []
case_prev = []
case_lens = []
idx = 0
pl = False
total_count = 0
for itemd,itemh,itemp in tqdm(zip(all_demons,all_hints,pdzs)):
# print(itemd)
if pl==True and itemp!=idx:
assert False
if itemp==idx:
idx+=1
if idx!=1 and (idx-1)%5000==0:
total_count+=1
continue
if case_prev != []:
case_lens.append(len(case_prev))
sub_ids = list(range(len(case_prev)))
if len(case_prev)>64:
assert False
else:
lacked = 64-len(case_prev)
pp = random.sample(case_prev,lacked)
sub_ids.extend([999]*lacked)
case_prev.extend(pp)
all_keys = case_prev[0].keys()
case_prev_reshape={}
for k in all_keys:
case_prev_reshape[k]=[kit[k] for kit in case_prev]
case_prev_reshape["sub_ids"]=sub_ids
case_prev_reshape["sample_id"]=base_id+start_id
start_id+=1
#all_cases.extend(case_prev)
all_cases.append(case_prev_reshape)
case_prev=[]
if idx!=1 and (idx-1)%5000==0:
pout = open("./sel_file/{}-retrak{}.json".format(name,str(total_count)),'w',encoding='utf-8')
for atom_line in all_cases:
print(json.dumps(atom_line),file=pout)
pout.close()
ppp = load_dataset("json", data_files="./sel_file/{}-retrak{}.json".format(name,str(total_count)))["train"]
ppp.save_to_disk("./sel_file/{}-retrak{}.hf".format(name,str(total_count)))
all_cases = []
total_count+=1
try:
ctx,query = itemd.split("\n\nQ:")
ctx = "Q:".join(ctx.split("Q:")[1:]).strip()
pl=False
except:
pl=True
continue
ctx_q,ctx_a = "\nA:".join(ctx.split("\nA:")[:-1]),ctx.split("\nA:")[1].strip()
ctx_q_parts = ctx_q.split("\\n")
demon_view = ""
query_parts = (query.split("\nA:")[0]).split("\\n")
query_parts = [_.strip() for _ in query_parts]
ctx_q_parts =[_.strip() for _ in ctx_q_parts]
hint_view = ""
query_view = ""
if name=="multichoice":
if len(ctx_q_parts)==3:
ctx_qa = " \\n ".join([ctx_a,ctx_q_parts[2],ctx_q_parts[1],ctx_q_parts[0]])
ctx_qa_h = " \\n ".join([itemh,ctx_qa])
else:
assert len(ctx_q_parts)==2
ctx_qa = " \\n ".join([ctx_a,ctx_q_parts[1],ctx_q_parts[0]])
ctx_qa_h = " \\n ".join([itemh,ctx_qa])
if (len(query_parts[0].split(" "))>64):
query_parts[0] = " ".join(query_parts[0].split(" ")[:64])
if len(query_parts)==3:
query = " \\n ".join([query_parts[2],query_parts[1],query_parts[0]])
else:
assert len(query_parts)==2
query = " \\n ".join([query_parts[1],query_parts[0]])
else:
assert len(ctx_q_parts)==2
assert len(query_parts)==2
ctx_qa = " \\n ".join([ctx_a,ctx_q_parts[1],ctx_q_parts[0]])
ctx_qa_h = " \\n ".join([itemh,ctx_qa])
if (len(query_parts[0].split(" "))>64):
query_parts[0] = " ".join(query_parts[0].split(" ")[:64])
query = " \\n ".join([query_parts[1],query_parts[0]])
query_out = tokenizer(query,return_token_type_ids=True, return_attention_mask=True,max_length=112, truncation=True,padding='max_length')
query_ids,query_attentions,query_ctxs = query_out["input_ids"],query_out["attention_mask"],query_out["token_type_ids"]
ctx_out = tokenizer(ctx_qa,return_token_type_ids=True, return_attention_mask=True,max_length=112, truncation=True,padding='max_length')
ctx_ids,ctx_attentions,ctx_ctxs = ctx_out["input_ids"],ctx_out["attention_mask"],ctx_out["token_type_ids"]
cross_out = tokenizer(query,ctx_qa_h,return_token_type_ids=True, return_attention_mask=True,max_length=144, truncation=True,padding='max_length')
cross_ids,cross_attentions,cross_ctxs = cross_out["input_ids"],cross_out["attention_mask"],cross_out["token_type_ids"]
tp ={"query_ids":query_ids,"query_attentions":query_attentions,
"ctx_ids":ctx_ids,"ctx_attentions":ctx_attentions,
"cross_ids":cross_ids,"cross_attentions":cross_attentions,"cross_ctxs":cross_ctxs,"id":idx}
case_prev.append(tp)
if case_prev != []:
case_lens.append(len(case_prev))
# print(case_lens)
sub_ids = list(range(len(case_prev)))
if len(case_prev)>64:
assert False
else:
lacked = 64-len(case_prev)
pp = random.sample(case_prev,lacked)
sub_ids.extend([999]*lacked)
case_prev.extend(pp)
all_keys = case_prev[0].keys()
case_prev_reshape={}
for k in all_keys:
case_prev_reshape[k]=[kit[k] for kit in case_prev]
case_prev_reshape["sub_ids"]=sub_ids
case_prev_reshape["sample_id"]=base_id+start_id
start_id+=1
all_cases.append(case_prev_reshape)
case_prev=[]
pout = open("./sel_file/{}-retrak{}.json".format(name,str(total_count)),'w',encoding='utf-8')
for atom_line in all_cases:
print(json.dumps(atom_line),file=pout)
pout.close()
ppp = load_dataset("json", data_files="./sel_file/{}-retrak{}.json".format(name,str(total_count)))["train"]
ppp.save_to_disk("./sel_file/{}-retrak{}.hf".format(name,str(total_count)))
all_cases = [] | null |
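The columnar reshape applied to `case_prev` above (a list of per-candidate dicts turned into one dict of lists) in isolation, with toy rows:

rows = [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
cols = {k: [r[k] for r in rows] for k in rows[0].keys()}
assert cols == {"a": [1, 3], "b": [2, 4]}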
163,299 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorea import T5ForConditionalGeneration as PromptT5
from models.metascorea2 import T5ForConditionalGeneration as PromptT5plus
from dataset_processors import *
from metatrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def get_model_obj(model: nn.Module):
return model.module if hasattr(model, "module") else model | null |
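`get_model_obj` unwraps a `DataParallel`/`DistributedDataParallel` wrapper so the same code works with or without distributed training; a typical (hypothetical) checkpointing use:

state_dict = get_model_obj(model).state_dict()  # `model` may or may not be DDP-wrapped
torch.save(state_dict, "checkpoint.pt")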
163,300 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorea import T5ForConditionalGeneration as PromptT5
from models.metascorea2 import T5ForConditionalGeneration as PromptT5plus
from dataset_processors import *
from metatrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def train_qascore(model,trainset,training_args,data_collator):
optimizer = AdamW(
model.parameters(),
lr=1e-4,
)
qa_scores={}
device_id = training_args.local_rank
if device_id == -1:
device_id = 0
if training_args.local_rank == -1:
data_loader_qa = DataLoader(dataset=trainset, shuffle=False,batch_size=training_args.per_device_train_batch_size//2,collate_fn=data_collator)#, sampler=train_sampler)
else:
sampler = DistributedSampler(
trainset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_qa = DataLoader(dataset=trainset, shuffle=False,batch_size=training_args.per_device_train_batch_size//2,collate_fn=data_collator,sampler=sampler,drop_last=False)
for data in data_loader_qa:
# labels = data.pop("labels")
sample_id = data["sample_id"].numpy().tolist()
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
kl_loss,qa_probs = model.forward_single(**data)
if model.activate_rer or model.activate_ret:
kl_loss.backward()
            optimizer.step()
            optimizer.zero_grad()  # reset gradients so they do not accumulate across batches
scores_id = qa_probs.detach().cpu().numpy().tolist()
for s_id,score in zip(sample_id,scores_id):
qa_scores[int(s_id)]=score
return qa_scores | null |
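A hedged sketch of how the return value is shaped (argument names here are hypothetical):

qa_scores = train_qascore(model, train_dataset, training_args, data_collator)
# qa_scores: {sample_id (int) -> the QA model's per-hint probability scores for that sample}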
163,301 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorea import T5ForConditionalGeneration as PromptT5
from models.metascorea2 import T5ForConditionalGeneration as PromptT5plus
from dataset_processors import *
from metatrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
format2dataset = {
'extractive':['squad1_1','squad2','extractive','newsqa','quoref','ropes','adversarialqa_dbert_dev','adversarialqa_dbidaf_dev','adversarialqa_droberta_dev','record_extractive'],
'abstractive':['narrativeqa_dev','abstractive','natural_questions_with_dpr_para','drop','qaconv','tweetqa'],
'multichoice':['race_string','multichoice','openbookqa','mctest_corrected_the_separator','social_iqa','commonsenseqa','qasc','physical_iqa','winogrande_xl','onestopqa_advanced','onestopqa_elementry','onestopqa_intermediate','prost_multiple_choice_with_no_context','dream','processbank_test','cosmosqa','mcscript','mcscript2','quail','reclor','measuring_massive_multitask_language_understanding','head_qa_en_test','race_c','arc_hard','arc_easy'],
'bool':['boolq','bool','boolq_np','multirc','strategyqa','pubmedqa_pqal_short_ans']
}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
def evaluate_model(eval_model, priority_level,format_name,training_args,data_collator,epoch_id):
device_id = training_args.local_rank
if device_id == -1:
device_id = 0
for item in dataset_files:
if not item in format2dataset[format_name]:
continue
lmax = 64
if lmax>len(load_from_disk("./epoch_data{}/{}-reteval.hf".format(str(epoch_id),item))):
lmax = len(load_from_disk("./epoch_data{}/{}-reteval.hf".format(str(epoch_id),item)))
dataset_ret = load_from_disk("./epoch_data{}/{}-reteval.hf".format(str(epoch_id),item)).select(range(lmax))
dataset_rer = load_from_disk("./epoch_data{}/{}-rereval.hf".format(str(epoch_id),item)).select(range(lmax))
dataset_first = load_from_disk("./epoch_data{}/{}-firsteval.hf".format(str(epoch_id),item)).select(range(lmax))
for (eval_ds,source) in [(dataset_first,"first"),(dataset_ret,"ret"),(dataset_rer,"rer")]:
if training_args.local_rank == -1:
data_loader_eval = DataLoader(dataset=eval_ds, shuffle=False,batch_size=training_args.per_device_train_batch_size*4,collate_fn=data_collator)#, sampler=train_sampler)
else:
sampler = DistributedSampler(
eval_ds,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_eval = DataLoader(dataset=eval_ds, shuffle=False,batch_size=training_args.per_device_train_batch_size*4,collate_fn=data_collator,sampler=sampler,drop_last=False)
tnum = 0.0
for data in data_loader_eval:
labels = data.pop("labels")
data["eval_labels"]=labels
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
if source!="first":
out = eval_model.module.eval_mode(**data)
priority_level[format_name][source]+=out.mean().item()
else:
out = eval_model.module.eval_golden(**data)
priority_level[format_name]["qa"]+=out.mean().item()
tnum +=1.0
if source!="first":
priority_level[format_name][source]=priority_level[format_name][source]
else:
priority_level[format_name]["qa"]=priority_level[format_name]["qa"]
return priority_level | null |
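`priority_level` is assumed to arrive pre-initialized with one accumulator per hint source; a hypothetical initialization consistent with the keys used above:

priority_level = {fmt: {"ret": 0.0, "rer": 0.0, "qa": 0.0}
                  for fmt in format2dataset.keys()}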
163,302 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorea import T5ForConditionalGeneration as PromptT5
from models.metascorea2 import T5ForConditionalGeneration as PromptT5plus
from dataset_processors import *
from metatrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def trainbc(bencoder,cencoder,dataset,training_args,fwd,qaret,qarer):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in bencoder.named_parameters() if not any(
nd in n for nd in no_decay)], "weight_decay": 0.01},
{"params": [p for n, p in bencoder.named_parameters() if any(
nd in n for nd in no_decay)], "weight_decay": 0.0},
{"params": [p for n, p in cencoder.named_parameters() if not any(
nd in n for nd in no_decay)], "weight_decay": 0.01},
{"params": [p for n, p in cencoder.named_parameters() if any(
nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=1e-5,
)
device_id = training_args.local_rank
loss_fct = torch.nn.KLDivLoss()
if device_id==-1:
device_id=0
bs_size = training_args.per_device_train_batch_size
offset = []
offset2 = []
offset3 = []
scaler = amp.GradScaler(enabled=True)
for itf in range(bs_size):
offset.extend([itf*training_args.c_size]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*training_args.c_size]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
if training_args.local_rank == -1:
        data_loader_train = DataLoader(dataset=dataset.select(range(100)), batch_size=bs_size, collate_fn=default_data_collator)  # note: this single-process path only uses the first 100 examples
else:
sampler = DistributedSampler(
dataset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_train = DataLoader(dataset=dataset,batch_size=bs_size,sampler=sampler,drop_last=False,collate_fn=default_data_collator)
for i, data in enumerate(data_loader_train):
if data["query_ids"].size(0)!=bs_size:
bs_size = data["query_ids"].size(0)
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*training_args.c_size]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*training_args.c_size]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
data["query_ids"] = data["query_ids"][:,0,:].squeeze(1)
data["query_attentions"] = data["query_attentions"][:,0,:].squeeze(1)
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
bmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = bmodel_out
local_q_vector = local_q_vector.view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,training_args.c_size,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
cmodel_out = cencoder(
data["cross_ids"].view(-1,144),
data["cross_attentions"].view(-1,144),
data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,training_args.c_size)
if fwd:
bi_score = torch.softmax(sim_scores, dim=-1)
c_score = torch.nn.functional.log_softmax(cmodel_out, dim=-1)
kl_loss = loss_fct(c_score, bi_score)
else:
bi_score = torch.nn.functional.log_softmax(sim_scores, dim=-1)
c_score = torch.softmax(cmodel_out, dim=-1)
kl_loss = loss_fct(bi_score,c_score)
scaler.scale(kl_loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
# sim_scores = sim_scores.add(score_mask)
#(bs*768)*(768*64)=(bs*64)
kl_loss_qa1 = 0.0
kl_loss_qa2 = 0.0
if "qa_scores" in data.keys() and (qaret or qarer):
with torch.no_grad():
previous_ids = []
sids = data["sample_id"].cpu().numpy().tolist()
for sid in sids:
try:
previous_ids.extend(format_hints_ids[int(sid)])
except:
previous_ids.extend([0,1,2,3])
previous_ids = torch.tensor(previous_ids).long().to(torch.device("cuda", device_id)).add(offset3)
previous_data = {}
for k_ in data.keys():
if len(data[k_].size())==3:
previous_data[k_] = torch.index_select(data[k_].view(bs_size*training_args.c_size,-1),dim=0,index=previous_ids).view(bs_size,4,-1).clone()
elif len(data[k_].size())==2:
if k_=="query_ids" or k_=="query_attentions":
previous_data[k_]=data[k_].clone()
elif k_!="qa_scores":
previous_data[k_] = torch.index_select(data[k_].view(-1),dim=0,index=previous_ids).view(bs_size,4).clone()
else:
previous_data[k_]=data[k_].clone()
else:
if k_=="sample_id":
previous_data[k_]=data[k_].clone()
cmodel_previous = cencoder(
previous_data["cross_ids"].view(-1,144),
previous_data["cross_attentions"].view(-1,144),
previous_data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,4)
dmodel_previous = bencoder(
previous_data["query_ids"].view(-1,112),#.squeeze(),
previous_data["query_attentions"].view(-1,112),#.squeeze(),
previous_data["ctx_ids"].view(-1,112),#.squeeze(),
previous_data["ctx_attentions"].view(-1,112),#.squeeze(),
)
local_q_vector, local_ctx_vectors = dmodel_previous
local_q_vector = local_q_vector.view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,4,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
bi_score_previous = torch.nn.functional.log_softmax(sim_scores, dim=-1)
c_score_previous = torch.nn.functional.log_softmax(cmodel_previous, dim=-1)
qa_scores = torch.softmax(data["qa_scores"], dim=-1)
if qarer:
kl_loss_qa1 = loss_fct(c_score_previous, qa_scores)
if qaret:
kl_loss_qa2 = loss_fct(bi_score_previous,qa_scores)
kl_qa_all = kl_loss_qa1+kl_loss_qa2
scaler.scale(kl_qa_all).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad() | null |
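The `fwd` flag only swaps which scorer provides the log-probabilities (and hence receives most of the gradient); the distillation pattern in isolation, with toy scores:

import torch
teacher = torch.randn(2, 16)
student = torch.randn(2, 16)
# KLDivLoss expects log-probabilities as input and probabilities as target
loss = torch.nn.KLDivLoss()(
    torch.nn.functional.log_softmax(student, dim=-1),
    torch.softmax(teacher, dim=-1))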
163,303 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorea import T5ForConditionalGeneration as PromptT5
from models.metascorea2 import T5ForConditionalGeneration as PromptT5plus
from dataset_processors import *
from metatrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def filthints(bencoder,cencoder,dataset,training_args,fwd,qaret,qarer):
device_id = training_args.local_rank
if device_id==-1:
device_id=0
bencoder.eval()
cencoder.eval()
bs_size = 48
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*training_args.c_size]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*training_args.c_size]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
if training_args.local_rank == -1:
data_loader_train = DataLoader(dataset, shuffle=True,batch_size=bs_size,collate_fn=default_data_collator)
else:
sampler = DistributedSampler(
dataset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_train = DataLoader(dataset=dataset,batch_size=bs_size,sampler=sampler,collate_fn=default_data_collator,drop_last=False)
for i, data in enumerate(data_loader_train):
if data["query_ids"].size(0)!=bs_size:
bs_size = data["query_ids"].size(0)
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*training_args.c_size]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*training_args.c_size]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
torch.cuda.synchronize()
start = time.time()
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
with torch.no_grad():
bmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = bmodel_out
local_q_vector = local_q_vector.view(-1,training_args.c_size,768)[:,0,:].view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,training_args.c_size,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
# print(sim_scores)
sim_scores = sim_scores.add(score_mask)
#(1*768)*(768*64)=(1*64)
sort_result, sort_idxs = sim_scores.topk(16)#sort(dot_prod_scores, dim=0, descending=True)
sort_idxs = sort_idxs.view(-1).add(offset)
for k_ in data.keys():
if len(data[k_].size())==3:
data[k_] = torch.index_select(data[k_].view(bs_size*training_args.c_size,-1),dim=0,index=sort_idxs).view(bs_size,16,-1)
elif len(data[k_].size())==2:
if k_!="qa_scores":
data[k_] = torch.index_select(data[k_].view(-1),dim=0,index=sort_idxs).view(bs_size,16)
else:
pass
else:
if k_=="sample_id":
pass
else:
print(k_)
print(data[k_])
assert False
# torch.cuda.synchronize()
# print(time.time()-start,"select16_time")
cmodel_out = cencoder(
data["cross_ids"].view(-1,144),
data["cross_attentions"].view(-1,144),
data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,16)
dmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
# score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = dmodel_out
local_q_vector = local_q_vector.view(-1,16,768)[:,0,:].view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,16,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
bi_score = sim_scores
c_score = cmodel_out
_,rerank_idxs = cmodel_out.topk(4)
# torch.cuda.synchronize()
# print(time.time()-start,"cmodel_time")
for bat in range(rerank_idxs.size(0)):
sel = torch.index_select(data["sub_ids"][bat],dim=0,index=rerank_idxs[bat])
ids = data["sample_id"][bat]
format_hints_ids[ids.item()]=sel.cpu().numpy().tolist()
for ix in range(rerank_idxs.size(0)):
ids = data["sample_id"][ix].item()
bscore = []
cscore = []
for item in rerank_idxs[ix]:
bscore.append(bi_score[ix][item].item())
cscore.append(c_score[ix][item].item())
format_ret_scores[ids]=bscore
format_rer_scores[ids]=cscore
# torch.cuda.synchronize()
# print(time.time()-start,"final_time") | null |
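The `offset` tensors built above turn per-row top-k indices into indices over the flattened (batch * c_size) candidate axis for `index_select`; the trick in isolation with batch 2, c_size 4, k 2:

import torch
scores = torch.tensor([[0.1, 0.9, 0.3, 0.2], [0.5, 0.4, 0.8, 0.6]])
_, idx = scores.topk(2)                # tensor([[1, 2], [2, 3]])
offset = torch.tensor([0, 0, 4, 4])    # row_index * c_size, repeated k times
flat_idx = idx.view(-1).add(offset)    # tensor([1, 2, 6, 7])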
163,304 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorea import T5ForConditionalGeneration as PromptT5
from models.metascorea2 import T5ForConditionalGeneration as PromptT5plus
from dataset_processors import *
from metatrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def rank_rr(bencoder,cencoder,dataset,training_args):
device_id = training_args.local_rank
bencoder.eval()
cencoder.eval()
if device_id==-1:
device_id=0
bs_size = 64
    ngpus = torch.distributed.get_world_size() if training_args.local_rank != -1 else 1  # guard single-process runs where no process group is initialized
if len(dataset)//ngpus<bs_size:
bs_size = len(dataset)//ngpus
offset = []
offset2 = []
offset3 = []
ppt = []
for itf in range(bs_size):
offset.extend([itf*training_args.c_size]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*training_args.c_size]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
if training_args.local_rank == -1:
data_loader_dev = DataLoader(dataset=dataset, shuffle=True,batch_size=bs_size,collate_fn=default_data_collator)
else:
sampler = DistributedSampler(
dataset,
shuffle=False,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_dev = DataLoader(dataset=dataset,batch_size=bs_size,shuffle=False,sampler=sampler,collate_fn=default_data_collator,drop_last=False)
for i, data in enumerate(data_loader_dev):
data = {x: data[x].to(torch.device("cuda", device_id)) for x in data if data[x] is not None}
if True:
if data["query_ids"].size(0)!=bs_size:
bs_size = data["query_ids"].size(0)
offset = []
offset2 = []
offset3 = []
for itf in range(bs_size):
offset.extend([itf*training_args.c_size]*16)
offset2.extend([itf*16]*4)
offset3.extend([itf*training_args.c_size]*4)
offset = torch.Tensor(offset).long().to(torch.device("cuda", device_id))
offset2 = torch.Tensor(offset2).long().to(torch.device("cuda", device_id))
offset3 = torch.Tensor(offset3).long().to(torch.device("cuda", device_id))
bmodel_out = bencoder(
data["query_ids"].view(-1,112),#.squeeze(),
data["query_attentions"].view(-1,112),#.squeeze(),
data["ctx_ids"].view(-1,112),#.squeeze(),
data["ctx_attentions"].view(-1,112),#.squeeze(),
)
score_mask = (data["sub_ids"]>=999).int().mul(-999)
local_q_vector, local_ctx_vectors = bmodel_out
local_q_vector = local_q_vector.view(-1,training_args.c_size,768)[:,0,:].view(-1,768)
local_ctx_vectors = local_ctx_vectors.view(-1,training_args.c_size,768)
sim_scores = torch.bmm(
local_q_vector.unsqueeze(1), torch.transpose(local_ctx_vectors, 1, 2)
).squeeze(1)
sim_scores = sim_scores.add(score_mask)#.cpu()
#(1*768)*(768*64)=(1*64)
sort_result, sort_idxs = sim_scores.topk(16)#sort(dot_prod_scores, dim=0, descending=True)
for bat in range(sort_idxs.size(0)):
sel = torch.index_select(data["sub_ids"][bat],dim=0,index=sort_idxs[bat])
ids = data["sample_id"][bat]
first_select_ids[int(ids.item())]=sort_idxs[bat].cpu().numpy().tolist()
sort_idxs = sort_idxs.view(-1).add(offset)
_,sort_idxs4 = sim_scores.topk(4)
for bat in range(sort_idxs4.size(0)):
sel = torch.index_select(data["sub_ids"][bat],dim=0,index=sort_idxs4[bat])
ids = data["sample_id"][bat]
ret_select_ids[int(ids.item())]=sel.cpu().numpy().tolist()
for k_ in data.keys():
if len(data[k_].size())==3:
data[k_] = torch.index_select(data[k_].view(bs_size*training_args.c_size,-1),dim=0,index=sort_idxs).view(bs_size,16,-1)
elif len(data[k_].size())==2:
if k_!="qa_scores":
data[k_] = torch.index_select(data[k_].view(-1),dim=0,index=sort_idxs).view(bs_size,16)
else:
pass
else:
pass
cmodel_out = cencoder(
data["cross_ids"].view(-1,144),
data["cross_attentions"].view(-1,144),
data["cross_ctxs"].view(-1,144),
).squeeze(-1).view(-1,16)
_,rerank_idxs = cmodel_out.topk(4)
for bat in range(rerank_idxs.size(0)):
sel = torch.index_select(data["sub_ids"][bat],dim=0,index=rerank_idxs[bat])
ids = data["sample_id"][bat]
rer_select_ids[int(ids.item())]=sel.cpu().numpy().tolist() | null |
163,305 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorea import T5ForConditionalGeneration as PromptT5
from models.metascorea2 import T5ForConditionalGeneration as PromptT5plus
from dataset_processors import *
from metatrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def preprocess_function_eval(examples,format_name):
if True:
preprocess_fn = preprocess_proqa_eval
if "hintret" in examples.keys():
k_ = "hintret"
elif "hintrer" in examples.keys():
k_ = "hintrer"
else:
k_ = "hintfirst"
inputs, targets,hints= preprocess_fn(examples, "input","output",k_,format_name=format_name)
model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True)
hints_inputs = tokenizer(hints, max_length=max_source_length, padding=padding, truncation=True)
model_inputs["input_ids"] = [xx[:-1]+[32108]+yy[:-1]+[1] for xx,yy in zip(model_inputs["input_ids"],hints_inputs["input_ids"])]
model_inputs["attention_mask"] = [xx[:-1]+[1]+yy[:-1]+[xx[-1]] for xx,yy in zip(model_inputs["attention_mask"],hints_inputs["attention_mask"])]
# sample_ids =list(range(len(inputs)))
# sample_ids = [5000+_+dataset2id[tsname]*1000000 for _ in sample_ids]
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=128, padding=padding, truncation=True)
if padding == "max_length":
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
format_id = format2id[dataset2format[tsname]]
meta_ids = [- (i + 1) for i in range(10)]*5
input_ids = copy.deepcopy(
[input_ids for input_ids in model_inputs['input_ids']])
model_inputs['input_ids'] = input_ids # [format_prompt_ids+input_ids for input_ids in model_inputs['input_ids']]
model_inputs['attention_mask'] = [attention_mask for attention_mask in
model_inputs['attention_mask']]
# model_inputs["sample_id"] = sample_ids
return model_inputs
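The `input_ids` splice above joins the question encoding and the hint encoding around an added separator token; the shape of the join with hypothetical token ids (32108 is the separator id used by this codebase, 1 is T5's EOS):

question_ids = [100, 200, 1]
hint_ids = [300, 400, 1]
joined = question_ids[:-1] + [32108] + hint_ids[:-1] + [1]
assert joined == [100, 200, 32108, 300, 400, 1]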
global format2id
format2id = {'extractive':0,'abstractive':1,'multichoice':2,'bool':3}
dataset2format= {}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
def generate_samples_validation(format_name,epoch_id):
start = 0
fhint = open("./plmresource/{}-hints.txt".format(format_name),'r')
hintlines = fhint.readlines()
hint_cands = [_.strip().split("<@#>") for _ in hintlines]
base_id = -format2id[format_name]*2e5
for item in dataset_files:
fm = dataset2format[item]
global tsname
tsname = item
if fm==format_name:
data_path = "./raw_data/{}-val.json".format(item)
dataset = load_dataset("json", data_files=data_path)["train"]
end = start+len(dataset)
sample_ids = [(base_id-i) for i in range(start,end)]
first_selection = []
ret_selection = []
rer_selection = []
for sid, cands in zip(sample_ids,hint_cands[start:end]):
try:
first_selection.append(" ; ".join([cands[_] for _ in first_sel[sid]]))
ret_selection.append(" ; ".join([cands[_] for _ in ret_sel[sid]]))
rer_selection.append(" ; ".join([cands[_] for _ in rer_sel[sid]]))
except:
print(sid)
print(first_sel[sid])
print(ret_sel[sid])
print(rer_sel[sid])
print(len(cands))
assert False
start += len(dataset)
dataset = dataset.add_column("hintret",ret_selection)
dataset_ret = dataset.map(
lambda x:preprocess_function_eval(x,format_name),
batched=True,
remove_columns=["input","output","hintret"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
dataset = dataset.remove_columns("hintret")
dataset = dataset.add_column("hintrer",rer_selection)
dataset_rer = dataset.map(
lambda x:preprocess_function_eval(x,format_name),
batched=True,
remove_columns=["input","output","hintrer"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
dataset = dataset.remove_columns("hintrer")
dataset = dataset.add_column("hintfirst",first_selection)
dataset_first = dataset.map(
lambda x:preprocess_function_eval(x,format_name),
batched=True,
remove_columns=["input","output","hintfirst"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
dataset_ret.save_to_disk("./epoch_data{}/{}-reteval.hf".format(str(epoch_id),item))
dataset_rer.save_to_disk("./epoch_data{}/{}-rereval.hf".format(str(epoch_id),item))
dataset_first.save_to_disk("./epoch_data{}/{}-firsteval.hf".format(str(epoch_id),item))
start = end | null |
163,306 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorea import T5ForConditionalGeneration as PromptT5
from models.metascorea2 import T5ForConditionalGeneration as PromptT5plus
from dataset_processors import *
from metatrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def preprocess_function(examples):
if True:
preprocess_fn = preprocess_proqa
inputs, targets,hints = preprocess_fn(examples, "input","output","hint",format_name=format_name)
model_inputs = tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True)
hints_inputs = tokenizer(hints, max_length=max_source_length, padding=padding, truncation=True)
model_inputs["input_ids"] = [xx[:-1]+[32108]+yy[:-1]+[1] for xx,yy in zip(model_inputs["input_ids"],hints_inputs["input_ids"])]
model_inputs["attention_mask"] = [xx[:-1]+[1]+yy[:-1]+[xx[-1]] for xx,yy in zip(model_inputs["attention_mask"],hints_inputs["attention_mask"])]
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=128, padding=padding, truncation=True)
if padding == "max_length":
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
meta_ids = [- (i + 1) for i in range(10)]*5
input_ids = copy.deepcopy(
[meta_ids + input_ids for input_ids in model_inputs['input_ids']])
seps = []
model_inputs['input_ids'] = input_ids # [format_prompt_ids+input_ids for input_ids in model_inputs['input_ids']]
model_inputs['attention_mask'] = [[1] * 50 + attention_mask for attention_mask in
model_inputs['attention_mask']]
return model_inputs
global format2id
format2id = {'extractive':0,'abstractive':1,'multichoice':2,'bool':3}
format2dataset = {
'extractive':['squad1_1','squad2','extractive','newsqa','quoref','ropes','adversarialqa_dbert_dev','adversarialqa_dbidaf_dev','adversarialqa_droberta_dev','record_extractive'],
'abstractive':['narrativeqa_dev','abstractive','natural_questions_with_dpr_para','drop','qaconv','tweetqa'],
'multichoice':['race_string','multichoice','openbookqa','mctest_corrected_the_separator','social_iqa','commonsenseqa','qasc','physical_iqa','winogrande_xl','onestopqa_advanced','onestopqa_elementry','onestopqa_intermediate','prost_multiple_choice_with_no_context','dream','processbank_test','cosmosqa','mcscript','mcscript2','quail','reclor','measuring_massive_multitask_language_understanding','head_qa_en_test','race_c','arc_hard','arc_easy'],
'bool':['boolq','bool','boolq_np','multirc','strategyqa','pubmedqa_pqal_short_ans']
}
dataset2format= {}
dataset_files =["squad1_1","squad2","narrativeqa_dev","mctest_corrected_the_separator","race_string","arc_hard","arc_easy","boolq","openbookqa"]+["newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
def generate_samples_with_hints(format_hints_ids, format_name, epoch_id):
format_ids_seq = []
po = open("./json2select.json",'r')
po = json.load(po)
all_format_size = 0
for item in dataset_files:
if item in format2dataset[format_name]:
all_format_size+=len(po[item])
base_id = 2e5*format2id[format_name]
format_ids_seq=[(i+base_id) for i in range(all_format_size)]
pdz = open("textinput/{}-glminput.jsonl".format(format_name),'r',encoding='utf-8')
pdz_lines = pdz.readlines()
pdz_lines = [json.loads(item) for item in pdz_lines]
pdzs = [item['id'] for item in pdz_lines]
assert pdzs[-1]+1==all_format_size
splits= {"extractive":10,"abstractive":10,"multichoice":4,"bool":2}
splitn =splits[format_name]
total_json = []
for idx in range(int(splitn)):
a = open("./plmresource/{}-glmout.json".format(format_name+str(idx)),'r',encoding='utf-8')
b = json.load(a)["data"]
total_json.extend(b)
top_line = []
single_hints = []
current_saved = []
top_line_count = 0
clk = 1
for left,right in zip(total_json,pdzs):
if right == clk:
read_ = []
idss = format_hints_ids[format_ids_seq[clk-1]]
for i_ in idss:
read_.append(current_saved[i_])
clk+=1
top_line.append(" ; ".join(read_))
current_saved = []
current_saved.append(left)
read_ = []
idss = format_hints_ids[format_ids_seq[clk-1]]
for i_ in idss:
read_.append(current_saved[i_])
top_line.append(" ; ".join(read_))
start = 0
end = 0
for item in dataset_files:
fm = dataset2format[item]
global tsname
tsname = item
if fm==format_name:
sequence = po[item]
len_seq = len(po[item])
data_path = "./data_process/data/{}/train.json".format(item)
dataset = load_dataset("json", data_files=data_path)["train"].select(sequence)
assert len_seq==len(dataset)
end=start+len_seq
add_line = format_ids_seq[start:end]
hints = top_line[start:end]
dataset = dataset.add_column("hint",hints)
train_dataset = dataset.map(
preprocess_function,
batched=True,
remove_columns=["input","output","hint"],
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
train_dataset = train_dataset.add_column("sample_id",add_line)
train_dataset.save_to_disk("./epoch_data{}/{}-train.hf".format(str(epoch_id),item))
start = end | null |
163,307 | import os
import time
import torch
import copy,random
import sys
import gc
import json
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from typing import Optional
from typing import List, Optional, Tuple
from tqdm import tqdm
import warnings
from torch.cuda import amp
from contextlib import suppress as nullcontext
from transformers import AdamW, get_linear_schedule_with_warmup
from models.metascorea import T5ForConditionalGeneration as PromptT5
from models.metascorea2 import T5ForConditionalGeneration as PromptT5plus
from dataset_processors import *
from metatrainer import QuestionAnsweringTrainer
from retrievermodel import init_biencoder_components
from rerankermodel.encoder import BertEncoder_For_CrossEncoder
from loadscorea import *
from hintpreprocess import *
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
import datasets
import numpy as np
from datasets import load_dataset, load_metric,load_from_disk,concatenate_datasets
import os
from functools import partial
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
M2M100Tokenizer,
MBart50Tokenizer,
EvalPrediction,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from pathlib import Path
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def main():
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if 'same' in model_args.model_name_or_path:
task2id = {'squad': 0, 'extractive': 0, 'narrativeqa': 1, 'abstractive': 1, 'race': 2, 'multichoice': 2,
'boolq': 3, 'bool': 3, 'newsqa': 8, 'quoref': 9, 'ropes': 10, 'drop': 11, 'nqopen': 12,
'boolq_np': 13, 'openbookqa': 14, 'mctest': 15, 'social_iqa': 16, 'dream': 17}
else:
task2id = {'squad': 0, 'extractive': 1, 'narrativeqa': 2, 'abstractive': 3, 'race': 4, 'multichoice': 5,
'boolq': 6, 'bool': 7, 'newsqa': 8, 'quoref': 9, 'ropes': 10, 'drop': 11, 'nqopen': 12,
'boolq_np': 13, 'openbookqa': 14, 'mctest': 15, 'social_iqa': 16, 'dream': 17}
dataset_name_to_metric = {
'squad1_1': 'metric/squad_v1_local/squad_v1_local.py',
'squad2': 'metric/squad_v2_local/squad_v2_local.py',
'newsqa': 'metric/squad_v1_local/squad_v1_local.py',
'boolq': 'metric/squad_v1_local/squad_v1_local.py',
'narrativeqa_dev': 'metric/rouge_local/rouge_metric.py',
'race_string': 'metric/accuracy.py',
'quoref': 'metric/squad_v1_local/squad_v1_local.py',
'ropes': 'metric/squad_v1_local/squad_v1_local.py',
'drop': 'metric/squad_v1_local/squad_v1_local.py',
'natural_questions_with_dpr_para': 'metric/squad_v1_local/squad_v1_local.py',
'boolq_np': 'metric/squad_v1_local/squad_v1_local.py',
'openbookqa': 'metric/accuracy.py',
'arc_hard': 'metric/accuracy.py',
'arc_easy': 'metric/accuracy.py',
'mctest_corrected_the_separator': 'metric/accuracy.py',
'social_iqa': 'metric/accuracy.py',
'dream': 'metric/accuracy.py',
'commonsenseqa':'metric/accuracy.py',
'qasc':'metric/accuracy.py',
'physical_iqa':'metric/accuracy.py',
'winogrande_xl':'metric/accuracy.py',
'multirc':'metric/squad_v1_local/squad_v1_local.py',
'onestopqa_advanced':'metric/accuracy.py',
'onestopqa_elementry':'metric/accuracy.py',
'onestopqa_intermediate':'metric/accuracy.py',
'prost_multiple_choice_with_no_context':'metric/accuracy.py',
'processbank_test':'metric/accuracy.py',
'cosmosqa':'metric/accuracy.py',
'mcscript':'metric/accuracy.py',
'mcscript2':'metric/accuracy.py',
'quail':'metric/accuracy.py',
'reclor':'metric/accuracy.py',
'measuring_massive_multitask_language_understanding':'metric/accuracy.py',
'head_qa_en_test':'metric/accuracy.py',
'race_c':'metric/accuracy.py',
'pubmedqa_pqal_short_ans':'metric/squad_v1_local/squad_v1_local.py',#
'strategyqa':'metric/squad_v1_local/squad_v1_local.py',#
'tweetqa':'metric/bleu_local/bleu.py',
'qaconv':'metric/squad_v1_local/squad_v1_local.py',
'record_extractive':'metric/squad_v1_local/squad_v1_local.py',
'adversarialqa_dbert_dev':'metric/squad_v1_local/squad_v1_local.py',
'adversarialqa_dbidaf_dev':'metric/squad_v1_local/squad_v1_local.py',
'adversarialqa_droberta_dev':'metric/squad_v1_local/squad_v1_local.py'
}
# Set seed before initializing model.
set_seed(training_args.seed)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
global tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokens_to_add = ['[ABSTRACTIVE]', '[BOOL]', '[EXTRACTIVE]', '[MultiChoice]']
special_tokens_dict = {'additional_special_tokens': ['[TASK]', '[QUESTION]', '[CONTEXT]',
'[OPTIONS]','[HINT]']}
tokenizer.add_tokens(tokens_to_add)
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
added_tokens = tokenizer.get_added_vocab()
model = PromptT5.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model.copy_encoder()
model.resize_token_embeddings(len(tokenizer))
global max_source_length
max_source_length = 1024
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
if training_args.local_rank == -1 or training_args.no_cuda:
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
question_column = data_args.question_column
context_column = data_args.context_column
answer_column = data_args.answer_column
# import random
data_args.max_source_length = min(data_args.max_source_length, tokenizer.model_max_length)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
    try:
        os.mkdir("./mem_scores")
    except FileExistsError:
        print("MemDir Exist\n")
#start
train_dataloaders = {}
eval_dataloaders = {}
replay_dataloaders = {}
to_be_train = ["squad1_1","squad2","narrativeqa_dev","boolq","arc_hard","arc_easy","openbookqa","race_string","mctest_corrected_the_separator","newsqa","quoref","ropes","drop","natural_questions_with_dpr_para","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"]
to_be_train.extend(["narrativeqa_dev","boolq","arc_hard","arc_easy","openbookqa","mctest_corrected_the_separator","newsqa","quoref","ropes","commonsenseqa","qasc","physical_iqa","social_iqa","winogrande_xl","multirc","boolq_np"])
max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
if training_args.local_rank!=-1:
world_size = torch.distributed.get_world_size()
else:
world_size = 1
device_id = training_args.local_rank if training_args.local_rank!=-1 else 0
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 1e-2},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-4,eps=1e-8)
# retriever_data =
tensorizer, bi_encoder, _ = init_biencoder_components(
"hf_bert", {}, inference_only=True #hf_bert
)
c_encoder = BertEncoder_For_CrossEncoder.from_pretrained(
"bert-base-uncased"
)
if training_args.local_rank == -1:
bi_encoder = bi_encoder.to(torch.device("cuda", device_id))
c_encoder = c_encoder.to(torch.device("cuda", device_id))
else:
bi_encoder = bi_encoder.to(torch.device("cuda", device_id))
c_encoder = c_encoder.to(torch.device("cuda", device_id))
bi_encoder = nn.parallel.DistributedDataParallel(
bi_encoder,
device_ids=[training_args.local_rank] if training_args.n_gpu != 0 else None,
output_device=training_args.local_rank if training_args.n_gpu else None,
find_unused_parameters=True,
)
c_encoder = nn.parallel.DistributedDataParallel(
c_encoder,
device_ids=[training_args.local_rank] if training_args.n_gpu != 0 else None,
output_device=training_args.local_rank if training_args.n_gpu else None,
)
global all_gpus
if training_args.local_rank==-1:
all_gpus = [-1]
else:
all_gpus = list(range(world_size))
global format_name
global priority_level
priority_level = {}
format2size={"bool":1,"multichoice":5,"extractive":29,"abstractive":9}
skip_evaluate = False
skip_selection = False
global format_train_sel
format_train_sel = {"bool":[],"extractive":[],"abstractive":[],"multichoice":[]}
for epoch_id in range(0, int(training_args.num_train_epochs)):
gc.collect()
torch.cuda.empty_cache()
for k_ in format2size.keys():
priority_level[k_]={"ret":0,"rer":0,"qa":0}
map_location = torch.device("cuda", training_args.local_rank)
global format_hints_ids
global format_ret_scores
global format_rer_scores
format_ret_scores = {}
format_rer_scores = {}
format_hints_ids = {}
global ret_select_ids
global rer_select_ids
global first_select_ids
first_select_ids = {}
ret_select_ids = {}
rer_select_ids = {}
tlm = get_model_obj(c_encoder)
if epoch_id>0:
tlm.load_state_dict(torch.load("./rersave/cencoder-{}.pt".format(str(epoch_id)),map_location=map_location))
else:
tlm.load_state_dict(torch.load("./cmodel13500.pt",map_location=map_location))
for format_name in ["bool","extractive","abstractive","multichoice"]:
sub_to_be_train = [_ for _ in format2dataset[format_name] if _ in to_be_train]
if skip_evaluate:
break
if epoch_id>0:
qa_scores = load_from_qa(all_gpus,sub_to_be_train)
to_load_model = get_model_obj(bi_encoder)
if epoch_id==0:
to_load_model.load_state_dict(torch.load("./retsave/biencoder-{}.pt".format(str(epoch_id)))["model_dict"])
else:
to_load_model.load_state_dict(torch.load("./retsave/biencoder-{}.pt".format(str(epoch_id)),map_location=map_location))
rr_dev_dataset = load_from_disk("./sel_file/{}val-retrak.hf".format(format_name))
if training_args.local_rank<=0:
print("Start ranking {}......".format(format_name))
with torch.no_grad(),amp.autocast(enabled=True):
rank_rr(bi_encoder,c_encoder,rr_dev_dataset,training_args)
if skip_evaluate==False:
fout = open("./mem_scores/rt_ids-{}.json".format(str(training_args.local_rank)),'w')
json.dump(ret_select_ids,fout)
fout.close()
fout = open("./mem_scores/rr_ids-{}.json".format(str(training_args.local_rank)),'w')
json.dump(rer_select_ids,fout)
fout.close()
fout = open("./mem_scores/qa_ids-{}.json".format(str(training_args.local_rank)),'w')
json.dump(first_select_ids,fout)
fout.close()
global first_sel
global ret_sel
global rer_sel
if training_args.local_rank!=-1:
torch.distributed.barrier()
first_sel,ret_sel,rer_sel = load_all_select_ids(all_gpus,to_be_train)
if epoch_id==0:
eval_model = PromptT5.from_pretrained("./t5-base")
eval_model.copy_encoder()
else:
eval_model = PromptT5plus.from_pretrained("./epoch_ckpt{}".format(str(epoch_id)))
if training_args.local_rank == -1:
eval_model = eval_model.to(torch.device("cuda", device_id))
else:
eval_model = eval_model.to(torch.device("cuda", device_id))
eval_model = nn.parallel.DistributedDataParallel(
eval_model,
device_ids=[training_args.local_rank] if training_args.n_gpu != 0 else None,
output_device=training_args.local_rank if training_args.n_gpu else None,
)
for format_name in ["bool","extractive","abstractive","multichoice"]:
if skip_evaluate:
break
if training_args.local_rank<=0:
print("generating...",format_name)
generate_samples_validation(format_name,epoch_id)
if training_args.local_rank!=-1:
torch.distributed.barrier()
if training_args.local_rank<=0:
print("evaluating.....",format_name)
with torch.no_grad():
priority_level = evaluate_model(eval_model,priority_level,format_name,training_args,data_collator,epoch_id)
if skip_evaluate==False:
fout = open("./mem_scores/priority_level-{}.json".format(str(training_args.local_rank)),'w')
json.dump(priority_level,fout)
fout.close()
if training_args.local_rank!=-1:
torch.distributed.barrier()
priority_level = load_level(all_gpus)
format_hints_ids = {}
if skip_selection or epoch_id>0:
fin = open("./mem_scores/format_train_sel.json",'r')
format_train_sel = json.load(fin)
if training_args.local_rank<=0:
print(format_train_sel)
else:
format_train_sel = {"bool":[],"extractive":[],"abstractive":[],"multichoice":[]}
for format_name in ["bool","extractive","abstractive","multichoice"]:
sub_to_be_train = [_ for _ in format2dataset[format_name] if _ in to_be_train]
if epoch_id>0:
qa_scores = load_from_qa(all_gpus,sub_to_be_train)
start_line = 0
end_line = 0
to_load_model = get_model_obj(bi_encoder)
if epoch_id==0:
to_load_model.load_state_dict(torch.load("./retsave/biencoder-{}.pt".format(str(epoch_id)))["model_dict"])
else:
to_load_model.load_state_dict(torch.load("./retsave/biencoder-{}.pt".format(str(epoch_id)),map_location=map_location))
forward_train = True
qa2ret = False
qa2rer = False
if priority_level[format_name]["ret"]<priority_level[format_name]["rer"]:
forward_train = False
if priority_level[format_name]["ret"]<priority_level[format_name]["qa"]:
qa2ret = True
if priority_level[format_name]["rer"]<priority_level[format_name]["qa"]:
qa2rer = True
shard_offset = 0
qascore_offset = 0
for sub_set in range(format2size[format_name]):
if skip_selection==True:
break
rr_filt_dataset = load_from_disk("./sel_file/{}-retrak{}.hf".format(format_name,str(sub_set)))
if epoch_id==0:
rand_idxs = random.sample(range(len(rr_filt_dataset)),len(rr_filt_dataset)//20)
format_rand_idxs = [(_+shard_offset) for _ in rand_idxs]
shard_offset+=len(rr_filt_dataset)
format_train_sel[format_name].append(format_rand_idxs)
else:
format_rand_idxs = format_train_sel[format_name][sub_set]
rand_idxs = [(_-shard_offset) for _ in format_rand_idxs]
shard_offset+=len(rr_filt_dataset)
rr_train_dataset = rr_filt_dataset.select(rand_idxs)
if epoch_id>0:
additional_lines = qa_scores[qascore_offset:qascore_offset+len(rand_idxs)]
qascore_offset+=len(rand_idxs)
rr_train_dataset = rr_train_dataset.add_column("qa_scores",additional_lines)
with amp.autocast(enabled=True):
trainbc(bi_encoder,c_encoder,rr_train_dataset,training_args,forward_train,qa2ret,qa2rer)
filthints(bi_encoder,c_encoder,rr_filt_dataset,training_args,forward_train,qa2ret,qa2rer)
if training_args.local_rank<=0:
torch.save(get_model_obj(bi_encoder).state_dict(),"./retsave/biencoder-{}.pt".format(str(epoch_id+1)))
if training_args.local_rank!=-1:
torch.distributed.barrier()
if training_args.local_rank<=0 and skip_selection==False:
torch.save(get_model_obj(c_encoder).state_dict(),"./rersave/cencoder-{}.pt".format(str(epoch_id+1)))
if skip_selection==False:
fout = open("./mem_scores/format_hints-{}.json".format(str(training_args.local_rank)),'w')
json.dump(format_hints_ids,fout)
fout.close()
if training_args.local_rank<=0 and skip_selection==False:
fout = open("./mem_scores/format_train_sel.json",'w')
json.dump(format_train_sel,fout)
fout.close()
if training_args.local_rank!=-1:
torch.distributed.barrier()
format_hints_ids = load_hints(all_gpus)
if training_args.local_rank<=0:
print("\n=====================generate trainset on epoch{}================\n".format(str(epoch_id)))
for format_name in ["bool","extractive","abstractive","multichoice"]:
generate_samples_with_hints(format_hints_ids,format_name,epoch_id)
if skip_selection==False:
fout = open("./mem_scores/ret-{}.json".format(str(training_args.local_rank)),'w')
json.dump(format_ret_scores,fout)
fout.close()
fout = open("./mem_scores/rer-{}.json".format(str(training_args.local_rank)),'w')
json.dump(format_rer_scores,fout)
fout.close()
if training_args.local_rank!=-1:
torch.distributed.barrier()
eval_ds = {}
eval_exp = {}
if training_args.local_rank<=0:
print("\n=====================Load trainset on epoch{}================\n".format(str(epoch_id)))
for ds_name in to_be_train:
train_dataloaders[ds_name]= load_from_disk("./epoch_data{}/{}-train.hf".format(str(epoch_id),ds_name))
train_dataset = None
for item in to_be_train:
if train_dataset is None:
train_dataset = train_dataloaders[item]
else:
train_dataset = concatenate_datasets([train_dataset, train_dataloaders[item]])
format_offset = [0,0,0,0]
all_idxs = []
all_mkd_dataset = None
for fix,format_name in enumerate(["extractive","abstractive","multichoice","bool"]):
format_mkd_dataset = None
if fix>0:
format_offset[fix] = format_offset[fix-1]
sub_to_be_train = [_ for _ in dataset_files if _ in format2dataset[format_name]]
for tix,tname in enumerate(sub_to_be_train):
tmp_ds = load_from_disk("./epoch_data{}/{}-train.hf".format(str(epoch_id),tname))
format_offset[fix]+=len(tmp_ds)
if format_mkd_dataset is None:
format_mkd_dataset = tmp_ds
else:
format_mkd_dataset = concatenate_datasets([format_mkd_dataset,tmp_ds])
rand_idxs = format_train_sel[format_name][0]
for shard in format_train_sel[format_name][1:]:
rand_idxs.extend(shard)
format_mkd_dataset = format_mkd_dataset.select(rand_idxs)
if all_mkd_dataset is None:
all_mkd_dataset = format_mkd_dataset
else:
all_mkd_dataset = concatenate_datasets([all_mkd_dataset,format_mkd_dataset])
ofset = 0 if fix==0 else format_offset[fix-1]
rand_idxs = [(_+ofset) for _ in rand_idxs]
all_idxs.extend(rand_idxs)
ret_scores = load_from_ret(all_gpus,to_be_train)
ret_scores = [_ for idx,_ in enumerate(ret_scores) if idx in all_idxs]
rer_scores = load_from_rer(all_gpus,to_be_train)
rer_scores = [_ for idx,_ in enumerate(rer_scores) if idx in all_idxs]
all_mkd_dataset = all_mkd_dataset.add_column("ret_scores",ret_scores)
all_mkd_dataset = all_mkd_dataset.add_column("rer_scores",rer_scores)
pp = train_dataset["sample_id"]
activate_ret = False
activate_rer = False
avg_ret,avg_rer,avg_qa = 0.0, 0.0, 0.0
for k_ in priority_level.keys():
avg_ret+=priority_level[k_]["ret"]
avg_rer+=priority_level[k_]["rer"]
avg_qa+=priority_level[k_]["qa"]
if avg_ret>avg_rer:
if avg_ret>avg_qa:
activate_ret = True
else:
if avg_rer>avg_qa:
activate_rer = True
model.activate_ret = activate_ret
model.activate_rer = activate_rer
if training_args.local_rank == -1:
data_loader_train = DataLoader(dataset=train_dataset, shuffle=False,batch_size=training_args.per_device_train_batch_size,collate_fn=data_collator)#, sampler=train_sampler)
else:
sampler = DistributedSampler(
train_dataset,
num_replicas=torch.distributed.get_world_size(),
rank=training_args.local_rank,
seed=training_args.seed,
)
data_loader_train = DataLoader(dataset=train_dataset, shuffle=False,batch_size=training_args.per_device_train_batch_size,collate_fn=data_collator,sampler=sampler,drop_last=False)
total_steps = (len(train_dataset) // training_args.per_device_train_batch_size // world_size) * training_args.num_train_epochs // training_args.gradient_accumulation_steps
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=None,
eval_examples=None,
answer_column_name=answer_column,
dataset_name="squad1_1",
tokenizer=None,
data_collator=data_collator,
compute_metrics= compute_metrics if training_args.predict_with_generate else None,
callbacks=[callbacker],
)
if training_args.local_rank<=0:
print("\n=====================MKD training on epoch{}================\n".format(str(epoch_id)))
to_save_mem_scores = train_qascore(model,all_mkd_dataset,training_args,data_collator)
fout = open("./mem_scores/{}.json".format(str(training_args.local_rank)),'w')
json.dump(to_save_mem_scores,fout)
fout.close()
if epoch_id>0:
model = PromptT5plus.from_pretrained("./epoch_ckpt{}".format(str(epoch_id)))
output_dir = "./epoch_ckpt{}".format(str(epoch_id+1))
if epoch_id>0:
train_result = trainer.train(resume_from_checkpoint="./epoch_ckpt{}".format(str(epoch_id)))
else:
train_result = trainer.train()
trainer.save_model(output_dir="./epoch_ckpt{}".format(str(epoch_id+1)))
trainer.save_score(training_args.local_rank)
trainer.args.output_dir = output_dir
trainer.save_state()
torch.save(trainer.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(trainer.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
gc.collect()
torch.cuda.empty_cache()
return None
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
163,308 | import copy
import math
import os
import warnings
import time
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_t5` function. Write a Python function `def load_tf_weights_in_t5(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
elif scope_names[0] == "self_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[0]
elif scope_names[0] == "enc_dec_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[1]
elif scope_names[0] == "dense_relu_dense":
pointer = getattr(pointer, "layer")
pointer = pointer[2]
elif scope_names[0] == "rms_norm":
if hasattr(pointer, "layer_norm"):
pointer = getattr(pointer, "layer_norm")
elif hasattr(pointer, "final_layer_norm"):
pointer = getattr(pointer, "final_layer_norm")
elif scope_names[0] == "scale":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
elif scope_names[0] == "decoder" and name[1] == "logits":
continue
elif scope_names[0] == "logits":
pointer = getattr(pointer, "lm_head")
elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
pointer = getattr(pointer, f"wi_{scope_names[1]}")
continue
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
return model | Load tf checkpoints in a pytorch model. |
163,313 | import json
import re
import torch
import transformers
import transformers.models.llama.modeling_llama
from functools import partial
def process_system_message(system_message, functions):
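    # NOTE: the misspelling 'excute' is intentional; it must match the exact
    # wording of the upstream system prompts, or the assert/replace below fails.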
assert "with a function call to actually excute your step." in system_message
    # We find that following the ReAct format and merging the thought node with the function-call node makes it easier for the model to learn to integrate the action-input JSON string into its prediction than learning to predict a JSON string directly.
system_message = system_message.replace("with a function call to actually excute your step.", "with a function call to actually excute your step. Your output should follow this format:\nThought:\nAction\nAction Input:\n")
# add all the function dicts in the prompt.
system_message = system_message + "\nSpecifically, you have access to the following APIs: " + str(functions)
return system_message | null |
163,314 | import json
import re
import torch
import transformers
import transformers.models.llama.modeling_llama
from functools import partial
The provided code snippet includes necessary dependencies for implementing the `get_gpu_memory` function. Write a Python function `def get_gpu_memory(max_gpus=None)` to solve the following problem:
Get available memory for each GPU.
Here is the function:
def get_gpu_memory(max_gpus=None):
"""Get available memory for each GPU."""
gpu_memory = []
num_gpus = (
torch.cuda.device_count()
if max_gpus is None
else min(max_gpus, torch.cuda.device_count())
)
for gpu_id in range(num_gpus):
with torch.cuda.device(gpu_id):
device = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device)
total_memory = gpu_properties.total_memory / (1024**3)
allocated_memory = torch.cuda.memory_allocated() / (1024**3)
available_memory = total_memory - allocated_memory
gpu_memory.append(available_memory)
return gpu_memory | Get available memory for each GPU. |
163,315 | import json
import re
import torch
import transformers
import transformers.models.llama.modeling_llama
from functools import partial
def standardize_category(category):
save_category = category.replace(" ", "_").replace(",", "_").replace("/", "_")
while " " in save_category or "," in save_category:
save_category = save_category.replace(" ", "_").replace(",", "_")
save_category = save_category.replace("__", "_")
return save_category | null |
163,316 | import json
import re
import torch
import transformers
import transformers.models.llama.modeling_llama
from functools import partial
def standardize(string):
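    # Normalize an API name: replace every char that is not CJK
    # (\u4e00-\u9fa5), alphanumeric, or '_' with '_' (the extra '^' inside the
    # character class are literal), collapse repeated '_', lowercase, strip
    # leading/trailing '_', and prefix names that start with a digit.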
res = re.compile("[^\\u4e00-\\u9fa5^a-z^A-Z^0-9^_]")
string = res.sub("_", string)
string = re.sub(r"(_)\1+","_", string).lower()
while True:
if len(string) == 0:
return string
if string[0] == "_":
string = string[1:]
else:
break
while True:
if len(string) == 0:
return string
if string[-1] == "_":
string = string[:-1]
else:
break
if string[0].isdigit():
string = "get_" + string
return string | null |
163,317 | import json
import re
import torch
import transformers
import transformers.models.llama.modeling_llama
from functools import partial
def change_name(name):
change_list = ["from", "class", "return", "false", "true", "id", "and"]
if name in change_list:
name = "is_" + name
return name | null |
163,318 | import json
import re
import torch
import transformers
import transformers.models.llama.modeling_llama
from functools import partial
class CondenseRotaryEmbedding(torch.nn.Module):
def __init__(self, dim, ratio, max_position_embeddings=2048, base=10000, device=None):
super().__init__()
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
self.register_buffer("inv_freq", inv_freq)
self.dim = dim
# Build here to make `torch.jit.trace` work.
self.ratio = ratio
max_position_embeddings *= ratio
print(f"Condensing Positional embeddings from {max_position_embeddings} to {max_position_embeddings // ratio}")
self.max_seq_len_cached = max_position_embeddings
t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype) / ratio
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
dtype = torch.get_default_dtype()
self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
def _set_base(self, new_base):
self.base = new_base
self.inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(self.inv_freq.device) / self.dim))
self._set_cos_sin_cache(
seq_len=self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.get_default_dtype()
)
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) / self.ratio
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1).to(device)
self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
def forward(self, x, seq_len=None):
# x: [bs, num_attention_heads, seq_len, head_size]
# This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
return (
self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
)
def replace_llama_with_condense(ratio):
transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = partial(CondenseRotaryEmbedding, ratio=ratio) | null |
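A minimal usage sketch of the monkey patch above (the model id is illustrative); the patch must run before the model is constructed so every attention layer instantiates the condensed rotary embedding:

from transformers import AutoModelForCausalLM

replace_llama_with_condense(ratio=4)  # interpolate positions by 4x
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")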
163,319 | import json
import re
import torch
import transformers
import transformers.models.llama.modeling_llama
from functools import partial
def process_retrieval_ducoment(documents_df):
    ir_corpus = {}
    corpus2tool = {}
    for row in documents_df.itertuples():
        doc = json.loads(row.document_content)
        doc_text = (doc.get('category_name', '') or '') + ', ' + \
                   (doc.get('tool_name', '') or '') + ', ' + \
                   (doc.get('api_name', '') or '') + ', ' + \
                   (doc.get('api_description', '') or '') + \
                   ', required_params: ' + json.dumps(doc.get('required_parameters', '')) + \
                   ', optional_params: ' + json.dumps(doc.get('optional_parameters', '')) + \
                   ', return_schema: ' + json.dumps(doc.get('template_response', ''))
        ir_corpus[row.docid] = doc_text
        corpus2tool[doc_text] = doc['category_name'] + '\t' + doc['tool_name'] + '\t' + doc['api_name']
return ir_corpus, corpus2tool | null |
163,320 | import gc
import abc
import numpy as np
import math
from typing import Iterable
import torch
from transformers.generation.logits_process import (
LogitsProcessorList,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from config import base_list, bsz
def softmax_bias(answers,temperature=1):
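    # Treat each answer's score as an Elo-style rating: map r to
    # 10 ** ((r / temperature) / 400) and normalize, so a 400-point gap
    # corresponds to a 10x probability ratio.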
sums = 0.0
answers = [ 10**((cont/temperature)/400) for cont in answers]
for cont in answers:
assert type(cont) == float or type(cont) == int
sums += cont
answers = [ cont/sums for cont in answers]
return np.array(answers) | null |
163,321 | import gc
import abc
import numpy as np
import math
from typing import Iterable
import torch
from transformers.generation.logits_process import (
LogitsProcessorList,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from config import base_list, bsz
The provided code snippet includes necessary dependencies for implementing the `compute_epsilon_new_node` function. Write a Python function `def compute_epsilon_new_node(p_new_node)` to solve the following problem:
Convert the probability into a delta according to the formula.
Here is the function:
def compute_epsilon_new_node(p_new_node):
'''
    Convert the probability into a delta according to the formula.
'''
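    # Inverse of the Elo expectation E = 1 / (1 + 10 ** (-delta / 400)):
    # recover the rating offset from the win probability p_new_node and
    # center it on a 1000-point baseline.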
delta = 400 * math.log10(p_new_node /(1-p_new_node))
    return 1000 + delta | Convert the probability into a delta according to the formula. |
163,322 | import gc
import abc
import numpy as np
import math
from typing import Iterable
import torch
from transformers.generation.logits_process import (
LogitsProcessorList,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from config import base_list, bsz
def react_parser(string):
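    # Parse a ReAct-style completion of the form
    # "Thought: ...\nAction: ...\nAction Input: ..." into its three fields.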
thought = [string[string.find("Thought: ") + len("Thought: "): string.find("\nAction: ")]]
action = [string[string.find("Action: ") + len("Action: "): string.find("\nAction Input: ")]]
action_input = [string[string.find("Action Input: ") + len("Action Input: "):]]
return thought[0], action[0], action_input[0] | null |
163,323 | import gc
import abc
import numpy as np
import math
from typing import Iterable
import torch
from transformers.generation.logits_process import (
LogitsProcessorList,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from config import base_list, bsz
def prepare_logits_processor(
temperature: float, repetition_penalty: float, top_p: float, top_k: int
) -> LogitsProcessorList:
processor_list = LogitsProcessorList()
    # TemperatureLogitsWarper doesn't accept 0.0; 1.0 makes it a no-op, so we skip both cases.
if temperature >= 1e-5 and temperature != 1.0:
processor_list.append(TemperatureLogitsWarper(temperature))
if repetition_penalty > 1.0:
processor_list.append(RepetitionPenaltyLogitsProcessor(repetition_penalty))
if 1e-8 <= top_p < 1.0:
processor_list.append(TopPLogitsWarper(top_p))
if top_k > 0:
processor_list.append(TopKLogitsWarper(top_k))
return processor_list
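# Usage sketch (illustrative values): temperature plus nucleus sampling, with
# top-k filtering left disabled.
#   processors = prepare_logits_processor(0.7, 1.0, 0.9, -1)
#   next_token_scores = processors(input_ids, next_token_logits)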
base_list = [10000, 17500, 18000, 19000, 20000, 25000]
bsz = 2
def generate_stream(
model, tokenizer, params, device, context_len=8192, stream_interval=2, force_generate=False
):
prompt = params["prompt"]
len_prompt = len(prompt)
# Attention_buckets
# bsz = 2
model.cuda()
model.eval()
# base_list = [10000, 15000, 17500, 18000, 19000, 20000] #[10000, 17500, 18000, 19000, 20000, 25000]
model.set_base_mean(base_list, bsz)
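    # set_base_mean is assumed to be provided by the patched model: it
    # registers the candidate RoPE bases (base_list) and the per-base bucket
    # size used during multi-base attention.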
temperature = float(params.get("temperature", 1.0))
repetition_penalty = float(params.get("repetition_penalty", 1.0))
top_p = float(params.get("top_p", 1.0))
top_k = int(params.get("top_k", -1)) # -1 means disable
max_new_tokens = int(params.get("max_new_tokens", 256))
stop_str = params.get("stop", None)
echo = bool(params.get("echo", True))
stop_token_ids = params.get("stop_token_ids", None) or []
stop_token_ids.append(tokenizer.eos_token_id)
logits_processor = prepare_logits_processor(
temperature, repetition_penalty, top_p, top_k
)
input_ids = tokenizer(prompt).input_ids
input_echo_len = len(input_ids)
output_ids = list(input_ids)
if model.config.is_encoder_decoder:
max_src_len = context_len
else:
max_src_len = context_len - max_new_tokens - 8
input_ids = input_ids[-max_src_len:]
if model.config.is_encoder_decoder:
encoder_output = model.encoder(
input_ids=torch.as_tensor([input_ids], device=device)
)[0]
start_ids = torch.as_tensor(
[[model.generation_config.decoder_start_token_id]],
dtype=torch.int64,
device=device,
)
past_key_values = out = None
for i in range(max_new_tokens):
if i == 0:
if model.config.is_encoder_decoder:
out = model.decoder(
input_ids=start_ids,
encoder_hidden_states=encoder_output,
use_cache=True,
)
logits = model.lm_head(out[0])
else:
with torch.no_grad():
out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
logits = out.logits
past_key_values = out.past_key_values
else:
if model.config.is_encoder_decoder:
out = model.decoder(
input_ids=torch.as_tensor([[token]], device=device),
encoder_hidden_states=encoder_output,
use_cache=True,
past_key_values=past_key_values,
)
logits = model.lm_head(out[0])
else:
with torch.no_grad():
out = model(
input_ids=torch.as_tensor([[token]], device=device),
use_cache=True,
past_key_values=past_key_values,
)
logits = out.logits
past_key_values = out.past_key_values
logit_ = logits[:, -1, :]
# logit_ = logits
if logits_processor:
if repetition_penalty > 1.0:
tmp_output_ids = torch.as_tensor([output_ids], device=logits.device)
else:
tmp_output_ids = None
last_token_logits = logits_processor(tmp_output_ids, logit_)[0]
else:
last_token_logits = logit_
if device == "mps":
            # Switch to CPU to avoid some bugs in the mps backend.
last_token_logits = last_token_logits.float().to("cpu")
if temperature < 1e-5 or top_p < 1e-8: # greedy
token = int(torch.argmax(last_token_logits))
else:
probs = torch.softmax(last_token_logits, dim=-1)
token = int(torch.multinomial(probs, num_samples=1))
output_ids.append(token)
if token in stop_token_ids:
stopped = True
else:
stopped = False
if i == 0 and force_generate:
stopped = False
if i == max_new_tokens - 1 or stopped:
if echo:
tmp_output_ids = output_ids
rfind_start = len_prompt
else:
tmp_output_ids = output_ids[input_echo_len:]
rfind_start = 0
output = tokenizer.decode(
tmp_output_ids,
skip_special_tokens=True,
spaces_between_special_tokens=False,
)
if stop_str:
if isinstance(stop_str, str):
pos = output.rfind(stop_str, rfind_start)
if pos != -1:
output = output[:pos]
stopped = True
elif isinstance(stop_str, Iterable):
for each_stop in stop_str:
pos = output.rfind(each_stop, rfind_start)
if pos != -1:
output = output[:pos]
stopped = True
break
else:
raise ValueError("Invalid stop field type.")
yield {
"text": output,
"usage": {
"prompt_tokens": input_echo_len,
"completion_tokens": i,
"total_tokens": input_echo_len + i,
},
"finish_reason": None,
}
if stopped:
break
# finish stream event, which contains finish reason
if i == max_new_tokens - 1:
finish_reason = "length"
elif stopped:
finish_reason = "stop"
else:
finish_reason = None
yield {
"text": output,
"usage": {
"prompt_tokens": input_echo_len,
"completion_tokens": i,
"total_tokens": input_echo_len + i,
},
"finish_reason": finish_reason,
}
# clean
del past_key_values, out
gc.collect()
torch.cuda.empty_cache() | null |
163,324 | import math
import random
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_llama import LlamaConfig
The provided code snippet includes necessary dependencies for implementing the `_make_causal_mask` function. Write a Python function `def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 )` to solve the following problem:
Make causal mask used for bi-directional self-attention.
Here is the function:
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) | Make causal mask used for bi-directional self-attention. |
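A quick shape sanity check of the function above, written as a sketch (CPU, float32, no KV cache):

m = _make_causal_mask(torch.Size([1, 3]), torch.float32, torch.device("cpu"))
assert m.shape == (1, 1, 3, 3)                            # [bsz, 1, tgt, tgt]
assert m[0, 0, 0, 1] == torch.finfo(torch.float32).min    # future key masked
assert m[0, 0, 1, 0] == 0                                 # past key visible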
163,325 | import math
import random
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_llama import LlamaConfig
The provided code snippet includes necessary dependencies for implementing the `_expand_mask` function. Write a Python function `def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None)` to solve the following problem:
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
Here is the function:
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) | Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. |
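A matching sanity check for the padding-mask expansion (illustrative values):

am = torch.tensor([[1, 1, 0]])                            # one padded position
e = _expand_mask(am, torch.float32)
assert e.shape == (1, 1, 3, 3)                            # [bsz, 1, tgt, src]
assert e[0, 0, 0, 2] == torch.finfo(torch.float32).min    # padded key masked
assert e[0, 0, 0, 0] == 0                                 # real key unmasked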
163,326 | import math
import random
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_llama import LlamaConfig
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, past_key_values_length=0, flip_sin=False):
# The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
# cos = cos.squeeze(1).squeeze(0) # [bsz, seq_len, dim]
# sin = sin.squeeze(1).squeeze(0) # [bsz, seq_len, dim]
# cos = cos[:, position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
# sin = sin[:, position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
cos = cos.squeeze(1) # [bsz, seq_len, dim]
sin = sin.squeeze(1) # [bsz, seq_len, dim]
q_cos = cos[:, position_ids] # [bs, 1, seq_len, dim]
q_sin = sin[:, position_ids] # [bs, 1, seq_len, dim]
if past_key_values_length != 0:
seq_length = position_ids.shape[-1] + past_key_values_length
        key_position_ids = torch.arange(
            0, seq_length, dtype=torch.long, device=position_ids.device
        )
        key_position_ids = key_position_ids.unsqueeze(0).view(-1, seq_length)
k_cos = cos[:, key_position_ids]
k_sin = sin[:, key_position_ids]
else:
k_cos = q_cos
k_sin = q_sin
# if flip_sin:
# q_sin *= -1
# k_sin *= -1
q_embed = (q * q_cos) + (rotate_half(q) * q_sin)
k_embed = (k * k_cos) + (rotate_half(k) * k_sin)
return q_embed, k_embed | null |
163,327 | import math
import random
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_llama import LlamaConfig
The provided code snippet includes necessary dependencies for implementing the `repeat_kv` function. Write a Python function `def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor` to solve the following problem:
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
Here is the function:
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) | This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) |
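A small shape check for the grouped-query repetition above (illustrative sizes):

kv = torch.randn(2, 8, 16, 64)            # (batch, kv_heads, seq, head_dim)
out = repeat_kv(kv, n_rep=4)
assert out.shape == (2, 32, 16, 64)       # 8 kv heads serve 32 query heads
assert torch.equal(out[:, 0], out[:, 1])  # consecutive heads share a kv head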
163,328 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
d = 128
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def theta(i, base):
return base ** (-2 * i / d)
def diff_qmkn(m, base):
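    # d/dm of qmkn(m, base) = (1/sqrt(d)) * sum_j 2*cos(m*theta_j): each
    # cosine term differentiates to -2*theta_j*sin(m*theta_j).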
result = 0
for j in range(int(d/2)):
result += -2 * theta(j, base) * np.sin(m * theta(j, base))
return result/np.sqrt(d) | null |
163,329 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def get_tokenizer():
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-chat-hf', use_fast=False, padding_side="left", cache_dir='../llama-2-7b-hf')
tokenizer.pad_token_id = 0 if tokenizer.pad_token_id is None else tokenizer.pad_token_id
tokenizer.bos_token_id = 1
return tokenizer | null |
163,330 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def get_kv_retrieval_prompt(
data,
key: str,
query_aware_contextualization: bool = False,
):
with open('../lost-in-the-middle/src/lost_in_the_middle/prompts/kv_retrieval.prompt') as f:
prompt_template = f.read().rstrip("\n")
# Format the KV data into a string
formatted_kv_records = ""
for index, record in enumerate(data):
start_character = "{" if index == 0 else " "
data_string = f'"{record[0]}": "{record[1]}"'
end_character = ",\n" if index != len(data) - 1 else "}"
formatted_kv_records += start_character + data_string + end_character
return prompt_template.format(formatted_kv_records=formatted_kv_records, key=key)
def get_kv_sample():
# tokenizer = get_tokenizer()
with xopen('../lost-in-the-middle/kv_retrieval_data/kv-retrieval-75_keys.jsonl') as fin:
data = fin.readlines()
idx = random.choice(range(len(data)))
input_example = json.loads(data[idx])
key = input_example["key"]
value = input_example["value"]
original_kv_index = input_example["ordered_kv_records"].index([key, value])
original_kv = input_example["ordered_kv_records"].pop(original_kv_index)
ordered_kv_records = input_example["ordered_kv_records"][:5]
ordered_kv_records.insert(12, original_kv)
kv_prompt = get_kv_retrieval_prompt(
data=ordered_kv_records[:-1], key=key,
)
print(kv_prompt)
print(value) | null |
163,331 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def get_kv_retrieval_prompt(
data,
key: str,
query_aware_contextualization: bool = False,
):
with open('../lost-in-the-middle/src/lost_in_the_middle/prompts/kv_retrieval.prompt') as f:
prompt_template = f.read().rstrip("\n")
# Format the KV data into a string
formatted_kv_records = ""
for index, record in enumerate(data):
start_character = "{" if index == 0 else " "
data_string = f'"{record[0]}": "{record[1]}"'
end_character = ",\n" if index != len(data) - 1 else "}"
formatted_kv_records += start_character + data_string + end_character
return prompt_template.format(formatted_kv_records=formatted_kv_records, key=key)
def print_data_len():
cnt = 0
min_ = 20000
max_ = 0
with xopen('../kv-retrieval-75_keys.jsonl') as fin:
for line in fin:
input_example = json.loads(line)
key = input_example["key"]
value = input_example["value"]
original_kv_index = input_example["ordered_kv_records"].index([key, value])
original_kv = input_example["ordered_kv_records"].pop(original_kv_index)
ordered_kv_records = input_example["ordered_kv_records"][:25]
ordered_kv_records.insert(12, original_kv)
kv_prompt = get_kv_retrieval_prompt(
data=ordered_kv_records[:-1], key=key,
)
kv_prompts = kv_prompt.split( f'"{original_kv[1]}"')[0]
prompt_input_pre = tokenizer.encode(kv_prompts) #, return_tensors="pt", padding=True
left = len(prompt_input_pre)
prompt_input_pre = tokenizer.encode(kv_prompts+f'"{original_kv[1]}"') #, return_tensors="pt", padding=True
right = len(prompt_input_pre)
prompt_input = tokenizer.encode(kv_prompt)
total = len(prompt_input)
if total-right > max_:
max_ = total-right
if total-right < min_:
min_ = total-right
print(total)
cnt += 1
print(min_)
print(max_) | null |
163,332 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def caculate_mid_mse(sample, target):
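    # Two-pointer sweep over sorted positions: for each target that falls
    # between consecutive sample points, accumulate its squared distance to
    # the interval midpoint; return the root of the mean (an RMSE), or inf
    # when no target was bracketed.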
total = 0
idx = 0
idx_target = 0
mse = 0.0
while idx_target < len(target) and idx < len(sample)-1:
if sample[idx] <= target[idx_target] <= sample[idx+1]:
mse += np.power(target[idx_target] - (sample[idx] + sample[idx+1])/2, 2)
total += 1
idx_target += 1
elif sample[idx] > target[idx_target]:
idx_target += 1
else:
idx += 1
mse = np.sqrt(mse/total) if total != 0 else float('inf')
return mse | null |
163,333 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def merge_peak(ori_peak, new_peak):
# new_peak = [k for k in new_peak if ori_peak[0] < k < ori_peak[-1]]
result = ori_peak + new_peak
result.sort()
return result
def find_peak_points(mn, base):
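    # Scan the local maxima of qmkn(m, base) from left to right, keeping a
    # peak only when it is (near-)maximal among the upcoming window and at
    # least 50 positions past the previously kept peak; stop after 6 peaks.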
all_f_values_short = np.vectorize(qmkn)(mn, base)
# plt.plot(mn, all_f_values_short)
peak_all = [k for k in argrelextrema(all_f_values_short, np.greater)[0]]
p = [all_f_values_short[k] for k in peak_all]
res = []
node = max(p)
while len(res) < 6:
if abs(all_f_values_short[peak_all[0]] - node) <= 0.001:
if len(res) < 1 or mn[peak_all[0]] - mn[res[-1]] > 50:
res.append(peak_all[0])
index = min(peak_all[0]+50, peak_all[1])
# if len(res) > 2:
# tmp = all_f_values_short[index: int(index+1.5*(res[-1] - res[-2]))]
# else:
tmp = all_f_values_short[index: index+500]
node = max(tmp)
peak_all.pop(0)
else:
peak_all.pop(0)
mn = [mn[k] for k in res]
# plt.plot(mn, np.vectorize(qmkn)(mn, base), marker='o')
# plt.savefig('test3.png')
return mn
def find_trough_points(mn, base):
all_f_values_short = np.vectorize(qmkn)(mn, base)
# plt.plot(mn[:-500], all_f_values_short[:-500])
peak_all = [k for k in argrelextrema(all_f_values_short, np.less)[0]]
p = [all_f_values_short[k] for k in peak_all]
res = []
node = min(p)
while len(res) < 6:
if abs(all_f_values_short[peak_all[-1]] - node) <= 0.001:
if len(res) < 1 or res[-1] - peak_all[-1] > 60:
res.append(peak_all[-1])
index = min(peak_all[-1]-50, peak_all[-2])
if index <=0:
break
tmp = all_f_values_short[:index]
node = min(tmp)
peak_all.pop(-1)
else:
peak_all.pop(-1)
    # print(res)
mn = [mn[k] for k in res][::-1]
# plt.plot(mn, np.vectorize(qmkn)(mn, base), marker='o')
# plt.savefig('test3.png')
return mn
def select_cover_max(mn, base_low, base_high, base_gap, final_base, trough, peak, cover_rand):
# Scan candidate bases and keep the one whose peaks/troughs best cover the
# troughs/peaks collected so far (ties broken by smaller mean distance).
# caculate_cover is defined elsewhere in the repository; see the sketch below.
max_cover = 0
min_dis = float('inf')
chosen_base = None
for base in range(base_low, base_high+1, base_gap):
if base in final_base:
continue
peak_ = find_peak_points(mn, base)
trough_ = find_trough_points(mn, base)
cover1, dis1 = caculate_cover(peak_, trough, cover_rand)
cover2, dis2 = caculate_cover(peak, trough_, cover_rand)
cover = cover1 + cover2
dis = (dis1 + dis2)/cover if cover != 0 else float('inf')
if cover > max_cover:
max_cover = cover
min_dis = dis
chosen_base = base
elif cover == max_cover and dis < min_dis:
min_dis = dis
chosen_base = base
return chosen_base, max_cover, min_dis
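`caculate_cover` is called above but never defined in this snippet. A minimal sketch of the contract its call sites imply (an assumption, the repository's version may differ): count how many points of the first list land within `rand` of some point in the second list, accumulating the matched distances.

    def caculate_cover(points_a, points_b, rand):
        # Hypothetical reconstruction based only on how the function is used.
        cover, dis = 0, 0.0
        for a in points_a:
            best = min((abs(a - b) for b in points_b), default=float('inf'))
            if best <= rand:
                cover += 1
                dis += best
        return cover, dis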
def fliter(peak, trough, rand):
# Remove positions that occur in both the peak and trough lists.
# `rand` is accepted for interface compatibility but unused here.
peak_ = [k for k in peak if k not in trough]
trough_ = [k for k in trough if k not in peak]
return peak_, trough_
def base_choose_method1():
# Greedy RoPE-base selection: starting from a seed set of bases, repeatedly
# add the base whose extrema best cover the accumulated peaks and troughs.
word_low = 1000
word_high = 4090
total_base_num = 6
base_gap = 500
base_low = 10000
base_high = 30000
ori_base = 10000
rand = 1
cover_rand = 3
final_base = [10000, 18000, 19000]
mn = [k for k in range(word_low, word_high+1)]
if len(final_base) == 0:
# No seed bases given: exhaustively search for the best initial pair.
total_max_cover = 0
total_min_dis = float('inf')
for ori_base in range(base_low, base_high+1, base_gap):
trough = find_trough_points(mn, ori_base)
peak = find_peak_points(mn, ori_base)
tmp_base = []
chosen_base, max_cover, min_dis = select_cover_max(mn, ori_base+base_gap, base_high, base_gap, tmp_base, trough, peak, cover_rand)
if max_cover > total_max_cover:
total_max_cover = max_cover
total_min_dis = min_dis  # keep the tie-breaker in sync with the best pair
final_base = [ori_base, chosen_base]
elif max_cover == total_max_cover and min_dis < total_min_dis:
total_min_dis = min_dis
final_base = [ori_base, chosen_base]
print(total_max_cover)
print(final_base)
# Collect the extrema of the current base set.
trough = find_trough_points(mn, final_base[0])
peak = find_peak_points(mn, final_base[0])
for i in range(1, len(final_base)):
trough_chosen = find_trough_points(mn, final_base[i])
peak_chosen = find_peak_points(mn, final_base[i])
peak = merge_peak(peak, peak_chosen)
trough = merge_peak(trough, trough_chosen)
peak, trough = fliter(peak, trough, rand)
# Greedily extend the set until it holds total_base_num bases.
for time in range(total_base_num - len(final_base)):
chosen_base, max_cover, min_dis = select_cover_max(mn, base_low, base_high, base_gap, final_base, trough, peak, cover_rand)
print(max_cover)
print(chosen_base)
final_base.append(chosen_base)
peak_chosen = find_peak_points(mn, chosen_base)
trough_chosen = find_trough_points(mn, chosen_base)
peak = merge_peak(peak, peak_chosen)
trough = merge_peak(trough, trough_chosen)
peak, trough = fliter(peak, trough, rand)
print(peak)
print(trough)
final_base.sort()
print(final_base)
print(trough)
print(peak) | null |
163,334 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def qmkn(m, base):
# Bound on the pre-softmax attention score at relative distance m; relies
# on the module-level head dimension d and frequency function theta.
result = 0
for j in range(int(d / 2)):
result += 2*np.cos(m * theta(j, base))
return result/np.sqrt(d)
def plt_base_list(final_base):
# Overlay the qmkn curve of every base in final_base on a single figure.
mn = [k for k in range(900, 4096, 1)]
for base in final_base:
plt.plot(mn, np.vectorize(qmkn)(mn, base))
plt.savefig('test3.png') | null |
163,335 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def qmkn(m, base):
result = 0
for j in range(int(d / 2)):
result += 2*np.cos(m * theta(j, base))
return result/np.sqrt(d)
def find_high_points(mn, base, ratio=1.5):
# Keep the positions whose score exceeds `ratio` times the mean over mn;
# reuse the vectorized values instead of re-evaluating qmkn per element.
all_f_values_short = np.vectorize(qmkn)(mn, base)
mean = all_f_values_short.mean()
last_mn = [k for k, v in zip(mn, all_f_values_short) if v / mean > ratio]
return set(last_mn) | null |
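A usage sketch for find_high_points, assuming `d` and `theta` are in scope as outlined earlier; the positions and ratio are illustrative:

    mn = list(range(900, 4096))
    high = find_high_points(mn, 10000, ratio=1.5)
    print(len(high))  # number of positions well above the average score bound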
163,336 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def qmkn(m, base):
result = 0
for j in range(int(d / 2)):
result += 2*np.cos(m * theta(j, base))
return result/np.sqrt(d)
def plot_introduction():
plt.figure(figsize=(7,8))
# (a) Retrieval accuracy as a function of the target key-value pair index.
m_values_short = [1, 5, 10, 15, 20, 25, 30, 35]
f_values_short = [100, 72, 88, 72, 78, 48, 90, 92]
axs0 = plt.subplot(211)
axs0.plot(m_values_short, f_values_short, marker='o', linewidth=3.5, markersize='10', markerfacecolor='none', markeredgewidth='3',
color='yellowgreen', alpha=0.6)
axs0.set_title('(a)', fontsize=20)
axs0.set_xlabel('Target key-value pair index', fontsize=18, labelpad=10)
axs0.set_ylabel('Accuracy', fontsize=18)
plt.xlim(-2, 38)
plt.ylim(35, 110)
x_major_locator = MultipleLocator(5)
axs0.xaxis.set_major_locator(x_major_locator)
# (b) Pre-softmax attention score bound over relative token position.
m_values_short = [k for k in range(-1, 3005, 1)]
f_values_short = np.vectorize(qmkn)(m_values_short, 10000)
axs1 = plt.subplot(212)
axs1.plot(m_values_short, f_values_short, linewidth=2, color='steelblue', alpha=0.8)
axs1.set_title('(b)', fontsize=20)
axs1.set_xlabel('Relative token position', fontsize=18, labelpad=10)
axs1.set_ylabel('Attention score before softmax', fontsize=16, labelpad=5)
axs0.tick_params(labelsize=15, axis="both", which="major", width=1, length=5)
axs1.tick_params(labelsize=15)
axs0.yaxis.grid(color='lightgray', linestyle='--')
bwith = 1.5  # border line width
# Thicken all four borders of both subplots.
axs0.spines['bottom'].set_linewidth(bwith)
axs0.spines['left'].set_linewidth(bwith)
axs0.spines['top'].set_linewidth(bwith)
axs0.spines['right'].set_linewidth(bwith)
axs1.spines['bottom'].set_linewidth(bwith)
axs1.spines['left'].set_linewidth(bwith)
axs1.spines['top'].set_linewidth(bwith)
axs1.spines['right'].set_linewidth(bwith)
plt.tight_layout()
plt.savefig('test3.pdf') | null |
163,337 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def qmkn(m, base):
result = 0
for j in range(int(d / 2)):
result += 2*np.cos(m * theta(j, base))
return result/np.sqrt(d)
def plot_base_selection_sample():
import mpl_toolkits.axisartist as axisartist
# Create the figure and an axisartist subplot so the axes can carry arrows.
fig = plt.figure(figsize=(6, 4))
ax = axisartist.Subplot(fig, 111)
fig.add_axes(ax)
base_list = [10000, 20000, 30000]
mn = [k for k in range(1, 3000, 1)]
for base in base_list:
plt.plot(mn, np.vectorize(qmkn)(mn, base))
# Draw arrow heads on the x and y axes; hide the top and right axes.
ax.axis["bottom"].set_axisline_style("->", size=1.0)
ax.axis["left"].set_axisline_style("->", size=1.0)
ax.axis['top'].set_visible(False)
ax.axis['right'].set_visible(False)
plt.xticks([])
plt.yticks([])
ax = plt.gca()
bwith = 2  # border line width
ax.spines['bottom'].set_linewidth(bwith)
ax.spines['left'].set_linewidth(bwith)
plt.savefig('test3.png') | null |
163,338 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def qmkn(m, base):
result = 0
for j in range(int(d / 2)):
result += 2*np.cos(m * theta(j, base))
return result/np.sqrt(d)
def plot_base_selection():
# Two-panel figure with legends: (a) three candidate bases with their marked
# extrema, (b) qmkn curves for several bases, vertically offset for clarity.
plt.figure(figsize=(12,5))
axs0 = plt.subplot(121)
mn = np.array([k for k in range(1400, 2100, 1)])
x_sm = mn
base = 18000
y_sm0 = np.vectorize(qmkn)(mn, base)
axs0.plot(x_sm, y_sm0, alpha=0.6, color='#1f77b4', label='candidate 1')
axs0.scatter([1690, 1805], np.vectorize(qmkn)([1690, 1805], base), color='#1f77b4')
axs0.set_title('(a)', fontsize=16)
# Vertical offsets so the three curves do not overlap.
buff1 = 6
buff2 = 10
y_sm1 = np.vectorize(qmkn)(mn, 10000)
y_sm2 = np.vectorize(qmkn)(mn, 20000)
axs0.plot(x_sm, y_sm1+buff1, alpha=0.6, color='#8467bd', label=r'$\mathcal{B}_{c}$')
axs0.plot(x_sm, y_sm2+buff2, alpha=0.6, color='#2ca02c', label='candidate 2')
MIN = min(min(y_sm0), min(y_sm1), min(y_sm2))
MAX = max(max(y_sm0), max(y_sm1), max(y_sm2))
axs0.scatter([1707, 1835], np.vectorize(qmkn)([1707, 1835], 10000)+buff1, color='#8467bd')
axs0.scatter([1798, 1882], np.vectorize(qmkn)([1798, 1882], 20000)+buff2, color='#2ca02c')
axs0.set_ylim((MIN-3, MAX+13))
axs0.set_yticks([])
axs0.legend(loc=3)
axs1 = plt.subplot(122)
base_list = [17500, 18000, 19000, 20000, 10000, 25000]
mn = [k for k in range(900, 4200, 1)]
i = 0
for base in base_list:
buff = 3.9*i
if base == 10000:
buff += 0.6
axs1.plot(mn, np.vectorize(qmkn)(mn, base)+buff, alpha=0.8, label=str(base))
i += 1
axs1.set_yticks([])
axs1.legend(loc=3)
axs0.set_xlabel('Relative token position', fontsize=14, labelpad=10)
axs1.set_xlabel('Relative token position', fontsize=14, labelpad=10)
axs1.set_title('(b)', fontsize=16)
plt.tight_layout()
plt.savefig('test4.pdf') | null |
163,339 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import json
from xopen import xopen
import random
from scipy.signal import argrelextrema
from matplotlib.pyplot import MultipleLocator
import seaborn as sns
import palettable
def plot_ablation_heat():
# Pairwise score differences between base settings, rendered as a heatmap.
# heat.txt holds one score per line, in the order of the tick labels below.
f = [float(k.strip()) for k in open('heat.txt').readlines()]
n = len(f)
matrix = np.zeros((n, n))
for i in range(n):
for j in range(n):
matrix[i][j] = f[i] - f[j]
# Tick labels are the bases 10000-30000 (scaled by 1e4) plus the randomly
# selected and chosen base sets.
labels = ['1.00', '1.20', '1.40', '1.50', '1.65', '1.80', '1.90', '2.00', '2.25', '2.50', '3.00',
r'$\mathcal{B}_{A.S.1}$', r'$\mathcal{B}_{A.S.2}$', r'$\mathcal{B}_{c1}$', r'$\mathcal{B}_{c2}$', r'$\mathcal{B}_{c3}$']
ax = sns.heatmap(matrix, annot=True, annot_kws={"size": 6.5}, cmap='summer_r',
vmin=-0.9, vmax=0.9, center=0.05,
cbar=False,
xticklabels=labels,
yticklabels=labels)
plt.xticks(fontsize=9)
plt.yticks(fontsize=9)
plt.savefig('test.pdf') | null |
163,340 | import os
import json
import argparse
import regex
import unicodedata
import string
def normalize_answer(s):
def remove_articles(text):
return regex.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def lower(text):
return text.lower()
return white_space_fix(remove_articles(lower(s))) | null |
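Example behavior (inputs are illustrative): articles are dropped, case is folded, and whitespace is collapsed. Unlike the full SQuAD-style normalizer, this variant performs no punctuation stripping even though `string` is imported; a hypothetical `remove_punc` step would look like the sketch below.

    print(normalize_answer("The  Quick Brown  Fox"))  # "quick brown fox"

    def remove_punc(text):
        # Hypothetical extra step, mirroring the usual SQuAD normalization.
        return ''.join(ch for ch in text if ch not in set(string.punctuation))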
163,341 | import numpy as np
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `unsqueeze` function. Write a Python function `def unsqueeze(input, dims)` to solve the following problem:
Implement multi-dimension unsqueeze function.
Here is the function:
def unsqueeze(input, dims):
""" Implement multi-dimension unsqueeze function. """
if isinstance(dims, (list, tuple)):
dims = [dim if dim >= 0 else dim + len(input.shape) + 1 for dim in dims]
dims = sorted(dims, reverse=True)
shape = list(input.shape)
for dim in dims:
shape.insert(dim, 1)
return torch.reshape(input, shape)
elif isinstance(dims, int):
return input.unsqueeze(dims)
else:
raise ValueError('Warning: type(dims) must be in (list, tuple, int)!') | Implement multi-dimension unsqueeze function.
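A short usage check (shapes worked out by hand): negative dims are normalized against the output rank, then singleton axes are inserted from the highest index down.

    x = torch.zeros(3, 4)
    print(unsqueeze(x, (0, -1)).shape)  # torch.Size([1, 3, 4, 1])
    print(unsqueeze(x, 1).shape)        # torch.Size([3, 1, 4])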
163,342 | import numpy as np
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `gumbel_softmax` function. Write a Python function `def gumbel_softmax(input, tau=1, eps=1e-10, use_gpu=False)` to solve the following problem:
Basic implement of gumbel_softmax.
Here is the function:
def gumbel_softmax(input, tau=1, eps=1e-10, use_gpu=False):
""" Basic implement of gumbel_softmax. """
# Sample uniform noise with the input's dtype and device, avoiding the
# silent float64 promotion that numpy's default dtype would introduce.
U = torch.rand(input.shape, dtype=input.dtype, device=input.device)
if use_gpu:
U = U.cuda()
gumbel = 0.0 - torch.log(eps - torch.log(U + eps))
y = input + gumbel
return F.softmax(y / tau, dim=-1) | Basic implement of gumbel_softmax.
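A quick smoke test (values illustrative): lower temperatures push the output toward a one-hot sample of the input distribution.

    logits = torch.log(torch.tensor([[0.1, 0.2, 0.7]]))
    sample = gumbel_softmax(logits, tau=0.5)
    print(sample.shape, float(sample.sum()))  # torch.Size([1, 3]), row sums to ~1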
163,343 | import numpy as np
import torch
import torch.nn.functional as F
def equal(x, y, dtype=None):
""" Implement equal in dy-graph mode. """
if dtype is None:
dtype = "float32"
# detach/cpu so tensors that require grad or live on GPU can be converted.
if isinstance(x, torch.Tensor):
x = x.detach().cpu().numpy()
if isinstance(y, torch.Tensor):
y = y.detach().cpu().numpy()
out = np.equal(x, y).astype(dtype)
return torch.tensor(out)
The provided code snippet includes necessary dependencies for implementing the `not_equal` function. Write a Python function `def not_equal(x, y, dtype=None)` to solve the following problem:
Implement not_equal in dy-graph mode.
Here is the function:
def not_equal(x, y, dtype=None):
""" Implement not_equal in dy-graph mode. """
return 1 - equal(x, y, dtype) | Implement not_equal in dy-graph mode. |
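Example (illustrative tensors): the result is a float mask with 1 where elements differ.

    x = torch.tensor([1, 2, 3])
    y = torch.tensor([1, 0, 3])
    print(not_equal(x, y))  # tensor([0., 1., 0.])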
163,344 | from collections import Counter
from nltk.translate import bleu_score
from nltk.translate.bleu_score import SmoothingFunction
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `distinct` function. Write a Python function `def distinct(seqs)` to solve the following problem:
Calculate intra/inter distinct 1/2.
Here is the function:
def distinct(seqs):
""" Calculate intra/inter distinct 1/2. """
batch_size = len(seqs)
intra_dist1, intra_dist2 = [], []
unigrams_all, bigrams_all = Counter(), Counter()
for seq in seqs:
unigrams = Counter(seq)
bigrams = Counter(zip(seq, seq[1:]))
intra_dist1.append((len(unigrams)+1e-12) / (len(seq)+1e-5))
intra_dist2.append((len(bigrams)+1e-12) / (max(0, len(seq)-1)+1e-5))
unigrams_all.update(unigrams)
bigrams_all.update(bigrams)
inter_dist1 = (len(unigrams_all)+1e-12) / (sum(unigrams_all.values())+1e-5)
inter_dist2 = (len(bigrams_all)+1e-12) / (sum(bigrams_all.values())+1e-5)
intra_dist1 = np.average(intra_dist1)
intra_dist2 = np.average(intra_dist2)
return intra_dist1, intra_dist2, inter_dist1, inter_dist2 | Calculate intra/inter distinct 1/2. |
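Example on two toy token sequences: the intra metrics average per-sequence distinct ratios, while the inter metrics pool n-gram counts over the whole batch.

    seqs = [["a", "b", "a"], ["a", "a"]]
    intra1, intra2, inter1, inter2 = distinct(seqs)
    print(round(inter1, 3))  # 2 unique unigrams / 5 tokens = 0.4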
163,345 | from collections import Counter
from nltk.translate import bleu_score
from nltk.translate.bleu_score import SmoothingFunction
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `bleu` function. Write a Python function `def bleu(hyps, refs)` to solve the following problem:
Calculate bleu 1/2.
Here is the function:
def bleu(hyps, refs):
""" Calculate bleu 1/2. """
bleu_1 = []
bleu_2 = []
for hyp, ref in zip(hyps, refs):
try:
score = bleu_score.sentence_bleu(
[ref], hyp,
smoothing_function=SmoothingFunction().method7,
weights=[1, 0, 0, 0])
except Exception:
# Empty or degenerate sequences can make NLTK raise; score them 0.
score = 0
bleu_1.append(score)
try:
score = bleu_score.sentence_bleu(
[ref], hyp,
smoothing_function=SmoothingFunction().method7,
weights=[0.5, 0.5, 0, 0])
except Exception:
score = 0
bleu_2.append(score)
bleu_1 = np.average(bleu_1)
bleu_2 = np.average(bleu_2)
return bleu_1, bleu_2 | Calculate bleu 1/2. |
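Usage sketch with whitespace-tokenized strings (hypothetical sentences): `hyps` and `refs` are parallel lists of token lists, and the function returns the batch-averaged BLEU-1 and BLEU-2.

    hyps = ["the cat sat".split()]
    refs = ["the cat sat down".split()]
    b1, b2 = bleu(hyps, refs)
    print(b1, b2)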
163,346 |
The provided code snippet includes necessary dependencies for implementing the `batch` function. Write a Python function `def batch(reader, batch_size, drop_last=False)` to solve the following problem:
This operator creates a batched reader which combines the data from the input reader to batched data. Args: reader(generator): the data reader to read from. batch_size(int): size of each mini-batch. drop_last(bool, optional): If set to True, the last batch is dropped when the size of last batch is not equal to batch_size, if set to False, it will not. Default: False. Returns: The batched reader. Return Type: generator
Here is the function:
def batch(reader, batch_size, drop_last=False):
"""
This operator creates a batched reader which combines the data from the
input reader to batched data.
Args:
reader(generator): the data reader to read from.
batch_size(int): size of each mini-batch.
drop_last(bool, optional): If set to True, the last batch is dropped when
the size of last batch is not equal to batch_size, if set to False,
it will not. Default: False.
Returns:
The batched reader.
Return Type:
generator
"""
def batch_reader():
r = reader()
b = []
for instance in r:
b.append(instance)
if len(b) == batch_size:
yield b
b = []
if not drop_last and len(b) != 0:
yield b
# Validate batch_size when batch() is called, before the reader is consumed.
batch_size = int(batch_size)
if batch_size <= 0:
raise ValueError("batch_size should be a positive integral value, "
"but got batch_size={}".format(batch_size))
return batch_reader | This operator creates a batched reader which combines the data from the input reader to batched data. Args: reader(generator): the data reader to read from. batch_size(int): size of each mini-batch. drop_last(bool, optional): If set to True, the last batch is dropped when the size of last batch is not equal to batch_size, if set to False, it will not. Default: False. Returns: The batched reader. Return Type: generator |
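Usage sketch: `batch` returns a generator factory, so the result is called once more to obtain the actual iterator.

    def reader():
        for i in range(7):
            yield i

    for b in batch(reader, batch_size=3)():
        print(b)  # [0, 1, 2] then [3, 4, 5] then [6]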
163,347 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import os
import regex as re
import sys
import unicodedata
def clean_string(string):
replace_mp = {
" - ": "-",
" ' ": "'",
" n't": "n't",
" 'm": "'m",
" do not": " don't",
" 's": "'s",
" 've": "'ve",
" 're": "'re"
}
for k, v in replace_mp.items():
string = string.replace(k, v)
return string | null |
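Example (illustrative input): detokenization artifacts around contractions are collapsed, while substrings not in the replacement map are left alone.

    print(clean_string("I do not know , it 's fine"))  # "I don't know , it's fine"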