id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
165,129 | from typing import Dict,List
import pandas as pd
def tapex_post_prepare():
pass | null |
165,130 | from typing import Dict,List
import pandas as pd
def dic2prompt(dic:Dict):
prompt = ''
pass | null |
165,131 | import os
from tqdm import tqdm
import requests
The provided code snippet includes necessary dependencies for implementing the `download_file` function. Write a Python function `def download_file(url, download_dir=None)` to solve the following problem:
Download file into local file system from url
Here is the function:
def download_file(url, download_dir=None):
    """
    Download file into local file system from url.

    Args:
        url: Source URL; the final path component is used as the file name.
        download_dir: Target directory. Defaults to the current directory;
            created (with parents) if it does not exist.

    Returns:
        Absolute path of the downloaded file.
    """
    local_filename = url.split('/')[-1]
    if download_dir is None:
        download_dir = os.curdir
    elif not os.path.exists(download_dir):
        os.makedirs(download_dir)
    file_name = os.path.join(download_dir, local_filename)
    with requests.get(url, stream=True) as r:
        # "wb" truncates an existing file, so no explicit os.remove is needed.
        with open(file_name, "wb") as write_f:
            # Stream in 64 KiB chunks: the default iter_content() yields one
            # byte per iteration, which is extremely slow for large files.
            for data in tqdm(r.iter_content(chunk_size=65536)):
                write_f.write(data)
    return os.path.abspath(file_name)
165,132 | import pandas as pd
import numpy as np
import argparse
import os
import time
import json
import copy
from typing import List, Dict
import random
import multiprocessing
print(ROOT_DIR)
import sys
from gloc.generation.generator import Generator
from gloc.utils import dict2df
class Generator(object):
    """
    CodeX generation wrapper.

    Builds few-shot prompts from a demonstration file (cached per file/shot
    count), builds per-example generation prompts via a PromptBuilder, and
    calls the OpenAI Completion API with round-robin key rotation.
    """

    def __init__(self, args, keys=None) -> None:
        # args: config namespace (engine, sampling_n, temperature, ...);
        # keys: list of OpenAI API keys rotated per request.
        self.args = args
        self.keys = keys
        self.current_key_id = 0
        # maps "<file_path>_shot<n>" -> assembled few-shot prompt string
        self._few_shot_prompt_cache = dict()
        # if the args provided, will initialize with the prompt builder for full usage
        # NOTE(review): PromptBuilder is not defined/imported in this snippet.
        self.prompt_builder = PromptBuilder(args) if args else None

    def build_few_shot_prompt_from_file(
        self,
        file_path: str,
        n_shots: int
    ):
        """
        Build few-shot prompt for generation from file.

        Demonstrations in the file are separated by two consecutive blank
        lines; only the first n_shots are kept. Results are cached.
        """
        key = file_path + '_shot' + str(n_shots)
        if key in self._few_shot_prompt_cache.keys():
            return self._few_shot_prompt_cache[key]
        with open(file_path, 'r') as f:
            lines = f.readlines()
        few_shot_prompt_list = []
        one_shot_prompt = ''
        last_line = None
        for line in lines:
            # two blank lines in a row close the current demonstration
            if line == '\n' and last_line == '\n':
                few_shot_prompt_list.append(one_shot_prompt)
                one_shot_prompt = ''
            else:
                one_shot_prompt += line
            last_line = line
        few_shot_prompt_list.append(one_shot_prompt)
        few_shot_prompt_list = few_shot_prompt_list[:n_shots]
        few_shot_prompt_list[-1] = few_shot_prompt_list[
            -1].strip()  # It is essential for prompting to remove extra '\n'
        few_shot_prompt = '\n'.join(few_shot_prompt_list)
        self._few_shot_prompt_cache[key] = few_shot_prompt
        return few_shot_prompt

    def build_generate_prompt(
        self,
        data_item: Dict,
        num_rows: int,
        select_type: str
    ):
        """
        Build the generate prompt by delegating to the PromptBuilder;
        data_item's keys are forwarded as keyword arguments.
        """
        return self.prompt_builder.build_generate_prompt(
            **data_item,
            num_rows=num_rows,
            select_type=select_type
        )

    def generate_one_pass(
        self,
        prompts: List[Tuple],
        verbose: bool = False
    ):
        """
        Generate one pass with codex according to the generation phase.

        prompts: list of (eid, prompt_text) pairs. Returns
        {eid: [(text, sum_logprob, mean_logprob), ...]} with
        args.sampling_n samples per prompt.
        NOTE(review): `Tuple` is not imported here (only List, Dict).
        """
        result_idx_to_eid = []
        for p in prompts:
            # each prompt produces sampling_n consecutive choices
            result_idx_to_eid.extend([p[0]] * self.args.sampling_n)
        prompts = [p[1] for p in prompts]
        result = self._call_codex_api(
            engine=self.args.engine,
            prompt=prompts,
            max_tokens=self.args.max_generation_tokens,
            temperature=self.args.temperature,
            top_p=self.args.top_p,
            n=self.args.sampling_n,
            stop=self.args.stop_tokens
        )
        if verbose:
            print('\n', '*' * 20, 'Codex API Call', '*' * 20)
            for prompt in prompts:
                print(prompt)
                print('\n')
            print('- - - - - - - - - - ->>')
        # parse api results
        response_dict = dict()
        for idx, g in enumerate(result['choices']):
            try:
                text = g['text']
                logprob = sum(g['logprobs']['token_logprobs'])
                eid = result_idx_to_eid[idx]
                eid_pairs = response_dict.get(eid, None)
                if eid_pairs is None:
                    eid_pairs = []
                    response_dict[eid] = eid_pairs
                eid_pairs.append((text, logprob, np.mean(g['logprobs']['token_logprobs'])))
                if verbose:
                    print(text)
            except ValueError as e:
                if verbose:
                    print('----------- Error Msg--------')
                    print(e)
                    print(text)
                    print('-----------------------------')
                pass
        return response_dict

    def _call_codex_api(
        self,
        engine: str,
        prompt: Union[str, List],
        max_tokens,
        temperature: float,
        top_p: float,
        n: int,
        stop: List[str]
    ):
        """
        Call the OpenAI Completion endpoint, retrying on any failure.

        Rotates self.keys round-robin and sleeps 20s between retries.
        NOTE(review): `openai` and `Union` are not imported in this snippet.
        """
        start_time = time.time()
        result = None
        while result is None:
            try:
                key = self.keys[self.current_key_id]
                self.current_key_id = (self.current_key_id + 1) % len(self.keys)
                print(f"Using openai api key: {key}")
                result = openai.Completion.create(
                    engine=engine,
                    prompt=prompt,
                    api_key=key,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    n=n,
                    stop=stop,
                    logprobs=1  # required for the logprob-based ranking above
                )
                print('Openai api inference time:', time.time() - start_time)
                return result
            except Exception as e:
                print(e, 'Retry.')
                time.sleep(20)
def dict2df(table: Dict, add_row_id=False, lower_case=True):
    """
    Convert a tapex-format table (first row is the header, remaining rows
    are data) into a pd.DataFrame.

    Note: `add_row_id` and `lower_case` are accepted for interface
    compatibility but are not used by this implementation.
    """
    return pd.DataFrame(data=table[1:], columns=table[0])
The provided code snippet includes necessary dependencies for implementing the `worker` function. Write a Python function `def worker( pid: int, args, generator: Generator, g_eids: List, dataset: List[Dict], tokenizer )` to solve the following problem:
A worker process for annotating.
Here is the function:
def worker(
    pid: int,
    args,
    generator: Generator,
    g_eids: List,
    dataset: List[Dict],
    tokenizer
):
    """
    A worker process for annotating.

    Builds a few-shot + generate prompt for each example id in g_eids,
    batches args.n_parallel_prompts prompts per API call, and collects
    generations (sorted by summed logprob, best first) into:
        {eid: {'generations': [(text, logprob, mean_logprob), ...],
               'data_item': <deep copy of the dataset entry>}}
    """
    generation_dict = dict()
    built_few_shot_prompts = []
    for g_eid in g_eids:
        try:
            data_item = dataset[g_eid]
            generation_dict[g_eid] = {
                'generations': [],
                'data_item': copy.deepcopy(data_item)
            }
            n_shots = args.n_shots
            few_shot_prompt = generator.build_few_shot_prompt_from_file(
                file_path=args.prompt_file,
                n_shots=n_shots
            )
            # print(data_item)
            # print(data_item['statement'])
            # print(data_item['table_caption'])
            # print(type(dict2df(data_item['table_text'])))
            generate_prompt = generator.build_generate_prompt(
                data_item={
                    'question': data_item['statement'],
                    'title': data_item['table_caption'] if 'table_caption' in data_item.keys() else None,
                    'table': dict2df(data_item['table_text'])
                },
                num_rows=args.num_rows,
                select_type=args.select_type
            )
            # print("there!!!!!!!!!")
            prompt = few_shot_prompt + '\n\n' + generate_prompt
            # token budget left for the prompt after reserving generation tokens
            max_prompt_tokens = args.max_api_total_tokens - args.max_generation_tokens
            # drop shots one at a time until the prompt fits the token budget
            while len(tokenizer.tokenize(prompt)) >= max_prompt_tokens:  # TODO: Add shrink rows
                n_shots -= 1
                assert n_shots >= 0
                few_shot_prompt = generator.build_few_shot_prompt_from_file(
                    file_path=args.prompt_file,
                    n_shots=n_shots
                )
                prompt = few_shot_prompt + "\n\n" + generate_prompt
            print("*"*80)
            print(prompt)
            built_few_shot_prompts.append((g_eid, prompt))
            print(f"Process#{pid}: Building prompt for eid#{g_eid}, original_id#{data_item['statement']}")
            # wait until a full batch of prompts is ready before calling the API
            if len(built_few_shot_prompts) < args.n_parallel_prompts:
                continue
            print(f"Process#{pid}: Prompts ready with {len(built_few_shot_prompts)} parallels. Run openai API.")
            response_dict = generator.generate_one_pass(
                prompts=built_few_shot_prompts,
                verbose=args.verbose
            )
            for eid, g_pairs in response_dict.items():
                # rank generations by summed logprob, best first
                g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
                generation_dict[eid]['generations'] = g_pairs
            built_few_shot_prompts = []
        except Exception as e:
            print(f"Process#{pid}: eid#{g_eid}, wtqid#{data_item['statement']} generation error: {e}")
    # Final generation inference for any remaining partial batch
    if len(built_few_shot_prompts) > 0:
        response_dict = generator.generate_one_pass(
            prompts=built_few_shot_prompts,
            verbose=args.verbose
        )
        for eid, g_pairs in response_dict.items():
            g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
            generation_dict[eid]['generations'] = g_pairs
    return generation_dict
165,134 | import pandas as pd
import numpy as np
import argparse
import os
import time
import json
import copy
from typing import List, Dict
import random
import multiprocessing
print(ROOT_DIR)
import sys
from gloc.generation.generator import Generator
from gloc.utils import dict2df
class Generator(object):
    """
    CodeX generation wrapper.

    Builds few-shot prompts from a demonstration file (cached per file/shot
    count), builds per-example generation prompts via a PromptBuilder, and
    calls the OpenAI Completion API with round-robin key rotation.
    """

    def __init__(self, args, keys=None) -> None:
        # args: config namespace (engine, sampling_n, temperature, ...);
        # keys: list of OpenAI API keys rotated per request.
        self.args = args
        self.keys = keys
        self.current_key_id = 0
        # maps "<file_path>_shot<n>" -> assembled few-shot prompt string
        self._few_shot_prompt_cache = dict()
        # if the args provided, will initialize with the prompt builder for full usage
        # NOTE(review): PromptBuilder is not defined/imported in this snippet.
        self.prompt_builder = PromptBuilder(args) if args else None

    def build_few_shot_prompt_from_file(
        self,
        file_path: str,
        n_shots: int
    ):
        """
        Build few-shot prompt for generation from file.

        Demonstrations in the file are separated by two consecutive blank
        lines; only the first n_shots are kept. Results are cached.
        """
        key = file_path + '_shot' + str(n_shots)
        if key in self._few_shot_prompt_cache.keys():
            return self._few_shot_prompt_cache[key]
        with open(file_path, 'r') as f:
            lines = f.readlines()
        few_shot_prompt_list = []
        one_shot_prompt = ''
        last_line = None
        for line in lines:
            # two blank lines in a row close the current demonstration
            if line == '\n' and last_line == '\n':
                few_shot_prompt_list.append(one_shot_prompt)
                one_shot_prompt = ''
            else:
                one_shot_prompt += line
            last_line = line
        few_shot_prompt_list.append(one_shot_prompt)
        few_shot_prompt_list = few_shot_prompt_list[:n_shots]
        few_shot_prompt_list[-1] = few_shot_prompt_list[
            -1].strip()  # It is essential for prompting to remove extra '\n'
        few_shot_prompt = '\n'.join(few_shot_prompt_list)
        self._few_shot_prompt_cache[key] = few_shot_prompt
        return few_shot_prompt

    def build_generate_prompt(
        self,
        data_item: Dict,
        num_rows: int,
        select_type: str
    ):
        """
        Build the generate prompt by delegating to the PromptBuilder;
        data_item's keys are forwarded as keyword arguments.
        """
        return self.prompt_builder.build_generate_prompt(
            **data_item,
            num_rows=num_rows,
            select_type=select_type
        )

    def generate_one_pass(
        self,
        prompts: List[Tuple],
        verbose: bool = False
    ):
        """
        Generate one pass with codex according to the generation phase.

        prompts: list of (eid, prompt_text) pairs. Returns
        {eid: [(text, sum_logprob, mean_logprob), ...]} with
        args.sampling_n samples per prompt.
        NOTE(review): `Tuple` is not imported here (only List, Dict).
        """
        result_idx_to_eid = []
        for p in prompts:
            # each prompt produces sampling_n consecutive choices
            result_idx_to_eid.extend([p[0]] * self.args.sampling_n)
        prompts = [p[1] for p in prompts]
        result = self._call_codex_api(
            engine=self.args.engine,
            prompt=prompts,
            max_tokens=self.args.max_generation_tokens,
            temperature=self.args.temperature,
            top_p=self.args.top_p,
            n=self.args.sampling_n,
            stop=self.args.stop_tokens
        )
        if verbose:
            print('\n', '*' * 20, 'Codex API Call', '*' * 20)
            for prompt in prompts:
                print(prompt)
                print('\n')
            print('- - - - - - - - - - ->>')
        # parse api results
        response_dict = dict()
        for idx, g in enumerate(result['choices']):
            try:
                text = g['text']
                logprob = sum(g['logprobs']['token_logprobs'])
                eid = result_idx_to_eid[idx]
                eid_pairs = response_dict.get(eid, None)
                if eid_pairs is None:
                    eid_pairs = []
                    response_dict[eid] = eid_pairs
                eid_pairs.append((text, logprob, np.mean(g['logprobs']['token_logprobs'])))
                if verbose:
                    print(text)
            except ValueError as e:
                if verbose:
                    print('----------- Error Msg--------')
                    print(e)
                    print(text)
                    print('-----------------------------')
                pass
        return response_dict

    def _call_codex_api(
        self,
        engine: str,
        prompt: Union[str, List],
        max_tokens,
        temperature: float,
        top_p: float,
        n: int,
        stop: List[str]
    ):
        """
        Call the OpenAI Completion endpoint, retrying on any failure.

        Rotates self.keys round-robin and sleeps 20s between retries.
        NOTE(review): `openai` and `Union` are not imported in this snippet.
        """
        start_time = time.time()
        result = None
        while result is None:
            try:
                key = self.keys[self.current_key_id]
                self.current_key_id = (self.current_key_id + 1) % len(self.keys)
                print(f"Using openai api key: {key}")
                result = openai.Completion.create(
                    engine=engine,
                    prompt=prompt,
                    api_key=key,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    n=n,
                    stop=stop,
                    logprobs=1  # required for the logprob-based ranking above
                )
                print('Openai api inference time:', time.time() - start_time)
                return result
            except Exception as e:
                print(e, 'Retry.')
                time.sleep(20)
def dict2df(table: Dict, add_row_id=False, lower_case=True):
    """
    Convert a tapex-format table (first row is the header, remaining rows
    are data) into a pd.DataFrame.

    Note: `add_row_id` and `lower_case` are accepted for interface
    compatibility but are not used by this implementation.
    """
    return pd.DataFrame(data=table[1:], columns=table[0])
The provided code snippet includes necessary dependencies for implementing the `worker` function. Write a Python function `def worker( pid: int, args, generator: Generator, g_eids: List, dataset: List[Dict], tokenizer )` to solve the following problem:
A worker process for annotating.
Here is the function:
def worker(
    pid: int,
    args,
    generator: Generator,
    g_eids: List,
    dataset: List[Dict],
    tokenizer
):
    """
    A worker process for annotating.

    Builds a few-shot + generate prompt for each example id in g_eids,
    batches args.n_parallel_prompts prompts per API call, and collects
    generations (sorted by summed logprob, best first) into:
        {eid: {'generations': [(text, logprob, mean_logprob), ...],
               'data_item': <deep copy of the dataset entry>}}
    """
    generation_dict = dict()
    built_few_shot_prompts = []
    for g_eid in g_eids:
        try:
            data_item = dataset[g_eid]
            generation_dict[g_eid] = {
                'generations': [],
                'data_item': copy.deepcopy(data_item)
            }
            n_shots = args.n_shots
            few_shot_prompt = generator.build_few_shot_prompt_from_file(
                file_path=args.prompt_file,
                n_shots=n_shots
            )
            generate_prompt = generator.build_generate_prompt(
                data_item={
                    'question': data_item['statement'],
                    'title': data_item['table_caption'] if 'table_caption' in data_item.keys() else None,
                    'table': dict2df(data_item['table_text'])
                },
                num_rows=args.num_rows,
                select_type=args.select_type
            )
            # print("there!!!!!!!!!")
            prompt = few_shot_prompt + '\n\n' + generate_prompt
            # token budget left for the prompt after reserving generation tokens
            max_prompt_tokens = args.max_api_total_tokens - args.max_generation_tokens
            # drop shots one at a time until the prompt fits the token budget
            while len(tokenizer.tokenize(prompt)) >= max_prompt_tokens:  # TODO: Add shrink rows
                n_shots -= 1
                assert n_shots >= 0
                few_shot_prompt = generator.build_few_shot_prompt_from_file(
                    file_path=args.prompt_file,
                    n_shots=n_shots
                )
                prompt = few_shot_prompt + "\n\n" + generate_prompt
            # print("*"*80)
            # print(prompt)
            built_few_shot_prompts.append((g_eid, prompt))
            print(f"Process#{pid}: Building prompt for eid#{g_eid}, original_id#{data_item['statement']}")
            # wait until a full batch of prompts is ready before calling the API
            if len(built_few_shot_prompts) < args.n_parallel_prompts:
                continue
            print(f"Process#{pid}: Prompts ready with {len(built_few_shot_prompts)} parallels. Run openai API.")
            response_dict = generator.generate_one_pass(
                prompts=built_few_shot_prompts,
                verbose=args.verbose
            )
            for eid, g_pairs in response_dict.items():
                # rank generations by summed logprob, best first
                g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
                generation_dict[eid]['generations'] = g_pairs
            built_few_shot_prompts = []
        except Exception as e:
            print(f"Process#{pid}: eid#{g_eid}, wtqid#{data_item['statement']} generation error: {e}")
    # Final generation inference for any remaining partial batch
    if len(built_few_shot_prompts) > 0:
        response_dict = generator.generate_one_pass(
            prompts=built_few_shot_prompts,
            verbose=args.verbose
        )
        for eid, g_pairs in response_dict.items():
            g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
            generation_dict[eid]['generations'] = g_pairs
    return generation_dict
165,135 | import pandas as pd
import numpy as np
import argparse
import os
import time
import json
import copy
from typing import List, Dict
import random
import multiprocessing
import re
import collections
print(ROOT_DIR)
import sys
from gloc.generation.generator import Generator
from gloc.utils import dict2df
def merge_res(dic):
    """
    Merge sampled generations per example by self-consistency voting and
    report quasi-exact-match accuracy.

    Args:
        dic: maps example id -> {
            'data_item': {... 'answer': [gold_answer, ...] ...},
            'generations': [(text, sum_logprob, mean_logprob), ...],
        }

    Each generation's answer is the text after the fixed marker
    'therefore,the answer is :'; identical answers accumulate weight
    exp(mean_logprob), and the highest-weight answer is compared
    (case- and whitespace-insensitively) against the first gold answer.

    Returns:
        The accuracy as a float (also printed). Returning the value is a
        backward-compatible addition (the original returned None).
    """
    correct = 0.
    for eid in dic:
        item = dic[eid]
        # accumulated weight per distinct predicted answer (self-consistency vote)
        vote = collections.defaultdict(float)
        for text, _, log_prob_mean in item['generations']:
            # keep only the answer span after the fixed marker
            answer = text.split('therefore,the answer is :')[-1]
            vote[answer] += np.exp(log_prob_mean)
        ranked = sorted(vote.items(), key=lambda x: x[1], reverse=True)
        try:
            pred_answer = ranked[0][0]
        except Exception:
            # CodeX produced no generations for this example
            pred_answer = 'error'
        gt = item['data_item']['answer'][0].strip().lower()
        if gt == pred_answer.strip().lower():
            correct += 1
    # guard against division by zero on an empty result dict
    acc = correct / len(dic) if dic else 0.0
    print('Quasi-exact match ACC:', acc)
    return acc
165,136 | import pandas as pd
import numpy as np
import argparse
import os
import time
import json
import copy
from typing import List, Dict
import random
import multiprocessing
import re
import collections
print(ROOT_DIR)
import sys
from gloc.generation.generator import Generator
from gloc.utils import dict2df
class Generator(object):
    """
    CodeX generation wrapper.

    Builds few-shot prompts from a demonstration file (cached per file/shot
    count), builds per-example generation prompts via a PromptBuilder, and
    calls the OpenAI Completion API with round-robin key rotation.
    """

    def __init__(self, args, keys=None) -> None:
        # args: config namespace (engine, sampling_n, temperature, ...);
        # keys: list of OpenAI API keys rotated per request.
        self.args = args
        self.keys = keys
        self.current_key_id = 0
        # maps "<file_path>_shot<n>" -> assembled few-shot prompt string
        self._few_shot_prompt_cache = dict()
        # if the args provided, will initialize with the prompt builder for full usage
        # NOTE(review): PromptBuilder is not defined/imported in this snippet.
        self.prompt_builder = PromptBuilder(args) if args else None

    def build_few_shot_prompt_from_file(
        self,
        file_path: str,
        n_shots: int
    ):
        """
        Build few-shot prompt for generation from file.

        Demonstrations in the file are separated by two consecutive blank
        lines; only the first n_shots are kept. Results are cached.
        """
        key = file_path + '_shot' + str(n_shots)
        if key in self._few_shot_prompt_cache.keys():
            return self._few_shot_prompt_cache[key]
        with open(file_path, 'r') as f:
            lines = f.readlines()
        few_shot_prompt_list = []
        one_shot_prompt = ''
        last_line = None
        for line in lines:
            # two blank lines in a row close the current demonstration
            if line == '\n' and last_line == '\n':
                few_shot_prompt_list.append(one_shot_prompt)
                one_shot_prompt = ''
            else:
                one_shot_prompt += line
            last_line = line
        few_shot_prompt_list.append(one_shot_prompt)
        few_shot_prompt_list = few_shot_prompt_list[:n_shots]
        few_shot_prompt_list[-1] = few_shot_prompt_list[
            -1].strip()  # It is essential for prompting to remove extra '\n'
        few_shot_prompt = '\n'.join(few_shot_prompt_list)
        self._few_shot_prompt_cache[key] = few_shot_prompt
        return few_shot_prompt

    def build_generate_prompt(
        self,
        data_item: Dict,
        num_rows: int,
        select_type: str
    ):
        """
        Build the generate prompt by delegating to the PromptBuilder;
        data_item's keys are forwarded as keyword arguments.
        """
        return self.prompt_builder.build_generate_prompt(
            **data_item,
            num_rows=num_rows,
            select_type=select_type
        )

    def generate_one_pass(
        self,
        prompts: List[Tuple],
        verbose: bool = False
    ):
        """
        Generate one pass with codex according to the generation phase.

        prompts: list of (eid, prompt_text) pairs. Returns
        {eid: [(text, sum_logprob, mean_logprob), ...]} with
        args.sampling_n samples per prompt.
        NOTE(review): `Tuple` is not imported here (only List, Dict).
        """
        result_idx_to_eid = []
        for p in prompts:
            # each prompt produces sampling_n consecutive choices
            result_idx_to_eid.extend([p[0]] * self.args.sampling_n)
        prompts = [p[1] for p in prompts]
        result = self._call_codex_api(
            engine=self.args.engine,
            prompt=prompts,
            max_tokens=self.args.max_generation_tokens,
            temperature=self.args.temperature,
            top_p=self.args.top_p,
            n=self.args.sampling_n,
            stop=self.args.stop_tokens
        )
        if verbose:
            print('\n', '*' * 20, 'Codex API Call', '*' * 20)
            for prompt in prompts:
                print(prompt)
                print('\n')
            print('- - - - - - - - - - ->>')
        # parse api results
        response_dict = dict()
        for idx, g in enumerate(result['choices']):
            try:
                text = g['text']
                logprob = sum(g['logprobs']['token_logprobs'])
                eid = result_idx_to_eid[idx]
                eid_pairs = response_dict.get(eid, None)
                if eid_pairs is None:
                    eid_pairs = []
                    response_dict[eid] = eid_pairs
                eid_pairs.append((text, logprob, np.mean(g['logprobs']['token_logprobs'])))
                if verbose:
                    print(text)
            except ValueError as e:
                if verbose:
                    print('----------- Error Msg--------')
                    print(e)
                    print(text)
                    print('-----------------------------')
                pass
        return response_dict

    def _call_codex_api(
        self,
        engine: str,
        prompt: Union[str, List],
        max_tokens,
        temperature: float,
        top_p: float,
        n: int,
        stop: List[str]
    ):
        """
        Call the OpenAI Completion endpoint, retrying on any failure.

        Rotates self.keys round-robin and sleeps 20s between retries.
        NOTE(review): `openai` and `Union` are not imported in this snippet.
        """
        start_time = time.time()
        result = None
        while result is None:
            try:
                key = self.keys[self.current_key_id]
                self.current_key_id = (self.current_key_id + 1) % len(self.keys)
                print(f"Using openai api key: {key}")
                result = openai.Completion.create(
                    engine=engine,
                    prompt=prompt,
                    api_key=key,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    n=n,
                    stop=stop,
                    logprobs=1  # required for the logprob-based ranking above
                )
                print('Openai api inference time:', time.time() - start_time)
                return result
            except Exception as e:
                print(e, 'Retry.')
                time.sleep(20)
def dict2df(table: Dict, add_row_id=False, lower_case=True):
    """
    Convert a tapex-format table (first row is the header, remaining rows
    are data) into a pd.DataFrame.

    Note: `add_row_id` and `lower_case` are accepted for interface
    compatibility but are not used by this implementation.
    """
    return pd.DataFrame(data=table[1:], columns=table[0])
The provided code snippet includes necessary dependencies for implementing the `worker` function. Write a Python function `def worker( pid: int, args, generator: Generator, g_eids: List, dataset: List[Dict], tokenizer )` to solve the following problem:
A worker process for annotating.
Here is the function:
def worker(
    pid: int,
    args,
    generator: Generator,
    g_eids: List,
    dataset: List[Dict],
    tokenizer
):
    """
    A worker process for annotating.

    Builds a few-shot + generate prompt for each example id in g_eids,
    batches args.n_parallel_prompts prompts per API call, and collects
    generations (sorted by summed logprob, best first) into:
        {eid: {'generations': [(text, logprob, mean_logprob), ...],
               'data_item': <deep copy of the dataset entry>}}
    """
    generation_dict = dict()
    built_few_shot_prompts = []
    for g_eid in g_eids:
        try:
            data_item = dataset[g_eid]
            generation_dict[g_eid] = {
                'generations': [],
                'data_item': copy.deepcopy(data_item)
            }
            n_shots = args.n_shots
            few_shot_prompt = generator.build_few_shot_prompt_from_file(
                file_path=args.prompt_file,
                n_shots=n_shots
            )
            # print(data_item)
            # print(data_item['statement'])
            # print(data_item['table_caption'])
            # print(type(dict2df(data_item['table_text'])))
            generate_prompt = generator.build_generate_prompt(
                data_item={
                    'question': data_item['statement'],
                    'title': data_item['table_caption'] if 'table_caption' in data_item.keys() else None,
                    'table': dict2df(data_item['table_text'])
                },
                num_rows=args.num_rows,
                select_type=args.select_type
            )
            # print("there!!!!!!!!!")
            prompt = few_shot_prompt + '\n\n' + generate_prompt
            # token budget left for the prompt after reserving generation tokens
            max_prompt_tokens = args.max_api_total_tokens - args.max_generation_tokens
            # drop shots one at a time until the prompt fits the token budget
            while len(tokenizer.tokenize(prompt)) >= max_prompt_tokens:  # TODO: Add shrink rows
                n_shots -= 1
                assert n_shots >= 0
                few_shot_prompt = generator.build_few_shot_prompt_from_file(
                    file_path=args.prompt_file,
                    n_shots=n_shots
                )
                prompt = few_shot_prompt + "\n\n" + generate_prompt
            # print("*"*80)
            print(generate_prompt)
            built_few_shot_prompts.append((g_eid, prompt))
            print(f"Process#{pid}: Building prompt for eid#{g_eid}, original_id#{data_item['statement']}")
            # wait until a full batch of prompts is ready before calling the API
            if len(built_few_shot_prompts) < args.n_parallel_prompts:
                continue
            print(f"Process#{pid}: Prompts ready with {len(built_few_shot_prompts)} parallels. Run openai API.")
            response_dict = generator.generate_one_pass(
                prompts=built_few_shot_prompts,
                verbose=args.verbose
            )
            for eid, g_pairs in response_dict.items():
                # rank generations by summed logprob, best first
                g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
                generation_dict[eid]['generations'] = g_pairs
            built_few_shot_prompts = []
        except Exception as e:
            print(f"Process#{pid}: eid#{g_eid}, wtqid#{data_item['statement']} generation error: {e}")
    # Final generation inference for any remaining partial batch
    if len(built_few_shot_prompts) > 0:
        response_dict = generator.generate_one_pass(
            prompts=built_few_shot_prompts,
            verbose=args.verbose
        )
        for eid, g_pairs in response_dict.items():
            g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
            generation_dict[eid]['generations'] = g_pairs
    return generation_dict
165,137 | import pandas as pd
import numpy as np
import argparse
import os
import time
import json
import copy
from typing import List, Dict
import random
import multiprocessing
print(ROOT_DIR)
import sys
from gloc.generation.generator import Generator
from gloc.utils import dict2df
class Generator(object):
    """
    CodeX generation wrapper.

    Builds few-shot prompts from a demonstration file (cached per file/shot
    count), builds per-example generation prompts via a PromptBuilder, and
    calls the OpenAI Completion API with round-robin key rotation.
    """

    def __init__(self, args, keys=None) -> None:
        # args: config namespace (engine, sampling_n, temperature, ...);
        # keys: list of OpenAI API keys rotated per request.
        self.args = args
        self.keys = keys
        self.current_key_id = 0
        # maps "<file_path>_shot<n>" -> assembled few-shot prompt string
        self._few_shot_prompt_cache = dict()
        # if the args provided, will initialize with the prompt builder for full usage
        # NOTE(review): PromptBuilder is not defined/imported in this snippet.
        self.prompt_builder = PromptBuilder(args) if args else None

    def build_few_shot_prompt_from_file(
        self,
        file_path: str,
        n_shots: int
    ):
        """
        Build few-shot prompt for generation from file.

        Demonstrations in the file are separated by two consecutive blank
        lines; only the first n_shots are kept. Results are cached.
        """
        key = file_path + '_shot' + str(n_shots)
        if key in self._few_shot_prompt_cache.keys():
            return self._few_shot_prompt_cache[key]
        with open(file_path, 'r') as f:
            lines = f.readlines()
        few_shot_prompt_list = []
        one_shot_prompt = ''
        last_line = None
        for line in lines:
            # two blank lines in a row close the current demonstration
            if line == '\n' and last_line == '\n':
                few_shot_prompt_list.append(one_shot_prompt)
                one_shot_prompt = ''
            else:
                one_shot_prompt += line
            last_line = line
        few_shot_prompt_list.append(one_shot_prompt)
        few_shot_prompt_list = few_shot_prompt_list[:n_shots]
        few_shot_prompt_list[-1] = few_shot_prompt_list[
            -1].strip()  # It is essential for prompting to remove extra '\n'
        few_shot_prompt = '\n'.join(few_shot_prompt_list)
        self._few_shot_prompt_cache[key] = few_shot_prompt
        return few_shot_prompt

    def build_generate_prompt(
        self,
        data_item: Dict,
        num_rows: int,
        select_type: str
    ):
        """
        Build the generate prompt by delegating to the PromptBuilder;
        data_item's keys are forwarded as keyword arguments.
        """
        return self.prompt_builder.build_generate_prompt(
            **data_item,
            num_rows=num_rows,
            select_type=select_type
        )

    def generate_one_pass(
        self,
        prompts: List[Tuple],
        verbose: bool = False
    ):
        """
        Generate one pass with codex according to the generation phase.

        prompts: list of (eid, prompt_text) pairs. Returns
        {eid: [(text, sum_logprob, mean_logprob), ...]} with
        args.sampling_n samples per prompt.
        NOTE(review): `Tuple` is not imported here (only List, Dict).
        """
        result_idx_to_eid = []
        for p in prompts:
            # each prompt produces sampling_n consecutive choices
            result_idx_to_eid.extend([p[0]] * self.args.sampling_n)
        prompts = [p[1] for p in prompts]
        result = self._call_codex_api(
            engine=self.args.engine,
            prompt=prompts,
            max_tokens=self.args.max_generation_tokens,
            temperature=self.args.temperature,
            top_p=self.args.top_p,
            n=self.args.sampling_n,
            stop=self.args.stop_tokens
        )
        if verbose:
            print('\n', '*' * 20, 'Codex API Call', '*' * 20)
            for prompt in prompts:
                print(prompt)
                print('\n')
            print('- - - - - - - - - - ->>')
        # parse api results
        response_dict = dict()
        for idx, g in enumerate(result['choices']):
            try:
                text = g['text']
                logprob = sum(g['logprobs']['token_logprobs'])
                eid = result_idx_to_eid[idx]
                eid_pairs = response_dict.get(eid, None)
                if eid_pairs is None:
                    eid_pairs = []
                    response_dict[eid] = eid_pairs
                eid_pairs.append((text, logprob, np.mean(g['logprobs']['token_logprobs'])))
                if verbose:
                    print(text)
            except ValueError as e:
                if verbose:
                    print('----------- Error Msg--------')
                    print(e)
                    print(text)
                    print('-----------------------------')
                pass
        return response_dict

    def _call_codex_api(
        self,
        engine: str,
        prompt: Union[str, List],
        max_tokens,
        temperature: float,
        top_p: float,
        n: int,
        stop: List[str]
    ):
        """
        Call the OpenAI Completion endpoint, retrying on any failure.

        Rotates self.keys round-robin and sleeps 20s between retries.
        NOTE(review): `openai` and `Union` are not imported in this snippet.
        """
        start_time = time.time()
        result = None
        while result is None:
            try:
                key = self.keys[self.current_key_id]
                self.current_key_id = (self.current_key_id + 1) % len(self.keys)
                print(f"Using openai api key: {key}")
                result = openai.Completion.create(
                    engine=engine,
                    prompt=prompt,
                    api_key=key,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    n=n,
                    stop=stop,
                    logprobs=1  # required for the logprob-based ranking above
                )
                print('Openai api inference time:', time.time() - start_time)
                return result
            except Exception as e:
                print(e, 'Retry.')
                time.sleep(20)
def dict2df(table: Dict, add_row_id=False, lower_case=True):
    """
    Convert a tapex-format table (first row is the header, remaining rows
    are data) into a pd.DataFrame.

    Note: `add_row_id` and `lower_case` are accepted for interface
    compatibility but are not used by this implementation.
    """
    return pd.DataFrame(data=table[1:], columns=table[0])
The provided code snippet includes necessary dependencies for implementing the `worker` function. Write a Python function `def worker( pid: int, args, generator: Generator, g_eids: List, dataset: List[Dict], tokenizer )` to solve the following problem:
A worker process for annotating.
Here is the function:
def worker(
    pid: int,
    args,
    generator: Generator,
    g_eids: List,
    dataset: List[Dict],
    tokenizer
):
    """
    A worker process for annotating.

    Builds a few-shot + generate prompt for each example id in g_eids,
    batches args.n_parallel_prompts prompts per API call, and collects
    generations (sorted by summed logprob, best first) into:
        {eid: {'generations': [(text, logprob, mean_logprob), ...],
               'data_item': <deep copy of the dataset entry>}}
    """
    generation_dict = dict()
    built_few_shot_prompts = []
    for g_eid in g_eids:
        try:
            data_item = dataset[g_eid]
            generation_dict[g_eid] = {
                'generations': [],
                'data_item': copy.deepcopy(data_item)
            }
            n_shots = args.n_shots
            few_shot_prompt = generator.build_few_shot_prompt_from_file(
                file_path=args.prompt_file,
                n_shots=n_shots
            )
            generate_prompt = generator.build_generate_prompt(
                data_item={
                    'question': data_item['statement'],
                    'title': data_item['table_caption'] if 'table_caption' in data_item.keys() else None,
                    'table': dict2df(data_item['table_text'])
                },
                num_rows=args.num_rows,
                select_type=args.select_type
            )
            # print("there!!!!!!!!!")
            prompt = few_shot_prompt + '\n\n' + generate_prompt
            # token budget left for the prompt after reserving generation tokens
            max_prompt_tokens = args.max_api_total_tokens - args.max_generation_tokens
            # drop shots one at a time until the prompt fits the token budget
            while len(tokenizer.tokenize(prompt)) >= max_prompt_tokens:  # TODO: Add shrink rows
                n_shots -= 1
                assert n_shots >= 0
                few_shot_prompt = generator.build_few_shot_prompt_from_file(
                    file_path=args.prompt_file,
                    n_shots=n_shots
                )
                prompt = few_shot_prompt + "\n\n" + generate_prompt
            print("*"*80)
            print(prompt)
            built_few_shot_prompts.append((g_eid, prompt))
            print(f"Process#{pid}: Building prompt for eid#{g_eid}, original_id#{data_item['statement']}")
            # wait until a full batch of prompts is ready before calling the API
            if len(built_few_shot_prompts) < args.n_parallel_prompts:
                continue
            print(f"Process#{pid}: Prompts ready with {len(built_few_shot_prompts)} parallels. Run openai API.")
            response_dict = generator.generate_one_pass(
                prompts=built_few_shot_prompts,
                verbose=args.verbose
            )
            for eid, g_pairs in response_dict.items():
                # rank generations by summed logprob, best first
                g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
                generation_dict[eid]['generations'] = g_pairs
            built_few_shot_prompts = []
        except Exception as e:
            print(f"Process#{pid}: eid#{g_eid}, wtqid#{data_item['statement']} generation error: {e}")
    # Final generation inference for any remaining partial batch
    if len(built_few_shot_prompts) > 0:
        response_dict = generator.generate_one_pass(
            prompts=built_few_shot_prompts,
            verbose=args.verbose
        )
        for eid, g_pairs in response_dict.items():
            g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
            generation_dict[eid]['generations'] = g_pairs
    return generation_dict
165,139 | import pandas as pd
import numpy as np
import argparse
import os
import time
import json
import copy
from typing import List, Dict
import random
import multiprocessing
print(ROOT_DIR)
import sys
from gloc.generation.generator import Generator
from gloc.utils import dict2df
class Generator(object):
    """
    CodeX generation wrapper.

    Builds few-shot prompts from a demonstration file (cached per file/shot
    count), builds per-example generation prompts via a PromptBuilder, and
    calls the OpenAI Completion API with round-robin key rotation.
    """

    def __init__(self, args, keys=None) -> None:
        # args: config namespace (engine, sampling_n, temperature, ...);
        # keys: list of OpenAI API keys rotated per request.
        self.args = args
        self.keys = keys
        self.current_key_id = 0
        # maps "<file_path>_shot<n>" -> assembled few-shot prompt string
        self._few_shot_prompt_cache = dict()
        # if the args provided, will initialize with the prompt builder for full usage
        # NOTE(review): PromptBuilder is not defined/imported in this snippet.
        self.prompt_builder = PromptBuilder(args) if args else None

    def build_few_shot_prompt_from_file(
        self,
        file_path: str,
        n_shots: int
    ):
        """
        Build few-shot prompt for generation from file.

        Demonstrations in the file are separated by two consecutive blank
        lines; only the first n_shots are kept. Results are cached.
        """
        key = file_path + '_shot' + str(n_shots)
        if key in self._few_shot_prompt_cache.keys():
            return self._few_shot_prompt_cache[key]
        with open(file_path, 'r') as f:
            lines = f.readlines()
        few_shot_prompt_list = []
        one_shot_prompt = ''
        last_line = None
        for line in lines:
            # two blank lines in a row close the current demonstration
            if line == '\n' and last_line == '\n':
                few_shot_prompt_list.append(one_shot_prompt)
                one_shot_prompt = ''
            else:
                one_shot_prompt += line
            last_line = line
        few_shot_prompt_list.append(one_shot_prompt)
        few_shot_prompt_list = few_shot_prompt_list[:n_shots]
        few_shot_prompt_list[-1] = few_shot_prompt_list[
            -1].strip()  # It is essential for prompting to remove extra '\n'
        few_shot_prompt = '\n'.join(few_shot_prompt_list)
        self._few_shot_prompt_cache[key] = few_shot_prompt
        return few_shot_prompt

    def build_generate_prompt(
        self,
        data_item: Dict,
        num_rows: int,
        select_type: str
    ):
        """
        Build the generate prompt by delegating to the PromptBuilder;
        data_item's keys are forwarded as keyword arguments.
        """
        return self.prompt_builder.build_generate_prompt(
            **data_item,
            num_rows=num_rows,
            select_type=select_type
        )

    def generate_one_pass(
        self,
        prompts: List[Tuple],
        verbose: bool = False
    ):
        """
        Generate one pass with codex according to the generation phase.

        prompts: list of (eid, prompt_text) pairs. Returns
        {eid: [(text, sum_logprob, mean_logprob), ...]} with
        args.sampling_n samples per prompt.
        NOTE(review): `Tuple` is not imported here (only List, Dict).
        """
        result_idx_to_eid = []
        for p in prompts:
            # each prompt produces sampling_n consecutive choices
            result_idx_to_eid.extend([p[0]] * self.args.sampling_n)
        prompts = [p[1] for p in prompts]
        result = self._call_codex_api(
            engine=self.args.engine,
            prompt=prompts,
            max_tokens=self.args.max_generation_tokens,
            temperature=self.args.temperature,
            top_p=self.args.top_p,
            n=self.args.sampling_n,
            stop=self.args.stop_tokens
        )
        if verbose:
            print('\n', '*' * 20, 'Codex API Call', '*' * 20)
            for prompt in prompts:
                print(prompt)
                print('\n')
            print('- - - - - - - - - - ->>')
        # parse api results
        response_dict = dict()
        for idx, g in enumerate(result['choices']):
            try:
                text = g['text']
                logprob = sum(g['logprobs']['token_logprobs'])
                eid = result_idx_to_eid[idx]
                eid_pairs = response_dict.get(eid, None)
                if eid_pairs is None:
                    eid_pairs = []
                    response_dict[eid] = eid_pairs
                eid_pairs.append((text, logprob, np.mean(g['logprobs']['token_logprobs'])))
                if verbose:
                    print(text)
            except ValueError as e:
                if verbose:
                    print('----------- Error Msg--------')
                    print(e)
                    print(text)
                    print('-----------------------------')
                pass
        return response_dict

    def _call_codex_api(
        self,
        engine: str,
        prompt: Union[str, List],
        max_tokens,
        temperature: float,
        top_p: float,
        n: int,
        stop: List[str]
    ):
        """
        Call the OpenAI Completion endpoint, retrying on any failure.

        Rotates self.keys round-robin and sleeps 20s between retries.
        NOTE(review): `openai` and `Union` are not imported in this snippet.
        """
        start_time = time.time()
        result = None
        while result is None:
            try:
                key = self.keys[self.current_key_id]
                self.current_key_id = (self.current_key_id + 1) % len(self.keys)
                print(f"Using openai api key: {key}")
                result = openai.Completion.create(
                    engine=engine,
                    prompt=prompt,
                    api_key=key,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    n=n,
                    stop=stop,
                    logprobs=1  # required for the logprob-based ranking above
                )
                print('Openai api inference time:', time.time() - start_time)
                return result
            except Exception as e:
                print(e, 'Retry.')
                time.sleep(20)
def dict2df(table: Dict, add_row_id=False, lower_case=True):
    """
    Convert a tapex-format table (first row is the header, remaining rows
    are data) into a pd.DataFrame.

    Note: `add_row_id` and `lower_case` are accepted for interface
    compatibility but are not used by this implementation.
    """
    return pd.DataFrame(data=table[1:], columns=table[0])
The provided code snippet includes necessary dependencies for implementing the `worker` function. Write a Python function `def worker( pid: int, args, generator: Generator, g_eids: List, dataset: List[Dict], tokenizer )` to solve the following problem:
A worker process for annotating.
Here is the function:
def worker(
    pid: int,
    args,
    generator: Generator,
    g_eids: List,
    dataset: List[Dict],
    tokenizer
):
    """
    A worker process for annotating.

    For each example id in `g_eids`, builds a few-shot + per-example prompt,
    batches up to `args.n_parallel_prompts` prompts together, sends each batch
    through `generator` to the OpenAI API, and records the generations.

    Args:
        pid: worker process id, used only in log messages.
        args: settings namespace; fields used: n_shots, prompt_file, num_rows,
            select_type, max_api_total_tokens, max_generation_tokens,
            n_parallel_prompts, verbose.
        generator: Generator wrapping prompt building and the completion API.
        g_eids: indices into `dataset` assigned to this worker.
        dataset: items with 'statement', 'table_text' and optionally
            'table_caption'.
        tokenizer: used only to count prompt tokens for length control.

    Returns:
        dict: eid -> {'generations': [(text, logprob_sum, logprob_mean), ...]
        sorted by logprob_sum descending, 'data_item': deep copy of the item}.
    """
    generation_dict = dict()
    built_few_shot_prompts = []
    for g_eid in g_eids:
        try:
            data_item = dataset[g_eid]
            generation_dict[g_eid] = {
                'generations': [],
                'data_item': copy.deepcopy(data_item)
            }
            n_shots = args.n_shots
            few_shot_prompt = generator.build_few_shot_prompt_from_file(
                file_path=args.prompt_file,
                n_shots=n_shots
            )
            generate_prompt = generator.build_generate_prompt(
                data_item={
                    'question': data_item['statement'],
                    'title': data_item['table_caption'] if 'table_caption' in data_item.keys() else None,
                    'table': dict2df(data_item['table_text'])
                },
                num_rows = args.num_rows,
                select_type=args.select_type
            )
            prompt = few_shot_prompt + '\n\n' + generate_prompt
            # Drop shots until the prompt fits within the api token budget.
            max_prompt_tokens = args.max_api_total_tokens - args.max_generation_tokens
            while len(tokenizer.tokenize(prompt)) >= max_prompt_tokens:  # TODO: Add shrink rows
                n_shots -= 1
                assert n_shots >= 0
                few_shot_prompt = generator.build_few_shot_prompt_from_file(
                    file_path=args.prompt_file,
                    n_shots=n_shots
                )
                prompt = few_shot_prompt + "\n\n" + generate_prompt
            built_few_shot_prompts.append((g_eid,prompt))
            print(f"Process#{pid}: Building prompt for eid#{g_eid}, original_id#{data_item['statement']}")
            # Wait until a full batch of prompts is ready before calling the api.
            if len(built_few_shot_prompts) < args.n_parallel_prompts:
                continue
            print(f"Process#{pid}: Prompts ready with {len(built_few_shot_prompts)} parallels. Run openai API.")
            response_dict = generator.generate_one_pass(
                prompts=built_few_shot_prompts,
                verbose=args.verbose
            )
            for eid, g_pairs in response_dict.items():
                # Best generation (highest summed logprob) first.
                g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
                generation_dict[eid]['generations'] = g_pairs
            built_few_shot_prompts = []
        except Exception as e:
            # Best-effort: log and continue so one bad item doesn't kill the worker.
            print(f"Process#{pid}: eid#{g_eid}, wtqid#{data_item['statement']} generation error: {e}")
    # Final generation inference for the last, possibly partial, batch.
    if len(built_few_shot_prompts) > 0:
        response_dict = generator.generate_one_pass(
            prompts=built_few_shot_prompts,
            verbose=args.verbose
        )
        for eid, g_pairs in response_dict.items():
            g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
            generation_dict[eid]['generations'] = g_pairs
    return generation_dict
return generation_dict | A worker process for annotating. |
165,140 | import pandas as pd
import numpy as np
import argparse
import os
import time
import json
import copy
from typing import List, Dict
import random
import multiprocessing
import re
import collections
print(ROOT_DIR)
import sys
from gloc.generation.generator import Generator
from gloc.utils import dict2df
def merge_res(dic):
    """
    Aggregate CodeX generations per example into a True/False prediction by
    probability-weighted voting, then print overall accuracy.

    Fixes vs. original: the dict-iteration variable `key` was shadowed by the
    inner vote label, and an unused local (`table`) is removed; the Chinese
    comment is translated.

    Args:
        dic: eid -> {'generations': [(text, logprob_sum, logprob_mean), ...],
            'data_item': {'label': 0 or 1, ...}}.

    Returns:
        None (prints 'ACC: <value>').
    """
    acc = 0.
    for eid in dic:
        item = dic[eid]
        # Weighted votes for label 0 (False) and 1 (True); CodeX may also have
        # produced nothing usable, in which case both weights stay 0.
        vote_weight = collections.defaultdict(float)
        for gen in item['generations']:
            log_prob_mean = gen[2]
            text = gen[0]
            try:
                # Keep only the text after the last ':' (e.g. "answer: True").
                text = text.split(':')[-1]
            except Exception:
                continue
            if text.count('True') >= 1:
                vote_weight[1] += np.exp(log_prob_mean)
            elif text.count('False') >= 1:
                vote_weight[0] += np.exp(log_prob_mean)
        # Ties (including no votes at all) default to label 1, as before.
        pred_answer = 0 if vote_weight[0] > vote_weight[1] else 1
        gt = item['data_item']['label']
        if gt == pred_answer:
            acc += 1
    print('ACC:', acc/len(dic))
165,141 | import pandas as pd
import numpy as np
import argparse
import os
import time
import json
import copy
from typing import List, Dict
import random
import multiprocessing
import re
import collections
print(ROOT_DIR)
import sys
from gloc.generation.generator import Generator
from gloc.utils import dict2df
class Generator(object):
    """
    CodeX generation wrapper.

    Builds few-shot + per-example prompts and calls the OpenAI completion API,
    rotating through the supplied api keys and retrying on failure.

    NOTE(review): `PromptBuilder`, `openai`, `Tuple`/`Union` are expected to be
    imported/defined elsewhere in this module — confirm at file level.
    """
    def __init__(self,args, keys=None) -> None:
        self.args = args
        self.keys = keys          # list of OpenAI api keys, used round-robin
        self.current_key_id = 0   # index of the next key to use
        self._few_shot_prompt_cache = dict()  # "<file>_shot<n>" -> prompt text
        # if the args provided, will initialize with the prompt builder for full usage
        self.prompt_builder = PromptBuilder(args) if args else None
    def build_few_shot_prompt_from_file(
        self,
        file_path: str,
        n_shots: int
    ):
        """
        Build few-shot prompt for generation from file.

        Shots in the prompt file are separated by two consecutive blank lines;
        the first `n_shots` shots are joined and the result is cached per
        (file_path, n_shots).
        """
        key = file_path + '_shot' + str(n_shots)
        if key in self._few_shot_prompt_cache.keys():
            return self._few_shot_prompt_cache[key]
        with open(file_path, 'r') as f:
            lines = f.readlines()
        few_shot_prompt_list = []
        one_shot_prompt = ''
        last_line = None
        for line in lines:
            # Two consecutive empty lines end the current shot.
            if line == '\n' and last_line == '\n':
                few_shot_prompt_list.append(one_shot_prompt)
                one_shot_prompt = ''
            else:
                one_shot_prompt += line
            last_line = line
        few_shot_prompt_list.append(one_shot_prompt)
        few_shot_prompt_list = few_shot_prompt_list[:n_shots]
        few_shot_prompt_list[-1] = few_shot_prompt_list[
            -1].strip()  # It is essential for prompting to remove extra '\n'
        few_shot_prompt = '\n'.join(few_shot_prompt_list)
        self._few_shot_prompt_cache[key] = few_shot_prompt
        return few_shot_prompt
    def build_generate_prompt(
        self,
        data_item: Dict,
        num_rows: int,
        select_type: str
    ):
        """
        Build the generate prompt (delegates to the PromptBuilder).
        """
        return self.prompt_builder.build_generate_prompt(
            **data_item,
            num_rows=num_rows,
            select_type=select_type
        )
    def generate_one_pass(
        self,
        prompts: List[Tuple],
        verbose: bool = False
    ):
        """
        Generate one pass with codex according to the generation phase.

        `prompts` is a list of (eid, prompt) pairs. Returns a dict mapping
        eid -> [(text, sum_logprob, mean_logprob), ...].
        """
        result_idx_to_eid = []
        for p in prompts:
            # Each prompt yields `sampling_n` choices in the api response.
            result_idx_to_eid.extend([p[0]] * self.args.sampling_n)
        prompts = [p[1] for p in prompts]
        result = self._call_codex_api(
            engine=self.args.engine,
            prompt=prompts,
            max_tokens=self.args.max_generation_tokens,
            temperature=self.args.temperature,
            top_p=self.args.top_p,
            n=self.args.sampling_n,
            stop=self.args.stop_tokens
        )
        if verbose:
            print('\n', '*' * 20, 'Codex API Call', '*' * 20)
            for prompt in prompts:
                print(prompt)
                print('\n')
            print('- - - - - - - - - - ->>')
        # parse api results
        response_dict = dict()
        for idx, g in enumerate(result['choices']):
            try:
                text = g['text']
                logprob = sum(g['logprobs']['token_logprobs'])
                eid = result_idx_to_eid[idx]
                eid_pairs = response_dict.get(eid, None)
                if eid_pairs is None:
                    eid_pairs = []
                    response_dict[eid] = eid_pairs
                eid_pairs.append((text, logprob,np.mean(g['logprobs']['token_logprobs'])))
                if verbose:
                    print(text)
            except ValueError as e:
                # A malformed choice is logged (when verbose) and skipped.
                if verbose:
                    print('----------- Error Msg--------')
                    print(e)
                    print(text)
                    print('-----------------------------')
                pass
        return response_dict
    def _call_codex_api(
        self,
        engine: str,
        prompt: Union[str, List],
        max_tokens,
        temperature: float,
        top_p: float,
        n: int,
        stop: List[str]
    ):
        """Call the completion endpoint, rotating api keys and retrying forever on errors."""
        start_time = time.time()
        result = None
        while result is None:
            try:
                # Round-robin over the available keys.
                key = self.keys[self.current_key_id]
                self.current_key_id = (self.current_key_id + 1) % len(self.keys)
                print(f"Using openai api key: {key}")
                result = openai.Completion.create(
                    engine=engine,
                    prompt=prompt,
                    api_key=key,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    n=n,
                    stop=stop,
                    logprobs=1
                )
                print('Openai api inference time:', time.time() - start_time)
                return result
            except Exception as e:
                print(e, 'Retry.')
                time.sleep(20)
def dict2df(table: Dict, add_row_id=False, lower_case=True):
    """
    Turn a tapex-format table (row 0 is the header, the rest are data rows)
    into a pandas DataFrame.

    NOTE(review): `add_row_id` and `lower_case` are currently unused — confirm
    whether honoring them is intended.
    """
    column_names = table[0]
    data_rows = table[1:]
    return pd.DataFrame(data=data_rows, columns=column_names)
The provided code snippet includes necessary dependencies for implementing the `worker` function. Write a Python function `def worker( pid: int, args, generator: Generator, g_eids: List, dataset: List[Dict], tokenizer )` to solve the following problem:
A worker process for annotating.
Here is the function:
def worker(
    pid: int,
    args,
    generator: Generator,
    g_eids: List,
    dataset: List[Dict],
    tokenizer
):
    """
    A worker process for annotating.

    Builds a few-shot + per-example prompt for each assigned example, batches
    up to `args.n_parallel_prompts` prompts, sends each batch through
    `generator` to the OpenAI API, and collects the generations.

    Args:
        pid: worker process id, used only in log messages.
        args: settings namespace; fields used: n_shots, prompt_file, num_rows,
            select_type, max_api_total_tokens, max_generation_tokens,
            n_parallel_prompts, verbose.
        generator: Generator wrapping prompt building and the completion API.
        g_eids: indices into `dataset` assigned to this worker.
        dataset: items with 'statement', 'table_text' and optionally
            'table_caption'.
        tokenizer: used only to count prompt tokens for length control.

    Returns:
        dict: eid -> {'generations': [(text, logprob_sum, logprob_mean), ...]
        sorted by logprob_sum descending, 'data_item': deep copy of the item}.
    """
    generation_dict = dict()
    built_few_shot_prompts = []
    for g_eid in g_eids:
        try:
            data_item = dataset[g_eid]
            generation_dict[g_eid] = {
                'generations': [],
                'data_item': copy.deepcopy(data_item)
            }
            n_shots = args.n_shots
            few_shot_prompt = generator.build_few_shot_prompt_from_file(
                file_path=args.prompt_file,
                n_shots=n_shots
            )
            generate_prompt = generator.build_generate_prompt(
                data_item={
                    'question': data_item['statement'],
                    'title': data_item['table_caption'] if 'table_caption' in data_item.keys() else None,
                    'table': dict2df(data_item['table_text'])
                },
                num_rows = args.num_rows,
                select_type=args.select_type
            )
            prompt = few_shot_prompt + '\n\n' + generate_prompt
            # Drop shots until the prompt fits within the api token budget.
            max_prompt_tokens = args.max_api_total_tokens - args.max_generation_tokens
            while len(tokenizer.tokenize(prompt)) >= max_prompt_tokens:  # TODO: Add shrink rows
                n_shots -= 1
                assert n_shots >= 0
                few_shot_prompt = generator.build_few_shot_prompt_from_file(
                    file_path=args.prompt_file,
                    n_shots=n_shots
                )
                prompt = few_shot_prompt + "\n\n" + generate_prompt
            # Debug output: show the per-example part of the prompt.
            print(generate_prompt)
            built_few_shot_prompts.append((g_eid,prompt))
            print(f"Process#{pid}: Building prompt for eid#{g_eid}, original_id#{data_item['statement']}")
            # Wait until a full batch of prompts is ready before calling the api.
            if len(built_few_shot_prompts) < args.n_parallel_prompts:
                continue
            print(f"Process#{pid}: Prompts ready with {len(built_few_shot_prompts)} parallels. Run openai API.")
            response_dict = generator.generate_one_pass(
                prompts=built_few_shot_prompts,
                verbose=args.verbose
            )
            for eid, g_pairs in response_dict.items():
                # Best generation (highest summed logprob) first.
                g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
                generation_dict[eid]['generations'] = g_pairs
            built_few_shot_prompts = []
        except Exception as e:
            # Best-effort: log and continue so one bad item doesn't kill the worker.
            print(f"Process#{pid}: eid#{g_eid}, wtqid#{data_item['statement']} generation error: {e}")
    # Final generation inference for the last, possibly partial, batch.
    if len(built_few_shot_prompts) > 0:
        response_dict = generator.generate_one_pass(
            prompts=built_few_shot_prompts,
            verbose=args.verbose
        )
        for eid, g_pairs in response_dict.items():
            g_pairs = sorted(g_pairs, key=lambda x: x[1], reverse=True)
            generation_dict[eid]['generations'] = g_pairs
    return generation_dict
165,142 | from curses import meta
import os
from os.path import join, exists
import json
from tqdm import tqdm
import copy
from rich import print
def load_file():
    """
    Collect every disambiguation turn (disambiguation_label == 1) from the
    dev/train/devtest SIMMC 2.1 dialogue files and dump them to
    ./result/output.json.

    Each output record keeps the user transcript, the optional rewritten
    disambiguation transcript, the (raw) disambiguation candidates, the split
    the turn came from, and an empty 'type' placeholder.

    Fixes vs. original: removes many unused locals (line_output, domain,
    scene_ids, history_object, prev_turn, per-turn slot/object reads) and
    creates the ./result output directory if missing.
    """
    split_list = ['dev', 'train', 'devtest']
    output = []
    for split in split_list:
        file_path = f'../data_dstc11/simmc2.1_dials_dstc11_{split}.json'
        with open(file_path) as f_in:
            data = json.load(f_in)['dialogue_data']
        for dialogue in data:
            for turn in dialogue['dialogue']:
                annotated = turn['transcript_annotated']
                if annotated['disambiguation_label'] == 1:
                    output.append({
                        # Not every disambiguation turn carries a rewritten transcript.
                        'disambiguation_transcript': turn.get('disambiguation_transcript', 'None'),
                        'transcript': turn['transcript'],
                        'disambiguation_candidates': annotated['disambiguation_candidates'],
                        'disambiguation_candidates_raw': annotated['disambiguation_candidates_raw'],
                        'split': split,
                        'type': ''
                    })
    # Make sure the output directory exists before writing.
    os.makedirs('./result', exist_ok=True)
    with open('./result/output.json', 'w') as f_out:
        json.dump(output, f_out, indent=4, ensure_ascii=False)
165,143 | import os
from os.path import join, exists
import json
from tqdm import tqdm
from rich import print
import numpy as np
import imagesize
# Special tokens used when flattening a dialogue + its scene objects into one
# model input string (see process_for_vlbert_task2 below for usage).
OBJ_BEGIN_TOKEN = '<SOO>'        # marks the start of the object list
OBJ_END_TOKEN = '<EOO>'          # marks the end of the object list
NOCOREF_TOKEN = '<NOCOREF>'      # placeholder token for "no coreferent object"
DISAMBIGUATION_TOKEN = '<DISAM>'  # prefix token for the disambiguation input
def arrange_object_special_tokens(scene_dir, image_dir, scene_ids, object_item2id, insert_bbox_coords):
    '''
    Flatten the objects of a dialog's scenes into a special-token string.

    Args (translated from the original Chinese docstring):
        scene_dir: directory holding the scene json files
        image_dir: directory holding the scene images
        scene_ids: scene ids associated with this dialog, in turn order
        object_item2id: item2id mapping keyed by prefab path
        insert_bbox_coords: whether to compute normalized 2D bbox + depth info

    Returns:
        (token_string, bbox_list): the concatenated per-object token string and
        a parallel list of [x1, y1, x2, y2, area_ratio, depth_ratio] entries
        (all normalized; only filled when insert_bbox_coords is true).
    '''
    arrange_list = []
    arrange_bbox_list = []
    scene_loaded_list = []
    # An object index may appear in several scenes; remember the LAST scene it
    # shows up in, and only emit it for that scene below.
    obj_dict_possibly_duplicated = dict()
    for scene_id_idx, scene_id in enumerate(scene_ids):
        with open(os.path.join(scene_dir, f"{scene_id}_scene.json"), 'r') as f_in:
            scene = json.load(f_in)
        scene_loaded_list.append(scene)
        for obj in scene['scenes'][0]['objects']:
            obj_dict_possibly_duplicated[obj['index']] = scene_id_idx
    num_scene = len(scene_ids)
    for scene_id_idx, scene_id in enumerate(scene_ids):
        scene = scene_loaded_list[scene_id_idx]
        # Bbox files are named without the 'm_' prefix, so strip it.
        bbox_id = scene_id[2:] if scene_id.startswith('m_') else scene_id
        with open(os.path.join(scene_dir, f"{bbox_id}_bbox.json"), 'r') as f_in:
            bbox = json.load(f_in)
        camera_position = []; camera_dir_vec = []
        for bbox_item in bbox['Items']:
            if bbox_item['name'] == 'camera':
                camera_position = np.array(bbox_item['position'])
            if bbox_item['name'] == 'camera_forward':
                camera_dir_vec = np.array(bbox_item['position'])
        if insert_bbox_coords:
            # Depth (along the camera axis) of the farthest object; used below
            # to normalize each object's depth into [0, 1].
            largest_z_value = 0
            for obj in scene['scenes'][0]['objects']:
                position = np.array(obj['position'])  # 3D position from the scene file
                obj_displacement = position - camera_position
                theta = np.dot(obj_displacement, camera_dir_vec) / (np.linalg.norm(obj_displacement)*np.linalg.norm(camera_dir_vec))
                largest_z_value = max(np.linalg.norm(obj_displacement) * np.cos(theta), largest_z_value)
        # Emit every object belonging to the current scene.
        for obj in scene['scenes'][0]['objects']:
            assert obj['index'] in obj_dict_possibly_duplicated, "SOMETHING IS MISSING!"
            if scene_id_idx == obj_dict_possibly_duplicated[obj['index']]:
                if insert_bbox_coords:
                    position = np.array(obj['position'])
                    obj_displacement = position - camera_position
                    theta = np.dot(obj_displacement, camera_dir_vec) / (np.linalg.norm(obj_displacement)*np.linalg.norm(camera_dir_vec))
                    z_value = np.linalg.norm(obj_displacement) * np.cos(theta)
                    # image name
                    image_id = None
                    if "m" in scene_id[0]: image_id = scene_id[2:]
                    else: image_id = scene_id
                    image_file_name = os.path.join(image_dir, image_id+".png")
                    if os.path.exists(image_file_name):
                        img_w, img_h = imagesize.get(image_file_name)
                        x1, y1, h, w = obj['bbox']
                        x2, y2 = x1 + w, y1 + h
                        # NOTE(review): pos_str is computed but never used (only
                        # arrange_bbox_list is returned) — confirm it can go.
                        pos_str = '[({:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f})]'.format(x1/img_w -0.5, y1/img_h -0.5, x2/img_w -0.5, y2/img_h -0.5, (x2-x1)*(y2-y1)/(img_w*img_h), z_value/largest_z_value)
                        arrange_bbox_list.append([x1/img_w -0.5, y1/img_h -0.5, x2/img_w -0.5, y2/img_h -0.5, (x2-x1)*(y2-y1)/(img_w*img_h), z_value/largest_z_value])
                    else:
                        # No image for this scene: fall back to zero bbox, keep depth.
                        print(f'{scene_id} is not present in img_size!!!')
                        pos_str = '[({:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f})]'.format(0.0, 0.0, 0.0, 0.0, 0.0, z_value/largest_z_value)
                        arrange_bbox_list.append([0.0, 0.0, 0.0, 0.0, 0.0, z_value/largest_z_value])
                else:
                    pos_str = ''
                # OBJ_PREVI / OBJ_START are module-level marker tokens defined
                # elsewhere; objects of the first of several scenes are marked
                # as "previous-scene" objects.
                if (num_scene != 1) and (scene_id_idx == 0):
                    arrange_list.append(OBJ_PREVI + "<" + str(obj['index']) + ">" + object_item2id[obj['prefab_path']])
                else:
                    arrange_list.append(OBJ_START + "<" + str(obj['index']) + ">" + object_item2id[obj['prefab_path']])
    return ''.join(arrange_list), arrange_bbox_list
def process_metadata_dict(scene_dir, scene_ids, all_metadata):
    '''Build an {object index -> metadata dict} mapping for the given scene ids.'''
    obj_meta = {}
    for scene_id in scene_ids:
        scene_path = os.path.join(scene_dir, f"{scene_id}_scene.json")
        with open(scene_path, 'r') as f_in:
            scene = json.load(f_in)
        for scene_obj in scene['scenes'][0]['objects']:
            # Metadata entries are keyed by the asset's prefab path.
            obj_meta[scene_obj['index']] = all_metadata[scene_obj['prefab_path']]
    return obj_meta
The provided code snippet includes necessary dependencies for implementing the `process_for_vlbert_task2` function. Write a Python function `def process_for_vlbert_task2()` to solve the following problem:
为VLBert模型的训练准备
Here is the function:
def process_for_vlbert_task2():
    '''
    Prepare training/eval data for the VLBert model (task 2).

    Reads the SIMMC 2.1 dialogue file for the configured split, flattens each
    user turn (dialogue context + scene-object special tokens) into one input
    string, and writes the resulting records to a json file.
    '''
    scene_dir = '../../data_dstc11/jsons'
    image_dir = '../../data_dstc11/images'
    obj_item2id_path = '../data/item2id.json'
    fashion_metadata_path = '../../data_dstc11/fashion_prefab_metadata_all.json'
    furniture_metadata_path = '../../data_dstc11/furniture_prefab_metadata_all.json'
    # Merge fashion + furniture prefab metadata into one lookup table.
    all_metadata = {}
    with open(fashion_metadata_path) as f_in:
        all_metadata.update(json.load(f_in))
    with open(furniture_metadata_path) as f_in:
        all_metadata.update(json.load(f_in))
    with open(obj_item2id_path) as f_in:
        object_item2id = json.load(f_in)
    output = []
    # Pick exactly one split (keep the others commented out):
    # split_list = ['teststd_public'] # For Final Evaluation
    split_list = ['dev'] # For Evaluation
    # split_list = ['train'] # For Training
    for split in split_list:
        file_path = f'../../data_dstc11/simmc2.1_dials_dstc11_{split}.json'
        with open(file_path) as f_in:
            data = json.load(f_in)['dialogue_data']
        for dialogue in tqdm(data, desc=f'{split} '):
            dialogue_idx = dialogue['dialogue_idx']
            # Scenes sorted by the turn index at which they become active.
            scene_ids = list(sorted(dialogue['scene_ids'].items(), key=lambda item: int(item[0])))
            is_fashion = True if dialogue['domain'] == 'fashion' else False
            # NOTE(review): obj_metadata_dict, sys_lst_context, sys_object and
            # prev_sys_object_ids are computed but never used below — confirm
            # whether they can be dropped.
            obj_metadata_dict = process_metadata_dict(scene_dir, list(dialogue['scene_ids'].values()), all_metadata)
            lst_context = []
            sys_lst_context = []
            prev_turn = None
            prev_sys_object_ids = []
            for turn in dialogue['dialogue']:
                turn_idx = turn['turn_idx']
                system_transcript = turn['system_transcript']
                transcript = turn['transcript']
                user_object = turn['transcript_annotated']['act_attributes']['objects']
                sys_object = turn['system_transcript_annotated']['act_attributes']['objects']
                disambiguation_label = turn['transcript_annotated']['disambiguation_label']
                disambiguation_candidates = turn['transcript_annotated']['disambiguation_candidates']
                slot_values = turn['transcript_annotated']['act_attributes']['slot_values']
                intent = turn['transcript_annotated']['act']
                # Scenes visible up to (and including) this turn.
                turn_scene_ids = [item[1] for item in scene_ids if int(item[0]) <= turn_idx]
                object_str, bbox_data = arrange_object_special_tokens(scene_dir, image_dir, turn_scene_ids, object_item2id, True)
                if prev_turn is None:
                    lst_context.append(f'User : {transcript}')
                    sys_lst_context.append(f'User : {transcript} System : {system_transcript}')
                else:
                    prev_system_transcript = prev_turn['system_transcript']
                    lst_context.append(f'System : {prev_system_transcript} User : {transcript}')
                    sys_lst_context.append(f'User : {transcript} System : {system_transcript}')
                # One record per user turn; context is the last two exchanges.
                output.append({
                    'input': DISAMBIGUATION_TOKEN + ' ' + ' '.join(lst_context[-2:]) + OBJ_BEGIN_TOKEN + NOCOREF_TOKEN + object_str + OBJ_END_TOKEN,
                    'disambiguation_label': disambiguation_label,
                    'is_fashion': is_fashion,
                    'bbox': bbox_data,
                    'intent': intent,
                    'slot_values': slot_values,
                    'reference_objects': user_object,
                    'disambiguation_objects': disambiguation_candidates,
                    'dialogue_idx': dialogue_idx,
                    'turn_idx': turn_idx,
                    'role': 'User'
                })
                prev_turn = turn
    print(len(output))
    # Output file matching the selected split (others kept for reference):
    # with open('../data/simmc2.1_dials_dstc11_task2_predict.json', 'w') as f_out:
    #     json.dump(output, f_out, indent=4, ensure_ascii=False)
    with open('../data/simmc2.1_dials_dstc11_task2_eval.json', 'w') as f_out:
        json.dump(output, f_out, indent=4, ensure_ascii=False)
    # with open('../data/simmc2.1_dials_dstc11_task2_eval_teststd.json', 'w') as f_out:
    #     json.dump(output, f_out, indent=4, ensure_ascii=False)
165,144 | from genericpath import exists
import os
from os.path import join
import json
import argparse
import torch
from torch.optim import AdamW
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import SequentialSampler, DistributedSampler
from tqdm import tqdm, trange
from rich import print
from datetime import datetime
import torch.multiprocessing
from sklearn.metrics import precision_recall_fscore_support
from torch.utils.data import DataLoader
from transformers import (
LongformerTokenizerFast,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_constant_schedule_with_warmup
)
from torch import distributed as dist
from utils.metadata import load_metadata,available_sizes2st
from utils.set_config import set_device, set_seed
from model.backbone import VLBertModel
from utils.dataset import get_task2_dataset, DataLoaderX
class DataLoaderX(DataLoader):
    """DataLoader whose iterator prefetches batches in a background thread."""

    def __iter__(self):
        base_iter = super().__iter__()
        return BackgroundGenerator(base_iter)
def get_task2_dataset(args, tokenizer, all_objects_meta, eval=False):
    """Build the task-2 dataset from args.train_input_file (eval=False) or args.eval_input_file (eval=True)."""
    input_file = args.eval_input_file if eval else args.train_input_file
    return LineByLineTask2Dataset(input_file, tokenizer, all_objects_meta, eval=eval)
The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(args, model, tokenizer, all_objects_meta)` to solve the following problem:
模型方法的评估函数
Here is the function:
def evaluate(args, model, tokenizer, all_objects_meta):
    '''
    Evaluate the model on task 2: coreference object prediction (precision /
    recall / F1) plus disambiguation accuracy.

    Also writes the per-example disambiguation and object predictions to json
    files under args.checkpoint_name_or_path.
    '''
    def collate_eval_bart(examples):
        # Collate a batch: pad encoder input/attention, keep boxes & misc as lists.
        enc_input = list(map(lambda x: x[0], examples))
        enc_attention_mask = list(map(lambda x: x[1], examples))
        boxes = list(map(lambda x: x[2], examples))
        misc = list(map(lambda x: x[3], examples))
        disambiguation_labels = list(map(lambda x: x[5], examples))
        if tokenizer._pad_token is None:
            enc_input_pad = pad_sequence(enc_input, batch_first=True)
        else:
            enc_input_pad = pad_sequence(enc_input, batch_first=True, padding_value=tokenizer.pad_token_id)
        enc_attention_pad = pad_sequence(enc_attention_mask, batch_first=True, padding_value=0)
        return enc_input_pad, enc_attention_pad, boxes, misc, torch.vstack(disambiguation_labels).squeeze()
    def rec_prec_f1(n_correct, n_true, n_pred):
        # Standard recall / precision / F1 with divide-by-zero guards.
        rec = n_correct / n_true if n_true != 0 else 0
        prec = n_correct / n_pred if n_pred != 0 else 0
        f1 = 2 * prec * rec / (prec + rec) if (prec + rec) != 0 else 0
        return rec, prec, f1
    eval_dataset = get_task2_dataset(args, tokenizer, all_objects_meta, eval=True)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoaderX(eval_dataset, sampler=eval_sampler, num_workers=args.num_workers, batch_size=args.eval_batch_size, collate_fn=collate_eval_bart, pin_memory=True)
    n_pred_objects, n_true_objects, n_correct_objects = 0, 0, 0
    n_total_disambiguation, n_true_disambiguation = 0, 0
    evaluate_disambiguation_list = []
    evaluate_object_list = []
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating", colour='blue', leave=False):
        enc_input = batch[0].to(args.device)
        enc_attention_mask = batch[1].to(args.device)
        boxes = batch[2] # batch, num_obj_per_line, 6
        misc = batch[3] # batch, num_obj_per_line, dict
        disambiguation_labels = batch[4].to(args.device)
        with torch.no_grad():
            s_pred_objects, s_true_objects, s_correct_objects, disambiguation_true_items, disambiguation_total_items, disambiguation_pred_list, output_pred_list = model.evaluate(enc_input, enc_attention_mask, boxes, misc, disambiguation_labels)
        # Accumulate batch-level counts for the final metrics.
        n_pred_objects += s_pred_objects
        n_true_objects += s_true_objects
        n_correct_objects += s_correct_objects
        n_true_disambiguation += disambiguation_true_items
        n_total_disambiguation += disambiguation_total_items
        evaluate_disambiguation_list.extend(disambiguation_pred_list)
        evaluate_object_list.extend(output_pred_list)
    coref_rec, coref_prec, coref_f1 = rec_prec_f1(n_correct_objects, n_true_objects, n_pred_objects)
    # Save the predicted disambiguation results.
    with open(join(args.checkpoint_name_or_path, 'simmc2.1_task2_disam_predicted.json'), 'w') as f_out:
        json.dump(evaluate_disambiguation_list, f_out, indent=4, ensure_ascii=False)
    # Save the predicted object (coreference) results.
    with open(join(args.checkpoint_name_or_path, 'simmc2.1_task2_predicted.json'), 'w') as f_out:
        json.dump(evaluate_object_list, f_out, indent=4, ensure_ascii=False)
    return {
        'precision': coref_prec,
        'recall': coref_rec,
        'f1-score': coref_f1,
        'disambiguation_acc': n_true_disambiguation/n_total_disambiguation
    }
165,145 | import torch
import numpy as np
import random
import random
random.seed(33)
def set_seed(args):
    """Seed the python, numpy and torch RNGs (and all GPUs when n_gpu > 0) from args.seed."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
165,146 | import torch
import numpy as np
import random
import random
random.seed(33)
def set_seed_ddp(args):
    """Seed the RNGs with a per-process seed (args.seed offset by the local rank) for DDP workers."""
    rank_seed = args.seed + args.local_rank
    random.seed(rank_seed)
    np.random.seed(rank_seed)
    torch.manual_seed(rank_seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(rank_seed)
165,147 | import torch
import numpy as np
import random
def set_device(args):
    """Set args.device to cuda (binding this process to args.local_rank) when available, else cpu."""
    if not torch.cuda.is_available():
        args.device = torch.device('cpu')
        return
    args.device = torch.device("cuda")
    torch.cuda.set_device(args.local_rank)
165,148 | import torch
import numpy as np
import random
The provided code snippet includes necessary dependencies for implementing the `set_config` function. Write a Python function `def set_config(args, config)` to solve the following problem:
combine the config and args
Here is the function:
def set_config(args, config):
    '''Copy layer count / dropout settings from args onto both sub-configs, plus the dialog position limit.'''
    # Settings shared by the text and vision branches.
    for sub_config in (config.text_config, config.vision_config):
        sub_config.num_hidden_layers = args.num_hidden_layers
        sub_config.dropout = args.dropout_rate
        sub_config.attention_dropout = args.dropout_rate
    # Text-branch-only setting.
    config.text_config.dialog_max_position_embeddings = args.dialog_max_position_embeddings
165,149 | import json
import torch
from transformers.tokenization_utils import PreTrainedTokenizer
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler, DistributedSampler
from prefetch_generator import BackgroundGenerator
import copy
from .metadata import FASHION_COLOR, FASHION_PATTERN, FASHION_SLEEVE_LENGTH, FURNITURE_BRAND, FURNITURE_COLOR, FURNITURE_CUSTOMER_RATING, FURNITURE_MATERIALS, FURNITURE_PRICE, fashion_meta_attrs, furniture_meta_attrs, available_sizes2st
# Per-item special tokens, e.g. "<@1042>" for fashion and "<@2042>" for
# furniture catalogue entries, plus "<N>" index tokens for objects in a scene.
# NOTE(review): NUM_FASHION_ITEMS / NUM_FURNITURE_ITEMS / MAX_NUM_OBJ_IN_SCENE
# are defined elsewhere in this module (not visible here) — confirm.
FASHION_SPECIAL_TOKENS = [f"<@1{i:03}>" for i in range(NUM_FASHION_ITEMS)]
FURNITURE_SPECIAL_TOKENS = [f"<@2{i:03}>" for i in range(NUM_FURNITURE_ITEMS)]
OBJECT_INDICES = [f"<{i}>" for i in range(MAX_NUM_OBJ_IN_SCENE)]
def get_input_id(tokenizer, tokens):
    """Return the input ids for `tokens`, dropping the leading/trailing special ids.

    The result is not necessarily a single id — one token may map to several.
    """
    encoded = tokenizer(tokens).input_ids
    return encoded[1:-1]
The provided code snippet includes necessary dependencies for implementing the `id_converter` function. Write a Python function `def id_converter(tokenizer)` to solve the following problem:
获取Specical Token所对应的id信息
Here is the function:
def id_converter(tokenizer):
    '''Map each special token's first input id back to the token string.

    Returns three dicts (object-index tokens, fashion item tokens, furniture
    item tokens), each keyed by the token's first tokenizer id.
    '''
    index_map = {}
    for index_token in OBJECT_INDICES:
        index_map[get_input_id(tokenizer, index_token)[0]] = index_token
    fashion_map = {get_input_id(tokenizer, st)[0]: st for st in FASHION_SPECIAL_TOKENS}
    furniture_map = {get_input_id(tokenizer, st)[0]: st for st in FURNITURE_SPECIAL_TOKENS}
    return index_map, fashion_map, furniture_map
165,150 | import json
import os
import re
from typing import List
import attr
from attr.validators import instance_of
DATA_DIR = ''
class FashionMetadata:
    """
    Metadata record for a single fashion prefab (one catalogue item).

    NOTE(review): the bare `attr.ib` fields plus the many identically named
    `check` methods strongly suggest this class originally carried `@attr.s`
    and per-field `@<field>.validator` decorators that were stripped when the
    file was flattened. As written, each later `def check` shadows the previous
    one and `check_in` lacks `self` — confirm against the original source.
    """
    # --- attrs field declarations ---
    name: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    asset_type: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    customer_review: float = attr.ib(
        converter=float,
        validator=instance_of(float)
    )
    available_sizes: List[str] = attr.ib(
        converter=lambda x: [str(_) for _ in x],
        validator=instance_of(list)
    )
    color: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    pattern: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    brand: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    sleeve_length: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    type: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    price: float = attr.ib(
        converter=float,
        validator=instance_of(float)
    )
    size: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    # --- validators (see class docstring re: lost decorators) ---
    def check_in(attribute, value, listing):
        """Universal checker that validates if value is in the given list."""
        if value not in listing:
            raise ValueError("{} must be one of {}, but received {}.".format(attribute.name, listing, value))
    def check(self, attribute, value):
        # validator for asset_type
        self.check_in(attribute, value, FASHION_ASSET_TYPE)
    def check(self, attribute, value):
        # validator for brand
        self.check_in(attribute, value, FASHION_BRAND)
    def check(self, attribute, value):
        # validator for pattern
        self.check_in(attribute, value, FASHION_PATTERN)
    def check(self, attribute, value):
        # validator for color
        self.check_in(attribute, value, FASHION_COLOR)
    def check(self, attribute, value):
        # validator for sleeve_length
        self.check_in(attribute ,value, FASHION_SLEEVE_LENGTH)
    def check(self, attribute, value):
        # validator for type
        self.check_in(attribute, value, FASHION_TYPE)
    def check(self, attribute, value):
        # available_sizes must overlap the known FASHION_SIZES
        common = set(value) & set(FASHION_SIZES)
        if len(common) < 1:
            raise ValueError("Available sizes must be one of {}, but receieved {}.".format(FASHION_SIZES, value))
    def check(self, attribute, value):
        # size must be one of this item's own available_sizes
        listing = getattr(self, "available_sizes")
        self.check_in(attribute, value, listing)
    def check(self, attribute, value):
        # customer_review must be a rating in [0.0, 5.0]
        if not (0.0 <= value <= 5.0):
            raise ValueError("Rating must be in range [0.0, 5.0].")
class FurnitureMetadata:
    """
    Metadata record for a single furniture prefab (one catalogue item).

    NOTE(review): like FashionMetadata, the bare `attr.ib` fields and the
    repeated `check` methods suggest stripped `@attr.s` / `@<field>.validator`
    decorators — as written the later `def check` definitions shadow the
    earlier ones and `check_in` lacks `self`. Confirm against the original.
    """
    # --- attrs field declarations ---
    name: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    brand: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    color: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    customer_review: float = attr.ib(
        converter=float,
        validator=instance_of(float)
    )
    materials: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    price: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    type: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    # --- validators (see class docstring re: lost decorators) ---
    def check_in(attribute, value, listing):
        """Universal checker that validates if value is in the given list."""
        if value not in listing:
            raise ValueError("{} must be one of {}, but received {}.".format(attribute.name, listing, value))
    def check(self, attribute, value):
        # validator for brand
        self.check_in(attribute, value, FURNITURE_BRAND)
    def check(self, attribute, value):
        # validator for color
        self.check_in(attribute, value, FURNITURE_COLOR)
    def check(self, attribute, value):
        # validator for materials
        self.check_in(attribute, value, FURNITURE_MATERIALS)
    def check(self, attribute, value):
        # validator for type
        self.check_in(attribute, value, FURNITURE_TYPE)
    def check(self, attribute, value):
        # customer_review must be a rating in [0.0, 5.0]
        if not (0.0 <= value <= 5.0):
            raise ValueError("Rating must be in range [0.0, 5.0].")
The provided code snippet includes necessary dependencies for implementing the `main_function` function. Write a Python function `def main_function()` to solve the following problem:
Converts each key from CamelCase to snake_case. Also changes some key names to be more consistent across dataset.
Here is the function:
def main_function():
    """
    Load the fashion and furniture prefab metadata files and wrap every entry
    in its metadata class.

    Converts each key from CamelCase to snake_case and unifies
    "customer_rating" / "customer_review" into the single key
    "customer_review".

    Fixes vs. original: the `json.load(open(...))` file-handle leak is closed
    via `with`, and the duplicated load/wrap logic is factored into a helper.

    Returns:
        (fashion_items, furniture_items): lists of FashionMetadata /
        FurnitureMetadata instances, one per prefab.
    """
    _underscore1 = re.compile(r'(.)([A-Z][a-z]+)')
    _underscore2 = re.compile(r'([a-z0-9])([A-Z])')

    def key_map(key):
        # CamelCase -> snake_case in two regex passes.
        subbed = _underscore1.sub(r'\1_\2', key)
        subbed = _underscore2.sub(r'\1_\2', subbed).lower()
        # Both rating spellings map to a single canonical key.
        if subbed in ("customer_review", "customer_rating"):
            return "customer_review"
        return subbed

    def _load_items(file_name, metadata_cls):
        # Read one prefab metadata json and build metadata objects from it.
        with open(os.path.join(DATA_DIR, file_name), 'r') as f_in:
            raw_items = json.load(f_in)
        return [
            metadata_cls(name=key, **{key_map(k): v for k, v in value.items()})
            for key, value in raw_items.items()
        ]

    fashion_items = _load_items("fashion_prefab_metadata_all.json", FashionMetadata)
    furniture_items = _load_items("furniture_prefab_metadata_all.json", FurnitureMetadata)
    return fashion_items, furniture_items
165,151 | import json
import os
import re
from typing import List
import attr
from attr.validators import instance_of
class FashionMetadata:
    """
    Metadata record for a single fashion prefab (one catalogue item).

    NOTE(review): the bare `attr.ib` fields plus the many identically named
    `check` methods strongly suggest this class originally carried `@attr.s`
    and per-field `@<field>.validator` decorators that were stripped when the
    file was flattened. As written, each later `def check` shadows the previous
    one and `check_in` lacks `self` — confirm against the original source.
    """
    # --- attrs field declarations ---
    name: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    asset_type: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    customer_review: float = attr.ib(
        converter=float,
        validator=instance_of(float)
    )
    available_sizes: List[str] = attr.ib(
        converter=lambda x: [str(_) for _ in x],
        validator=instance_of(list)
    )
    color: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    pattern: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    brand: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    sleeve_length: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    type: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    price: float = attr.ib(
        converter=float,
        validator=instance_of(float)
    )
    size: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    # --- validators (see class docstring re: lost decorators) ---
    def check_in(attribute, value, listing):
        """Universal checker that validates if value is in the given list."""
        if value not in listing:
            raise ValueError("{} must be one of {}, but received {}.".format(attribute.name, listing, value))
    def check(self, attribute, value):
        # validator for asset_type
        self.check_in(attribute, value, FASHION_ASSET_TYPE)
    def check(self, attribute, value):
        # validator for brand
        self.check_in(attribute, value, FASHION_BRAND)
    def check(self, attribute, value):
        # validator for pattern
        self.check_in(attribute, value, FASHION_PATTERN)
    def check(self, attribute, value):
        # validator for color
        self.check_in(attribute, value, FASHION_COLOR)
    def check(self, attribute, value):
        # validator for sleeve_length
        self.check_in(attribute ,value, FASHION_SLEEVE_LENGTH)
    def check(self, attribute, value):
        # validator for type
        self.check_in(attribute, value, FASHION_TYPE)
    def check(self, attribute, value):
        # available_sizes must overlap the known FASHION_SIZES
        common = set(value) & set(FASHION_SIZES)
        if len(common) < 1:
            raise ValueError("Available sizes must be one of {}, but receieved {}.".format(FASHION_SIZES, value))
    def check(self, attribute, value):
        # size must be one of this item's own available_sizes
        listing = getattr(self, "available_sizes")
        self.check_in(attribute, value, listing)
    def check(self, attribute, value):
        # customer_review must be a rating in [0.0, 5.0]
        if not (0.0 <= value <= 5.0):
            raise ValueError("Rating must be in range [0.0, 5.0].")
class FurnitureMetadata:
    """Typed record for one furniture prefab's metadata entry.

    NOTE(review): same extraction damage as ``FashionMetadata`` — the
    ``@attr.s`` class decorator and per-field ``@<field>.validator``
    decorators on the repeated ``def check`` methods appear to have been
    stripped; ``check_in`` was presumably a ``@staticmethod``. The
    ``FURNITURE_*`` constants are defined elsewhere in the file.
    """

    # Prefab identifier — the JSON key this record was loaded from.
    name: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    brand: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    color: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    # Rating; the range validator below restricts it to [0.0, 5.0].
    customer_review: float = attr.ib(
        converter=float,
        validator=instance_of(float)
    )
    materials: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    # NOTE(review): price is a string here but a float on FashionMetadata —
    # looks intentional (furniture prices may be formatted), but confirm.
    price: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )
    type: str = attr.ib(
        converter=str,
        validator=instance_of(str)
    )

    def check_in(attribute, value, listing):
        """Universal checker that validates if value is in the given list."""
        if value not in listing:
            raise ValueError("{} must be one of {}, but received {}.".format(attribute.name, listing, value))

    # Presumably the validator for ``brand`` (uses FURNITURE_BRAND).
    def check(self, attribute, value):
        self.check_in(attribute, value, FURNITURE_BRAND)

    # Presumably the validator for ``color`` (uses FURNITURE_COLOR).
    def check(self, attribute, value):
        self.check_in(attribute, value, FURNITURE_COLOR)

    # Presumably the validator for ``materials`` (uses FURNITURE_MATERIALS).
    def check(self, attribute, value):
        self.check_in(attribute, value, FURNITURE_MATERIALS)

    # Presumably the validator for ``type`` (uses FURNITURE_TYPE).
    def check(self, attribute, value):
        self.check_in(attribute, value, FURNITURE_TYPE)

    # Presumably the validator for ``customer_review``.
    def check(self, attribute, value):
        if not (0.0 <= value <= 5.0):
            raise ValueError("Rating must be in range [0.0, 5.0].")
The provided code snippet includes necessary dependencies for implementing the `load_metadata` function. Write a Python function `def load_metadata(data_dir)` to solve the following problem:
Converts each key from CamelCase to snake_case. Also changes some key names to be more consistent across dataset.
Here is the function:
def load_metadata(data_dir):
    """Load fashion and furniture prefab metadata from *data_dir*.

    Converts each metadata key from CamelCase to snake_case and unifies the
    inconsistent ``customer_rating``/``customer_review`` key names so both
    datasets share one schema.

    Args:
        data_dir: Directory containing ``fashion_prefab_metadata_all.json``
            and ``furniture_prefab_metadata_all.json``.

    Returns:
        Tuple ``(fashion_items, furniture_items)`` of ``FashionMetadata``
        and ``FurnitureMetadata`` instances.
    """
    # Two-pass CamelCase -> snake_case conversion (same approach as
    # inflection/Django): first split e.g. "ABCWord", then "wordWord".
    _underscore1 = re.compile(r'(.)([A-Z][a-z]+)')
    _underscore2 = re.compile(r'([a-z0-9])([A-Z])')

    def key_map(key):
        subbed = _underscore1.sub(r'\1_\2', key)
        subbed = _underscore2.sub(r'\1_\2', subbed).lower()
        # The two datasets use different spellings for the same field.
        if subbed in ("customer_review", "customer_rating"):
            return "customer_review"
        return subbed

    def _load_items(file_name, item_cls):
        # ``with`` guarantees the handle is closed (the original leaked the
        # file objects returned by the bare ``open(...)`` calls).
        path = os.path.join(data_dir, file_name)
        with open(path, 'r', encoding='utf-8') as fp:
            raw = json.load(fp)
        return [
            item_cls(name=key, **{key_map(k): v for k, v in value.items()})
            for key, value in raw.items()
        ]

    fashion_items = _load_items("fashion_prefab_metadata_all.json", FashionMetadata)
    furniture_items = _load_items("furniture_prefab_metadata_all.json", FurnitureMetadata)
    return fashion_items, furniture_items
165,152 | import argparse
import logging
import math
import os
import sys
from typing import Dict, Optional, Any, List, Tuple, Callable
logger = logging.getLogger("fairseq_cli.train")
import numpy as np
import torch
from fairseq import (
# checkpoint_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.data.plasma_utils import PlasmaStore
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from omegaconf import DictConfig, OmegaConf
from utils import checkpoint_utils
from trainer import Trainer
import ipdb
def _flatten_config(cfg: DictConfig):
    """Convert *cfg* to a plain container, folding any legacy
    ``argparse.Namespace`` values into a single ``"args"`` entry."""
    flat = OmegaConf.to_container(cfg)
    # Strip legacy Namespace entries; remember the last one seen so its
    # contents can be re-attached under a single "args" key.
    legacy_ns = None
    for key in list(flat.keys()):
        if isinstance(flat[key], argparse.Namespace):
            legacy_ns = flat.pop(key)
    if legacy_ns is not None:
        flat["args"] = vars(legacy_ns)
    return flat
def validate_and_save(
    cfg: DictConfig,
    trainer: Trainer,
    task: tasks.FairseqTask,
    epoch_itr,
    valid_subsets: List[str],
    end_of_epoch: bool,
) -> Tuple[List[Optional[float]], bool]:
    """Decide whether to validate and/or checkpoint at this training point.

    Returns ``(valid_losses, should_stop)``; ``valid_losses`` is ``[None]``
    when no validation ran this call.
    """
    num_updates = trainer.get_num_updates()
    max_update = cfg.optimization.max_update or math.inf

    # Stopping conditions (and an additional one based on validation loss later
    # on)
    should_stop = False
    if num_updates >= max_update:
        should_stop = True
        logger.info(
            f"Stopping training due to "
            f"num_updates: {num_updates} >= max_update: {max_update}"
        )

    training_time_hours = trainer.cumulative_training_time() / (60 * 60)
    if (
        cfg.optimization.stop_time_hours > 0
        and training_time_hours > cfg.optimization.stop_time_hours
    ):
        should_stop = True
        logger.info(
            f"Stopping training due to "
            f"cumulative_training_time: {training_time_hours} > "
            f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)"
        )

    # Checkpoint at epoch boundaries, on stop, or every N updates.
    do_save = (
        (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
        or should_stop
        or (
            cfg.checkpoint.save_interval_updates > 0
            and num_updates > 0
            and num_updates % cfg.checkpoint.save_interval_updates == 0
            and num_updates >= cfg.dataset.validate_after_updates
        )
    )
    # NOTE(review): do_validate is computed but never used below — the
    # validation gate was replaced by the simpler end-of-epoch condition
    # (see the commented-out "#if do_validate:"). Confirm this is intentional.
    do_validate = (
        (not end_of_epoch and do_save)  # validate during mid-epoch saves
        or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
        or should_stop
        or (
            cfg.dataset.validate_interval_updates > 0
            and num_updates > 0
            and num_updates % cfg.dataset.validate_interval_updates == 0
        )
    ) and not cfg.dataset.disable_validation and num_updates >= cfg.dataset.validate_after_updates

    # Validate
    valid_losses = [None]
    if end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0 and not cfg.dataset.disable_validation:
    #if do_validate:
        valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
        should_stop |= should_stop_early(cfg, valid_losses[0])

    # Save checkpoint
    if do_save or should_stop:
        checkpoint_utils.save_checkpoint(
            cfg.checkpoint, trainer, epoch_itr, valid_losses[0]
        )

    return valid_losses, should_stop
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
    """Attach elapsed wall-clock seconds (rounded) to *stats* and return it."""
    wall_meter = metrics.get_meter("default", "wall")
    stats["wall"] = round(wall_meter.elapsed_time, 0)
    return stats
class Trainer(object):
    """Generic training harness tying together a model, data reader,
    optimizer/scheduler, and evaluator for dialog-style training.

    NOTE(review): ``add_cmdline_argument`` takes ``cls`` — presumably a
    ``@classmethod`` whose decorator was stripped during extraction; verify
    against the original source. ``train_epoch`` and ``infer`` are abstract
    hooks to be implemented by subclasses.
    """

    def add_cmdline_argument(cls, parser):
        """ Add the cmdline arguments of trainer. """
        group = parser.add_argument_group("Trainer")
        group.add_argument("--seed", type=int, default=11,
                           help="The number of seed to fix random operations.")
        group.add_argument("--gpu", type=int, default=0,
                           help="Whether to use gpu for running, default using cpu.")
        group.add_argument("--valid_metric_name", type=str, default="-loss",
                           help="The validation metric determining which checkpoint is the best.")
        group.add_argument("--num_epochs", type=int, default=10,
                           help="Total number of training epochs to perform.")
        group.add_argument("--save_dir", type=str, required=True,
                           help="The output directory where the model will be saved.")
        group.add_argument("--token_loss", type=str2bool, default=True,
                           help="Whether to update token loss or sentence loss.")
        group.add_argument("--batch_size", type=int, default=8,
                           help="Total batch size for training/evaluation/inference.")
        group.add_argument("--log_steps", type=int, default=100,
                           help="The number of training steps to output current metrics "
                                "on past training dataset.")
        group.add_argument("--valid_steps", type=int, default=2000,
                           help="The number of training steps to perform a evaluation "
                                "on validation datasets.")
        group.add_argument("--save_checkpoint", type=str2bool, default=True,
                           help="Whether to save one checkpoints for each training epoch.")
        DataLoader.add_cmdline_argument(group)
        return group

    def __init__(self, model, to_tensor, hparams, logger=None, lr_scheduler=None, optimizer=None,
                 reader=None, evaluator=None):
        # Callable that converts raw batches to tensors.
        self.to_tensor = to_tensor
        self.hparams = hparams
        self.do_train = hparams.do_train
        self.do_infer = hparams.do_infer
        self.data_name = hparams.data_name
        # A leading "-" on the metric name means "lower is better".
        self.is_decreased_valid_metric = hparams.valid_metric_name[0] == "-"
        self.valid_metric_name = hparams.valid_metric_name[1:]
        self.num_epochs = hparams.num_epochs
        self.save_dir = hparams.save_dir
        self.log_steps = hparams.log_steps
        self.valid_steps = hparams.valid_steps
        self.save_checkpoint = hparams.save_checkpoint
        self.gradient_accumulation_steps = hparams.gradient_accumulation_steps
        self.weight_decay = hparams.weight_decay
        self.batch_size = hparams.batch_size
        self.warmup_steps = hparams.warmup_steps
        self.gpu = hparams.gpu
        self.lr = hparams.lr
        self.lr_scheduler = lr_scheduler
        self.optimizer = optimizer
        self.model = model
        # Unwrap the underlying module when wrapped for multi-GPU
        # (presumably DataParallel — TODO confirm).
        self.func_model = self.model.module if self.gpu > 1 else self.model
        self.reader = reader
        self.evaluator = evaluator
        self.tokenizer = reader.tokenizer

        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)

        self.logger = logger or get_logger(os.path.join(self.save_dir, "trainer.log"), "trainer")

        self.batch_metrics_tracker = MetricsTracker()
        self.token_metrics_tracker = MetricsTracker()

        # Best-so-far validation metric, initialized to the worst possible
        # value given the metric's direction.
        self.best_valid_metric = float("inf" if self.is_decreased_valid_metric else "-inf")
        self.epoch = 0

    def decode_generated_bspn_resp(self, generated):
        """
        decode generated
        return decoded ('bspn', 'resp')
        """
        decoded = {}
        eos_r_id = self.reader.eos_r_id
        eos_b_id = self.reader.eos_b_id

        # eos_r may not exists if galaxy generated repetitive words.
        if eos_r_id in generated:
            eos_r_idx = generated.index(eos_r_id)
        else:
            eos_r_idx = len(generated) - 1
            # self.logger.info('eos_r not in generated: ' + self.tokenizer.decode(generated))

        # predicted bspn, resp
        eos_b_idx = generated.index(eos_b_id)
        decoded['bspn'] = generated[:eos_b_idx + 1]
        decoded['resp'] = generated[eos_b_idx + 1: eos_r_idx + 1]
        return decoded

    def decode_generated_act_resp(self, generated):
        """
        decode generated
        return decoded['resp'] ('bspn', 'aspn')
        """
        decoded = {}
        eos_a_id = self.reader.eos_a_id
        eos_r_id = self.reader.eos_r_id
        eos_b_id = self.reader.eos_b_id

        # eos_r may not exists if galaxy generated repetitive words.
        if eos_r_id in generated:
            eos_r_idx = generated.index(eos_r_id)
        else:
            eos_r_idx = len(generated) - 1
            self.logger.info('eos_r not in generated: ' + self.tokenizer.decode(generated))

        if self.reader.use_true_curr_aspn:  # only predict resp
            decoded['resp'] = generated[: eos_r_idx + 1]
        else:  # predicted aspn, resp
            eos_a_idx = generated.index(eos_a_id)
            decoded['aspn'] = generated[: eos_a_idx + 1]
            decoded['resp'] = generated[eos_a_idx + 1: eos_r_idx + 1]
        return decoded

    def decode_generated_bspn(self, generated):
        # Return the generated prefix up to and including the belief-state
        # EOS token (or the whole sequence if the EOS was never produced).
        eos_b_id = self.reader.eos_b_id
        if eos_b_id in generated:
            eos_b_idx = generated.index(eos_b_id)
        else:
            eos_b_idx = len(generated) - 1
        return generated[: eos_b_idx + 1]

    def set_optimizers(self):
        """
        Setup the optimizer and the learning rate scheduler.

        from transformers.Trainer

        parameters from cfg: lr (1e-3); warmup_steps
        """
        # Prepare optimizer and schedule (linear warmup and decay)
        # Bias and norm weights are excluded from weight decay, as is
        # conventional for transformer training.
        no_decay = ["bias", "norm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": self.weight_decay,
            },
            {
                "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.lr)
        num_training_steps = self.reader.set_stats['train']['num_training_steps_per_epoch'] * \
                             self.num_epochs // self.gradient_accumulation_steps
        # Negative warmup_steps means "use 10% of total steps".
        num_warmup_steps = self.warmup_steps if self.warmup_steps >= 0 else int(num_training_steps * 0.1)
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps
        )
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler

    def train(self, train_data, dev_data):
        """Log the training setup, then run the remaining epochs."""
        # log info
        set_stats = self.reader.set_stats['train']
        self.logger.info("***** Running training *****")
        self.logger.info(" Num Training steps(one turn in a batch of dialogs) per epoch = %d",
                         set_stats['num_training_steps_per_epoch'])
        self.logger.info(" Num Turns = %d", set_stats['num_turns'])
        self.logger.info(" Num Dialogs = %d", set_stats['num_dials'])
        self.logger.info(" Num Epochs = %d", self.num_epochs)
        self.logger.info(" Batch size = %d", self.batch_size)
        self.logger.info(" Gradient Accumulation steps = %d", self.gradient_accumulation_steps)
        self.logger.info(" Total optimization steps = %d", set_stats['num_training_steps_per_epoch'] *
                         self.num_epochs // self.gradient_accumulation_steps)

        # begin training
        # self.epoch may be non-zero after resuming from a checkpoint.
        num_epochs = self.num_epochs - self.epoch
        for epoch in range(num_epochs):
            self.train_epoch(train_data=train_data, dev_data=dev_data)

    def train_epoch(self, train_data, dev_data):
        """
        Train an epoch.
        """
        raise NotImplementedError

    def infer(self, data_type):
        """
        Inference interface.
        """
        raise NotImplementedError

    def save(self, is_best=False):
        """ save """
        # Train-state snapshot allows resuming with the same optimizer /
        # scheduler / best-metric bookkeeping.
        train_state = {"epoch": self.epoch,
                       "best_valid_metric": self.best_valid_metric,
                       "optimizer": self.optimizer.state_dict()}
        if self.lr_scheduler is not None:
            train_state["lr_scheduler"] = self.lr_scheduler.state_dict()

        # Save checkpoint
        if self.save_checkpoint:
            model_file = os.path.join(self.save_dir, f"state_epoch_{self.epoch}.model")
            torch.save(self.model.state_dict(), model_file)
            self.logger.info(f"Saved model state to '{model_file}'")

            train_file = os.path.join(self.save_dir, f"state_epoch_{self.epoch}.train")
            torch.save(train_state, train_file)
            self.logger.info(f"Saved train state to '{train_file}'")

        # Save current best model
        if is_best:
            best_model_file = os.path.join(self.save_dir, "best.model")
            torch.save(self.model.state_dict(), best_model_file)
            best_train_file = os.path.join(self.save_dir, "best.train")
            torch.save(train_state, best_train_file)
            self.logger.info(
                f"Saved best model state to '{best_model_file}' with new best valid metric "
                f"{self.valid_metric_name.upper()}={self.best_valid_metric:.3f}")

    def load(self):
        """ load """

        def _load_model_state():
            # Load model weights, adapting to shape mismatches: overlapping
            # slices are copied, the remainder is randomly initialized.
            model_state_dict = torch.load(f'{self.func_model.init_checkpoint}.model',
                                          map_location=lambda storage, loc: storage)

            # Strip a leading 'module.' prefix left by DataParallel wrapping.
            if 'module.' in list(model_state_dict.keys())[0]:
                new_model_state_dict = OrderedDict()
                for k, v in model_state_dict.items():
                    assert k[:7] == 'module.'
                    new_model_state_dict[k[7:]] = v
                model_state_dict = new_model_state_dict

            new_model_state_dict = OrderedDict()
            parameters = {name: param for name, param in self.func_model.named_parameters()}
            for name, param in model_state_dict.items():
                if name in parameters:
                    if param.shape != parameters[name].shape:
                        assert hasattr(param, "numpy")
                        arr = param.numpy()
                        z = np.random.normal(scale=self.func_model.initializer_range,
                                             size=parameters[name].shape).astype("float32")
                        if name == 'embedder.token_embedding.weight':
                            # New vocabulary entries go at the front; the
                            # checkpoint's embeddings keep the tail slots.
                            z[-param.shape[0]:] = arr
                            print(f"part of parameter({name}) random normlize initialize")
                        else:
                            if z.shape[0] < param.shape[0]:
                                z = arr[:z.shape[0]]
                                print(f"part of parameter({name}) are dropped")
                            else:
                                z[:param.shape[0]] = arr
                                print(f"part of parameter({name}) random normlize initialize")
                        dtype, device = param.dtype, param.device
                        z = torch.tensor(z, dtype=dtype, device=device)
                        new_model_state_dict[name] = z
                    else:
                        new_model_state_dict[name] = param
                else:
                    print(f"parameter({name}) are dropped")
            model_state_dict = new_model_state_dict

            # Any model parameter absent from the checkpoint gets random
            # init (if trainable) or its current value (if frozen).
            for name in parameters:
                if name not in model_state_dict:
                    if parameters[name].requires_grad:
                        print(f"parameter({name}) random normlize initialize")
                        z = np.random.normal(scale=self.func_model.initializer_range,
                                             size=parameters[name].shape).astype("float32")
                        dtype, device = parameters[name].dtype, parameters[name].device
                        model_state_dict[name] = torch.tensor(z, dtype=dtype, device=device)
                    else:
                        model_state_dict[name] = parameters[name]

            self.func_model.load_state_dict(model_state_dict)
            self.logger.info(f"Loaded model state from '{self.func_model.init_checkpoint}.model'")

        def _load_train_state():
            # Restore optimizer/scheduler/epoch bookkeeping if a matching
            # .train snapshot exists alongside the model file.
            train_file = f"{self.func_model.init_checkpoint}.train"
            if os.path.exists(train_file):
                train_state_dict = torch.load(train_file, map_location=lambda storage, loc: storage)
                self.epoch = train_state_dict["epoch"]
                self.best_valid_metric = train_state_dict["best_valid_metric"]
                if self.optimizer is not None and "optimizer" in train_state_dict:
                    self.optimizer.load_state_dict(train_state_dict["optimizer"])
                if self.lr_scheduler is not None and "lr_scheduler" in train_state_dict:
                    self.lr_scheduler.load_state_dict(train_state_dict["lr_scheduler"])
                self.logger.info(
                    f"Loaded train state from '{train_file}' with (epoch-{self.epoch} "
                    f"best_valid_metric={self.best_valid_metric:.3f})")
            else:
                self.logger.info(f"Loaded no train state")

        if self.func_model.init_checkpoint is None:
            self.logger.info(f"Loaded no model !!!")
            return

        _load_model_state()
        _load_train_state()
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]` to solve the following problem:
Train the model for one epoch and return validation losses.
Here is the function:
def train(
    cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
    """Train the model for one epoch and return validation losses."""
    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
        shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
    )
    # Group batches so one "sample" yields update_freq gradient-accumulation
    # micro-batches; update_freq may be scheduled per epoch.
    update_freq = (
        cfg.optimization.update_freq[epoch_itr.epoch - 1]
        if epoch_itr.epoch <= len(cfg.optimization.update_freq)
        else cfg.optimization.update_freq[-1]
    )
    itr = iterators.GroupedIterator(itr, update_freq)
    if cfg.common.tpu:
        itr = utils.tpu_data_loader(itr)
    # Progress/metrics reporting; TB/W&B/AzureML logging only on the master.
    progress = progress_bar.progress_bar(
        itr,
        log_format=cfg.common.log_format,
        log_file=cfg.common.log_file,
        log_interval=cfg.common.log_interval,
        epoch=epoch_itr.epoch,
        tensorboard_logdir=(
            cfg.common.tensorboard_logdir
            if distributed_utils.is_master(cfg.distributed_training)
            else None
        ),
        default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
        wandb_project=(
            cfg.common.wandb_project
            if distributed_utils.is_master(cfg.distributed_training)
            else None
        ),
        wandb_run_name=os.environ.get(
            "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
        ),
        azureml_logging=(
            cfg.common.azureml_logging
            if distributed_utils.is_master(cfg.distributed_training)
            else False
        ),
    )
    progress.update_config(_flatten_config(cfg))

    trainer.begin_epoch(epoch_itr.epoch)

    valid_subsets = cfg.dataset.valid_subset.split(",")
    should_stop = False
    num_updates = trainer.get_num_updates()
    logger.info("Start iterating over samples")
    for i, samples in enumerate(progress):
        with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
            "train_step-%d" % i
        ):
            log_output = trainer.train_step(samples)

        if log_output is not None:  # not OOM, overflow, ...
            # log mid-epoch stats
            num_updates = trainer.get_num_updates()
            if num_updates % cfg.common.log_interval == 0:
                stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
                progress.log(stats, tag="train_inner", step=num_updates)

                # reset mid-epoch stats after each log interval
                # the end-of-epoch stats will still be preserved
                metrics.reset_meters("train_inner")

        end_of_epoch = not itr.has_next()
        # May validate and/or checkpoint depending on cfg intervals.
        valid_losses, should_stop = validate_and_save(cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch)

        if should_stop:
            break

    # log end-of-epoch stats
    logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
    stats = get_training_stats(metrics.get_smoothed_values("train"))
    progress.print(stats, tag="train", step=num_updates)

    # reset epoch-level meters
    metrics.reset_meters("train")
    return valid_losses, should_stop
165,153 | import argparse
import logging
import math
import os
import sys
from typing import Dict, Optional, Any, List, Tuple, Callable
logger = logging.getLogger("fairseq_cli.train")
import numpy as np
import torch
from fairseq import (
# checkpoint_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.data.plasma_utils import PlasmaStore
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from omegaconf import DictConfig, OmegaConf
from utils import checkpoint_utils
from trainer import Trainer
import ipdb
def main(cfg: FairseqConfig) -> None:
    """Top-level training entry point: build task/model/criterion/trainer,
    restore the latest checkpoint, and run the epoch loop until a stopping
    condition is met."""
    if isinstance(cfg, argparse.Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)

    utils.import_user_module(cfg.common)

    if distributed_utils.is_master(cfg.distributed_training) and "job_logging_cfg" in cfg:
        # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126)
        logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))

    assert (
        cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
    ), "Must specify batch size either with --max-tokens or --batch-size"
    metrics.reset()

    if cfg.common.log_file is not None:
        handler = logging.FileHandler(filename=cfg.common.log_file)
        logger.addHandler(handler)

    np.random.seed(cfg.common.seed)
    utils.set_torch_seed(cfg.common.seed)

    if distributed_utils.is_master(cfg.distributed_training):
        checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)

    # Print args
    logger.info(cfg)

    if cfg.checkpoint.write_checkpoints_asynchronously:
        try:
            import iopath  # noqa: F401
        except ImportError:
            logging.exception(
                "Asynchronous checkpoint writing is specified but iopath is "
                "not installed: `pip install iopath`"
            )
            return

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(cfg.task)

    assert cfg.criterion, "Please specify criterion to train a model"

    # Build model and criterion
    if cfg.distributed_training.ddp_backend == "fully_sharded":
        with fsdp_enable_wrap(cfg.distributed_training):
            model = fsdp_wrap(task.build_model(cfg.model))
    else:
        model = task.build_model(cfg.model)
    criterion = task.build_criterion(cfg.criterion)
    logger.info(model)
    logger.info("task: {}".format(task.__class__.__name__))
    logger.info("model: {}".format(model.__class__.__name__))
    logger.info("criterion: {}".format(criterion.__class__.__name__))
    logger.info(
        "num. shared model params: {:,} (num. trained: {:,})".format(
            sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False)),
            sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False) and p.requires_grad)
        )
    )

    logger.info(
        "num. expert model params: {} (num. trained: {})".format(
            sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)),
            sum(p.numel() for p in model.parameters() if getattr(p, "expert", False) and p.requires_grad),
        )
    )

    # Load valid dataset (we load training data below, based on the latest checkpoint)
    # We load the valid dataset AFTER building the model
    # data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg)
    if cfg.dataset.combine_valid_subsets:
        task.load_dataset("valid", combine=True, epoch=1)
    else:
        for valid_sub_split in cfg.dataset.valid_subset.split(","):
            task.load_dataset(valid_sub_split, combine=False, epoch=1)

    # (optionally) Configure quantization
    if cfg.common.quantization_config_path is not None:
        quantizer = quantization_utils.Quantizer(
            config_path=cfg.common.quantization_config_path,
            max_epoch=cfg.optimization.max_epoch,
            max_update=cfg.optimization.max_update,
        )
    else:
        quantizer = None

    # Build trainer
    if cfg.common.model_parallel_size == 1:
        trainer = Trainer(cfg, task, model, criterion, quantizer)
    else:
        trainer = MegatronTrainer(cfg, task, model, criterion)
    logger.info(
        "training on {} devices (GPUs/TPUs)".format(
            cfg.distributed_training.distributed_world_size
        )
    )
    logger.info(
        "max tokens per device = {} and max sentences per device = {}".format(
            cfg.dataset.max_tokens,
            cfg.dataset.batch_size,
        )
    )

    # Load the latest checkpoint if one is available and restore the
    # corresponding train iterator
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
        cfg.checkpoint,
        trainer,
        # don't cache epoch iterators for sharded datasets
        disable_iterator_cache=True,
    )
    if cfg.common.tpu:
        import torch_xla.core.xla_model as xm
        xm.rendezvous("load_checkpoint")  # wait for all workers

    max_epoch = cfg.optimization.max_epoch or math.inf
    if max_epoch > 0 and max_epoch != math.inf:
        # Re-derive the LR schedule length from the true number of updates,
        # honoring per-epoch update_freq entries.
        total_num_updates = sum(
            math.ceil(len(epoch_itr) / cfg.optimization.update_freq[i])
            if i < len(cfg.optimization.update_freq) else
            math.ceil(len(epoch_itr) / cfg.optimization.update_freq[-1])
            for i in range(max_epoch)
        )
        trainer.lr_reinit(total_num_updates, trainer.get_num_updates())

    lr = trainer.get_lr()
    train_meter = meters.StopwatchMeter()
    train_meter.start()
    while epoch_itr.next_epoch_idx <= max_epoch:
        if lr <= cfg.optimization.stop_min_lr:
            logger.info(
                f"stopping training because current learning rate ({lr}) is smaller "
                "than or equal to minimum learning rate "
                f"(--stop-min-lr={cfg.optimization.stop_min_lr})"
            )
            break

        # train for one epoch
        valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
        if should_stop:
            break

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        epoch_itr = trainer.get_train_iterator(
            epoch_itr.next_epoch_idx,
            # sharded data: get train iterator for next epoch
            load_dataset=True,
            # don't cache epoch iterators for sharded datasets
            disable_iterator_cache=True,
        )
    train_meter.stop()
    logger.info("done training in {:.1f} seconds".format(train_meter.sum))

    # ioPath implementation to wait for all asynchronous file writes to complete.
    if cfg.checkpoint.write_checkpoints_asynchronously:
        logger.info(
            "ioPath PathManager waiting for all asynchronous checkpoint "
            "writes to finish."
        )
        PathManager.async_close()
        logger.info("ioPath PathManager finished waiting.")
def cli_main(
    modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
    """Parse command-line arguments into a fairseq config and launch
    (possibly distributed, optionally profiled) training via ``main``.

    NOTE(review): when ``use_plasma_view`` is set, the plasma server started
    here is never shut down — the kill call is commented out below. Confirm
    whether that leak is intentional.
    """
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
    cfg = convert_namespace_to_omegaconf(args)

    if cfg.common.use_plasma_view:
        server = PlasmaStore(path=cfg.common.plasma_path)
        logger.info(f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}")

    if args.profile:
        # Wrap the whole run in CUDA/NVTX profiling scopes.
        with torch.cuda.profiler.profile():
            with torch.autograd.profiler.emit_nvtx():
                distributed_utils.call_main(cfg, main)
    else:
        distributed_utils.call_main(cfg, main)

    # if cfg.common.use_plasma_view:
    #     server.server.kill()
165,154 | import contextlib
import logging
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Any, Dict, List
import torch
from fairseq import models, optim, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.models.ema import build_ema
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
from omegaconf import OmegaConf
import random
from utils import checkpoint_utils
def _catalog_shared_params(module, memo=None, prefix=""):
if memo is None:
first_call = True
memo = {}
else:
first_call = False
for name, param in module._parameters.items():
param_prefix = prefix + ("." if prefix else "") + name
if param not in memo:
memo[param] = []
memo[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
_catalog_shared_params(m, memo, submodule_prefix)
if first_call:
return [x for x in memo.values() if len(x) > 1] | null |
165,155 | import contextlib
import logging
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Any, Dict, List
import torch
from fairseq import models, optim, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.models.ema import build_ema
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
from omegaconf import OmegaConf
import random
from utils import checkpoint_utils
def _get_module_by_path(module, path):
path = path.split(".")
for name in path:
module = getattr(module, name)
return module | null |
165,156 | import contextlib
import logging
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Any, Dict, List
import torch
from fairseq import models, optim, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.models.ema import build_ema
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
from omegaconf import OmegaConf
import random
from utils import checkpoint_utils
def _set_module_by_path(module, path, value):
path = path.split(".")
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value) | null |
165,157 | from io import BytesIO
import logging
import warnings
import string
import numpy as np
import torch
import base64
from torchvision import transforms
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
def collate(samples, pad_idx, eos_idx):
    """Collate a list of per-example dicts into one padded mini-batch dict.

    Each sample provides "id", "source", "patch_image", "patch_mask" and
    optionally "target"/"prev_output_tokens"; token sequences are padded
    with *pad_idx* via ``data_utils.collate_tokens``.
    """
    if not samples:
        return {}

    def merge(key):
        # Pad the per-sample sequences under *key* into one tensor.
        return data_utils.collate_tokens(
            [sample[key] for sample in samples],
            pad_idx,
            eos_idx=eos_idx,
        )

    sample_ids = np.array([sample["id"] for sample in samples])
    src_tokens = merge("source")
    src_lengths = torch.LongTensor(
        [sample["source"].ne(pad_idx).long().sum() for sample in samples]
    )

    patch_images = torch.stack([sample["patch_image"] for sample in samples], dim=0)
    patch_masks = torch.cat([sample["patch_mask"] for sample in samples])

    prev_output_tokens = None
    target = None
    if samples[0].get("target", None) is not None:
        target = merge("target")
        tgt_lengths = torch.LongTensor(
            [sample["target"].ne(pad_idx).long().sum() for sample in samples]
        )
        # ntokens counts target tokens when targets exist, else source tokens.
        ntokens = tgt_lengths.sum().item()
        if samples[0].get("prev_output_tokens", None) is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()

    return {
        "id": sample_ids,
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "patch_images": patch_images,
            "patch_masks": patch_masks,
            "prev_output_tokens": prev_output_tokens
        },
        "target": target,
    }
165,158 | from io import BytesIO
import logging
import warnings
import numpy as np
import torch
import base64
from torchvision import transforms
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
def collate(samples, pad_idx, eos_idx):
    """Collate samples carrying optional confidences, reference dicts,
    constraint masks and per-sample decoder prompts into one padded batch."""
    if not samples:
        return {}

    def merge(key):
        # Pad every per-sample tensor stored under ``key`` to a common length.
        return data_utils.collate_tokens(
            [s[key] for s in samples], pad_idx, eos_idx=eos_idx
        )

    sample_ids = np.array([s["id"] for s in samples])
    src_tokens = merge("source")
    src_lengths = torch.LongTensor(
        [s["source"].ne(pad_idx).long().sum() for s in samples]
    )
    patch_images = torch.stack([s["patch_image"] for s in samples], dim=0)
    patch_masks = torch.cat([s["patch_mask"] for s in samples])

    first = samples[0]
    conf = torch.cat([s["conf"] for s in samples], dim=0) if first.get("conf") is not None else None
    ref_dict = np.array([s["ref_dict"] for s in samples]) if first.get("ref_dict") is not None else None
    constraint_masks = merge("constraint_mask") if first.get("constraint_mask") is not None else None

    decoder_prompts = None
    prefix_tokens = None
    if first.get("decoder_prompt") is not None:
        decoder_prompts = np.array([s["decoder_prompt"].tolist() for s in samples])
        # The prefix fed to incremental decoding drops the leading BOS token.
        prefix_tokens = merge("decoder_prompt")[:, 1:]

    prev_output_tokens = None
    target = None
    if first.get("target") is not None:
        target = merge("target")
        ntokens = sum(int(s["target"].ne(pad_idx).long().sum()) for s in samples)
        if first.get("prev_output_tokens") is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()

    return {
        "id": sample_ids,
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "patch_images": patch_images,
            "patch_masks": patch_masks,
            "prev_output_tokens": prev_output_tokens,
        },
        "conf": conf,
        "ref_dict": ref_dict,
        "constraint_masks": constraint_masks,
        "decoder_prompts": decoder_prompts,
        "target": target,
        "prefix_tokens": prefix_tokens,
    }
165,159 | import logging
import warnings
import string
import numpy as np
import torch
import base64
from torchvision import transforms
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
from io import BytesIO
def collate(samples, pad_idx, eos_idx):
    """Collate image-pair samples into a batch.

    NOTE(review): only the first image ('patch_image1') of each pair is
    batched; the second image was deliberately disabled upstream -- the
    'patch_images' key therefore carries the first image only.
    """
    if not samples:
        return {}

    def merge(key):
        return data_utils.collate_tokens(
            [s[key] for s in samples], pad_idx, eos_idx=eos_idx
        )

    sample_ids = np.array([s["id"] for s in samples])
    src_tokens = merge("source")
    src_lengths = torch.LongTensor(
        [s["source"].ne(pad_idx).long().sum() for s in samples]
    )
    first_images = torch.stack([s["patch_image1"] for s in samples], dim=0)
    patch_masks = torch.cat([s["patch_mask"] for s in samples])

    prev_output_tokens = None
    target = None
    if samples[0].get("target") is not None:
        target = merge("target")
        ntokens = sum(int(s["target"].ne(pad_idx).long().sum()) for s in samples)
        if samples[0].get("prev_output_tokens") is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()

    return {
        "id": sample_ids,
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "patch_images": first_images,
            "patch_masks": patch_masks,
            "prev_output_tokens": prev_output_tokens,
        },
        "target": target,
    }
165,160 | from io import BytesIO
import logging
import warnings
import base64
import random
import numpy as np
import torch
from PIL import Image, ImageFile
from itertools import chain
from data.ofa_dataset import OFADataset
from data import data_utils
from PIL import Image
from io import BytesIO
import base64
def collate(
    samples,
    pad_idx,
    eos_idx,
    left_pad_source=False,
    left_pad_target=False,
):
    """Collate image-generation samples, honouring the padding-side flags."""
    if not samples:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False):
        # Pad per-sample tensors under ``key``; padding side is configurable.
        return data_utils.collate_tokens(
            [s[key] for s in samples],
            pad_idx,
            eos_idx,
            left_pad,
            move_eos_to_beginning,
        )

    sample_ids = np.array([s["id"] for s in samples])
    src_tokens = merge("source", left_pad=left_pad_source)
    src_lengths = torch.LongTensor(
        [s["source"].ne(pad_idx).long().sum() for s in samples]
    )
    code_images = np.array([s["code_image"] for s in samples])
    code_masks = torch.cat([s["code_mask"] for s in samples])

    prev_output_tokens = None
    target = None
    if samples[0].get("target") is not None:
        target = merge("target", left_pad=left_pad_target)
        ntokens = sum(int(s["target"].ne(pad_idx).long().sum()) for s in samples)
        if samples[0].get("prev_output_tokens") is not None:
            prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target)
    else:
        ntokens = src_lengths.sum().item()

    return {
        "id": sample_ids,
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "code_masks": code_masks,
            "prev_output_tokens": prev_output_tokens,
        },
        "code_images": code_images,
        "target": target,
    }
165,161 | from io import BytesIO
import logging
import warnings
import base64
import random
import numpy as np
import torch
from PIL import Image, ImageFile
from itertools import chain
from data.ofa_dataset import OFADataset
from data import data_utils
from PIL import Image
from io import BytesIO
import base64
def preprocess_vqgan(x):
    """Rescale an input in [0, 1] to the [-1, 1] range expected by VQGAN."""
    return 2. * x - 1.
165,162 | from io import BytesIO
import logging
import warnings
import numpy as np
import torch
import base64
from torchvision import transforms
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
def collate(samples, pad_idx, eos_idx):
    """Collate samples with optional reference dicts, constraint masks and
    per-sample decoder prompts into one padded batch."""
    if not samples:
        return {}

    def merge(key):
        return data_utils.collate_tokens(
            [s[key] for s in samples], pad_idx, eos_idx=eos_idx
        )

    sample_ids = np.array([s["id"] for s in samples])
    src_tokens = merge("source")
    src_lengths = torch.LongTensor(
        [s["source"].ne(pad_idx).long().sum() for s in samples]
    )
    patch_images = torch.stack([s["patch_image"] for s in samples], dim=0)
    patch_masks = torch.cat([s["patch_mask"] for s in samples])

    first = samples[0]
    ref_dict = np.array([s["ref_dict"] for s in samples]) if first.get("ref_dict") is not None else None
    constraint_masks = merge("constraint_mask") if first.get("constraint_mask") is not None else None
    decoder_prompts = None
    if first.get("decoder_prompt") is not None:
        decoder_prompts = np.array([s["decoder_prompt"].tolist() for s in samples])

    prev_output_tokens = None
    target = None
    if first.get("target") is not None:
        target = merge("target")
        ntokens = sum(int(s["target"].ne(pad_idx).long().sum()) for s in samples)
        if first.get("prev_output_tokens") is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()

    return {
        "id": sample_ids,
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "patch_images": patch_images,
            "patch_masks": patch_masks,
            "prev_output_tokens": prev_output_tokens,
        },
        "ref_dict": ref_dict,
        "constraint_masks": constraint_masks,
        "decoder_prompts": decoder_prompts,
        "target": target,
    }
165,163 | from io import BytesIO
import logging
import warnings
import numpy as np
import torch
import base64
import utils.transforms as T
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
def collate(samples, pad_idx, eos_idx):
    """Collate visual-grounding samples, including per-sample resize ratios
    and the ground-truth region coordinates."""
    if not samples:
        return {}

    def merge(key):
        return data_utils.collate_tokens(
            [s[key] for s in samples], pad_idx, eos_idx=eos_idx
        )

    sample_ids = np.array([s["id"] for s in samples])
    src_tokens = merge("source")
    src_lengths = torch.LongTensor(
        [s["source"].ne(pad_idx).long().sum() for s in samples]
    )
    patch_images = torch.stack([s["patch_image"] for s in samples], dim=0)
    patch_masks = torch.cat([s["patch_mask"] for s in samples])
    # Scale factors for mapping predicted boxes back to the original image size.
    w_resize_ratios = torch.stack([s["w_resize_ratio"] for s in samples], dim=0)
    h_resize_ratios = torch.stack([s["h_resize_ratio"] for s in samples], dim=0)
    region_coords = torch.stack([s["region_coord"] for s in samples], dim=0)

    prev_output_tokens = None
    target = None
    if samples[0].get("target") is not None:
        target = merge("target")
        ntokens = sum(int(s["target"].ne(pad_idx).long().sum()) for s in samples)
        if samples[0].get("prev_output_tokens") is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()

    return {
        "id": sample_ids,
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "patch_images": patch_images,
            "patch_masks": patch_masks,
            "prev_output_tokens": prev_output_tokens,
        },
        "target": target,
        "w_resize_ratios": w_resize_ratios,
        "h_resize_ratios": h_resize_ratios,
        "region_coords": region_coords,
    }
165,168 | import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
from fairseq.file_io import PathManager
from fairseq import utils
import os
The provided code snippet includes necessary dependencies for implementing the `batch_by_size` function. Write a Python function `def batch_by_size( indices, num_tokens_fn, num_tokens_vec=None, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, fixed_shapes=None, )` to solve the following problem:
Yield mini-batches of indices bucketed by size. Batches may contain sequences of different lengths. Args: indices (List[int]): ordered list of dataset indices num_tokens_fn (callable): function that returns the number of tokens at a given index num_tokens_vec (List[int], optional): precomputed vector of the number of tokens for each index in indices (to enable faster batch generation) max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). required_batch_size_multiple (int, optional): require batch size to be less than N or a multiple of N (default: 1). fixed_shapes (List[Tuple[int, int]], optional): if given, batches will only be created with the given shapes. *max_sentences* and *required_batch_size_multiple* will be ignored (default: None).
Here is the function:
def batch_by_size(
    indices,
    num_tokens_fn,
    num_tokens_vec=None,
    max_tokens=None,
    max_sentences=None,
    required_batch_size_multiple=1,
    fixed_shapes=None,
):
    """
    Yield mini-batches of indices bucketed by size. Batches may contain
    sequences of different lengths.
    Args:
        indices (List[int]): ordered list of dataset indices
        num_tokens_fn (callable): function that returns the number of tokens at
            a given index
        num_tokens_vec (List[int], optional): precomputed vector of the number
            of tokens for each index in indices (to enable faster batch generation)
        max_tokens (int, optional): max number of tokens in each batch
            (default: None).
        max_sentences (int, optional): max number of sentences in each
            batch (default: None).
        required_batch_size_multiple (int, optional): require batch size to
            be less than N or a multiple of N (default: 1).
        fixed_shapes (List[Tuple[int, int]], optional): if given, batches will
            only be created with the given shapes. *max_sentences* and
            *required_batch_size_multiple* will be ignored (default: None).
    """
    # The heavy lifting is delegated to compiled Cython kernels; import them
    # lazily so a missing build fails with an actionable message.
    try:
        from fairseq.data.data_utils_fast import (
            batch_by_size_fn,
            batch_by_size_vec,
            batch_fixed_shapes_fast,
        )
    except ImportError:
        raise ImportError(
            "Please build Cython components with: "
            "`python setup.py build_ext --inplace`"
        )
    except ValueError:
        # A stale extension built against a different NumPy ABI raises
        # ValueError at import time; prompt a rebuild.
        raise ValueError(
            "Please build (or rebuild) Cython components with `python setup.py build_ext --inplace`."
        )

    # added int() to avoid TypeError: an integer is required
    # -1 is the sentinel the Cython kernels interpret as "no limit".
    max_tokens = (
        int(max_tokens) if max_tokens is not None else -1
    )
    max_sentences = max_sentences if max_sentences is not None else -1
    bsz_mult = required_batch_size_multiple

    # Normalize inputs to int64 ndarrays, as required by the fast paths.
    if not isinstance(indices, np.ndarray):
        indices = np.fromiter(indices, dtype=np.int64, count=-1)

    if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray):
        num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1)

    if fixed_shapes is None:
        if num_tokens_vec is None:
            # Slow path: token counts are queried one index at a time.
            return batch_by_size_fn(
                indices,
                num_tokens_fn,
                max_tokens,
                max_sentences,
                bsz_mult,
            )
        else:
            # Fast path: token counts were precomputed.
            return batch_by_size_vec(
                indices,
                num_tokens_vec,
                max_tokens,
                max_sentences,
                bsz_mult,
            )

    else:
        fixed_shapes = np.array(fixed_shapes, dtype=np.int64)
        # Order shapes by batch size, then by sequence length, so the kernel
        # can pick the smallest admissible shape for each batch.
        sort_order = np.lexsort(
            [
                fixed_shapes[:, 1].argsort(),  # length
                fixed_shapes[:, 0].argsort(),  # bsz
            ]
        )
        fixed_shapes_sorted = fixed_shapes[sort_order]
        return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted)
165,169 | import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
from fairseq.file_io import PathManager
from fairseq import utils
import os
The provided code snippet includes necessary dependencies for implementing the `compute_mask_indices` function. Write a Python function `def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str = "static", mask_other: float = 0.0, min_masks: int = 0, no_overlap: bool = False, min_space: int = 0, ) -> np.ndarray` to solve the following problem:
Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element poisson = sample from possion distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
Here is the function:
def compute_mask_indices(
    shape: Tuple[int, int],
    padding_mask: Optional[torch.Tensor],
    mask_prob: float,
    mask_length: int,
    mask_type: str = "static",
    mask_other: float = 0.0,
    min_masks: int = 0,
    no_overlap: bool = False,
    min_space: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape
    Args:
        shape: the shape for which to compute masks.
            should be of size 2 where first element is batch size and 2nd is timesteps
        padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
        mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
            number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
            however due to overlaps, the actual number will be smaller (unless no_overlap is True)
        mask_type: how to compute mask lengths
            static = fixed size
            uniform = sample from uniform distribution [mask_other, mask_length*2]
            normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from poisson distribution with lambda = mask length
        min_masks: minimum number of masked spans
        no_overlap: if true, uses an alternative recursive algorithm that prevents spans from overlapping
        min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
    """
    bsz, all_sz = shape
    mask = np.full((bsz, all_sz), False)

    all_num_mask = int(
        # add a random number for probabilistic rounding
        mask_prob * all_sz / float(mask_length)
        + np.random.rand()
    )

    all_num_mask = max(min_masks, all_num_mask)

    mask_idcs = []
    for i in range(bsz):
        if padding_mask is not None:
            # only the unpadded prefix of this row is eligible for masking
            sz = all_sz - padding_mask[i].long().sum().item()
            num_mask = int(
                # add a random number for probabilistic rounding
                mask_prob * sz / float(mask_length)
                + np.random.rand()
            )
            num_mask = max(min_masks, num_mask)
        else:
            sz = all_sz
            num_mask = all_num_mask

        if mask_type == "static":
            lengths = np.full(num_mask, mask_length)
        elif mask_type == "uniform":
            lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
        elif mask_type == "normal":
            lengths = np.random.normal(mask_length, mask_other, size=num_mask)
            lengths = [max(1, int(round(x))) for x in lengths]
        elif mask_type == "poisson":
            lengths = np.random.poisson(mask_length, size=num_mask)
            lengths = [int(round(x)) for x in lengths]
        else:
            raise Exception("unknown mask selection " + mask_type)

        if sum(lengths) == 0:
            # degenerate draw (all-zero lengths): mask at least one span
            lengths[0] = min(mask_length, sz - 1)

        if no_overlap:
            mask_idc = []

            def arrange(s, e, length, keep_length):
                # Place one span of `length` inside (s, e) and return the
                # remaining sub-intervals that are still large enough to host
                # another span of at least `keep_length` elements.
                span_start = np.random.randint(s, e - length)
                mask_idc.extend(span_start + i for i in range(length))

                new_parts = []
                if span_start - s - min_space >= keep_length:
                    new_parts.append((s, span_start - min_space + 1))
                if e - span_start - keep_length - min_space > keep_length:
                    new_parts.append((span_start + length + min_space, e))
                return new_parts

            parts = [(0, sz)]
            min_length = min(lengths)
            for length in sorted(lengths, reverse=True):
                # weight each free interval by how much room it has left
                lens = np.fromiter(
                    (e - s if e - s >= length + min_space else 0 for s, e in parts),
                    np.int64,  # np.int was removed in NumPy 1.24; use the fixed-width alias
                )
                l_sum = np.sum(lens)
                if l_sum == 0:
                    break
                probs = lens / np.sum(lens)
                c = np.random.choice(len(parts), p=probs)
                s, e = parts.pop(c)
                parts.extend(arrange(s, e, length, min_length))
            mask_idc = np.asarray(mask_idc)
        else:
            min_len = min(lengths)
            if sz - min_len <= num_mask:
                min_len = sz - num_mask - 1

            mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)

            mask_idc = np.asarray(
                [
                    mask_idc[j] + offset
                    for j in range(len(mask_idc))
                    for offset in range(lengths[j])
                ]
            )

        mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))

    # Trim every row to the same number of masked positions so downstream
    # code can reshape the masked elements into a rectangular tensor.
    min_len = min([len(m) for m in mask_idcs])
    for i, mask_idc in enumerate(mask_idcs):
        if len(mask_idc) > min_len:
            mask_idc = np.random.choice(mask_idc, min_len, replace=False)
        mask[i, mask_idc] = True

    return mask
165,172 | import contextlib
import itertools
import logging
import re
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
from fairseq.file_io import PathManager
from fairseq import utils
import os
def get_buckets(sizes, num_buckets):
    """Compute size-bucket boundaries from percentiles of *sizes*.

    Splits the size distribution into ``num_buckets`` equal-probability
    buckets and returns the unique upper boundaries (the 0th percentile is
    dropped), so fewer than ``num_buckets`` values may come back when sizes
    repeat.
    """
    quantiles = np.linspace(0, 100, num_buckets + 1)
    try:
        # NumPy >= 1.22: the keyword was renamed from ``interpolation`` to
        # ``method`` (the old name is deprecated and removed in NumPy 2.x).
        boundaries = np.percentile(sizes, quantiles, method='lower')
    except TypeError:
        boundaries = np.percentile(sizes, quantiles, interpolation='lower')
    return np.unique(boundaries[1:])
165,175 | import logging
import warnings
import torch
import numpy as np
from data import data_utils
from data.ofa_dataset import OFADataset
def collate(samples, pad_idx, eos_idx):
    """Collate speech-recognition samples; carries raw target strings too."""
    if not samples:
        return {}

    def merge(key):
        return data_utils.collate_tokens(
            [s[key] for s in samples], pad_idx, eos_idx=eos_idx
        )

    src_tokens = merge("source")
    src_lengths = torch.LongTensor(
        [s["source"].ne(pad_idx).long().sum() for s in samples]
    )

    prev_output_tokens = None
    target = None
    if samples[0].get("target") is not None:
        target = merge("target")
        ntokens = sum(int(s["target"].ne(pad_idx).long().sum()) for s in samples)
        if samples[0].get("prev_output_tokens") is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()

    # Keep the untokenised reference transcripts (e.g. for WER scoring).
    target_strs = np.array([s["target_str"] for s in samples])

    return {
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "prev_output_tokens": prev_output_tokens,
        },
        "target": target,
        "target_strs": target_strs,
    }
165,176 | from io import BytesIO
import logging
import warnings
import functools
import numpy as np
import torch
import base64
from torchvision import transforms
from timm.data import create_transform
from utils.vision_helper import RandomAugment
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
def collate(samples, pad_idx, eos_idx):
    """Collate VQA samples with optional confidences, answer reference dicts
    and answer-constraint masks into one padded batch."""
    if not samples:
        return {}

    def merge(key):
        return data_utils.collate_tokens(
            [s[key] for s in samples], pad_idx, eos_idx=eos_idx
        )

    sample_ids = np.array([s["id"] for s in samples])
    src_tokens = merge("source")
    src_lengths = torch.LongTensor(
        [s["source"].ne(pad_idx).long().sum() for s in samples]
    )
    patch_images = torch.stack([s["patch_image"] for s in samples], dim=0)
    patch_masks = torch.cat([s["patch_mask"] for s in samples])

    first = samples[0]
    conf = torch.cat([s["conf"] for s in samples], dim=0) if first.get("conf") is not None else None
    ref_dict = np.array([s["ref_dict"] for s in samples]) if first.get("ref_dict") is not None else None
    constraint_masks = merge("constraint_mask") if first.get("constraint_mask") is not None else None

    prev_output_tokens = None
    target = None
    if first.get("target") is not None:
        target = merge("target")
        ntokens = sum(int(s["target"].ne(pad_idx).long().sum()) for s in samples)
        if first.get("prev_output_tokens") is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()

    return {
        "id": sample_ids,
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "patch_images": patch_images,
            "patch_masks": patch_masks,
            "prev_output_tokens": prev_output_tokens,
        },
        "conf": conf,
        "ref_dict": ref_dict,
        "constraint_masks": constraint_masks,
        "target": target,
    }
165,177 | from io import BytesIO
import math
import logging
import random
import warnings
import numpy as np
import torch
import base64
from torchvision import transforms
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
from utils.vision_helper import RandomAugment
import utils.transforms as T
def get_whole_word_mask(bpe, dictionary):
    """Build a ByteTensor flagging which dictionary entries begin a word.

    Returns ``None`` when no BPE object is supplied.
    """
    if bpe is None:
        return None

    def _starts_word(idx):
        # Special symbols and fairseq's "madeupword" padding fillers always
        # count as word beginnings.
        if idx < dictionary.nspecial:
            return True
        token = dictionary[idx]
        if token.startswith("madeupword"):
            return True
        try:
            return bpe.is_beginning_of_word(token)
        except ValueError:
            return True

    return torch.ByteTensor([_starts_word(i) for i in range(len(dictionary))])
165,178 | from io import BytesIO
import math
import logging
import random
import warnings
import numpy as np
import torch
import base64
from torchvision import transforms
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
from utils.vision_helper import RandomAugment
import utils.transforms as T
def collate(samples, pad_idx, eos_idx):
    """Collate pretraining samples carrying image patches, optional code
    masks and per-sample confidence weights."""
    if not samples:
        return {}

    def merge(key):
        return data_utils.collate_tokens(
            [s[key] for s in samples], pad_idx, eos_idx=eos_idx
        )

    sample_ids = np.array([s["id"] for s in samples])
    src_tokens = merge("source")
    src_lengths = torch.LongTensor(
        [s["source"].ne(pad_idx).long().sum() for s in samples]
    )
    patch_images = torch.stack([s["patch_image"] for s in samples], dim=0)
    patch_masks = torch.cat([s["patch_mask"] for s in samples])

    code_masks = None
    if samples[0].get("code_mask") is not None:
        code_masks = torch.cat([s["code_mask"] for s in samples])

    # Confidence weights are mandatory for this dataset.
    conf = torch.cat([s["conf"] for s in samples], dim=0)

    prev_output_tokens = None
    target = None
    if samples[0].get("target") is not None:
        target = merge("target")
        ntokens = sum(int(s["target"].ne(pad_idx).long().sum()) for s in samples)
        if samples[0].get("prev_output_tokens") is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()

    return {
        "id": sample_ids,
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "patch_images": patch_images,
            "patch_masks": patch_masks,
            "code_masks": code_masks,
            "prev_output_tokens": prev_output_tokens,
        },
        "target": target,
        "conf": conf,
    }
165,179 | import logging
import warnings
import torch
import numpy as np
from data import data_utils
from data.ofa_dataset import OFADataset
def collate(samples, pad_idx, eos_idx):
    """Collate text-only samples with optional reference dicts and decoding
    constraint masks into one padded batch."""
    if not samples:
        return {}

    def merge(key):
        return data_utils.collate_tokens(
            [s[key] for s in samples], pad_idx, eos_idx=eos_idx
        )

    src_tokens = merge("source")
    src_lengths = torch.LongTensor(
        [s["source"].ne(pad_idx).long().sum() for s in samples]
    )

    ref_dict = None
    if samples[0].get("ref_dict") is not None:
        ref_dict = np.array([s["ref_dict"] for s in samples])

    constraint_masks = None
    if samples[0].get("constraint_mask") is not None:
        constraint_masks = merge("constraint_mask")

    prev_output_tokens = None
    target = None
    if samples[0].get("target") is not None:
        target = merge("target")
        ntokens = sum(int(s["target"].ne(pad_idx).long().sum()) for s in samples)
        if samples[0].get("prev_output_tokens") is not None:
            prev_output_tokens = merge("prev_output_tokens")
    else:
        ntokens = src_lengths.sum().item()

    return {
        "nsentences": len(samples),
        "ntokens": ntokens,
        "net_input": {
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "prev_output_tokens": prev_output_tokens,
        },
        "ref_dict": ref_dict,
        "constraint_masks": constraint_masks,
        "target": target,
    }
165,186 | from dataclasses import dataclass, field
import json
import logging
import os
import math
import base64
from typing import Optional
from argparse import Namespace
from omegaconf import DictConfig, OmegaConf
from torchvision import transforms
from PIL import Image
from io import BytesIO
import torch
import numpy as np
from fairseq import metrics
from fairseq.tasks import register_task
from fairseq.dataclass import ChoiceEnum
from models import search, clip
from models.taming.models.vqgan import GumbelVQ
from data.mm_data.image_gen_dataset import ImageGenDataset
from data.file_dataset import FileDataset
from tasks.ofa_task import OFATask, OFAConfig
def custom_to_pil(x):
    """Convert a CHW float tensor in [-1, 1] into an RGB ``PIL.Image``."""
    img = torch.clamp(x.detach().cpu(), -1., 1.)
    img = (img + 1.) / 2.
    arr = (255 * img.permute(1, 2, 0).numpy()).astype(np.uint8)
    pil = Image.fromarray(arr)
    if pil.mode != "RGB":
        pil = pil.convert("RGB")
    return pil
165,187 | import math
from dataclasses import dataclass, field
from typing import Optional
import torch
import torch.nn.functional as F
import numpy as np
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
def construct_rdrop_sample(x):
    """Duplicate a sample along the batch dimension for R-Drop training.

    Dicts are recursed into (and mutated in place); tensors/arrays are
    repeated along dim 0; ints (e.g. token counts) are doubled.
    """
    if isinstance(x, dict):
        for key in x:
            x[key] = construct_rdrop_sample(x[key])
        return x
    if isinstance(x, torch.Tensor):
        # Repeat only the leading (batch) dimension.
        return x.repeat(2, *([1] * (x.dim() - 1)))
    if isinstance(x, int):
        return x * 2
    if isinstance(x, np.ndarray):
        return x.repeat(2)
    raise NotImplementedError
165,188 | import math
from dataclasses import dataclass, field
from typing import Optional
import torch
import torch.nn.functional as F
import numpy as np
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
def kl_loss(p, q):
    """Symmetrised KL divergence between two log-probability tensors."""
    forward = F.kl_div(p, torch.exp(q), reduction='sum')
    backward = F.kl_div(q, torch.exp(p), reduction='sum')
    return (forward + backward) / 2
def label_smoothed_nll_loss(
    lprobs, target, epsilon, update_num, reduce=True,
    drop_worst_ratio=0.0, drop_worst_after=0, use_rdrop=False, reg_alpha=1.0,
    constraint_masks=None, constraint_start=None, constraint_end=None
):
    """Label-smoothed NLL loss with optional worst-sample dropping and R-Drop.

    Args:
        lprobs: (N, V) log-probabilities over the vocabulary.
        target: (N,) or (N, 1) gold token indices.
        epsilon: label-smoothing mass spread over the non-gold tokens.
        update_num: current update step; worst-sample dropping only activates
            once it exceeds ``drop_worst_after``.
        reduce: appears unused in this implementation; presumably kept for
            signature compatibility -- TODO confirm with callers.
        drop_worst_ratio: fraction of highest-loss samples to discard.
        use_rdrop: if True, the batch is assumed to be duplicated (R-Drop) and
            a symmetric KL regulariser weighted by ``reg_alpha`` is added.
        constraint_masks / constraint_start / constraint_end: restrict the
            smoothing mass to an allowed subset of the vocabulary.

    Returns:
        (loss, nll_loss, ntokens): summed smoothed loss, summed NLL, and the
        number of tokens that survived dropping.
    """
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target).squeeze(-1)
    if constraint_masks is not None:
        # Smooth only over the allowed tokens given by the boolean mask.
        smooth_loss = -lprobs.masked_fill(~constraint_masks, 0).sum(dim=-1, keepdim=True).squeeze(-1)
        eps_i = epsilon / (constraint_masks.sum(1) - 1 + 1e-6)
    elif constraint_start is not None and constraint_end is not None:
        # Smooth over the special tokens (0..3) plus the contiguous allowed range.
        constraint_range = [0, 1, 2, 3] + list(range(constraint_start, constraint_end))
        smooth_loss = -lprobs[:, constraint_range].sum(dim=-1, keepdim=True).squeeze(-1)
        eps_i = epsilon / (len(constraint_range) - 1 + 1e-6)
    else:
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True).squeeze(-1)
        eps_i = epsilon / (lprobs.size(-1) - 1)
    loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
    if drop_worst_ratio > 0 and update_num > drop_worst_after:
        if use_rdrop:
            # With R-Drop the batch is [x; x]; select the kept samples on the
            # first half and apply the same selection to the duplicated half.
            true_batch_size = loss.size(0) // 2
            _, indices = torch.topk(loss[:true_batch_size], k=int(true_batch_size * (1 - drop_worst_ratio)), largest=False)
            loss = torch.cat([loss[indices], loss[indices+true_batch_size]])
            nll_loss = torch.cat([nll_loss[indices], nll_loss[indices+true_batch_size]])
            lprobs = torch.cat([lprobs[indices], lprobs[indices+true_batch_size]])
        else:
            loss, indices = torch.topk(loss, k=int(loss.shape[0] * (1 - drop_worst_ratio)), largest=False)
            nll_loss = nll_loss[indices]
            lprobs = lprobs[indices]

    ntokens = loss.numel()
    nll_loss = nll_loss.sum()
    loss = loss.sum()
    if use_rdrop:
        # Symmetric KL between the two duplicated halves of the batch.
        true_batch_size = lprobs.size(0) // 2
        p = lprobs[:true_batch_size]
        q = lprobs[true_batch_size:]
        if constraint_start is not None and constraint_end is not None:
            constraint_range = [0, 1, 2, 3] + list(range(constraint_start, constraint_end))
            p = p[:, constraint_range]
            q = q[:, constraint_range]
        loss += kl_loss(p, q) * reg_alpha
    return loss, nll_loss, ntokens
165,190 | import math
from dataclasses import dataclass, field
from typing import Optional
import torch
import torch.nn.functional as F
import numpy as np
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
def kl_loss(p, q):
    """Symmetrized KL divergence between two log-probability tensors.

    Both ``p`` and ``q`` are log-probabilities; each direction feeds
    ``F.kl_div`` with the other distribution exponentiated back to
    probability space, and the two directions are averaged.
    """
    forward_kl = F.kl_div(p, torch.exp(q), reduction='sum')
    backward_kl = F.kl_div(q, torch.exp(p), reduction='sum')
    return (forward_kl + backward_kl) / 2
def label_smoothed_nll_loss(
        lprobs, target, epsilon, update_num, reduce=True,
        drop_worst_ratio=0.0, drop_worst_after=0, use_rdrop=False, reg_alpha=1.0,
        constraint_masks=None, constraint_start=None, constraint_end=None, drop_best_ratio=0.0,
        drop_best_after=0,
):
    """Label-smoothed NLL loss with optional worst/best-sample dropping and R-Drop.

    Args:
        lprobs: log-probabilities, shape ``(num_tokens, vocab)``.
        target: gold indices, shape ``(num_tokens,)`` or ``(num_tokens, 1)``.
        epsilon: label-smoothing mass spread over the non-gold classes.
        update_num: current training step; gates drop-worst/drop-best.
        reduce: unused -- the loss is always summed; kept for interface
            compatibility with callers.
        drop_worst_ratio / drop_worst_after: after ``drop_worst_after`` updates,
            discard the ``drop_worst_ratio`` fraction of highest-loss samples.
        use_rdrop: treat the batch as two stacked halves (R-Drop) and add a
            symmetric KL regularizer weighted by ``reg_alpha``.
        constraint_masks / constraint_start / constraint_end: restrict the
            smoothing distribution to an allowed vocabulary subset.
        drop_best_ratio / drop_best_after: after ``drop_best_after`` updates,
            additionally discard the lowest-loss fraction (non-R-Drop path only).

    Returns:
        ``(loss, nll_loss, ntokens, lprobs, target)`` where ``loss`` and
        ``nll_loss`` are summed scalars and ``ntokens`` is the sample count kept.
    """
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target).squeeze(-1)
    if constraint_masks is not None:
        # Smooth only over allowed positions; renormalize epsilon per row.
        smooth_loss = -lprobs.masked_fill(~constraint_masks, 0).sum(dim=-1, keepdim=True).squeeze(-1)
        eps_i = epsilon / (constraint_masks.sum(1) - 1 + 1e-6)
    elif constraint_start is not None and constraint_end is not None:
        # Special tokens 0-3 plus the [constraint_start, constraint_end) range.
        constraint_range = [0, 1, 2, 3] + list(range(constraint_start, constraint_end))
        smooth_loss = -lprobs[:, constraint_range].sum(dim=-1, keepdim=True).squeeze(-1)
        eps_i = epsilon / (len(constraint_range) - 1 + 1e-6)
    else:
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True).squeeze(-1)
        eps_i = epsilon / (lprobs.size(-1) - 1)
    loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
    if drop_worst_ratio > 0 and update_num > drop_worst_after:
        if use_rdrop:
            # Keep the same sample subset in both R-Drop halves.
            true_batch_size = loss.size(0) // 2
            _, indices = torch.topk(loss[:true_batch_size], k=int(true_batch_size * (1 - drop_worst_ratio)), largest=False)
            loss = torch.cat([loss[indices], loss[indices+true_batch_size]])
            nll_loss = torch.cat([nll_loss[indices], nll_loss[indices+true_batch_size]])
            lprobs = torch.cat([lprobs[indices], lprobs[indices+true_batch_size]])
            # NOTE(review): `target` is not re-indexed on this path, so the
            # returned target can be longer than the returned lprobs -- confirm
            # callers do not rely on their alignment here.
        else:
            loss, indices = torch.topk(loss, k=int(loss.shape[0] * (1 - drop_worst_ratio)), largest=False)
            nll_loss = nll_loss[indices]
            lprobs = lprobs[indices]
            target = target[indices]
            if update_num > drop_best_after:
                # largest=True keeps the highest losses, i.e. drops the easiest samples.
                loss, indices = torch.topk(loss, k=int(loss.shape[0] * (1 - drop_best_ratio)), largest=True)
                nll_loss = nll_loss[indices]
                lprobs = lprobs[indices]
                target = target[indices]
    ntokens = loss.numel()
    nll_loss = nll_loss.sum()
    loss = loss.sum()
    if use_rdrop:
        true_batch_size = lprobs.size(0) // 2
        p = lprobs[:true_batch_size]
        q = lprobs[true_batch_size:]
        if constraint_start is not None and constraint_end is not None:
            constraint_range = [0, 1, 2, 3] + list(range(constraint_start, constraint_end))
            p = p[:, constraint_range]
            q = q[:, constraint_range]
        # Symmetric KL regularizer between the two stochastic forward passes.
        loss += kl_loss(p, q) * reg_alpha
    return loss, nll_loss, ntokens, lprobs, target
import math
from dataclasses import dataclass, field
from typing import Optional
from PIL import Image
from torchvision import transforms
import torch
import numpy as np
from fairseq import metrics
from fairseq.data import data_utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq import utils
from omegaconf import II
from models import clip
def custom_to_pil(x):
    """Convert a CHW float tensor with values in [-1, 1] to an RGB PIL image.

    Args:
        x: tensor of shape ``(C, H, W)``; values outside [-1, 1] are clamped.

    Returns:
        A ``PIL.Image`` in RGB mode.
    """
    x = x.detach().cpu()
    x = torch.clamp(x, -1., 1.)
    # Map [-1, 1] -> [0, 1], move channels last for PIL, then scale to uint8.
    x = (x + 1.) / 2.
    x = x.permute(1, 2, 0).numpy()
    x = (255 * x).astype(np.uint8)
    x = Image.fromarray(x)
    if not x.mode == "RGB":
        x = x.convert("RGB")
    return x
import math
from dataclasses import dataclass, field
from typing import Optional
from PIL import Image
from torchvision import transforms
import torch
import numpy as np
from fairseq import metrics
from fairseq.data import data_utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq import utils
from omegaconf import II
from models import clip
def scst_loss(lprobs, target, reward, ignore_index=None, reduce=True):
    """Self-critical sequence training (SCST) loss.

    Weights the token-level negative log-likelihood of the sampled sequence by
    a per-sample reward (typically the advantage over a baseline).

    Args:
        lprobs: log-probabilities, shape ``(bsz, seq_len, vocab)``.
        target: sampled token ids, shape ``(bsz, seq_len)``.
        reward: per-sample reward, broadcast against the token dimension.
        ignore_index: optional padding id whose positions contribute zero loss.
        reduce: when True, return the summed scalar loss.

    Returns:
        ``(loss, ntokens)``: the (optionally summed) loss and the count of
        non-padding target tokens.
    """
    # NOTE(review): .squeeze() with no dim argument also drops a batch or
    # sequence dimension of size 1 -- confirm callers never hit that edge case.
    loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze() * reward.unsqueeze(-1)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        loss.masked_fill_(pad_mask, 0.0)
        ntokens = (~pad_mask).sum()
    else:
        loss = loss.squeeze(-1)
        ntokens = target.numel()
    if reduce:
        loss = loss.sum()
    return loss, ntokens
import math
import string
from dataclasses import dataclass, field
from collections import OrderedDict
from typing import Optional
import torch
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
from data import data_utils
from utils.cider.pyciderevalcap.ciderD.ciderD import CiderD
import nltk
import numpy as np
def scst_loss(lprobs, target, reward, ignore_index=None, reduce=True):
    """Self-critical sequence training (SCST) loss.

    Weights the token-level negative log-likelihood of the sampled sequence by
    a per-sample reward (e.g. a CIDEr advantage over a greedy baseline).

    Args:
        lprobs: log-probabilities, shape ``(bsz, seq_len, vocab)``.
        target: sampled token ids, shape ``(bsz, seq_len)``.
        reward: per-sample reward, broadcast against the token dimension.
        ignore_index: optional padding id whose positions contribute zero loss.
        reduce: when True, return the summed scalar loss.

    Returns:
        ``(loss, ntokens)``: the (optionally summed) loss and the count of
        non-padding target tokens.
    """
    # NOTE(review): .squeeze() with no dim argument also drops a batch or
    # sequence dimension of size 1 -- confirm callers never hit that edge case.
    loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze() * reward.unsqueeze(-1)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        loss.masked_fill_(pad_mask, 0.0)
        ntokens = (~pad_mask).sum()
    else:
        loss = loss.squeeze(-1)
        ntokens = target.numel()
    if reduce:
        loss = loss.sum()
    return loss, ntokens
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import LayerNorm
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
from .unify_multihead_attention import MultiheadAttention
The provided code snippet includes necessary dependencies for implementing the `drop_path` function. Write a Python function `def drop_path(x, drop_prob: float = 0.0, training: bool = False)` to solve the following problem:
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument.
Here is the function:
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # Per-sample mask over dim 1; assumes (seq, batch, dim) layout -- TODO confirm.
    shape = (1, x.shape[1], 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize: 1 with prob keep_prob, else 0
    # Scale surviving paths so the expected activation is unchanged.
    output = x.div(keep_prob) * random_tensor
    return output
import math
import random
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
SinusoidalPositionalEmbedding,
GradMultiply
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
from .unify_transformer_layer import TransformerEncoderLayer, TransformerDecoderLayer
from .resnet import ResNet
def BatchNorm2d(out_chan, momentum=0.1, eps=1e-3):
    """Build a ``BatchNorm2d`` converted to ``SyncBatchNorm`` for multi-GPU use.

    Args:
        out_chan: number of feature channels.
        momentum: running-stats momentum (default 0.1).
        eps: numerical-stability epsilon (default 1e-3).

    Returns:
        An ``nn.SyncBatchNorm`` module with the requested hyper-parameters.
    """
    return nn.SyncBatchNorm.convert_sync_batchnorm(
        nn.BatchNorm2d(out_chan, momentum=momentum, eps=eps)
    )
import math
import random
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
SinusoidalPositionalEmbedding,
GradMultiply
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
from .unify_transformer_layer import TransformerEncoderLayer, TransformerDecoderLayer
from .resnet import ResNet
DEFAULT_MAX_SOURCE_POSITIONS = 1024


def make_token_bucket_position(bucket_size, max_position=DEFAULT_MAX_SOURCE_POSITIONS):
    """Precompute a relative-position bucket table for 1-D token positions.

    Offsets with ``|offset| <= bucket_size // 2`` keep their exact relative
    position; farther offsets are merged into logarithmically spaced buckets,
    symmetrically per direction.

    Args:
        bucket_size: number of distinct buckets per direction.
        max_position: side length of the returned square table.

    Returns:
        LongTensor of shape ``(max_position, max_position)`` with bucket ids
        in ``[0, 2 * bucket_size - 2]``.
    """
    context_pos = torch.arange(max_position, dtype=torch.long)[:, None]
    memory_pos = torch.arange(max_position, dtype=torch.long)[None, :]
    relative_pos = context_pos - memory_pos
    sign = torch.sign(relative_pos)
    mid = bucket_size // 2
    # Offsets inside (-mid, mid) are clamped to mid-1 here only so log() below
    # stays well-defined; they are restored to their exact value afterwards.
    abs_pos = torch.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, torch.abs(relative_pos))
    log_pos = torch.ceil(torch.log(abs_pos / mid) / math.log((max_position - 1) / mid) * (mid - 1)) + mid
    log_pos = log_pos.int()
    bucket_pos = torch.where(abs_pos.le(mid), relative_pos, log_pos * sign).long()
    # Shift so the smallest bucket id is 0.
    return bucket_pos + bucket_size - 1
import math
import random
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
SinusoidalPositionalEmbedding,
GradMultiply
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
from .unify_transformer_layer import TransformerEncoderLayer, TransformerDecoderLayer
from .resnet import ResNet
def make_image_bucket_position(bucket_size, num_relative_distance):
    """Precompute relative-position bucket ids for a 2-D image patch grid.

    Builds the index table used for relative position bias between all patch
    pairs, with row/column 0 reserved for a [CLS]-style token.

    Args:
        bucket_size: grid side length (patches per side).
        num_relative_distance: total bucket count; the last three ids are
            reserved for cls->patch, patch->cls and cls->cls.

    Returns:
        LongTensor of shape ``(bucket_size**2 + 1, bucket_size**2 + 1)``.
    """
    coords_h = torch.arange(bucket_size)
    coords_w = torch.arange(bucket_size)
    coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
    coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
    relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
    relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
    relative_coords[:, :, 0] += bucket_size - 1  # shift to start from 0
    relative_coords[:, :, 1] += bucket_size - 1
    relative_coords[:, :, 0] *= 2 * bucket_size - 1
    relative_position_index = torch.zeros(size=(bucket_size * bucket_size + 1,) * 2, dtype=relative_coords.dtype)
    relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
    relative_position_index[0, 0:] = num_relative_distance - 3  # cls -> patch
    relative_position_index[0:, 0] = num_relative_distance - 2  # patch -> cls
    relative_position_index[0, 0] = num_relative_distance - 1  # cls -> cls
    return relative_position_index
import math
import random
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
SinusoidalPositionalEmbedding,
GradMultiply
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
from .unify_transformer_layer import TransformerEncoderLayer, TransformerDecoderLayer
from .resnet import ResNet
def Embedding(num_embeddings, embedding_dim, padding_idx=None, zero_init=False):
    """Build an ``nn.Embedding`` with transformer-style initialization.

    Weights are drawn from ``N(0, embedding_dim ** -0.5)``; the padding row
    (if any) is zeroed, and ``zero_init=True`` zeroes the whole table (useful
    for additive embeddings that should start as a no-op).
    """
    m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
    if padding_idx is not None:
        # Re-zero the padding row after the normal init overwrote it.
        nn.init.constant_(m.weight[padding_idx], 0)
    if zero_init:
        nn.init.constant_(m.weight, 0)
    return m
import math
import random
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
SinusoidalPositionalEmbedding,
GradMultiply
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
from .unify_transformer_layer import TransformerEncoderLayer, TransformerDecoderLayer
from .resnet import ResNet
def Linear(in_features, out_features, bias=True):
    """Build an ``nn.Linear`` with Xavier-uniform weights and zero bias."""
    m = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(m.weight)
    if bias:
        nn.init.constant_(m.bias, 0.0)
    return m
import math
import random
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
SinusoidalPositionalEmbedding,
GradMultiply
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
from .unify_transformer_layer import TransformerEncoderLayer, TransformerDecoderLayer
from .resnet import ResNet
def base_architecture(args):
    """Fill in the baseline transformer hyper-parameter defaults on ``args``.

    Every attribute is only assigned when the caller has not already provided
    it, so explicit command-line overrides take precedence over these defaults.
    """
    args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
    args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
    # Decoder dimensions default to the (already resolved) encoder dimensions.
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(
        args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
    )
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
    args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
    args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
    args.attention_dropout = getattr(args, "attention_dropout", 0.0)
    args.activation_dropout = getattr(args, "activation_dropout", 0.0)
    args.activation_fn = getattr(args, "activation_fn", "relu")
    args.dropout = getattr(args, "dropout", 0.1)
    args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
    args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
    args.share_decoder_input_output_embed = getattr(
        args, "share_decoder_input_output_embed", False
    )
    args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
    args.no_token_positional_embeddings = getattr(
        args, "no_token_positional_embeddings", False
    )
    args.adaptive_input = getattr(args, "adaptive_input", False)
    args.no_cross_attention = getattr(args, "no_cross_attention", False)
    args.cross_self_attention = getattr(args, "cross_self_attention", False)
    args.decoder_output_dim = getattr(
        args, "decoder_output_dim", args.decoder_embed_dim
    )
    args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
    args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
    args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
    args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
    args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
    args.offload_activations = getattr(args, "offload_activations", False)
    if args.offload_activations:
        # Offloading activations requires activation checkpointing.
        args.checkpoint_activations = True
    args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
    args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
    args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
    args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
    args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
    args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
    args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
from typing import Optional
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .unify_transformer import TransformerModel
def ofa_large_architecture(args):
    """Fill in the OFA-large hyper-parameter defaults on ``args``.

    Each attribute is installed only when the caller has not already set it,
    so explicit overrides always take precedence over these defaults.
    """
    def _fill(name, default):
        # Respect a pre-existing value; otherwise install the default.
        setattr(args, name, getattr(args, name, default))

    _fill("encoder_embed_path", None)
    _fill("encoder_embed_dim", 1024)
    _fill("encoder_ffn_embed_dim", 4 * 1024)
    _fill("encoder_layers", 12)
    _fill("encoder_attention_heads", 16)
    _fill("encoder_normalize_before", True)
    _fill("encoder_learned_pos", True)
    _fill("decoder_embed_path", None)
    # Decoder dimensions mirror the (already resolved) encoder dimensions.
    _fill("decoder_embed_dim", args.encoder_embed_dim)
    _fill("decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
    _fill("decoder_layers", 12)
    _fill("decoder_attention_heads", 16)
    _fill("decoder_normalize_before", True)
    _fill("decoder_learned_pos", True)
    _fill("attention_dropout", 0.0)
    _fill("relu_dropout", 0.0)
    _fill("dropout", 0.0)
    _fill("max_target_positions", 1024)
    _fill("max_source_positions", 1024)
    _fill("adaptive_softmax_cutoff", None)
    _fill("adaptive_softmax_dropout", 0)
    _fill("share_decoder_input_output_embed", True)
    _fill("share_all_embeddings", True)
    _fill("decoder_output_dim", args.decoder_embed_dim)
    _fill("decoder_input_dim", args.decoder_embed_dim)
    _fill("no_scale_embedding", True)
    _fill("layernorm_embedding", True)
    _fill("activation_fn", "gelu")
    _fill("pooler_activation_fn", "tanh")
    _fill("pooler_dropout", 0.0)
    _fill("pooler_classifier", "mlp")
    _fill("resnet_drop_path_rate", 0.0)
    _fill("encoder_drop_path_rate", 0.0)
    _fill("decoder_drop_path_rate", 0.0)
    _fill("resnet_type", "resnet152")
    _fill("token_bucket_size", 256)
    _fill("image_bucket_size", 42)
    _fill("freeze_encoder_embedding", False)
    _fill("freeze_decoder_embedding", False)
    _fill("add_type_embedding", True)
    _fill("attn_scale_factor", 2)
    _fill("code_image_size", 128)
    _fill("patch_layernorm_embedding", True)
    _fill("code_layernorm_embedding", True)
    _fill("entangle_position_embedding", False)
    _fill("disable_entangle", False)
    _fill("sync_bn", False)
    _fill("scale_attn", False)
    _fill("scale_fc", False)
    _fill("scale_heads", False)
    _fill("scale_resids", False)
def ofa_base_architecture(args):
    """OFA-base defaults: shrink the model, then fill the remaining defaults
    via ``ofa_large_architecture`` (already-set attributes are preserved)."""
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
    args.resnet_type = getattr(args, "resnet_type", "resnet101")
    ofa_large_architecture(args)
from typing import Optional
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .unify_transformer import TransformerModel
def ofa_large_architecture(args):
    """Fill in the OFA-large hyper-parameter defaults on ``args``.

    Each attribute is installed only when the caller has not already set it,
    so explicit overrides always take precedence over these defaults.
    """
    def _fill(name, default):
        # Respect a pre-existing value; otherwise install the default.
        setattr(args, name, getattr(args, name, default))

    _fill("encoder_embed_path", None)
    _fill("encoder_embed_dim", 1024)
    _fill("encoder_ffn_embed_dim", 4 * 1024)
    _fill("encoder_layers", 12)
    _fill("encoder_attention_heads", 16)
    _fill("encoder_normalize_before", True)
    _fill("encoder_learned_pos", True)
    _fill("decoder_embed_path", None)
    # Decoder dimensions mirror the (already resolved) encoder dimensions.
    _fill("decoder_embed_dim", args.encoder_embed_dim)
    _fill("decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
    _fill("decoder_layers", 12)
    _fill("decoder_attention_heads", 16)
    _fill("decoder_normalize_before", True)
    _fill("decoder_learned_pos", True)
    _fill("attention_dropout", 0.0)
    _fill("relu_dropout", 0.0)
    _fill("dropout", 0.0)
    _fill("max_target_positions", 1024)
    _fill("max_source_positions", 1024)
    _fill("adaptive_softmax_cutoff", None)
    _fill("adaptive_softmax_dropout", 0)
    _fill("share_decoder_input_output_embed", True)
    _fill("share_all_embeddings", True)
    _fill("decoder_output_dim", args.decoder_embed_dim)
    _fill("decoder_input_dim", args.decoder_embed_dim)
    _fill("no_scale_embedding", True)
    _fill("layernorm_embedding", True)
    _fill("activation_fn", "gelu")
    _fill("pooler_activation_fn", "tanh")
    _fill("pooler_dropout", 0.0)
    _fill("pooler_classifier", "mlp")
    _fill("resnet_drop_path_rate", 0.0)
    _fill("encoder_drop_path_rate", 0.0)
    _fill("decoder_drop_path_rate", 0.0)
    _fill("resnet_type", "resnet152")
    _fill("token_bucket_size", 256)
    _fill("image_bucket_size", 42)
    _fill("freeze_encoder_embedding", False)
    _fill("freeze_decoder_embedding", False)
    _fill("add_type_embedding", True)
    _fill("attn_scale_factor", 2)
    _fill("code_image_size", 128)
    _fill("patch_layernorm_embedding", True)
    _fill("code_layernorm_embedding", True)
    _fill("entangle_position_embedding", False)
    _fill("disable_entangle", False)
    _fill("sync_bn", False)
    _fill("scale_attn", False)
    _fill("scale_fc", False)
    _fill("scale_heads", False)
    _fill("scale_resids", False)
def ofa_huge_architecture(args):
    """OFA-huge defaults: widen and deepen the encoder, then delegate the
    remaining defaults to ``ofa_large_architecture`` (existing values win)."""
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1280)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1280)
    args.encoder_layers = getattr(args, "encoder_layers", 24)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
    args.decoder_layers = getattr(args, "decoder_layers", 12)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    args.resnet_type = getattr(args, "resnet_type", "resnet152")
    ofa_large_architecture(args)
from typing import Optional
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .unify_transformer import TransformerModel
# NOTE: the body of ofa_large_architecture was elided in this excerpt
# (truncated extraction left an empty `def`, which is a syntax error).
def ofa_medium_architecture(args):
    """OFA-medium defaults: shrink the model, then delegate the remaining
    defaults to ``ofa_large_architecture`` (existing values win)."""
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 512)
    args.encoder_layers = getattr(args, "encoder_layers", 4)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
    args.decoder_layers = getattr(args, "decoder_layers", 4)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
    args.resnet_type = getattr(args, "resnet_type", "resnet101")
    ofa_large_architecture(args)
from typing import Optional
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .unify_transformer import TransformerModel
def ofa_large_architecture(args):
    """Fill in the OFA-large hyper-parameter defaults on ``args``.

    Each attribute is installed only when the caller has not already set it,
    so explicit overrides always take precedence over these defaults.
    """
    def _fill(name, default):
        # Respect a pre-existing value; otherwise install the default.
        setattr(args, name, getattr(args, name, default))

    _fill("encoder_embed_path", None)
    _fill("encoder_embed_dim", 1024)
    _fill("encoder_ffn_embed_dim", 4 * 1024)
    _fill("encoder_layers", 12)
    _fill("encoder_attention_heads", 16)
    _fill("encoder_normalize_before", True)
    _fill("encoder_learned_pos", True)
    _fill("decoder_embed_path", None)
    # Decoder dimensions mirror the (already resolved) encoder dimensions.
    _fill("decoder_embed_dim", args.encoder_embed_dim)
    _fill("decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
    _fill("decoder_layers", 12)
    _fill("decoder_attention_heads", 16)
    _fill("decoder_normalize_before", True)
    _fill("decoder_learned_pos", True)
    _fill("attention_dropout", 0.0)
    _fill("relu_dropout", 0.0)
    _fill("dropout", 0.0)
    _fill("max_target_positions", 1024)
    _fill("max_source_positions", 1024)
    _fill("adaptive_softmax_cutoff", None)
    _fill("adaptive_softmax_dropout", 0)
    _fill("share_decoder_input_output_embed", True)
    _fill("share_all_embeddings", True)
    _fill("decoder_output_dim", args.decoder_embed_dim)
    _fill("decoder_input_dim", args.decoder_embed_dim)
    _fill("no_scale_embedding", True)
    _fill("layernorm_embedding", True)
    _fill("activation_fn", "gelu")
    _fill("pooler_activation_fn", "tanh")
    _fill("pooler_dropout", 0.0)
    _fill("pooler_classifier", "mlp")
    _fill("resnet_drop_path_rate", 0.0)
    _fill("encoder_drop_path_rate", 0.0)
    _fill("decoder_drop_path_rate", 0.0)
    _fill("resnet_type", "resnet152")
    _fill("token_bucket_size", 256)
    _fill("image_bucket_size", 42)
    _fill("freeze_encoder_embedding", False)
    _fill("freeze_decoder_embedding", False)
    _fill("add_type_embedding", True)
    _fill("attn_scale_factor", 2)
    _fill("code_image_size", 128)
    _fill("patch_layernorm_embedding", True)
    _fill("code_layernorm_embedding", True)
    _fill("entangle_position_embedding", False)
    _fill("disable_entangle", False)
    _fill("sync_bn", False)
    _fill("scale_attn", False)
    _fill("scale_fc", False)
    _fill("scale_heads", False)
    _fill("scale_resids", False)
def ofa_medium_architecture(args):
    """Medium-size OFA config: shrink the towers, then inherit every other default from the large config."""
    _medium_defaults = {
        "encoder_embed_dim": 256,
        "encoder_ffn_embed_dim": 4 * 256,
        "encoder_layers": 4,
        "encoder_attention_heads": 4,
        "decoder_layers": 4,
        "decoder_attention_heads": 4,
        "resnet_type": "resnet50",
    }
    # Only fill in values the caller has not already set on `args`.
    for name, default in _medium_defaults.items():
        setattr(args, name, getattr(args, name, default))
    # Everything not overridden above falls back to the large-architecture defaults.
    ofa_large_architecture(args)
165,205 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `drop_path` function. Write a Python function `def drop_path(x, drop_prob: float = 0., training: bool = False)` to solve the following problem:
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument.
Here is the function:
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks.

    Same as the DropConnect impl created for EfficientNet-style networks; the name
    'drop path' is used instead of DropConnect to avoid confusion with a different
    form of dropout from a separate paper (see
    https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956). The
    argument is the drop probability rather than a 'survival rate'.
    """
    # Identity when disabled or at inference time.
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims
    # (works for any tensor rank, not just 2D ConvNets).
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device).add_(keep_prob).floor_()
    # Scale surviving paths by 1/keep_prob so the expected activation is unchanged.
    return x.div(keep_prob) * mask
165,208 | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.taming.modules.losses.lpips import LPIPS
from models.taming.modules.discriminator.model import NLayerDiscriminator, weights_init
def adopt_weight(weight, global_step, threshold=0, value=0.):
    """Warm-up gating: return `value` until `global_step` reaches `threshold`, then `weight`."""
    return value if global_step < threshold else weight
165,209 | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.taming.modules.losses.lpips import LPIPS
from models.taming.modules.discriminator.model import NLayerDiscriminator, weights_init
def hinge_d_loss(logits_real, logits_fake):
    """Hinge discriminator loss: penalize real logits below +1 and fake logits above -1."""
    real_term = F.relu(1. - logits_real).mean()
    fake_term = F.relu(1. + logits_fake).mean()
    return 0.5 * (real_term + fake_term)
165,210 | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.taming.modules.losses.lpips import LPIPS
from models.taming.modules.discriminator.model import NLayerDiscriminator, weights_init
def vanilla_d_loss(logits_real, logits_fake):
    """Non-saturating (vanilla GAN) discriminator loss built from softplus terms."""
    softplus = torch.nn.functional.softplus
    # softplus(-real): real samples should score high; softplus(fake): fakes should score low.
    total = torch.mean(softplus(-logits_real)) + torch.mean(softplus(logits_fake))
    return 0.5 * total
165,211 | import torch
import torch.nn as nn
from torchvision import models
from collections import namedtuple
from models.taming.util import get_ckpt_path
def normalize_tensor(x, eps=1e-10):
    """L2-normalize `x` along dim 1; `eps` guards against division by zero."""
    channel_norm = torch.sum(x ** 2, dim=1, keepdim=True).sqrt()
    return x / (channel_norm + eps)
165,212 | import torch
import torch.nn as nn
from torchvision import models
from collections import namedtuple
from models.taming.util import get_ckpt_path
def spatial_average(x, keepdim=True):
    """Mean over the spatial dims (dims 2 and 3) of an NCHW tensor."""
    return x.mean(dim=[2, 3], keepdim=keepdim)
165,217 | import functools
import torch.nn as nn
from models.taming.modules.util import ActNorm
def weights_init(m):
    """DCGAN-style init: N(0, 0.02) for conv weights; N(1, 0.02) weight and zero bias for batch-norm."""
    name = m.__class__.__name__
    if "Conv" in name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
165,218 | import os, hashlib
import requests
from tqdm import tqdm
import importlib
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path 'pkg.module.Name' to the attribute `Name` on `pkg.module`.

    With reload=True the target module is re-imported first (useful during
    interactive development).
    """
    module_path, attr_name = string.rsplit(".", 1)
    if reload:
        importlib.reload(importlib.import_module(module_path))
    return getattr(importlib.import_module(module_path, package=None), attr_name)
def instantiate_from_config(config):
    """Build the object described by `config`.

    Imports the dotted path under `config["target"]` and calls it with the
    (optional) `config["params"]` mapping as keyword arguments.

    Raises:
        KeyError: if the mandatory 'target' key is missing.
    """
    # Idiomatic membership test (was `if not "target" in config`).
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
165,219 | import os, hashlib
import requests
from tqdm import tqdm
import importlib
URL_MAP = {
"vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1"
}
CKPT_MAP = {
"vgg_lpips": "vgg.pth"
}
MD5_MAP = {
"vgg_lpips": "d507d7349b931f0638a25a48a722f98a"
}
def download(url, local_path, chunk_size=1024):
    """Stream `url` to `local_path` (parent directories are created) with a tqdm progress bar.

    The bar total comes from the Content-Length header (0 when absent).
    """
    os.makedirs(os.path.split(local_path)[0], exist_ok=True)
    with requests.get(url, stream=True) as r:
        total_size = int(r.headers.get("content-length", 0))
        with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
            with open(local_path, "wb") as f:
                for data in r.iter_content(chunk_size=chunk_size):
                    if data:
                        f.write(data)
                        # Count bytes actually written: the last chunk may be
                        # shorter than chunk_size, which previously over-reported.
                        pbar.update(len(data))
def md5_hash(path):
    """Hex MD5 digest of the file at `path` (reads the whole file into memory)."""
    with open(path, "rb") as f:
        return hashlib.md5(f.read()).hexdigest()
def get_ckpt_path(name, root, check=False):
    # Resolve the local path of the named checkpoint, downloading it from
    # URL_MAP into `root` when it is missing (or, with check=True, when the
    # cached copy fails its MD5 check against MD5_MAP).
    assert name in URL_MAP
    path = os.path.join(root, CKPT_MAP[name])
    if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]):
        print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path))
        download(URL_MAP[name], path)
        md5 = md5_hash(path)
        # A freshly downloaded file is always verified, regardless of `check`.
        assert md5 == MD5_MAP[name], md5
    return path
165,220 | import os, hashlib
import requests
from tqdm import tqdm
import importlib
class KeyNotFoundError(Exception):
    """Raised when a key path cannot be resolved.

    Carries the original `cause` exception plus the full key path and the
    keys successfully visited before the failure.
    """

    def __init__(self, cause, keys=None, visited=None):
        self.cause = cause
        self.keys = keys
        self.visited = visited
        parts = []
        if keys is not None:
            parts.append("Key not found: {}".format(keys))
        if visited is not None:
            parts.append("Visited: {}".format(visited))
        parts.append("Cause:\n{}".format(cause))
        super().__init__("\n".join(parts))
The provided code snippet includes necessary dependencies for implementing the `retrieve` function. Write a Python function `def retrieve( list_or_dict, key, splitval="/", default=None, expand=True, pass_success=False )` to solve the following problem:
Given a nested list or dict return the desired value at key expanding callable nodes if necessary and :attr:`expand` is ``True``. The expansion is done in-place. Parameters ---------- list_or_dict : list or dict Possibly nested list or dictionary. key : str key/to/value, path like string describing all keys necessary to consider to get to the desired value. List indices can also be passed here. splitval : str String that defines the delimiter between keys of the different depth levels in `key`. default : obj Value returned if :attr:`key` is not found. expand : bool Whether to expand callable nodes on the path or not. Returns ------- The desired value or if :attr:`default` is not ``None`` and the :attr:`key` is not found returns ``default``. Raises ------ Exception if ``key`` not in ``list_or_dict`` and :attr:`default` is ``None``.
Here is the function:
def retrieve(
    list_or_dict, key, splitval="/", default=None, expand=True, pass_success=False
):
    """Given a nested list or dict return the desired value at key expanding
    callable nodes if necessary and :attr:`expand` is ``True``. The expansion
    is done in-place.
    Parameters
    ----------
        list_or_dict : list or dict
            Possibly nested list or dictionary.
        key : str
            key/to/value, path like string describing all keys necessary to
            consider to get to the desired value. List indices can also be
            passed here.
        splitval : str
            String that defines the delimiter between keys of the
            different depth levels in `key`.
        default : obj
            Value returned if :attr:`key` is not found.
        expand : bool
            Whether to expand callable nodes on the path or not.

    Returns
    -------
        The desired value or if :attr:`default` is not ``None`` and the
        :attr:`key` is not found returns ``default``.

    Raises
    ------
        Exception if ``key`` not in ``list_or_dict`` and :attr:`default` is
        ``None``.
    """
    keys = key.split(splitval)

    success = True
    try:
        visited = []
        # `parent`/`last_key` track where the current node lives so that an
        # expanded callable can be written back in place.
        parent = None
        last_key = None
        for key in keys:
            # Callable nodes are lazily-evaluated sub-structures: call them
            # and splice the result into the parent so the expansion sticks.
            if callable(list_or_dict):
                if not expand:
                    raise KeyNotFoundError(
                        ValueError(
                            "Trying to get past callable node with expand=False."
                        ),
                        keys=keys,
                        visited=visited,
                    )
                list_or_dict = list_or_dict()
                parent[last_key] = list_or_dict

            last_key = key
            parent = list_or_dict

            try:
                if isinstance(list_or_dict, dict):
                    list_or_dict = list_or_dict[key]
                else:
                    # Non-dict nodes (lists) are indexed by the integer value
                    # of the path segment; a non-numeric segment raises here.
                    list_or_dict = list_or_dict[int(key)]
            except (KeyError, IndexError, ValueError) as e:
                raise KeyNotFoundError(e, keys=keys, visited=visited)

            visited += [key]
        # final expansion of retrieved value
        if expand and callable(list_or_dict):
            list_or_dict = list_or_dict()
            parent[last_key] = list_or_dict
    except KeyNotFoundError as e:
        # Fall back to `default` when one was supplied; otherwise propagate.
        if default is None:
            raise e
        else:
            list_or_dict = default
            success = False

    if not pass_success:
        return list_or_dict
    else:
        return list_or_dict, success
165,221 | import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
}
def _file_sha256(path: str) -> str:
    """SHA256 hex digest of a file, read in 1 MiB chunks to bound memory use."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()


def _download(url: str, root: str):
    """Download `url` into `root`, verifying the SHA256 embedded in the URL.

    Returns the local file path. A cached file with a matching checksum is
    reused; a mismatching one is re-downloaded.

    Raises:
        RuntimeError: if the target exists but is not a regular file, or if
            the downloaded file fails its checksum.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    # OpenAI model URLs embed the expected SHA256 as the second-to-last path segment.
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        # Streamed hashing via a context manager (previously the whole file
        # was read through an open() handle that was never closed).
        if _file_sha256(download_target) == expected_sha256:
            return download_target
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    if _file_sha256(download_target) != expected_sha256:
        # Message fix: previously read "does not not match".
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")

    return download_target
def _transform(n_px):
    # CLIP image preprocessing pipeline: bicubic resize, center-crop to
    # n_px x n_px, convert to RGB, to-tensor, then normalize with the
    # mean/std constants shipped with the original CLIP release.
    # NOTE(review): `BICUBIC` and `_convert_image_to_rgb` are module-level
    # names defined elsewhere in this file — confirm their definitions.
    return Compose([
        Resize(n_px, interpolation=BICUBIC),
        CenterCrop(n_px),
        _convert_image_to_rgb,
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])
def available_models() -> List[str]:
    """Return the names of the CLIP models that can be loaded by name."""
    return [model_name for model_name in _MODELS]
def build_model(state_dict: dict):
    # Reconstruct a CLIP model whose architecture hyper-parameters are
    # inferred entirely from tensor shapes in `state_dict`.
    # "visual.proj" only exists for ViT image towers, so its presence
    # selects the branch below.
    vit = "visual.proj" in state_dict

    if vit:
        # ViT tower: conv1 gives width and patch size; count attention blocks.
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len(
            [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # positional_embedding holds grid*grid patch positions + 1 class token.
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # Modified-ResNet tower: per-stage block counts, width from stage 1.
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in
                        [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        # The ResNet tower downsamples by an overall factor of 32.
        image_resolution = output_width * 32

    # Text-transformer hyper-parameters, all read from tensor shapes.
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))

    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
    )

    # These bookkeeping keys are not parameters; drop them before loading.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]

    # NOTE(review): `convert_weights` is defined elsewhere in this module —
    # presumably casts applicable weights for fp16 inference; confirm there.
    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
The provided code snippet includes necessary dependencies for implementing the `load` function. Write a Python function `def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None)` to solve the following problem:
Load a CLIP model Parameters ---------- name : str A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict device : Union[str, torch.device] The device to put the loaded model jit : bool Whether to load the optimized JIT model or more hackable non-JIT model (default). download_root: str path to download the model files; by default, it uses "~/.cache/clip" Returns ------- model : torch.nn.Module The CLIP model preprocess : Callable[[PIL.Image], torch.Tensor] A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
Here is the function:
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
    """Load a CLIP model

    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict

    device : Union[str, torch.device]
        The device to put the loaded model

    jit : bool
        Whether to load the optimized JIT model or more hackable non-JIT model (default).

    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"

    Returns
    -------
    model : torch.nn.Module
        The CLIP model

    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    # Resolve the checkpoint: known model name -> download; otherwise a local file path.
    if name in _MODELS:
        model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")

    # Checkpoints may be either TorchScript archives or plain state dicts;
    # try JIT first and fall back to a state-dict load.
    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")

    if not jit:
        # Rebuild the eager model from the state dict (or from the JIT
        # archive's own state dict) and move it to the target device.
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            model.float()
        return model, _transform(model.visual.input_resolution)

    # --- JIT path: rewrite device constants baked into the traced graph ---
    # Trace a trivial function on the target device to obtain a prim::Constant
    # node carrying that device, then copy its attributes over every "cuda"
    # constant in the model's graphs.
    # patch the device names
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]

    def patch_device(module):
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []

        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)

    # patch dtype to float32 on CPU
    # (the archives are traced in fp16; rewrite the dtype argument of every
    # aten::to call so CPU inference runs in float32).
    if str(device) == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []

            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        # dtype constant 5 is torch.float16 in the JIT IR.
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)

        model.float()

    return model, _transform(model.input_resolution.item())
165,228 | import logging
import os
import sys
import numpy as np
import torch
from fairseq import distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.utils import reset_logging
from omegaconf import DictConfig
import torch.distributed as dist
from utils import checkpoint_utils
from utils.eval_utils import eval_step, merge_results
def apply_half(t):
    """Cast float32 tensors to fp16; leave every other dtype untouched."""
    return t.to(dtype=torch.half) if t.dtype is torch.float32 else t
165,229 | import logging
import os
import sys
import numpy as np
import torch
from fairseq import distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.utils import reset_logging
from omegaconf import DictConfig
import torch.distributed as dist
from utils import checkpoint_utils
from utils.eval_utils import eval_step, merge_results
def main(cfg: DictConfig, **kwargs):
    """Distributed evaluation loop: load a checkpoint ensemble, run
    `eval_step` over the generation subset, and merge scored results
    across ranks via `merge_results`."""
    utils.import_user_module(cfg.common)

    reset_logging()
    logger.info(cfg)

    assert (
        cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
    ), "Must specify batch size either with --max-tokens or --batch-size"

    # Fix seed for stochastic decoding
    if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
        np.random.seed(cfg.common.seed)
        utils.set_torch_seed(cfg.common.seed)

    use_fp16 = cfg.common.fp16
    use_cuda = torch.cuda.is_available() and not cfg.common.cpu

    if use_cuda:
        torch.cuda.set_device(cfg.distributed_training.device_id)

    # Load ensemble
    # NOTE(review): model_overrides is eval()'d as a Python literal/dict —
    # only trusted command-line input should reach this.
    overrides = eval(cfg.common_eval.model_overrides)

    logger.info("loading model(s) from {}".format(cfg.common_eval.path))
    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
        utils.split_paths(cfg.common_eval.path),
        arg_overrides=overrides,
        suffix=cfg.checkpoint.checkpoint_suffix,
        strict=(cfg.checkpoint.checkpoint_shard_count == 1),
        num_shards=cfg.checkpoint.checkpoint_shard_count,
    )

    # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
    task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)

    # Move models to GPU
    for model, ckpt_path in zip(models, utils.split_paths(cfg.common_eval.path)):
        if kwargs['ema_eval']:
            # Replace the regular weights with the EMA weights from the same checkpoint.
            logger.info("loading EMA weights from {}".format(ckpt_path))
            model.load_state_dict(checkpoint_utils.load_ema_from_checkpoint(ckpt_path)['model'])
        model.eval()
        if use_fp16:
            model.half()
        if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
            model.cuda()
        model.prepare_for_inference_(cfg)

    # Load dataset (possibly sharded); each rank consumes its own shard.
    itr = task.get_batch_iterator(
        dataset=task.dataset(cfg.dataset.gen_subset),
        max_tokens=cfg.dataset.max_tokens,
        max_sentences=cfg.dataset.batch_size,
        max_positions=utils.resolve_max_positions(
            task.max_positions(), *[m.max_positions() for m in models]
        ),
        ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
        seed=cfg.common.seed,
        num_shards=cfg.distributed_training.distributed_world_size,
        shard_id=cfg.distributed_training.distributed_rank,
        num_workers=cfg.dataset.num_workers,
        data_buffer_size=cfg.dataset.data_buffer_size,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=cfg.common.log_format,
        log_interval=cfg.common.log_interval,
        default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
    )

    # Initialize generator
    generator = task.build_generator(models, cfg.generation)

    # Running score totals, kept on GPU for the cross-rank merge.
    results = []
    score_sum = torch.FloatTensor([0]).cuda()
    score_cnt = torch.FloatTensor([0]).cuda()
    for sample in progress:
        if "net_input" not in sample:
            continue
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        sample = utils.apply_to_sample(apply_half, sample) if cfg.common.fp16 else sample
        with torch.no_grad():
            result, scores = eval_step(task, generator, models, sample, **kwargs)
        results += result
        score_sum += sum(scores) if scores is not None else 0
        score_cnt += len(scores) if scores is not None else 0
        progress.log({"sentences": sample["nsentences"]})

    # Wait for every rank before merging partial results.
    dist.barrier()
    merge_results(task, cfg, logger, score_cnt, score_sum, results)
def cli_main():
    # CLI entry point: parse fairseq generation args plus the eval-specific
    # flags below, convert to an omegaconf config, and dispatch `main`
    # through the distributed launcher.
    parser = options.get_generation_parser()
    parser.add_argument("--ema-eval", action='store_true', help="Use EMA weights to make evaluation.")
    parser.add_argument("--beam-search-vqa-eval", action='store_true', help="Use beam search for vqa evaluation (faster inference speed but sub-optimal result), if not specified, we compute scores for each answer in the candidate set, which is slower but can obtain best result.")
    args = options.parse_args_and_arch(parser)
    cfg = convert_namespace_to_omegaconf(args)
    distributed_utils.call_main(cfg, main, ema_eval=args.ema_eval, beam_search_vqa_eval=args.beam_search_vqa_eval)
165,230 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
import math
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from data import data_utils
logger = logging.getLogger(__name__)
def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):
    """Save the per-epoch/per-update/best/last checkpoints dictated by `cfg`,
    then prune old checkpoints according to the keep_* retention settings.

    The best validation score seen so far is cached as a function attribute
    (`save_checkpoint.best`) across calls."""
    from fairseq import meters

    # only one worker should attempt to create the required dir
    if trainer.data_parallel_rank == 0:
        os.makedirs(cfg.save_dir, exist_ok=True)

    prev_best = getattr(save_checkpoint, "best", val_loss)
    if val_loss is not None:
        best_function = max if cfg.maximize_best_checkpoint_metric else min
        save_checkpoint.best = best_function(val_loss, prev_best)

    if cfg.no_save:
        return

    trainer.consolidate_optimizer()  # TODO(SS): do we need this if no_save_optimizer_state

    if not trainer.should_save_checkpoint_on_current_rank:
        if trainer.always_call_state_dict_during_save_checkpoint:
            trainer.state_dict()
        return

    write_timer = meters.StopwatchMeter()
    write_timer.start()

    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()

    logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates")

    def is_better(a, b):
        return a >= b if cfg.maximize_best_checkpoint_metric else a <= b

    suffix = trainer.checkpoint_suffix
    # Map checkpoint filename -> whether it should be written this call.
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
        end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0
    )
    checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
        not end_of_epoch
        and cfg.save_interval_updates > 0
        and updates % cfg.save_interval_updates == 0
    )
    checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
        not hasattr(save_checkpoint, "best")
        or is_better(val_loss, save_checkpoint.best)
    )
    if val_loss is not None and cfg.keep_best_checkpoints > 0:
        # Compare against the current worst of the kept "best" checkpoints to
        # decide whether this one makes the top-N cut.
        worst_best = getattr(save_checkpoint, "best", None)
        chkpts = checkpoint_paths(
            cfg.save_dir,
            pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
                cfg.best_checkpoint_metric, suffix
            ),
        )
        if len(chkpts) > 0:
            p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0]
            worst_best = float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), ""))
        # add random digits to resolve ties
        with data_utils.numpy_seed(epoch, updates, val_loss):
            rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints)

        checkpoint_conds[
            "checkpoint.best_{}_{:.3f}{}{}.pt".format(
                cfg.best_checkpoint_metric,
                val_loss,
                rand_sfx,
                suffix
            )
        ] = worst_best is None or is_better(val_loss, worst_best)
    checkpoint_conds[
        "checkpoint_last{}.pt".format(suffix)
    ] = not cfg.no_last_checkpoints

    extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
    if hasattr(save_checkpoint, "best"):
        extra_state.update({"best": save_checkpoint.best})

    checkpoints = [
        os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
    ]
    if len(checkpoints) > 0:
        # Serialize once, then copy to every other active filename.
        trainer.save_checkpoint(checkpoints[0], extra_state)
        for cp in checkpoints[1:]:
            if cfg.write_checkpoints_asynchronously:
                # TODO[ioPath]: Need to implement a delayed asynchronous
                # file copying/moving feature.
                logger.warning(
                    f"ioPath is not copying {checkpoints[0]} to {cp} "
                    "since async write mode is on."
                )
            else:
                assert PathManager.copy(
                    checkpoints[0], cp, overwrite=True
                ), f"Failed to copy {checkpoints[0]} to {cp}"

        write_timer.stop()
        logger.info(
            "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
                checkpoints[0], epoch, updates, val_loss, write_timer.sum
            )
        )

    if not end_of_epoch and cfg.keep_interval_updates > 0:
        # remove old checkpoints; checkpoints are sorted in descending order
        if cfg.keep_interval_updates_pattern == -1:
            checkpoints = checkpoint_paths(
                cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix)
            )
        else:
            # Keep every checkpoint whose update count matches the pattern
            # (e.g. milestone checkpoints) out of the deletion list.
            checkpoints = checkpoint_paths(
                cfg.save_dir,
                pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix),
                keep_match=True,
            )
            checkpoints = [
                x[0]
                for x in checkpoints
                if x[1] % cfg.keep_interval_updates_pattern != 0
            ]

        for old_chk in checkpoints[cfg.keep_interval_updates :]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
            elif PathManager.exists(old_chk):
                PathManager.rm(old_chk)

    if cfg.keep_last_epochs > 0:
        # remove old epoch checkpoints; checkpoints are sorted in descending order
        checkpoints = checkpoint_paths(
            cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix)
        )
        for old_chk in checkpoints[cfg.keep_last_epochs :]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
            elif PathManager.exists(old_chk):
                PathManager.rm(old_chk)

    if cfg.keep_best_checkpoints > 0:
        # only keep the best N checkpoints according to validation metric
        checkpoints = checkpoint_paths(
            cfg.save_dir,
            pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
                cfg.best_checkpoint_metric, suffix
            ),
        )
        if not cfg.maximize_best_checkpoint_metric:
            checkpoints = checkpoints[::-1]
        for old_chk in checkpoints[cfg.keep_best_checkpoints :]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
            elif PathManager.exists(old_chk):
                PathManager.rm(old_chk)
The provided code snippet includes necessary dependencies for implementing the `load_checkpoint` function. Write a Python function `def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args)` to solve the following problem:
Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``.
Here is the function:
def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):
    """
    Load a checkpoint and restore the training iterator.

    *passthrough_args* will be passed through to
    ``trainer.get_train_iterator``.

    Args:
        cfg: checkpoint configuration (restore file, save dir, reset flags).
        trainer: fairseq Trainer whose model/optimizer state is restored.

    Returns:
        ``(extra_state, epoch_itr)`` — the checkpoint's auxiliary state
        dict (``None`` on a fresh start) and the (possibly resumed)
        training epoch iterator.

    Raises:
        ValueError: when ``--finetune-from-model`` is combined with any
            reset flag or a non-default ``--restore-file``, or when the
            finetune checkpoint does not exist.
    """
    reset_optimizer = cfg.reset_optimizer
    reset_lr_scheduler = cfg.reset_lr_scheduler
    optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)
    reset_meters = cfg.reset_meters
    reset_dataloader = cfg.reset_dataloader

    if cfg.finetune_from_model is not None and (
        reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader
    ):
        raise ValueError(
            "--finetune-from-model can not be set together with either --reset-optimizer"
            " or reset_lr_scheduler or reset_meters or reset_dataloader"
        )

    suffix = trainer.checkpoint_suffix
    if (
        cfg.restore_file == "checkpoint_last.pt"
    ):  # default value of restore_file is 'checkpoint_last.pt'
        checkpoint_path = os.path.join(
            cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
        )
        first_launch = not PathManager.exists(checkpoint_path)
        if cfg.finetune_from_model is not None and first_launch:
            # if there is no last checkpoint to restore, start the finetune from pretrained model
            # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.
            if PathManager.exists(cfg.finetune_from_model):
                checkpoint_path = cfg.finetune_from_model
                reset_optimizer = True
                reset_lr_scheduler = True
                reset_meters = True
                reset_dataloader = True
                logger.info(
                    f"loading pretrained model from {checkpoint_path}: "
                    "optimizer, lr scheduler, meters, dataloader will be reset"
                )
            else:
                # NOTE: fixed a typo in the original message ("--funetune-from-model")
                raise ValueError(
                    f"--finetune-from-model {cfg.finetune_from_model} does not exist"
                )
    elif suffix is not None:
        checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt")
    else:
        checkpoint_path = cfg.restore_file

    if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model:
        raise ValueError(
            "--finetune-from-model and --restore-file (non-default value) "
            "can not be specified together: " + str(cfg)
        )

    extra_state = trainer.load_checkpoint(
        checkpoint_path,
        reset_optimizer,
        reset_lr_scheduler,
        optimizer_overrides,
        reset_meters=reset_meters,
    )

    if (
        extra_state is not None
        and "best" in extra_state
        and not reset_optimizer
        and not reset_meters
    ):
        save_checkpoint.best = extra_state["best"]

    if extra_state is not None and not reset_dataloader:
        # restore iterator from checkpoint
        itr_state = extra_state["train_iterator"]
        epoch_itr = trainer.get_train_iterator(
            epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
        )
        epoch_itr.load_state_dict(itr_state)
        # Number of examples already consumed this epoch = sum of the sizes
        # of the batches that were already served.
        _n = itr_state['iterations_in_epoch']
        offset = sum(len(_) for _ in epoch_itr.batch_sampler[:_n])
        epoch_itr.dataset.dataset._seek(offset=offset)
        # NOTE(review): the divisor 8 looks like a fixed data-parallel world
        # size / padding multiple — confirm before changing world size.
        true_num = int(math.ceil(len(epoch_itr.dataset) / 8)) * 8
        another_offset = ((epoch_itr.epoch - 1) * true_num + offset) // 8
        # Auxiliary datasets advance at rates tied to the main dataset;
        # seek each one so resumed training sees fresh examples.
        if hasattr(epoch_itr.dataset, 'pure_text_dataset'):
            text_offset = (2 * another_offset) % len(epoch_itr.dataset.pure_text_dataset)
            epoch_itr.dataset.pure_text_dataset._seek(offset=text_offset)
        if hasattr(epoch_itr.dataset, 'pure_image_dataset'):
            image_offset = another_offset % len(epoch_itr.dataset.pure_image_dataset)
            epoch_itr.dataset.pure_image_dataset._seek(offset=image_offset)
        if hasattr(epoch_itr.dataset, 'detection_dataset'):
            detection_offset = another_offset % len(epoch_itr.dataset.detection_dataset)
            epoch_itr.dataset.detection_dataset._seek(offset=detection_offset)
    else:
        epoch_itr = trainer.get_train_iterator(
            epoch=1, load_dataset=True, **passthrough_args
        )

    trainer.lr_step(epoch_itr.epoch)

    return extra_state, epoch_itr
165,231 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
import math
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from data import data_utils
def load_model_ensemble_and_task(
    filenames,
    arg_overrides: Optional[Dict[str, Any]] = None,
    task=None,
    strict=True,
    suffix="",
    num_shards=1,
    state=None,
):
    """Load an ensemble of models together with their config and task.

    Args:
        filenames: checkpoint paths, one per ensemble member.
        arg_overrides: values to override in the stored training args/cfg.
        task: pre-built task; when None it is built from the checkpoint cfg.
        strict: forwarded to ``load_state_dict``; must be False when
            ``num_shards > 1``.
        suffix: filename suffix inserted before ``.pt`` (e.g. a rank suffix).
        num_shards: number of FSDP checkpoint shards per model.
        state: pre-loaded checkpoint state; only allowed with one filename.

    Returns:
        ``(ensemble, cfg, task)`` — list of loaded models, the resolved
        config, and the task.
    """
    assert state is None or len(filenames) == 1
    from fairseq import tasks
    assert not (
        strict and num_shards > 1
    ), "Cannot load state dict with strict=True and checkpoint shards > 1"
    ensemble = []
    cfg = None
    for filename in filenames:
        orig_filename = filename
        model_shard_state = {"shard_weights": [], "shard_metadata": []}
        assert num_shards > 0
        st = time.time()
        for shard_idx in range(num_shards):
            filename = get_maybe_sharded_checkpoint_filename(
                orig_filename, suffix, shard_idx, num_shards
            )
            if not PathManager.exists(filename):
                raise IOError("Model file not found: {}".format(filename))
            if state is None:
                state = load_checkpoint_to_cpu(filename, arg_overrides)
            if "args" in state and state["args"] is not None:
                # legacy checkpoints store an argparse Namespace under "args"
                cfg = convert_namespace_to_omegaconf(state["args"])
            elif "cfg" in state and state["cfg"] is not None:
                cfg = state["cfg"]
            else:
                raise RuntimeError(
                    f"Neither args nor cfg exist in state keys = {state.keys()}"
                )
            if task is None:
                task = tasks.setup_task(cfg.task)
            if "task_state" in state:
                task.load_state_dict(state["task_state"])
            if "fsdp_metadata" in state and num_shards > 1:
                # accumulate shard weights; the model is built only once all
                # shards for this checkpoint have been collected
                model_shard_state["shard_weights"].append(state["model"])
                model_shard_state["shard_metadata"].append(state["fsdp_metadata"])
                # check FSDP import before the code goes too far
                if not has_FSDP:
                    raise ImportError(
                        "Cannot find FullyShardedDataParallel. "
                        "Please install fairscale with: pip install fairscale"
                    )
                if shard_idx == num_shards - 1:
                    consolidated_model_state = FSDP.consolidate_shard_weights(
                        shard_weights=model_shard_state["shard_weights"],
                        shard_metadata=model_shard_state["shard_metadata"],
                    )
                    model = task.build_model(cfg.model)
                    model.load_state_dict(
                        consolidated_model_state, strict=strict, model_cfg=cfg.model
                    )
            else:
                # model parallel checkpoint or unsharded checkpoint
                model = task.build_model(cfg.model)
                model.load_state_dict(
                    state["model"], strict=strict, model_cfg=cfg.model
                )
            # reset state so it gets loaded for the next model in ensemble
            state = None
            if shard_idx % 10 == 0 and shard_idx > 0:
                elapsed = time.time() - st
                logger.info(
                    f"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx+1):.2f}s/shard"
                )
        # build model for ensemble
        ensemble.append(model)
    return ensemble, cfg, task
The provided code snippet includes necessary dependencies for implementing the `load_model_ensemble` function. Write a Python function `def load_model_ensemble( filenames, arg_overrides: Optional[Dict[str, Any]] = None, task=None, strict=True, suffix="", num_shards=1, state=None, )` to solve the following problem:
Loads an ensemble of models. Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading
Here is the function:
def load_model_ensemble(
    filenames,
    arg_overrides: Optional[Dict[str, Any]] = None,
    task=None,
    strict=True,
    suffix="",
    num_shards=1,
    state=None,
):
    """Load an ensemble of models from checkpoint files.

    Thin wrapper around :func:`load_model_ensemble_and_task` that drops the
    task from the returned tuple.

    Args:
        filenames (List[str]): checkpoint files to load
        arg_overrides (Dict[str,Any], optional): override model args that
            were used during model training
        task (fairseq.tasks.FairseqTask, optional): task to use for loading
    """
    assert not (
        strict and num_shards > 1
    ), "Cannot load state dict with strict=True and checkpoint shards > 1"
    models, model_cfg, _ = load_model_ensemble_and_task(
        filenames,
        arg_overrides=arg_overrides,
        task=task,
        strict=strict,
        suffix=suffix,
        num_shards=num_shards,
        state=state,
    )
    return models, model_cfg
165,232 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
import math
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from data import data_utils
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `prune_state_dict` function. Write a Python function `def prune_state_dict(state_dict, model_cfg: Optional[DictConfig])` to solve the following problem:
Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly.
Here is the function:
def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):
    """Prune the given state_dict if desired for LayerDrop
    (https://arxiv.org/abs/1909.11556).
    Training with LayerDrop allows models to be robust to pruning at inference
    time. This function prunes state_dict to allow smaller models to be loaded
    from a larger model and re-maps the existing state_dict for this to occur.
    It's called by functions that load models from checkpoints and does not
    need to be called directly.
    """
    arch = None
    if model_cfg is not None:
        arch = (
            model_cfg._name
            if isinstance(model_cfg, DictConfig)
            else getattr(model_cfg, "arch", None)
        )
    if not model_cfg or arch is None or arch == "ptt_transformer":
        # args should not be none, but don't crash if it is.
        return state_dict
    encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None)
    decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None)
    if not encoder_layers_to_keep and not decoder_layers_to_keep:
        return state_dict
    # apply pruning
    logger.info(
        "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
    )
    def create_pruning_pass(layers_to_keep, layer_name):
        # Build a map from kept original layer numbers to new consecutive
        # numbers (e.g. keep "1,3" -> {"1": "0", "3": "1"}), plus a regex
        # that matches keys of that sub-module and captures the layer number.
        keep_layers = sorted(
            int(layer_string) for layer_string in layers_to_keep.split(",")
        )
        mapping_dict = {}
        for i in range(len(keep_layers)):
            mapping_dict[str(keep_layers[i])] = str(i)
        regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
        return {"substitution_regex": regex, "mapping_dict": mapping_dict}
    pruning_passes = []
    if encoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
    if decoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))
    new_state_dict = {}
    for layer_name in state_dict.keys():
        match = re.search(r"\.layers\.(\d+)\.", layer_name)
        # if layer has no number in it, it is a supporting layer, such as an
        # embedding
        if not match:
            new_state_dict[layer_name] = state_dict[layer_name]
            continue
        # otherwise, layer should be pruned.
        original_layer_number = match.group(1)
        # figure out which mapping dict to replace from
        for pruning_pass in pruning_passes:
            if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
                "substitution_regex"
            ].search(layer_name):
                new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
                substitution_match = pruning_pass["substitution_regex"].search(
                    layer_name
                )
                # Splice the renumbered layer index into the original key;
                # keys for dropped layers are simply never copied over.
                new_state_key = (
                    layer_name[: substitution_match.start(1)]
                    + new_layer_number
                    + layer_name[substitution_match.end(1) :]
                )
                new_state_dict[new_state_key] = state_dict[layer_name]
    # Since layers are now pruned, *_layers_to_keep are no longer needed.
    # This is more of "It would make it work fix" rather than a proper fix.
    if isinstance(model_cfg, DictConfig):
        context = open_dict(model_cfg)
    else:
        context = contextlib.ExitStack()
    with context:
        if hasattr(model_cfg, "encoder_layers_to_keep"):
            model_cfg.encoder_layers_to_keep = None
        if hasattr(model_cfg, "decoder_layers_to_keep"):
            model_cfg.decoder_layers_to_keep = None
    return new_state_dict
165,233 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
import math
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from data import data_utils
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
    """Loads a checkpoint to CPU (with upgrading for backward compatibility).
    If doing single-GPU training or if the checkpoint is only being loaded by at
    most one process on each node (current default behavior is for only rank 0
    to read the checkpoint from disk), load_on_all_ranks should be False to
    avoid errors from torch.distributed not having been initialized or
    torch.distributed.barrier() hanging.
    If all processes on each node may be loading the checkpoint
    simultaneously, load_on_all_ranks should be set to True to avoid I/O
    conflicts.
    There's currently no support for > 1 but < all processes loading the
    checkpoint on each node.
    """
    local_path = PathManager.get_local_path(path)
    # The locally cached file returned by get_local_path() may be stale for
    # remote files that are periodically updated/overwritten (ex:
    # checkpoint_last.pt) - so we remove the local copy, sync across processes
    # (if needed), and then download a fresh copy.
    if local_path != path and PathManager.path_requires_pathmanager(path):
        try:
            os.remove(local_path)
        except FileNotFoundError:
            # With potentially multiple processes removing the same file, the
            # file being missing is benign (missing_ok isn't available until
            # Python 3.8).
            pass
        if load_on_all_ranks:
            torch.distributed.barrier()
        local_path = PathManager.get_local_path(path)
    with open(local_path, "rb") as f:
        state = torch.load(f, map_location=torch.device("cpu"))
    # Legacy checkpoints carry an argparse Namespace under "args": apply
    # overrides via setattr.
    if "args" in state and state["args"] is not None and arg_overrides is not None:
        args = state["args"]
        for arg_name, arg_val in arg_overrides.items():
            setattr(args, arg_name, arg_val)
    if "cfg" in state and state["cfg"] is not None:
        # hack to be able to set Namespace in dict config. this should be removed when we update to newer
        # omegaconf version that supports object flags, or when we migrate all existing models
        from omegaconf import _utils
        old_primitive = _utils.is_primitive_type
        _utils.is_primitive_type = lambda _: True
        state["cfg"] = OmegaConf.create(state["cfg"])
        # restore the patched omegaconf internals immediately after use
        _utils.is_primitive_type = old_primitive
        OmegaConf.set_struct(state["cfg"], True)
        if arg_overrides is not None:
            overwrite_args_by_name(state["cfg"], arg_overrides)
    # upgrade old-format checkpoints to the current layout before returning
    state = _upgrade_state_dict(state)
    return state
The provided code snippet includes necessary dependencies for implementing the `load_pretrained_component_from_model` function. Write a Python function `def load_pretrained_component_from_model( component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str )` to solve the following problem:
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file.
Here is the function:
def load_pretrained_component_from_model(
    component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
):
    """
    Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
    provided `component` object. If state_dict fails to load, there may be a
    mismatch in the architecture of the corresponding `component` found in the
    `checkpoint` file.
    """
    if not PathManager.exists(checkpoint):
        raise IOError("Model file not found: {}".format(checkpoint))
    state = load_checkpoint_to_cpu(checkpoint)

    # Determine which sub-module's weights to extract from the full model.
    if isinstance(component, FairseqEncoder):
        component_type = "encoder"
    elif isinstance(component, FairseqDecoder):
        component_type = "decoder"
    else:
        raise ValueError(
            "component to load must be either a FairseqEncoder or "
            "FairseqDecoder. Loading other component types are not supported."
        )

    # Strip the component prefix: "encoder.input_layers.0.0.weight"
    # becomes "input_layers.0.0.weight".
    component_state_dict = OrderedDict(
        (full_key[len(component_type) + 1 :], state["model"][full_key])
        for full_key in state["model"].keys()
        if full_key.startswith(component_type)
    )
    component.load_state_dict(component_state_dict, strict=True)
    return component
165,234 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
import math
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from data import data_utils
logger = logging.getLogger(__name__)
def verify_checkpoint_directory(save_dir: str) -> None:
    """Ensure *save_dir* exists and is writable.

    Creates the directory if missing, then probes writability by touching
    and removing a throwaway file.  On failure, logs a warning and
    re-raises the underlying ``OSError``.
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    probe_path = os.path.join(save_dir, "dummy")
    try:
        open(probe_path, "w").close()
    except OSError as err:
        logger.warning(
            "Unable to access checkpoint save directory: {}".format(save_dir)
        )
        raise err
    os.remove(probe_path)
165,235 | import ast
import collections
import contextlib
import logging
import numpy as np
import os
import re
import time
import traceback
import math
from collections import OrderedDict
from typing import Any, Dict, Optional, Union
import torch
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict, OmegaConf
from data import data_utils
The provided code snippet includes necessary dependencies for implementing the `load_ema_from_checkpoint` function. Write a Python function `def load_ema_from_checkpoint(fpath)` to solve the following problem:
Loads exponential moving averaged (EMA) checkpoint from input and returns a model with ema weights. Args: fpath: A string path of checkpoint to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors.
Here is the function:
def load_ema_from_checkpoint(fpath):
    """Loads exponential moving averaged (EMA) checkpoint from input and
    returns a model with ema weights.
    Args:
        fpath: A string path of checkpoint to load from.
    Returns:
        A dict of string keys mapping to various values. The 'model' key
        from the returned dict should correspond to an OrderedDict mapping
        string parameter names to torch Tensors.
    """
    params_dict = collections.OrderedDict()
    new_state = None
    with PathManager.open(fpath, 'rb') as f:
        new_state = torch.load(
            f,
            map_location=(
                lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
            ),
        )
    # EMA model is stored in a separate "extra state"
    model_params = new_state['extra_state']['ema']
    for key in list(model_params.keys()):
        p = model_params[key]
        # promote half-precision params to float32
        if isinstance(p, torch.HalfTensor):
            p = p.float()
        if key not in params_dict:
            params_dict[key] = p.clone()
            # NOTE: clone() is needed in case of p is a shared parameter
        else:
            raise ValueError("Key {} is repeated in EMA model params.".format(key))
    if len(params_dict) == 0:
        raise ValueError(
            f"Input checkpoint path '{fpath}' does not contain "
            "ema model weights, is this model trained with EMA?"
        )
    # Overwrite the regular weights with the EMA weights so downstream code
    # can treat this like an ordinary checkpoint state.
    new_state['model'] = params_dict
    return new_state
165,236 | import random
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
import numpy as np
from PIL import Image
def crop(image, target, region, delete=True):
    """Crop *image* to *region* = (top, left, height, width) and adjust annotations.

    Boxes and polygons are shifted into the crop frame and clamped to its
    bounds; masks are sliced.  When *delete* is True, instances whose
    clipped box collapses to zero area (or whose mask becomes empty) are
    removed from every annotation field.  Returns (cropped_image,
    new_target); the input target dict is not modified.
    """
    cropped_image = F.crop(image, *region)
    target = target.copy()
    i, j, h, w = region
    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])
    # fields that must be filtered together when instances are dropped
    fields = ["labels", "area"]
    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # shift xyxy corners into the crop's coordinate frame ...
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        # ... then clamp each (x, y) corner into [0, w] x [0, h]
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")
    if "polygons" in target:
        polygons = target["polygons"]
        num_polygons = polygons.shape[0]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # one (j, i) offset per (x, y) pair in the flat polygon layout
        start_coord = torch.cat([torch.tensor([j, i], dtype=torch.float32)
                                 for _ in range(polygons.shape[1] // 2)], dim=0)
        cropped_boxes = polygons - start_coord
        cropped_boxes = torch.min(cropped_boxes.reshape(num_polygons, -1, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        target["polygons"] = cropped_boxes.reshape(num_polygons, -1)
        fields.append("polygons")
    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target['masks'] = target['masks'][:, i:i + h, j:j + w]
        fields.append("masks")
    # remove elements for which the boxes or masks that have zero area
    if delete and ("boxes" in target or "masks" in target):
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target['boxes'].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target['masks'].flatten(1).any(1)
        for field in fields:
            target[field] = target[field][keep.tolist()]
    return cropped_image, target
165,237 | import random
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
import numpy as np
from PIL import Image
def hflip(image, target):
    """Horizontally flip *image* and mirror the annotations in *target*.

    Boxes (xyxy), polygons (flat (N, 2*P) xy coordinates) and masks are
    reflected about the vertical axis.  Returns (flipped_image, new_target);
    the input target dict is not modified.
    """
    flipped_image = F.hflip(image)
    w, h = image.size
    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        # xyxy flip: new_x1 = w - x2, new_x2 = w - x1; y coordinates unchanged
        boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
        target["boxes"] = boxes
    if "polygons" in target:
        polygons = target["polygons"]
        num_polygons = polygons.shape[0]
        polygons = polygons.reshape(num_polygons, -1, 2) * torch.as_tensor([-1, 1]) + torch.as_tensor([w, 0])
        # Flatten back to (N, 2*P): crop() and resize() keep polygons in flat
        # layout, so leaving them 3-D here would be inconsistent downstream.
        target["polygons"] = polygons.reshape(num_polygons, -1)
    if "masks" in target:
        target['masks'] = target['masks'].flip(-1)
    return flipped_image, target
165,238 | import random
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
import numpy as np
from PIL import Image
def resize(image, target, size, max_size=None):
    """Resize *image* and rescale the annotations in *target* accordingly.

    *size* is either a scalar target shorter-side length (aspect ratio is
    preserved, optionally capped by *max_size*) or an explicit (w, h) pair.
    Returns (rescaled_image, new_target), or just the resized image when
    *target* is None.  Mask resizing is not implemented (asserts).
    """
    # size can be min_size (scalar) or (w, h) tuple
    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        # Compute output (h, w) that scales the shorter side to `size`
        # while keeping the longer side within `max_size`.
        w, h = image_size
        if (w <= h and w == size) or (h <= w and h == size):
            if max_size is not None:
                max_size = int(max_size)
                h = min(h, max_size)
                w = min(w, max_size)
            return (h, w)
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
        if max_size is not None:
            max_size = int(max_size)
            oh = min(oh, max_size)
            ow = min(ow, max_size)
        return (oh, ow)
    def get_size(image_size, size, max_size=None):
        if isinstance(size, (list, tuple)):
            # explicit (w, h) requested; flip to (h, w) for F.resize
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)
    size = get_size(image.size, size, max_size)
    rescaled_image = F.resize(image, size, interpolation=Image.BICUBIC)
    if target is None:
        return rescaled_image
    # per-axis scale factors actually applied (after rounding)
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
    ratio_width, ratio_height = ratios
    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
        target["boxes"] = scaled_boxes
    if "polygons" in target:
        polygons = target["polygons"]
        # repeat the (w, h) ratio pair once per (x, y) coordinate pair
        scaled_ratio = torch.cat([torch.tensor([ratio_width, ratio_height])
                                  for _ in range(polygons.shape[1] // 2)], dim=0)
        scaled_polygons = polygons * scaled_ratio
        target["polygons"] = scaled_polygons
    if "area" in target:
        area = target["area"]
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area
    h, w = size
    target["size"] = torch.tensor([h, w])
    if "masks" in target:
        assert False
        # target['masks'] = interpolate(
        #     target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
    return rescaled_image, target
165,262 | import string
import math
import torch
from data import data_utils
def eval_vqa_gen(task, generator, models, sample, **kwargs):
    """Run open-ended VQA inference and score each answer against ref_dict."""
    hypos = task.inference_step(generator, models, sample)
    results = [
        {
            "question_id": sample_id,
            "answer": decode_fn(
                hypos[idx][0]["tokens"], task.tgt_dict, task.bpe, generator
            ).strip(),
        }
        for idx, sample_id in enumerate(sample["id"].tolist())
    ]
    scores = [
        ref.get(res["answer"], 0)
        for ref, res in zip(sample["ref_dict"], results)
    ]
    return results, scores
def zero_shot_step(task, generator, models, sample, **kwargs):
    """Dispatch a zero-shot evaluation step for the configured task."""
    generator.zero_shot = True
    if task.cfg._name != 'vqa_gen':
        raise NotImplementedError
    # Zero-shot VQA decodes freely, without the closed-vocabulary answer trie.
    generator.constraint_trie = None
    return eval_vqa_gen(task, generator, models, sample, **kwargs)
165,263 | import string
import math
import json
from itertools import chain
import os
import torch
import torch.distributed as dist
from data import data_utils
from tasks.nlg_tasks.gigaword import fix_tokenization
import random
def eval_simmc2(task, generator, models, sample, **kwargs):
    """Generate SIMMC 2.0 response predictions for each dialogue turn.

    Keeps only the top-scoring hypothesis per turn.  Returns
    ``(results, None)`` — one ``{"turn_id", "predictions"}`` dict per
    sample; no per-sample scores are computed here.
    """
    # Removed: large commented-out blocks experimenting with random /
    # min-score hypothesis selection — dead code that obscured the function.
    hypos = task.inference_step(generator, models, sample)
    results = []
    for i, sample_id in enumerate(sample["id"].tolist()):
        detok_hypo_str = decode_fn(hypos[i][0]["tokens"], task.tgt_dict, task.bpe, generator)
        results.append({"turn_id": str(sample_id), "predictions": detok_hypo_str})
    return results, None
def eval_caption(task, generator, models, sample, **kwargs):
    """Generate punctuation-stripped image captions for COCO-style scoring."""
    # Translation table that deletes every ASCII punctuation character.
    strip_punct = str.maketrans({ch: None for ch in string.punctuation})
    hypos = task.inference_step(generator, models, sample)
    results = [
        {
            "image_id": str(sample_id),
            "caption": decode_fn(
                hypos[idx][0]["tokens"], task.tgt_dict, task.bpe, generator
            ).translate(strip_punct).strip(),
        }
        for idx, sample_id in enumerate(sample["id"].tolist())
    ]
    return results, None
def eval_vqa_gen(task, generator, models, sample, **kwargs):
    """Evaluate VQA either by beam search or by ranking candidate answers.

    With ``kwargs['beam_search_vqa_eval']`` set, answers are generated via
    the task's inference step.  Otherwise each candidate in
    ``task.valid_answers_list`` is scored by the (single) model under the
    per-answer constraint masks, and the highest-scoring candidate wins.

    Returns:
        (results, scores): one {"question_id", "answer"} dict per sample,
        and the soft accuracy of each answer from ``sample['ref_dict']``.
    """
    if kwargs['beam_search_vqa_eval']:
        hypos = task.inference_step(generator, models, sample, prefix_tokens=sample['prefix_tokens'])
        results = []
        for i, sample_id in enumerate(sample["id"].tolist()):
            # length of the forced prefix — assumes pad index 1, TODO confirm
            prefix_len = sample['prefix_tokens'][i].ne(1).sum().item()
            detok_hypo_str = decode_fn(hypos[i][0]["tokens"][prefix_len:], task.tgt_dict, task.bpe, generator)
            results.append({"question_id": int(sample_id), "answer": detok_hypo_str.strip()})
        scores = [ref_dict.get(result['answer'], 0) for ref_dict, result in zip(sample['ref_dict'], results)]
        return results, scores
    # Candidate-ranking path: encode image+question once, then score every
    # candidate answer with the decoder.
    encoder_out = models[0].encoder(
        sample["net_input"]["src_tokens"],
        src_lengths=sample["net_input"]["src_lengths"],
        patch_images=sample["net_input"]["patch_images"],
        patch_masks=sample["net_input"]["patch_masks"]
    )
    device = sample["net_input"]["src_tokens"].device
    eos_item = torch.tensor([task.src_dict.eos()])
    pad = task.src_dict.pad()
    valid_result = []
    for valid_answers, valid_constraint_masks in zip(task.valid_answers_list, task.valid_constraint_masks_list):
        valid_size = len(valid_answers)
        # target = prompt (without first token) + answer + EOS, for every
        # (decoder prompt, candidate answer) pair
        valid_tgt_items = [
            torch.cat([torch.tensor(decoder_prompt[1:]), valid_answer, eos_item])
            for decoder_prompt in sample["decoder_prompts"] for valid_answer in valid_answers
        ]
        valid_prev_items = [
            torch.cat([torch.tensor(decoder_prompt), valid_answer])
            for decoder_prompt in sample["decoder_prompts"] for valid_answer in valid_answers
        ]
        # pad the constraint mask with all-False rows over the prompt span
        valid_constraint_mask_items = [
            torch.cat(
                [torch.zeros(len(decoder_prompt) - 1, valid_constraint_mask.size(1)).bool(), valid_constraint_mask],
                dim=0
            )
            for decoder_prompt in sample["decoder_prompts"] for valid_constraint_mask in valid_constraint_masks
        ]
        valid_tgt = data_utils.collate_tokens(valid_tgt_items, pad_idx=pad).to(device)
        valid_prev_output = data_utils.collate_tokens(valid_prev_items, pad_idx=pad).to(device)
        valid_constraint_masks = data_utils.collate_tokens(valid_constraint_mask_items, pad_idx=pad).to(device)
        # replicate the encoder output once per candidate answer
        new_encoder_out = {}
        new_encoder_out["encoder_out"] = [
            encoder_out["encoder_out"][0].repeat_interleave(valid_size, dim=1)
        ]
        new_encoder_out["encoder_padding_mask"] = [
            encoder_out["encoder_padding_mask"][0].repeat_interleave(valid_size, dim=0)
        ]
        new_encoder_out["position_embeddings"] = [
            encoder_out["position_embeddings"][0].repeat_interleave(valid_size, dim=0)
        ]
        decoder_out = models[0].decoder(valid_prev_output, encoder_out=new_encoder_out)
        # forbid tokens outside the constraint mask before normalization
        decoder_out[0].masked_fill_(~valid_constraint_masks, -math.inf)
        lprobs = models[0].get_normalized_probs(decoder_out, log_probs=True)
        scores = lprobs.gather(dim=-1, index=valid_tgt.unsqueeze(-1)).squeeze(-1)
        # zero out padding and fully-unconstrained positions before summing
        scores = scores.masked_fill(valid_tgt.eq(task.tgt_dict.pad()), 0)
        scores = scores.masked_fill((~valid_constraint_masks).all(2), 0)
        scores = scores.sum(1)
        scores = scores.view(-1, valid_size)
        valid_result.append(scores)
    valid_result = torch.cat(valid_result, dim=-1)
    predicts = valid_result.argmax(1).tolist()
    hyps = [task.index2ans[predict_index] for predict_index in predicts]
    results = [{"question_id": int(id), "answer": hyp} for id, hyp in zip(sample["id"].tolist(), hyps)]
    scores = [ref_dict.get(hyp, 0) for ref_dict, hyp in zip(sample['ref_dict'], hyps)]
    return results, scores
def eval_refcoco(task, generator, models, sample, **kwargs):
    """Decode referring-expression boxes and score them as IoU >= 0.5 hits."""
    def _calculate_ap_score(hyps, refs, thresh=0.5):
        # Intersection box: max of the top-left corners, min of the
        # bottom-right corners (both inputs are xyxy, shape (B, 4)).
        interacts = torch.cat(
            [torch.where(hyps[:, :2] < refs[:, :2], refs[:, :2], hyps[:, :2]),
             torch.where(hyps[:, 2:] < refs[:, 2:], hyps[:, 2:], refs[:, 2:])],
            dim=1
        )
        area_predictions = (hyps[:, 2] - hyps[:, 0]) * (hyps[:, 3] - hyps[:, 1])
        area_targets = (refs[:, 2] - refs[:, 0]) * (refs[:, 3] - refs[:, 1])
        interacts_w = interacts[:, 2] - interacts[:, 0]
        interacts_h = interacts[:, 3] - interacts[:, 1]
        area_interacts = interacts_w * interacts_h
        # IoU with a small epsilon to avoid division by zero
        ious = area_interacts / (area_predictions + area_targets - area_interacts + 1e-6)
        # count as a hit only when the boxes genuinely overlap
        return ((ious >= thresh) & (interacts_w > 0) & (interacts_h > 0)).float()
    gen_out = task.inference_step(generator, models, sample)
    hyps = []
    for i in range(len(gen_out)):
        # map generated location tokens back to bin indices (drop last token,
        # presumably EOS — TODO confirm)
        hyps.append(gen_out[i][0]["tokens"][:-1] - len(task.src_dict) + task.cfg.num_bins)
    hyps = torch.stack(hyps, dim=0)
    # bins -> pixel coordinates in the resized image ...
    hyps = hyps / (task.cfg.num_bins - 1) * task.cfg.max_image_size
    # ... then undo the resize ratios to return to original-image coordinates
    hyps[:, ::2] /= sample['w_resize_ratios'].unsqueeze(1)
    hyps[:, 1::2] /= sample['h_resize_ratios'].unsqueeze(1)
    results = [
        {"uniq_id": sample_id,
         "box": [hyps[i][0].item(), hyps[i][1].item(), hyps[i][2].item(), hyps[i][3].item()]}
        for i, sample_id in enumerate(sample["id"].tolist())
    ]
    scores = _calculate_ap_score(hyps, sample['region_coords'].float())
    return results, scores
def eval_snli_ve(task, generator, models, sample, **kwargs):
    """Evaluate SNLI-VE (visual entailment) by ranking a closed answer set.

    Encodes the batch once, then scores every candidate answer with the
    decoder under constraint masks; the highest-scoring candidate is the
    prediction.

    Returns:
        (results, scores): per-sample ``{"uniq_id", "answer"}`` records and
        soft-accuracy scores looked up in ``sample['ref_dict']``.
    """
    encoder_out = models[0].encoder(
        sample["net_input"]["src_tokens"],
        src_lengths=sample["net_input"]["src_lengths"],
        patch_images=sample["net_input"]["patch_images"],
        patch_masks=sample["net_input"]["patch_masks"]
    )
    device = sample["net_input"]["src_tokens"].device
    eos_item = torch.tensor([task.src_dict.eos()])
    pad = task.src_dict.pad()
    valid_result = []
    # Candidate answers come pre-bucketed; score one bucket at a time.
    for valid_answers, valid_constraint_masks in zip(task.valid_answers_list, task.valid_constraint_masks_list):
        valid_size = len(valid_answers)
        # Target sequence: prompt without its first token + answer + EOS,
        # for every (sample, candidate) pair.
        valid_tgt_items = [
            torch.cat([torch.tensor(decoder_prompt[1:]), valid_answer, eos_item])
            for decoder_prompt in sample["decoder_prompts"] for valid_answer in valid_answers
        ]
        # Decoder input: full prompt + answer (right-shifted w.r.t. target).
        valid_prev_items = [
            torch.cat([torch.tensor(decoder_prompt), valid_answer])
            for decoder_prompt in sample["decoder_prompts"] for valid_answer in valid_answers
        ]
        # Prepend all-False rows over the prompt prefix so prompt positions
        # never contribute to the candidate's score.
        valid_constraint_mask_items = [
            torch.cat(
                [torch.zeros(len(decoder_prompt) - 1, valid_constraint_mask.size(1)).bool(), valid_constraint_mask],
                dim=0
            )
            for decoder_prompt in sample["decoder_prompts"] for valid_constraint_mask in valid_constraint_masks
        ]
        valid_tgt = data_utils.collate_tokens(valid_tgt_items, pad_idx=pad).to(device)
        valid_prev_output = data_utils.collate_tokens(valid_prev_items, pad_idx=pad).to(device)
        valid_constraint_masks = data_utils.collate_tokens(valid_constraint_mask_items, pad_idx=pad).to(device)
        # Tile the encoder output so each sample is paired with every candidate.
        new_encoder_out = {}
        new_encoder_out["encoder_out"] = [
            encoder_out["encoder_out"][0].repeat_interleave(valid_size, dim=1)
        ]
        new_encoder_out["encoder_padding_mask"] = [
            encoder_out["encoder_padding_mask"][0].repeat_interleave(valid_size, dim=0)
        ]
        new_encoder_out["position_embeddings"] = [
            encoder_out["position_embeddings"][0].repeat_interleave(valid_size, dim=0)
        ]
        decoder_out = models[0].decoder(valid_prev_output, encoder_out=new_encoder_out)
        # Forbid tokens outside the constraint mask before normalization.
        decoder_out[0].masked_fill_(~valid_constraint_masks, -math.inf)
        lprobs = models[0].get_normalized_probs(decoder_out, log_probs=True)
        scores = lprobs.gather(dim=-1, index=valid_tgt.unsqueeze(-1)).squeeze(-1)
        # Zero out padding and fully-masked positions, then sum per pair.
        scores = scores.masked_fill(valid_tgt.eq(task.tgt_dict.pad()), 0)
        scores = scores.masked_fill((~valid_constraint_masks).all(2), 0)
        scores = scores.sum(1)
        scores = scores.view(-1, valid_size)
        valid_result.append(scores)
    # Concatenate bucket scores and pick the best candidate per sample.
    valid_result = torch.cat(valid_result, dim=-1)
    predicts = valid_result.argmax(1).tolist()
    hyps = [task.index2ans[predict_index] for predict_index in predicts]
    results = [{"uniq_id": id, "answer": hyp} for id, hyp in zip(sample["id"].tolist(), hyps)]
    scores = [ref_dict.get(hyp, 0) for ref_dict, hyp in zip(sample['ref_dict'], hyps)]
    return results, scores
def eval_image_gen(task, generator, models, sample, **kwargs):
    """Evaluate text-to-image generation for a single-sample batch.

    Generates candidate images, ranks them by text-image similarity against
    the source caption, and optionally dumps the ranked images to disk.

    Returns:
        (results, scores): per-candidate ``{"sample_id", "score", "image"}``
        records and a one-element list with the best similarity score.
    """
    hypos, _ = task.inference_image(generator, sample, models)
    tokens = sample['net_input']['src_tokens'][0].view(-1).tolist()
    # Recover the caption text from the source tokens (ids < 4 are special
    # symbols); the fixed prompt prefix occupies the first 38 characters.
    # NOTE: the original computed this a second time, identically, inside the
    # dump branch below — hoisted here so it is decoded exactly once.
    caption = task.bpe.decode(task.tgt_dict.string([token for token in tokens if token >= 4]))[
        38:].replace('/', '')
    text_similarity_score, indices = task.compute_text_similarity(hypos, caption,
                                                                  sample['net_input']['src_tokens'].device)
    results = []
    for i, indice in enumerate(indices):
        results.append({"sample_id": str(sample["id"][0]), "score": text_similarity_score[i], "image": hypos[indice]})
    scores = [max(text_similarity_score).item()]
    sorted_hyps = [hypos[indice] for indice in indices]
    # dump results
    if task.cfg.gen_images_path:
        task.dump_images(sorted_hyps, text=caption, path=os.path.join(task.cfg.gen_images_path, 'all_results'))
        task.dump_images(sorted_hyps, text=caption, path=os.path.join(task.cfg.gen_images_path, 'top1'), topk=1)
    return results, scores
def eval_glue(task, generator, models, sample, **kwargs):
    """Evaluate a GLUE classification task by scoring the constrained label
    vocabulary at the last decoder position.

    Returns:
        (results, None): one ``{"hyp", "ref"}`` record per sample; metric
        computation is deferred to the caller.
    """
    net_output = models[0](**sample["net_input"])
    # Restrict the output distribution to valid label tokens only.
    net_output[0].masked_fill_(~sample["constraint_masks"], -math.inf)
    # Index of the last non-padding decoder-input token for each sample.
    last_token_ids = sample["net_input"]["prev_output_tokens"].ne(task.src_dict.pad()).sum(1, keepdim=True) - 1
    logits = net_output[0].gather(1, last_token_ids.unsqueeze(2).expand(-1, -1, net_output[0].size(2)))
    logits = logits.squeeze(1)
    predicts = logits.argmax(1).tolist()
    hyps = [task.bpe.decode(task.src_dict[predict]).strip() for predict in predicts]
    # BUGFIX: the original used `ref_dict.keys()[0]`, which raises TypeError
    # on Python 3 (dict views are not subscriptable). Take the first key via
    # an iterator instead.
    results = [{"hyp": hyp, "ref": next(iter(ref_dict))} for hyp, ref_dict in zip(hyps, sample['ref_dict'])]
    return results, None
def eval_gigaword(task, generator, models, sample, **kwargs):
    """Evaluate Gigaword summarization: decode each best hypothesis,
    normalize its tokenization, and pair it with the reference string.

    Returns:
        (results, None): one ``{"hyp", "ref"}`` record per sample; scoring
        happens later, outside this function.
    """
    gen_out = task.inference_step(generator, models, sample)
    results = []
    for sample_idx, hypotheses in enumerate(gen_out):
        decoded = decode_fn(hypotheses[0]["tokens"], task.tgt_dict, task.bpe, generator)
        hyp = fix_tokenization(decoded.lower().strip()).replace('1', '#')
        results.append({"hyp": hyp, "ref": sample['target_strs'][sample_idx]})
    return results, None
def eval_image_classify(task, generator, models, sample, **kwargs):
    """Evaluate image classification by scoring every candidate label sequence.

    The encoder runs once per image; the pre-collated label tensors
    (``task.valid_tgt_list`` and friends) are tiled across the batch, scored
    with the decoder under constraint masks, and the best label is chosen.

    Returns:
        (results, scores): per-sample ``{"uniq_id", "answer"}`` records and
        soft-accuracy scores looked up in ``sample['ref_dict']``.
    """
    batch_size = sample["net_input"]["src_tokens"].size(0)
    encoder_out = models[0].encoder(
        sample["net_input"]["src_tokens"],
        src_lengths=sample["net_input"]["src_lengths"],
        patch_images=sample["net_input"]["patch_images"],
        patch_masks=sample["net_input"]["patch_masks"]
    )
    device = sample["net_input"]["src_tokens"].device
    valid_result = []
    # Candidate labels are chunked into several pre-collated tensor groups.
    for valid_tgt, valid_prev_output, valid_constraint_masks in zip(task.valid_tgt_list,
                                                                    task.valid_prev_output_list,
                                                                    task.valid_constraint_masks_list):
        valid_tgt_size = valid_tgt.size(0)
        # Tile candidates across the batch: every (image, label) pair is scored.
        valid_tgt = valid_tgt.repeat(batch_size, 1).to(device)
        valid_prev_output = valid_prev_output.repeat(batch_size, 1).to(device)
        valid_constraint_masks = valid_constraint_masks.repeat(batch_size, 1, 1).to(device)
        # Tile the encoder output to match the candidate count.
        new_encoder_out = {}
        new_encoder_out["encoder_out"] = [
            encoder_out["encoder_out"][0].repeat_interleave(valid_tgt_size, dim=1)
        ]
        new_encoder_out["encoder_padding_mask"] = [
            encoder_out["encoder_padding_mask"][0].repeat_interleave(valid_tgt_size, dim=0)
        ]
        new_encoder_out["position_embeddings"] = [
            encoder_out["position_embeddings"][0].repeat_interleave(valid_tgt_size, dim=0)
        ]
        decoder_out = models[0].decoder(valid_prev_output, encoder_out=new_encoder_out)
        # Forbid tokens outside the constraint mask before normalization.
        decoder_out[0].masked_fill_(~valid_constraint_masks, -math.inf)
        lprobs = models[0].get_normalized_probs(decoder_out, log_probs=True)
        scores = lprobs.gather(dim=-1, index=valid_tgt.unsqueeze(-1)).squeeze(-1)
        # Ignore padding, then sum log-probs over the label tokens.
        scores = scores.masked_fill(valid_tgt.eq(task.tgt_dict.pad()), 0)
        scores = scores.sum(1)
        scores = scores.view(-1, valid_tgt_size)
        valid_result.append(scores)
    valid_result = torch.cat(valid_result, dim=-1)
    predicts = valid_result.argmax(1).tolist()
    hyps = [task.index2ans[predict_index] for predict_index in predicts]
    scores = [ref_dict.get(hyp, 0) for ref_dict, hyp in zip(sample['ref_dict'], hyps)]
    results = [{"uniq_id": id, "answer": hyp} for id, hyp in zip(sample["id"].tolist(), hyps)]
    return results, scores
def eval_step(task, generator, models, sample, **kwargs):
    """Dispatch a validation batch to the task-specific evaluation routine.

    Returns whatever that routine returns — a (results, scores) pair.
    Raises NotImplementedError for tasks without an evaluator.
    """
    task_name = task.cfg._name
    # All GLUE tasks share one evaluator.
    if task_name in {'cola', 'mnli', 'mrpc', 'qnli', 'qqp', 'rte', 'sst2'}:
        return eval_glue(task, generator, models, sample, **kwargs)
    handlers = {
        'caption': eval_caption,
        'vqa_gen': eval_vqa_gen,
        'refcoco': eval_refcoco,
        'snli_ve': eval_snli_ve,
        'image_gen': eval_image_gen,
        'gigaword': eval_gigaword,
        'image_classify': eval_image_classify,
        'simmc2': eval_simmc2,
    }
    if task_name not in handlers:
        raise NotImplementedError
    return handlers[task_name](task, generator, models, sample, **kwargs)
165,264 | import string
import math
import json
from itertools import chain
import os
import torch
import torch.distributed as dist
from data import data_utils
from tasks.nlg_tasks.gigaword import fix_tokenization
import random
def merge_results(task, cfg, logger, score_cnt, score_sum, results):
    """Aggregate per-worker evaluation results across distributed ranks.

    Reduces the score sum/count over all ranks and logs the overall score.
    For every task except ``image_gen`` it also gathers each rank's result
    records and, on rank 0, writes the merged list to
    ``{results_path}/{gen_subset}_predict.json``.

    Args:
        task: fairseq task; only ``task.cfg._name`` is consulted.
        cfg: full config; supplies world size and the output paths.
        logger: logger used for the aggregated score line.
        score_cnt: single-element tensor — local evaluated-sample count.
        score_sum: single-element tensor — local summed score.
        results: list of per-sample result dicts from ``eval_step``.
    """
    if task.cfg._name == 'image_gen':
        # Image generation only reports the aggregate score; images were
        # already dumped to disk during evaluation.
        if cfg.distributed_training.distributed_world_size > 1:
            dist.all_reduce(score_sum.data)
            dist.all_reduce(score_cnt.data)
        if score_cnt.item() > 0:
            logger.info("score_sum: {}, score_cnt: {}, score: {}".format(
                score_sum, score_cnt, round(score_sum.item() / score_cnt.item(), 4)
            ))
    else:
        gather_results = None
        if cfg.distributed_training.distributed_world_size > 1:
            # Collect every rank's result list before reducing the scores.
            gather_results = [None for _ in range(dist.get_world_size())]
            dist.all_gather_object(gather_results, results)
            dist.all_reduce(score_sum.data)
            dist.all_reduce(score_cnt.data)
        if score_cnt.item() > 0:
            logger.info("score_sum: {}, score_cnt: {}, score: {}".format(
                score_sum, score_cnt, round(score_sum.item() / score_cnt.item(), 4)
            ))
        # Only rank 0 (or a single-process run) writes the prediction file.
        if cfg.distributed_training.distributed_world_size == 1 or dist.get_rank() == 0:
            os.makedirs(cfg.common_eval.results_path, exist_ok=True)
            output_path = os.path.join(cfg.common_eval.results_path, "{}_predict.json".format(cfg.dataset.gen_subset))
            # Flatten per-rank lists; single-process runs use the local results.
            gather_results = list(chain(*gather_results)) if gather_results is not None else results
            with open(output_path, 'w') as fw:
                json.dump(gather_results, fw)
165,265 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from six.moves import cPickle
from collections import defaultdict
import numpy as np
import math
import os
def precook(s, n=4, out=False):
    """Map a sentence to its n-gram term-frequency vector.

    Splits *s* on whitespace and counts every contiguous n-gram of order
    1..n. The ``out`` flag is kept for API compatibility and is unused.

    :param s: string : sentence to be converted into ngrams
    :param n: int : maximum n-gram order
    :return: term frequency vector for occurring ngrams
    """
    tokens = s.split()
    tf = defaultdict(int)
    for order in range(1, n + 1):
        for start in range(len(tokens) - order + 1):
            tf[tuple(tokens[start:start + order])] += 1
    return tf
The provided code snippet includes necessary dependencies for implementing the `cook_refs` function. Write a Python function `def cook_refs(refs, n=4)` to solve the following problem:
Takes a list of reference sentences for a single segment and returns an object that encapsulates everything that BLEU needs to know about them. :param refs: list of string : reference sentences for some image :param n: int : number of ngrams for which (ngram) representation is calculated :return: result (list of dict)
Here is the function:
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
    """Precompute n-gram statistics for the reference sentences of one image.

    :param refs: list of string : reference sentences for some image
    :param n: int : maximum n-gram order
    :return: list of n-gram count dicts, one per reference
    """
    cooked = []
    for ref in refs:
        cooked.append(precook(ref, n))
    return cooked
165,266 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from six.moves import cPickle
from collections import defaultdict
import numpy as np
import math
import os
def precook(s, n=4, out=False):
    """Return the term-frequency vector of all 1..n-grams of sentence *s*.

    The ``out`` flag is accepted for API compatibility and is unused.

    :param s: string : sentence to be converted into ngrams
    :param n: int : maximum n-gram order
    :return: term frequency vector for occurring ngrams
    """
    words = s.split()
    counts = defaultdict(int)
    for k in range(1, n + 1):
        # zip over k staggered views yields every contiguous k-gram.
        for ngram in zip(*(words[i:] for i in range(k))):
            counts[ngram] += 1
    return counts
The provided code snippet includes necessary dependencies for implementing the `cook_test` function. Write a Python function `def cook_test(test, n=4)` to solve the following problem:
Takes a test sentence and returns an object that encapsulates everything that BLEU needs to know about it. :param test: list of string : hypothesis sentence for some image :param n: int : number of ngrams for which (ngram) representation is calculated :return: result (dict)
Here is the function:
def cook_test(test, n=4):
    """Precompute n-gram statistics for a single hypothesis sentence.

    :param test: string : hypothesis sentence for some image
    :param n: int : maximum n-gram order
    :return: n-gram count dict for the hypothesis
    """
    return precook(test, n, out=True)
165,267 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from collections import defaultdict
import numpy as np
import pdb
import math
import six
from six.moves import cPickle
import os
def precook(s, n=4, out=False):
    """Count all contiguous n-grams (orders 1..n) of sentence *s*.

    The ``out`` flag is accepted for API compatibility and is unused.

    :param s: string : sentence to be converted into ngrams
    :param n: int : maximum n-gram order
    :return: term frequency vector for occurring ngrams
    """
    toks = s.split()
    vec = defaultdict(int)
    total = len(toks)
    for length in range(1, n + 1):
        for pos in range(total - length + 1):
            vec[tuple(toks[pos:pos + length])] += 1
    return vec
The provided code snippet includes necessary dependencies for implementing the `cook_refs` function. Write a Python function `def cook_refs(refs, n=4)` to solve the following problem:
Takes a list of reference sentences for a single segment and returns an object that encapsulates everything that BLEU needs to know about them. :param refs: list of string : reference sentences for some image :param n: int : number of ngrams for which (ngram) representation is calculated :return: result (list of dict)
Here is the function:
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
    """Precompute n-gram statistics for each reference sentence of an image.

    :param refs: list of string : reference sentences for some image
    :param n: int : maximum n-gram order
    :return: list of n-gram count dicts, one per reference
    """
    return list(map(lambda sentence: precook(sentence, n), refs))
165,268 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from collections import defaultdict
import numpy as np
import pdb
import math
import six
from six.moves import cPickle
import os
def precook(s, n=4, out=False):
    """Build the n-gram term-frequency table for sentence *s* (orders 1..n).

    The ``out`` flag is accepted for API compatibility and is unused.

    :param s: string : sentence to be converted into ngrams
    :param n: int : maximum n-gram order
    :return: term frequency vector for occurring ngrams
    """
    words = s.split()
    counts = defaultdict(int)
    for k in range(1, n + 1):
        limit = len(words) - k + 1
        idx = 0
        while idx < limit:
            counts[tuple(words[idx:idx + k])] += 1
            idx += 1
    return counts
The provided code snippet includes necessary dependencies for implementing the `cook_test` function. Write a Python function `def cook_test(test, n=4)` to solve the following problem:
Takes a test sentence and returns an object that encapsulates everything that BLEU needs to know about it. :param test: list of string : hypothesis sentence for some image :param n: int : number of ngrams for which (ngram) representation is calculated :return: result (dict)
Here is the function:
def cook_test(test, n=4):
    """Precompute n-gram statistics for one hypothesis sentence.

    :param test: string : hypothesis sentence for some image
    :param n: int : maximum n-gram order
    :return: n-gram count dict for the hypothesis
    """
    cooked = precook(test, n, True)
    return cooked
165,269 | import os
from os.path import join, exists
import json
from tqdm import tqdm
from rich import print
import numpy as np
import imagesize
OBJ_BEGIN_TOKEN = '<SOO>'
OBJ_END_TOKEN = '<EOO>'
NOCOREF_TOKEN = '<NOCOREF>'
DISAMBIGUATION_TOKEN = '<DISAM>'
FASHION_DST = "<INTENT><FAS_TYPE><FAS_PRICE><FAS_CUSTOMER_REVIEW><FAS_BRAND><FAS_SIZE><FAS_PATTERN><FAS_COLOR><FAS_SLEEVE_LENGTH><FAS_AVAILABLE_SIZE>"
FURNITURE_DST = "<INTENT><FUR_TYPE><FUR_MATERIALS><FUR_PRICE><FUR_BRAND><FUR_CUSTOMER_RATING><FUR_COLOR>"
def arrange_object_special_tokens(scene_dir, image_dir, scene_ids, object_item2id, insert_bbox_coords):
    '''
    Build the flattened object-token string (and bbox feature list) for a turn.

    scene_dir: directory containing the scene JSON files
    image_dir: directory containing the scene images
    scene_ids: scene ids associated with the dialog turn
    object_item2id: item2id mapping keyed by prefab path
    insert_bbox_coords: whether to insert 3D-scene position features
    '''
    arrange_list = []
    arrange_bbox_list = []
    scene_loaded_list = []
    # Map object index -> index of the LAST scene it appears in, so objects
    # duplicated across scenes are emitted only for the most recent scene.
    obj_dict_possibly_duplicated = dict()
    for scene_id_idx, scene_id in enumerate(scene_ids):
        with open(os.path.join(scene_dir, f"{scene_id}_scene.json"), 'r') as f_in:
            scene = json.load(f_in)
        scene_loaded_list.append(scene)
        for obj in scene['scenes'][0]['objects']:
            obj_dict_possibly_duplicated[obj['index']] = scene_id_idx
    num_scene = len(scene_ids)
    for scene_id_idx, scene_id in enumerate(scene_ids):
        scene = scene_loaded_list[scene_id_idx]
        bbox_id = scene_id[2:] if scene_id.startswith('m_') else scene_id  # strip the "m_" prefix if present
        with open(os.path.join(scene_dir, f"{bbox_id}_bbox.json"), 'r') as f_in:
            bbox = json.load(f_in)
        camera_position = []; camera_dir_vec = []
        for bbox_item in bbox['Items']:
            if bbox_item['name'] == 'camera':
                camera_position = np.array(bbox_item['position'])
            if bbox_item['name'] == 'camera_forward':
                camera_dir_vec = np.array(bbox_item['position'])
        if insert_bbox_coords:
            # First pass: find the largest camera-axis depth so per-object
            # depths can be normalized below.
            largest_z_value = 0
            for obj in scene['scenes'][0]['objects']:
                position = np.array(obj['position'])  # object's 3D position in the scene
                obj_displacement = position - camera_position
                theta = np.dot(obj_displacement, camera_dir_vec) / (np.linalg.norm(obj_displacement)*np.linalg.norm(camera_dir_vec))
                largest_z_value = max(np.linalg.norm(obj_displacement) * np.cos(theta), largest_z_value)
        # Emit a token (and optionally bbox features) for every object of
        # the current scene that is "owned" by this scene index.
        for obj in scene['scenes'][0]['objects']:
            assert obj['index'] in obj_dict_possibly_duplicated, "SOMETHING IS MISSING!"
            if scene_id_idx == obj_dict_possibly_duplicated[obj['index']]:
                if insert_bbox_coords:
                    position = np.array(obj['position'])
                    obj_displacement = position - camera_position
                    theta = np.dot(obj_displacement, camera_dir_vec) / (np.linalg.norm(obj_displacement)*np.linalg.norm(camera_dir_vec))
                    # Depth of the object along the camera's forward axis.
                    z_value = np.linalg.norm(obj_displacement) * np.cos(theta)
                    # image name
                    image_id = None
                    if "m" in scene_id[0]: image_id = scene_id[2:]
                    else: image_id = scene_id
                    image_file_name = os.path.join(image_dir, image_id+".png")
                    if os.path.exists(image_file_name):
                        img_w, img_h = imagesize.get(image_file_name)
                        x1, y1, h, w = obj['bbox']
                        x2, y2 = x1 + w, y1 + h
                        # Centered/normalized 2D box, relative area, normalized depth.
                        pos_str = '[({:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f})]'.format(x1/img_w -0.5, y1/img_h -0.5, x2/img_w -0.5, y2/img_h -0.5, (x2-x1)*(y2-y1)/(img_w*img_h), z_value/largest_z_value)
                        arrange_bbox_list.append([x1/img_w -0.5, y1/img_h -0.5, x2/img_w -0.5, y2/img_h -0.5, (x2-x1)*(y2-y1)/(img_w*img_h), z_value/largest_z_value])
                    else:
                        # No image on disk: fall back to zeroed 2D features,
                        # keeping only the depth term.
                        print(f'{scene_id} is not present in img_size!!!')
                        pos_str = '[({:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f})]'.format(0.0, 0.0, 0.0, 0.0, 0.0, z_value/largest_z_value)
                        arrange_bbox_list.append([0.0, 0.0, 0.0, 0.0, 0.0, z_value/largest_z_value])
                else:
                    pos_str = ''
                # Objects from the first of several scenes get the "previous
                # scene" marker; everything else gets the normal start marker.
                if (num_scene != 1) and (scene_id_idx == 0):
                    arrange_list.append(OBJ_PREVI + "<" + str(obj['index']) + ">" + object_item2id[obj['prefab_path']])
                else:
                    arrange_list.append(OBJ_START + "<" + str(obj['index']) + ">" + object_item2id[obj['prefab_path']])
    return ''.join(arrange_list), arrange_bbox_list
def process_metadata_dict(scene_dir, scene_ids, all_metadata):
    """Build the object-index -> metadata dict for the given scene ids."""
    index2meta = {}
    for scene_id in scene_ids:
        scene_path = os.path.join(scene_dir, f"{scene_id}_scene.json")
        with open(scene_path, 'r') as f_in:
            scene = json.load(f_in)
        for scene_obj in scene['scenes'][0]['objects']:
            index2meta[scene_obj['index']] = all_metadata[scene_obj['prefab_path']]
    return index2meta
The provided code snippet includes necessary dependencies for implementing the `process_for_vlbert_task3` function. Write a Python function `def process_for_vlbert_task3()` to solve the following problem:
为VLBert模型的训练准备
Here is the function:
def process_for_vlbert_task3():
    '''Prepare the data files used to train/evaluate the VLBert model (task 3).'''
    scene_dir = '../../data_dstc11/jsons'
    image_dir = '../../data_dstc11/images'
    obj_item2id_path = '../data/item2id.json'
    fashion_metadata_path = '../../data_dstc11/fashion_prefab_metadata_all.json'
    furniture_metadata_path = '../../data_dstc11/furniture_prefab_metadata_all.json'
    # Merge the fashion and furniture prefab metadata into one lookup table.
    all_metadata = {}
    with open(fashion_metadata_path) as f_in:
        all_metadata.update(json.load(f_in))
    with open(furniture_metadata_path) as f_in:
        all_metadata.update(json.load(f_in))
    with open(obj_item2id_path) as f_in:
        object_item2id = json.load(f_in)
    output = []
    # split_list = ['teststd_public'] # For Final Evaluation
    split_list = ['dev'] # For Evaluation
    # split_list = ['train'] # For Training
    for split in split_list:
        file_path = f'../../data_dstc11/simmc2.1_dials_dstc11_{split}.json'
        with open(file_path) as f_in:
            data = json.load(f_in)['dialogue_data']
        for dialogue in tqdm(data, desc=f'{split} '):
            dialogue_idx = dialogue['dialogue_idx']
            # scene_ids maps turn index -> scene id; sort by the numeric turn index.
            scene_ids = list(sorted(dialogue['scene_ids'].items(), key=lambda item: int(item[0])))
            is_fashion = True if dialogue['domain'] == 'fashion' else False
            obj_metadata_dict = process_metadata_dict(scene_dir, list(dialogue['scene_ids'].values()), all_metadata)
            lst_context = []
            sys_lst_context = []
            prev_turn = None
            prev_sys_object_ids = []
            for turn in dialogue['dialogue']:
                turn_idx = turn['turn_idx']
                system_transcript = turn['system_transcript']
                transcript = turn['transcript']
                user_object = turn['transcript_annotated']['act_attributes']['objects']
                sys_object = turn['system_transcript_annotated']['act_attributes']['objects']
                disambiguation_label = turn['transcript_annotated']['disambiguation_label']
                disambiguation_candidates = turn['transcript_annotated']['disambiguation_candidates']
                slot_values = turn['transcript_annotated']['act_attributes']['slot_values']
                intent = turn['transcript_annotated']['act']
                # Scenes visible up to (and including) the current turn.
                turn_scene_ids = [item[1] for item in scene_ids if int(item[0]) <= turn_idx]
                object_str, bbox_data = arrange_object_special_tokens(scene_dir, image_dir, turn_scene_ids, object_item2id, True)
                # Maintain the running dialog context (user-only and user+system views).
                if prev_turn is None:
                    lst_context.append(f'User : {transcript}')
                    sys_lst_context.append(f'User : {transcript} System : {system_transcript}')
                else:
                    prev_system_transcript = prev_turn['system_transcript']
                    lst_context.append(f'System : {prev_system_transcript} User : {transcript}')
                    sys_lst_context.append(f'User : {transcript} System : {system_transcript}')
                # Model input: disambiguation marker + last two context turns +
                # domain-specific DST slot tokens + flattened object tokens.
                if is_fashion:
                    input_str = DISAMBIGUATION_TOKEN + ' ' + ' '.join(lst_context[-2:]) + FASHION_DST + OBJ_BEGIN_TOKEN + NOCOREF_TOKEN + object_str + OBJ_END_TOKEN
                else:
                    input_str = DISAMBIGUATION_TOKEN + ' ' + ' '.join(lst_context[-2:]) + FURNITURE_DST + OBJ_BEGIN_TOKEN + NOCOREF_TOKEN + object_str + OBJ_END_TOKEN
                output.append({
                    'input': input_str,
                    'disambiguation_label': disambiguation_label,
                    'is_fashion': is_fashion,
                    'bbox': bbox_data,
                    'intent': intent,
                    'slot_values': slot_values,
                    'reference_objects': user_object,
                    'disambiguation_objects': disambiguation_candidates,
                    'dialogue_idx': dialogue_idx,
                    'turn_idx': turn_idx,
                    'role': 'User'
                })
                prev_turn = turn
    print(len(output))
    # with open('../data_dstc11/task2/simmc2.1_dials_dstc11_task3_predict.json', 'w') as f_out:
    #     json.dump(output, f_out, indent=4, ensure_ascii=False)
    with open('../data_dstc11/task2/simmc2.1_dials_dstc11_task3_eval.json', 'w') as f_out:
        json.dump(output, f_out, indent=4, ensure_ascii=False)
    # with open('../data_dstc11/task2/simmc2.1_dials_dstc11_task3_eval_teststd.json', 'w') as f_out:
    #     json.dump(output, f_out, indent=4, ensure_ascii=False)
165,271 | from genericpath import exists
import os
from os.path import join
import json
import argparse
import torch
from torch.optim import AdamW
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import SequentialSampler, DistributedSampler
from tqdm import tqdm, trange
from rich import print
from datetime import datetime
import torch.multiprocessing
from sklearn.metrics import precision_recall_fscore_support
from torch.utils.data import DataLoader
from transformers import (
LongformerTokenizerFast,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_constant_schedule_with_warmup
)
from torch import distributed as dist
from utils.metadata import load_metadata,available_sizes2st
from utils.set_config import set_device, set_seed
from model.backbone import VLBertModelWithDST
from utils.dataset import get_dst_dataset, DataLoaderX
\
def get_dst_dataset(args, tokenizer, all_objects_meta, eval=False, fashion_slot_map=None, furniture_slot_map=None):
    """Construct the DST dataset from the train or eval input file.

    ``eval`` selects which input file from *args* backs the dataset; all
    other arguments are forwarded to ``LineByLineDSTDataset`` unchanged.
    """
    input_file = args.eval_input_file if eval else args.train_input_file
    return LineByLineDSTDataset(input_file, tokenizer, all_objects_meta, eval=eval, fashion_slot_map=fashion_slot_map, furniture_slot_map=furniture_slot_map)
The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(args, model, tokenizer, all_objects_meta, fashion_slot_map, furniture_slot_map)` to solve the following problem:
模型方法的评估函数
Here is the function:
def evaluate(args, model, tokenizer, all_objects_meta, fashion_slot_map, furniture_slot_map):
    '''Evaluation routine for the DST model.

    Runs the model over the eval split, accumulates coreference / intent /
    slot-value statistics, writes the raw predictions to
    ``simmc2.1_task3_predicted.json`` in the checkpoint directory, and
    returns a dict of precision / recall / F1 metrics.
    '''
    def collate_eval_bart(examples):
        # Each example tuple: (enc_input, enc_attention_mask, boxes, misc,
        # <unused>, disambiguation_label, intent, slot_values).
        enc_input = list(map(lambda x: x[0], examples))
        enc_attention_mask = list(map(lambda x: x[1], examples))
        boxes = list(map(lambda x: x[2], examples))
        misc = list(map(lambda x: x[3], examples))
        disambiguation_labels = list(map(lambda x: x[5], examples))
        intent = list(map(lambda x: x[6], examples))
        slot_values = list(map(lambda x: x[7], examples))
        if tokenizer._pad_token is None:
            enc_input_pad = pad_sequence(enc_input, batch_first=True)
        else:
            enc_input_pad = pad_sequence(enc_input, batch_first=True, padding_value=tokenizer.pad_token_id)
        enc_attention_pad = pad_sequence(enc_attention_mask, batch_first=True, padding_value=0)
        return enc_input_pad, enc_attention_pad, boxes, misc, torch.vstack(disambiguation_labels).squeeze(), intent, slot_values

    def rec_prec_f1(n_correct, n_true, n_pred):
        # Standard recall / precision / F1 with zero-division guards.
        rec = n_correct / n_true if n_true != 0 else 0
        prec = n_correct / n_pred if n_pred != 0 else 0
        f1 = 2 * prec * rec / (prec + rec) if (prec + rec) != 0 else 0
        return rec, prec, f1

    eval_dataset = get_dst_dataset(args, tokenizer, all_objects_meta, eval=True, fashion_slot_map=fashion_slot_map, furniture_slot_map=furniture_slot_map)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, num_workers=args.num_workers, batch_size=args.eval_batch_size, collate_fn=collate_eval_bart, pin_memory=True, drop_last=False)
    # Running counters over the whole eval split.
    n_pred_objects, n_true_objects, n_correct_objects = 0, 0, 0
    n_total_disambiguation, n_true_disambiguation = 0, 0
    intent_target_list, intent_pred_list = [], []
    slot_values_target_list, slot_values_pred_list = [], []
    for batch in tqdm(eval_dataloader, desc="Evaluating", colour='blue', leave=False):
        enc_input = batch[0].to(args.device)
        enc_attention_mask = batch[1].to(args.device)
        boxes = batch[2] # batch, num_obj_per_line, 6
        misc = batch[3] # batch, num_obj_per_line, dict
        disambiguation_labels = batch[4].to(args.device)
        intent = batch[5]
        slot_values = batch[6]
        with torch.no_grad():
            s_pred_objects, s_true_objects, s_correct_objects, disambiguation_true_items, disambiguation_total_items, intent_target, intent_pred, slot_values_target, slot_values_pred = model.evaluate(enc_input, enc_attention_mask, boxes, misc, disambiguation_labels, intent, slot_values)
        n_pred_objects += s_pred_objects
        n_true_objects += s_true_objects
        n_correct_objects += s_correct_objects
        n_true_disambiguation += disambiguation_true_items
        n_total_disambiguation += disambiguation_total_items
        intent_target_list.extend(intent_target)
        intent_pred_list.extend(intent_pred)
        slot_values_target_list.extend(slot_values_target)
        slot_values_pred_list.extend(slot_values_pred)
    # Persist the raw predictions next to the checkpoint for later inspection.
    with open(join(args.checkpoint_name_or_path, 'simmc2.1_task3_predicted.json'), 'w') as f_out:
        json.dump({
            'slot_values': slot_values_pred_list,
            'intent': intent_pred_list
        }, f_out, indent=4, ensure_ascii=False)
    intent_pre, intent_rec, intent_f1, sup = precision_recall_fscore_support(intent_target_list, intent_pred_list)
    # Count slot-value hits; an all-zero value encodes "slot not set".
    n_correct_slot_values, n_true_slot_values, n_pred_slot_values = 0, 0, 0
    for idx in range(len(slot_values_target_list)):
        for key in slot_values_target_list[idx].keys():
            if key == 'availableSizes':
                # availableSizes is a 6-way multi-hot vector; [0]*6 means absent.
                if slot_values_target_list[idx][key] != [0, 0, 0, 0, 0, 0]:
                    n_true_slot_values += 1
                if slot_values_pred_list[idx][key] != [0, 0, 0, 0, 0, 0]:
                    n_pred_slot_values += 1
                if slot_values_target_list[idx][key] == slot_values_pred_list[idx][key] and slot_values_pred_list[idx][key] != [0, 0, 0, 0, 0, 0]:
                    n_correct_slot_values += 1
            else:
                if slot_values_target_list[idx][key] != 0:
                    n_true_slot_values += 1
                if slot_values_pred_list[idx][key] != 0:
                    n_pred_slot_values += 1
                if slot_values_target_list[idx][key] == slot_values_pred_list[idx][key] and slot_values_pred_list[idx][key] != 0:
                    n_correct_slot_values += 1
    coref_rec, coref_prec, coref_f1 = rec_prec_f1(n_correct_objects, n_true_objects, n_pred_objects)
    slot_values_rec, slot_values_prec, slot_values_f1 = rec_prec_f1(n_correct_slot_values, n_true_slot_values, n_pred_slot_values)
    # NOTE(review): coref metrics are computed above but only slot-value and
    # intent metrics are returned — confirm this is intentional.
    return {
        'slot_value_precision': slot_values_prec,
        'slot_value_recall': slot_values_rec,
        'slot_value_f1-score': slot_values_f1,
        'intent_precision': intent_pre.mean(),
        'intent_recall': intent_rec.mean(),
        'intent_f1-score': intent_f1.mean(),
    }
165,277 | import json
import torch
from transformers.tokenization_utils import PreTrainedTokenizer
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler, DistributedSampler
from prefetch_generator import BackgroundGenerator
import copy
from .metadata import FASHION_COLOR, FASHION_PATTERN, FASHION_SLEEVE_LENGTH, FURNITURE_BRAND, FURNITURE_COLOR, FURNITURE_CUSTOMER_RATING, FURNITURE_MATERIALS, FURNITURE_PRICE, fashion_meta_attrs, furniture_meta_attrs, available_sizes2st
class LineByLineTask2Dataset(Dataset):
    """Dataset for SIMMC2.1 task 2 (multimodal coreference resolution).

    Each example is a flattened dialogue line in which every scene object
    appears as an index special token followed by its item special token.
    For every object this class builds a coreference label plus attribute
    labels taken from the catalogue metadata.
    """

    def __init__(self, input_file, tokenizer: PreTrainedTokenizer, all_objects_meta, eval=False):
        '''Build examples and per-object supervision from the preprocessed JSON file.'''
        with open(input_file) as f_in:
            self.data = json.load(f_in)
        # Other tasks
        lines = []
        self.boxes = []  # original object bbox/position features, one list per example
        self.generation = []
        self.nocoref = []
        self.disambiguation_objects = []
        self.disambiguation_labels = []
        self.misc = []
        self.is_fashion = []
        corefs = []
        vocab2id = tokenizer.get_vocab()
        id2vocab = {v: k for k, v in vocab2id.items()}
        EOM_id = vocab2id[END_OF_MULTIMODAL_CONTEXTS]
        for dialog in self.data:
            self.disambiguation_labels.append(dialog['disambiguation_label'])
            self.is_fashion.append(dialog['is_fashion'])
            self.boxes.append(dialog['bbox'])
            lines.append(dialog['input'])
            corefs.append([f'<{index}>' for index in dialog['reference_objects']])  # task-2 targets (referenced objects)
            # if not eval:
            #     coref_object = []
            #     coref_object.extend(dialog['reference_objects'])
            #     coref_object.extend(dialog['disambiguation_objects'])
            #     corefs.append([f'<{index}>' for index in coref_object])
            # else:
            #     # corefs.append([f'<{index}>' for index in dialog['reference_objects']])
            #     corefs.append([f'<{index}>' for index in dialog['disambiguation_objects']])  # task-1 variant
        encode_text = tokenizer(lines, add_special_tokens=True)
        self.examples = encode_text.input_ids
        self.examples_attention_mask = encode_text.attention_mask
        nocoref_id = get_input_id(tokenizer, NO_COREF)[0]  # token id of the NO_COREF marker
        id2index, id2fashion_st, id2furniture_st = id_converter(tokenizer)
        for idx, tokenized_line in enumerate(self.examples):
            tl = tokenized_line
            EOM_indices = [i for i, tokenized_id in enumerate(tl) if tokenized_id == EOM_id]
            if EOM_indices:  # any end-of-multimodal-context marker present?
                EOM_last_idx = EOM_indices[-1]
            else:
                EOM_last_idx = -1
            self.nocoref.append((tl.index(nocoref_id), 1 if not corefs[idx] else 0))  # (marker position, 1 when the turn references no object)
            line_labels = []
            if self.is_fashion[idx]:
                for i, token_id in enumerate(tl):
                    if token_id in id2index and i > EOM_last_idx:  # this token is an item index (scene tokens appear after the multimodal context)
                        temp = dict()
                        pos = i
                        item_index = id2index[token_id]
                        fashion_st = id2fashion_st[tl[i+1]]
                        temp['is_fashion'] = True
                        temp['pos'] = pos
                        temp['coref_label'] = 1 if item_index in corefs[idx] else 0
                        temp['misc_labels'] = dict()
                        for attr_name, attr_value in all_objects_meta[fashion_st].items():
                            if attr_name != 'available_sizes':
                                temp['misc_labels'][attr_name] = fashion_meta_attrs[attr_name].index(attr_value)
                            else:
                                temp['misc_labels'][attr_name] = [1 if x in attr_value else 0 for x in fashion_meta_attrs[attr_name]]  # multi-hot: several sizes can be available at once, so this attribute uses a different loss
                        line_labels.append(temp)
            else:
                for i, token_id in enumerate(tl):
                    if token_id in id2index and i > EOM_last_idx:  # this token is for item index
                        temp = dict()
                        pos = i
                        item_index = id2index[token_id]
                        furniture_st = id2furniture_st[tl[i+1]]
                        temp['is_fashion'] = False
                        temp['pos'] = pos  # position of this object's index token in the input
                        temp['coref_label'] = 1 if item_index in corefs[idx] else 0
                        temp['misc_labels'] = dict()
                        for attr_name, attr_value in all_objects_meta[furniture_st].items():
                            temp['misc_labels'][attr_name] = furniture_meta_attrs[attr_name].index(attr_value)
                        line_labels.append(temp)
            self.misc.append(line_labels)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        # (input_ids, attention_mask, bbox features, per-object labels,
        #  nocoref tuple, disambiguation label)
        return torch.tensor(self.examples[i], dtype=torch.long), \
            torch.tensor(self.examples_attention_mask[i], dtype=torch.long), \
            self.boxes[i], \
            self.misc[i], \
            self.nocoref[i], \
            torch.tensor(self.disambiguation_labels[i], dtype=torch.long),
def get_task2_dataset(args, tokenizer, all_objects_meta, eval=False):
    """Return the task-2 dataset built from the train or eval input file."""
    source_file = args.eval_input_file if eval else args.train_input_file
    return LineByLineTask2Dataset(source_file, tokenizer, all_objects_meta, eval=eval)
165,280 | import os
from os.path import join, exists
import json
from tqdm import tqdm
from rich import print
import numpy as np
import imagesize
The provided code snippet includes necessary dependencies for implementing the `process_metadata_dict` function. Write a Python function `def process_metadata_dict(scene_dir, scene_ids, all_metadata)` to solve the following problem:
根据scene ids 生成对应的 object dict
Here is the function:
def process_metadata_dict(scene_dir, scene_ids, all_metadata):
    """Build an object-index -> metadata mapping for the given scene ids.

    For every scene id, loads ``<scene_id>_scene.json`` from ``scene_dir`` and
    maps each object's ``index`` to the prefab metadata looked up in
    ``all_metadata``.  When an index appears in several scenes, the later
    scene wins.
    """
    index_to_meta = {}
    for sid in scene_ids:
        scene_path = os.path.join(scene_dir, f"{sid}_scene.json")
        with open(scene_path, 'r') as handle:
            scene_json = json.load(handle)
        for scene_obj in scene_json['scenes'][0]['objects']:
            index_to_meta[scene_obj['index']] = all_metadata[scene_obj['prefab_path']]
    return index_to_meta
165,281 | import os
from os.path import join, exists
import json
from tqdm import tqdm
from rich import print
import numpy as np
import imagesize
# Markers wrapping the serialized object list in the flattened dialogue input.
OBJ_BEGIN_TOKEN = '<SOO>'
OBJ_END_TOKEN = '<EOO>'
# Sentinel token used when a turn references no object.
NOCOREF_TOKEN = '<NOCOREF>'
def arrange_object_special_tokens(scene_dir, image_dir, scene_ids, object_item2id, insert_bbox_coords):
    '''Serialize all scene objects as special tokens, optionally with bbox features.

    scene_dir: directory holding the scene JSON files
    image_dir: directory holding the scene images
    scene_ids: scene ids attached to the dialogue turn
    object_item2id: prefab-path -> item special token mapping (item2id file)
    insert_bbox_coords: whether to also compute normalized 2-D + depth bbox features
    '''
    arrange_list = []
    arrange_bbox_list = []
    scene_loaded_list = []
    obj_dict_possibly_duplicated = dict()
    # When an object index appears in several scenes, remember the LAST scene
    # containing it; only that occurrence is serialized below.
    for scene_id_idx, scene_id in enumerate(scene_ids):
        with open(os.path.join(scene_dir, f"{scene_id}_scene.json"), 'r') as f_in:
            scene = json.load(f_in)
        scene_loaded_list.append(scene)
        for obj in scene['scenes'][0]['objects']:
            obj_dict_possibly_duplicated[obj['index']] = scene_id_idx
    num_scene = len(scene_ids)
    for scene_id_idx, scene_id in enumerate(scene_ids):
        scene = scene_loaded_list[scene_id_idx]
        bbox_id = scene_id[2:] if scene_id.startswith('m_') else scene_id  # strip the 'm_' prefix used by some scene ids
        with open(os.path.join(scene_dir, f"{bbox_id}_bbox.json"), 'r') as f_in:
            bbox = json.load(f_in)
        camera_position = []; camera_dir_vec = []
        for bbox_item in bbox['Items']:
            if bbox_item['name'] == 'camera':
                camera_position = np.array(bbox_item['position'])
            if bbox_item['name'] == 'camera_forward':
                camera_dir_vec = np.array(bbox_item['position'])
        if insert_bbox_coords:
            largest_z_value = 0
            for obj in scene['scenes'][0]['objects']:
                position = np.array(obj['position'])  # 3-D position used to compute camera-relative depth
                obj_displacement = position - camera_position
                # NOTE(review): `theta` already holds cos(angle) (a normalized dot
                # product), so `np.cos(theta)` below applies cosine twice; the depth
                # was presumably meant to be norm * theta.  Confirm before changing —
                # trained checkpoints depend on the current feature values.
                theta = np.dot(obj_displacement, camera_dir_vec) / (np.linalg.norm(obj_displacement)*np.linalg.norm(camera_dir_vec))
                largest_z_value = max(np.linalg.norm(obj_displacement) * np.cos(theta), largest_z_value)
        # emit every object of the current scene (skipping those re-emitted by a later scene)
        for obj in scene['scenes'][0]['objects']:
            assert obj['index'] in obj_dict_possibly_duplicated, "SOMETHING IS MISSING!"
            if scene_id_idx == obj_dict_possibly_duplicated[obj['index']]:
                if insert_bbox_coords:
                    position = np.array(obj['position'])
                    obj_displacement = position - camera_position
                    theta = np.dot(obj_displacement, camera_dir_vec) / (np.linalg.norm(obj_displacement)*np.linalg.norm(camera_dir_vec))
                    z_value = np.linalg.norm(obj_displacement) * np.cos(theta)
                    # image name
                    image_id = None
                    if "m" in scene_id[0]: image_id = scene_id[2:]
                    else: image_id = scene_id
                    image_file_name = os.path.join(image_dir, image_id+".png")
                    if os.path.exists(image_file_name):
                        img_w, img_h = imagesize.get(image_file_name)
                        x1, y1, h, w = obj['bbox']
                        x2, y2 = x1 + w, y1 + h
                        # features: centered x1,y1,x2,y2, relative area, normalized depth
                        pos_str = '[({:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f})]'.format(x1/img_w -0.5, y1/img_h -0.5, x2/img_w -0.5, y2/img_h -0.5, (x2-x1)*(y2-y1)/(img_w*img_h), z_value/largest_z_value)
                        arrange_bbox_list.append([x1/img_w -0.5, y1/img_h -0.5, x2/img_w -0.5, y2/img_h -0.5, (x2-x1)*(y2-y1)/(img_w*img_h), z_value/largest_z_value])
                    else:
                        print(f'{scene_id} is not present in img_size!!!')
                        pos_str = '[({:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f})]'.format(0.0, 0.0, 0.0, 0.0, 0.0, z_value/largest_z_value)
                        arrange_bbox_list.append([0.0, 0.0, 0.0, 0.0, 0.0, z_value/largest_z_value])
                else:
                    pos_str = ''
                # OBJ_PREVI / OBJ_START come from the shared token constants —
                # presumably previous-scene vs current-scene markers; confirm.
                if (num_scene != 1) and (scene_id_idx == 0):
                    arrange_list.append(OBJ_PREVI + "<" + str(obj['index']) + ">" + object_item2id[obj['prefab_path']])
                else:
                    arrange_list.append(OBJ_START + "<" + str(obj['index']) + ">" + object_item2id[obj['prefab_path']])
    return ''.join(arrange_list), arrange_bbox_list
The provided code snippet includes necessary dependencies for implementing the `process_for_vlbert_task1` function. Write a Python function `def process_for_vlbert_task1()` to solve the following problem:
为VLBert模型的训练准备
Here is the function:
def process_for_vlbert_task1():
    '''Prepare SIMMC2.1 dialogue data for VLBert-style task-1 training/evaluation.

    Reads the raw dialogue JSON plus the prefab metadata, flattens every user
    turn together with its visible scene objects, and dumps the filtered
    examples to a JSON file.
    '''
    scene_dir = '../../data_dstc11/jsons'
    image_dir = '../../data_dstc11/images'
    obj_item2id_path = '../data/item2id.json'
    fashion_metadata_path = '../../data_dstc11/fashion_prefab_metadata_all.json'
    furniture_metadata_path = '../../data_dstc11/furniture_prefab_metadata_all.json'
    all_metadata = {}
    with open(fashion_metadata_path) as f_in:
        all_metadata.update(json.load(f_in))
    with open(furniture_metadata_path) as f_in:
        all_metadata.update(json.load(f_in))
    with open(obj_item2id_path) as f_in:
        object_item2id = json.load(f_in)
    output = []
    # split_list = ['teststd_public'] # For Final Evaluation
    split_list = ['dev'] # For Evaluation
    # split_list = ['train'] # For Training
    for split in split_list:
        file_path = f'../../data_dstc11/simmc2.1_dials_dstc11_{split}.json'
        with open(file_path) as f_in:
            data = json.load(f_in)['dialogue_data']
        for dialogue in tqdm(data, desc=f'{split} '):
            dialogue_idx = dialogue['dialogue_idx']
            # scene_ids maps start-turn -> scene id; sort by start turn
            scene_ids = list(sorted(dialogue['scene_ids'].items(), key=lambda item: int(item[0])))
            is_fashion = True if dialogue['domain'] == 'fashion' else False
            lst_context = []
            sys_lst_context = []
            prev_turn = None
            for turn in dialogue['dialogue']:
                turn_idx = turn['turn_idx']
                system_transcript = turn['system_transcript']
                transcript = turn['transcript']
                user_object = turn['transcript_annotated']['act_attributes']['objects']
                sys_object = turn['system_transcript_annotated']['act_attributes']['objects']
                disambiguation_label = turn['transcript_annotated']['disambiguation_label']
                disambiguation_candidates = turn['transcript_annotated']['disambiguation_candidates']
                slot_values = turn['transcript_annotated']['act_attributes']['slot_values']
                intent = turn['transcript_annotated']['act']
                # scenes whose start turn is <= the current turn are visible
                turn_scene_ids = [item[1] for item in scene_ids if int(item[0]) <= turn_idx]
                object_str, bbox_data = arrange_object_special_tokens(scene_dir, image_dir, turn_scene_ids, object_item2id, True)
                if prev_turn is None:
                    lst_context.append(f'User : {transcript}')
                    sys_lst_context.append(f'User : {transcript} System : {system_transcript}')
                else:
                    prev_system_transcript = prev_turn['system_transcript']
                    lst_context.append(f'System : {prev_system_transcript} User : {transcript}')
                    sys_lst_context.append(f'User : {transcript} System : {system_transcript}')
                # in some configurations the full data is used; here only turns
                # that require disambiguation (label == 1) are kept
                if disambiguation_label == 1:
                    output.append({
                        'input': ' '.join(lst_context[-2:]) + OBJ_BEGIN_TOKEN + NOCOREF_TOKEN + object_str + OBJ_END_TOKEN,
                        'disambiguation_label': disambiguation_label,
                        'is_fashion': is_fashion,
                        'bbox': bbox_data,
                        'intent': intent,
                        'slot_values': slot_values,
                        'reference_objects': user_object,
                        'disambiguation_objects': disambiguation_candidates,
                        'dialogue_idx': dialogue_idx,
                        'turn_idx': turn_idx,
                        'role': 'User'
                    })
                prev_turn = turn
    print(len(output))
    # with open('../data/simmc2.1_dials_dstc11_task1_predict.json', 'w') as f_out:
    #     json.dump(output, f_out, indent=4, ensure_ascii=False)
    with open('../data/simmc2.1_dials_dstc11_task1_eval.json', 'w') as f_out:
        json.dump(output, f_out, indent=4, ensure_ascii=False)
    # with open('../data/simmc2.1_dials_dstc11_task1_eval_teststd.json', 'w') as f_out:
    #     json.dump(output, f_out, indent=4, ensure_ascii=False)
165,283 | from genericpath import exists
import os
from os.path import join
import json
import argparse
import torch
from torch.optim import AdamW
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import SequentialSampler, DistributedSampler
from tqdm import tqdm, trange
from rich import print
from datetime import datetime
import torch.multiprocessing
from sklearn.metrics import precision_recall_fscore_support
from torch.utils.data import DataLoader
from transformers import (
LongformerTokenizerFast,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_constant_schedule_with_warmup
)
from torch import distributed as dist
from utils.metadata import load_metadata,available_sizes2st
from utils.set_config import set_device, set_seed
from model.backbone import VLBertModelWithDisam
from utils.dataset import get_task1_dataset, DataLoaderX
\
def get_task1_dataset(args, tokenizer, all_objects_meta, eval=False, pretrain=False):
class DataLoaderX(DataLoader):
def __iter__(self):
\
def evaluate(args, model, tokenizer, all_objects_meta):
    """Run task-1 evaluation, dump the predicted object ids, and return P/R/F1."""

    def collate_eval_bart(examples):
        # Pad input ids / attention masks of a batch; pass metadata through as lists.
        enc_input = list(map(lambda x: x[0], examples))
        enc_attention_mask = list(map(lambda x: x[1], examples))
        boxes = list(map(lambda x: x[2], examples))
        misc = list(map(lambda x: x[3], examples))
        disam_label = list(map(lambda x: x[4], examples))
        dialog_ids = list(map(lambda x: x[5], examples))
        turn_ids = list(map(lambda x: x[6], examples))
        if tokenizer._pad_token is None:
            enc_input_pad = pad_sequence(enc_input, batch_first=True)
        else:
            enc_input_pad = pad_sequence(enc_input, batch_first=True, padding_value=tokenizer.pad_token_id)
        enc_attention_pad = pad_sequence(enc_attention_mask, batch_first=True, padding_value=0)
        return enc_input_pad, enc_attention_pad, boxes, misc, disam_label, dialog_ids, turn_ids

    def rec_prec_f1(n_correct, n_true, n_pred):
        # Standard recall/precision/F1 with zero-division guards.
        rec = n_correct / n_true if n_true != 0 else 0
        prec = n_correct / n_pred if n_pred != 0 else 0
        f1 = 2 * prec * rec / (prec + rec) if (prec + rec) != 0 else 0
        return rec, prec, f1

    eval_dataset = get_task1_dataset(args, tokenizer, all_objects_meta, eval=True, pretrain=False)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoaderX(eval_dataset, sampler=eval_sampler, num_workers=args.num_workers, batch_size=args.eval_batch_size, collate_fn=collate_eval_bart, pin_memory=True)
    n_pred_objects, n_true_objects, n_correct_objects = 0, 0, 0
    output_data = {}
    output_data_list = []
    for batch in tqdm(eval_dataloader, desc="Evaluating", colour='blue', leave=False):
        enc_input = batch[0].to(args.device)
        enc_attention_mask = batch[1].to(args.device)
        boxes = batch[2]  # batch, num_obj_per_line, 6
        misc = batch[3]  # batch, num_obj_per_line, dict
        disam_label = batch[4]
        dialog_id_list = batch[5]
        turn_id_list = batch[6]
        with torch.no_grad():
            s_pred_objects, s_true_objects, s_correct_objects, output_item_list = model.evaluate(enc_input, enc_attention_mask, boxes, misc, disam_label)
        n_pred_objects += s_pred_objects
        n_true_objects += s_true_objects
        n_correct_objects += s_correct_objects
        for idx, sub_list in enumerate(output_item_list):
            dialog_id, turn_id = dialog_id_list[idx], turn_id_list[idx]
            object_id_list = [int(item[1:-1]) for item in sub_list]  # '<12>' -> 12
            output_data_list.append(object_id_list)
    with open(join(args.checkpoint_name_or_path, 'simmc2.1_task1_predicted.json'), 'w') as f_out:
        json.dump(output_data_list, f_out, indent=4, ensure_ascii=False)
    coref_rec, coref_prec, coref_f1 = rec_prec_f1(n_correct_objects, n_true_objects, n_pred_objects)
    return {
        'precision': coref_prec,
        'recall': coref_rec,
        'f1-score': coref_f1,
    }
165,288 | import json
import torch
from os.path import join, exists
from transformers.tokenization_utils import PreTrainedTokenizer
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler, DistributedSampler
from prefetch_generator import BackgroundGenerator
import copy
from .metadata import FASHION_COLOR, FASHION_PATTERN, FASHION_SLEEVE_LENGTH, FURNITURE_BRAND, FURNITURE_COLOR, FURNITURE_CUSTOMER_RATING, FURNITURE_MATERIALS, FURNITURE_PRICE, fashion_meta_attrs, furniture_meta_attrs, available_sizes2st
# Special tokens naming each catalogue item: <@1NNN> for fashion, <@2NNN> for furniture.
FASHION_SPECIAL_TOKENS = [f"<@1{i:03}>" for i in range(NUM_FASHION_ITEMS)]
FURNITURE_SPECIAL_TOKENS = [f"<@2{i:03}>" for i in range(NUM_FURNITURE_ITEMS)]
# Per-scene object index tokens: <0> ... <MAX_NUM_OBJ_IN_SCENE - 1>.
OBJECT_INDICES = [f"<{i}>" for i in range(MAX_NUM_OBJ_IN_SCENE)]
def get_input_id(tokenizer, tokens):
    # Token ids for `tokens` with the leading/trailing special ids stripped;
    # a single surface token may map to more than one id.
    encoded = tokenizer(tokens)
    return encoded.input_ids[1:-1]
The provided code snippet includes necessary dependencies for implementing the `id_converter` function. Write a Python function `def id_converter(tokenizer)` to solve the following problem:
获取Special Token所对应的id信息
Here is the function:
def id_converter(tokenizer):
    '''Map vocabulary ids back to object-index / fashion / furniture special tokens.'''
    def first_id(token):
        return get_input_id(tokenizer, token)[0]
    id2index = {first_id(index_token): index_token for index_token in OBJECT_INDICES}
    id2fashion_st = {first_id(st): st for st in FASHION_SPECIAL_TOKENS}
    id2furniture_st = {first_id(st): st for st in FURNITURE_SPECIAL_TOKENS}
    return id2index, id2fashion_st, id2furniture_st
165,289 | import json
import torch
from os.path import join, exists
from transformers.tokenization_utils import PreTrainedTokenizer
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler, DistributedSampler
from prefetch_generator import BackgroundGenerator
import copy
from .metadata import FASHION_COLOR, FASHION_PATTERN, FASHION_SLEEVE_LENGTH, FURNITURE_BRAND, FURNITURE_COLOR, FURNITURE_CUSTOMER_RATING, FURNITURE_MATERIALS, FURNITURE_PRICE, fashion_meta_attrs, furniture_meta_attrs, available_sizes2st
class LineByLineTask2Dataset(Dataset):
    """Dataset for SIMMC2.1 task 2 (multimodal coreference resolution).

    Variant that also records each object's index token (``object_index``)
    in the per-object supervision dict.
    """

    def __init__(self, input_file, tokenizer: PreTrainedTokenizer, all_objects_meta, eval=False):
        '''Build examples and per-object supervision from the preprocessed JSON file.'''
        with open(input_file) as f_in:
            self.data = json.load(f_in)
        # Other tasks
        lines = []
        self.boxes = []  # original object bbox/position features, one list per example
        self.generation = []
        self.nocoref = []
        self.disambiguation_objects = []
        self.disambiguation_labels = []
        self.misc = []
        self.is_fashion = []
        corefs = []
        vocab2id = tokenizer.get_vocab()
        id2vocab = {v: k for k, v in vocab2id.items()}
        EOM_id = vocab2id[END_OF_MULTIMODAL_CONTEXTS]
        for dialog in self.data:
            self.disambiguation_labels.append(dialog['disambiguation_label'])
            self.is_fashion.append(dialog['is_fashion'])
            self.boxes.append(dialog['bbox'])
            lines.append(dialog['input'])
            corefs.append([f'<{index}>' for index in dialog['reference_objects']])  # task-2 targets (referenced objects)
            # if not eval:
            #     coref_object = []
            #     coref_object.extend(dialog['reference_objects'])
            #     coref_object.extend(dialog['disambiguation_objects'])
            #     corefs.append([f'<{index}>' for index in coref_object])
            # else:
            #     # corefs.append([f'<{index}>' for index in dialog['reference_objects']])
            #     corefs.append([f'<{index}>' for index in dialog['disambiguation_objects']])  # task-1 variant
        encode_text = tokenizer(lines, add_special_tokens=True)
        self.examples = encode_text.input_ids
        self.examples_attention_mask = encode_text.attention_mask
        nocoref_id = get_input_id(tokenizer, NO_COREF)[0]  # token id of the NO_COREF marker
        id2index, id2fashion_st, id2furniture_st = id_converter(tokenizer)
        for idx, tokenized_line in enumerate(self.examples):
            tl = tokenized_line
            EOM_indices = [i for i, tokenized_id in enumerate(tl) if tokenized_id == EOM_id]
            if EOM_indices:  # any end-of-multimodal-context marker present?
                EOM_last_idx = EOM_indices[-1]
            else:
                EOM_last_idx = -1
            self.nocoref.append((tl.index(nocoref_id), 1 if not corefs[idx] else 0))  # (marker position, 1 when the turn references no object)
            line_labels = []
            if self.is_fashion[idx]:
                for i, token_id in enumerate(tl):
                    if token_id in id2index and i > EOM_last_idx:  # this token is an item index (scene tokens appear after the multimodal context)
                        temp = dict()
                        pos = i
                        item_index = id2index[token_id]
                        fashion_st = id2fashion_st[tl[i+1]]
                        temp['is_fashion'] = True
                        temp['pos'] = pos
                        temp['coref_label'] = 1 if item_index in corefs[idx] else 0
                        temp['misc_labels'] = dict()
                        temp['object_index'] = item_index
                        for attr_name, attr_value in all_objects_meta[fashion_st].items():
                            if attr_name != 'available_sizes':
                                temp['misc_labels'][attr_name] = fashion_meta_attrs[attr_name].index(attr_value)
                            else:
                                temp['misc_labels'][attr_name] = [1 if x in attr_value else 0 for x in fashion_meta_attrs[attr_name]]  # multi-hot: several sizes can be available at once, so this attribute uses a different loss
                        line_labels.append(temp)
            else:
                for i, token_id in enumerate(tl):
                    if token_id in id2index and i > EOM_last_idx:  # this token is for item index
                        temp = dict()
                        pos = i
                        item_index = id2index[token_id]
                        furniture_st = id2furniture_st[tl[i+1]]
                        temp['is_fashion'] = False
                        temp['pos'] = pos  # position of this object's index token in the input
                        temp['coref_label'] = 1 if item_index in corefs[idx] else 0
                        temp['misc_labels'] = dict()
                        temp['object_index'] = item_index
                        for attr_name, attr_value in all_objects_meta[furniture_st].items():
                            temp['misc_labels'][attr_name] = furniture_meta_attrs[attr_name].index(attr_value)
                        line_labels.append(temp)
            self.misc.append(line_labels)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        # (input_ids, attention_mask, bbox features, per-object labels,
        #  nocoref tuple, disambiguation label)
        return torch.tensor(self.examples[i], dtype=torch.long), \
            torch.tensor(self.examples_attention_mask[i], dtype=torch.long), \
            self.boxes[i], \
            self.misc[i], \
            self.nocoref[i], \
            torch.tensor(self.disambiguation_labels[i], dtype=torch.long),
def get_task2_dataset(args, tokenizer, all_objects_meta, eval=False):
    """Return the task-2 dataset built from the train or eval input file."""
    source_file = args.eval_input_file if eval else args.train_input_file
    return LineByLineTask2Dataset(source_file, tokenizer, all_objects_meta, eval=eval)
165,290 | import json
import torch
from os.path import join, exists
from transformers.tokenization_utils import PreTrainedTokenizer
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler, DistributedSampler
from prefetch_generator import BackgroundGenerator
import copy
from .metadata import FASHION_COLOR, FASHION_PATTERN, FASHION_SLEEVE_LENGTH, FURNITURE_BRAND, FURNITURE_COLOR, FURNITURE_CUSTOMER_RATING, FURNITURE_MATERIALS, FURNITURE_PRICE, fashion_meta_attrs, furniture_meta_attrs, available_sizes2st
class LineByLineTask1Dataset(Dataset):
    """Dataset for SIMMC2.1 task 1 (disambiguation-candidate identification).

    Labels come from ``disambiguation_objects``; in eval mode each example
    additionally carries the task-2 disambiguation prediction plus dialogue
    and turn ids so predictions can be written back per turn.
    """

    def __init__(self, input_file, tokenizer: PreTrainedTokenizer, all_objects_meta, eval=False, eval_disam_path=None):
        '''Build examples and per-object supervision from the preprocessed JSON file.'''
        with open(input_file) as f_in:
            self.data = json.load(f_in)
        # Other tasks
        lines = []
        self.boxes = []  # original object bbox/position features, one list per example
        self.nocoref = []
        self.misc = []
        self.is_fashion = []
        self.dialog_ids = []
        self.turn_ids = []
        self.eval = eval
        corefs = []
        vocab2id = tokenizer.get_vocab()
        id2vocab = {v: k for k, v in vocab2id.items()}
        EOM_id = vocab2id[END_OF_MULTIMODAL_CONTEXTS]
        for dialog in self.data:
            self.is_fashion.append(dialog['is_fashion'])
            self.boxes.append(dialog['bbox'])
            self.dialog_ids.append(dialog['dialogue_idx'])
            self.turn_ids.append(dialog['turn_idx'])
            lines.append(dialog['input'])
            corefs.append([f'<{index}>' for index in dialog['disambiguation_objects']])  # task-1 targets (disambiguation candidates)
            # if not eval:
            #     coref_object = []
            #     coref_object.extend(dialog['reference_objects'])
            #     coref_object.extend(dialog['disambiguation_objects'])
            #     corefs.append([f'<{index}>' for index in coref_object])
            # else:
            #     # corefs.append([f'<{index}>' for index in dialog['reference_objects']])
            #     corefs.append([f'<{index}>' for index in dialog['disambiguation_objects']])  # task-1 variant
        encode_text = tokenizer(lines, add_special_tokens=True)
        self.examples = encode_text.input_ids
        self.examples_attention_mask = encode_text.attention_mask
        # Load the task-2 disambiguation predictions, if available; otherwise
        # default every example's label to 1.
        if eval_disam_path is None:
            self.disam_label = [1 for i in range(len(self.examples))]
        else:
            print('EXIST disambiguation predict result: ', eval_disam_path)
            with open(join(eval_disam_path, 'simmc2.1_task2_disam_predicted.json')) as f_in:
                self.disam_label = json.load(f_in)
        id2index, id2fashion_st, id2furniture_st = id_converter(tokenizer)
        for idx, tokenized_line in enumerate(self.examples):
            tl = tokenized_line
            EOM_indices = [i for i, tokenized_id in enumerate(tl) if tokenized_id == EOM_id]
            if EOM_indices:  # any end-of-multimodal-context marker present?
                EOM_last_idx = EOM_indices[-1]
            else:
                EOM_last_idx = -1
            line_labels = []
            if self.is_fashion[idx]:
                for i, token_id in enumerate(tl):
                    if token_id in id2index and i > EOM_last_idx:  # this token is an item index (scene tokens appear after the multimodal context)
                        temp = dict()
                        pos = i
                        item_index = id2index[token_id]
                        fashion_st = id2fashion_st[tl[i+1]]
                        temp['is_fashion'] = True
                        temp['pos'] = pos
                        temp['coref_label'] = 1 if item_index in corefs[idx] else 0
                        temp['misc_labels'] = dict()
                        temp['object_index'] = item_index
                        for attr_name, attr_value in all_objects_meta[fashion_st].items():
                            if attr_name != 'available_sizes':
                                temp['misc_labels'][attr_name] = fashion_meta_attrs[attr_name].index(attr_value)
                            else:
                                temp['misc_labels'][attr_name] = [1 if x in attr_value else 0 for x in fashion_meta_attrs[attr_name]]  # multi-hot: several sizes can be available at once, so this attribute uses a different loss
                        line_labels.append(temp)
            else:
                for i, token_id in enumerate(tl):
                    if token_id in id2index and i > EOM_last_idx:  # this token is for item index
                        temp = dict()
                        pos = i
                        item_index = id2index[token_id]
                        furniture_st = id2furniture_st[tl[i+1]]
                        temp['is_fashion'] = False
                        temp['pos'] = pos  # position of this object's index token in the input
                        temp['coref_label'] = 1 if item_index in corefs[idx] else 0
                        temp['misc_labels'] = dict()
                        temp['object_index'] = item_index
                        for attr_name, attr_value in all_objects_meta[furniture_st].items():
                            temp['misc_labels'][attr_name] = furniture_meta_attrs[attr_name].index(attr_value)
                        line_labels.append(temp)
            self.misc.append(line_labels)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        # Eval mode adds (disam_label, dialogue id, turn id) to the tuple.
        if self.eval:
            return torch.tensor(self.examples[i], dtype=torch.long), \
                torch.tensor(self.examples_attention_mask[i], dtype=torch.long), \
                self.boxes[i], \
                self.misc[i], \
                self.disam_label[i], \
                self.dialog_ids[i], \
                self.turn_ids[i]
        else:
            return torch.tensor(self.examples[i], dtype=torch.long), \
                torch.tensor(self.examples_attention_mask[i], dtype=torch.long), \
                self.boxes[i], \
                self.misc[i]
def get_task1_dataset(args, tokenizer, all_objects_meta, eval=False, pretrain=False):
    """Return the task-1 dataset for pretraining, training, or evaluation."""
    if pretrain:
        return LineByLineTask1Dataset(args.pretrain_input_file, tokenizer, all_objects_meta, eval=eval)
    if eval:
        return LineByLineTask1Dataset(args.eval_input_file, tokenizer, all_objects_meta, eval=eval, eval_disam_path=args.eval_disam_path)
    return LineByLineTask1Dataset(args.train_input_file, tokenizer, all_objects_meta, eval=eval)
165,291 | import json
import torch
from os.path import join, exists
from transformers.tokenization_utils import PreTrainedTokenizer
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler, DistributedSampler
from prefetch_generator import BackgroundGenerator
import copy
from .metadata import FASHION_COLOR, FASHION_PATTERN, FASHION_SLEEVE_LENGTH, FURNITURE_BRAND, FURNITURE_COLOR, FURNITURE_CUSTOMER_RATING, FURNITURE_MATERIALS, FURNITURE_PRICE, fashion_meta_attrs, furniture_meta_attrs, available_sizes2st
class LineByLineDSTDataset(Dataset):
    """Dataset for SIMMC2.1 dialogue state tracking (intent + slot values).

    On top of the coreference/attribute supervision, each example carries an
    intent label (index into ``intent_template``) and one
    (token position, class) target per slot.
    """

    def __init__(self, input_file, tokenizer: PreTrainedTokenizer, all_objects_meta, eval=False, fashion_slot_map=None, furniture_slot_map=None):
        '''Build examples, per-object supervision, and DST targets from the preprocessed JSON file.'''
        with open(input_file) as f_in:
            self.data = json.load(f_in)
        # Other tasks
        lines = []
        self.boxes = []  # original object bbox/position features, one list per example
        self.generation = []
        self.nocoref = []
        self.disambiguation_objects = []
        self.disambiguation_labels = []
        self.misc = []
        self.intent = []
        self.slot_value = []
        self.is_fashion = []
        self.slot_data = []
        self.intent_data = []
        # Closed set of intents; the label is the index into this list.
        self.intent_template = [
            'REQUEST:GET',
            'REQUEST:COMPARE',
            'INFORM:REFINE',
            'INFORM:GET',
            'ASK:GET',
            'INFORM:DISAMBIGUATE',
            'REQUEST:ADD_TO_CART'
        ]
        # Slot templates; each value is replaced below by a (token position, class) pair.
        self.fashion_slot = {
            'type': 0,
            'price': 0,
            'customerReview': 0,
            'brand': 0,
            'size': 0,
            'pattern': 0,
            'color': 0,
            'sleeveLength': 0,
            'availableSizes': 0
        }
        self.furniture_slot = {
            'type': 0,
            'material': 0,
            'price': 0,
            'brand': 0,
            'customerRating': 0,
            'color': 0
        }
        self.available_size_data = ['XS', 'S', 'M', 'L', 'XL', 'XXL']
        corefs = []
        vocab2id = tokenizer.get_vocab()
        id2vocab = {v: k for k, v in vocab2id.items()}
        EOM_id = vocab2id[END_OF_MULTIMODAL_CONTEXTS]
        for dialog in self.data:
            self.disambiguation_labels.append(dialog['disambiguation_label'])
            self.is_fashion.append(dialog['is_fashion'])
            self.boxes.append(dialog['bbox'])
            self.intent_data.append(dialog['intent'])
            self.slot_data.append(dialog['slot_values'])
            lines.append(dialog['input'])
            corefs.append([f'<{index}>' for index in dialog['reference_objects']])  # task-2 targets (referenced objects)
            # if not eval:
            #     coref_object = []
            #     coref_object.extend(dialog['reference_objects'])
            #     coref_object.extend(dialog['disambiguation_objects'])
            #     corefs.append([f'<{index}>' for index in coref_object])
            # else:
            #     # corefs.append([f'<{index}>' for index in dialog['reference_objects']])
            #     corefs.append([f'<{index}>' for index in dialog['disambiguation_objects']])  # task-1 variant
        encode_text = tokenizer(lines, add_special_tokens=True)
        self.examples = encode_text.input_ids
        self.examples_attention_mask = encode_text.attention_mask
        nocoref_id = get_input_id(tokenizer, NO_COREF)[0]  # token id of the NO_COREF marker
        intent_id = get_input_id(tokenizer, INTENT_TOKEN)[0]
        id2index, id2fashion_st, id2furniture_st = id_converter(tokenizer)
        for idx, tokenized_line in enumerate(self.examples):
            tl = tokenized_line
            EOM_indices = [i for i, tokenized_id in enumerate(tl) if tokenized_id == EOM_id]
            if EOM_indices:  # any end-of-multimodal-context marker present?
                EOM_last_idx = EOM_indices[-1]
            else:
                EOM_last_idx = -1
            self.nocoref.append((tl.index(nocoref_id), 1 if not corefs[idx] else 0))  # (marker position, 1 when the turn references no object)
            self.intent.append((tl.index(intent_id), self.intent_template.index(self.intent_data[idx])))
            line_labels = []
            if self.is_fashion[idx]:
                for i, token_id in enumerate(tl):
                    if token_id in id2index and i > EOM_last_idx:  # this token is an item index (scene tokens appear after the multimodal context)
                        temp = dict()
                        pos = i
                        item_index = id2index[token_id]
                        fashion_st = id2fashion_st[tl[i+1]]
                        temp['is_fashion'] = True
                        temp['pos'] = pos
                        temp['coref_label'] = 1 if item_index in corefs[idx] else 0
                        temp['misc_labels'] = dict()
                        for attr_name, attr_value in all_objects_meta[fashion_st].items():
                            if attr_name != 'available_sizes':
                                temp['misc_labels'][attr_name] = fashion_meta_attrs[attr_name].index(attr_value)
                            else:
                                temp['misc_labels'][attr_name] = [1 if x in attr_value else 0 for x in fashion_meta_attrs[attr_name]]  # multi-hot: several sizes can be available at once, so this attribute uses a different loss
                        line_labels.append(temp)
            else:
                for i, token_id in enumerate(tl):
                    if token_id in id2index and i > EOM_last_idx:  # this token is for item index
                        temp = dict()
                        pos = i
                        item_index = id2index[token_id]
                        furniture_st = id2furniture_st[tl[i+1]]
                        temp['is_fashion'] = False
                        temp['pos'] = pos  # position of this object's index token in the input
                        temp['coref_label'] = 1 if item_index in corefs[idx] else 0
                        temp['misc_labels'] = dict()
                        for attr_name, attr_value in all_objects_meta[furniture_st].items():
                            temp['misc_labels'][attr_name] = furniture_meta_attrs[attr_name].index(attr_value)
                        line_labels.append(temp)
            self.misc.append(line_labels)
            # Build slot targets: (position of the slot's special token,
            # class index shifted by +1 so 0 means "slot absent").
            line_slot_values = self.slot_data[idx]
            line_slot_map = dict()
            if self.is_fashion[idx]:
                line_slot_map = copy.deepcopy(self.fashion_slot)
                for attr_name in line_slot_map.keys():
                    attr_index = tl.index(get_input_id(tokenizer, FASHION_TOKEN_MAP[attr_name])[0])
                    if attr_name != 'availableSizes':
                        line_slot_map[attr_name] = (attr_index, fashion_slot_map[attr_name].index(line_slot_values[attr_name]) + 1) if attr_name in line_slot_values.keys() else (attr_index, 0)
                    else:
                        line_slot_map[attr_name] = (attr_index, [1 if item_size in line_slot_values.get(attr_name, []) else 0 for item_size in self.available_size_data])
            else:
                line_slot_map = copy.deepcopy(self.furniture_slot)
                for attr_name in line_slot_map.keys():
                    attr_index = tl.index(get_input_id(tokenizer, FURNITURE_TOKEN_MAP[attr_name])[0])
                    line_slot_map[attr_name] = (attr_index, furniture_slot_map[attr_name].index(line_slot_values[attr_name]) + 1) if attr_name in line_slot_values.keys() else (attr_index, 0)
            self.slot_value.append(line_slot_map)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        # (input_ids, attention_mask, bbox features, per-object labels,
        #  nocoref tuple, disambiguation label, intent target, slot targets)
        return torch.tensor(self.examples[i], dtype=torch.long), \
            torch.tensor(self.examples_attention_mask[i], dtype=torch.long), \
            self.boxes[i], \
            self.misc[i], \
            self.nocoref[i], \
            torch.tensor(self.disambiguation_labels[i], dtype=torch.long), \
            self.intent[i], \
            self.slot_value[i]
def get_dst_dataset(args, tokenizer, all_objects_meta, eval=False, fashion_slot_map=None, furniture_slot_map=None):
    """Build a LineByLineDSTDataset from the train or eval input file.

    Args:
        args: namespace providing ``train_input_file`` / ``eval_input_file`` paths.
        tokenizer: tokenizer used to encode each dialogue line.
        all_objects_meta: mapping from object token to its metadata attributes.
        eval: when True read ``args.eval_input_file``, otherwise ``args.train_input_file``.
        fashion_slot_map: slot-name -> value-list map for the fashion domain.
        furniture_slot_map: slot-name -> value-list map for the furniture domain.

    Returns:
        A LineByLineDSTDataset built over the selected input file.
    """
    # The two branches previously duplicated the constructor call and differed
    # only in the input file; select the file once and construct once.
    input_file = args.eval_input_file if eval else args.train_input_file
    return LineByLineDSTDataset(
        input_file,
        tokenizer,
        all_objects_meta,
        eval=eval,
        fashion_slot_map=fashion_slot_map,
        furniture_slot_map=furniture_slot_map,
    )
return dataset | null |
165,294 | import torch
def preprocess(utterances, sql, tokenizer):
    """Encode an (utterance history, SQL query) pair for the reranker.

    The utterances are flattened into a single whitespace-joined string and
    the SQL string is normalised so it reads like natural text: dots and
    underscores become spaces and camelCase identifiers are split before each
    uppercase letter.  The pair is then encoded as
    ``[CLS] utterance [SEP] sql [SEP]`` padded/truncated to 128 tokens.

    Args:
        utterances: iterable of utterance strings (dialogue history).
        sql: candidate SQL query to score against the utterances.
        tokenizer: a tokenizer exposing ``encode_plus`` (HuggingFace-style).

    Returns:
        (input_ids, attention_mask) as 1-D LongTensors of length 128.
    """
    # Flatten the history with a join instead of repeated `+=` (which is
    # quadratic).  Each split token keeps one leading space, exactly matching
    # the original concatenation (including empty tokens from double spaces).
    text = ''.join(' ' + t.strip() for u in utterances for t in u.split(' '))

    # Make the SQL read like text: "tab.col" -> "tab col", snake_case ->
    # words, and camelCase -> "camel Case"; finally collapse whitespace.
    sql = sql.strip().replace(".", " ").replace("_", " ")
    sql = ''.join(' ' + char if char.isupper() else char for char in sql)
    sql = ' '.join(sql.split())

    # Input layout: [CLS] utterance [SEP] sql [SEP]
    # NOTE(review): `pad_to_max_length` is deprecated in recent transformers
    # releases in favour of padding='max_length' — confirm library version
    # before changing, so behavior stays identical.
    token_encoding = tokenizer.encode_plus(text, sql, max_length=128, pad_to_max_length=True)
    tokens_tensor = torch.tensor(token_encoding['input_ids'])
    attention_mask_tensor = torch.tensor(token_encoding['attention_mask'])
    return tokens_tensor, attention_mask_tensor
165,295 | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR
import numpy as np
import logging
import argparse
import os
from .dataset import NL2SQL_Dataset
from .model import ReRanker
from sklearn.metrics import confusion_matrix, accuracy_score
def parser_arg(argv=None):
    """Parse command-line options for reranker training/testing.

    Args:
        argv: optional list of argument strings; when None (the default),
            argparse falls back to ``sys.argv[1:]``, preserving the previous
            call signature ``parser_arg()``.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, default=666)
    parser.add_argument("--gpu", type=str, default='0')
    parser.add_argument("--data_path", type=str, default="./reranker/data")
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--test", action="store_true")
    parser.add_argument("--epoches", type=int, default=20)
    parser.add_argument("--batch_size", type=int, default=16)
    # BUG FIX: these three options have float defaults but previously declared
    # no `type=`, so values supplied on the command line were parsed as
    # strings (e.g. "--cls_lr 0.01" yielded the str "0.01").
    parser.add_argument("--cls_lr", type=float, default=1e-3)
    parser.add_argument("--bert_lr", type=float, default=5e-6)
    parser.add_argument("--threshold", type=float, default=0.5)
    parser.add_argument("--save_dir", type=str, default="./reranker/checkpoints")
    parser.add_argument("--base_model", type=str, default="roberta")
    args = parser.parse_args(argv)
    return args
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.