id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
164,797 | from typing import List
from uie.extraction.record_schema import RecordSchema
from uie.extraction.predict_parser import get_predict_parser, PredictParser
from uie.extraction.scorer import Metric, RecordMetric, OrderedRecordMetric
def eval_pred(predict_parser: PredictParser, gold_list, pred_list, text_list=None, raw_list=None):
class RecordSchema:
def __init__(self, type_list, role_list, type_role_dict):
def __repr__(self) -> str:
def get_empty_schema():
def read_from_file(filename):
def write_to_file(self, filename):
def get_predict_parser(decoding_schema, label_constraint):
def get_extract_metrics(pred_lns: List[str], tgt_lns: List[str], label_constraint: RecordSchema, decoding_format='tree'):
    """Parse predicted and gold linearised records, then score them.

    A schema-constrained parser for *decoding_format* is built first and
    handed to eval_pred together with the prediction/target lines.
    """
    parser = get_predict_parser(decoding_schema=decoding_format,
                                label_constraint=label_constraint)
    return eval_pred(predict_parser=parser,
                     gold_list=tgt_lns,
                     pred_list=pred_lns)
164,798 |
The provided code snippet includes necessary dependencies for implementing the `convert_spot_asoc` function. Write a Python function `def convert_spot_asoc(spot_asoc_instance, structure_maker)` to solve the following problem:
将一个 Spot-Asoc 实例转换成目标字符串 Args: spot_asoc_instance ([type]): [description] structure_maker ([type]): [description] Returns: [type]: [description]
Here is the function:
def convert_spot_asoc(spot_asoc_instance, structure_maker):
    """Convert one Spot-Asoc instance into its linearised target string.

    Each spot becomes ``record_start label target_span_start span
    [asoc-block ...] record_end``; every asoc on the spot becomes a
    ``span_start asoc_label target_span_start asoc_span span_end`` block.
    The whole instance is wrapped in the sentence markers supplied by
    *structure_maker*.

    Args:
        spot_asoc_instance: list of dicts with keys 'label', 'span' and
            optionally 'asoc' (list of (asoc_label, asoc_span) pairs).
        structure_maker: object exposing the structural marker tokens.

    Returns:
        The space-joined target string.
    """
    record_reps = []
    for spot in spot_asoc_instance:
        parts = [
            spot['label'],
            structure_maker.target_span_start,
            spot['span'],
        ]
        for asoc_label, asoc_span in spot.get('asoc', list()):
            parts.append(' '.join([
                structure_maker.span_start,
                asoc_label,
                structure_maker.target_span_start,
                asoc_span,
                structure_maker.span_end,
            ]))
        record_reps.append(' '.join([
            structure_maker.record_start,
            ' '.join(parts),
            structure_maker.record_end,
        ]))
    return ' '.join([
        structure_maker.sent_start,
        ' '.join(record_reps),
        structure_maker.sent_end,
    ])
164,799 |
The provided code snippet includes necessary dependencies for implementing the `convert_spot_asoc_name` function. Write a Python function `def convert_spot_asoc_name(spot_asoc_instance, structure_maker)` to solve the following problem:
将一个 Spot-Asoc-Name 实例转换成目标字符串 Args: spot_asoc_instance ([type]): [description] structure_maker ([type]): [description] Returns: [type]: [description]
Here is the function:
def convert_spot_asoc_name(spot_asoc_instance, structure_maker):
    """Convert one Spot-Asoc-Name instance into its linearised target string.

    Same layout as convert_spot_asoc, but with span and label swapped:
    each spot is rendered as ``record_start span target_span_start label``
    and each asoc as ``span_start asoc_span target_span_start asoc_label``.

    Args:
        spot_asoc_instance: list of dicts with keys 'label', 'span' and
            optionally 'asoc' (list of (asoc_label, asoc_span) pairs).
        structure_maker: object exposing the structural marker tokens.

    Returns:
        The space-joined target string.
    """
    record_reps = []
    for spot in spot_asoc_instance:
        parts = [
            spot['span'],
            structure_maker.target_span_start,
            spot['label'],
        ]
        for asoc_label, asoc_span in spot.get('asoc', list()):
            parts.append(' '.join([
                structure_maker.span_start,
                asoc_span,
                structure_maker.target_span_start,
                asoc_label,
                structure_maker.span_end,
            ]))
        record_reps.append(' '.join([
            structure_maker.record_start,
            ' '.join(parts),
            structure_maker.record_end,
        ]))
    return ' '.join([
        structure_maker.sent_start,
        ' '.join(record_reps),
        structure_maker.sent_end,
    ])
164,800 | from typing import Dict
def list_dictionary(d, n_tab=-1):
    """Recursively pretty-print a nested list/dict tree to stdout.

    Dict keys are indented with one space per depth level; non-dict,
    non-list leaves are indented with tabs.  The sentinel key '<end>'
    is printed but never recursed into.
    """
    if isinstance(d, list):
        for element in d:
            list_dictionary(element, n_tab)
    elif isinstance(d, dict):
        n_tab += 1
        for key, value in d.items():
            print("{}{}".format(" " * n_tab, key))
            # '<end>' marks a trie terminal: print it, do not descend.
            if key != '<end>':
                list_dictionary(value, n_tab)
    else:
        print("{}{}".format("\t" * n_tab, d))
def print_tree(tree):
    """Convenience wrapper: pretty-print *tree* via list_dictionary."""
    list_dictionary(tree)
164,801 | from typing import Dict
def get_label_name_tree(label_name_list, tokenizer, end_symbol='<end>'):
    """Build a trie over the token ids of each label name.

    Every label in *label_name_list* is tokenised (without special
    tokens) and inserted into a nested-dict trie; a complete label is
    terminated by *end_symbol* mapping to None.

    Returns:
        The root dict of the token-id trie.
    """
    token_trie = dict()
    # Tokenise each label once; duplicates collapse naturally.
    tokenized = {name: tokenizer.encode(name, add_special_tokens=False)
                 for name in label_name_list}
    for token_seq in tokenized.values():
        node = token_trie
        for token_id in token_seq:
            node = node.setdefault(token_id, dict())
        node[end_symbol] = None
    return token_trie
164,802 | from collections import defaultdict
from copy import deepcopy
from typing import Dict, List
import sys
def tuple_offset(offset):
    """Return *offset* unchanged if it is already a tuple, else convert it."""
    return offset if isinstance(offset, tuple) else tuple(offset)
164,803 | from collections import defaultdict
from copy import deepcopy
from typing import Dict, List
import sys
def warning_tp_increment(gold, pred, prefix):
    """Write a TP-increment warning for a gold/pred record pair to stderr."""
    warning_lines = [
        f"{prefix} TP Increment Warning, Gold Offset: {gold['offset']}\n",
        f"{prefix} TP Increment Warning, Pred Offset: {pred['offset']}\n",
        f"{prefix} TP Increment Warning, Gold String: {gold['string']}\n",
        f"{prefix} TP Increment Warning, Pred String: {pred['string']}\n",
        "===============\n",
    ]
    for line in warning_lines:
        sys.stderr.write(line)
164,804 | import re
def fix_unk_from_text(span, text, unk='<unk>'):
    """Recover <unk> placeholders in *span* by locating the wording in *text*.

    Each *unk* placeholder is assumed to stand for exactly one
    whitespace-delimited token; the fragments around the placeholders are
    matched literally (with regex specials escaped).  Returns *span*
    unchanged when it contains no *unk* or no match is found.

    Example:
        span = "Tarō As<unk>", text = "The leader of Japan is Tarō Asō ."
        -> "Tarō Asō"
    """
    if unk not in span:
        return span

    def escape_specials(fragment):
        # Escape only the same specials the original pipeline escapes.
        specials = ".*?()[]+"
        return ''.join('\\' + ch if ch in specials else ch for ch in fragment)

    pattern = r'\s*\S+\s*'.join(
        escape_specials(piece.strip()) for piece in span.split(unk))
    found = re.search(pattern, text)
    return found.group().strip() if found else span
def test_fix_unk_from_text():
    """Regression cases for fix_unk_from_text.

    Each triple is (span containing <unk>, source text, expected span
    after <unk> recovery).  Covers leading/trailing/multiple <unk>
    placeholders and spans containing regex specials like () and [].
    """
    # (generated span, original sentence, expected recovered span)
    span_text_list = [
        ("<unk> colo e Bengo",
         "At 159 meters above sea level , Angola International Airport is located at Ícolo e Bengo , part of Luanda Province , in Angola .",
         "Ícolo e Bengo"),
        ("<unk> colo e Bengo",
         "Ícolo e Bengo , part of Luanda Province , in Angola .",
         "Ícolo e Bengo"),
        ("Arr<unk> s negre",
         "The main ingredients of Arròs negre , which is from Spain , are white rice , cuttlefish or squid , cephalopod ink , cubanelle and cubanelle peppers . Arròs negre is from the Catalonia region .",
         "Arròs negre"),
        ("colo <unk>",
         "At 159 meters above sea level , Angola International Airport is located at e Bengo , part of Luanda Province , in Angola . coloÍ",
         "coloÍ"),
        ("Tarō As<unk>", "The leader of Japan is Tarō Asō .", "Tarō Asō"),
        ("Tar<unk> As<unk>", "The leader of Japan is Tarō Asō .", "Tarō Asō"),
        ("<unk>Tar As<unk>", "The leader of Japan is ōTar Asō .", "ōTar Asō"),
        ("Atatürk Monument ( <unk> zmir )",
         "The Atatürk Monument ( İzmir ) can be found in Turkey .",
         "Atatürk Monument ( İzmir )"),
        ("The Atatürk Monument [ <unk> zmir ]",
         "The Atatürk Monument [ İzmir ] can be found in Turkey .",
         "The Atatürk Monument [ İzmir ]")
    ]
    for span, text, gold in span_text_list:
        # Print the actual recovery for manual inspection, then assert.
        print(span, '|', fix_unk_from_text(span, text))
        assert fix_unk_from_text(span, text) == gold
164,805 | from collections import Counter
import logging
from nltk.tree import ParentedTree
import re
from typing import Tuple, List, Dict
from uie.extraction.constants import (
null_span,
type_start,
type_end,
span_start,
)
from uie.extraction.predict_parser.predict_parser import PredictParser
from uie.extraction.predict_parser.utils import fix_unk_from_text
left_bracket = '【'
right_bracket = '】'
def add_space(text):
    """Insert single spaces between the special bracket tokens in *text*.

    Relies on the module-level `split_bracket` regex: the matched special
    tokens and the text between them are interleaved and re-joined with
    spaces.
    """
    pieces = list()
    specials = split_bracket.findall(text)
    remainders = split_bracket.split(text)[1:]
    for special_token, following_text in zip(specials, remainders):
        pieces.append(special_token)
        pieces.append(following_text)
    return ' '.join(pieces)
type_start = '<extra_id_0>'
type_end = '<extra_id_1>'


def convert_bracket(text):
    """Space out special tokens, then map the structural markers to
    CJK brackets (type_start -> left_bracket, type_end -> right_bracket)."""
    spaced = add_space(text)
    for marker, bracket in ((type_start, left_bracket),
                            (type_end, right_bracket)):
        spaced = spaced.replace(marker, bracket)
    return spaced
164,806 | from collections import Counter
import logging
from nltk.tree import ParentedTree
import re
from typing import Tuple, List, Dict
from uie.extraction.constants import (
null_span,
type_start,
type_end,
span_start,
)
from uie.extraction.predict_parser.predict_parser import PredictParser
from uie.extraction.predict_parser.utils import fix_unk_from_text
def find_bracket_num(tree_str):
def check_well_form(tree_str):
    """True when the left/right brackets in *tree_str* balance exactly."""
    bracket_balance = find_bracket_num(tree_str)
    return bracket_balance == 0
164,807 | from collections import Counter
import logging
from nltk.tree import ParentedTree
import re
from typing import Tuple, List, Dict
from uie.extraction.constants import (
null_span,
type_start,
type_end,
span_start,
)
from uie.extraction.predict_parser.predict_parser import PredictParser
from uie.extraction.predict_parser.utils import fix_unk_from_text
left_bracket = '【'
right_bracket = '】'


def clean_text(tree_str):
    """Truncate a token sequence at the first point where brackets balance.

    Tokens are whitespace-separated.  As soon as at least one bracket has
    been seen and the depth returns to zero, everything after that token
    is dropped; otherwise the string is returned re-joined unchanged.
    """
    depth = 0
    brackets_seen = 0
    tokens = tree_str.split()
    for position, token in enumerate(tokens):
        if token == left_bracket:
            depth += 1
            brackets_seen += 1
        elif token == right_bracket:
            depth -= 1
            brackets_seen += 1
        if depth == 0 and brackets_seen > 0:
            return ' '.join(tokens[:position + 1])
    return ' '.join(tokens)
164,808 | from collections import Counter
import logging
from nltk.tree import ParentedTree
import re
from typing import Tuple, List, Dict
from uie.extraction.constants import (
null_span,
type_start,
type_end,
span_start,
)
from uie.extraction.predict_parser.predict_parser import PredictParser
from uie.extraction.predict_parser.utils import fix_unk_from_text
span_start = '<extra_id_5>'


def resplit_label_span(label, span, split_symbol=span_start):
    """Re-split a (label, span) pair whose split point drifted in decoding.

    When *split_symbol* appears in "label span", the pair is re-split on
    it; if the symbol occurs other than exactly once, the original pair
    is returned unchanged (after logging the error to stdout).
    """
    combined = label + ' ' + span
    if split_symbol not in combined:
        return label, span
    try:
        new_label, new_span = combined.split(split_symbol)
    except:
        print('resplit_label_span error:', combined, split_symbol)
        return label, span
    return new_label.strip(), new_span.strip()
164,809 | from collections import Counter
import logging
from nltk.tree import ParentedTree
import re
from typing import Tuple, List, Dict
from uie.extraction.constants import (
null_span,
type_start,
type_end,
span_start,
)
from uie.extraction.predict_parser.predict_parser import PredictParser
from uie.extraction.predict_parser.utils import fix_unk_from_text
right_bracket = '】'
def find_bracket_num(tree_str):
    """Return (#left brackets - #right brackets) in *tree_str*.

    A result of 0 means the brackets are balanced.  Works on a string
    (character iteration) or a token list (token iteration), since the
    bracket markers are single characters.
    """
    balance = 0
    for item in tree_str:
        if item == left_bracket:
            balance += 1
        elif item == right_bracket:
            balance -= 1
    return balance
The provided code snippet includes necessary dependencies for implementing the `add_bracket` function. Write a Python function `def add_bracket(tree_str)` to solve the following problem:
add right bracket to fill ill-formed :param tree_str: :return:
Here is the function:
def add_bracket(tree_str):
    """Repair an ill-formed tree string by appending the missing right
    brackets so that left/right brackets balance."""
    tokens = tree_str.split()
    missing = find_bracket_num(tokens)
    return ' '.join(tokens + [right_bracket] * missing)
164,810 | from collections import Counter
import logging
from nltk.tree import ParentedTree
import re
from typing import Tuple, List, Dict
from uie.extraction.constants import (
null_span,
type_start,
type_end,
span_start,
)
from uie.extraction.predict_parser.predict_parser import PredictParser
from uie.extraction.predict_parser.utils import fix_unk_from_text
The provided code snippet includes necessary dependencies for implementing the `get_tree_str` function. Write a Python function `def get_tree_str(tree)` to solve the following problem:
get str from event tree :param tree: :return:
Here is the function:
def get_tree_str(tree):
    """Join the plain-string children of an event tree node with spaces,
    skipping non-string elements (subtrees)."""
    return ' '.join(element for element in tree if isinstance(element, str))
164,811 | from collections import Counter
import logging
from nltk.tree import ParentedTree
import re
from typing import Tuple, List, Dict
from uie.extraction.constants import (
null_span,
type_start,
type_end,
span_start,
)
from uie.extraction.predict_parser.predict_parser import PredictParser
from uie.extraction.predict_parser.utils import fix_unk_from_text
logger = logging.getLogger(__name__)


def fix_unk_from_text(span, text, unk='<unk>'):
    """Recover <unk> placeholders in *span* by locating the wording in *text*.

    Each *unk* placeholder is assumed to stand for exactly one
    whitespace-delimited token; the fragments around the placeholders
    are matched literally (with regex specials escaped).  Returns *span*
    unchanged when it contains no *unk* or no match is found.
    """
    if unk not in span:
        return span

    def escape_specials(fragment):
        # Escape only the same specials the original pipeline escapes.
        specials = ".*?()[]+"
        return ''.join('\\' + ch if ch in specials else ch for ch in fragment)

    pattern = r'\s*\S+\s*'.join(
        escape_specials(piece.strip()) for piece in span.split(unk))
    found = re.search(pattern, text)
    return found.group().strip() if found else span


def rewrite_label_span(label, span, label_set=None, text=None):
    """Validate and repair a predicted (label, span) pair.

    Returns (None, None) when the label is outside *label_set*, or when
    the (possibly unk-fixed) span does not occur in *text*; otherwise
    returns the pair, with <unk> tokens recovered from *text*.
    """
    # Invalid type: label not in the allowed set.
    if label_set and label not in label_set:
        logger.debug('Invalid Label: %s' % label)
        return None, None
    # Repair <unk> placeholders using the source text.
    if text is not None and '<unk>' in span:
        span = fix_unk_from_text(span, text, '<unk>')
    # Invalid span: the repaired span must literally occur in the text.
    if text is not None and span not in text:
        logger.debug('Invalid Text Span: %s\n%s\n' % (span, text))
        return None, None
    return label, span
164,812 | import json
from collections import defaultdict
from typing import List
class RecordSchema:
    """Schema of an extraction task: record types, role names, and the
    mapping from each record type to its legal roles."""

    def __init__(self, type_list, role_list, type_role_dict):
        self.type_list = type_list            # record/entity/event type names
        self.role_list = role_list            # association/role names
        self.type_role_dict = type_role_dict  # type name -> list of legal roles

    def __repr__(self) -> str:
        return f"Type: {self.type_list}\n" \
               f"Role: {self.role_list}\n" \
               f"Map: {self.type_role_dict}"

    # Fixed: these two factories take no `self`/`cls`, so they must be
    # static methods — without the decorator they break when accessed
    # through an instance.
    @staticmethod
    def get_empty_schema():
        """Return a schema with no types, roles, or mappings."""
        return RecordSchema(type_list=list(), role_list=list(), type_role_dict=dict())

    @staticmethod
    def read_from_file(filename):
        """Load a schema from a three-line JSON file (types, roles, map).

        Fixed: the file handle is now closed deterministically via a
        context manager instead of being leaked.
        """
        with open(filename) as schema_file:
            lines = schema_file.readlines()
        type_list = json.loads(lines[0])
        role_list = json.loads(lines[1])
        type_role_dict = json.loads(lines[2])
        return RecordSchema(type_list, role_list, type_role_dict)

    def write_to_file(self, filename):
        """Persist the schema as three JSON lines (types, roles, map)."""
        with open(filename, 'w') as output:
            output.write(json.dumps(self.type_list) + '\n')
            output.write(json.dumps(self.role_list) + '\n')
            output.write(json.dumps(self.type_role_dict) + '\n')
def merge_schema(schema_list: List[RecordSchema]):
    """Union several schemas into one, de-duplicating types, roles and
    each type's role list.  Ordering of the merged lists is unspecified
    (set-based, as in the per-task schemas)."""
    merged_types = set()
    merged_roles = set()
    merged_map = defaultdict(list)
    for schema in schema_list:
        merged_types.update(schema.type_list)
        merged_roles.update(schema.role_list)
        for type_name in schema.type_role_dict:
            merged_map[type_name] += schema.type_role_dict[type_name]
    # De-duplicate each type's accumulated role list.
    for type_name in merged_map:
        merged_map[type_name] = list(set(merged_map[type_name]))
    return RecordSchema(type_list=list(merged_types),
                        role_list=list(merged_roles),
                        type_role_dict=merged_map,
                        )
164,813 | from collections import defaultdict, OrderedDict
import os
from uie.extraction.record_schema import RecordSchema
from uie.extraction.predict_parser import get_predict_parser
from uie.sel2record.record import EntityRecord, MapConfig, RelationRecord, EventRecord
import logging
The provided code snippet includes necessary dependencies for implementing the `proprocessing_graph_record` function. Write a Python function `def proprocessing_graph_record(graph, schema_dict)` to solve the following problem:
Mapping generated spot-asoc result to Entity/Relation/Event 将抽取的Spot-Asoc结构,根据不同的 Schema 转换成 Entity/Relation/Event 结果
Here is the function:
def proprocessing_graph_record(graph, schema_dict):
    """Mapping generated spot-asoc result to Entity/Relation/Event.

    Spots whose type belongs to the entity schema become entities, spots
    in the event schema become events, and each asoc attached to an
    entity spot becomes a binary relation.  Finally, event roles are
    filtered down to arguments that were also extracted as entities
    (only when at least one entity was found).
    """
    records = {
        'entity': list(),
        'relation': list(),
        'event': list(),
    }
    # span text -> entity type, in first-seen order
    entity_type_by_span = OrderedDict()

    entity_types = schema_dict['entity'].type_list
    event_types = schema_dict['event'].type_list

    # Pass 1: route each predicted spot to entity or event results.
    for pred in graph['pred_record']:
        pred_type = pred['type']
        if pred_type in entity_types:
            records['entity'].append({'trigger': pred['trigger'],
                                      'type': pred_type})
            entity_type_by_span[pred['trigger']] = pred_type
        elif pred_type in event_types:
            records['event'].append(pred)
        else:
            print("Type `%s` invalid." % pred_type)

    # Pass 2: each asoc on an entity spot becomes a relation; the tail
    # entity's type falls back to the head's type when unknown.
    for pred in graph['pred_record']:
        if pred['type'] not in entity_types:
            continue
        for role in pred['roles']:
            tail_type = entity_type_by_span.get(role[1], pred['type'])
            records['relation'].append({
                'type': role[0],
                'roles': [(pred['type'], pred['trigger']),
                          (tail_type, role[1]),
                          ]
            })

    # Pass 3: drop event arguments that were not recognised as entities.
    if len(entity_type_by_span) > 0:
        for event_record in records['event']:
            if event_record['type'] in event_types:
                event_record['roles'] = [
                    role for role in event_record['roles']
                    if role[1] in entity_type_by_span
                ]
    return records
164,814 | from asyncio.log import logger
import sys
from typing import Tuple
import numpy
import logging
The provided code snippet includes necessary dependencies for implementing the `match_sublist` function. Write a Python function `def match_sublist(the_list, to_match)` to solve the following problem:
:param the_list: [1, 2, 3, 4, 5, 6, 1, 2, 4, 5] :param to_match: [1, 2] :return: [(0, 1), (6, 7)]
Here is the function:
def match_sublist(the_list, to_match):
    """
    :param the_list: [1, 2, 3, 4, 5, 6, 1, 2, 4, 5]
    :param to_match: [1, 2]
    :return:
        [(0, 1), (6, 7)]
    """
    window = len(to_match)
    return [(start, start + window - 1)
            for start in range(len(the_list) - window + 1)
            if the_list[start:start + window] == to_match]
164,815 | from asyncio.log import logger
import sys
from typing import Tuple
import numpy
import logging
def check_overlap(x, y):
    """Return True when the inclusive intervals x and y overlap."""
    return x[0] <= y[1] and y[0] <= x[1]
164,816 | from asyncio.log import logger
import sys
from typing import Tuple
import numpy
import logging
def get_index_tuple(matched: Tuple[int, int]):
    """Expand an inclusive (start, end) pair into the tuple of all covered indices."""
    start, end = matched
    return tuple(range(start, end + 1))
164,817 | from asyncio.log import logger
import sys
from typing import Tuple
import numpy
import logging
def span_to_token(text, span_to_token_strategy='space'):
    """Tokenise *text*: 'space' splits on single spaces, 'list' yields
    individual characters; any other strategy raises NotImplementedError."""
    if span_to_token_strategy == 'space':
        return text.split(' ')
    if span_to_token_strategy == 'list':
        return list(text)
    raise NotImplementedError(
        f"The span to token strategy {span_to_token_strategy} is not implemented.")
164,818 | import json
import re
from tqdm import tqdm
import transformers as huggingface_transformers
from uie.extraction.record_schema import RecordSchema
from uie.sel2record.record import MapConfig
from uie.extraction.scorer import *
from uie.sel2record.sel2record import SEL2Record
import math
import os
def read_json_file(file_name):
    """Read a JSON-Lines file (one JSON object per line) into a list.

    Fixed: the file handle is now closed deterministically via a context
    manager instead of being leaked by the inline `open` in the original
    list comprehension.
    """
    with open(file_name) as json_file:
        return [json.loads(line) for line in json_file]
164,819 | import json
import re
from tqdm import tqdm
import transformers as huggingface_transformers
from uie.extraction.record_schema import RecordSchema
from uie.sel2record.record import MapConfig
from uie.extraction.scorer import *
from uie.sel2record.sel2record import SEL2Record
import math
import os
class RecordSchema:
    """Schema of an extraction task: record types, role names, and the
    mapping from each record type to its legal roles."""

    def __init__(self, type_list, role_list, type_role_dict):
        self.type_list = type_list            # record/entity/event type names
        self.role_list = role_list            # association/role names
        self.type_role_dict = type_role_dict  # type name -> list of legal roles

    def __repr__(self) -> str:
        return f"Type: {self.type_list}\n" \
               f"Role: {self.role_list}\n" \
               f"Map: {self.type_role_dict}"

    # Fixed: these two factories take no `self`/`cls`, so they must be
    # static methods — without the decorator they break when accessed
    # through an instance.
    @staticmethod
    def get_empty_schema():
        """Return a schema with no types, roles, or mappings."""
        return RecordSchema(type_list=list(), role_list=list(), type_role_dict=dict())

    @staticmethod
    def read_from_file(filename):
        """Load a schema from a three-line JSON file (types, roles, map).

        Fixed: the file handle is now closed deterministically via a
        context manager instead of being leaked.
        """
        with open(filename) as schema_file:
            lines = schema_file.readlines()
        type_list = json.loads(lines[0])
        role_list = json.loads(lines[1])
        type_role_dict = json.loads(lines[2])
        return RecordSchema(type_list, role_list, type_role_dict)

    def write_to_file(self, filename):
        """Persist the schema as three JSON lines (types, roles, map)."""
        with open(filename, 'w') as output:
            output.write(json.dumps(self.type_list) + '\n')
            output.write(json.dumps(self.role_list) + '\n')
            output.write(json.dumps(self.type_role_dict) + '\n')


def schema_to_ssi(schema: RecordSchema):
    """Render a schema as the Structural Schema Instructor (SSI) prefix:
    sorted spot types, then sorted asoc roles, then the text separator."""
    ssi = "<spot> " + "<spot> ".join(sorted(schema.type_list))
    ssi += "<asoc> " + "<asoc> ".join(sorted(schema.role_list))
    ssi += "<extra_id_2> "
    return ssi
164,820 | import json
import re
from tqdm import tqdm
import transformers as huggingface_transformers
from uie.extraction.record_schema import RecordSchema
from uie.sel2record.record import MapConfig
from uie.extraction.scorer import *
from uie.sel2record.sel2record import SEL2Record
import math
import os
# Decoder special tokens that must not appear in final output text.
special_to_remove = {'<pad>', '</s>'}


def post_processing(x):
    """Strip decoder special tokens from *x* and trim surrounding whitespace."""
    for token in special_to_remove:
        x = x.replace(token, '')
    return x.strip()
164,821 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
default_data_collator,
set_seed
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from uie.extraction import constants
from uie.extraction.record_schema import RecordSchema
from uie.extraction.predict_parser import decoding_format_dict
from uie.extraction.extraction_metrics import get_extract_metrics
from uie.extraction.noiser.spot_asoc_noiser import SpotAsocNoiser
from uie.extraction.dataset_processer import PrefixGenerator
from uie.seq2seq.constrained_seq2seq import (
ConstraintSeq2SeqTrainingArguments,
ConstraintSeq2SeqTrainer,
OriginalConstraintSeq2SeqTrainer,
UIEPretrainConstraintSeq2SeqTrainer,
UIEFinetuneConstraintSeq2SeqTrainer,
MetaPretrainConstraintSeq2SeqTrainer,
MetaFinetuneConstraintSeq2SeqTrainer,
)
from uie.seq2seq.data_collator import (
DataCollatorForMetaSeq2Seq,
DynamicSSIGenerator,
)
from uie.seq2seq.features import RecordFeature
from uie.seq2seq.model import PromptSeq2SeqTransformer
from uie.seq2seq.noise_record import create_noised_record
import pdb
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, ConstraintSeq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
logger.info("Options:")
logger.info(model_args)
logger.info(data_args)
logger.info(training_args)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files in the summarization task, this script will use the first column for the full texts and the
# second column for the summaries (unless you specify column names for this with the `text_column` and
# `record_column` arguments).
# For translation, only JSON files are supported, with one field named "translation" containing two keys for the
# source and target languages (unless you adapt what follows).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if training_args.do_eval and data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if training_args.do_predict and data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
logger.info(data_files)
datasets = load_dataset("uie_json.py", data_files=data_files, block_size=(10<<22))
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
logger.info(datasets)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
logger.info("Load Config: %s" % model_args.config_name if model_args.config_name else model_args.model_name_or_path)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.max_length = data_args.max_target_length
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
to_remove_token_list = list()
if tokenizer.bos_token:
to_remove_token_list += [tokenizer.bos_token]
if tokenizer.eos_token:
to_remove_token_list += [tokenizer.eos_token]
if tokenizer.pad_token:
to_remove_token_list += [tokenizer.pad_token]
if model_args.use_prompt_tuning_model:
MODEL = PromptSeq2SeqTransformer
else:
MODEL = AutoModelForSeq2SeqLM
if model_args.load_config_only:
model = MODEL.from_config(config)
else:
model = MODEL.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
mirror='tuna',
)
if training_args.do_train:
to_add_special_token = list()
for special_token in [constants.type_start, constants.type_end, constants.text_start, constants.span_start, constants.spot_prompt, constants.asoc_prompt]:
if special_token not in tokenizer.get_vocab():
to_add_special_token += [special_token]
tokenizer.add_special_tokens(
{"additional_special_tokens": tokenizer.special_tokens_map_extended['additional_special_tokens'] + to_add_special_token}
)
model.resize_token_embeddings(len(tokenizer))
logger.info(tokenizer)
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
if data_args.record_schema and os.path.exists(data_args.record_schema):
record_schema = RecordSchema.read_from_file(data_args.record_schema)
else:
record_schema = None
if data_args.source_prefix is not None:
if data_args.source_prefix == 'schema':
prefix = PrefixGenerator.get_schema_prefix(schema=record_schema)
elif data_args.source_prefix.startswith('meta'):
prefix = ""
else:
prefix = data_args.source_prefix
else:
prefix = ""
logger.info(f"Prefix: {prefix}")
logger.info(f"Prefix Length: {len(tokenizer.tokenize(prefix))}")
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = datasets["train"].column_names
elif training_args.do_eval:
column_names = datasets["validation"].column_names
elif training_args.do_predict:
column_names = datasets["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# To serialize preprocess_function below, each of those four variables needs to be defined (even if we won't use
# them all).
text_column = data_args.text_column
record_column = data_args.record_column
logger.info('Using src: %s and tgt: %s' % (text_column, record_column))
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.error(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
def preprocess_function(examples):
    """Tokenize a batch for seq2seq UIE training.

    Maps source text -> model inputs and linearized records -> labels, and
    attaches UIE-specific fields (noised_record, spots/asocs, sample_prompt).
    Relies on closure variables from the enclosing setup: tokenizer, prefix,
    padding, max_target_length, text_column, record_column, data_args.
    """
    inputs = examples[text_column]
    targets = examples[record_column]
    # Prepend the (possibly empty) schema/task prefix to every source text.
    inputs = [prefix + inp for inp in inputs]
    model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
    model_inputs["text"] = inputs
    # Setup the tokenizer for targets
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
    # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
    # padding in the loss.
    if padding == "max_length" and data_args.ignore_pad_token_for_loss:
        labels["input_ids"] = [
            [(_label if _label != tokenizer.pad_token_id else -100) for _label in label] for label in labels["input_ids"]
        ]
    model_inputs["labels"] = labels["input_ids"]
    # set noised record inputs
    # Rows without a precomputed noised record get one built on the fly from
    # the gold annotations (create_noised_record is defined elsewhere).
    noised_record_list = []
    for idx, noised_record in enumerate(examples["noised_record"]):
        if noised_record is None:
            tokens = examples["tokens"][idx]
            entity_list = examples["entity"][idx]
            triple_list = examples["relation"][idx]
            event_list = examples["event"][idx]
            noised_record = create_noised_record(tokens, entity_list, triple_list, event_list)
        noised_record_list.append(noised_record)
    model_inputs["noised_record"] = noised_record_list
    # model_inputs["noised_record"] = examples["noised_record"]
    # others
    model_inputs['sample_prompt'] = [False] * len(model_inputs['input_ids'])
    if data_args.source_prefix is not None and data_args.source_prefix.startswith('meta'):
        model_inputs['spots'] = examples['spot']
        model_inputs['asocs'] = examples['asoc']
        model_inputs['spot_asoc'] = examples['spot_asoc']
        # sample_prompt=True for Finetune and Pretrain
        model_inputs['sample_prompt'] = [True] * len(model_inputs['input_ids'])
    return model_inputs
def preprocess_function_eval(examples):
    """Evaluation-time preprocessing: identical to training, but prompt
    sampling is always disabled."""
    features = preprocess_function(examples)
    features['sample_prompt'] = [False for _ in features['input_ids']]
    return features
def postprocess_text(x_str):
    """Remove special tokens (bos/eos/pad, listed in to_remove_token_list)
    and strip surrounding whitespace."""
    cleaned = x_str
    for token in to_remove_token_list:
        cleaned = cleaned.replace(token, '')
    return cleaned.strip()
logger.info("Start Data Preprocessing ...")
if training_args.do_train:
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
features=RecordFeature,
)
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
eval_dataset = datasets["validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(
preprocess_function_eval,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
features=RecordFeature,
)
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
test_dataset = datasets["test"]
if data_args.max_test_samples is not None:
test_dataset = test_dataset.select(range(data_args.max_test_samples))
test_dataset = test_dataset.map(
preprocess_function_eval,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
features=RecordFeature,
)
logger.info("End Data Preprocessing ...")
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif data_args.source_prefix.startswith('meta'):
if data_args.spot_noise > 0 or data_args.asoc_noise > 0:
if data_args.decoding_format == 'spotasoc':
spot_asoc_nosier = SpotAsocNoiser(
spot_noise_ratio=data_args.spot_noise,
asoc_noise_ratio=data_args.asoc_noise,
null_span=constants.null_span,
)
else:
raise NotImplementedError(
"decoding_format `spotasoc` is not implemented."
)
else:
spot_asoc_nosier = None
data_collator = DataCollatorForMetaSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
max_length=data_args.max_source_length,
max_prefix_length=data_args.max_prefix_length,
max_target_length=data_args.max_target_length,
negative_sampler=DynamicSSIGenerator(
tokenizer=tokenizer,
schema=record_schema,
positive_rate=data_args.meta_positive_rate,
negative=data_args.meta_negative,
ordered_prompt=data_args.ordered_prompt,
),
spot_asoc_nosier=spot_asoc_nosier,
decoding_format=data_args.decoding_format,
)
else:
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
def compute_metrics(eval_preds):
    """Decode generated ids and gold labels, then score them with the UIE
    extraction metrics.

    Uses closure variables: tokenizer, data_args, record_schema, and
    postprocess_text. Returns a dict of rounded metric values plus the mean
    generated length.
    """
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=False, clean_up_tokenization_spaces=False)
    if data_args.ignore_pad_token_for_loss:
        # Replace -100 in the labels as we can't decode them.
        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=False, clean_up_tokenization_spaces=False)
    # Strip bos/eos/pad markers before structured-record parsing.
    decoded_preds = [postprocess_text(x) for x in decoded_preds]
    decoded_labels = [postprocess_text(x) for x in decoded_labels]
    result = get_extract_metrics(
        pred_lns=decoded_preds,
        tgt_lns=decoded_labels,
        label_constraint=record_schema,
        decoding_format=data_args.decoding_format,
    )
    # Report average generated length (non-pad tokens) alongside the F1 metrics.
    prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
    result["gen_len"] = np.mean(prediction_lens)
    result = {k: round(v, 4) for k, v in result.items()}
    return result
# Initialize our Trainer
if training_args.trainer_type == "uie_pretrain":
TRAINER = UIEPretrainConstraintSeq2SeqTrainer
elif training_args.trainer_type == "uie_finetune":
TRAINER = UIEFinetuneConstraintSeq2SeqTrainer
elif training_args.trainer_type == "meta_pretrain":
TRAINER = MetaPretrainConstraintSeq2SeqTrainer
elif training_args.trainer_type == "meta_finetune":
TRAINER = MetaFinetuneConstraintSeq2SeqTrainer
else:
TRAINER = OriginalConstraintSeq2SeqTrainer
trainer = TRAINER(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
decoding_type_schema=record_schema,
decoding_format=data_args.decoding_format,
source_prefix=prefix,
task=data_args.task,
)
# Training
if training_args.do_train:
if model_args.from_checkpoint:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
if trainer.is_world_process_zero():
with open(output_train_file, "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
results = trainer.evaluate(max_length=data_args.val_max_target_length, num_beams=data_args.num_beams)
results = {k: round(v, 4) for k, v in results.items()}
eval_results = trainer.predict(
eval_dataset,
metric_key_prefix="eval",
max_length=data_args.val_max_target_length,
num_beams=data_args.num_beams,
)
output_eval_file = os.path.join(training_args.output_dir, "eval_results_seq2seq.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
if training_args.predict_with_generate:
eval_preds = tokenizer.batch_decode(
eval_results.predictions, skip_special_tokens=False, clean_up_tokenization_spaces=False
)
eval_preds = [postprocess_text(pred) for pred in eval_preds]
output_test_preds_file = os.path.join(training_args.output_dir, "eval_preds_seq2seq.txt")
with open(output_test_preds_file, "w") as writer:
writer.write("\n".join(eval_preds))
if training_args.do_predict:
logger.info("*** Test ***")
test_results = trainer.predict(
test_dataset,
metric_key_prefix="test",
max_length=data_args.val_max_target_length,
num_beams=data_args.num_beams,
)
test_metrics = test_results.metrics
test_metrics["test_loss"] = round(test_metrics["test_loss"], 4)
output_test_result_file = os.path.join(training_args.output_dir, "test_results_seq2seq.txt")
if trainer.is_world_process_zero():
with open(output_test_result_file, "w") as writer:
logger.info("***** Test results *****")
for key, value in sorted(test_metrics.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
if training_args.predict_with_generate:
test_preds = tokenizer.batch_decode(
test_results.predictions, skip_special_tokens=False, clean_up_tokenization_spaces=False
)
test_preds = [postprocess_text(pred) for pred in test_preds]
output_test_preds_file = os.path.join(training_args.output_dir, "test_preds_seq2seq.txt")
with open(output_test_preds_file, "w") as writer:
writer.write("\n".join(test_preds))
return results
def _mp_fn(index):
    """Per-worker entry point for xla_spawn (TPUs); delegates to main().
    The worker index is unused."""
    main()
164,822 | from tensorboard.backend.event_processing import event_accumulator
import matplotlib.pyplot as plt
def read_tensorboard_data(tensorboard_log_path, val_name):
    """Load the scalar event series named `val_name` from a tensorboard log,
    printing the available scalar keys along the way."""
    accumulator = event_accumulator.EventAccumulator(tensorboard_log_path)
    accumulator.Reload()
    print("All scalers:")
    print(accumulator.scalars.Keys())
    return accumulator.scalars.Items(val_name)
164,823 | from tensorboard.backend.event_processing import event_accumulator
import matplotlib.pyplot as plt
def plot(vals, val_names, max_step=None):
    """Plot several step/value scalar series on one labelled figure,
    optionally truncating each series to steps strictly below max_step."""
    plt.figure()
    for series, series_name in zip(vals, val_names):
        steps = [event.step for event in series]
        values = [event.value for event in series]
        if max_step is not None:
            steps = [s for s in steps if s < max_step]
            values = values[:len(steps)]
        plt.plot(steps, values, label=series_name)
    plt.xlabel("step")
    plt.ylabel("loss")
    plt.legend()
    plt.show()
164,824 | import json
import os
from collections import OrderedDict
import numpy as np
from tabulate import tabulate
def align_float(x):
    """Render floats with two decimal places; return any other value unchanged."""
    if isinstance(x, float):
        return '%.2f' % x
    return x
164,825 | import json
import os
from collections import OrderedDict
import numpy as np
from tabulate import tabulate
def parse_trainer_state(filename):
    """Return the step id of the best checkpoint from a trainer_state.json.

    Checkpoint paths look like `.../checkpoint-<step>`; when no best
    checkpoint was recorded, the string 'last' is returned instead.
    Fix: the original leaked the file handle (`json.load(open(filename))`);
    use a context manager so the file is always closed.
    """
    with open(filename) as fin:
        trainer_state = json.load(fin)
    best = trainer_state['best_model_checkpoint']
    if best is None:
        return 'last'
    return best.split('/')[-1].replace('checkpoint-', '')
164,826 | import json
import os
from collections import OrderedDict
import numpy as np
from tabulate import tabulate
def parse_global_step(filename):
    """Return the `global_step` recorded in a trainer-state JSON file, as a string.

    Fix: the original leaked the file handle (`json.load(open(filename))`);
    use a context manager so the file is always closed.
    """
    with open(filename) as fin:
        return str(json.load(fin)['global_step'])
164,827 | import json
import os
from collections import OrderedDict
import numpy as np
from tabulate import tabulate
def check_out_of_memory(filename):
    """Return True iff `filename` exists, is readable text, and contains a
    'CUDA out of memory' line; binary/undecodable logs count as False."""
    if not os.path.exists(filename):
        return False
    try:
        with open(filename) as log:
            return any('CUDA out of memory' in line for line in log)
    except UnicodeDecodeError:
        return False
164,828 | import json
import os
from collections import OrderedDict
import numpy as np
from tabulate import tabulate
def get_run_name(folder_name, prefix):
    """Derive a run name from a folder name by dropping attributes that start
    with `prefix`.

    For prefix 'run' the whole path (with '/' folded to '_') is split on '_';
    otherwise the first '_'-separated component is discarded before filtering.
    """
    if prefix == 'run':
        attributes = folder_name.replace('/', '_').split('_')
    else:
        attributes = folder_name.split('_')[1:]
    kept = [att for att in attributes if not att.startswith(prefix)]
    return '_'.join(kept)
164,829 | import argparse
import json
import os
from collections import Counter, defaultdict
from transformers import AutoTokenizer
from tabulate import tabulate
from tqdm import tqdm
from uie.seq2seq.t5_bert_tokenizer import T5BertTokenizer
from uie.extraction.dataset_processer import PrefixGenerator
from uie.extraction.record_schema import RecordSchema
def find_key(count):
    """Bucket a length count into a labelled range key.

    Zero gets its own bucket; anything above 512 is the overflow bucket;
    everything else falls into 64-wide-ish ranges via the boundary table.
    """
    if count == 0:
        return "8. =0"
    boundaries = [
        (64, "0. <64"),
        (128, "1. 64-128"),
        (192, "2.128-192"),
        (256, "3.192-256"),
        (320, "4.256-320"),
        (384, "5.320-384"),
        (512, "6.384-512"),
    ]
    for upper, label in boundaries:
        if count <= upper:
            return label
    return '7.>512'
def get_acc_list(counter):
    """Aggregate a length->count Counter into cumulative per-bucket rows of
    (bucket_key, cumulative_count, cumulative_percentage)."""
    total = float(sum(counter.values()))
    bucket_counts = defaultdict(int)
    for length in sorted(counter.keys()):
        bucket_counts[find_key(length)] += counter[length]
    rows = []
    running = 0
    for bucket in sorted(bucket_counts.keys()):
        running += bucket_counts[bucket]
        rows.append((bucket, running, "%.2f" % (running / total * 100)))
    return rows
164,830 | import argparse
import json
import os
import sys
import numpy as np
from pprint import pprint
from uie.extraction.scorer import EntityScorer, RelationScorer, EventScorer
def read_file(file_name):
    """Read a text file and return its lines, newline characters included.

    Fix: the original left the file handle open (`open(...).readlines()` in a
    list comprehension); use a context manager and drop the redundant copy.
    """
    with open(file_name) as fin:
        return fin.readlines()
164,831 | import argparse
import json
import os
import sys
import numpy as np
from pprint import pprint
from uie.extraction.scorer import EntityScorer, RelationScorer, EventScorer
def write_to_file(result, output_filename, prefix=None):
    """Write a metrics dict as `key=value` lines, optionally namespacing each
    key as `<prefix>_<key>`."""
    with open(output_filename, 'w') as output:
        for key, value in result.items():
            name = '%s_%s' % (prefix, key) if prefix else key
            output.write("%s=%s\n" % (name, value))
164,832 | import json
import os
import random
import argparse
from collections import OrderedDict
from tqdm import tqdm
import pdb
visited_type = set()
def get_visited_type(instance_id_list, instance_type_dict):
    """Return the intersection of the type sets of all listed instances.

    The first instance seeds the set (union with empty); every later instance
    intersects. An empty id list yields an empty set.
    """
    visited = set()
    for position, instance_id in enumerate(instance_id_list):
        if position == 0:
            visited = visited | instance_type_dict[instance_id]
        else:
            visited = visited & instance_type_dict[instance_id]
    return visited
164,833 | import json
import os
import random
import argparse
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
random.seed(seed)
np.random.seed(seed)
THRESHOLD = 0.8
def noise_entity_type(entity_list):
    """Randomly relabel entities with a type drawn from this sentence's own
    type inventory.

    Noise fires when np.random.rand() > THRESHOLD (module-level, 0.8 — i.e.
    ~20% of entities); the uniformly-sampled replacement may coincide with
    the original type. Inputs are deep-copied, never mutated in place.
    """
    entity_type_list = []
    for entity in entity_list:
        entity_type_list.append(entity["type"])
    entity_type_list = list(set(entity_type_list))
    noised_entity_list = []
    for entity in entity_list:
        noised_entity = deepcopy(entity)
        if np.random.rand() > THRESHOLD:
            noised_entity_type = random.choice(entity_type_list)
            noised_entity["type"] = noised_entity_type
        noised_entity_list.append(noised_entity)
    return noised_entity_list
164,834 | import json
import os
import random
import argparse
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
random.seed(seed)
np.random.seed(seed)
NOISE_OFFSET_RANGE = list(range(NOISE_OFFSET_THRESHOLD))
NOISE_OFFSET_WEIGHT = np.exp(- DECAY_COEF * np.array(NOISE_OFFSET_RANGE))
NOISE_OFFSET_WEIGHT = NOISE_OFFSET_WEIGHT / NOISE_OFFSET_WEIGHT.sum()
def noise_entity_offset(entity_list, tokens):
    """Perturb each entity's span boundaries with random offsets and rebuild
    its mention text from the widened token span.

    Offset noise is sampled from NOISE_OFFSET_RANGE with exponentially
    decaying weights NOISE_OFFSET_WEIGHT (module level), so small shifts are
    most likely. The noised span is clamped to [0, len(tokens)-1].
    """
    noised_entity_list = []
    for entity in entity_list:
        noised_entity = deepcopy(entity)
        entity_offset = noised_entity["offset"]
        start_index, end_index = entity_offset[0], entity_offset[-1]
        start_noise = np.random.choice(NOISE_OFFSET_RANGE, p=NOISE_OFFSET_WEIGHT)
        end_noise = np.random.choice(NOISE_OFFSET_RANGE, p=NOISE_OFFSET_WEIGHT)
        # Clamp so the widened span stays inside the sentence.
        noised_start_index = max(start_index-start_noise, 0)
        noised_end_index = min(end_index+end_noise, len(tokens)-1)
        noised_entity_offset = list(range(noised_start_index, noised_end_index+1))
        noised_entity_mention = " ".join(tokens[noised_start_index:noised_end_index+1])
        noised_entity["offset"] = noised_entity_offset
        noised_entity["text"] = noised_entity_mention
        noised_entity_list.append(noised_entity)
    return noised_entity_list
164,835 | import json
import os
import random
import argparse
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
random.seed(seed)
np.random.seed(seed)
THRESHOLD = 0.8
def noise_entity_with_other_entity(entity_list):
    """Randomly swap an entity's mention/offset with those of another entity
    of the same type.

    Noise fires when np.random.rand() > THRESHOLD (0.8, i.e. ~20%); the
    replacement is sampled uniformly from same-type entities and may be the
    entity itself. Type labels are left untouched.
    """
    type_entity_mapping = {}
    for entity in entity_list:
        entity_type = entity["type"]
        if entity_type not in type_entity_mapping:
            type_entity_mapping[entity_type] = []
        type_entity_mapping[entity_type].append(entity)
    noised_entity_list = []
    for entity in entity_list:
        noised_entity = deepcopy(entity)
        if np.random.rand() > THRESHOLD:
            entity_type = noised_entity["type"]
            other_entity = random.choice(type_entity_mapping[entity_type])
            noised_entity["text"] = other_entity["text"]
            noised_entity["offset"] = other_entity["offset"]
        noised_entity_list.append(noised_entity)
    return noised_entity_list
164,836 | import json
import os
import random
import argparse
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
random.seed(seed)
np.random.seed(seed)
THRESHOLD = 0.8
def noise_relation_type(triple_list):
    """Randomly relabel triples with a relation type drawn from this
    sentence's own relation-type inventory.

    Noise fires when np.random.rand() > THRESHOLD (0.8, i.e. ~20%); the
    replacement may coincide with the original type. Triples are deep-copied.
    """
    relation_type_list = []
    for triple in triple_list:
        relation_type_list.append(triple["type"])
    relation_type_list = list(set(relation_type_list))
    noised_triple_list = []
    for triple in triple_list:
        noised_triple = deepcopy(triple)
        if np.random.rand() > THRESHOLD:
            noised_relation_type = random.choice(relation_type_list)
            noised_triple["type"] = noised_relation_type
        noised_triple_list.append(noised_triple)
    return noised_triple_list
164,837 | import json
import os
import random
import argparse
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
random.seed(seed)
np.random.seed(seed)
TRIPLE_THRESHOLD = [0.6, 0.8]
def noise_triple_num(triple_list, entity_list):
    """Randomly keep, duplicate-with-corrupted-tail, or drop each triple.

    Per module-level TRIPLE_THRESHOLD = [0.6, 0.8]: with p<0.6 the triple is
    kept as-is; with 0.6<=p<0.8 it is kept AND a copy with a random entity
    substituted as the tail argument is appended; otherwise it is dropped.
    """
    noised_triple_list = []
    for triple in triple_list:
        p = np.random.rand()
        if p < TRIPLE_THRESHOLD[0]: # do nothing
            noised_triple_list.append(triple)
        elif p < TRIPLE_THRESHOLD[1]: # add noised triple
            noised_triple_list.append(triple)
            noised_triple = deepcopy(triple)
            replaced_tail = random.choice(entity_list)
            noised_triple["args"][1] = replaced_tail
            noised_triple_list.append(noised_triple)
        else: # remove triple
            pass
    return noised_triple_list
164,838 | import json
import os
import random
import argparse
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
def build_entity_dict(entity_list):
    """Index entities by their `uri` field; later duplicates overwrite
    earlier ones."""
    return {entity["uri"]: entity for entity in entity_list}
164,839 | import json
import os
import random
import argparse
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
def update_relation_triple_by_noised_entity(triple_list, noised_entity_dict):
    """Rebuild each triple with its head/tail replaced by the noised entity
    sharing the same uri; triples themselves are deep-copied."""
    updated = []
    for triple in triple_list:
        new_triple = deepcopy(triple)
        head, tail = new_triple["args"]
        new_triple["args"] = [
            noised_entity_dict[head["uri"]],
            noised_entity_dict[tail["uri"]],
        ]
        updated.append(new_triple)
    return updated
164,840 | import json
import os
import random
import argparse
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
def create_spot_asoc_field(instance_entity_list, instance_triple_list):
    """Build spot-asoc records: one spot per entity, whose asoc list holds
    [relation_type, tail_text] for every triple this entity heads."""
    spot_asoc_list = []
    for entity in instance_entity_list:
        outgoing = [
            [triple["type"], triple["args"][1]["text"]]
            for triple in instance_triple_list
            if triple["args"][0]["uri"] == entity["uri"]
        ]
        spot_asoc_list.append({
            "span": entity["text"],
            "label": entity["type"],
            "asoc": outgoing,
        })
    return spot_asoc_list
164,841 | import json
import os
import random
import argparse
from tqdm import tqdm
from copy import deepcopy
import numpy as np
import pdb
def create_record_field(instance_spot_asoc_list):
    """Linearize spot-asoc records into the <extra_id_*>-bracketed target
    string used as the seq2seq label."""
    parts = ["<extra_id_0> "]
    for spot in instance_spot_asoc_list:
        parts += ["<extra_id_0> ", spot["label"] + " ", "<extra_id_5> ", spot["span"] + " "]
        for asoc in spot["asoc"]:
            parts += ["<extra_id_0> ", asoc[0] + " ", "<extra_id_5> ", asoc[1] + " ", "<extra_id_1> "]
        parts.append("<extra_id_1> ")
    parts.append("<extra_id_1>")
    return "".join(parts)
164,842 | import json
import os
import random
import argparse
from tqdm import tqdm
import pdb
def create_spot_asoc_field(instance_entity_list, instance_triple_list):
    """Create the spot-asoc view of an instance: every entity becomes a spot
    whose asoc list collects the relations that entity heads."""
    records = []
    for entity in instance_entity_list:
        record = {"span": entity["text"], "label": entity["type"], "asoc": []}
        for triple in instance_triple_list:
            head = triple["args"][0]
            if head["uri"] == entity["uri"]:
                record["asoc"].append([triple["type"], triple["args"][1]["text"]])
        records.append(record)
    return records
def create_record_field(instance_spot_asoc_list):
    """Serialize spot-asoc records into the bracketed <extra_id_*> target
    string expected by the UIE decoder."""
    pieces = ["<extra_id_0> "]
    for spot_asoc in instance_spot_asoc_list:
        pieces.append("<extra_id_0> ")
        pieces.append(spot_asoc["label"] + " ")
        pieces.append("<extra_id_5> ")
        pieces.append(spot_asoc["span"] + " ")
        for asoc in spot_asoc["asoc"]:
            pieces.append("<extra_id_0> ")
            pieces.append(asoc[0] + " ")
            pieces.append("<extra_id_5> ")
            pieces.append(asoc[1] + " ")
            pieces.append("<extra_id_1> ")
        pieces.append("<extra_id_1> ")
    pieces.append("<extra_id_1>")
    return "".join(pieces)
def filter_entity_by_entity_type(entity_list, target_entity_type_list):
    """Keep only entities whose type appears in the target type list.

    Entities look like:
    {"type": ..., "offset": [...], "text": ..., "uri": ...}
    """
    kept = []
    for entity in entity_list:
        if entity["type"] in target_entity_type_list:
            kept.append(entity)
    return kept
def filter_triple_by_entity_list(triple_list, filtered_entity_list):
    """Keep triples whose head AND tail entities both appear in the filtered
    entity list (membership by full-dict equality)."""
    kept = []
    for triple in triple_list:
        head, tail = triple["args"]
        if head in filtered_entity_list and tail in filtered_entity_list:
            kept.append(triple)
    return kept
def build_target_relation_type_list(filtered_triple_list):
    """Collect the distinct relation types present in the given triples."""
    return list({triple["type"] for triple in filtered_triple_list})
def filter_triple_by_relation_type(triple_list, target_relation_type_list):
    """Keep only triples whose relation type appears in the target list.

    Triples look like:
    {"type": ..., "args": [head_entity, tail_entity]}
    """
    kept = []
    for triple in triple_list:
        if triple["type"] in target_relation_type_list:
            kept.append(triple)
    return kept
def filter_entity_by_triple_list(entity_list, filtered_triple_list):
    """Collect, in first-seen order and deduplicated by uri, every entity that
    participates in one of the filtered triples.

    Note: the `entity_list` parameter is unused; it is kept for interface
    compatibility with callers.
    """
    seen_uris = set()
    unique_entities = []
    for triple in filtered_triple_list:
        head, tail = triple["args"]
        for endpoint in (head, tail):
            uri = endpoint["uri"]
            if uri not in seen_uris:
                seen_uris.add(uri)
                unique_entities.append(endpoint)
    return unique_entities
def build_target_entity_type_list(filtered_entity_list):
    """Collect the distinct entity types present in the given entities."""
    return list({entity["type"] for entity in filtered_entity_list})
def create_instance(instance_line, target_entity_type_list, target_relation_type_list):
    """Filter one JSON-line instance down to the target entity/relation types
    and rebuild its derived fields (spot, asoc, spot_asoc, record).

    When no relation types are given, entity types drive the filtering and
    the relation-type list is derived from the surviving triples; otherwise
    relation types drive it and the entity-type list is derived.
    """
    instance = json.loads(instance_line)
    entity_list = instance["entity"]
    triple_list = instance["relation"]
    spot_asoc_list = instance["spot_asoc"]
    record = instance["record"]
    if len(target_relation_type_list) == 0:
        # Entity-type driven: keep target-type entities, then only triples
        # whose head and tail both survived.
        filtered_entity_list = filter_entity_by_entity_type(entity_list, target_entity_type_list)
        filtered_triple_list = filter_triple_by_entity_list(triple_list, filtered_entity_list)
        current_target_entity_type_list = target_entity_type_list
        current_target_relation_type_list = build_target_relation_type_list(filtered_triple_list)
    else:
        # Relation-type driven: keep target-type triples, then their endpoints.
        filtered_triple_list = filter_triple_by_relation_type(triple_list, target_relation_type_list)
        filtered_entity_list = filter_entity_by_triple_list(entity_list, filtered_triple_list)
        current_target_entity_type_list = build_target_entity_type_list(filtered_entity_list)
        current_target_relation_type_list = target_relation_type_list
    # Rebuild the linearized spot-asoc structure and the target record string.
    filtered_spot_asoc_list = create_spot_asoc_field(filtered_entity_list, filtered_triple_list)
    filtered_record = create_record_field(filtered_spot_asoc_list)
    instance["entity"] = filtered_entity_list
    instance["relation"] = filtered_triple_list
    instance["spot"] = current_target_entity_type_list
    instance["asoc"] = current_target_relation_type_list
    instance["spot_asoc"] = filtered_spot_asoc_list
    instance["record"] = filtered_record
    return instance
164,843 | import os
import json
import math
import time
import argparse
from tqdm import tqdm
import networkx as nx
import pdb
def score(x_label, y_label, add_coef=True):
    """Directional overlap score between two label collections.

    Computes |x∩y|/|x| and |x∩y|/|y| — each optionally plus a 1/|other|
    tie-break coefficient — and returns (larger_score, flag) where the flag
    is True exactly when the x→y direction wins strictly.
    """
    x_set = set(x_label)
    y_set = set(y_label)
    overlap = len(x_set & y_set)
    y2x = overlap / len(x_set) + (1 / len(y_set) if add_coef else 0)
    x2y = overlap / len(y_set) + (1 / len(x_set) if add_coef else 0)
    if x2y > y2x:
        return x2y, True
    return y2x, False
164,844 | import json
import os
import random
import argparse
from tqdm import tqdm
from nltk.tokenize import WordPunctTokenizer
import numpy as np
import pdb
# Module-level diagnostics counters, mutated (via `global`) by record2instance
# below: total entities seen, entities whose rebuilt mention mismatched the
# original, and entities lacking a usable token offset.
ALL_ENTITY_CNT = 0
NOMATCH_ENTITY_CNT = 0
NON_OFFSET_ENTITY_CNT = 0
def word_tokenize(text):
    """Tokenize text with the module-level WordPunctTokenizer instance."""
    return word_tokenizer.tokenize(text)
# Module-level accumulators populated by record2instance during conversion.
text_length_list = []
relation_list = []
entity_type_list = []
triple_type_list = []
# Deduplicate the accumulated label inventories.
# NOTE(review): at this point the lists are empty as written — presumably
# this block runs after conversion in the original script; confirm ordering.
entity_type_list = list(set(entity_type_list))
relation_list = list(set(relation_list))
# Group relation types under their head entity type, then dedup each group.
# NOTE(review): record_type_list is not defined in this chunk — presumably a
# dict created elsewhere; verify before running standalone.
for head_entity_type, realtion_type, tail_entity_type in triple_type_list:
    if record_type_list.get(head_entity_type) is None:
        record_type_list[head_entity_type] = []
    record_type_list[head_entity_type].append(realtion_type)
for head_entity_type, record_relation_list in record_type_list.items():
    record_type_list[head_entity_type] = list(set(record_relation_list))
def record2instance(record):
    """Convert one raw (T-REx-style) sentence record into a UIE instance dict.

    Builds text/tokens, entity and relation annotations, the spot/asoc label
    inventories, the spot_asoc structure, and the linearized record string.
    Returns None when an entity has no usable token offset. Mutates the
    module-level counters (ALL_ENTITY_CNT, NOMATCH_ENTITY_CNT,
    NON_OFFSET_ENTITY_CNT) and accumulator lists (text_length_list,
    entity_type_list, relation_list, triple_type_list) as side effects.
    """
    instance = {
        "text": None,
        "tokens": None,
        "record": None,
        "entity": None,
        "relation": None,
        "event": [],
        "spot": None,
        "asoc": None,
        "spot_asoc": None,
    }
    # create text field
    text = record["sentence_value"]
    instance["text"] = text
    # create tokens field
    tokens = word_tokenize(text)
    text_length_list.append(len(tokens))
    instance["tokens"] = tokens
    # create entity field
    entities = record["sentence_entities"]
    instance_entity_list = []
    for entity in entities:
        entity_uri = entity["uri"]
        entity_mention = entity["surfaceform"]
        entity_type = entity["tag"]
        entity_offset = entity["boundaries_token"]
        # Normalize literal-valued tags to readable type names.
        if entity_type == "#dateTime":
            entity_type = "date time"
        elif entity_type == "#decimal":
            entity_type = "decimal"
        elif entity_type == "":
            entity_type = "other"
        if entity_mention == "":
            continue
        try:
            start_index, end_index = entity_offset[0], entity_offset[-1]
        except:
            # Entity has no token boundaries: count it and drop the whole
            # sentence. NOTE(review): bare except also hides other errors.
            global NON_OFFSET_ENTITY_CNT
            NON_OFFSET_ENTITY_CNT += 1
            return None
        # Sanity-check that the offset span reproduces the original mention
        # (compared after re-tokenization); mismatches are only counted.
        current_mention = " ".join(tokens[start_index:end_index+1])
        original_mention = " ".join(word_tokenize(entity_mention))
        if current_mention != original_mention:
            global NOMATCH_ENTITY_CNT
            NOMATCH_ENTITY_CNT += 1
        global ALL_ENTITY_CNT
        ALL_ENTITY_CNT += 1
        entity_offset = list(range(start_index, end_index+1))
        instance_entity = {
            "type": entity_type,
            "offset": entity_offset,
            "text": entity_mention,
            "uri": entity_uri
        }
        instance_entity_list.append(instance_entity)
    instance["entity"] = instance_entity_list
    # create spot field
    instance_entity_type_list = [i["type"] for i in instance_entity_list]
    instance["spot"] = list(set(instance_entity_type_list))
    entity_type_list.extend(instance_entity_type_list)
    # create relation field
    triples = record["sentence_triples"]
    instance_relation_list = []
    for triple in triples:
        subj = triple["subject"]
        obj = triple["object"]
        predicate = triple["predicate"]
        relation_type = predicate["surfaceform"]
        # Resolve subject/object to already-accepted entities by uri; skip
        # the triple when either endpoint was dropped above.
        try:
            head_entity = [i for i in instance_entity_list if i["uri"] == subj["uri"]][0]
        except IndexError:
            continue
        try:
            tail_entity = [i for i in instance_entity_list if i["uri"] == obj["uri"]][0]
        except IndexError:
            continue
        head_entity_type = head_entity["type"]
        tail_entity_type = tail_entity["type"]
        triple_type = (head_entity_type, relation_type, tail_entity_type)
        triple_type_list.append(triple_type)
        instance_relation = {
            "type": relation_type,
            "args": [
                head_entity,
                tail_entity
            ]
        }
        instance_relation_list.append(instance_relation)
    instance["relation"] = instance_relation_list
    # create asoc field
    instance_asoc_list = [i["type"] for i in instance_relation_list]
    instance["asoc"] = list(set(instance_asoc_list))
    relation_list.extend(instance_asoc_list)
    # create spot_asoc field
    instance_spot_asoc_list = []
    for entity in instance_entity_list:
        instance_spot_asoc = {
            "span": entity["text"],
            "label": entity["type"],
            "asoc": []
        }
        for triple in instance_relation_list:
            if triple["args"][0]["uri"] == entity["uri"]:
                asoc_record = [triple["type"], triple["args"][1]["text"]]
                instance_spot_asoc["asoc"].append(asoc_record)
        instance_spot_asoc_list.append(instance_spot_asoc)
    instance["spot_asoc"] = instance_spot_asoc_list
    # create record field
    # Linearize the spot_asoc structure into the <extra_id_*> target string.
    instance_record = "<extra_id_0> "
    for instance_spot_asoc in instance_spot_asoc_list:
        instance_record += "<extra_id_0> "
        instance_record += instance_spot_asoc["label"] + " "
        instance_record += "<extra_id_5> "
        instance_record += instance_spot_asoc["span"] + " "
        if len(instance_spot_asoc["asoc"]) != 0:
            for asoc in instance_spot_asoc["asoc"]:
                instance_record += "<extra_id_0> "
                instance_record += asoc[0] + " "
                instance_record += "<extra_id_5> "
                instance_record += asoc[1] + " "
                instance_record += "<extra_id_1> "
        instance_record += "<extra_id_1> "
    instance_record += "<extra_id_1>"
    instance["record"] = instance_record
    return instance
164,845 | from collections import Counter
import os
import json
from typing import Dict, List
from tqdm import tqdm
from universal_ie.generation_format.generation_format import GenerationFormat
from universal_ie.generation_format import generation_format_dict
from universal_ie.generation_format.structure_marker import BaseStructureMarker
from universal_ie.dataset import Dataset
from universal_ie.ie_format import Sentence
class GenerationFormat:
def __init__(self,
structure_maker: StructureMarker,
label_mapper: Dict = None,
language: str = 'en') -> None:
def get_label_str(self, label: Label):
def annotate_entities(
self, tokens: List[str], entities: List[Entity]):
def annotate_given_entities(self, tokens: List[str], entities: Union[List[Entity], Entity]):
def annotate_events(self, tokens: List[str], events: List[Event]):
def annotate_event_given_predicate(self, tokens: List[str], event: Event):
def annotate_relation_extraction(self, tokens: List[str],
relations: List[Relation]):
def output_schema(self, filename: str):
def get_entity_schema(self, entities: List[Entity]):
def get_relation_schema(self, relations: List[Relation]):
def get_event_schema(self, events: List[Event]):
class BaseStructureMarker(StructureMarker):
def __init__(self) -> None:
def convert_graph(
    generation_class: GenerationFormat,
    output_folder: str,
    datasets: Dict[str, List[Sentence]],
    language: str = "en",
    label_mapper: Dict = None,
):
    """Convert IE datasets into the linearized generation format and dump them.

    For every split in *datasets* this writes ``<split>.json`` (one JSON object
    per sentence with text/tokens/record/entity/relation/event/spot/asoc
    fields) into *output_folder*, plus ``record.schema`` and per-task
    entity/relation/event schema files derived from all annotations seen.

    Args:
        generation_class: GenerationFormat subclass used to linearize graphs.
        output_folder: destination directory (created if missing).
        datasets: mapping of split name -> list of Sentence instances.
        language: passed through to the convertor ('en'/'zh' token joining).
        label_mapper: optional label-renaming dict applied to all labels.
    """
    convertor = generation_class(
        structure_maker=BaseStructureMarker(),
        language=language,
        label_mapper=label_mapper,
    )
    counter = Counter()
    os.makedirs(output_folder, exist_ok=True)
    # Annotations from every split are pooled to build the task schemas below.
    schema_counter = {
        "entity": list(),
        "relation": list(),
        "event": list(),
    }
    for data_type, instance_list in datasets.items():
        with open(os.path.join(output_folder, f"{data_type}.json"), "w") as output:
            for instance in tqdm(instance_list):
                counter.update([f"{data_type} sent"])
                # annonote_graph appears to return (src, tgt, spot labels,
                # asoc labels, spot-asoc structures) -- inferred from the
                # unpacking below; TODO confirm against GenerationFormat.
                converted_graph = convertor.annonote_graph(
                    tokens=instance.tokens,
                    entities=instance.entities,
                    relations=instance.relations,
                    events=instance.events,
                )
                src, tgt, spot_labels, asoc_labels = converted_graph[:4]
                spot_asoc = converted_graph[4]
                schema_counter["entity"] += instance.entities
                schema_counter["relation"] += instance.relations
                schema_counter["event"] += instance.events
                output.write(
                    "%s\n"
                    % json.dumps(
                        {
                            "text": src,
                            "tokens": instance.tokens,
                            "record": tgt,
                            "entity": [
                                entity.to_offset(label_mapper)
                                for entity in instance.entities
                            ],
                            "relation": [
                                relation.to_offset(
                                    ent_label_mapper=label_mapper,
                                    rel_label_mapper=label_mapper,
                                )
                                for relation in instance.relations
                            ],
                            "event": [
                                event.to_offset(evt_label_mapper=label_mapper)
                                for event in instance.events
                            ],
                            "spot": list(spot_labels),
                            "asoc": list(asoc_labels),
                            "spot_asoc": spot_asoc,
                        },
                        ensure_ascii=False,
                    )
                )
    convertor.output_schema(os.path.join(output_folder, "record.schema"))
    convertor.get_entity_schema(schema_counter["entity"]).write_to_file(
        os.path.join(output_folder, f"entity.schema")
    )
    convertor.get_relation_schema(schema_counter["relation"]).write_to_file(
        os.path.join(output_folder, f"relation.schema")
    )
    convertor.get_event_schema(schema_counter["event"]).write_to_file(
        os.path.join(output_folder, f"event.schema")
    )
    print(counter)
    print(output_folder)
    print("==========================")
164,846 | from collections import Counter
import os
import json
from typing import Dict, List
from tqdm import tqdm
from universal_ie.generation_format.generation_format import GenerationFormat
from universal_ie.generation_format import generation_format_dict
from universal_ie.generation_format.structure_marker import BaseStructureMarker
from universal_ie.dataset import Dataset
from universal_ie.ie_format import Sentence
def convert_to_oneie(output_folder: str, datasets: Dict[str, List[Sentence]]):
    """Dump datasets to OneIE-style JSON-lines files (``<split>.oneie.json``).

    Each output line holds one sentence with its entity, relation and event
    mentions; spans are converted to half-open token offsets (end = last
    token index + 1).

    Args:
        output_folder: destination directory (created if missing).
        datasets: mapping of split name -> list of Sentence instances.
    """
    os.makedirs(output_folder, exist_ok=True)
    counter = Counter()
    for data_type, instance_list in datasets.items():
        with open(
            os.path.join(output_folder, f"{data_type}.oneie.json"), "w"
        ) as output:
            for instance in tqdm(instance_list):
                counter.update([f"{data_type} sent"])
                entity_mentions = [
                    {
                        "id": entity.record_id,
                        "entity_type": str(entity.label),
                        "text": entity.span.text,
                        "start": entity.span.indexes[0],
                        # half-open interval: end is one past the last token
                        "end": entity.span.indexes[-1] + 1,
                    }
                    for entity in instance.entities
                ]
                # NOTE(review): the key is "argument" (singular) here and for
                # events below -- confirm the downstream reader expects that.
                relation_mentions = [
                    {
                        "id": relation.record_id,
                        "relation_type": str(relation.label),
                        "argument": [
                            {
                                "entity_id": relation.arg1.record_id,
                                "text": relation.arg1.span.text,
                                "role": "Arg-1",
                            },
                            {
                                "entity_id": relation.arg2.record_id,
                                "text": relation.arg2.span.text,
                                "role": "Arg-2",
                            },
                        ],
                    }
                    for relation in instance.relations
                ]
                event_mentions = [
                    {
                        "id": event.record_id,
                        "event_type": str(event.label),
                        "trigger": {
                            "text": event.span.text,
                            "start": event.span.indexes[0],
                            "end": event.span.indexes[-1] + 1,
                        },
                        # event.args is assumed to be (role_label, argument)
                        # pairs -- inferred from the indexing below.
                        "argument": [
                            {
                                "id": arg[1].record_id,
                                "text": arg[1].span.text,
                                "role": str(arg[0]),
                            }
                            for arg in event.args
                        ],
                    }
                    for event in instance.events
                ]
                instance_dict = {
                    "tokens": instance.tokens,
                    "sent_id": instance.text_id,
                    "entity_mentions": entity_mentions,
                    "relation_mentions": relation_mentions,
                    "event_mentions": event_mentions,
                }
                instance_str = json.dumps(instance_dict, ensure_ascii=False)
                output.write(f"{instance_str}\n")
    print(counter)
    print(output_folder)
    print("==========================")
164,847 | from typing import List
import os
import sys
def tokens_to_str(tokens: List[str], language: str = 'en') -> str:
    """Join tokens into a sentence string for the given language.

    English tokens are space-separated; Chinese tokens are concatenated.

    Raises:
        NotImplementedError: for any unsupported language code.
    """
    separators = {'en': ' ', 'zh': ''}
    if language not in separators:
        raise NotImplementedError('Language %s not supported' % language)
    return separators[language].join(tokens)
164,848 | from typing import List
import os
import sys
def label_format(s):
    """Normalize a label string to lower-cased, space-separated words.

    Camel-case boundaries are split, ``_``/``-``/``.`` become spaces, and a
    duplicated leading word (e.g. "org org") is collapsed to one.
    """
    import re
    # Split camelCase ("OrgOrg" -> "Org Org"), then acronym runs
    # ("ABCWord" -> "ABC Word"), then lower-case everything.
    spaced = re.sub(r'([^A-Z ])([A-Z])', r'\1 \2', s)
    spaced = re.sub(r'\b[A-Z]+(?=[A-Z][a-z])', r'\g<0> ', spaced).lower()
    words = spaced.replace("_", " ").replace("-", " ").replace(".", " ").split()
    if len(words) > 1 and words[0] == words[1]:
        words = words[1:]
    return " ".join(words)
def load_dict_ini_file(filename):
    """Load a ``key=value`` ini-style file into ``{key: formatted_label}``.

    Deprecated. Returns an empty dict (with a stderr warning) when the
    file does not exist; values are normalized with :func:`label_format`.

    Args:
        filename: path to the ini-style mapping file.

    Returns:
        dict: mapping of raw key -> formatted label value.
    """
    print("Warning: `load_dict_ini_file` is deprecated.")
    if not os.path.exists(filename):
        # Name the missing path in the warning: the old message was an
        # f-string with no placeholder, so the filename was never reported.
        sys.stderr.write(f'[warning] cannot load label mapper from {filename}\n')
        return {}
    mapper = dict()
    # Use a context manager so the input handle is closed (the old code
    # iterated a bare open() and leaked the file object).
    with open(filename) as ini_file:
        for line in ini_file:
            key, value = line.strip().split('=')
            mapper[key] = label_format(value)
    return mapper
164,849 | from typing import List
import os
import sys
The provided code snippet includes necessary dependencies for implementing the `change_ptb_token_back` function. Write a Python function `def change_ptb_token_back(token)` to solve the following problem:
将 PTBTokenized 的 Token 转换回原始字符串 Args: token (str): PTBTokenize 后的 Token 字符串 Returns: str: 原始 Token 字符串
Here is the function:
def change_ptb_token_back(token):
    """Map a PTB-tokenized token back to its original surface string.

    Args:
        token (str): token string produced by PTBTokenize.

    Returns:
        str: the original token; unchanged when it is not a PTB escape.
    """
    ptb_token_map = {
        '``': '"',
        "''": '"',
        '-LRB-': '(',
        '-RRB-': ')',
        '-LSB-': '[',
        '-RSB-': ']',
        '-LCB-': '{',
        '-RCB-': '}',
    }
    # Direct dict lookup replaces the original linear scan over the map.
    return ptb_token_map.get(token, token)
164,850 | from typing import List
import os
import sys
global_mislabel_log = set()  # labels already warned about, so each is reported once


def change_name_using_label_mapper(label_name, label_mapper):
    """Map *label_name* through *label_mapper*, warning once per unknown label.

    Args:
        label_name: original label string.
        label_mapper: dict of old-name -> new-name; ``None``/empty means
            identity mapping.

    Returns:
        The mapped name, or *label_name* itself when no mapping exists.
    """
    if label_mapper is None or len(label_mapper) == 0:
        return label_name
    if label_name not in label_mapper:
        global global_mislabel_log
        # Warn only the first time an unmapped label is seen: the old code
        # recorded the label in the log set but printed on every call.
        if label_name not in global_mislabel_log:
            print(f"{label_name} not found in mapper")
            global_mislabel_log.add(label_name)
    return label_mapper.get(label_name, label_name)
164,851 | from collections import Counter
import json
from typing import List, Optional, Tuple, Set
from tqdm import tqdm
from universal_ie.task_format.task_format import TaskFormat
from universal_ie.utils import tokens_to_str
from universal_ie.ie_format import Entity, Label, Sentence, Span
The provided code snippet includes necessary dependencies for implementing the `bio_tags_to_spans` function. Write a Python function `def bio_tags_to_spans( tag_sequence: List[str], classes_to_ignore: List[str] = None ) -> List[Tuple[str, Tuple[int, int]]]` to solve the following problem:
Given a sequence corresponding to BIO tags, extracts spans. Spans are inclusive and can be of zero length, representing a single word span. Ill-formed spans are also included (i.e those which do not start with a "B-LABEL"), as otherwise it is possible to get a perfect precision score whilst still predicting ill-formed spans in addition to the correct spans. This function works properly when the spans are unlabeled (i.e., your labels are simply "B", "I", and "O"). # Parameters tag_sequence : `List[str]`, required. The integer class labels for a sequence. classes_to_ignore : `List[str]`, optional (default = `None`). A list of string class labels `excluding` the bio tag which should be ignored when extracting spans. # Returns spans : `List[TypedStringSpan]` The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)). Note that the label `does not` contain any BIO tag prefixes.
Here is the function:
def bio_tags_to_spans(
    tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[Tuple[str, Tuple[int, int]]]:
    """
    Extract typed spans from a BIO tag sequence.

    Spans are inclusive ``(start, end)`` token indexes and may cover a single
    token. Ill-formed spans (an "I-LABEL" that does not continue a span of
    the same label) are kept and open a new span, so a model cannot score
    perfect precision while emitting malformed spans. Also works for
    unlabeled tags ("B"/"I"/"O").

    # Parameters
    tag_sequence : `List[str]`, required.
        The string tags for a sequence.
    classes_to_ignore : `List[str]`, optional (default = `None`).
        Labels (without the BIO prefix) whose spans should be dropped.

    # Returns
    spans : `List[TypedStringSpan]`
        ``(label, (span_start, span_end))`` tuples; labels carry no BIO prefix.
    """
    ignored = classes_to_ignore or []
    found: Set[Tuple[str, Tuple[int, int]]] = set()
    current_label: Optional[str] = None
    start = end = 0
    for position, tag in enumerate(tag_sequence):
        prefix = tag[0]
        if prefix not in ("B", "I", "O"):
            raise RuntimeError('Invalid tag sequence %s' % tag_sequence)
        label = tag[2:]
        if prefix == "O" or label in ignored:
            # "O" (or an ignored class) closes any open span.
            if current_label is not None:
                found.add((current_label, (start, end)))
            current_label = None
        elif prefix == "B" or label != current_label:
            # "B" always opens a span; an "I" with a mismatched label (or no
            # open span) is ill-formed but still opens a new span.
            if current_label is not None:
                found.add((current_label, (start, end)))
            current_label = label
            start = end = position
        else:
            # "I" continuing the currently open span.
            end += 1
    # A span may still be open at the end of the sequence.
    if current_label is not None:
        found.add((current_label, (start, end)))
    return list(found)
164,852 | from collections import Counter
import json
from typing import List, Optional, Tuple, Set
from tqdm import tqdm
from universal_ie.task_format.task_format import TaskFormat
from universal_ie.utils import tokens_to_str
from universal_ie.ie_format import Entity, Label, Sentence, Span
def _iob1_start_of_chunk(
prev_bio_tag: Optional[str],
prev_conll_tag: Optional[str],
curr_bio_tag: str,
curr_conll_tag: str,
) -> bool:
if curr_bio_tag == "B":
return True
if curr_bio_tag == "I" and prev_bio_tag == "O":
return True
if curr_bio_tag != "O" and prev_conll_tag != curr_conll_tag:
return True
return False
The provided code snippet includes necessary dependencies for implementing the `iob1_tags_to_spans` function. Write a Python function `def iob1_tags_to_spans( tag_sequence: List[str], classes_to_ignore: List[str] = None ) -> List[Tuple[str, Tuple[int, int]]]` to solve the following problem:
Given a sequence corresponding to IOB1 tags, extracts spans. Spans are inclusive and can be of zero length, representing a single word span. Ill-formed spans are also included (i.e., those where "B-LABEL" is not preceded by "I-LABEL" or "B-LABEL"). # Parameters tag_sequence : `List[str]`, required. The integer class labels for a sequence. classes_to_ignore : `List[str]`, optional (default = `None`). A list of string class labels `excluding` the bio tag which should be ignored when extracting spans. # Returns spans : `List[TypedStringSpan]` The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)). Note that the label `does not` contain any BIO tag prefixes.
Here is the function:
def iob1_tags_to_spans(
    tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[Tuple[str, Tuple[int, int]]]:
    """
    Extract typed spans from an IOB1 tag sequence.

    Spans are inclusive ``(start, end)`` token indexes and may cover a single
    token. Ill-formed spans (e.g. a "B-LABEL" not preceded by
    "I-LABEL"/"B-LABEL") are still extracted.

    # Parameters
    tag_sequence : `List[str]`, required.
        The string tags for a sequence.
    classes_to_ignore : `List[str]`, optional (default = `None`).
        Labels (without the IOB1 prefix) whose spans should be dropped.

    # Returns
    spans : `List[TypedStringSpan]`
        ``(label, (span_start, span_end))`` tuples; labels carry no prefix.
    """
    ignored = classes_to_ignore or []
    found: Set[Tuple[str, Tuple[int, int]]] = set()
    active: Optional[str] = None
    start = end = 0
    last_prefix: Optional[str] = None
    last_label: Optional[str] = None
    for position, tag in enumerate(tag_sequence):
        prefix, label = tag[0], tag[2:]
        if prefix not in ("B", "I", "O"):
            raise RuntimeError('Invalid tag sequence %s' % tag_sequence)
        if prefix == "O" or label in ignored:
            # "O" (or an ignored class) closes any open span.
            if active is not None:
                found.add((active, (start, end)))
            active = None
        elif prefix == "B" or last_prefix == "O" or last_label != label:
            # Start of a new chunk (inlined `_iob1_start_of_chunk` logic):
            # explicit "B", an "I" after "O", or a label change.
            if active is not None:
                found.add((active, (start, end)))
            active = label
            start = end = position
        else:
            # Same label continuing the open span.
            end += 1
        last_prefix, last_label = prefix, label
    # A span may still be open at the end of the sequence.
    if active is not None:
        found.add((active, (start, end)))
    return list(found)
164,853 | from collections import Counter
import json
from typing import List, Optional, Tuple, Set
from tqdm import tqdm
from universal_ie.task_format.task_format import TaskFormat
from universal_ie.utils import tokens_to_str
from universal_ie.ie_format import Entity, Label, Sentence, Span
The provided code snippet includes necessary dependencies for implementing the `bmes_tags_to_spans` function. Write a Python function `def bmes_tags_to_spans( tag_sequence: List[str], classes_to_ignore: List[str] = None ) -> List[Tuple[str, Tuple[int, int]]]` to solve the following problem:
Given a sequence corresponding to BMES tags, extracts spans. Spans are inclusive and can be of zero length, representing a single word span. Ill-formed spans are also included (i.e those which do not start with a "B-LABEL"), as otherwise it is possible to get a perfect precision score whilst still predicting ill-formed spans in addition to the correct spans. This function works properly when the spans are unlabeled (i.e., your labels are simply "B", "M", "E" and "S"). # Parameters tag_sequence : `List[str]`, required. The integer class labels for a sequence. classes_to_ignore : `List[str]`, optional (default = `None`). A list of string class labels `excluding` the bio tag which should be ignored when extracting spans. # Returns spans : `List[TypedStringSpan]` The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)). Note that the label `does not` contain any BIO tag prefixes.
Here is the function:
def bmes_tags_to_spans(
    tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[Tuple[str, Tuple[int, int]]]:
    """
    Extract typed spans from a BMES tag sequence.

    Spans are inclusive ``(start, end)`` token indexes. An "M"/"E" tag only
    extends the previous span after a valid "B"/"M" transition with the same
    label; every other tag opens a fresh span (best-effort handling of
    ill-formed sequences). Also works for unlabeled "B"/"M"/"E"/"S" tags.

    # Parameters
    tag_sequence : `List[str]`, required.
        The string tags for a sequence.
    classes_to_ignore : `List[str]`, optional (default = `None`).
        Labels (without the BMES prefix) whose spans should be dropped.

    # Returns
    spans : `List[TypedStringSpan]`
        ``(label, (span_start, span_end))`` tuples; labels carry no prefix.
    """
    collected: List[Tuple[str, List[int]]] = []
    last_prefix: Optional[str] = None
    for position, tag in enumerate(tag_sequence):
        prefix, label = tag[0], tag[2:]
        extends = (
            prefix in ("M", "E")
            and last_prefix in ("B", "M")
            and collected
            and collected[-1][0] == label
        )
        if extends:
            # Valid B/M -> M/E transition with a matching label.
            collected[-1][1][1] = position
        else:
            # "B"/"S", or a best-effort split for an invalid transition.
            collected.append((label, [position, position]))
        last_prefix = prefix
    ignored = classes_to_ignore or []
    return [
        (label, (bounds[0], bounds[1]))
        for label, bounds in collected
        if label not in ignored
    ]
164,854 | from collections import Counter
import json
from typing import List, Optional, Tuple, Set
from tqdm import tqdm
from universal_ie.task_format.task_format import TaskFormat
from universal_ie.utils import tokens_to_str
from universal_ie.ie_format import Entity, Label, Sentence, Span
def bioul_tags_to_spans(
    tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[Tuple[str, Tuple[int, int]]]:
    """
    Extract typed spans from a BIOUL tag sequence.

    Spans are inclusive ``(start, end)`` token indexes. Unlike the BIO/IOB1
    variants, ill-formed sequences are rejected with a ``RuntimeError``.
    Also works for unlabeled "B"/"I"/"O"/"U"/"L" tags.

    # Parameters
    tag_sequence : `List[str]`, required.
        Tags encoded in BIOUL, e.g. ``["B-PER", "L-PER", "O"]``.
    classes_to_ignore : `List[str]`, optional (default = `None`).
        Labels (without the BIOUL prefix) whose spans should be dropped.

    # Returns
    spans : `List[TypedStringSpan]`
        ``(label, (span_start, span_end))`` tuples.
    """
    ignored = classes_to_ignore or []
    found = []
    position = 0
    total = len(tag_sequence)
    while position < total:
        tag = tag_sequence[position]
        if tag[0] == "U":
            # Unit-length span.
            found.append((tag.partition("-")[2], (position, position)))
        elif tag[0] == "B":
            # Consume "I" tags until the closing "L"; anything else is invalid.
            begin = position
            while tag[0] != "L":
                position += 1
                if position >= total:
                    raise RuntimeError('Invalid tag sequence %s' % tag_sequence)
                tag = tag_sequence[position]
                if tag[0] not in ("I", "L"):
                    raise RuntimeError('Invalid tag sequence %s' % tag_sequence)
            found.append((tag.partition("-")[2], (begin, position)))
        elif tag != "O":
            raise RuntimeError('Invalid tag sequence %s' % tag_sequence)
        position += 1
    return [span for span in found if span[0] not in ignored]
The provided code snippet includes necessary dependencies for implementing the `bmeso_tags_to_spans` function. Write a Python function `def bmeso_tags_to_spans( tag_sequence: List[str], classes_to_ignore: List[str] = None ) -> List[Tuple[str, Tuple[int, int]]]` to solve the following problem:
bmeso -> bioul B = Beginning I/M = Inside / Middle L/E = Last / End O = Outside U/W/S = Unit-length / Whole / Singleton
Here is the function:
def bmeso_tags_to_spans(
    tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[Tuple[str, Tuple[int, int]]]:
    """
    Extract typed spans from a BMESO tag sequence by rewriting it to BIOUL.

    Prefix correspondence:
        B = Beginning, M -> I (Inside/Middle), E -> L (End/Last),
        O = Outside, S -> U (Singleton/Unit-length).
    """
    prefix_map = {'M': 'I', 'E': 'L', 'S': 'U'}
    rewritten = []
    for tag in tag_sequence:
        replacement = prefix_map.get(tag[0])
        if replacement is None:
            rewritten.append(tag)
        else:
            rewritten.append(replacement + '-' + tag.partition("-")[2])
    return bioul_tags_to_spans(tag_sequence=rewritten, classes_to_ignore=classes_to_ignore)
164,855 | from collections import Counter
import json
from typing import List, Optional, Tuple, Set
from tqdm import tqdm
from universal_ie.task_format.task_format import TaskFormat
from universal_ie.utils import tokens_to_str
from universal_ie.ie_format import Entity, Label, Sentence, Span
def bioul_tags_to_spans(
    tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[Tuple[str, Tuple[int, int]]]:
    """
    Given a sequence corresponding to BIOUL tags, extracts spans.
    Spans are inclusive and can be of zero length, representing a single word span.
    Ill-formed spans are not allowed and will raise `InvalidTagSequence`.
    This function works properly when the spans are unlabeled (i.e., your labels are
    simply "B", "I", "O", "U", and "L").
    # Parameters
    tag_sequence : `List[str]`, required.
        The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
    classes_to_ignore : `List[str]`, optional (default = `None`).
        A list of string class labels `excluding` the bio tag
        which should be ignored when extracting spans.
    # Returns
    spans : `List[TypedStringSpan]`
        The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
    """
    spans = []
    classes_to_ignore = classes_to_ignore or []
    index = 0
    while index < len(tag_sequence):
        label = tag_sequence[index]
        if label[0] == "U":
            # Unit-length span: starts and ends at this token.
            spans.append((label.partition("-")[2], (index, index)))
        elif label[0] == "B":
            # Consume "I" tags until the closing "L"; anything else is invalid.
            start = index
            while label[0] != "L":
                index += 1
                if index >= len(tag_sequence):
                    raise RuntimeError('Invalid tag sequence %s' % tag_sequence)
                    # raise InvalidTagSequence(tag_sequence)
                label = tag_sequence[index]
                if not (label[0] == "I" or label[0] == "L"):
                    raise RuntimeError('Invalid tag sequence %s' % tag_sequence)
                    # raise InvalidTagSequence(tag_sequence)
            spans.append((label.partition("-")[2], (start, index)))
        else:
            # Only a bare "O" is allowed outside spans.
            if label != "O":
                raise RuntimeError('Invalid tag sequence %s' % tag_sequence)
                # raise InvalidTagSequence(tag_sequence)
        index += 1
    return [span for span in spans if span[0] not in classes_to_ignore]
The provided code snippet includes necessary dependencies for implementing the `bieso_tags_to_spans` function. Write a Python function `def bieso_tags_to_spans( tag_sequence: List[str], classes_to_ignore: List[str] = None ) -> List[Tuple[str, Tuple[int, int]]]` to solve the following problem:
bieso -> bioul B = Beginning I/M = Inside / Middle L/E = Last / End O = Outside U/W/S = Unit-length / Whole / Singleton
Here is the function:
def bieso_tags_to_spans(
    tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[Tuple[str, Tuple[int, int]]]:
    """
    Extract typed spans from a BIESO tag sequence by rewriting it to BIOUL.

    Prefix correspondence:
        B = Beginning, I = Inside, E -> L (End/Last),
        O = Outside, S -> U (Singleton/Unit-length).
    """
    prefix_map = {'E': 'L', 'S': 'U'}
    rewritten = []
    for tag in tag_sequence:
        replacement = prefix_map.get(tag[0])
        if replacement is None:
            rewritten.append(tag)
        else:
            rewritten.append(replacement + '-' + tag.partition("-")[2])
    return bioul_tags_to_spans(tag_sequence=rewritten, classes_to_ignore=classes_to_ignore)
164,856 | from collections import defaultdict
from typing import List, Dict
from universal_ie.utils import tokens_to_str
from universal_ie.generation_format.generation_format import GenerationFormat, StructureMarker
from universal_ie.ie_format import Entity, Event, Label, Relation, Span
def convert_spot_asoc(spot_asoc_instance, structure_maker):
    """Render one spot-asoc instance as the linearized target string.

    Args:
        spot_asoc_instance: list of dicts with keys ``label``, ``span`` and an
            optional ``asoc`` list of ``(asoc_label, asoc_span)`` pairs.
        structure_maker: object providing the structure marker tokens
            (sent/record/span start-end markers and the target-span marker).

    Returns:
        str: the bracketed target sequence for generation.
    """
    record_texts = []
    for spot in spot_asoc_instance:
        pieces = [
            spot['label'],
            structure_maker.target_span_start,
            spot['span'],
        ]
        # Each association is rendered as one bracketed sub-span.
        for asoc_label, asoc_span in spot.get('asoc', list()):
            pieces.append(' '.join([
                structure_maker.span_start,
                asoc_label,
                structure_maker.target_span_start,
                asoc_span,
                structure_maker.span_end,
            ]))
        record_texts.append(' '.join([
            structure_maker.record_start,
            ' '.join(pieces),
            structure_maker.record_end,
        ]))
    return ' '.join([
        structure_maker.sent_start,
        ' '.join(record_texts),
        structure_maker.sent_end,
    ])
164,857 | import json
from collections import defaultdict
from typing import List
class RecordSchema:
    """Extraction schema: record types, roles, and each type's allowed roles.

    Serialized as a three-line JSON file (type list, role list,
    type->roles dict); see `read_from_file` / `write_to_file`.
    """

    def __init__(self, type_list, role_list, type_role_dict):
        self.type_list = type_list
        self.role_list = role_list
        self.type_role_dict = type_role_dict

    @staticmethod
    def read_from_file(filename):
        """Load a schema from its three-line JSON file.

        Declared @staticmethod (the old plain function broke when called on
        an instance); the file handle is now closed instead of leaked.
        """
        with open(filename) as schema_file:
            lines = schema_file.readlines()
        type_list = json.loads(lines[0])
        role_list = json.loads(lines[1])
        type_role_dict = json.loads(lines[2])
        return RecordSchema(type_list, role_list, type_role_dict)

    def write_to_file(self, filename):
        """Write the schema as three JSON lines, mirroring `read_from_file`."""
        with open(filename, 'w') as output:
            output.write(json.dumps(self.type_list, ensure_ascii=False) + '\n')
            output.write(json.dumps(self.role_list, ensure_ascii=False) + '\n')
            output.write(json.dumps(self.type_role_dict, ensure_ascii=False) + '\n')
def merge_schema(schema_list: List[RecordSchema]):
    """Union several RecordSchema objects into one merged schema.

    Type and role lists are deduplicated, and each type's role list is the
    deduplicated union across all input schemas.
    """
    merged_types = set()
    merged_roles = set()
    merged_type_roles = defaultdict(list)
    for schema in schema_list:
        merged_types.update(schema.type_list)
        merged_roles.update(schema.role_list)
        for type_name in schema.type_role_dict:
            merged_type_roles[type_name] += schema.type_role_dict[type_name]
    # Deduplicate the accumulated role list of every type.
    for type_name in merged_type_roles:
        merged_type_roles[type_name] = list(set(merged_type_roles[type_name]))
    return RecordSchema(
        type_list=list(merged_types),
        role_list=list(merged_roles),
        type_role_dict=merged_type_roles,
    )
164,858 | import os
import math
import shutil
import random
import argparse
def split_ratio_file(in_filename, out_filename, ratio=0.1, seed=None):
    """Randomly sample a *ratio* fraction of the lines of one file into another.

    Args:
        in_filename: source text file.
        out_filename: destination file (stripped lines, one per row).
        ratio: fraction of lines to keep; math.ceil keeps at least one line
            for any non-zero ratio.
        seed: optional RNG seed for reproducible sampling.
    """
    # Read via a context manager so the input handle is closed
    # (the old code leaked the file object returned by open()).
    with open(in_filename) as in_file:
        lines = in_file.readlines()
    if seed:
        random.seed(seed)
    random.shuffle(lines)
    lines = lines[:math.ceil(len(lines) * ratio)]
    with open(out_filename, 'w') as output:
        for line in lines:
            output.write(line.strip() + '\n')
164,859 | import os
import shutil
import random
import argparse
from collections import defaultdict
import json
import sys
from universal_ie.record_schema import RecordSchema
def n_shot_smaple(source_filename, target_filename, record_schema,
                  spot_asoc_key='spot', num_shot=5, min_len=None, seed=None):
    """Down-sample a JSON-lines dataset to an n-shot training file.

    For every spot type in *record_schema*, up to *num_shot* sentences that
    contain that type (and meet *min_len*, if given) are sampled after an
    optional seeded shuffle, then written to *target_filename*.

    Returns:
        list: the sampled instances, in sampling order.
    """
    train_data = [json.loads(line.strip()) for line in open(source_filename)]
    if seed:
        random.seed(seed)
    random.shuffle(train_data)
    # Index each sentence by the spot types it contains.
    candidates = defaultdict(list)
    for position, instance in enumerate(train_data):
        too_short = min_len is not None and len(instance['tokens']) < min_len
        for spot in instance[spot_asoc_key]:
            if spot in record_schema.type_list and not too_short:
                candidates[spot].append(position)
    sampled_data = list()
    for entity_type, positions in candidates.items():
        if len(positions) < num_shot:
            # Not enough sentences for this type: keep them all and warn.
            sys.stderr.write(
                f'[WARN] {entity_type} in {source_filename} is less than shot num {num_shot}\n'
            )
            chosen = positions
        else:
            chosen = random.sample(positions, num_shot)
        sampled_data += [train_data[position] for position in chosen]
    with open(target_filename, 'w') as output:
        for instance in sampled_data:
            output.write(json.dumps(instance) + '\n')
    return sampled_data
164,860 | import json
import os
import sys
from collections import Counter
import tabulate
def count_line_in_file(filename):
    """Return the number of lines in *filename* (closes the handle; the old
    code leaked the file object and built a throwaway list)."""
    with open(filename) as data_file:
        return sum(1 for _ in data_file)
def count_record_in_file(filename, key):
    """Count entity/relation/event/role records in a JSON-lines dataset file.

    Counter keys are prefixed with *key*, e.g. ``"train entity"``; a role is
    counted per argument of each event.
    """
    counter = Counter()
    # Context manager closes the handle (the old code iterated a bare open()).
    with open(filename) as data_file:
        for line in data_file:
            instance = json.loads(line)
            counter.update([key + ' entity'] * len(instance['entity']))
            counter.update([key + ' relation'] * len(instance['relation']))
            counter.update([key + ' event'] * len(instance['event']))
            for event in instance['event']:
                counter.update([key + ' role'] * len(event['args']))
    return counter
def count_folder(folder_name):
    """Collect per-split instance/record counts and schema sizes for a folder.

    Returns a flat dict: ``{'name': folder, '<split>': line count,
    '<split> entity/relation/event/role': record counts, and
    'entity'/'relation'/'event': schema size (0 when the schema file is
    missing)}``.
    """
    data_map = {
        'train': 'train.json',
        'val': 'val.json',
        'test': 'test.json',
    }
    stats = {'name': folder_name}
    for key, name in data_map.items():
        filename = f"{folder_name}/{name}"
        if not os.path.exists(filename):
            # Name the missing file: the old f-string had no placeholder,
            # so the warning never said which file was absent.
            sys.stderr.write(f'[warn] {filename} not exists.\n')
            continue
        stats[key] = count_line_in_file(filename)
        stats.update(count_record_in_file(filename, key))
    for key in ['entity', 'relation', 'event']:
        filename = f"{folder_name}/{key}.schema"
        if not os.path.exists(filename):
            sys.stderr.write(f'[warn] {filename} not exists.\n')
            stats[key] = 0
            continue
        # Schema files keep the type list on their first line; close the
        # handle (the old code leaked the file object).
        with open(filename) as schema_file:
            stats[key] = len(json.loads(schema_file.readline()))
    return stats
164,861 | import json
import os
import sys
from collections import Counter
import tabulate
def walk_dir(folder_name):
    """Yield every sub-directory under *folder_name* that contains a
    ``record.schema`` file (i.e. looks like a prepared dataset folder).

    Fixes: the original clobbered the `folder_name` parameter with an
    unused assignment, iterated `dirs` with a variable named `file`, and
    recomputed `os.path.join(root, file)` three times per entry.
    """
    for root, sub_dirs, _files in os.walk(folder_name):
        for sub_dir in sub_dirs:
            candidate = os.path.join(root, sub_dir)
            if os.path.exists(os.path.join(candidate, 'record.schema')):
                yield candidate
164,863 | import sys
import logging
import pdb
import os
import json
from pathlib import Path
import pickle
from contextlib import nullcontext
from dataclasses import asdict, fields
from transformers.hf_argparser import HfArgumentParser
from transformers.training_args_seq2seq import Seq2SeqTrainingArguments
from transformers.models.auto import AutoConfig, AutoTokenizer
from transformers.data.data_collator import DataCollatorForSeq2Seq
from transformers.trainer_utils import get_last_checkpoint, set_seed
from transformers.models.t5.modeling_t5 import T5ForConditionalGeneration
from transformers.models.t5.tokenization_t5_fast import T5TokenizerFast
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from tokenizers import AddedToken
from seq2seq.utils.args import ModelArguments
from seq2seq.utils.picard_model_wrapper import PicardArguments, PicardLauncher, with_picard
from seq2seq.utils.dataset import DataTrainingArguments, DataArguments
from seq2seq.utils.dataset_loader import load_dataset
from seq2seq.utils.spider import SpiderTrainer
def fetch_seq2seq_dataset(dataset_splits_dataset=None, output_path=None):
    """Dump an indexable dataset to *output_path* as a single JSON object
    mapping each example's integer position to the example itself.

    Bug fix: the output handle is now closed via ``with`` so the file is
    flushed deterministically (the original passed a bare ``open(...)`` to
    ``json.dump`` and relied on GC to flush/close it).
    """
    seq2seq_dataset = {
        i: dataset_splits_dataset[i]
        for i in range(len(dataset_splits_dataset))
    }
    with open(output_path, 'w') as writer:
        json.dump(seq2seq_dataset, writer, indent=4)
164,864 | import sys
import logging
import pdb
import os
import json
from pathlib import Path
import pickle
from contextlib import nullcontext
from dataclasses import asdict, fields
from transformers.hf_argparser import HfArgumentParser
from transformers.training_args_seq2seq import Seq2SeqTrainingArguments
from transformers.models.auto import AutoConfig, AutoTokenizer
from transformers.data.data_collator import DataCollatorForSeq2Seq
from transformers.trainer_utils import get_last_checkpoint, set_seed
from transformers.models.t5.modeling_t5 import T5ForConditionalGeneration
from transformers.models.t5.tokenization_t5_fast import T5TokenizerFast
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from tokenizers import AddedToken
from seq2seq.utils.args import ModelArguments
from seq2seq.utils.picard_model_wrapper import PicardArguments, PicardLauncher, with_picard
from seq2seq.utils.dataset import DataTrainingArguments, DataArguments
from seq2seq.utils.dataset_loader import load_dataset
from seq2seq.utils.spider import SpiderTrainer
from seq2seq.utils.dataset_graph import TokenizedDataset
def fetch_seq2seq_dataset(dataset_splits_dataset=None, output_path=None):
    """Dump an indexable dataset to *output_path* as a single JSON object
    mapping each example's integer position to the example itself.

    Bug fix: the output handle is now closed via ``with`` so the file is
    flushed deterministically (the original passed a bare ``open(...)`` to
    ``json.dump`` and relied on GC to flush/close it).
    """
    seq2seq_dataset = {
        i: dataset_splits_dataset[i]
        for i in range(len(dataset_splits_dataset))
    }
    with open(output_path, 'w') as writer:
        json.dump(seq2seq_dataset, writer, indent=4)
164,865 | import copy
import math
import os
import warnings
import torch.nn.functional as F
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
from .graphix.rgat_tuning import RGAT_Layer
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_t5` function. Write a Python function `def load_tf_weights_in_t5(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Reads every variable from the TensorFlow checkpoint at
    `tf_checkpoint_path`, maps each variable's slash-separated scope path
    onto the matching attribute path of the PyTorch `model`, and copies
    the weights in place (transposed, cast to float32).  Optimizer slots
    and other non-model variables are skipped.  Returns `model`.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        tf_weights[name] = array
    # Walk each variable's scope path segment by segment, descending into
    # the matching PyTorch submodule/attribute.
    for txt_name in names:
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        if "_slot_" in name[-1]:
            # Optimizer slot variables carry no model weights.
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        pointer = model
        array = tf_weights[txt_name]
        for m_name in name:
            # Segments like "block_0" split into ["block", "0", ""]: the
            # trailing index selects an element from a ModuleList below.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] in ["kernel", "scale", "embedding"]:
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "self_attention":
                pointer = getattr(pointer, "layer")
                pointer = pointer[0]
            elif scope_names[0] == "enc_dec_attention":
                pointer = getattr(pointer, "layer")
                pointer = pointer[1]
            elif scope_names[0] == "dense_relu_dense":
                pointer = getattr(pointer, "layer")
                pointer = pointer[2]
            elif scope_names[0] == "rms_norm":
                if hasattr(pointer, "layer_norm"):
                    pointer = getattr(pointer, "layer_norm")
                elif hasattr(pointer, "final_layer_norm"):
                    pointer = getattr(pointer, "final_layer_norm")
            # NOTE(review): this branch is unreachable — "scale" is already
            # consumed by the first `in ["kernel", "scale", "embedding"]`
            # condition above (same oddity exists upstream).
            elif scope_names[0] == "scale":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            elif scope_names[0] == "decoder" and name[1] == "logits":
                continue
            elif scope_names[0] == "logits":
                pointer = getattr(pointer, "lm_head")
            elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
                # Gated feed-forward variants name their input projections
                # wi_0/wi_1; `continue` skips the numeric-index step below.
                pointer = getattr(pointer, f"wi_{scope_names[1]}")
                continue
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if scope_names[0] not in ["kernel", "scale", "embedding"]:
            pointer = getattr(pointer, "weight")
        if scope_names[0] != "embedding":
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array.astype(np.float32))
        tf_weights.pop(txt_name, None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    return model
164,866 | import importlib
from collections import OrderedDict
from transformers.configuration_utils import PretrainedConfig
from transformers.dynamic_module_utils import get_class_from_dynamic_module
from transformers.file_utils import copy_func
from transformers.utils import logging
from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
def _get_model_class(config, model_mapping):
supported_models = model_mapping[type(config)]
if not isinstance(supported_models, (list, tuple)):
return supported_models
name_to_model = {model.__name__: model for model in supported_models}
architectures = getattr(config, "architectures", [])
for arch in architectures:
if arch in name_to_model:
return name_to_model[arch]
elif f"TF{arch}" in name_to_model:
return name_to_model[f"TF{arch}"]
elif f"Flax{arch}" in name_to_model:
return name_to_model[f"Flax{arch}"]
# If not architecture is set in the config or match the supported models, the first element of the tuple is the
# defaults.
return supported_models[0] | null |
164,867 | import importlib
from collections import OrderedDict
from transformers.configuration_utils import PretrainedConfig
from transformers.dynamic_module_utils import get_class_from_dynamic_module
from transformers.file_utils import copy_func
from transformers.utils import logging
from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
CLASS_DOCSTRING = """
This is a generic model class that will be instantiated as one of the model classes of the library when created
with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class
method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
FROM_CONFIG_DOCSTRING = """
Instantiates one of the model classes of the library from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights.
Args:
config ([`PretrainedConfig`]):
The model class to instantiate is selected based on the configuration class:
List options
Examples:
```python
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained("checkpoint_placeholder")
>>> model = BaseAutoModelClass.from_config(config)
```
"""
FROM_PRETRAINED_TORCH_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are
deactivated). To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
this case, `from_tf` should be set to `True` and a configuration object should be provided as
`config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (additional positional arguments, *optional*):
Will be passed along to the underlying model `__init__()` method.
config ([`PretrainedConfig`], *optional*):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the *model id* string of a pretrained
model).
- The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
save directory.
- The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
configuration JSON file named *config.json* is found in the directory.
state_dict (*Dict[str, torch.Tensor]*, *optional*):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
[`~PreTrainedModel.from_pretrained`] is not a simpler option.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (`bool`, *optional*, defaults to `False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
`pretrained_model_name_or_path` argument).
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`):
Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(`bool`, *optional*, defaults to `False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`). Behaves differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with `config`, `**kwargs` will be directly passed to the
underlying model's `__init__` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, `kwargs` will be first passed to the configuration class
initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
corresponds to a configuration attribute will be used to override said attribute with the
supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
will be passed to the underlying model's `__init__` function.
Examples:
```python
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json")
>>> model = BaseAutoModelClass.from_pretrained(
... "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config
... )
```
"""
FROM_PRETRAINED_TF_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this
case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
argument. This loading path is slower than converting the PyTorch model in a TensorFlow model
using the provided conversion scripts and loading the TensorFlow model afterwards.
model_args (additional positional arguments, *optional*):
Will be passed along to the underlying model `__init__()` method.
config ([`PretrainedConfig`], *optional*):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the *model id* string of a pretrained
model).
- The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
save directory.
- The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
configuration JSON file named *config.json* is found in the directory.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_pt (`bool`, *optional*, defaults to `False`):
Load the model weights from a PyTorch checkpoint save file (see docstring of
`pretrained_model_name_or_path` argument).
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`):
Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(`bool`, *optional*, defaults to `False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`). Behaves differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with `config`, `**kwargs` will be directly passed to the
underlying model's `__init__` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, `kwargs` will be first passed to the configuration class
initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
corresponds to a configuration attribute will be used to override said attribute with the
supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
will be passed to the underlying model's `__init__` function.
Examples:
```python
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
>>> model = BaseAutoModelClass.from_pretrained(
... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
... )
```
"""
FROM_PRETRAINED_FLAX_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this
case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
argument. This loading path is slower than converting the PyTorch model in a TensorFlow model
using the provided conversion scripts and loading the TensorFlow model afterwards.
model_args (additional positional arguments, *optional*):
Will be passed along to the underlying model `__init__()` method.
config ([`PretrainedConfig`], *optional*):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the *model id* string of a pretrained
model).
- The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
save directory.
- The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
configuration JSON file named *config.json* is found in the directory.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_pt (`bool`, *optional*, defaults to `False`):
Load the model weights from a PyTorch checkpoint save file (see docstring of
`pretrained_model_name_or_path` argument).
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`):
Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(`bool`, *optional*, defaults to `False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`). Behaves differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with `config`, `**kwargs` will be directly passed to the
underlying model's `__init__` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, `kwargs` will be first passed to the configuration class
initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
corresponds to a configuration attribute will be used to override said attribute with the
supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
will be passed to the underlying model's `__init__` function.
Examples:
```python
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
>>> model = BaseAutoModelClass.from_pretrained(
... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
... )
```
"""
class _BaseAutoModelClass:
    # Base class for auto models.
    # NOTE(review): `from_config`, `from_pretrained` and `register` take `cls`
    # as their first parameter but carry no visible `@classmethod` decorator;
    # in upstream transformers these are classmethods (and `auto_class_update`
    # below wraps copies of them in `classmethod(...)`) — confirm the
    # decorators were not lost when this file was assembled.
    _model_mapping = None
    def __init__(self, *args, **kwargs):
        # Direct instantiation is forbidden; use the factory methods instead.
        raise EnvironmentError(
            f"{self.__class__.__name__} is designed to be instantiated "
            f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
            f"`{self.__class__.__name__}.from_config(config)` methods."
        )
    def from_config(cls, config, **kwargs):
        """Instantiate a model (weights NOT loaded) from a config object."""
        trust_remote_code = kwargs.pop("trust_remote_code", False)
        if hasattr(config, "auto_map") and cls.__name__ in config.auto_map:
            # The config points at a custom model class hosted in the model
            # repo itself; loading it executes remote code, so it is gated
            # behind trust_remote_code.
            if not trust_remote_code:
                raise ValueError(
                    "Loading this model requires you to execute the modeling file in that repo "
                    "on your local machine. Make sure you have read the code there to avoid malicious use, then set "
                    "the option `trust_remote_code=True` to remove this error."
                )
            if kwargs.get("revision", None) is None:
                logger.warning(
                    "Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
                    "no malicious code has been contributed in a newer revision."
                )
            class_ref = config.auto_map[cls.__name__]
            module_file, class_name = class_ref.split(".")
            model_class = get_class_from_dynamic_module(config.name_or_path, module_file + ".py", class_name, **kwargs)
            return model_class._from_config(config, **kwargs)
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class._from_config(config, **kwargs)
        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate a model and load its pretrained weights from a hub
        model id or a local path, resolving the concrete class from the
        (possibly auto-loaded) config."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", False)
        kwargs["_from_auto"] = True
        if not isinstance(config, PretrainedConfig):
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **kwargs
            )
        if hasattr(config, "auto_map") and cls.__name__ in config.auto_map:
            # Custom model class defined inside the model repo (remote code).
            if not trust_remote_code:
                raise ValueError(
                    f"Loading {pretrained_model_name_or_path} requires you to execute the modeling file in that repo "
                    "on your local machine. Make sure you have read the code there to avoid malicious use, then set "
                    "the option `trust_remote_code=True` to remove this error."
                )
            if kwargs.get("revision", None) is None:
                logger.warning(
                    "Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
                    "no malicious code has been contributed in a newer revision."
                )
            class_ref = config.auto_map[cls.__name__]
            module_file, class_name = class_ref.split(".")
            model_class = get_class_from_dynamic_module(
                pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs
            )
            return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            # Local override (differs from upstream transformers): any
            # T5-family model type is routed to this repo's customized
            # T5ForConditionalGeneration instead of the mapped class.
            if "t5" in model_class.config_class.model_type:
                from .modeling_t5 import T5ForConditionalGeneration
                return T5ForConditionalGeneration.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
            return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )
    def register(cls, config_class, model_class):
        """
        Register a new model for this class.
        Args:
            config_class ([`PretrainedConfig`]):
                The configuration corresponding to the model to register.
            model_class ([`PreTrainedModel`]):
                The model to register.
        """
        if hasattr(model_class, "config_class") and model_class.config_class != config_class:
            raise ValueError(
                "The model class you are passing has a `config_class` attribute that is not consistent with the "
                f"config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix "
                "one of those so they match!"
            )
        cls._model_mapping.register(config_class, model_class)
def insert_head_doc(docstring, head_doc=""):
    """Specialize a generic auto-class docstring template.

    If *head_doc* is non-empty, the standard phrase
    "one of the model classes of the library " gains a parenthetical
    mention of the model head; otherwise it is reworded to refer to the
    base model classes.
    """
    target = "one of the model classes of the library "
    if head_doc:
        replacement = f"one of the model classes of the library (with a {head_doc} head) "
    else:
        replacement = "one of the base model classes of the library "
    return docstring.replace(target, replacement)
def auto_class_update(cls, checkpoint_for_example="bert-base-cased", head_doc=""):
    """Fill in the docstrings of an auto class and rebind its constructors.

    The class docstring and the docstrings of ``from_config`` / ``from_pretrained``
    are specialized with the class name, an example checkpoint and the task-head
    description; the two constructors are then re-registered as classmethods.
    """
    mapping = cls._model_mapping
    cls_name = cls.__name__
    cls.__doc__ = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc).replace("BaseAutoModelClass", cls_name)

    # `from_config` is copied so this class can carry its own specialized
    # docstring without mutating the shared base implementation.
    config_ctor = copy_func(_BaseAutoModelClass.from_config)
    config_doc = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc)
    config_doc = config_doc.replace("BaseAutoModelClass", cls_name)
    config_doc = config_doc.replace("checkpoint_placeholder", checkpoint_for_example)
    config_ctor.__doc__ = config_doc
    config_ctor = replace_list_option_in_docstrings(mapping._model_mapping, use_model_types=False)(config_ctor)
    cls.from_config = classmethod(config_ctor)

    # Pick the framework-specific `from_pretrained` template from the class-name prefix.
    if cls_name.startswith("TF"):
        pretrained_template = FROM_PRETRAINED_TF_DOCSTRING
    elif cls_name.startswith("Flax"):
        pretrained_template = FROM_PRETRAINED_FLAX_DOCSTRING
    else:
        pretrained_template = FROM_PRETRAINED_TORCH_DOCSTRING
    pretrained_ctor = copy_func(_BaseAutoModelClass.from_pretrained)
    pretrained_doc = insert_head_doc(pretrained_template, head_doc=head_doc)
    pretrained_doc = pretrained_doc.replace("BaseAutoModelClass", cls_name)
    pretrained_doc = pretrained_doc.replace("checkpoint_placeholder", checkpoint_for_example)
    shortcut = checkpoint_for_example.split("/")[-1].split("-")[0]
    pretrained_ctor.__doc__ = pretrained_doc.replace("shortcut_placeholder", shortcut)
    pretrained_ctor = replace_list_option_in_docstrings(mapping._model_mapping)(pretrained_ctor)
    cls.from_pretrained = classmethod(pretrained_ctor)
    return cls
import importlib
from collections import OrderedDict
from transformers.configuration_utils import PretrainedConfig
from transformers.dynamic_module_utils import get_class_from_dynamic_module
from transformers.file_utils import copy_func
from transformers.utils import logging
from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
def get_values(model_mapping):
    """Flatten the values of a model mapping into one flat list.

    Entries may be single model classes or lists/tuples of classes; either
    way, the result is a flat list preserving mapping order.
    """
    flattened = []
    for entry in model_mapping.values():
        if isinstance(entry, (list, tuple)):
            flattened.extend(entry)
        else:
            flattened.append(entry)
    return flattened
import importlib
from collections import OrderedDict
from transformers.configuration_utils import PretrainedConfig
from transformers.dynamic_module_utils import get_class_from_dynamic_module
from transformers.file_utils import copy_func
from transformers.utils import logging
from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
def getattribute_from_module(module, attr):
    """Resolve *attr* on *module*, falling back to the top-level ``transformers`` module.

    ``None`` passes through unchanged and tuples are resolved element-wise.
    Some mappings point from one model type to an object belonging to another
    model type, hence the fallback lookup on the package root.
    """
    if attr is None:
        return None
    if isinstance(attr, tuple):
        return tuple(getattribute_from_module(module, name) for name in attr)
    sentinel = object()
    value = getattr(module, attr, sentinel)
    if value is not sentinel:
        return value
    # Not found on the given module: retry on the top-level package.
    return getattribute_from_module(importlib.import_module("transformers"), attr)
import json
from typing import Callable, Tuple
import logging
import datasets.load
from datasets.dataset_dict import DatasetDict
from datasets.metric import Metric
from datasets.arrow_dataset import Dataset, concatenate_datasets
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from transformers.training_args import TrainingArguments
from seq2seq.utils.args import ModelArguments
from seq2seq.utils.dataset import (
DataArguments,
DataTrainingArguments,
DatasetSplits,
TrainSplit,
_prepare_train_split,
prepare_splits,
)
from seq2seq.utils.spider import spider_add_serialized_schema, spider_pre_process_function
from seq2seq.utils.cosql import cosql_add_serialized_schema, cosql_pre_process_function
def _log_duplicate_count(dataset: Dataset, dataset_name: str, split: str) -> None:
    """Warn if *dataset* contains duplicated examples.

    Each example is turned into a hashable ((column, values), ...) tuple so
    that duplicates can be counted with a set.
    """
    columns = dataset.to_dict()
    rows = [
        tuple((column, tuple(values)) for column, values in zip(columns.keys(), row_values))
        for row_values in zip(*columns.values())
    ]
    num_examples = len(rows)
    duplicate_count = num_examples - len(set(rows))
    if duplicate_count:
        logger.warning(
            f"The split ``{split}`` of the dataset ``{dataset_name}`` contains {duplicate_count} duplicates out of {num_examples} examples"
        )
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # NOTE(review): the dataclasses.field usage implies this class is decorated
    # with @dataclass where it is actually defined — confirm upstream.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None,
        metadata={"help": "Pretrained config name or path if not the same as model_name"},
    )
    tokenizer_name: Optional[str] = field(
        default=None,
        metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"},
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    # NOTE(review): the dataclasses.field usage implies this class is decorated
    # with @dataclass where it is actually defined — confirm upstream.
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached training and evaluation sets"},
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    max_target_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "The maximum total sequence length for target text after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    val_max_target_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
            "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
            "during ``evaluate`` and ``predict``."
        },
    )
    val_max_time: Optional[int] = field(
        default=None,
        metadata={
            "help": "The maximum allowed time in seconds for generation of one example. This setting can be used to stop "
            "generation whenever the full generation exceeds the specified amount of time."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of validation or test examples to this "
            "value if set."
        },
    )
    num_beams: int = field(
        default=1,
        metadata={
            "help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
            "which is used during ``evaluate`` and ``predict``."
        },
    )
    num_beam_groups: int = field(
        default=1,
        metadata={
            "help": "Number of beam groups to use for evaluation. This argument will be passed to ``model.generate``, "
            "which is used during ``evaluate`` and ``predict``."
        },
    )
    diversity_penalty: Optional[float] = field(
        default=None,
        metadata={
            "help": "Diversity penalty to use for evaluation. This argument will be passed to ``model.generate``, "
            "which is used during ``evaluate`` and ``predict``."
        },
    )
    num_return_sequences: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of sequences to generate during evaluation. This argument will be passed to "
            "``model.generate``, which is used during ``evaluate`` and ``predict``."
        },
    )
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={
            "help": "Whether or not to ignore the tokens corresponding to padded labels in the loss computation or not."
        },
    )
    source_prefix: Optional[str] = field(
        default=None,
        metadata={"help": "A prefix to add before every source text (useful for T5 models)."},
    )
    schema_serialization_type: str = field(
        default="peteshaw",
        metadata={"help": "Choose between ``verbose`` and ``peteshaw`` schema serialization."},
    )
    schema_serialization_randomized: bool = field(
        default=False,
        metadata={"help": "Whether or not to randomize the order of tables."},
    )
    schema_serialization_with_db_id: bool = field(
        default=True,
        metadata={"help": "Whether or not to add the database id to the context. Needed for Picard."},
    )
    schema_serialization_with_db_content: bool = field(
        default=True,
        metadata={"help": "Whether or not to use the database content to resolve field matches."},
    )
    normalize_query: bool = field(default=True, metadata={"help": "Whether to normalize the SQL queries."})
    target_with_db_id: bool = field(
        default=True,
        metadata={"help": "Whether or not to add the database id to the target. Needed for Picard."},
    )
    def __post_init__(self):
        # val_max_target_length defaults to 1024 above, so this fallback only
        # fires when the field is explicitly set to None by the caller/config.
        if self.val_max_target_length is None:
            self.val_max_target_length = self.max_target_length
class DataArguments:
    """Dataset/metric selection and paths for the text-to-SQL pipelines."""
    # NOTE(review): the dataclasses.field usage implies this class is decorated
    # with @dataclass where it is actually defined — confirm upstream.
    dataset: str = field(
        metadata={"help": "The dataset to be used. Choose between ``spider``, ``cosql``, or ``cosql+spider``, or ``spider_realistic``, or ``spider_syn``, or ``spider_dk``."},
    )
    dataset_paths: Dict[str, str] = field(
        default_factory=lambda: {
            "spider": "./seq2seq/datasets/spider",
            "cosql": "./seq2seq/datasets/cosql",
            "spider_realistic": "./seq2seq/datasets/spider_realistic",
            "spider_syn": "./seq2seq/datasets/spider_syn",
            "spider_dk": "./seq2seq/datasets/spider_dk"
        },
        metadata={"help": "Paths of the dataset modules."},
    )
    metric_config: str = field(
        default="both",
        metadata={"help": "Choose between ``exact_match``, ``test_suite``, or ``both``."},
    )
    # The spider_realistic/syn/dk variants all point at the Spider metric,
    # since they are built on the main Spider dataset.
    metric_paths: Dict[str, str] = field(
        default_factory=lambda: {
            "spider": "./seq2seq/metrics/spider",
            "spider_realistic" : "./seq2seq/metrics/spider",
            "cosql": "./seq2seq/metrics/cosql",
            "spider_syn":"./seq2seq/metrics/spider",
            "spider_dk":"./seq2seq/metrics/spider"
        },
        metadata={"help": "Paths of the metric modules."},
    )
    test_suite_db_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Path to the test-suite databases."})
    data_config_file : Optional[str] = field(
        default=None,
        metadata={"help": "Path to data configuration file (specifying the database splits)"}
    )
    test_sections : Optional[List[str]] = field(
        default=None,
        metadata={"help": "Sections from the data config to use for testing"}
    )
class TrainSplit(object):
    """Preprocessed training split together with the schemas of its databases."""
    # NOTE(review): bare annotated attributes plus keyword construction at the
    # call sites suggest a @dataclass decorator stripped from this excerpt.
    dataset: Dataset
    schemas: Dict[str, dict]
class DatasetSplits(object):
    """Bundle of train/eval/test splits plus all database schemas seen in them."""
    # NOTE(review): bare annotated attributes plus keyword construction at the
    # call sites suggest a @dataclass decorator stripped from this excerpt.
    train_split: Optional[TrainSplit]
    eval_split: Optional[EvalSplit]
    test_splits: Optional[Dict[str, EvalSplit]]
    schemas: Dict[str, dict]
def _prepare_train_split(
    dataset: Dataset,
    data_training_args: DataTrainingArguments,
    add_serialized_schema: Callable[[dict], dict],
    pre_process_function: Callable[[dict, Optional[int], Optional[int]], dict],
) -> TrainSplit:
    """Serialize schemas, optionally subsample, and tokenize the train split."""
    schemas = _get_schemas(examples=dataset)
    num_workers = data_training_args.preprocessing_num_workers
    use_cache = not data_training_args.overwrite_cache

    # First pass: attach the serialized database schema to every example.
    prepared = dataset.map(
        add_serialized_schema,
        batched=False,
        num_proc=num_workers,
        load_from_cache_file=use_cache,
    )
    if data_training_args.max_train_samples is not None:
        prepared = prepared.select(range(data_training_args.max_train_samples))

    # Second pass: tokenize inputs/targets, dropping all raw columns.
    def _tokenize(batch):
        return pre_process_function(
            batch=batch,
            max_source_length=data_training_args.max_source_length,
            max_target_length=data_training_args.max_target_length,
        )

    prepared = prepared.map(
        _tokenize,
        batched=True,
        num_proc=num_workers,
        remove_columns=prepared.column_names,
        load_from_cache_file=use_cache,
    )
    return TrainSplit(dataset=prepared, schemas=schemas)
def prepare_splits(
    dataset_dict: DatasetDict,
    data_args: DataArguments,
    training_args: TrainingArguments,
    data_training_args: DataTrainingArguments,
    add_serialized_schema: Callable[[dict], dict],
    pre_process_function: Callable[[dict, Optional[int], Optional[int]], dict],
) -> DatasetSplits:
    """Build the train/eval/test splits requested by the training arguments."""
    train_split = None
    eval_split = None
    test_splits = None
    test_split_schemas: Dict[str, dict] = {}

    if training_args.do_train:
        train_split = _prepare_train_split(
            dataset_dict["train"],
            data_training_args=data_training_args,
            add_serialized_schema=add_serialized_schema,
            pre_process_function=pre_process_function,
        )
    if training_args.do_eval:
        eval_split = _prepare_eval_split(
            dataset_dict["validation"],
            data_training_args=data_training_args,
            add_serialized_schema=add_serialized_schema,
            pre_process_function=pre_process_function,
        )
    if training_args.do_predict:
        test_splits = {}
        for section in data_args.test_sections:
            split = _prepare_eval_split(
                dataset_dict[section],
                data_training_args=data_training_args,
                add_serialized_schema=add_serialized_schema,
                pre_process_function=pre_process_function,
            )
            test_splits[section] = split
            test_split_schemas.update(split.schemas)

    # Merge the schemas of whichever splits were built; later updates win on collision.
    schemas: Dict[str, dict] = {}
    if train_split is not None:
        schemas.update(train_split.schemas)
    if eval_split is not None:
        schemas.update(eval_split.schemas)
    schemas.update(test_split_schemas)

    return DatasetSplits(
        train_split=train_split,
        eval_split=eval_split,
        test_splits=test_splits,
        schemas=schemas,
    )
def spider_add_serialized_schema(ex: dict, data_training_args: DataTrainingArguments) -> dict:
    """Attach the serialized database schema for one Spider example."""
    return {
        "serialized_schema": serialize_schema(
            question=ex["question"],
            db_path=ex["db_path"],
            db_id=ex["db_id"],
            db_column_names=ex["db_column_names"],
            db_table_names=ex["db_table_names"],
            schema_serialization_type=data_training_args.schema_serialization_type,
            schema_serialization_randomized=data_training_args.schema_serialization_randomized,
            schema_serialization_with_db_id=data_training_args.schema_serialization_with_db_id,
            schema_serialization_with_db_content=data_training_args.schema_serialization_with_db_content,
            normalize_query=data_training_args.normalize_query,
        )
    }
def spider_pre_process_function(
    batch: dict,
    max_source_length: Optional[int],
    max_target_length: Optional[int],
    data_training_args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizerBase,
) -> dict:
    """Tokenize a batch of Spider examples into model inputs and labels.

    Raw schema/question fields are carried through unchanged so downstream
    consumers can still access them after tokenization.
    """
    prefix = data_training_args.source_prefix if data_training_args.source_prefix is not None else ""

    sources = [
        spider_get_input(question=question, serialized_schema=serialized_schema, prefix=prefix)
        for question, serialized_schema in zip(batch["question"], batch["serialized_schema"])
    ]
    model_inputs: dict = tokenizer(
        sources,
        max_length=max_source_length,
        padding=False,
        truncation=True,
        return_overflowing_tokens=False,
    )

    targets = [
        spider_get_target(
            query=query,
            db_id=db_id,
            normalize_query=data_training_args.normalize_query,
            target_with_db_id=data_training_args.target_with_db_id,
        )
        for db_id, query in zip(batch["db_id"], batch["query"])
    ]
    # Target tokenization must run in the tokenizer's target mode.
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(
            targets,
            max_length=max_target_length,
            padding=False,
            truncation=True,
            return_overflowing_tokens=False,
        )
    model_inputs["labels"] = labels["input_ids"]

    # Pass the raw fields straight through to the model inputs.
    for passthrough in (
        "serialized_schema",
        "db_column_names",
        "db_table_names",
        "db_foreign_keys",
        "db_primary_keys",
        "raw_question_toks",
        "db_id",
        "query",
    ):
        model_inputs[passthrough] = batch[passthrough]
    model_inputs["seq_out"] = targets
    assert len(model_inputs["labels"]) == len(targets)
    return model_inputs
def cosql_add_serialized_schema(
    ex: dict,
    data_training_args: DataTrainingArguments,
) -> dict:
    """Attach the serialized database schema for one CoSQL interaction.

    CoSQL examples carry a dialogue; the schema serializer expects a single
    question string, so the utterances are joined with " | ".
    """
    return {
        "serialized_schema": serialize_schema(
            question=" | ".join(ex["utterances"]),
            db_path=ex["db_path"],
            db_id=ex["db_id"],
            db_column_names=ex["db_column_names"],
            db_table_names=ex["db_table_names"],
            schema_serialization_type=data_training_args.schema_serialization_type,
            schema_serialization_randomized=data_training_args.schema_serialization_randomized,
            schema_serialization_with_db_id=data_training_args.schema_serialization_with_db_id,
            schema_serialization_with_db_content=data_training_args.schema_serialization_with_db_content,
            normalize_query=data_training_args.normalize_query,
        )
    }
def cosql_pre_process_function(
    batch: dict,
    max_source_length: Optional[int],
    max_target_length: Optional[int],
    data_training_args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizerBase,
) -> dict:
    """Tokenize a batch of CoSQL interactions into model inputs and labels."""
    prefix = data_training_args.source_prefix if data_training_args.source_prefix is not None else ""

    sources = [
        cosql_get_input(utterances=utterances, serialized_schema=serialized_schema, prefix=prefix)
        for utterances, serialized_schema in zip(batch["utterances"], batch["serialized_schema"])
    ]
    model_inputs: dict = tokenizer(
        sources,
        max_length=max_source_length,
        padding=False,
        truncation=True,
        return_overflowing_tokens=False,
    )

    targets = [
        cosql_get_target(
            query=query,
            db_id=db_id,
            normalize_query=data_training_args.normalize_query,
            target_with_db_id=data_training_args.target_with_db_id,
        )
        for db_id, query in zip(batch["db_id"], batch["query"])
    ]
    # Target tokenization must run in the tokenizer's target mode.
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(
            targets,
            max_length=max_target_length,
            padding=False,
            truncation=True,
            return_overflowing_tokens=False,
        )
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
def load_dataset(
    data_args: DataArguments,
    model_args: ModelArguments,
    data_training_args: DataTrainingArguments,
    training_args: TrainingArguments,
    tokenizer: PreTrainedTokenizerFast,
) -> Tuple[Metric, DatasetSplits]:
    """Load the dataset named by ``data_args.dataset`` and its metric.

    Supports ``spider``, ``cosql``, ``spider_realistic``, ``spider_syn``,
    ``spider_dk`` and the combined ``cosql+spider``. Returns the metric and
    the prepared train/eval/test splits.
    """
    # Lazy factories: nothing is downloaded or loaded until the selected
    # branch below actually calls one of them.
    _spider_dataset_dict: Callable[[], DatasetDict] = lambda: datasets.load.load_dataset(
        path=data_args.dataset_paths["spider"], cache_dir=model_args.cache_dir
    )
    _spider_metric: Callable[[], Metric] = lambda: datasets.load.load_metric(
        path=data_args.metric_paths["spider"], config_name=data_args.metric_config, test_suite_db_dir=data_args.test_suite_db_dir
    )
    _spider_add_serialized_schema = lambda ex: spider_add_serialized_schema(
        ex=ex,
        data_training_args=data_training_args,
    )
    _spider_pre_process_function = lambda batch, max_source_length, max_target_length: spider_pre_process_function(
        batch=batch,
        max_source_length=max_source_length,
        max_target_length=max_target_length,
        data_training_args=data_training_args,
        tokenizer=tokenizer,
    )
    _cosql_dataset_dict: Callable[[], DatasetDict] = lambda: datasets.load.load_dataset(
        path=data_args.dataset_paths["cosql"], cache_dir=model_args.cache_dir
    )
    _cosql_metric: Callable[[], Metric] = lambda: datasets.load.load_metric(
        path=data_args.metric_paths["cosql"], config_name=data_args.metric_config, test_suite_db_dir=data_args.test_suite_db_dir
    )
    _cosql_add_serialized_schema = lambda ex: cosql_add_serialized_schema(
        ex=ex,
        data_training_args=data_training_args,
    )
    _cosql_pre_process_function = lambda batch, max_source_length, max_target_length: cosql_pre_process_function(
        batch=batch,
        max_source_length=max_source_length,
        max_target_length=max_target_length,
        data_training_args=data_training_args,
        tokenizer=tokenizer,
    )
    # The Spider variants (realistic/syn/dk) reuse Spider's schema
    # serialization and pre-processing functions as-is.
    _spider_realistic_dataset_dict : Callable[[], DatasetDict] = lambda: datasets.load.load_dataset(
        path=data_args.dataset_paths['spider_realistic'], cache_dir=model_args.cache_dir
    )
    _spider_realistic_metric: Callable[[], Metric] = lambda: datasets.load.load_metric(
        path=data_args.metric_paths["spider_realistic"], config_name=data_args.metric_config, test_suite_db_dir=data_args.test_suite_db_dir
    )
    _spider_syn_dataset_dict : Callable[[], DatasetDict] = lambda: datasets.load.load_dataset(
        path=data_args.dataset_paths['spider_syn'], cache_dir=model_args.cache_dir
    )
    _spider_syn_metric: Callable[[], Metric] = lambda: datasets.load.load_metric(
        path=data_args.metric_paths["spider_syn"], config_name=data_args.metric_config, test_suite_db_dir=data_args.test_suite_db_dir
    )
    _spider_dk_dataset_dict : Callable[[], DatasetDict] = lambda: datasets.load.load_dataset(
        path=data_args.dataset_paths['spider_dk'], cache_dir=model_args.cache_dir
    )
    _spider_dk_metric: Callable[[], Metric] = lambda: datasets.load.load_metric(
        path=data_args.metric_paths["spider_dk"], config_name=data_args.metric_config, test_suite_db_dir=data_args.test_suite_db_dir
    )
    _prepare_splits_kwargs = {
        "data_args": data_args,
        "training_args": training_args,
        "data_training_args": data_training_args,
    }
    if data_args.dataset == "spider":
        metric = _spider_metric()
        dataset_splits = prepare_splits(
            dataset_dict=_spider_dataset_dict(),
            add_serialized_schema=_spider_add_serialized_schema,
            pre_process_function=_spider_pre_process_function,
            **_prepare_splits_kwargs,
        )
    elif data_args.dataset == "cosql":
        metric = _cosql_metric()
        dataset_splits = prepare_splits(
            dataset_dict=_cosql_dataset_dict(),
            add_serialized_schema=_cosql_add_serialized_schema,
            pre_process_function=_cosql_pre_process_function,
            **_prepare_splits_kwargs,
        )
    elif data_args.dataset == "spider_realistic":
        metric = _spider_realistic_metric()
        dataset_splits = prepare_splits(
            dataset_dict= _spider_realistic_dataset_dict(),
            add_serialized_schema=_spider_add_serialized_schema,
            pre_process_function=_spider_pre_process_function,
            **_prepare_splits_kwargs,
        )
    elif data_args.dataset == "spider_dk":
        metric = _spider_dk_metric()
        dataset_splits = prepare_splits(
            dataset_dict= _spider_dk_dataset_dict(),
            add_serialized_schema=_spider_add_serialized_schema,
            pre_process_function=_spider_pre_process_function,
            **_prepare_splits_kwargs,
        )
    elif data_args.dataset == "spider_syn":
        metric = _spider_syn_metric()
        dataset_splits = prepare_splits(
            dataset_dict= _spider_syn_dataset_dict(),
            add_serialized_schema=_spider_add_serialized_schema,
            pre_process_function=_spider_pre_process_function,
            **_prepare_splits_kwargs,
        )
    elif data_args.dataset == "cosql+spider":
        # Combined training: CoSQL eval/test splits, with Spider's training
        # data concatenated onto CoSQL's when training is enabled.
        metric = _cosql_metric()
        cosql_dataset_splits = prepare_splits(
            dataset_dict=_cosql_dataset_dict(),
            add_serialized_schema=_cosql_add_serialized_schema,
            pre_process_function=_cosql_pre_process_function,
            **_prepare_splits_kwargs,
        )
        spider_training_split = (
            _prepare_train_split(
                dataset=_spider_dataset_dict()["train"],
                data_training_args=data_training_args,
                add_serialized_schema=_spider_add_serialized_schema,
                pre_process_function=_spider_pre_process_function,
            )
            if training_args.do_train
            else None
        )
        if cosql_dataset_splits.train_split is None and spider_training_split is None:
            train_split = None
        elif cosql_dataset_splits.train_split is None:
            train_split = spider_training_split
        elif spider_training_split is None:
            train_split = cosql_dataset_splits.train_split
        else:
            dataset: Dataset = concatenate_datasets(
                dsets=[cosql_dataset_splits.train_split.dataset, spider_training_split.dataset]
            )
            train_split = TrainSplit(
                dataset=dataset,
                schemas={**spider_training_split.schemas, **cosql_dataset_splits.train_split.schemas},
            )
        schemas = {
            **cosql_dataset_splits.schemas,
            **(spider_training_split.schemas if spider_training_split is not None else {}),
        }
        dataset_splits = DatasetSplits(
            train_split=train_split,
            eval_split=cosql_dataset_splits.eval_split,
            test_splits=cosql_dataset_splits.test_splits,
            schemas=schemas,
        )
    else:
        raise NotImplementedError()
    # Duplicate examples are only warned about, never dropped.
    if dataset_splits.train_split is not None:
        _log_duplicate_count(dataset=dataset_splits.train_split.dataset, dataset_name=data_args.dataset, split="train")
    if dataset_splits.eval_split is not None:
        _log_duplicate_count(dataset=dataset_splits.eval_split.dataset, dataset_name=data_args.dataset, split="eval")
    if dataset_splits.test_splits is not None:
        for section, split in dataset_splits.test_splits.items():
            _log_duplicate_count(dataset=split.dataset, dataset_name=data_args.dataset, split=section)
    return metric, dataset_splits
from dataclasses import dataclass
from typing import Union, List, Dict, Optional
from transformers.pipelines.text2text_generation import ReturnType, Text2TextGenerationPipeline
from transformers.tokenization_utils import TruncationStrategy
from transformers.tokenization_utils_base import BatchEncoding
from third_party.spider.preprocess.get_tables import dump_db_json_schema
from seq2seq.utils.dataset import serialize_schema
from seq2seq.utils.spider import spider_get_input
from seq2seq.utils.cosql import cosql_get_input
def dump_db_json_schema(db, f):
    """Read table and column info from a SQLite database.

    Args:
        db: Path to the ``.sqlite`` file to inspect.
        f: Database id stored under ``"db_id"`` in the result.

    Returns:
        dict: Schema in the Spider ``tables.json`` format: original and
        normalized table/column names, coarse column types, primary keys
        and foreign keys.
    """
    conn = sqlite3.connect(db)
    try:
        conn.execute("pragma foreign_keys=ON")
        cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
        data = {
            "db_id": f,
            "table_names_original": [],
            "table_names": [],
            "column_names_original": [(-1, "*")],
            "column_names": [(-1, "*")],
            "column_types": ["text"],
            "primary_keys": [],
            "foreign_keys": [],
        }
        fk_holder = []
        for i, item in enumerate(cursor.fetchall()):
            table_name = item[0]
            data["table_names_original"].append(table_name)
            # Normalized table name: lower-cased, underscores turned into spaces.
            data["table_names"].append(table_name.lower().replace("_", " "))
            fks = conn.execute(
                "PRAGMA foreign_key_list('{}') ".format(table_name)
            ).fetchall()
            print("db:{} table:{} fks:{}".format(f, table_name, fks))
            # PRAGMA foreign_key_list rows: fk[3] is the local column,
            # fk[2]/fk[4] the referenced table/column.
            fk_holder.extend([[(table_name, fk[3]), (fk[2], fk[4])] for fk in fks])
            cur = conn.execute("PRAGMA table_info('{}') ".format(table_name))
            for j, col in enumerate(cur.fetchall()):
                data["column_names_original"].append((i, col[1]))
                data["column_names"].append((i, col[1].lower().replace("_", " ")))
                # Map SQLite declared types onto the coarse Spider categories:
                # varchar/'' -> text, int/numeric/... -> number, etc.
                col_type = col[2].lower()
                if (
                    "char" in col_type
                    or col_type == ""
                    or "text" in col_type
                    or "var" in col_type
                ):
                    data["column_types"].append("text")
                elif (
                    "int" in col_type
                    or "numeric" in col_type
                    or "decimal" in col_type
                    or "number" in col_type
                    or "id" in col_type
                    or "real" in col_type
                    or "double" in col_type
                    or "float" in col_type
                ):
                    data["column_types"].append("number")
                elif "date" in col_type or "time" in col_type or "year" in col_type:
                    data["column_types"].append("time")
                elif "boolean" in col_type:
                    data["column_types"].append("boolean")
                else:
                    data["column_types"].append("others")
                # col[5] is the primary-key flag from PRAGMA table_info.
                if col[5] == 1:
                    data["primary_keys"].append(len(data["column_names"]) - 1)
    finally:
        # Bug fix: the connection was previously leaked; close it deterministically.
        conn.close()
    data["foreign_keys"] = fk_holder
    data["foreign_keys"] = convert_fk_index(data)
    return data
def get_schema(db_path: str, db_id: str) -> dict:
    """Load the schema of ``db_id`` and reshape it into columnar dict form."""
    raw = dump_db_json_schema(db_path + "/" + db_id + "/" + db_id + ".sqlite", db_id)
    table_ids = [table_id for table_id, _ in raw["column_names_original"]]
    column_names = [column_name for _, column_name in raw["column_names_original"]]
    fk_sources = [column_id for column_id, _ in raw["foreign_keys"]]
    fk_targets = [other_column_id for _, other_column_id in raw["foreign_keys"]]
    return {
        "db_table_names": raw["table_names_original"],
        "db_column_names": {
            "table_id": table_ids,
            "column_name": column_names,
        },
        "db_column_types": raw["column_types"],
        "db_primary_keys": {"column_id": list(raw["primary_keys"])},
        "db_foreign_keys": {
            "column_id": fk_sources,
            "other_column_id": fk_targets,
        },
    }
from copy import deepcopy
from typing import Optional, Union, Any, Callable, AsyncContextManager, List, Dict
from dataclasses import dataclass, field
import collections
import asyncio
import sys
import subprocess
import warnings
import time
from tenacity import retry, wait_random_exponential, stop_after_delay, before_sleep_log
import torch
from transformers import LogitsProcessorList
from transformers.configuration_utils import PretrainedConfig
from transformers.generation_utils import GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput
from transformers.generation_logits_process import LogitsProcessor
from transformers.file_utils import copy_func
from transformers.models.auto.auto_factory import _get_model_class
from transformers.models.auto.configuration_auto import AutoConfig
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from transformers.models.auto import AutoModelForSeq2SeqLM
import logging
import pdb
logger = logging.getLogger(__name__)
class PicardArguments:
    """
    Arguments pertaining to Picard.
    """
    # NOTE(review): the dataclasses.field usage implies this class is decorated
    # with @dataclass where it is actually defined — confirm upstream.
    use_picard: bool = field(default=True, metadata={"help": "Whether or not to use Picard."})
    launch_picard: bool = field(
        default=True,
        metadata={"help": "Whether or not to launch Picard. If ``False``, an already running Picard is used."},
    )
    picard_host: str = field(default="localhost", metadata={"help": "The host name for Picard."})
    picard_port: int = field(default=9090, metadata={"help": "The port number for Picard."})
    picard_mode: str = field(
        default="parse_with_guards",
        metadata={
            "help": "Picard mode. Choose between ``lex``, ``parse_without_guards``, ``parse_with_guards``, and ``parse_with_guards_and_type_checking."
        },
    )
    picard_schedule: str = field(
        default="incremental",
        metadata={"help": "Picard schedule. Choose between ``incremental`` and ``finalizing``."},
    )
    picard_max_tokens_to_check: int = field(
        default=2,
        metadata={"help": "The maximum number of tokens to check with Picard."},
    )
    def __post_init__(self):
        # `picard_available` is presumably a module-level flag set by an optional
        # picard import elsewhere in this file — confirm. Picard (and launching
        # it) is silently disabled when the client library is missing.
        self.use_picard = picard_available and self.use_picard
        self.launch_picard = self.use_picard and self.launch_picard
class PicardLogitsProcessor(LogitsProcessor):
    def __init__(
        self,
        eos_token_id: int,
        get_client: Callable[[], AsyncContextManager[Picard]],
        filter_value: float = -float("Inf"),
        max_tokens_to_check: int = 1,
        mode: str = "parse_with_guards",
        schedule: str = "incremental",
    ):
        """Logits processor that vetoes tokens rejected by the Picard parser.

        Args:
            eos_token_id: Id of the end-of-sequence token (marks a finalized hypothesis).
            get_client: Factory returning an async context manager over a Picard client.
            filter_value: Score assigned to rejected tokens (default ``-inf``).
            max_tokens_to_check: Number of top candidate tokens validated per step.
            mode: Picard parsing mode (``lex``, ``parse_without_guards``, ``parse_with_guards``,
                or ``parse_with_guards_and_type_checking``).
            schedule: ``incremental`` (check every step) or ``finalizing`` (check only at EOS).
        """
        self.eos_token_id = eos_token_id
        self.get_client = get_client
        self.filter_value = filter_value
        self.max_tokens_to_check = max_tokens_to_check
        self.mode = mode
        self.schedule = schedule
    async def _feed(self, client: Picard, input_ids: List[int], token: int) -> bool:
        """Feed ``token`` (continuing ``input_ids``) to Picard and report acceptability.

        Returns True when Picard parses the continuation (partially or fully),
        False when parsing fails or times out; RPC-level errors are re-raised.
        """
        # Translate the string mode setting into the Picard RPC enum.
        if self.mode == "lex":
            mode = Mode.LEXING
        elif self.mode == "parse_without_guards":
            mode = Mode.PARSING_WITHOUT_GUARDS
        elif self.mode == "parse" or self.mode == "parse_with_guards":
            mode = Mode.PARSING_WITH_GUARDS
        elif self.mode == "parse_with_guards_and_type_checking":
            mode = Mode.PARSING_WITH_GUARDS_AND_TYPE_CHECKING
        else:
            raise ValueError("unexpected picard mode")
        try:
            res = await client.feed(input_ids, token, mode)
        except FeedException as e:
            logger.error(f"unexpected feed error: {e}, input ids were: {input_ids}, token was: {token}")
            raise e
        except TransportError as e:
            logger.error(f"unexpected transport error: {e}, input ids were: {input_ids}, token was: {token}")
            raise e
        # Timeouts are treated like parse failures: the token is rejected.
        if isinstance(res.feedResult.value, FeedTimeoutFailure):
            logger.warning(f"timeout failure: {input_ids + [token]}")
            return False
        elif isinstance(res.feedResult.value, FeedParseFailure):
            logger.debug(f"parsing failure: {input_ids + [token]}")
            return False
        elif isinstance(res.feedResult.value, FeedPartialSuccess):
            logger.debug(f"parsing partial: {input_ids + [token]}")
            return True
        elif isinstance(res.feedResult.value, FeedCompleteSuccess):
            logger.info(f"parsing success: {input_ids + [token]}")
            return True
        else:
            # Unknown result variant from the Picard server.
            raise ValueError("unexpected picard parsing result")
async def _check_token(self, client: Picard, input_ids: List[int], token: int) -> bool:
if self.schedule == "incremental":
# check at every step
return await self._feed(client=client, input_ids=input_ids, token=token)
elif self.schedule == "finalizing":
# only check when decoded string is finalized
if token == self.eos_token_id:
return await self._feed(client=client, input_ids=input_ids, token=token)
else:
return True
else:
raise ValueError("unexpected picard schedule")
wait=wait_random_exponential(multiplier=1, max=60),
stop=stop_after_delay(600),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def _mask(
self,
client: Picard,
indices_to_remove: torch.Tensor,
batch_idx: int,
input_ids_batch: torch.Tensor,
top_token: torch.Tensor,
) -> None:
res = await self._check_token(client=client, input_ids=input_ids_batch.tolist(), token=top_token.item())
if not res:
indices_to_remove[batch_idx, top_token] = True
async def _mask_top_k(
self,
indices_to_remove: torch.Tensor,
input_ids: torch.Tensor,
top_tokens: torch.Tensor,
) -> None:
async with self.get_client() as client:
futures = [
self._mask(
client=client,
indices_to_remove=indices_to_remove,
batch_idx=batch_idx,
input_ids_batch=input_ids_batch,
top_token=top_token,
)
for batch_idx, (input_ids_batch, top_token_batch) in enumerate(zip(input_ids, top_tokens))
for top_token in top_token_batch
]
for f in asyncio.as_completed(futures):
await f
wait=wait_random_exponential(multiplier=1, max=60),
stop=stop_after_delay(600),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def _batch_mask_top_k(
self,
indices_to_remove: torch.Tensor,
input_ids: torch.Tensor,
top_tokens: torch.Tensor,
) -> None:
if self.mode == "lex":
mode = Mode.LEXING
elif self.mode == "parse_without_guards":
mode = Mode.PARSING_WITHOUT_GUARDS
elif self.mode == "parse" or self.mode == "parse_with_guards":
mode = Mode.PARSING_WITH_GUARDS
elif self.mode == "parse_with_guards_and_type_checking":
mode = Mode.PARSING_WITH_GUARDS_AND_TYPE_CHECKING
else:
raise ValueError("unexpected picard mode")
async with self.get_client() as client:
try:
res = await client.batchFeed(input_ids.tolist(), top_tokens.tolist(), mode)
except FeedException as e:
logger.error(
f"unexpected feed error: {e}, input ids were: {input_ids.tolist()}, top tokens were: {top_tokens.tolist()}"
)
raise e
except TransportError as e:
logger.error(
f"unexpected transport error: {e}, input ids were: {input_ids.tolist()}, top tokens were: {top_tokens.tolist()}"
)
raise e
for r in res:
if isinstance(r.feedResult.value, FeedTimeoutFailure):
logger.warning(f"timeout failure: {input_ids[r.batchId].tolist() + [r.topToken]}")
indices_to_remove[r.batchId, r.topToken] = True
elif isinstance(r.feedResult.value, FeedParseFailure):
logger.debug(f"parsing failure: {input_ids[r.batchId].tolist() + [r.topToken]}")
indices_to_remove[r.batchId, r.topToken] = True
elif isinstance(r.feedResult.value, FeedPartialSuccess):
logger.debug(f"parsing partial: {input_ids[r.batchId].tolist() + [r.topToken]}")
elif isinstance(r.feedResult.value, FeedCompleteSuccess):
logger.info(f"parsing success: {input_ids[r.batchId].tolist() + [r.topToken]}")
else:
# unexpected parsing result
raise ValueError("unexpected picard parsing result")
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
top_k = min(max(1, self.max_tokens_to_check), scores.size(-1)) # Safety check
top_scores, top_tokens = torch.topk(scores, top_k)
# Remove all tokens with a probability less than the last token of the top-k
lowest_top_k_scores = top_scores[..., -1, None]
del top_scores
indices_to_remove = scores < lowest_top_k_scores
del lowest_top_k_scores
# Do not mask the EOS token because otherwise production can continue indefinitely if all other tokens are masked
indices_to_remove[:, self.eos_token_id] = False
# Mask top-k tokens rejected by picard
asyncio.run(
self._batch_mask_top_k(
indices_to_remove=indices_to_remove,
input_ids=input_ids,
top_tokens=top_tokens,
)
if self.schedule == "incremental"
else self._mask_top_k(
indices_to_remove=indices_to_remove,
input_ids=input_ids,
top_tokens=top_tokens,
),
debug=False,
)
del top_tokens
scores = scores.masked_fill(indices_to_remove, self.filter_value)
del indices_to_remove
return scores
# NOTE(review): this dump lost all indentation; the code tokens below are kept
# byte-identical to the original, with comments only. The nesting must be
# restored from the upstream source before this can run.
#
# Build a Picard SQLSchema (thrift struct defined elsewhere in the project,
# as is _get_picard_column_type) from Spider-style schema columns. The '*'
# pseudo-column is excluded from every mapping, and all table/column ids are
# stringified because the thrift struct keys entries by string id.
def get_picard_schema(
db_table_names: List[str],
db_column_names: Dict[str, Union[List[str], List[int]]],
db_column_types: List[str],
db_primary_keys: Dict[str, List[int]],
db_foreign_keys: Dict[str, List[int]],
) -> SQLSchema:
# locate the '*' pseudo-column (raises StopIteration if none is present)
star_id = next((c_id for c_id, c_name in enumerate(db_column_names["column_name"]) if c_name == "*"))
# column id -> column name, skipping '*'
column_names = dict(
(str(c_id), c_name) for c_id, c_name in enumerate(db_column_names["column_name"]) if c_id != star_id
)
# column id -> Picard column type, skipping '*'
column_types = dict(
(str(c_id), _get_picard_column_type(c_type)) for c_id, c_type in enumerate(db_column_types) if c_id != star_id
)
# table id -> table name
table_names = dict((str(t_id), t_name) for t_id, t_name in enumerate(db_table_names))
# column id -> owning table id
column_to_table = dict(
(str(c_id), str(t_id))
for c_id, (t_id, _c_name) in enumerate(zip(db_column_names["table_id"], db_column_names["column_name"]))
if c_id != star_id
)
# table id -> list of its column ids, in schema order
table_to_columns = collections.defaultdict(list)
for c_id, (t_id, _c_name) in enumerate(zip(db_column_names["table_id"], db_column_names["column_name"])):
if c_id == star_id:
continue
table_to_columns[str(t_id)].append(str(c_id))
# foreign column id -> referenced column id; pairs touching '*' are dropped
foreign_keys = dict(
(str(c_id), str(other_c_id))
for c_id, other_c_id in zip(db_foreign_keys["column_id"], db_foreign_keys["other_column_id"])
if c_id != star_id and other_c_id != star_id
)
# primary-key column ids, '*' excluded
primary_keys = [str(c_id) for c_id in db_primary_keys["column_id"] if c_id != star_id]
return SQLSchema(
columnNames=column_names,
columnTypes=column_types,
tableNames=table_names,
columnToTable=column_to_table,
tableToColumns=table_to_columns,
foreignKeys=foreign_keys,
primaryKeys=primary_keys,
)
"The bare T5 Model transformer outputting raw hidden-states without any specific head on top.",
)
class T5ForConditionalGeneration(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
r"lm_head\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.decoder.first_device)
self.model_parallel = True
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
graph_batch=None, # TODO: Jinyang
relation_emb=None # TODO: Jinyang
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
labels in `[0, ..., config.vocab_size]`
Returns:
Examples:
```python
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained("t5-small")
>>> model = T5ForConditionalGeneration.from_pretrained("t5-small")
>>> # training
>>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
>>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer(
... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
>>> ).input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you.
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
graph_batch=graph_batch, # TODO: Jinyang
relation_emb=relation_emb # TODO: Jinyang
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.encoder.first_device)
self.lm_head = self.lm_head.to(self.encoder.first_device)
sequence_output = sequence_output.to(self.lm_head.weight.device)
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def _reorder_cache(self, past, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past is None:
logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
return past
reordered_decoder_past = ()
for layer_past_states in past:
# get the correct batch idx from layer past batch dim
# batch dim of `past` is at 2nd position
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
return reordered_decoder_past
"The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
T5_START_DOCSTRING,
)
def with_picard(
    model_cls,  # e.g. AutoModelForSeq2SeqLM
    picard_args: PicardArguments,
    tokenizer: PreTrainedTokenizerFast,
    schemas: Optional[Dict[str, dict]] = None,
):
    """Wrap `model_cls` so that `.generate()` is constrained by a Picard server.

    Returns a subclass whose `from_pretrained` patches the concrete model
    class: `generate` gains a `PicardLogitsProcessor`, the unpatched generate
    is kept as `old_generate`, and `add_schema` registers new databases with
    the server. All known schemas and the tokenizer are registered eagerly.

    Fixes vs. original: dump residue removed from the return statement, and
    the shared mutable default `LogitsProcessorList()` on `_generate` (which
    made every default-argument call append another PicardLogitsProcessor to
    the same list) replaced with the None-default idiom.
    """
    schema_cache: Dict[str, dict] = deepcopy(schemas) if schemas is not None else dict()

    def get_picard_client() -> AsyncContextManager[Picard]:
        # short-lived thrift client; one connection per registration / mask batch
        return get_client(
            Picard,
            host=picard_args.picard_host,
            port=picard_args.picard_port,
            timeout=1,
            protocol=Protocol.BINARY,
        )

    async def _init_picard() -> None:
        # register every cached schema, then the tokenizer definition
        async with get_picard_client() as client:
            for db_id, db_info in schema_cache.items():
                await _register_schema(db_id=db_id, db_info=db_info, picard_client=client)
            await _register_tokenizer(picard_client=client)

    async def _register_schema(db_id: str, db_info: dict, picard_client: Picard) -> None:
        sql_schema = get_picard_schema(**db_info)
        try:
            await picard_client.registerSQLSchema(db_id, sql_schema)
        except RegisterSQLSchemaException:
            # db already registered
            logger.debug(f"schema already registered: {db_id}")

    async def _register_schema_without_client(db_id: str, db_info: dict) -> None:
        async with get_picard_client() as client:
            await _register_schema(db_id=db_id, db_info=db_info, picard_client=client)

    async def _register_tokenizer(picard_client: Picard) -> None:
        # Picard needs the fast tokenizer's JSON definition to detokenize ids
        assert isinstance(tokenizer, PreTrainedTokenizerFast)
        json_str = tokenizer.backend_tokenizer.to_str(pretty=False)
        await picard_client.registerTokenizer(json_str)

    def _add_schema(db_id: str, db_info: dict) -> None:
        # register a new database at runtime; re-registering must be a no-op
        if db_id not in schema_cache:
            schema_cache[db_id] = deepcopy(db_info)
            asyncio.run(_register_schema_without_client(db_id=db_id, db_info=db_info), debug=False)
        else:
            assert db_info == schema_cache[db_id], "unexpected schema change"

    @torch.no_grad()
    def _generate(
        self,
        *args,
        logits_processor: Optional[LogitsProcessorList] = None,
        eos_token_id: Optional[int] = None,
        **kwargs,
    ) -> Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, torch.LongTensor]:
        # NOTE: the original used a shared `LogitsProcessorList()` default, so
        # every default-argument call appended another PicardLogitsProcessor
        # to the same list across calls.
        if logits_processor is None:
            logits_processor = LogitsProcessorList()
        eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
        logits_processor.append(
            PicardLogitsProcessor(
                eos_token_id=eos_token_id,
                get_client=get_picard_client,
                max_tokens_to_check=picard_args.picard_max_tokens_to_check,
                mode=picard_args.picard_mode,
                schedule=picard_args.picard_schedule,
            )
        )
        return self.old_generate(*args, logits_processor=logits_processor, eos_token_id=eos_token_id, **kwargs)

    class _PicardAutoModelClass(model_cls):
        @classmethod
        def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
            config = kwargs.pop("config", None)
            kwargs["_from_auto"] = True
            if not isinstance(config, PretrainedConfig):
                config, kwargs = AutoConfig.from_pretrained(
                    pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
                )
            if type(config) in cls._model_mapping.keys():
                model_class = _get_model_class(config, cls._model_mapping)
                if "t5" in model_class.config_class.model_type:
                    # swap in the project's graph-aware T5 implementation
                    from seq2seq.models.modeling_t5 import T5ForConditionalGeneration
                    model_class = T5ForConditionalGeneration
                generate = copy_func(_generate)
                generate.__doc__ = model_class.generate.__doc__
                model_class.old_generate = copy_func(model_class.generate)
                model_class.generate = generate
                model_class.add_schema = staticmethod(copy_func(_add_schema))
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
            raise ValueError(
                f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
                f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
            )

    asyncio.run(_init_picard(), debug=False)
    return _PicardAutoModelClass
164,883 | import os, json, pickle, argparse, sys, time
import pdb
import torch
from collections import defaultdict
import numpy as np
import re
The provided code snippet includes necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \"
Here is the function:
def quote_normalization(question):
    """Normalize all usage of quotation marks into a separate \" token.

    Args:
        question: list of word tokens.

    Returns:
        A new token list where every quotation mark (single, double,
        backtick, curly, doubled variants) is emitted as a standalone '"'
        and any quoted text is split off from the surrounding marks.

    Fixes: extraction residue that had been fused onto the return statement.
    """
    new_question, quotation_marks = [], ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    for idx, tok in enumerate(question):
        if len(tok) > 2 and tok[0] in quotation_marks and tok[-1] in quotation_marks:
            # fully quoted token, e.g. 'hello' -> " hello "
            new_question += ["\"", tok[1:-1], "\""]
        elif len(tok) > 2 and tok[0] in quotation_marks:
            new_question += ["\"", tok[1:]]
        elif len(tok) > 2 and tok[-1] in quotation_marks:
            new_question += [tok[:-1], "\""]
        elif tok in quotation_marks:
            new_question.append("\"")
        elif len(tok) == 2 and tok[0] in quotation_marks:
            # special case: the length of entity value is 1, e.g. 'a followed
            # by a closing mark token
            if idx + 1 < len(question) and question[idx + 1] in quotation_marks:
                new_question += ["\"", tok[1]]
            else:
                new_question.append(tok)
        else:
            new_question.append(tok)
    return new_question
164,885 | import os, json, pickle, argparse, sys, time
import pdb
import torch
from collections import defaultdict
import numpy as np
import re
def question_subword_matrix(processed_question_toks, relations, tokenizer):
    """Expand a word-level question relation matrix to subword granularity.

    Args:
        processed_question_toks: question word tokens.
        relations: square matrix of relation labels between question words.
        tokenizer: tokenizer whose output exposes ``word_ids()`` (i.e. a
            fast HuggingFace tokenizer — TODO confirm at call sites).

    Returns:
        (subword_matrix, question_dict): the subword-level relation matrix
        (cells with no word-level relation keep the placeholder 'symbol'),
        and the mapping word index -> list of subword positions.

    Fixes: extraction residue fused onto the return statement.
    """
    question_dict = defaultdict()
    # the trailing " ; " separator adds one extra word, hence the +1 below
    question = " ".join(processed_question_toks) + " ; "
    tokenized_question = tokenizer(question)
    word_ids = tokenized_question.word_ids()
    # drop the trailing special token (e.g. "[SEP]"/"</s>") which has no word id
    word_ids = word_ids[:-1]
    subword_matrix = [['symbol'] * len(word_ids) for _ in range(len(word_ids))]
    # construct a dict mapping word index -> list of subword positions,
    # e.g. {5: [5, 6], 6: [7], ...}
    for i, j in enumerate(word_ids):
        # i: subword position, j: original word index
        if j in question_dict:
            question_dict[j].append(i)
        else:
            question_dict[j] = [i]
    if len(processed_question_toks) + 1 != len(question_dict):
        print("{} processed_question_toks".format(len(processed_question_toks)))
        print("question dict is {}".format(question_dict))
        print("computed length of question_dict is {}".format(len(question_dict)))
        print("processed_question_toks: {}".format(processed_question_toks))
    assert len(processed_question_toks) + 1 == len(question_dict)
    # fully connect the subwords of every word pair with the word-level relation
    for r in range(len(processed_question_toks)):
        for c in range(len(processed_question_toks)):
            for sub_idx_r in question_dict[r]:
                for sub_idx_c in question_dict[c]:
                    subword_matrix[sub_idx_r][sub_idx_c] = relations[r][c]
    # round-trip through numpy to normalise entries to fixed-width strings
    subword_matrix = np.array(subword_matrix, dtype='<U100')
    subword_matrix = subword_matrix.tolist()
    return subword_matrix, question_dict
164,886 | import os, json, pickle, argparse, sys, time
import pdb
import torch
from collections import defaultdict
import numpy as np
import re
def subword_dict(input_ids):
    """Group sub-token positions by the word index they belong to.

    Iteration stops at the first None word id (the trailing special token),
    so anything after it is ignored. Returns a defaultdict mapping
    word index -> list of sub-token positions.
    """
    word_subword_mapping = defaultdict()
    for position, word in enumerate(input_ids):
        if word is None:
            # special tokens carry no word id
            break
        bucket = word_subword_mapping.get(word)
        if bucket is None:
            word_subword_mapping[word] = [position]
        else:
            bucket.append(position)
    return word_subword_mapping
def ids_mapping(idx_lst: list, subword_dict: dict, schema_items: list):
    """Map each schema-item position to the subword ids covering it.

    `idx_lst` holds, per schema item, either a single token index (int) or a
    list of token indices for a multi-word name (e.g. "home town" -> [32, 34]).
    `schema_items` is unused here but kept for interface compatibility.
    """
    new_ids_mapping = defaultdict()
    for position, token_ref in enumerate(idx_lst):
        if type(token_ref) is int:
            new_ids_mapping[position] = subword_dict[token_ref]
        else:
            # multi-word schema name: expand every component token
            new_ids_mapping[position] = expand_ids(token_ref, subword_dict)
    return new_ids_mapping
def _add_prefix(prefix_num, new_mapping):
new_col_seq = []
for col_idx in new_mapping:
new_col_seq.append(prefix_num + col_idx)
return new_col_seq
# NOTE(review): this dump lost all indentation; the code tokens below are kept
# byte-identical, with comments only. The branch nesting (especially the tail
# '*' handling) must be restored from the upstream source before this can run.
#
# Locate every table/column name inside a serialized schema string of the form
# "schema: <db_id> | <table> : <col> , <col> ( <val> ) , ... | *" and return
# (table token indices, column token indices, db_id), with indices offset by
# `init_idx`. Relies on `match_multi_words` (defined elsewhere) for
# multi-word column names; `new_mapping` is accepted but unused here.
def find_schema_idx(db_seq, table_items, column_items, new_mapping, init_idx=0):
seq_lst = db_seq.split(" ")
# serialization punctuation that can never be a schema name
special_token = ["|", ':', ',', 'schema:', '(', ')']
table_idx_lst = []
column_idx_lst = []
# lower-cased schema names, split into single words for membership tests
schema_items = [item.lower() for item in table_items + column_items]
schema_elements = " ".join(schema_items).split(" ")
db_id = ""
for i, item in enumerate(seq_lst):
if item in special_token:
continue
# tokens right after '(' are cell values, not schema names
if seq_lst[i - 1] == '(':
continue
if i < len(seq_lst) - 1:
# "| <db_id> |" at the head of the sequence
if seq_lst[i - 1] == seq_lst[i + 1] == '|':
db_id = item
# "| <table> :" introduces a table name
elif seq_lst[i - 1] == '|' and seq_lst[i + 1] == ':':
table_idx_lst.append(i + init_idx)
elif seq_lst[i - 1] == ":":
# head columns
if seq_lst[i + 1] == ",":
# head columns without value
if item in schema_elements:
column_idx_lst.append(i + init_idx)
elif seq_lst[i + 1] == "(":
# head columns with value
if item in schema_elements:
column_idx_lst.append(i + init_idx)
elif seq_lst[i - 1] == ",":
if seq_lst[i + 1] == "," or seq_lst[i + 1] == "(":
# middle columns (with values):
if item in schema_elements:
column_idx_lst.append(i + init_idx)
elif seq_lst[i + 1] == "|":
# tail columns:
if item in schema_elements:
column_idx_lst.append(i + init_idx)
elif seq_lst[i + 1] == ")":
continue
else:
# columns with multiple words: "home town"
temp_idx_lst = []
match_multi_words(cur_idx=i + init_idx, column_cur_idx=temp_idx_lst, seq_lst=seq_lst)
if item in schema_elements:
column_idx_lst.append(temp_idx_lst)
# append the last element:
else:
if item in schema_elements:
if seq_lst[i - 1] == "," or seq_lst[i - 1] == ":" or item == '*':
column_idx_lst.append(i + init_idx)
# put * into the head position:
star_idx = column_idx_lst.pop()
column_idx_lst.insert(0, star_idx)
# pdb.set_trace()
# sanity check: every schema item must have been located exactly once
if len(table_idx_lst + column_idx_lst) != len(column_items + table_items):
print("wrong: {}".format(db_seq))
pdb.set_trace()
assert len(table_idx_lst + column_idx_lst) == len(table_items + column_items)
return table_idx_lst, column_idx_lst, db_id
def schema_subword_matrix(db_sep, init_idx, tables, tokenizer, table_items=None, column_items=None, new_mapping=None):
    """Build the subword-level schema-to-schema relation matrix.

    Serializes the schema as ``"schema: <db_sep> | *"``, locates every
    table/column token with ``find_schema_idx``, expands each schema item to
    its subword ids, and copies the word-level ``tables[db_id]['relations']``
    entries (re-indexed through ``new_mapping``) onto every subword pair.

    Returns (subword_matrix, subword_mapping_dict, struct_in, schema_to_ids).
    Relies on the sibling helpers subword_dict / ids_mapping / _add_prefix /
    find_schema_idx. ``init_idx`` is accepted but find_schema_idx is called
    with init_idx=0, as in the original — TODO confirm this is intended.

    Fixes: extraction residue fused onto the return statement; dead
    commented-out fill loop removed.
    """
    schema_items = table_items + column_items
    struct_in = "schema: {} | *".format(db_sep)
    # normalize struct_in: collapse runs of spaces so token positions are stable
    struct_in = re.sub(' +', ' ', struct_in)
    table_idx_lst, column_idx_lst, db_id = find_schema_idx(db_seq=struct_in, table_items=table_items,
                                                           column_items=column_items, new_mapping=new_mapping, init_idx=0)
    if len(table_idx_lst + column_idx_lst) != len(column_items + table_items):
        print("wrong: {}".format(struct_in))
        pdb.set_trace()  # debugging trap: some schema item was not located
    assert len(table_idx_lst + column_idx_lst) == len(table_items + column_items)
    schema_relations = tables[db_id]['relations']
    schema_idx_lst = table_idx_lst + column_idx_lst
    schema_subword_token = tokenizer(struct_in, max_length=1024)  # 546 is the longest input seq for schema
    # drop the trailing special token, then group subwords by word index
    schema_ids = schema_subword_token.word_ids()[:-1]
    subword_mapping_dict = subword_dict(schema_ids)
    subword_matrix = [['symbol'] * len(schema_ids) for _ in range(len(schema_ids))]
    # schema item position -> the subword ids covering it
    schema_to_ids = ids_mapping(idx_lst=schema_idx_lst, subword_dict=subword_mapping_dict, schema_items=schema_items)
    assert len(schema_to_ids) == len(schema_idx_lst)
    # relation-matrix index space: tables plus '*' first, then the (possibly
    # permuted) columns as given by new_mapping
    table_len = len(table_items)
    new_table_seq = [t for t in range(table_len + 1)]  # including "*"
    new_col_seq = _add_prefix(table_len + 1, new_mapping)
    new_schema_idx_seq = new_table_seq + new_col_seq
    # fully connect the subwords of every schema-item pair with its relation
    for r in range(len(schema_idx_lst)):
        for c in range(len(schema_idx_lst)):
            for sub_idx_r in schema_to_ids[r]:
                for sub_idx_c in schema_to_ids[c]:
                    subword_matrix[sub_idx_r][sub_idx_c] = schema_relations[new_schema_idx_seq[r]][new_schema_idx_seq[c]]
    # round-trip through numpy to normalise entries to fixed-width strings
    subword_matrix = np.array(subword_matrix, dtype='<U100')
    subword_matrix = subword_matrix.tolist()
    return subword_matrix, subword_mapping_dict, struct_in, schema_to_ids
164,887 | import os, json, pickle, argparse, sys, time
import pdb
import torch
from collections import defaultdict
import numpy as np
import re
def _add_prefix(prefix_num, new_mapping):
new_col_seq = []
for col_idx in new_mapping:
new_col_seq.append(prefix_num + col_idx)
return new_col_seq
The provided code snippet includes necessary dependencies for implementing the `schema_linking_subword` function. Write a Python function `def schema_linking_subword(question_subword_dict: dict, schema_2_ids: dict, schema_linking: tuple, question_subword_len: int, schema_subword_len: int, new_mapping_zip: list)` to solve the following problem:
load new_mapping_zip
Here is the function:
def schema_linking_subword(question_subword_dict: dict, schema_2_ids: dict, schema_linking: tuple, question_subword_len: int, schema_subword_len: int, new_mapping_zip: list):
    """Expand the word-level question<->schema linking matrices to subwords.

    Args:
        question_subword_dict: word index -> subword positions of the question
            (one extra entry for the trailing ';' word, hence the +1 check).
        schema_2_ids: schema-item position -> subword positions of the schema.
        schema_linking: (q_schema_mat, schema_q_mat) word-level matrices;
            their schema axis is indexed in the relation-matrix order
            (tables + '*' first, then columns).
        question_subword_len / schema_subword_len: subword sequence lengths.
        new_mapping_zip: [new_mapping, table_items, column_items], giving the
            column permutation used to re-index the schema axis.

    Returns:
        (q_schema_subword, schema_q_subword) as lists of fixed-width strings
        (cells without a relation hold '0' after the numpy round-trip).

    Fixes: extraction residue that had corrupted the return statement.
    """
    q_schema_mat, schema_q_mat = schema_linking
    # dimension sanity checks
    assert len(question_subword_dict) == len(q_schema_mat) + 1
    assert len(schema_2_ids) == len(schema_q_mat)
    q_schema_subword_matrix = [[0] * schema_subword_len for _ in range(question_subword_len)]
    schema_q_subword_matrix = [[0] * question_subword_len for _ in range(schema_subword_len)]
    # unpack new_mapping_zip
    new_mapping, table_items, column_items = new_mapping_zip
    table_len = len(table_items)
    new_table_seq = list(range(table_len + 1))  # tables plus '*'
    # equivalent to _add_prefix(table_len + 1, new_mapping)
    new_col_seq = [table_len + 1 + col_idx for col_idx in new_mapping]
    new_schema_idx_seq = new_table_seq + new_col_seq
    # question -> schema: broadcast each word-level relation to all subword pairs
    for r in range(len(q_schema_mat)):
        for c in range(len(schema_2_ids)):
            temp_relation = q_schema_mat[r][new_schema_idx_seq[c]]
            for sub_idx_r in question_subword_dict[r]:
                for sub_idx_c in schema_2_ids[c]:
                    q_schema_subword_matrix[sub_idx_r][sub_idx_c] = temp_relation
    # schema -> question: same expansion on the transposed matrix
    for r_s in range(len(schema_2_ids)):
        for c_q in range(len(q_schema_mat)):
            tmp_relation = schema_q_mat[new_schema_idx_seq[r_s]][c_q]
            for sub_idx_s in schema_2_ids[r_s]:
                for sub_idx_q in question_subword_dict[c_q]:
                    schema_q_subword_matrix[sub_idx_s][sub_idx_q] = tmp_relation
    # round-trip through numpy to normalise entries to fixed-width strings
    q_schema_subword_matrix = np.array(q_schema_subword_matrix, dtype='<U100')
    schema_q_subword_matrix = np.array(schema_q_subword_matrix, dtype='<U100')
    subword_schema_linking = (q_schema_subword_matrix.tolist(), schema_q_subword_matrix.tolist())
    return subword_schema_linking
164,888 | import json
import pickle
import pdb
import argparse
dummy_relations = ['question-table-nomatch', 'question-column-nomatch', 'column-question-nomatch', 'table-question-nomatch']
def flatten_fk(foreign_keys_lst):
    """Collect the distinct column ids appearing in any foreign-key pair."""
    distinct = {column for pair in foreign_keys_lst for column in pair}
    return list(distinct)
def mapping_idx(sampled_columns_idx):
    """Map each kept column id to its position in the sampled list."""
    return {col_idx: position for position, col_idx in enumerate(sampled_columns_idx)}
def recaption_fk(ori_fk_lsts, sampled_columns_idx_mapping):
    """Rewrite every foreign-key pair in terms of the new column indices."""
    return [
        [sampled_columns_idx_mapping[col] for col in fk_pair]
        for fk_pair in ori_fk_lsts
    ]
def sampling_database(dataset, tables, output_path=None):
    """Prune each example's database schema down to the relevant columns.

    Keeps every primary/foreign-key column plus any column with a real
    (non-"nomatch") schema-linking relation to the question, then re-indexes
    primary/foreign keys against the pruned column list. Mutates `dataset` in
    place, optionally writes it as JSON, and returns it.

    Relies on the module-level `dummy_relations` list and the sibling helpers
    flatten_fk / mapping_idx / recaption_fk.

    Fixes: extraction residue fused onto the return statement; the original
    called `.sort()` *before* `list(set(...))`, leaving the final column order
    up to set iteration (now `sorted(set(...))`); dead statements removed;
    output file handle closed via `with`.
    """
    for idx, data in enumerate(dataset):
        schema_linking = data['schema_linking']
        db_corr = tables[data['db_id']]
        table_names_original = db_corr['table_names_original']
        column_names_original = db_corr['column_names_original']
        primary_keys = db_corr['primary_keys']
        foreign_keys = db_corr['foreign_keys']
        table_len = len(table_names_original)
        # make sure the schema axis of the linking matrix matches the relation matrix
        assert len(schema_linking[-1]) == len(db_corr['relations'])
        # keep the original database structures: primary keys + foreign keys
        fk_idx = flatten_fk(foreign_keys)
        pk_idx = primary_keys
        sampled_columns_idx = list(set(fk_idx + pk_idx))
        # keep any column with a non-dummy link to the question; positions
        # >= table_len on the schema axis are columns
        for i in range(len(schema_linking[0])):
            for j in range(len(schema_linking[1])):
                if schema_linking[0][i][j] not in dummy_relations and j >= table_len:
                    sampled_columns_idx.append(j - table_len)
        sampled_columns_idx = sorted(set(sampled_columns_idx))
        sampled_columns_idx_mapping = mapping_idx(sampled_columns_idx)
        # recaption foreign keys and primary keys against the pruned columns
        new_primary_keys = []
        for pk in primary_keys:
            if pk not in sampled_columns_idx_mapping:
                pdb.set_trace()  # debugging trap: a primary key escaped sampling
            new_primary_keys.append(sampled_columns_idx_mapping[pk])
        new_foreign_keys = recaption_fk(foreign_keys, sampled_columns_idx_mapping)
        assert len(primary_keys) == len(new_primary_keys)
        assert len(foreign_keys) == len(new_foreign_keys)
        # collect the pruned column metadata
        new_column_names_original = [column_names_original[col_idx] for col_idx in sampled_columns_idx]
        new_column_types = [db_corr['column_types'][col_idx] for col_idx in sampled_columns_idx]
        # double check
        assert len(new_column_names_original) == len(sampled_columns_idx)
        data['new_column_names_original'] = new_column_names_original
        data['new_table_names_original'] = table_names_original
        data['new_primary_keys'] = new_primary_keys
        data['new_foreign_keys'] = new_foreign_keys
        data['new_column_types'] = new_column_types
        data['graph_idx_eval'] = idx
        data['sampled_columns_idx'] = sampled_columns_idx
    if output_path:
        with open(output_path, 'w') as fout:
            json.dump(dataset, fout, indent=4)
    return dataset
164,889 | import json
import pdb
from map_subword_serialize import schema_linking_subword
import argparse
from transformers import AutoTokenizer
import pickle
def merge_graph_pedia(graph_pedia_train, graph_pedia_dev, graph_all_output_path=None):
    """Merge train and dev graph dictionaries into a single pickle.

    Train entries keep their original integer keys; dev entries are
    re-keyed by offsetting them with the train-set size so the two key
    ranges do not collide.  The merged dict is written to
    ``graph_all_output_path`` and the (unshifted) dev dict is returned.
    """
    with open(graph_pedia_train, "rb") as fh:
        merged = pickle.load(fh)
    # Dev keys start right after the last train key.
    offset = len(merged)
    with open(graph_pedia_dev, "rb") as fh:
        dev_graphs = pickle.load(fh)
    merged.update({key + offset: graph for key, graph in dev_graphs.items()})
    # The merged pickle is written unconditionally, as before.
    with open(graph_all_output_path, "wb") as fh:
        pickle.dump(merged, fh)
    return dev_graphs
164,890 | import re
ex = example.replace('t1', 'concert')
ex = ex.replace('t2', 'stadium')
def map_alias(example):
    """Build a mapping from SQL table alias to its table name.

    Scans the space-separated token stream for ``as``/``AS`` and records
    ``{alias: table}`` from the neighbouring tokens (``<table> as <alias>``).

    Args:
        example: a space-separated SQL string.

    Returns:
        dict mapping alias token -> table-name token.
    """
    alias_map = {}
    example_list = example.split(' ')
    for i, ex in enumerate(example_list):
        # Guard both neighbours: a dangling 'as' at either end of the token
        # list would otherwise raise an IndexError (or wrap around via -1).
        if ex in ('as', 'AS') and 0 < i < len(example_list) - 1:
            alias_map[example_list[i + 1]] = example_list[i - 1]
    return alias_map
164,891 | import re
ex = example.replace('t1', 'concert')
ex = ex.replace('t2', 'stadium')
def replace_alias(example, mapping):
    """Rewrite every table alias in *example* with its real table name.

    For each ``alias -> table`` pair the alias token is substituted and the
    now-redundant `` as <table>`` / `` AS <table>`` fragment is dropped.
    The membership test runs on the original string, so a lowercase ``as``
    anywhere in the input takes precedence over ``AS``.
    """
    has_lower_as = 'as' in example
    has_upper_as = 'AS' in example
    rewritten = example
    for alias, table in mapping.items():
        rewritten = rewritten.replace(alias, table)
        if has_lower_as:
            rewritten = rewritten.replace(' as ' + table, '')
        elif has_upper_as:
            rewritten = rewritten.replace(' AS ' + table, '')
    return rewritten
164,892 | import json
import argparse
def merge_train(train_spider, train_others, output_path=None):
    """Concatenate the two Spider training splits into one list.

    Args:
        train_spider: examples from train_spider.json.
        train_others: examples from train_others.json.
        output_path: when given, the merged list is also dumped as JSON.

    Returns:
        The merged example list.  (Previously nothing was returned, which
        was inconsistent with the sibling helpers in this module that all
        return their result; callers that ignored the return value are
        unaffected.)
    """
    total_train = train_spider + train_others
    if output_path:
        with open(output_path, "w") as fh:
            json.dump(total_train, fh, indent=4)
    return total_train
164,893 | import os, json, pickle, argparse, sys, time
import pdb
import math, dgl, torch
import numpy as np
import os, sys
from collections import defaultdict
from transformers import AutoTokenizer
def process_subgraph_datasets(processer, seq2seq_dataset, output_path = None, graph_output_path = None, graph_pedia=None, train_len=None):
    """Run subgraph post-processing over a seq2seq dataset and split out graphs.

    For every entry the heavyweight intermediate fields (subword matrices,
    schema-linking tables, the graph itself) are stripped from the example,
    and the graph is collected separately keyed by the example's int index.

    Args:
        processer: object exposing ``process_subgraph_utils(data)``.
        seq2seq_dataset: dict mapping stringified index -> example dict.
        output_path: optional JSON dump target for the slimmed examples.
        graph_output_path: optional pickle target for the collected graphs.
        graph_pedia: unused -- immediately shadowed by a fresh dict below.
            NOTE(review): the commented-out offset logic suggests this
            parameter (together with train_len) was meant to append dev
            graphs after the train ones; confirm before relying on it.
        train_len: unused (see graph_pedia).

    Returns:
        (list of slimmed example dicts, dict of graphs keyed by int index).
    """
    seq2seq_dataset_formal = []
    # Shadows the `graph_pedia` argument: graphs are always collected fresh.
    graph_pedia = defaultdict()
    for i_str, data in seq2seq_dataset.items():
        # Presumably mutates `data` in place and returns it: the intermediate
        # keys are deleted from `new_data`, but `data` is what gets appended.
        # TODO confirm process_subgraph_utils returns its argument.
        new_data = processer.process_subgraph_utils(data)
        graph_pedia[int(i_str)] = data['graph']
        # if graph_pedia is not None:
        #     graph_pedia[int(i_str) + train_len] = data['graph']
        # Drop intermediates no longer needed once the graph is extracted.
        del new_data['question_subword_matrix']
        del new_data['question_subword_dict']
        del new_data['question_token_relations']
        del new_data['schema_linking']
        del new_data['schema_subword_relations']
        del new_data['schema_relations']
        del new_data['schema_subword_mapping_dict']
        del new_data['schema_to_ids']
        del new_data['schema_linking_subword']
        del new_data['graph']
        seq2seq_dataset_formal.append(data)
        if int(i_str) % 1000 == 0:
            print("processing {}th data".format(int(i_str)))
    if output_path:
        json.dump(seq2seq_dataset_formal, open(output_path, "w"))
    if graph_output_path:
        pickle.dump(graph_pedia, open(graph_output_path, "wb"))
    return seq2seq_dataset_formal, graph_pedia
164,896 | import os, json, pickle, argparse, sys, time
import pdb
import torch
from collections import defaultdict
import numpy as np
import re
def subword_dict(input_ids):
    """Group subword positions by the word they belong to.

    Args:
        input_ids: per-subword word indices, as produced by
            ``tokenizer(...).word_ids()``; iteration stops at the first
            ``None`` (special/padding token).

    Returns:
        defaultdict mapping word index -> list of subword positions,
        in encounter order.
    """
    # defaultdict(list) replaces the hand-rolled membership check of the
    # original (which used a factory-less defaultdict as a plain dict).
    word_subword_mapping = defaultdict(list)
    for sub_idx, word_idx in enumerate(input_ids):
        if word_idx is None:
            # word_ids() marks special tokens with None; everything after
            # the first one is ignored, matching the original truncation.
            break
        word_subword_mapping[word_idx].append(sub_idx)
    return word_subword_mapping
def ids_mapping(idx_lst: list, subword_dict: dict, schema_items: list):
    """Map each schema item's position to its subword ids.

    ``idx_lst[i]`` is either a single word index (int) or, for multi-word
    items such as "home town", a list of word indices that is expanded via
    ``expand_ids``.  ``schema_items`` is unused but kept for interface
    compatibility.

    Returns:
        defaultdict: schema position -> list of subword ids.
    """
    mapped = defaultdict()
    for position, word_idx in enumerate(idx_lst):
        if type(word_idx) is int:
            mapped[position] = subword_dict[word_idx]
        else:
            # e.g. "home town" -> [32, 34]: merge the subwords of each word
            mapped[position] = expand_ids(word_idx, subword_dict)
    return mapped
def backprop_database_idx(table_items, column_items, sampled_columns_idx):
    """Return (new, original) flat schema index lists after column sampling.

    Tables are never sampled, so their positions are identical in both
    views.  Columns come after the tables in the flat schema ordering; the
    "original" view points at each kept column's position in the full
    schema, while the "new" view is simply consecutive.
    """
    n_tables = len(table_items)
    table_positions = list(range(n_tables))
    kept_original = [n_tables + col for col in sampled_columns_idx]
    kept_new = [n_tables + rank for rank in range(len(sampled_columns_idx))]
    return table_positions + kept_new, table_positions + kept_original
def find_schema_idx(db_seq, table_items, column_items, init_idx=0):
    """Locate every table/column token in a serialized schema string.

    ``db_seq`` looks like ``"schema: <db> | <table> : <col> , <col> | ... | *"``;
    tokens are classified purely by their neighbours (``|``, ``:``, ``,``,
    parentheses around cell values).  The trailing ``*`` column is moved to
    the front of the column list.

    Args:
        db_seq: space-separated serialized schema.
        table_items / column_items: schema item names; matching is done on
            their lowercased word set.
        init_idx: offset added to every recorded token position.

    Returns:
        (table token positions, column token positions with '*' first, db_id).

    Raises:
        AssertionError: when the number of located tokens does not match the
            number of schema items (the offending sequence is printed first;
            previously this branch dropped into ``pdb.set_trace()``, which
            hangs any non-interactive run).
    """
    seq_lst = db_seq.split(" ")
    special_token = ["|", ':', ',', 'schema:', '(', ')']
    table_idx_lst = []
    column_idx_lst = []
    schema_items = [item.lower() for item in table_items + column_items]
    schema_elements = " ".join(schema_items).split(" ")
    db_id = ""
    for i, item in enumerate(seq_lst):
        if item in special_token:
            continue
        # tokens right after '(' are cell values, not schema items
        if seq_lst[i - 1] == '(':
            continue
        if i < len(seq_lst) - 1:
            if seq_lst[i - 1] == seq_lst[i + 1] == '|':
                # a token fenced by '|' on both sides is the database id
                db_id = item
            elif seq_lst[i - 1] == '|' and seq_lst[i + 1] == ':':
                # "<table> :" opens a table's column list
                table_idx_lst.append(i + init_idx)
            elif seq_lst[i - 1] == ":":
                # head column of a table
                if seq_lst[i + 1] == ",":
                    # head column without value
                    if item in schema_elements:
                        column_idx_lst.append(i + init_idx)
                elif seq_lst[i + 1] == "(":
                    # head column followed by a value list
                    if item in schema_elements:
                        column_idx_lst.append(i + init_idx)
                elif seq_lst[i + 1] == "|":
                    # head column that is also the table's last column
                    if item in schema_elements:
                        column_idx_lst.append(i + init_idx)
            elif seq_lst[i - 1] == ",":
                if seq_lst[i + 1] == "," or seq_lst[i + 1] == "(":
                    # middle column (possibly with values)
                    if item in schema_elements:
                        column_idx_lst.append(i + init_idx)
                elif seq_lst[i + 1] == "|":
                    # tail column of this table
                    if item in schema_elements:
                        column_idx_lst.append(i + init_idx)
                elif seq_lst[i + 1] == ")":
                    continue
                else:
                    # multi-word column such as "home town": collect every
                    # word position of the item into a sub-list
                    temp_idx_lst = []
                    match_multi_words(cur_idx=i + init_idx, column_cur_idx=temp_idx_lst, seq_lst=seq_lst)
                    if item in schema_elements:
                        column_idx_lst.append(temp_idx_lst)
        else:
            # the very last token ('*' appended by the caller)
            if item in schema_elements:
                if seq_lst[i - 1] == "," or seq_lst[i - 1] == ":" or item == '*':
                    column_idx_lst.append(i + init_idx)
    # put * into the head position:
    star_idx = column_idx_lst.pop()
    column_idx_lst.insert(0, star_idx)
    if len(table_idx_lst + column_idx_lst) != len(column_items + table_items):
        # Report the mismatch and fail via the assert below instead of
        # blocking on an interactive debugger.
        print("wrong: {}".format(db_seq))
    assert len(table_idx_lst + column_idx_lst) == len(table_items + column_items)
    return table_idx_lst, column_idx_lst, db_id
def schema_subword_matrix(db_sep, init_idx, tables, tokenizer, table_items=None, column_items=None, sampled_columns=None, sampled_columns_idx=None):
    """Build the schema-relation matrix at subword granularity.

    Serializes the schema as ``"schema: <db_sep> | *"``, tokenizes it,
    locates every table/column token, and expands the word-level relation
    matrix stored in ``tables[db_id]['relations']`` so that each pair of
    subwords carries the relation of the schema items they belong to.

    Args:
        db_sep: serialized database string (db id, tables and columns).
        init_idx: unused here; find_schema_idx is called with init_idx=0.
        tables: preprocessed schema dict keyed by db_id, each entry holding
            a word-level 'relations' matrix over the FULL (unsampled) schema.
        tokenizer: HuggingFace-style tokenizer (must expose word_ids()).
        table_items / column_items: schema item names as they appear in db_sep.
        sampled_columns: unused.
        sampled_columns_idx: indices of the columns kept after sampling, used
            to map sampled positions back to the full schema.

    Returns:
        (subword relation matrix, word->subword dict, normalized struct_in,
         schema-position -> subword-ids dict, original schema index list)
    """
    schema_items = table_items + column_items
    struct_in = "schema: {} | *".format(db_sep)
    # normalize struct_in: collapse runs of spaces so token positions are stable
    struct_in = re.sub(' +', ' ', struct_in)
    table_idx_lst, column_idx_lst, db_id = find_schema_idx(db_seq=struct_in, table_items=table_items,
                                                           column_items=column_items, init_idx=0)
    if len(table_idx_lst + column_idx_lst) != len(column_items + table_items):
        print("wrong: {}".format(struct_in))
        pdb.set_trace()  # NOTE(review): interactive debugger left in; hangs non-interactive runs
    assert len(table_idx_lst + column_idx_lst) == len(column_items + table_items)
    schema_relations = tables[db_id]['relations']
    schema_idx_lst = table_idx_lst + column_idx_lst
    schema_subword_token = tokenizer(struct_in, max_length=1024)  # 546 is the longest input seq for schema
    # drop the trailing special token's word id -- assumes exactly one; TODO confirm
    schema_ids = schema_subword_token.word_ids()[:-1]
    # pdb.set_trace()
    subword_mapping_dict = subword_dict(schema_ids)
    # 'symbol' marks subword pairs not covered by any schema relation
    subword_matrix = [['symbol'] * len(schema_ids) for _ in range(len(schema_ids))]
    # get the mapping dict for original schema items:
    schema_to_ids = ids_mapping(idx_lst=schema_idx_lst, subword_dict=subword_mapping_dict, schema_items=schema_items)
    assert len(schema_to_ids) == len(schema_idx_lst)
    # fully-connected subwords as new matrix including dummy symbols:
    schema_idx_new, schema_idx_ori = backprop_database_idx(table_items, column_items, sampled_columns_idx)
    for r in range(len(schema_idx_lst)):
        for c in range(len(schema_idx_lst)):
            for sub_idx_r in schema_to_ids[r]:
                for sub_idx_c in schema_to_ids[c]:
                    # look up the relation in the FULL schema via the original index mapping
                    subword_matrix[sub_idx_r][sub_idx_c] = schema_relations[schema_idx_ori[r]][schema_idx_ori[c]]
    # numpy round-trip forces a uniform fixed-width string dtype on every cell
    subword_matrix = np.array(subword_matrix, dtype='<U100')
    subword_matrix = subword_matrix.tolist()
    return subword_matrix, subword_mapping_dict, struct_in, schema_to_ids, schema_idx_ori
164,897 | import os, json, pickle, argparse, sys, time
import pdb
import torch
from collections import defaultdict
import numpy as np
import re
def schema_linking_subword(question_subword_dict: dict, schema_2_ids: dict, schema_linking: tuple, question_subword_len: int, schema_subword_len: int, schema_idx_ori=None):
    """Expand the word-level question<->schema linking matrices to subwords.

    Args:
        question_subword_dict: question word index -> its subword positions
            (one extra entry for the trailing special token, hence ``+ 1``
            in the assert below).
        schema_2_ids: schema position -> its subword positions.
        schema_linking: (question x schema, schema x question) word-level
            relation matrices over the FULL schema.
        question_subword_len / schema_subword_len: target subword dims.
        schema_idx_ori: maps positions in the (possibly sampled) schema back
            to rows/columns of the full-schema matrices.

    Returns:
        (question x schema, schema x question) subword-level matrices as
        nested lists of fixed-width strings.
    """
    # assert dim match:
    q_schema_mat, schema_q_mat = schema_linking
    assert len(question_subword_dict) == len(q_schema_mat) + 1
    # assert len(schema_2_ids) == len(schema_q_mat) # if there's sampled preprocessing, then it should be masked
    q_schema_subword_matrix = [[0] * schema_subword_len for _ in range(question_subword_len)]
    schema_q_subword_matrix = [[0] * question_subword_len for _ in range(schema_subword_len)]
    # construct subword_matrix for q_schema_mat:
    for r in range(len(q_schema_mat)):
        for c in range(len(schema_2_ids)):
            if c >= len(schema_idx_ori):
                pdb.set_trace()  # NOTE(review): debug trap; the next line would raise IndexError anyway
            temp_relation = q_schema_mat[r][schema_idx_ori[c]]
            for sub_idx_r in question_subword_dict[r]:
                for sub_idx_c in schema_2_ids[c]:
                    q_schema_subword_matrix[sub_idx_r][sub_idx_c] = temp_relation
    # construct subword_matrix for schema_q_mat:
    for r_s in range(len(schema_2_ids)):
        for c_q in range(len(q_schema_mat)):
            tmp_relation = schema_q_mat[schema_idx_ori[r_s]][c_q]
            for sub_idx_s in schema_2_ids[r_s]:
                for sub_idx_q in question_subword_dict[c_q]:
                    schema_q_subword_matrix[sub_idx_s][sub_idx_q] = tmp_relation
    # numpy round-trip forces a uniform '<U100' string dtype on every cell
    q_schema_subword_matrix = np.array(q_schema_subword_matrix, dtype='<U100')
    schema_q_subword_matrix = np.array(schema_q_subword_matrix, dtype='<U100')
    subword_schema_linking = (q_schema_subword_matrix.tolist(), schema_q_subword_matrix.tolist())
    return subword_schema_linking
164,898 | import os, json, pickle, argparse, sys, time
import pdb
import torch
from collections import defaultdict
import numpy as np
import re
def schema_linking_subword_sampled(question_subword_dict: dict, schema_2_ids: dict, schema_linking: tuple, question_subword_len: int, schema_subword_len: int, schema_idx_ori=None):
    """Subword expansion of question<->schema linking for a SAMPLED schema.

    Identical logic to ``schema_linking_subword``; the length assert between
    schema_2_ids and schema_q_mat is absent because column sampling makes
    the sampled schema shorter than the full matrices.  ``schema_idx_ori``
    maps sampled positions back to the full-schema rows/columns.
    """
    q_schema_mat, schema_q_mat = schema_linking
    # question_subword_dict carries one extra entry for the trailing special token
    assert len(question_subword_dict) == len(q_schema_mat) + 1
    q_schema_subword_matrix = [[0] * schema_subword_len for _ in range(question_subword_len)]
    schema_q_subword_matrix = [[0] * question_subword_len for _ in range(schema_subword_len)]
    # construct subword_matrix for q_schema_mat:
    for r in range(len(q_schema_mat)):
        for c in range(len(schema_2_ids)):
            if c >= len(schema_idx_ori):
                pdb.set_trace()  # NOTE(review): debug trap; the next line would raise IndexError anyway
            temp_relation = q_schema_mat[r][schema_idx_ori[c]]
            for sub_idx_r in question_subword_dict[r]:
                for sub_idx_c in schema_2_ids[c]:
                    q_schema_subword_matrix[sub_idx_r][sub_idx_c] = temp_relation
    # construct subword_matrix for schema_q_mat:
    for r_s in range(len(schema_2_ids)):
        for c_q in range(len(q_schema_mat)):
            tmp_relation = schema_q_mat[schema_idx_ori[r_s]][c_q]
            for sub_idx_s in schema_2_ids[r_s]:
                for sub_idx_q in question_subword_dict[c_q]:
                    schema_q_subword_matrix[sub_idx_s][sub_idx_q] = tmp_relation
    # numpy round-trip forces a uniform '<U100' string dtype on every cell
    q_schema_subword_matrix = np.array(q_schema_subword_matrix, dtype='<U100')
    schema_q_subword_matrix = np.array(schema_q_subword_matrix, dtype='<U100')
    subword_schema_linking = (q_schema_subword_matrix.tolist(), schema_q_subword_matrix.tolist())
    return subword_schema_linking
164,899 | from map_function import question_subword_matrix
from transformers import AutoTokenizer
import pickle
import argparse
def question_subword_matrix(processed_question_toks, relations, tokenizer):
def question_subword_dataset(dataset, tokenizer, output_path=None):
    """Compute subword-level question relations for every example, in place.

    Each example must provide 'raw_question_toks' and 'relations'; the
    results are stored back under 'subword_relations' and 'subword_dict'.
    Returns the same (mutated) dataset; optionally pickles it.
    """
    for position, example in enumerate(dataset):
        subword_rel, subword_map = question_subword_matrix(
            processed_question_toks=example['raw_question_toks'],
            relations=example['relations'],
            tokenizer=tokenizer,
        )
        example['subword_relations'] = subword_rel
        example['subword_dict'] = subword_map
        if position % 500 == 0:
            print("processing {}th data".format(position))
    if output_path:
        pickle.dump(dataset, open(output_path, "wb"))
    return dataset
164,901 | import os, json, pickle, argparse, sys, time
import pdb
from supar import Parser
The provided code snippet includes necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \"
Here is the function:
def quote_normalization(question):
    """Normalize every quotation-mark variant into a separate ``"`` token.

    Straight, backtick, curly and doubled quote characters glued to a token
    are split off; bare quote tokens are rewritten as ``"``.
    """
    QUOTES = {"'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"}
    normalized = []
    for idx, tok in enumerate(question):
        starts = len(tok) > 2 and tok[0] in QUOTES
        ends = len(tok) > 2 and tok[-1] in QUOTES
        if starts and ends:
            normalized.extend(["\"", tok[1:-1], "\""])
        elif starts:
            normalized.extend(["\"", tok[1:]])
        elif ends:
            normalized.extend([tok[:-1], "\""])
        elif tok in QUOTES:
            normalized.append("\"")
        elif len(tok) == 2 and tok[0] in QUOTES:
            # special case: the entity value is a single character, e.g. "'a"
            nxt = question[idx + 1] if idx + 1 < len(question) else None
            if nxt in QUOTES:
                normalized.extend(["\"", tok[1]])
            else:
                normalized.append(tok)
        else:
            normalized.append(tok)
    return normalized
164,902 | import os, json, pickle, argparse, sys, time
import pdb
from supar import Parser
def inject_syntax_dataset(processor, dataset, output_path=None):
    """Run the processor's syntax injection over every example.

    Returns the list of processed entries; optionally pickles it to
    ``output_path``.
    """
    enriched = []
    for idx, data in enumerate(dataset):
        enriched.append(processor.inject_syntax(data))
        if idx % 100 == 0:
            print("************************ processing {}th dataset ************************".format(idx))
    if output_path:
        pickle.dump(enriched, open(output_path, "wb"))
    return enriched
164,903 | import os, json, pickle, argparse, sys, time
import pdb
from supar import Parser
def inject_syntax_dataset_json(processor, dataset, mode='train', output_path=None, train_size=8577):
    """Inject syntax info and assign a global graph index to each entry.

    Dev entries get indices offset by ``train_size`` so that train and dev
    share one contiguous index space.

    Args:
        processor: object exposing ``inject_syntax(entry) -> entry``.
        dataset: iterable of raw entries.
        mode: 'train' or 'dev'; 'dev' applies the train-size offset.
        output_path: optional JSON dump target.
        train_size: number of training examples.  Previously hard-coded to
            8577 (the Spider train split length); now a parameter with the
            same default, so existing callers are unaffected.

    Returns:
        list of processed entries, each carrying 'graph_idx'.
    """
    syntax_dataset = []
    offset = train_size if mode == 'dev' else 0
    for idx, data in enumerate(dataset):
        entry = processor.inject_syntax(data)
        entry['graph_idx'] = idx + offset
        syntax_dataset.append(entry)
        if idx % 1000 == 0:
            print("************************ processing {}th dataset ************************".format(idx))
    if output_path:
        json.dump(syntax_dataset, open(output_path, "w"), indent=4)
    return syntax_dataset
164,904 | import os, json, pickle, argparse, sys, time
from preprocess.common_utils import Preprocessor
def process_tables(processor, tables_list, output_path=None, verbose=False):
    """Preprocess every database schema and key the results by db_id.

    Returns the dict of preprocessed schemas; optionally pickles it.
    """
    tables = {}
    for db in tables_list:
        if verbose:
            print('*************** Processing database %s **************' % (db['db_id']))
        tables[db['db_id']] = processor.preprocess_database(db, verbose=verbose)
    print('In total, process %d databases .' % (len(tables)))
    if output_path is not None:
        pickle.dump(tables, open(output_path, 'wb'))
    return tables
164,905 | import os, json, pickle, argparse, sys, time
from preprocess.common_utils import Preprocessor
def process_example(processor, entry, db, trans, verbose=False):
    """Preprocess one example's raw tokens via the processor pipeline.

    ``trans`` is unused but kept for interface compatibility.
    """
    return processor.pipeline(entry, db, verbose=verbose)


def process_dataset(processor, dataset, tables, output_path=None, skip_large=False, verbose=False):
    """Preprocess a dataset, optionally skipping databases with >100 columns.

    Returns the list of processed entries; optionally pickles it.
    """
    processed_dataset = []
    for idx, entry in enumerate(dataset):
        if skip_large and len(tables[entry['db_id']]['column_names']) > 100:
            continue
        if idx % 1000 == 0:
            print('*************** Processing %d-th sample **************' % (idx))
        if verbose:
            print('*************** Processing %d-th sample **************' % (idx))
        processed_dataset.append(
            process_example(processor, entry, tables[entry['db_id']], trans=None, verbose=verbose)
        )
    print('In total, process %d samples , skip %d extremely large databases.' % (len(processed_dataset), len(dataset) - len(processed_dataset)))
    if output_path is not None:
        # serialize preprocessed dataset
        pickle.dump(processed_dataset, open(output_path, 'wb'))
    return processed_dataset
164,906 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
def is_number(s):
    """Return True iff *s* can be parsed as a float."""
    try:
        float(s)
    except ValueError:
        return False
    return True
164,907 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
The provided code snippet includes necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \"
Here is the function:
def quote_normalization(question):
    """Normalize all usage of quotation marks into a separate \" token.

    Args:
        question: list of tokens.

    Returns:
        A new token list where every quote variant (straight, backtick,
        curly, doubled) becomes its own ``"`` token, with any glued text
        split off into separate tokens.
    """
    new_question, quotation_marks = [], ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    for idx, tok in enumerate(question):
        # quoted on both ends: emit "  <inner>  "
        if len(tok) > 2 and tok[0] in quotation_marks and tok[-1] in quotation_marks:
            new_question += ["\"", tok[1:-1], "\""]
        # leading quote only
        elif len(tok) > 2 and tok[0] in quotation_marks:
            new_question += ["\"", tok[1:]]
        # trailing quote only
        elif len(tok) > 2 and tok[-1] in quotation_marks:
            new_question += [tok[:-1], "\"" ]
        # a bare quote token
        elif tok in quotation_marks:
            new_question.append("\"")
        elif len(tok) == 2 and tok[0] in quotation_marks:
            # special case: the length of entity value is 1
            if idx + 1 < len(question) and question[idx + 1] in quotation_marks:
                new_question += ["\"", tok[1]]
            else:
                new_question.append(tok)
        else:
            new_question.append(tok)
    return new_question
164,910 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
import torch.nn.functional as F
from transformers import AutoModel, AutoConfig, AutoTokenizer
import geoopt as gt
def is_number(s):
    """Return True if *s* parses as a float (ints, decimals, scientific
    notation), False otherwise.

    Note: float() also accepts 'inf' and 'nan', so those count as numbers.
    """
    try:
        float(s)
        return True
    except ValueError:
        return False
164,911 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
import torch.nn.functional as F
from transformers import AutoModel, AutoConfig, AutoTokenizer
import geoopt as gt
def agg(input):
    """Average *input* over dim 1, keeping the reduced dimension."""
    total = input.sum(dim=1, keepdim=True)
    return total / input.size(1)
164,912 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
import torch.nn.functional as F
from transformers import AutoModel, AutoConfig, AutoTokenizer
import geoopt as gt
The provided code snippet includes necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \"
Here is the function:
def quote_normalization(question):
    """Normalize all usage of quotation marks into a separate \" token.

    Args:
        question: list of tokens.

    Returns:
        A new token list where every quote variant (straight, backtick,
        curly, doubled) becomes its own ``"`` token, with any glued text
        split off into separate tokens.
    """
    new_question, quotation_marks = [], ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    for idx, tok in enumerate(question):
        # quoted on both ends: emit "  <inner>  "
        if len(tok) > 2 and tok[0] in quotation_marks and tok[-1] in quotation_marks:
            new_question += ["\"", tok[1:-1], "\""]
        # leading quote only
        elif len(tok) > 2 and tok[0] in quotation_marks:
            new_question += ["\"", tok[1:]]
        # trailing quote only
        elif len(tok) > 2 and tok[-1] in quotation_marks:
            new_question += [tok[:-1], "\"" ]
        # a bare quote token
        elif tok in quotation_marks:
            new_question.append("\"")
        elif len(tok) == 2 and tok[0] in quotation_marks:
            # special case: the length of entity value is 1
            if idx + 1 < len(question) and question[idx + 1] in quotation_marks:
                new_question += ["\"", tok[1]]
            else:
                new_question.append(tok)
        else:
            new_question.append(tok)
    return new_question
164,916 | import json
import pdb
from map_subword_serialize import question_subword_matrix
import argparse
from transformers import AutoTokenizer
import pickle
def question_subword_matrix(processed_question_toks, relations, tokenizer):
def question_subword_dataset(seq2seq_dataset, syntax_dataset, tokenizer, output_path = None):
    """Attach subword-level question relations to every seq2seq example.

    Mutates the examples in ``seq2seq_dataset`` in place, pulling the
    word-level relation matrix and schema linking from the parallel
    ``syntax_dataset`` (aligned by integer index).

    Args:
        seq2seq_dataset: dict mapping stringified index -> example dict
            (must carry 'raw_question_toks').
        syntax_dataset: sequence indexed by the same integers, each entry
            providing 'relations', 'schema_linking' and 'graph_idx'.
        tokenizer: passed through to question_subword_matrix.
        output_path: optional pickle dump target for the enriched dict.

    Returns:
        The same (mutated) seq2seq_dataset dict.
    """
    for i_str, data in seq2seq_dataset.items():
        processed_question_toks = data['raw_question_toks']
        relations = syntax_dataset[int(i_str)]['relations']
        question_sub_matrix, question_subword_dict, = \
            question_subword_matrix(processed_question_toks=processed_question_toks, relations=relations, tokenizer=tokenizer)
        data['question_subword_matrix'], data['question_subword_dict'] = question_sub_matrix, question_subword_dict
        data['question_token_relations'] = relations
        data['schema_linking'] = syntax_dataset[int(i_str)]['schema_linking']
        data['graph_idx'] = syntax_dataset[int(i_str)]['graph_idx']
        if int(i_str) % 500 == 0:
            print("processing {}th data".format(int(i_str)))
    if output_path:
        pickle.dump(seq2seq_dataset, open(output_path, "wb"))
    return seq2seq_dataset
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.