id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
163,493 | import copy
import math
import os
import warnings
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
from .adapter import Adapter
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_t5` function. Write a Python function `def load_tf_weights_in_t5(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Args:
        model: the PyTorch T5 model to populate in place.
        config: the model configuration (unused here; kept for the standard
            ``load_tf_weights`` signature).
        tf_checkpoint_path: path to the TensorFlow checkpoint.

    Returns:
        The same ``model`` with its weights overwritten from the checkpoint.
    """
    try:
        # Imported lazily so TF/numpy are only required when actually converting.
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        tf_weights[name] = array
    for txt_name in names:
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        if "_slot_" in name[-1]:
            # Optimizer slot variables are not model weights.
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        pointer = model
        array = tf_weights[txt_name]
        # Walk the TF scope path, descending into the matching PyTorch submodule.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # "block_3" -> ["block", "3", ""]; the digit indexes a ModuleList below.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] in ["kernel", "scale", "embedding"]:
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "self_attention":
                # Layer order inside a T5 block: [self-attn, (cross-attn,) feed-forward].
                pointer = getattr(pointer, "layer")
                pointer = pointer[0]
            elif scope_names[0] == "enc_dec_attention":
                pointer = getattr(pointer, "layer")
                pointer = pointer[1]
            elif scope_names[0] == "dense_relu_dense":
                pointer = getattr(pointer, "layer")
                pointer = pointer[2]
            elif scope_names[0] == "rms_norm":
                if hasattr(pointer, "layer_norm"):
                    pointer = getattr(pointer, "layer_norm")
                elif hasattr(pointer, "final_layer_norm"):
                    pointer = getattr(pointer, "final_layer_norm")
            elif scope_names[0] == "scale":
                # NOTE(review): unreachable — "scale" is already matched by the
                # first branch above; kept for fidelity with upstream code.
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            elif scope_names[0] == "decoder" and name[1] == "logits":
                continue
            elif scope_names[0] == "logits":
                pointer = getattr(pointer, "lm_head")
            elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
                # Gated FFN checkpoints name the two input projections wi_0 / wi_1.
                pointer = getattr(pointer, f"wi_{scope_names[1]}")
                continue
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` only skips the current scope
                    # component (inner loop), not the whole variable — upstream quirk.
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                # Numeric suffix selects an element of a ModuleList (e.g. block_3).
                num = int(scope_names[1])
                pointer = pointer[num]
        if scope_names[0] not in ["kernel", "scale", "embedding"]:
            pointer = getattr(pointer, "weight")
        if scope_names[0] != "embedding":
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array.astype(np.float32))
        tf_weights.pop(txt_name, None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    return model
163,494 | from collections import OrderedDict
import torch
from torch import nn
from transformers import AutoTokenizer
from .base import PushToHubFriendlyModel
from ..prompt.modeling_auto import AutoModelForSeq2SeqLM
The provided code snippet includes necessary dependencies for implementing the `aggregate_prompt` function. Write a Python function `def aggregate_prompt( past_prompt_dict: OrderedDict, task_names_list=None, strategy="simple_concat" )` to solve the following problem:
past_prompt_dict: a dict of past_prompt from different tasks.
Here is the function:
def aggregate_prompt(
    past_prompt_dict: OrderedDict, task_names_list=None, strategy="simple_concat"
):
    """Aggregate the past_prompt of several tasks into a single prompt.

    Args:
        past_prompt_dict: OrderedDict mapping task name -> past_prompt. A
            past_prompt is a list (one entry per layer) of dicts keyed by
            prompt position ("decoder_prompt", "cross_attention_prompt",
            "encoder_prompt"), each holding "prev_key", "prev_value" and
            "prev_key_padding_mask" tensors.
        task_names_list: for the "separate" strategies, the per-sample task
            names; prompts are stacked along the batch dimension in this order.
        strategy: one of "simple_concat", "concat_with_new_prefix",
            "simple_separate", "separate_with_new_prefix", "gnn".

    Returns:
        The aggregated prompt in the same nested structure as the inputs.

    Raises:
        ValueError: if *strategy* is not one of the implemented strategies.
    """
    prompt_positions = ("decoder_prompt", "cross_attention_prompt", "encoder_prompt")
    prompt_keys = ("prev_key", "prev_value", "prev_key_padding_mask")

    def _shallow_copy(prompt):
        # Copy the two dict levels so the torch.cat assignments below never
        # overwrite entries of the caller's dicts; tensors are still shared.
        return [{k: dict(v) for k, v in layer.items()} for layer in prompt]

    def _merge(dst, src, mask_dim, kv_dim, src_first=False):
        # Concatenate src into dst layer by layer: padding masks along
        # mask_dim, keys/values along kv_dim. src_first puts src in front.
        for layer_number, src_layer in enumerate(src):
            dst_layer = dst[layer_number]
            for pos in prompt_positions:
                for key in prompt_keys:
                    dim = mask_dim if key == "prev_key_padding_mask" else kv_dim
                    if src_first:
                        pieces = [src_layer[pos][key], dst_layer[pos][key]]
                    else:
                        pieces = [dst_layer[pos][key], src_layer[pos][key]]
                    dst_layer[pos][key] = torch.cat(pieces, dim=dim)

    constructed_prompt = None
    if strategy in ["simple_separate", "separate_with_new_prefix"]:
        # Stack each sample's task prefix along the batch (bsz) dimension.
        for task_name in task_names_list:
            prompt_of_this_task = past_prompt_dict[task_name]
            if constructed_prompt is None:
                constructed_prompt = _shallow_copy(prompt_of_this_task)
            else:
                _merge(constructed_prompt, prompt_of_this_task, mask_dim=0, kv_dim=0)
        # TODO: add code of attention padding when with different prefix len.
    elif strategy in ["simple_concat", "concat_with_new_prefix"]:
        # Concatenate every task's prefix along the prefix-length dimension.
        for task_name, prompt_of_this_task in past_prompt_dict.items():
            if task_name == "new_prefix":
                continue
            if constructed_prompt is None:
                constructed_prompt = _shallow_copy(prompt_of_this_task)
            else:
                _merge(constructed_prompt, prompt_of_this_task, mask_dim=1, kv_dim=2)
    elif strategy == "gnn":
        pass
    else:
        # BUG FIX: the original message read "Other strategy has been
        # implemented yet!!" which said the opposite of what it meant.
        raise ValueError(f"Strategy {strategy} has not been implemented yet!")

    if strategy in ["separate_with_new_prefix", "concat_with_new_prefix"]:
        # Prepend the shared "new_prefix" prompt in front of the aggregate.
        _merge(
            constructed_prompt,
            past_prompt_dict["new_prefix"],
            mask_dim=1,
            kv_dim=2,
            src_first=True,
        )
    return constructed_prompt
163,496 | import collections
import json
import time
import os
from typing import Any, Dict, List, Optional, Tuple, Union, Callable, Iterable
from typing import NamedTuple
import datasets
from datasets import load_metric
import numpy as np
import torch
import transformers.trainer_seq2seq
from torch.utils.data import Dataset
from packaging import version
from torch import nn
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import SequentialSampler
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.file_utils import is_datasets_available, is_sagemaker_mp_enabled
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
LengthGroupedSampler,
)
from transformers.trainer_utils import PredictionOutput, speed_metrics
from transformers.training_args import ParallelMode
from .training_arguments import WrappedSeq2SeqTrainingArguments
def lmap(f: Callable, x: Iterable) -> List:
    """Apply *f* to every element of *x* and return the results as a list."""
    return [f(item) for item in x]
163,497 | import collections
import json
import time
import os
from typing import Any, Dict, List, Optional, Tuple, Union, Callable, Iterable
from typing import NamedTuple
import datasets
from datasets import load_metric
import numpy as np
import torch
import transformers.trainer_seq2seq
from torch.utils.data import Dataset
from packaging import version
from torch import nn
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import SequentialSampler
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.file_utils import is_datasets_available, is_sagemaker_mp_enabled
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
LengthGroupedSampler,
)
from transformers.trainer_utils import PredictionOutput, speed_metrics
from transformers.training_args import ParallelMode
from .training_arguments import WrappedSeq2SeqTrainingArguments
def normalize(x: str) -> str:
    """Normalize a SQL string: drop brackets, restore the multi-word
    keywords, and collapse runs of whitespace to single spaces."""
    replacements = (
        ("[", " "),
        ("]", " "),
        ("group_by", "group by"),
        ("order_by", "order by"),
    )
    for old, new in replacements:
        x = x.replace(old, new)
    return " ".join(x.strip().split())
163,498 | import collections
import json
import time
import os
from typing import Any, Dict, List, Optional, Tuple, Union, Callable, Iterable
from typing import NamedTuple
import datasets
from datasets import load_metric
import numpy as np
import torch
import transformers.trainer_seq2seq
from torch.utils.data import Dataset
from packaging import version
from torch import nn
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import SequentialSampler
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.file_utils import is_datasets_available, is_sagemaker_mp_enabled
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
LengthGroupedSampler,
)
from transformers.trainer_utils import PredictionOutput, speed_metrics
from transformers.training_args import ParallelMode
from .training_arguments import WrappedSeq2SeqTrainingArguments
# Pairs decoded model predictions with the raw evaluation examples.
EvalPrediction = NamedTuple(
    "EvalPrediction", [("predictions", List[str]), ("items", List[dict])]
)
def squad_metrics(p: EvalPrediction):
    """Compute SQuAD evaluation metrics for the given predictions/references."""
    return load_metric("./squad.py", cache_dir="./cache").compute(
        predictions=p.predictions, references=p.items
    )
163,499 | import importlib
def get_model(model):
    """Import ``models.<model>`` and return its ``Model`` class."""
    module = importlib.import_module("models.{}".format(model))
    return module.Model
163,500 | import importlib
def get_constructor(constructor):
    """Import the module named by *constructor* and return its ``Constructor`` class."""
    module = importlib.import_module("{}".format(constructor))
    return module.Constructor
163,501 | import importlib
def get_evaluator(evaluate_tool):
    """Import the module named by *evaluate_tool* and return its ``EvaluateTool`` class."""
    module = importlib.import_module("{}".format(evaluate_tool))
    return module.EvaluateTool
163,505 | import json
import sqlite3
from nltk import word_tokenize
def tokenize(string):
    """Tokenize a SQL query, keeping quoted string values as single tokens
    and re-fusing two-character comparison operators (!=, >=, <=)."""
    string = str(string)
    string = string.replace("\'", "\"")  # ensures all string values wrapped by "" problem??
    quote_idxs = [idx for idx, char in enumerate(string) if char == '"']
    assert len(quote_idxs) % 2 == 0, "Unexpected quote"

    # keep string value as token
    vals = {}
    for i in range(len(quote_idxs)-1, -1, -2):
        # Walk quote pairs right-to-left so earlier indices stay valid while
        # the string is rewritten with placeholder keys.
        qidx1 = quote_idxs[i-1]
        qidx2 = quote_idxs[i]
        val = string[qidx1: qidx2+1]
        key = "__val_{}_{}__".format(qidx1, qidx2)
        string = string[:qidx1] + key + string[qidx2+1:]
        vals[key] = val

    # Lowercase everything; the quoted values were masked out above, so
    # their original case survives when they are substituted back in.
    toks = [word.lower() for word in word_tokenize(string)]
    # replace with string value token
    for i in range(len(toks)):
        if toks[i] in vals:
            toks[i] = vals[toks[i]]

    # find if there exists !=, >=, <=
    eq_idxs = [idx for idx, tok in enumerate(toks) if tok == "="]
    eq_idxs.reverse()  # rebuild right-to-left so indices stay valid
    prefix = ('!', '>', '<')
    for eq_idx in eq_idxs:
        pre_tok = toks[eq_idx-1]
        if pre_tok in prefix:
            # Merge e.g. ['<', '='] back into a single '<=' token.
            toks = toks[:eq_idx-1] + [pre_tok + "="] + toks[eq_idx+1: ]
    return toks
def get_tables_with_alias(schema, toks):
    """Map every alias found in *toks*, plus every real table name, to its
    table name. Returns the mapping and the normalized token list."""
    toks = normalize_table_alias(toks)
    tables = scan_alias(toks)
    for table_name in schema:
        # A real table name must never collide with a scanned alias.
        assert table_name not in tables, "Alias {} has the same name in table".format(table_name)
        tables[table_name] = table_name
    return tables, toks
def parse_sql(toks, start_idx, tables_with_alias, schema):
    """Parse the token stream starting at *start_idx* into a Spider SQL dict.

    Returns:
        (next_idx, sql): index of the first unconsumed token and the parsed
        query structure.
    """
    isBlock = False # indicate whether this is a block of sql/sub-sql
    len_ = len(toks)
    idx = start_idx

    sql = {}
    if toks[idx] == '(':
        isBlock = True
        idx += 1

    # parse from clause in order to get default tables
    # (FROM is scanned first, from start_idx, so the SELECT parser below
    # knows which tables unqualified column names refer to)
    from_end_idx, table_units, conds, default_tables = parse_from(toks, start_idx, tables_with_alias, schema)
    sql['from'] = {'table_units': table_units, 'conds': conds}
    # select clause
    _, select_col_units = parse_select(toks, idx, tables_with_alias, schema, default_tables)
    idx = from_end_idx  # resume scanning after the FROM clause
    sql['select'] = select_col_units
    # where clause
    idx, where_conds = parse_where(toks, idx, tables_with_alias, schema, default_tables)
    sql['where'] = where_conds
    # group by clause
    idx, group_col_units = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['groupBy'] = group_col_units
    # having clause
    idx, having_conds = parse_having(toks, idx, tables_with_alias, schema, default_tables)
    sql['having'] = having_conds
    # order by clause
    idx, order_col_units = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['orderBy'] = order_col_units
    # limit clause
    idx, limit_val = parse_limit(toks, idx)
    sql['limit'] = limit_val

    idx = skip_semicolon(toks, idx)
    if isBlock:
        assert toks[idx] == ')'
        idx += 1  # skip ')'
    idx = skip_semicolon(toks, idx)

    # intersect/union/except clause
    for op in SQL_OPS:  # initialize IUE
        sql[op] = None
    if idx < len_ and toks[idx] in SQL_OPS:
        # The right-hand side of INTERSECT/UNION/EXCEPT is a full sub-query.
        sql_op = toks[idx]
        idx += 1
        idx, IUE_sql = parse_sql(toks, idx, tables_with_alias, schema)
        sql[sql_op] = IUE_sql
    return idx, sql
def get_sql(schema, query):
    """Tokenize *query* and parse it into the Spider SQL dict structure."""
    tokens = tokenize(query)
    alias_map, tokens = get_tables_with_alias(schema.schema, tokens)
    _, parsed = parse_sql(tokens, 0, alias_map, schema)
    return parsed
163,506 | from collections import OrderedDict, Counter
from itertools import chain
import re, os
def remove_comment(text):
    """Strip '#' comments from *text* and drop any lines left empty."""
    without_comments = re.sub(r"#.*", "", text)
    kept_lines = [line for line in without_comments.split('\n') if line]
    return '\n'.join(kept_lines)
163,507 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def condition_has_or(conds):
    """Return True if any connector in the condition list is 'or'.

    *conds* alternates cond_units and 'and'/'or' connectors; the connectors
    sit at the odd indices.
    """
    connectors = conds[1::2]
    return 'or' in connectors
163,508 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')

def condition_has_like(conds):
    """Return True if any cond_unit in *conds* uses the LIKE operator."""
    like_id = WHERE_OPS.index('like')
    # cond_units occupy the even indices; index 1 of a unit is the op id.
    return any(cond_unit[1] == like_id for cond_unit in conds[::2])
163,509 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def condition_has_sql(conds):
    """Return True if any cond_unit's value (val1/val2) is a nested SQL dict."""
    for cond_unit in conds[::2]:
        # A dict value means the condition compares against a sub-query.
        if type(cond_unit[3]) is dict or type(cond_unit[4]) is dict:
            return True
    return False
163,510 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
UNIT_OPS = ('none', '-', '+', "*", '/')

def val_has_op(val_unit):
    """Return True if the val_unit applies an arithmetic unit operator."""
    none_op = UNIT_OPS.index('none')
    return val_unit[0] != none_op
163,511 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def accuracy(count, total):
    """All-or-nothing accuracy: 1 when count equals total, else 0."""
    return 1 if count == total else 0
163,512 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def recall(count, total):
    """All-or-nothing recall: 1 when count equals total, else 0."""
    return int(count == total)
163,513 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def F1(acc, rec):
    """Harmonic mean of accuracy and recall; 0 when both are 0."""
    denominator = acc + rec
    if denominator == 0:
        return 0
    return (2. * acc * rec) / denominator
163,514 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def get_scores(count, pred_total, label_total):
    """Return (acc, rec, f1): all 1 on an exact component match, else all 0."""
    # A component matches only when prediction and gold have the same size
    # and every predicted item was counted as correct.
    if pred_total == label_total == count:
        return 1, 1, 1
    return 0, 0, 0
163,515 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_sel(pred, label):
    """Count matching SELECT units, both with and without aggregation.

    Returns (label_total, pred_total, cnt, cnt_wo_agg). NOTE: consumes
    (mutates) the gold select-unit list while matching, like the original.
    """
    pred_units = pred['select'][1]
    label_units = label['select'][1]
    label_cols = [unit[1] for unit in label_units]
    pred_total, label_total = len(pred_units), len(label_units)
    cnt = cnt_wo_agg = 0
    for unit in pred_units:
        if unit in label_units:
            cnt += 1
            label_units.remove(unit)
        if unit[1] in label_cols:
            cnt_wo_agg += 1
            label_cols.remove(unit[1])
    return label_total, pred_total, cnt, cnt_wo_agg
163,516 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_where(pred, label):
    """Count matching WHERE cond_units, with and without the operator/value.

    Returns (label_total, pred_total, cnt, cnt_wo_agg).
    """
    pred_conds = pred['where'][::2]
    label_conds = label['where'][::2]  # val1 is also considered
    label_vals = [cond[2] for cond in label_conds]  # the val_units only
    pred_total, label_total = len(pred_conds), len(label_conds)
    cnt = cnt_wo_agg = 0
    for cond in pred_conds:
        if cond in label_conds:
            cnt += 1
            label_conds.remove(cond)
        if cond[2] in label_vals:
            cnt_wo_agg += 1
            label_vals.remove(cond[2])
    return label_total, pred_total, cnt, cnt_wo_agg
163,517 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_group(pred, label):
    """Count matching GROUP BY columns, comparing with table prefixes stripped."""
    def strip_table(col):
        # "t1.name" -> "name"; bare column names pass through.
        return col.split(".")[1] if "." in col else col

    pred_cols = [strip_table(unit[1]) for unit in pred['groupBy']]
    label_cols = [strip_table(unit[1]) for unit in label['groupBy']]
    pred_total, label_total = len(pred_cols), len(label_cols)
    cnt = 0
    for col in pred_cols:
        if col in label_cols:
            cnt += 1
            label_cols.remove(col)
    return label_total, pred_total, cnt
163,518 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_having(pred, label):
    """Score the GROUP BY + HAVING pair: it only counts when the grouped
    columns and the full HAVING clause match exactly."""
    pred_total = 1 if pred['groupBy'] else 0
    label_total = 1 if label['groupBy'] else 0
    cnt = 0
    if pred_total == label_total == 1:
        cols_match = [u[1] for u in pred['groupBy']] == [u[1] for u in label['groupBy']]
        if cols_match and pred['having'] == label['having']:
            cnt = 1
    return label_total, pred_total, cnt
163,519 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_order(pred, label):
    """Score ORDER BY: it counts only when the clause matches exactly and
    the presence/absence of LIMIT agrees between prediction and gold."""
    pred_total = 1 if len(pred['orderBy']) > 0 else 0
    label_total = 1 if len(label['orderBy']) > 0 else 0
    cnt = 0
    limits_agree = (pred['limit'] is None) == (label['limit'] is None)
    if len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and limits_agree:
        cnt = 1
    return label_total, pred_total, cnt
163,520 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_and_or(pred, label):
    """Compare the sets of AND/OR connectors used in the WHERE clauses."""
    pred_connectors = set(pred['where'][1::2])
    label_connectors = set(label['where'][1::2])
    if pred_connectors == label_connectors:
        return 1, 1, 1
    return len(pred_connectors), len(label_connectors), 0
163,521 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_nested(pred, label):
    """Score one nested (intersect/union/except) sub-query pair by exact match."""
    pred_total = 0 if pred is None else 1
    label_total = 0 if label is None else 1
    cnt = 0
    if pred is not None and label is not None:
        cnt += Evaluator().eval_exact_match(pred, label)
    return label_total, pred_total, cnt

def eval_IUEN(pred, label):
    """Aggregate nested-query scores over INTERSECT, EXCEPT and UNION."""
    label_total = pred_total = cnt = 0
    for op in ('intersect', 'except', 'union'):
        lt, pt, c = eval_nested(pred[op], label[op])
        label_total += lt
        pred_total += pt
        cnt += c
    return label_total, pred_total, cnt
163,522 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def get_keywords(sql):
    """Collect the set of SQL keywords used by a parsed query dict."""
    res = set()
    if len(sql['where']) > 0:
        res.add('where')
    if len(sql['groupBy']) > 0:
        res.add('group')
    if len(sql['having']) > 0:
        res.add('having')
    if len(sql['orderBy']) > 0:
        res.add(sql['orderBy'][0])  # the sort direction token
        res.add('order')
    if sql['limit'] is not None:
        res.add('limit')
    for op in ('except', 'union', 'intersect'):
        if sql[op] is not None:
            res.add(op)

    # or keyword: connectors sit at the odd indices of each condition list
    connectors = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    if 'or' in connectors:
        res.add('or')

    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    # not keyword: cond_unit[0] is the not-flag
    if any(cond_unit[0] for cond_unit in cond_units):
        res.add('not')
    # in keyword
    if any(cond_unit[1] == WHERE_OPS.index('in') for cond_unit in cond_units):
        res.add('in')
    # like keyword
    if any(cond_unit[1] == WHERE_OPS.index('like') for cond_unit in cond_units):
        res.add('like')
    return res

def eval_keywords(pred, label):
    """Count keywords shared between the predicted and gold queries."""
    pred_keywords = get_keywords(pred)
    label_keywords = get_keywords(label)
    shared = len(pred_keywords & label_keywords)
    return len(label_keywords), len(pred_keywords), shared
163,523 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')

def count_component1(sql):
    """Count 'component 1' hardness features: extra clauses, joins, and
    OR/LIKE usage, following the Spider hardness criteria."""
    count = 0
    for clause in ('where', 'groupBy', 'orderBy'):
        if len(sql[clause]) > 0:
            count += 1
    if sql['limit'] is not None:
        count += 1
    table_units = sql['from']['table_units']
    if len(table_units) > 0:  # each table beyond the first implies a JOIN
        count += len(table_units) - 1

    connectors = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    count += connectors.count('or')

    like_id = WHERE_OPS.index('like')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    count += sum(1 for cond_unit in cond_units if cond_unit[1] == like_id)
    return count
163,524 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def get_nestedSQL(sql):
    """Collect the nested sub-queries contained in *sql*.

    NOTE(review): the body of this function was missing in the original
    source — only the (wrongly nested) ``count_component2`` definition
    remained. The body below is restored following the reference Spider
    evaluation script; confirm against the upstream repository.
    """
    nested = []
    # Sub-queries can appear as condition values (val1/val2 of a cond_unit)...
    for cond_unit in sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]:
        if type(cond_unit[3]) is dict:
            nested.append(cond_unit[3])
        if type(cond_unit[4]) is dict:
            nested.append(cond_unit[4])
    # ...or as the right-hand side of INTERSECT / EXCEPT / UNION.
    if sql['intersect'] is not None:
        nested.append(sql['intersect'])
    if sql['except'] is not None:
        nested.append(sql['except'])
    if sql['union'] is not None:
        nested.append(sql['union'])
    return nested

def count_component2(sql):
    """Count 'component 2' hardness features: the number of nested queries."""
    nested = get_nestedSQL(sql)
    return len(nested)
163,525 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def count_agg(units):
    """Count how many of *units* carry an aggregation function."""
    return sum(1 for unit in units if has_agg(unit))

def count_others(sql):
    """Count 'others' hardness features: multiple aggregations, multiple
    selected columns, multiple WHERE conditions, multiple GROUP BY columns."""
    count = 0
    # number of aggregation
    agg_count = count_agg(sql['select'][1])
    agg_count += count_agg(sql['where'][::2])
    agg_count += count_agg(sql['groupBy'])
    if len(sql['orderBy']) > 0:
        order_units = [unit[1] for unit in sql['orderBy'][1] if unit[1]]
        order_units += [unit[2] for unit in sql['orderBy'][1] if unit[2]]
        agg_count += count_agg(order_units)
    agg_count += count_agg(sql['having'])
    if agg_count > 1:
        count += 1
    # number of select columns
    if len(sql['select'][1]) > 1:
        count += 1
    # number of where conditions
    if len(sql['where']) > 1:
        count += 1
    # number of group by clauses
    if len(sql['groupBy']) > 1:
        count += 1
    return count
163,526 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def isValidSQL(sql, db):
    """Return True if *sql* executes without error against the SQLite
    database at path *db*, False otherwise.

    Args:
        sql: the SQL string to try executing.
        db: path to the SQLite database file.
    """
    conn = sqlite3.connect(db)
    try:
        conn.cursor().execute(sql)
    except Exception:
        # Any execution failure just means the SQL is invalid for this db.
        # (Narrowed from the original bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return False
    finally:
        # BUG FIX: the original leaked the connection on every call.
        conn.close()
    return True
163,527 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
class Evaluator:
    """A simple evaluator for Spider-style partial / exact SQL matching."""

    def __init__(self):
        # Filled by eval_exact_match with the most recent partial breakdown.
        self.partial_scores = None

    def eval_hardness(self, sql):
        """Classify *sql* as easy/medium/hard/extra from its component counts."""
        comp1 = count_component1(sql)
        comp2 = count_component2(sql)
        others = count_others(sql)
        if comp1 <= 1 and others == 0 and comp2 == 0:
            return "easy"
        if (others <= 2 and comp1 <= 1 and comp2 == 0) or \
                (comp1 <= 2 and others < 2 and comp2 == 0):
            return "medium"
        if (others > 2 and comp1 <= 2 and comp2 == 0) or \
                (2 < comp1 <= 3 and others <= 2 and comp2 == 0) or \
                (comp1 <= 1 and others == 0 and comp2 <= 1):
            return "hard"
        return "extra"

    def eval_exact_match(self, pred, label):
        """1 iff every partial component matches and the FROM tables agree."""
        self.partial_scores = self.eval_partial_match(pred, label)
        if any(score['f1'] != 1 for score in self.partial_scores.values()):
            return 0
        label_units = label['from']['table_units']
        if len(label_units) > 0:
            pred_units = pred['from']['table_units']
            if label_units[0][0] == 'sql' and pred_units[0][0] == 'sql':
                # still wrong
                return self.eval_exact_match(pred_units[0][1], label_units[0][1])
            return sorted(label_units) == sorted(pred_units)
        return 1

    def eval_partial_match(self, pred, label):
        """Return the per-component accuracy / recall / F1 breakdown."""
        def entry(cnt, pred_total, label_total):
            acc, rec, f1 = get_scores(cnt, pred_total, label_total)
            return {'acc': acc, 'rec': rec, 'f1': f1,
                    'label_total': label_total, 'pred_total': pred_total}

        res = {}
        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        res['select'] = entry(cnt, pred_total, label_total)
        res['select(no AGG)'] = entry(cnt_wo_agg, pred_total, label_total)

        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        res['where'] = entry(cnt, pred_total, label_total)
        res['where(no OP)'] = entry(cnt_wo_agg, pred_total, label_total)

        label_total, pred_total, cnt = eval_group(pred, label)
        res['group(no Having)'] = entry(cnt, pred_total, label_total)

        label_total, pred_total, cnt = eval_having(pred, label)
        res['group'] = entry(cnt, pred_total, label_total)

        label_total, pred_total, cnt = eval_order(pred, label)
        res['order'] = entry(cnt, pred_total, label_total)

        label_total, pred_total, cnt = eval_and_or(pred, label)
        res['and/or'] = entry(cnt, pred_total, label_total)

        label_total, pred_total, cnt = eval_IUEN(pred, label)
        res['IUEN'] = entry(cnt, pred_total, label_total)

        label_total, pred_total, cnt = eval_keywords(pred, label)
        res['keywords'] = entry(cnt, pred_total, label_total)
        return res
def print_scores(scores, etype):
    """Pretty-print the execution / exact-match / partial-match score tables.

    Args:
        scores: per-hardness-level dict with 'count', 'exec', 'exact' and
            'partial' entries.
        etype: which tables to print — 'exec', 'match', or 'all'.
    """
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    # One 20-char label column plus one 20-char column per hardness level.
    header_fmt = "{:20} " + " ".join(["{:20}"] * len(levels))
    count_fmt = "{:20} " + " ".join(["{:<20d}"] * len(levels))
    float_fmt = "{:20} " + " ".join(["{:<20.3f}"] * len(levels))

    print(header_fmt.format("", *levels))
    print(count_fmt.format("count", *[scores[level]['count'] for level in levels]))

    if etype in ["all", "exec"]:
        print('===================== EXECUTION ACCURACY =====================')
        print(float_fmt.format("execution", *[scores[level]['exec'] for level in levels]))

    if etype in ["all", "match"]:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        print(float_fmt.format("exact match", *[scores[level]['exact'] for level in levels]))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            print(float_fmt.format(type_, *[scores[level]['partial'][type_]['acc'] for level in levels]))
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            print(float_fmt.format(type_, *[scores[level]['partial'][type_]['rec'] for level in levels]))
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            print(float_fmt.format(type_, *[scores[level]['partial'][type_]['f1'] for level in levels]))
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    Return True if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).

    :param db: path to the sqlite database file
    :param p_str: predicted SQL string (may be invalid -> returns False)
    :param g_str: gold SQL string (assumed valid; errors propagate)
    :param pred: parsed predicted SQL dict (only 'select' is inspected)
    :param gold: parsed gold SQL dict (only 'select' is inspected)
    """
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    try:
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # an unexecutable prediction counts as a mismatch
            return False
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        conn.close()  # fix: the connection was previously leaked on every call

    def res_map(res, val_units):
        # Key each selected val_unit by its structure so that column order
        # in the SELECT clause does not affect the comparison.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
def rebuild_sql_val(sql):
    """Recursively normalize values inside a parsed SQL dict (no-op unless DISABLE_VALUE)."""
    if sql is None or not DISABLE_VALUE:
        return sql
    # A nested query may appear as the first table unit of the FROM clause.
    table_units = sql['from']['table_units']
    if len(table_units) > 0 and table_units[0][0] == 'sql':
        table_units[0] = ('sql', rebuild_sql_val(table_units[0][1]))
    # Normalize every condition list in place.
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    for cond_clause in ('having', 'where'):
        sql[cond_clause] = rebuild_condition_val(sql[cond_clause])
    # Recurse into set-operation sub-queries.
    for sub_clause in ('intersect', 'except', 'union'):
        sql[sub_clause] = rebuild_sql_val(sql[sub_clause])
    return sql
def build_valid_col_units(table_units, schema):
    """Collect schema ids ('__tab.col__') whose table appears in table_units."""
    tab_ids = [unit[1] for unit in table_units if unit[0] == TABLE_TYPE['table_unit']]
    # '__tab__'[:-2] strips the trailing underscores, leaving '__tab' as a prefix.
    prefixs = [tab_id[:-2] for tab_id in tab_ids]
    return [value for value in schema.idMap.values()
            if '.' in value and value[:value.index('.')] in prefixs]
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Rewrite every column reference in the parsed SQL to its canonical id via kmap."""
    if sql is None:
        return sql
    # Clause-specific rewriters for the main query body.
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    for cond_clause in ('where', 'having'):
        sql[cond_clause] = rebuild_condition_col(valid_col_units, sql[cond_clause], kmap)
    # Set-operation sub-queries are rewritten recursively.
    for sub_clause in ('intersect', 'except', 'union'):
        sql[sub_clause] = rebuild_sql_col(valid_col_units, sql[sub_clause], kmap)
    return sql
class Schema:
    """
    Simple schema which maps table&column to a unique identifier
    """
    def __init__(self, schema):
        self._schema = schema
        self._idMap = self._map(self._schema)

    # fix: these accessors must be properties -- callers elsewhere in this file
    # use attribute access (e.g. `schema.idMap.values()`, `schema.schema`) and
    # would otherwise receive bound methods instead of the dicts.
    @property
    def schema(self):
        """Mapping of lowercased table name -> list of lowercased column names."""
        return self._schema

    @property
    def idMap(self):
        """Mapping: 'tab.col' -> '__tab.col__', 'tab' -> '__tab__', '*' -> '__all__'."""
        return self._idMap

    def _map(self, schema):
        # Keys are lowercased so lookups are case-insensitive.
        idMap = {"*": "__all__"}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + "." + val.lower()] = (
                    "__" + key.lower() + "." + val.lower() + "__"
                )
        for key in schema:
            idMap[key.lower()] = "__" + key.lower() + "__"
        return idMap
def get_schema(db):
    """
    Get database's schema, which is a dict with table name as key
    and list of column names as value
    :param db: database path
    :return: schema dict (all names lowercased)
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        # fetch table names
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]
        # fetch column names per table
        for table in tables:
            cursor.execute("PRAGMA table_info({})".format(table))
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        conn.close()  # fix: the connection was previously leaked
    return schema
def get_sql(schema, query):
    """Parse a raw SQL string into the Spider SQL dict, resolving names via schema."""
    query_toks = tokenize(query)
    alias_map = get_tables_with_alias(schema.schema, query_toks)
    _, parsed_sql = parse_sql(query_toks, 0, alias_map, schema)
    return parsed_sql
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Evaluate predicted SQL against gold SQL, file by file.

    :param gold: path to gold file; each line is "<sql>\t<db_id>"
    :param predict: path to prediction file; one SQL per line, aligned with gold
    :param db_dir: directory containing <db_id>/<db_id>.sqlite databases
    :param etype: 'exec', 'match' or 'all' -- which metrics to compute
    :param kmaps: db_id -> foreign-key map ('__tab.col__' -> pivot '__tab.col__')
    :return: nested scores dict (hardness level -> metrics), also printed
    """
    with open(gold) as f:
        glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    with open(predict) as f:
        plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # plist = [("select max(Share),min(Share) from performance where Type != 'terminal'", "orchestra")]
    # glist = [("SELECT max(SHARE) , min(SHARE) FROM performance WHERE TYPE != 'Live final'", "orchestra")]
    evaluator = Evaluator()
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    scores = {}
    # Initialize per-level accumulators.
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
    eval_err_num = 0
    for p, g in zip(plist, glist):
        p_str = p[0]
        g_str, db = g
        db_name = db
        db = os.path.join(db_dir, db, db + ".sqlite")
        schema = Schema(get_schema(db))
        # .schema: map lowercased raw tab name to lowercased raw col name list
        # .idMap: map tab name to __tab__, tab.col to __tab.col__, * to __all__, all lowercased
        g_sql = get_sql(schema, g_str)
        hardness = evaluator.eval_hardness(g_sql)
        scores[hardness]['count'] += 1
        scores['all']['count'] += 1
        try:
            p_sql = get_sql(schema, p_str)
        except:
            # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
            p_sql = {
            "except": None,
            "from": {
                "conds": [],
                "table_units": []
            },
            "groupBy": [],
            "having": [],
            "intersect": None,
            "limit": None,
            "orderBy": [],
            "select": [
                False,
                []
            ],
            "union": None,
            "where": []
            }
            eval_err_num += 1
            print("eval_err_num:{}".format(eval_err_num))
        # rebuild sql for value evaluation
        kmap = kmaps[db_name]
        g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
        # extract all __tab.col__ that has tab in from clause, not include __all__
        g_sql = rebuild_sql_val(g_sql)
        g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap) # kmap: map __tab.col__ to pivot __tab.col__
        p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
        p_sql = rebuild_sql_val(p_sql)
        p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
        if etype in ["all", "exec"]:
            exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
            if exec_score:
                scores[hardness]['exec'] += 1
        if etype in ["all", "match"]:
            exact_score = evaluator.eval_exact_match(p_sql, g_sql)
            partial_scores = evaluator.partial_scores
            # Print mismatched pairs for error analysis.
            if exact_score == 0:
                print("{} pred: {}".format(hardness,p_str))
                print("{} gold: {}".format(hardness,g_str))
                print("")
            scores[hardness]['exact'] += exact_score
            scores['all']['exact'] += exact_score
            # Accumulate partial-match sums separately for the sample's hardness
            # level and for the 'all' bucket; acc/rec counts only grow when the
            # corresponding pred/label totals are non-zero.
            for type_ in partial_types:
                if partial_scores[type_]['pred_total'] > 0:
                    scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores[hardness]['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores[hardness]['partial'][type_]['rec_count'] += 1
                scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                if partial_scores[type_]['pred_total'] > 0:
                    scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores['all']['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores['all']['partial'][type_]['rec_count'] += 1
                scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
            entries.append({
                'predictSQL': p_str,
                'goldSQL': g_str,
                'hardness': hardness,
                'exact': exact_score,
                'partial': partial_scores
            })
    # Turn accumulated sums into averages per level.
    for level in levels:
        if scores[level]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[level]['exec'] /= scores[level]['count']
        if etype in ["all", "match"]:
            scores[level]['exact'] /= scores[level]['count']
            for type_ in partial_types:
                if scores[level]['partial'][type_]['acc_count'] == 0:
                    scores[level]['partial'][type_]['acc'] = 0
                else:
                    scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
                                                             scores[level]['partial'][type_]['acc_count'] * 1.0
                if scores[level]['partial'][type_]['rec_count'] == 0:
                    scores[level]['partial'][type_]['rec'] = 0
                else:
                    scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
                                                             scores[level]['partial'][type_]['rec_count'] * 1.0
                # NOTE(review): f1 is set to 1 when both acc and rec are 0 --
                # looks intentional (vacuously perfect when clause never occurs), confirm.
                if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                    scores[level]['partial'][type_]['f1'] = 1
                else:
                    scores[level]['partial'][type_]['f1'] = \
                        2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
                        scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
    print_scores(scores, etype)
    return scores
163,528 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def build_foreign_key_map(entry):
    """Map each column id ('__tab.col__') to the canonical member of its FK group.

    The canonical member is the column with the smallest global index inside the
    connected group formed by the entry's foreign-key pairs.
    """
    cols_orig = entry["column_names_original"]
    tables_orig = entry["table_names_original"]

    # Rebuild column identifiers exactly as Schema.idMap does ('*' -> '__all__').
    cols = []
    for tab_idx, col_name in cols_orig:
        if tab_idx >= 0:
            cols.append("__" + tables_orig[tab_idx].lower() + "." + col_name.lower() + "__")
        else:
            cols.append("__all__")

    def find_or_create_group(k1, k2, groups):
        # Return the first existing group containing either key, else start a new one.
        for group in groups:
            if k1 in group or k2 in group:
                return group
        fresh = set()
        groups.append(fresh)
        return fresh

    # Greedily union foreign-key pairs into groups of linked column indices.
    groups = []
    for key1, key2 in entry["foreign_keys"]:
        group = find_or_create_group(key1, key2, groups)
        group.add(key1)
        group.add(key2)

    # Within each group, everything maps to the smallest column index.
    foreign_key_map = {}
    for group in groups:
        members = sorted(list(group))
        canonical = cols[members[0]]
        for idx in members:
            foreign_key_map[cols[idx]] = canonical
    return foreign_key_map
def build_foreign_key_map_from_json(table):
    """Load a tables.json file and return {db_id: foreign-key map}."""
    with open(table) as fp:
        entries = json.load(fp)
    return {entry['db_id']: build_foreign_key_map(entry) for entry in entries}
163,529 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
import torch.nn.functional as F
from utils.constants import MAX_RELATIVE_DIST
from transformers import AutoModel, AutoConfig, AutoTokenizer
import geoopt as gt
def is_number(s):
    """Return True if *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
163,530 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
import torch.nn.functional as F
from utils.constants import MAX_RELATIVE_DIST
from transformers import AutoModel, AutoConfig, AutoTokenizer
import geoopt as gt
def agg(input):
    """Average *input* over dimension 1, keeping that dim so the result broadcasts back."""
    num_items = input.size(1)
    total = torch.sum(input, dim=1, keepdim=True)
    return total / num_items
163,531 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
import torch.nn.functional as F
from utils.constants import MAX_RELATIVE_DIST
from transformers import AutoModel, AutoConfig, AutoTokenizer
import geoopt as gt
The provided code snippet includes necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \"
Here is the function:
def quote_normalization(question):
    """Split every quotation mark off into its own standalone '"' token."""
    marks = ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    normalized = []
    for idx, tok in enumerate(question):
        starts_quoted = len(tok) > 2 and tok[0] in marks
        ends_quoted = len(tok) > 2 and tok[-1] in marks
        if starts_quoted and ends_quoted:
            normalized.extend(["\"", tok[1:-1], "\""])
        elif starts_quoted:
            normalized.extend(["\"", tok[1:]])
        elif ends_quoted:
            normalized.extend([tok[:-1], "\""])
        elif tok in marks:
            normalized.append("\"")
        elif len(tok) == 2 and tok[0] in marks and idx + 1 < len(question) and question[idx + 1] in marks:
            # special case: a length-1 entity value, e.g. ["'a", "'"] -> '"', 'a', '"'
            normalized.extend(["\"", tok[1]])
        else:
            normalized.append(tok)
    return normalized
163,532 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import get_sql
# Annotate each example in a Spider-style dataset with its parsed SQL ('sql' key).
# NOTE(review): `table_file`, `sql_path`, `output_file` and `sql_data_new` are not
# defined in this fragment -- presumably bound earlier in the enclosing script; confirm.
schemas, db_names, tables = get_schemas_from_json(table_file)
with open(sql_path) as inf:
    sql_data = json.load(inf)
for data in sql_data:
    try:
        # Hard-coded patches for two known-bad examples in the original dataset.
        if data['query'] == 'SELECT T1.company_name FROM Third_Party_Companies AS T1 JOIN Maintenance_Contracts AS T2 ON T1.company_id = T2.maintenance_contract_company_id JOIN Ref_Company_Types AS T3 ON T1.company_type_code = T3.company_type_code ORDER BY T2.contract_end_date DESC LIMIT 1':
            data['query'] = 'SELECT T1.company_type FROM Third_Party_Companies AS T1 JOIN Maintenance_Contracts AS T2 ON T1.company_id = T2.maintenance_contract_company_id ORDER BY T2.contract_end_date DESC LIMIT 1'
            data['query_toks'] = ['SELECT', 'T1.company_type', 'FROM', 'Third_Party_Companies', 'AS', 'T1', 'JOIN', 'Maintenance_Contracts', 'AS', 'T2', 'ON', 'T1.company_id', '=', 'T2.maintenance_contract_company_id', 'ORDER', 'BY', 'T2.contract_end_date', 'DESC', 'LIMIT', '1']
            data['query_toks_no_value'] = ['select', 't1', '.', 'company_type', 'from', 'third_party_companies', 'as', 't1', 'join', 'maintenance_contracts', 'as', 't2', 'on', 't1', '.', 'company_id', '=', 't2', '.', 'maintenance_contract_company_id', 'order', 'by', 't2', '.', 'contract_end_date', 'desc', 'limit', 'value']
            data['question'] = 'What is the type of the company who concluded its contracts most recently?'
            data['question_toks'] = ['What', 'is', 'the', 'type', 'of', 'the', 'company', 'who', 'concluded', 'its', 'contracts', 'most', 'recently', '?']
        if data['query'].startswith('SELECT T1.fname FROM student AS T1 JOIN lives_in AS T2 ON T1.stuid = T2.stuid WHERE T2.dormid IN'):
            # Fix a wrong table alias inside a nested subquery.
            data['query'] = data['query'].replace('IN (SELECT T2.dormid)', 'IN (SELECT T3.dormid)')
            index = data['query_toks'].index('(') + 2
            assert data['query_toks'][index] == 'T2.dormid'
            data['query_toks'][index] = 'T3.dormid'
            index = data['query_toks_no_value'].index('(') + 2
            assert data['query_toks_no_value'][index] == 't2'
            data['query_toks_no_value'][index] = 't3'
        db_id = data["db_id"]
        schema = schemas[db_id]
        table = tables[db_id]
        # NOTE(review): this Schema takes (schema, table) -- a different class from
        # the single-argument evaluation Schema; confirm the intended import.
        schema = Schema(schema, table)
        sql = data["query"]
        sql_label = get_sql(schema, sql)
        data["sql"] = sql_label
        sql_data_new.append(data)
    except:
        # Examples whose SQL fails to parse are reported and skipped.
        print("db_id: ", db_id)
        print("sql: ", sql)
with open(output_file, 'wt') as out:
    json.dump(sql_data_new, out, sort_keys=True, indent=4, separators=(',', ': '))
def get_schemas_from_json(fpath):
    """Read a tables.json file and return (schemas, db_names, tables).

    schemas: db_id -> {lowercased table name: [lowercased column names]}
    db_names: list of database ids in file order
    tables: db_id -> raw original table/column name lists from the json
    """
    with open(fpath) as fp:
        data = json.load(fp)
    db_names = [db['db_id'] for db in data]
    tables = {}
    schemas = {}
    for db in data:
        db_id = db['db_id']
        column_names_original = db['column_names_original']
        table_names_original = db['table_names_original']
        tables[db_id] = {'column_names_original': column_names_original,
                         'table_names_original': table_names_original}
        # Group each table's columns by the table index stored with every column.
        schemas[db_id] = {
            str(tab_name.lower()): [str(col.lower())
                                    for tab_idx, col in column_names_original if tab_idx == i]
            for i, tab_name in enumerate(table_names_original)
        }
    return schemas, db_names, tables
163,533 | import os
import sys
import json
import sqlite3
from os import listdir, makedirs
from os.path import isfile, isdir, join, split, exists, splitext
import traceback
def convert_fk_index(data):
    """Convert foreign keys given as [(tab, col), (ref_tab, ref_col)] name pairs
    into pairs of global column indices into data['column_names_original'].

    :param data: schema dict with 'table_names_original', 'column_names_original',
                 'foreign_keys' (name-pair form)
    :return: list of [cid, ref_cid] index pairs
    """
    fk_holder = []
    for fk in data["foreign_keys"]:
        tn, col, ref_tn, ref_col = fk[0][0], fk[0][1], fk[1][0], fk[1][1]
        ref_cid, cid = None, None
        try:
            tid = data['table_names_original'].index(tn)
            ref_tid = data['table_names_original'].index(ref_tn)
            for i, (tab_id, col_org) in enumerate(data['column_names_original']):
                if tab_id == ref_tid and ref_col == col_org:
                    ref_cid = i
                elif tid == tab_id and col == col_org:
                    cid = i
            # fix: compare against None explicitly -- a valid column index of 0
            # is falsy and was previously dropped by `if ref_cid and cid:`.
            if ref_cid is not None and cid is not None:
                fk_holder.append([cid, ref_cid])
        except Exception:
            traceback.print_exc()
            print("table_names_original: ", data['table_names_original'])
            print("finding tab name: ", tn, ref_tn)
            sys.exit()
    return fk_holder
The provided code snippet includes necessary dependencies for implementing the `dump_db_json_schema` function. Write a Python function `def dump_db_json_schema(db, f)` to solve the following problem:
read table and column info
Here is the function:
def dump_db_json_schema(db, f):
    '''read table and column info

    :param db: path to a sqlite database file
    :param f: database id recorded under 'db_id' in the result
    :return: Spider-style schema dict (names, coarse column types, pks, fks)
    '''
    conn = sqlite3.connect(db)
    try:
        conn.execute('pragma foreign_keys=ON')
        cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
        data = {'db_id': f,
             'table_names_original': [],
             'table_names': [],
             'column_names_original': [(-1, '*')],
             'column_names': [(-1, '*')],
             'column_types': ['text'],
             'primary_keys': [],
             'foreign_keys': []}
        fk_holder = []
        for i, item in enumerate(cursor.fetchall()):
            table_name = item[0]
            data['table_names_original'].append(table_name)
            data['table_names'].append(table_name.lower().replace("_", ' '))
            # collect raw foreign keys as ((tab, col), (ref_tab, ref_col)) name pairs
            fks = conn.execute("PRAGMA foreign_key_list('{}') ".format(table_name)).fetchall()
            fk_holder.extend([[(table_name, fk[3]), (fk[2], fk[4])] for fk in fks])
            cur = conn.execute("PRAGMA table_info('{}') ".format(table_name))
            for j, col in enumerate(cur.fetchall()):
                data['column_names_original'].append((i, col[1]))
                data['column_names'].append((i, col[1].lower().replace("_", " ")))
                # map the declared sqlite type onto the coarse Spider type set
                col_type = col[2].lower()
                if 'char' in col_type or col_type == '' or 'text' in col_type or 'var' in col_type:
                    data['column_types'].append('text')
                elif 'int' in col_type or 'numeric' in col_type or 'decimal' in col_type or 'number' in col_type\
                        or 'id' in col_type or 'real' in col_type or 'double' in col_type or 'float' in col_type:
                    data['column_types'].append('number')
                elif 'date' in col_type or 'time' in col_type or 'year' in col_type:
                    data['column_types'].append('time')
                elif 'boolean' in col_type:
                    data['column_types'].append('boolean')
                else:
                    data['column_types'].append('others')
                # NOTE(review): col[5] is the 1-based position inside the primary key,
                # so columns 2+ of a composite primary key are skipped here -- confirm intended.
                if col[5] == 1:
                    data['primary_keys'].append(len(data['column_names'])-1)
    finally:
        conn.close()  # fix: the connection was previously leaked
    data["foreign_keys"] = fk_holder
    data['foreign_keys'] = convert_fk_index(data)
    return data
163,534 | import argparse, os, sys, pickle, json
from collections import Counter
def construct_vocab_from_dataset(*data_paths, table_path='data/tables.bin', mwf=4, reference_file=None, output_path=None, sep='\t'):
    """Build a word-frequency vocabulary from preprocessed datasets and serialize it.

    Words absent from the glove reference vocabulary but occurring at least *mwf*
    times are written first (so they can be fine-tuned during training); words
    present in glove follow, ordered by decreasing frequency.
    Returns the total vocabulary size.
    """
    tables = pickle.load(open(table_path, 'rb'))

    # Gather every token that contributes to the vocabulary.
    words = []
    for fp in data_paths:
        for ex in pickle.load(open(fp, 'rb')):
            words.extend(ex['processed_question_toks'])
            db = tables[ex['db_id']]
            words.extend(['table'] * len(db['table_names']))
            words.extend(db['column_types'])
            for col_toks in db['processed_column_toks']:
                words.extend(col_toks)
            for tab_toks in db['processed_table_toks']:
                words.extend(tab_toks)

    vocab = sorted(Counter(words).items(), key=lambda item: - item[1])

    # Load the reference (glove) vocabulary, one word per line.
    glove_vocab = set()
    with open(reference_file, 'r', encoding='utf-8') as inf:
        for line in inf:
            word = line.strip()
            if word == '': continue
            glove_vocab.add(word)

    oov_words = {w for w, _ in vocab if w not in glove_vocab}
    oov_but_freq_words = [(w, c) for w, c in vocab if w in oov_words and c >= mwf]
    print('Out of glove vocabulary size: %d\nAmong them, %d words occur equal or more than %d times in training dataset.' % (len(oov_words), len(oov_but_freq_words), mwf))

    with open(output_path, 'w') as of:
        # first serialize oov but frequent words, allowing fine-tune them during training
        for w, c in oov_but_freq_words:
            of.write(w + sep + str(c) + '\n')
        # next serialize words in both train vocab and glove vocab according to decreasing frequency
        for w, c in vocab:
            if w not in oov_words:
                of.write(w + sep + str(c) + '\n')
    return len(vocab)
163,535 | import os
import traceback
import re
import sys
import json
import sqlite3
import sqlparse
import random
from os import listdir, makedirs
from collections import OrderedDict
from nltk import word_tokenize, tokenize
from os.path import isfile, isdir, join, split, exists, splitext
from process_sql import get_sql
def get_schemas_from_json(fpath):
    """Read a tables.json file and return (schemas, db_names, tables).

    schemas: db_id -> {lowercased table name: [lowercased column names]}
    db_names: list of database ids in file order
    tables: db_id -> raw original table/column name lists from the json
    """
    with open(fpath) as fp:
        data = json.load(fp)
    db_names = [db['db_id'] for db in data]
    tables = {}
    schemas = {}
    for db in data:
        db_id = db['db_id']
        column_names_original = db['column_names_original']
        table_names_original = db['table_names_original']
        tables[db_id] = {'column_names_original': column_names_original,
                         'table_names_original': table_names_original}
        # Group each table's columns by the table index stored with every column.
        schemas[db_id] = {
            str(tab_name.lower()): [str(col.lower())
                                    for tab_idx, col in column_names_original if tab_idx == i]
            for i, tab_name in enumerate(table_names_original)
        }
    return schemas, db_names, tables
163,536 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
import torch.nn.functional as F
from transformers import AutoModel, AutoConfig,AutoTokenizer
def is_number(s):
    """Return True if *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
163,537 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
import torch.nn.functional as F
from transformers import AutoModel, AutoConfig,AutoTokenizer
def agg(input):
    """Average *input* over dimension 1; the aggregated dimension is dropped."""
    num_items = input.size(1)
    total = torch.sum(input, dim=1)
    return total / num_items
163,538 | import os, sqlite3
import numpy as np
import stanza, torch
from nltk.corpus import stopwords
from itertools import product, combinations
import torch.nn.functional as F
from transformers import AutoModel, AutoConfig,AutoTokenizer
The provided code snippet includes necessary dependencies for implementing the `quote_normalization` function. Write a Python function `def quote_normalization(question)` to solve the following problem:
Normalize all usage of quotation marks into a separate \"
Here is the function:
def quote_normalization(question):
    """Split every quotation mark off into its own standalone '"' token."""
    marks = ["'", '"', '`', '‘', '’', '“', '”', '``', "''", "‘‘", "’’"]
    normalized = []
    for idx, tok in enumerate(question):
        starts_quoted = len(tok) > 2 and tok[0] in marks
        ends_quoted = len(tok) > 2 and tok[-1] in marks
        if starts_quoted and ends_quoted:
            normalized.extend(["\"", tok[1:-1], "\""])
        elif starts_quoted:
            normalized.extend(["\"", tok[1:]])
        elif ends_quoted:
            normalized.extend([tok[:-1], "\""])
        elif tok in marks:
            normalized.append("\"")
        elif len(tok) == 2 and tok[0] in marks and idx + 1 < len(question) and question[idx + 1] in marks:
            # special case: a length-1 entity value, e.g. ["'a", "'"] -> '"', 'a', '"'
            normalized.extend(["\"", tok[1]])
        else:
            normalized.append(tok)
    return normalized
163,539 | import sys, os, json, pickle, argparse, time, torch
from argparse import Namespace
from preprocess.process_dataset import process_tables, process_dataset
from preprocess.process_graphs import process_dataset_graph
from preprocess.common_utils import Preprocessor
from preprocess.graph_utils import GraphProcessor
from utils.example import Example
from utils.batch import Batch
from model.model_utils import Registrable
from model.model_constructor import *
# Decode beam hypotheses into SQL strings and write one prediction per line.
# NOTE(review): `args`, `params` and `all_hyps` are not defined in this fragment --
# presumably bound earlier in the enclosing script; confirm.
dataset, tables = preprocess_database_and_dataset(db_dir=args.db_dir, table_path=args.table_path, dataset_path=args.dataset_path, method=params.model)
dataset = load_examples(dataset, tables)
with open(args.output_path, 'w', encoding='utf8') as of:
    evaluator = Example.evaluator
    for idx, hyp in enumerate(all_hyps):
        # hypotheses are aligned 1:1 with dataset examples by index
        pred_sql = evaluator.obtain_sql(hyp, dataset[idx].db)
        # best_ast = hyp[0].tree # by default, the top beam prediction
        # pred_sql = Example.trans.ast_to_surface_code(best_ast, dataset[idx].db)
        of.write(pred_sql + '\n')
def process_tables(processor, tables_list, output_path=None, verbose=False):
    """Preprocess every database schema; optionally pickle the result to output_path.

    :param processor: object exposing preprocess_database(db, verbose=...)
    :param tables_list: list of raw database schema dicts (each with 'db_id')
    :return: dict mapping db_id -> preprocessed schema
    """
    tables = {}
    for db in tables_list:
        if verbose:
            print('*************** Processing database %s **************' % (db['db_id']))
        tables[db['db_id']] = processor.preprocess_database(db, verbose=verbose)
    print('In total, process %d databases .' % (len(tables)))
    if output_path is not None:
        pickle.dump(tables, open(output_path, 'wb'))
    return tables
def process_dataset(processor, dataset, tables, output_path=None, skip_large=False, verbose=False):
    """Preprocess every example against its (already preprocessed) database schema.

    :param processor: preprocessing object forwarded to process_example
    :param dataset: list of raw example dicts (each with 'db_id')
    :param tables: db_id -> preprocessed schema dict
    :param skip_large: drop examples whose database has more than 100 columns
    :return: list of processed examples; pickled to output_path when given

    NOTE(review): ASDLGrammar, TransitionSystem and process_example are not
    defined in this file fragment -- presumably imported elsewhere; confirm.
    """
    from utils.constants import GRAMMAR_FILEPATH
    grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH)
    # a shared SQL transition system drives AST construction for all examples
    trans = TransitionSystem.get_class_by_lang('sql')(grammar)
    processed_dataset = []
    for idx, entry in enumerate(dataset):
        # optionally skip extremely wide schemas to bound memory/time
        if skip_large and len(tables[entry['db_id']]['column_names']) > 100: continue
        if verbose:
            print('*************** Processing %d-th sample **************' % (idx))
        entry = process_example(processor, entry, tables[entry['db_id']], trans, verbose=verbose)
        processed_dataset.append(entry)
    print('In total, process %d samples , skip %d extremely large databases.' % (len(processed_dataset), len(dataset) - len(processed_dataset)))
    if output_path is not None:
        # serialize preprocessed dataset
        pickle.dump(processed_dataset, open(output_path, 'wb'))
    return processed_dataset
def process_dataset_graph(processor, dataset, tables, method, output_path=None, skip_large=False):
    """Attach graph structures to every example; optionally pickle the result.

    :param processor: object exposing process_graph_utils(entry, db, method=...)
    :param method: graph-construction method name forwarded to the processor
    :param skip_large: drop examples whose database has more than 100 columns
    """
    processed_dataset = []
    for idx, entry in enumerate(dataset):
        db = tables[entry['db_id']]
        if skip_large and len(db['column_names']) > 100:
            continue
        # lightweight progress reporting
        if (idx + 1) % 500 == 0:
            print('Processing the %d-th example ...' % (idx + 1))
        processed_dataset.append(processor.process_graph_utils(entry, db, method=method))
    print('In total, process %d samples, skip %d samples .' % (len(processed_dataset), len(dataset) - len(processed_dataset)))
    if output_path is not None:
        # serialize preprocessed dataset
        pickle.dump(processed_dataset, open(output_path, 'wb'))
    return processed_dataset
class Preprocessor():
def __init__(self, db_dir='data/database', db_content=True):
super(Preprocessor, self).__init__()
self.db_dir = db_dir
self.db_content = db_content
self.nlp = stanza.Pipeline('en', processors='tokenize,pos,lemma')#, use_gpu=False)
self.stopwords = stopwords.words("english")
self.device = torch.device("cuda:0")
self.hidden_size=1024
self.max_batch_size = 8
self.plm_model =AutoModel.from_pretrained(os.path.join('./pretrained_models', 'electra-large-discriminator')).to(self.device)
self.plm_tokenizer = AutoTokenizer.from_pretrained(os.path.join('./pretrained_models', 'electra-large-discriminator'))
self.config = self.plm_model.config
self.ball = gt.Stereographic(-1)
self.threshold = 0.4
def pipeline(self, entry: dict, db: dict, verbose: bool = False):
""" db should be preprocessed """
entry = self.preprocess_question(entry, db, verbose=verbose)
entry = self.schema_linking(entry, db, verbose=verbose)
entry = self.extract_subgraph(entry, db, verbose=verbose)
return entry
    def preprocess_database(self, db: dict, verbose: bool = False):
        """ Tokenize, lemmatize, lowercase table and column names for each database.

        Adds to db: processed_table_toks/names, processed_column_toks/names,
        column2table, table2columns, and a (t_num+c_num)^2 'relations' matrix of
        string relation labels between schema items.
        """
        # Lemmatize and lowercase every table name.
        table_toks, table_names = [], []
        for tab in db['table_names']:
            doc = self.nlp(tab)
            tab = [w.lemma.lower() for s in doc.sentences for w in s.words]
            table_toks.append(tab)
            table_names.append(" ".join(tab))
        db['processed_table_toks'], db['processed_table_names'] = table_toks, table_names
        # Same for every column name.
        column_toks, column_names = [], []
        for _, c in db['column_names']:
            doc = self.nlp(c)
            c = [w.lemma.lower() for s in doc.sentences for w in s.words]
            column_toks.append(c)
            column_names.append(" ".join(c))
        db['processed_column_toks'], db['processed_column_names'] = column_toks, column_names
        column2table = list(map(lambda x: x[0], db['column_names'])) # from column id to table id
        table2columns = [[] for _ in range(len(table_names))] # from table id to column ids list
        for col_id, col in enumerate(db['column_names']):
            if col_id == 0: continue
            table2columns[col[0]].append(col_id)
        db['column2table'], db['table2columns'] = column2table, table2columns
        t_num, c_num, dtype = len(db['table_names']), len(db['column_names']), '<U100'
        # relations in tables, tab_num * tab_num
        tab_mat = np.array([['table-table-generic'] * t_num for _ in range(t_num)], dtype=dtype)
        table_fks = set(map(lambda pair: (column2table[pair[0]], column2table[pair[1]]), db['foreign_keys']))
        for (tab1, tab2) in table_fks:
            # 'fkb' marks bidirectional fk pairs; otherwise fk/fkr mark direction
            if (tab2, tab1) in table_fks:
                tab_mat[tab1, tab2], tab_mat[tab2, tab1] = 'table-table-fkb', 'table-table-fkb'
            else:
                tab_mat[tab1, tab2], tab_mat[tab2, tab1] = 'table-table-fk', 'table-table-fkr'
        tab_mat[list(range(t_num)), list(range(t_num))] = 'table-table-identity'
        # relations in columns, c_num * c_num
        col_mat = np.array([['column-column-generic'] * c_num for _ in range(c_num)], dtype=dtype)
        for i in range(t_num):
            col_ids = [idx for idx, t in enumerate(column2table) if t == i]
            col1, col2 = list(zip(*list(product(col_ids, col_ids))))
            col_mat[col1, col2] = 'column-column-sametable'
        col_mat[list(range(c_num)), list(range(c_num))] = 'column-column-identity'
        if len(db['foreign_keys']) > 0:
            col1, col2 = list(zip(*db['foreign_keys']))
            col_mat[col1, col2], col_mat[col2, col1] = 'column-column-fk', 'column-column-fkr'
        # column id 0 is the special '*' column
        col_mat[0, list(range(c_num))] = '*-column-generic'
        col_mat[list(range(c_num)), 0] = 'column-*-generic'
        col_mat[0, 0] = '*-*-identity'
        # relations between tables and columns, t_num*c_num and c_num*t_num
        tab_col_mat = np.array([['table-column-generic'] * c_num for _ in range(t_num)], dtype=dtype)
        col_tab_mat = np.array([['column-table-generic'] * t_num for _ in range(c_num)], dtype=dtype)
        cols, tabs = list(zip(*list(map(lambda x: (x, column2table[x]), range(1, c_num))))) # ignore *
        col_tab_mat[cols, tabs], tab_col_mat[tabs, cols] = 'column-table-has', 'table-column-has'
        if len(db['primary_keys']) > 0:
            cols, tabs = list(zip(*list(map(lambda x: (x, column2table[x]), db['primary_keys']))))
            col_tab_mat[cols, tabs], tab_col_mat[tabs, cols] = 'column-table-pk', 'table-column-pk'
        col_tab_mat[0, list(range(t_num))] = '*-table-generic'
        tab_col_mat[list(range(t_num)), 0] = 'table-*-generic'
        # Assemble the full (tables + columns) x (tables + columns) relation matrix.
        relations = np.concatenate([
            np.concatenate([tab_mat, tab_col_mat], axis=1),
            np.concatenate([col_tab_mat, col_mat], axis=1)
        ], axis=0)
        db['relations'] = relations.tolist()
        if verbose:
            print('Tables:', ', '.join(db['table_names']))
            print('Lemmatized:', ', '.join(table_names))
            print('Columns:', ', '.join(list(map(lambda x: x[1], db['column_names']))))
            print('Lemmatized:', ', '.join(column_names), '\n')
        return db
    def preprocess_question(self, entry: dict, db: dict, verbose: bool = False):
        """ Tokenize, lemmatize, lowercase question.

        Adds to entry: raw_question_toks, processed_question_toks, pos_tags and a
        q_num x q_num 'relations' matrix of relative-distance labels.
        """
        # stanza tokenize, lemmatize and POS tag
        question = ' '.join(quote_normalization(entry['question_toks']))
        doc = self.nlp(question)
        raw_toks = [w.text.lower() for s in doc.sentences for w in s.words]
        toks = [w.lemma.lower() for s in doc.sentences for w in s.words]
        pos_tags = [w.xpos for s in doc.sentences for w in s.words]
        entry['raw_question_toks'] = raw_toks
        entry['processed_question_toks'] = toks
        entry['pos_tags'] = pos_tags
        # relations in questions, q_num * q_num
        q_num, dtype = len(toks), '<U100'
        # dist_vec is a sliding window of relative-distance labels; pairs farther
        # apart than MAX_RELATIVE_DIST get the generic label.
        if q_num <= MAX_RELATIVE_DIST + 1:
            dist_vec = ['question-question-dist' + str(i) if i != 0 else 'question-question-identity'
                for i in range(- MAX_RELATIVE_DIST, MAX_RELATIVE_DIST + 1, 1)]
            starting = MAX_RELATIVE_DIST
        else:
            dist_vec = ['question-question-generic'] * (q_num - MAX_RELATIVE_DIST - 1) + \
                ['question-question-dist' + str(i) if i != 0 else 'question-question-identity' \
                    for i in range(- MAX_RELATIVE_DIST, MAX_RELATIVE_DIST + 1, 1)] + \
                ['question-question-generic'] * (q_num - MAX_RELATIVE_DIST - 1)
            starting = q_num - 1
        # Row i is dist_vec shifted so the identity label lands on the diagonal.
        q_mat = np.array([dist_vec[starting - i: starting - i + q_num] for i in range(q_num)], dtype=dtype)
        entry['relations'] = q_mat.tolist()
        if verbose:
            print('Question:', entry['question'])
            print('Tokenized:', ' '.join(entry['raw_question_toks']))
            print('Lemmatized:', ' '.join(entry['processed_question_toks']))
            print('Pos tags:', ' '.join(entry['pos_tags']), '\n')
        return entry
def extract_subgraph(self, entry: dict, db: dict, verbose: bool = False):
sql = entry['sql']
used_schema = {'table': set(), 'column': set()}
used_schema = self.extract_subgraph_from_sql(sql, used_schema)
entry['used_tables'] = sorted(list(used_schema['table']))
entry['used_columns'] = sorted(list(used_schema['column']))
if verbose:
print('Used tables:', entry['used_tables'])
print('Used columns:', entry['used_columns'], '\n')
return entry
def extract_subgraph_from_sql(self, sql: dict, used_schema: dict):
select_items = sql['select'][1]
# select clause
for _, val_unit in select_items:
if val_unit[0] == 0:
col_unit = val_unit[1]
used_schema['column'].add(col_unit[1])
else:
col_unit1, col_unit2 = val_unit[1:]
used_schema['column'].add(col_unit1[1])
used_schema['column'].add(col_unit2[1])
# from clause conds
table_units = sql['from']['table_units']
for _, t in table_units:
if type(t) == dict:
used_schema = self.extract_subgraph_from_sql(t, used_schema)
else:
used_schema['table'].add(t)
# from, where and having conds
used_schema = self.extract_subgraph_from_conds(sql['from']['conds'], used_schema)
used_schema = self.extract_subgraph_from_conds(sql['where'], used_schema)
used_schema = self.extract_subgraph_from_conds(sql['having'], used_schema)
# groupBy and orderBy clause
groupBy = sql['groupBy']
for col_unit in groupBy:
used_schema['column'].add(col_unit[1])
orderBy = sql['orderBy']
if len(orderBy) > 0:
orderBy = orderBy[1]
for val_unit in orderBy:
if val_unit[0] == 0:
col_unit = val_unit[1]
used_schema['column'].add(col_unit[1])
else:
col_unit1, col_unit2 = val_unit[1:]
used_schema['column'].add(col_unit1[1])
used_schema['column'].add(col_unit2[1])
# union, intersect and except clause
if sql['intersect']:
used_schema = self.extract_subgraph_from_sql(sql['intersect'], used_schema)
if sql['union']:
used_schema = self.extract_subgraph_from_sql(sql['union'], used_schema)
if sql['except']:
used_schema = self.extract_subgraph_from_sql(sql['except'], used_schema)
return used_schema
def extract_subgraph_from_conds(self, conds: list, used_schema: dict):
if len(conds) == 0:
return used_schema
for cond in conds:
if cond in ['and', 'or']:
continue
val_unit, val1, val2 = cond[2:]
if val_unit[0] == 0:
col_unit = val_unit[1]
used_schema['column'].add(col_unit[1])
else:
col_unit1, col_unit2 = val_unit[1:]
used_schema['column'].add(col_unit1[1])
used_schema['column'].add(col_unit2[1])
if type(val1) == list:
used_schema['column'].add(val1[1])
elif type(val1) == dict:
used_schema = self.extract_subgraph_from_sql(val1, used_schema)
if type(val2) == list:
used_schema['column'].add(val1[1])
elif type(val2) == dict:
used_schema = self.extract_subgraph_from_sql(val2, used_schema)
return used_schema
def schema_linking(self, entry: dict, db: dict, verbose: bool = False):
""" Perform schema linking: both question and database need to be preprocessed """
raw_question_toks, question_toks = entry['raw_question_toks'], entry['processed_question_toks']
table_toks, column_toks = db['processed_table_toks'], db['processed_column_toks']
table_names, column_names = db['processed_table_names'], db['processed_column_names']
q_num, t_num, c_num, dtype = len(question_toks), len(table_toks), len(column_toks), '<U100'
assert len(column_names)==len(column_toks) and len(table_names) == len(table_toks) and len(raw_question_toks)==len(question_toks)
question_id = [self.plm_tokenizer.cls_token_id]
question = [q.lower() for q in question_toks]
question_subword_len = []
for w in question:
toks = self.plm_tokenizer.convert_tokens_to_ids(self.plm_tokenizer.tokenize(w))
question_id.extend(toks)
question_subword_len.append(len(toks))
question_mask_plm = [0] + [1] * (len(question_id) - 1) + [0]
exact_question_token = len(question_id) - 1
question_id.append(self.plm_tokenizer.sep_token_id)
masked_question_id = [question_id]
start = 1
for i, sub_len in enumerate(question_subword_len):
tmp_question_id = question_id.copy()
for m in range(start, start + sub_len):
tmp_question_id[m] = self.plm_tokenizer.mask_token_id
masked_question_id.append(tmp_question_id)
start += sub_len
table = [t.lower().split() for t in table_names]
table_id, table_mask_plm, table_subword_len = [], [], []
table_word_len = []
for s in table:
l = 0
for w in s:
toks = self.plm_tokenizer.convert_tokens_to_ids(self.plm_tokenizer.tokenize(w))
table_id.extend(toks)
table_subword_len.append(len(toks))
l += len(toks)
table_word_len.append(l)
table_mask_plm = [1] * len(table_id)
column = [t.lower().split() for t in column_names]
column_id, column_mask_plm, column_subword_len = [], [], []
column_word_len = []
for s in column:
l = 0
for w in s:
toks = self.plm_tokenizer.convert_tokens_to_ids(self.plm_tokenizer.tokenize(w))
column_id.extend(toks)
column_subword_len.append(len(toks))
l += len(toks)
column_word_len.append(l)
column_mask_plm = [1] * len(column_id) + [0]
exact_column_token = len(column_id)
column_id.append(self.plm_tokenizer.sep_token_id)
question_mask_plm = question_mask_plm + [0] * (len(table_id) + len(column_id))
table_mask_plm = [0] * len(question_id) + table_mask_plm + [0] * len(column_id)
column_mask_plm = [0] * (len(question_id) + len(table_id)) + column_mask_plm
input_id = []
segment_id = []
atten_mask = []
for i, msk_q_id in enumerate(masked_question_id):
input_id.append(msk_q_id + table_id + column_id)
segment_id.append([0] * len(msk_q_id) + [1] * (len(table_id) + len(column_id)))
atten_mask.append([1] * len(input_id[-1]))
start = 0
total_size = len(input_id)
store_arr = []
if total_size <= self.max_batch_size:
ii = torch.tensor(input_id, dtype=torch.long, device=self.device)
im = torch.tensor(atten_mask, dtype=torch.float, device=self.device)
si = torch.tensor(segment_id, dtype=torch.long, device=self.device)
outputs = self.plm_model(ii, im)[0].squeeze()
store_arr.append(outputs)
else:
while start < len(input_id):
if start + self.max_batch_size <= len(input_id):
ii = torch.tensor(input_id[start: start + self.max_batch_size], dtype=torch.long, device=self.device)
im = torch.tensor(atten_mask[start: start + self.max_batch_size], dtype=torch.float, device=self.device)
si = torch.tensor(segment_id[start: start + self.max_batch_size], dtype=torch.long, device=self.device)
outputs = self.plm_model(ii, im)[0] # .squeeze()
store_arr.append(outputs)
else:
ii = torch.tensor(input_id[start: len(input_id)], dtype=torch.long, device=self.device)
im = torch.tensor(atten_mask[start: len(input_id)], dtype=torch.float, device=self.device)
si = torch.tensor(segment_id[start: len(input_id)], dtype=torch.long, device=self.device)
outputs = self.plm_model(ii, im)[0] # .squeeze()
store_arr.append(outputs)
start += self.max_batch_size
assert len(store_arr) > 0
if len(store_arr) == 1:
outputs = store_arr[0]
else:
outputs = store_arr[0]
for t in store_arr[1:]:
outputs = torch.cat((outputs, t), dim=0)
q_tab_mat = outputs.new_zeros(len(raw_question_toks), len(table_names))
old_tables = outputs.masked_select(torch.tensor(table_mask_plm, dtype=torch.bool, device=self.device).unsqueeze(-1).unsqueeze(0).repeat(outputs.size(0),1,1)).view(outputs.size(0),len(table_id), self.hidden_size)
start = 0
new_table_arr = []
for i, sub_len in enumerate(table_word_len):
curr = old_tables[:, start:start + sub_len]
new_table_arr.append(agg(curr))
start += sub_len
new_tables = torch.cat(new_table_arr, 1)
tbl_cmp = new_tables[0:1]
tbl_msk = new_tables[1:]
assert tbl_msk.size(0) == len(raw_question_toks)
for i in range(len(table_word_len)):
a = self.ball.expmap0(tbl_cmp[:, i])
b = self.ball.expmap0(tbl_msk[:, i])
dis=self.ball.dist(a,b)
q_tab_mat[:, i] = dis
q_col_mat = outputs.new_zeros(len(raw_question_toks), len(column_names))
old_columns = outputs.masked_select(torch.tensor(column_mask_plm, dtype=torch.bool, device=self.device).unsqueeze(-1).unsqueeze(0).repeat(outputs.size(0),1,1)).view(outputs.size(0),exact_column_token, self.hidden_size)
new_column_arr = []
start = 0
for i, sub_len in enumerate(column_word_len):
curr = old_columns[:, start:start + sub_len]
new_column_arr.append(agg(curr))
start += sub_len
new_column = torch.cat(new_column_arr, 1)
col_cmp = new_column[0:1]
col_msk = new_column[1:]
assert col_msk.size(0) == len(raw_question_toks)
for i in range(len(column_word_len)):
a = self.ball.expmap0(col_cmp[:, i])
b = self.ball.expmap0(col_msk[:, i])
dis=self.ball.dist(a,b)
q_col_mat[:, i] = dis
use_matrix = torch.cat([q_tab_mat,q_col_mat], dim=1)
matrix_min=torch.min(use_matrix)
matrix_max=torch.max(use_matrix)
use_matrix=(use_matrix-matrix_min)/(matrix_max-matrix_min)
use_q_tab_mat = use_matrix[:, :q_tab_mat.size(1)]
use_q_col_mat = use_matrix[:, q_tab_mat.size(1):]
assert use_q_tab_mat.size(1) == t_num and use_q_col_mat.size(1)== c_num
use_tab_q_mat = use_q_tab_mat.transpose(0,1).cpu().detach().numpy()
use_col_q_mat = use_q_col_mat.transpose(0,1).cpu().detach().numpy()
table_matched_pairs = {'partial': [], 'exact': []}
q_tab_mat = np.array([['question-table-nomatch'] * t_num for _ in range(q_num)], dtype=dtype)
tab_q_mat = np.array([['table-question-nomatch'] * q_num for _ in range(t_num)], dtype=dtype)
max_len = max([len(t) for t in table_toks])
index_pairs = list(filter(lambda x: x[1] - x[0] <= max_len, combinations(range(q_num + 1), 2)))
index_pairs = sorted(index_pairs, key=lambda x: x[1] - x[0])
for i, j in index_pairs:
phrase = ' '.join(question_toks[i: j])
if phrase in self.stopwords: continue
for idx, name in enumerate(table_names):
if phrase == name: # fully match will overwrite partial match due to sort
q_tab_mat[range(i, j), idx] = 'question-table-exactmatch'
tab_q_mat[idx, range(i, j)] = 'table-question-exactmatch'
if verbose:
table_matched_pairs['exact'].append(str((name, idx, phrase, i, j)))
elif (j - i == 1 and phrase in name.split()) or (j - i > 1 and phrase in name):
q_tab_mat[range(i, j), idx] = 'question-table-partialmatch'
tab_q_mat[idx, range(i, j)] = 'table-question-partialmatch'
if verbose:
table_matched_pairs['partial'].append(str((name, idx, phrase, i, j)))
assert use_tab_q_mat.shape[0]==t_num and use_tab_q_mat.shape[1]==q_num
for x in range(t_num):
for y in range(q_num):
if question_toks[y] in self.stopwords or question_toks[y] in '."?,':
continue
if use_tab_q_mat[x,y]>self.threshold:
if 'partialmatch' in tab_q_mat[x,y]:
tab_q_mat[x,y] = 'table-question-partialsemanticmatch'
q_tab_mat[y,x] = 'question-table-partialsemanticmatch'
elif 'exact' in tab_q_mat[x,y]:
continue
elif 'nomatch' in tab_q_mat[x,y]:
tab_q_mat[x,y] = 'table-question-semanticmatch'
q_tab_mat[y,x] = 'question-table-semanticmatch'
# relations between questions and columns
column_matched_pairs = {'partial': [], 'exact': [], 'value': []}
q_col_mat = np.array([['question-column-nomatch'] * c_num for _ in range(q_num)], dtype=dtype)
col_q_mat = np.array([['column-question-nomatch'] * q_num for _ in range(c_num)], dtype=dtype)
max_len = max([len(c) for c in column_toks])
index_pairs = list(filter(lambda x: x[1] - x[0] <= max_len, combinations(range(q_num + 1), 2)))
index_pairs = sorted(index_pairs, key=lambda x: x[1] - x[0])
for i, j in index_pairs:
phrase = ' '.join(question_toks[i: j])
if phrase in self.stopwords: continue
for idx, name in enumerate(column_names):
if phrase == name: # fully match will overwrite partial match due to sort
q_col_mat[range(i, j), idx] = 'question-column-exactmatch'
col_q_mat[idx, range(i, j)] = 'column-question-exactmatch'
if verbose:
column_matched_pairs['exact'].append(str((name, idx, phrase, i, j)))
elif (j - i == 1 and phrase in name.split()) or (j - i > 1 and phrase in name):
q_col_mat[range(i, j), idx] = 'question-column-partialmatch'
col_q_mat[idx, range(i, j)] = 'column-question-partialmatch'
if verbose:
column_matched_pairs['partial'].append(str((name, idx, phrase, i, j)))
if self.db_content:
db_file = os.path.join(self.db_dir, db['db_id'], db['db_id'] + '.sqlite')
if not os.path.exists(db_file):
raise ValueError('[ERROR]: database file %s not found ...' % (db_file))
conn = sqlite3.connect(db_file)
conn.text_factory = lambda b: b.decode(errors='ignore')
conn.execute('pragma foreign_keys=ON')
for i, (tab_id, col_name) in enumerate(db['column_names_original']):
if i == 0 or 'id' in column_toks[i]: # ignore * and special token 'id'
continue
tab_name = db['table_names_original'][tab_id]
try:
cursor = conn.execute("SELECT DISTINCT \"%s\" FROM \"%s\";" % (col_name, tab_name))
cell_values = cursor.fetchall()
cell_values = [str(each[0]) for each in cell_values]
cell_values = [[str(float(each))] if is_number(each) else each.lower().split() for each in cell_values]
except Exception as e:
print(e)
for j, word in enumerate(raw_question_toks):
word = str(float(word)) if is_number(word) else word
for c in cell_values:
if word in c and 'nomatch' in q_col_mat[j, i] and word not in self.stopwords:
q_col_mat[j, i] = 'question-column-valuematch'
col_q_mat[i, j] = 'column-question-valuematch'
if verbose:
column_matched_pairs['value'].append(str((column_names[i], i, word, j, j + 1)))
break
conn.close()
assert use_col_q_mat.shape[0]==c_num and use_col_q_mat.shape[1]==q_num
for x in range(c_num):
for y in range(q_num):
if question_toks[y] in self.stopwords or question_toks[y] in '."?,':
continue
if use_col_q_mat[x,y]>self.threshold:
if 'partialmatch' in col_q_mat[x,y]:
col_q_mat[x,y] = 'column-question-partialsemanticmatch'
q_col_mat[y,x] = 'question-column-partialsemanticmatch'
elif 'exact' in col_q_mat[x,y] or 'value' in col_q_mat[x,y]:
continue
elif 'nomatch' in col_q_mat[x,y]:
col_q_mat[x,y] = 'column-question-semanticmatch'
q_col_mat[y,x] = 'question-column-semanticmatch'
# two symmetric schema linking matrix: q_num x (t_num + c_num), (t_num + c_num) x q_num
q_col_mat[:, 0] = 'question-*-generic'
col_q_mat[0] = '*-question-generic'
q_schema = np.concatenate([q_tab_mat, q_col_mat], axis=1)
schema_q = np.concatenate([tab_q_mat, col_q_mat], axis=0)
entry['schema_linking'] = (q_schema.tolist(), schema_q.tolist())
if verbose:
print('Question:', ' '.join(question_toks))
print('Table matched: (table name, column id, question span, start id, end id)')
print('Exact match:', ', '.join(table_matched_pairs['exact']) if table_matched_pairs['exact'] else 'empty')
print('Partial match:', ', '.join(table_matched_pairs['partial']) if table_matched_pairs['partial'] else 'empty')
print('Column matched: (column name, column id, question span, start id, end id)')
print('Exact match:', ', '.join(column_matched_pairs['exact']) if column_matched_pairs['exact'] else 'empty')
print('Partial match:', ', '.join(column_matched_pairs['partial']) if column_matched_pairs['partial'] else 'empty')
print('Value match:', ', '.join(column_matched_pairs['value']) if column_matched_pairs['value'] else 'empty', '\n')
return entry
class GraphProcessor():
def process_rgatsql(self, ex: dict, db: dict, relation: list):
graph = GraphExample()
num_nodes = int(math.sqrt(len(relation)))
local_edges = [(idx // num_nodes, idx % num_nodes, (special_column_mapping_dict[r] if r in special_column_mapping_dict else r))
for idx, r in enumerate(relation) if r not in nonlocal_relations]
nonlocal_edges = [(idx // num_nodes, idx % num_nodes, (special_column_mapping_dict[r] if r in special_column_mapping_dict else r))
for idx, r in enumerate(relation) if r in nonlocal_relations]
global_edges = local_edges + nonlocal_edges
src_ids, dst_ids = list(map(lambda r: r[0], global_edges)), list(map(lambda r: r[1], global_edges))
graph.global_g = dgl.graph((src_ids, dst_ids), num_nodes=num_nodes, idtype=torch.int32)
graph.global_edges = global_edges
src_ids, dst_ids = list(map(lambda r: r[0], local_edges)), list(map(lambda r: r[1], local_edges))
graph.local_g = dgl.graph((src_ids, dst_ids), num_nodes=num_nodes, idtype=torch.int32)
graph.local_edges = local_edges
# graph pruning for nodes
q_num = len(ex['processed_question_toks'])
s_num = num_nodes - q_num
graph.question_mask = [1] * q_num + [0] * s_num
graph.schema_mask = [0] * q_num + [1] * s_num
graph.gp = dgl.heterograph({
('question', 'to', 'schema'): (list(range(q_num)) * s_num,
[i for i in range(s_num) for _ in range(q_num)])
}, num_nodes_dict={'question': q_num, 'schema': s_num}, idtype=torch.int32
)
t_num = len(db['processed_table_toks'])
def check_node(i):
if i < t_num and i in ex['used_tables']:
return 1.0
elif i >= t_num and i - t_num in ex['used_columns']:
return 1.0
else: return 0.0
graph.node_label = list(map(check_node, range(s_num)))
ex['graph'] = graph
return ex
def process_lgesql(self, ex: dict, db: dict, relation: list):
ex = self.process_rgatsql(ex, db, relation)
graph = ex['graph']
lg = graph.local_g.line_graph(backtracking=False)
# prevent information propagate through matching edges
match_ids = [idx for idx, r in enumerate(graph.global_edges) if 'match' in r[2]]
src, dst, eids = lg.edges(form='all', order='eid')
eids = [e for u, v, e in zip(src.tolist(), dst.tolist(), eids.tolist()) if not (u in match_ids and v in match_ids)]
graph.lg = lg.edge_subgraph(eids, preserve_nodes=True).remove_self_loop().add_self_loop()
ex['graph'] = graph
return ex
def process_graph_utils(self, ex: dict, db: dict, method: str = 'rgatsql'):
""" Example should be preprocessed by self.pipeline
"""
q = np.array(ex['relations'], dtype='<U100')
s = np.array(db['relations'], dtype='<U100')
q_s = np.array(ex['schema_linking'][0], dtype='<U100')
s_q = np.array(ex['schema_linking'][1], dtype='<U100')
relation = np.concatenate([
np.concatenate([q, q_s], axis=1),
np.concatenate([s_q, s], axis=1)
], axis=0)
relation = relation.flatten().tolist()
if method == 'rgatsql':
ex = self.process_rgatsql(ex, db, relation)
elif method == 'lgesql':
ex = self.process_lgesql(ex, db, relation)
return ex
def preprocess_database_and_dataset(db_dir='database/', table_path='data/tables.json', dataset_path='data/dev.json', method='lgesql'):
tables = json.load(open(table_path, 'r'))
dataset = json.load(open(dataset_path, 'r'))
processor = Preprocessor(db_dir=db_dir, db_content=True)
output_tables = process_tables(processor, tables)
output_dataset = process_dataset(processor, dataset, output_tables)
graph_processor = GraphProcessor()
output_dataset = process_dataset_graph(graph_processor, output_dataset, output_tables, method=method)
return output_dataset, output_tables | null |
163,540 | import sys, os, json, pickle, argparse, time, torch
from argparse import Namespace
from preprocess.process_dataset import process_tables, process_dataset
from preprocess.process_graphs import process_dataset_graph
from preprocess.common_utils import Preprocessor
from preprocess.graph_utils import GraphProcessor
from utils.example import Example
from utils.batch import Batch
from model.model_utils import Registrable
from model.model_constructor import *
Example.configuration(plm=params.plm, method=params.model, tables=tables, table_path=args.table_path, db_dir=args.db_dir)
class Example():
def configuration(cls, plm=None, method='lgesql', table_path='data/tables.json', tables='data/tables.bin', db_dir='data/database'):
cls.plm, cls.method = plm, method
cls.grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH)
cls.trans = TransitionSystem.get_class_by_lang('sql')(cls.grammar)
cls.tables = pickle.load(open(tables, 'rb')) if type(tables) == str else tables
cls.evaluator = Evaluator(cls.trans, table_path, db_dir)
if plm is None:
cls.word2vec = Word2vecUtils()
cls.tokenizer = lambda x: x
cls.word_vocab = Vocab(padding=True, unk=True, boundary=True, default=UNK,
filepath='./pretrained_models/glove.42b.300d/vocab.txt', specials=SCHEMA_TYPES) # word vocab for glove.42B.300d
else:
cls.tokenizer = AutoTokenizer.from_pretrained(os.path.join('./pretrained_models', plm))
cls.word_vocab = cls.tokenizer.get_vocab()
cls.relation_vocab = Vocab(padding=False, unk=False, boundary=False, iterable=RELATIONS, default=None)
cls.graph_factory = GraphFactory(cls.method, cls.relation_vocab)
def load_dataset(cls, choice, debug=False):
assert choice in ['train', 'dev']
fp = os.path.join('data', choice + '.' + cls.method + '.bin')
datasets = pickle.load(open(fp, 'rb'))
# question_lens = [len(ex['processed_question_toks']) for ex in datasets]
# print('Max/Min/Avg question length in %s dataset is: %d/%d/%.2f' % (choice, max(question_lens), min(question_lens), float(sum(question_lens))/len(question_lens)))
# action_lens = [len(ex['actions']) for ex in datasets]
# print('Max/Min/Avg action length in %s dataset is: %d/%d/%.2f' % (choice, max(action_lens), min(action_lens), float(sum(action_lens))/len(action_lens)))
examples, outliers = [], 0
for ex in datasets:
if choice == 'train' and len(cls.tables[ex['db_id']]['column_names']) > 100:
outliers += 1
continue
examples.append(cls(ex, cls.tables[ex['db_id']]))
if debug and len(examples) >= 100:
return examples
if choice == 'train':
print("Skip %d extremely large samples in training dataset ..." % (outliers))
return examples
def __init__(self, ex: dict, db: dict):
super(Example, self).__init__()
self.ex = ex
self.db = db
""" Mapping word to corresponding index """
if Example.plm is None:
self.question = ex['processed_question_toks']
self.question_id = [Example.word_vocab[w] for w in self.question]
self.column = [[db['column_types'][idx].lower()] + c for idx, c in enumerate(db['processed_column_toks'])]
self.column_id = [[Example.word_vocab[w] for w in c] for c in self.column]
self.table = [['table'] + t for t in db['processed_table_toks']]
self.table_id = [[Example.word_vocab[w] for w in t] for t in self.table]
else:
t = Example.tokenizer
self.question = [q.lower() for q in ex['raw_question_toks']]
self.question_id = [t.cls_token_id] # map token to id
self.question_mask_plm = [] # remove SEP token in our case
self.question_subword_len = [] # subword len for each word, exclude SEP token
for w in self.question:
toks = t.convert_tokens_to_ids(t.tokenize(w))
self.question_id.extend(toks)
self.question_subword_len.append(len(toks))
self.question_mask_plm = [0] + [1] * (len(self.question_id) - 1) + [0]
self.question_id.append(t.sep_token_id)
self.table = [['table'] + t.lower().split() for t in db['table_names']]
self.table_id, self.table_mask_plm, self.table_subword_len = [], [], []
self.table_word_len = []
for s in self.table:
l = 0
for w in s:
toks = t.convert_tokens_to_ids(t.tokenize(w))
self.table_id.extend(toks)
self.table_subword_len.append(len(toks))
l += len(toks)
self.table_word_len.append(l)
self.table_mask_plm = [1] * len(self.table_id)
self.column = [[db['column_types'][idx].lower()] + c.lower().split() for idx, (_, c) in enumerate(db['column_names'])]
self.column_id, self.column_mask_plm, self.column_subword_len = [], [], []
self.column_word_len = []
for s in self.column:
l = 0
for w in s:
toks = t.convert_tokens_to_ids(t.tokenize(w))
self.column_id.extend(toks)
self.column_subword_len.append(len(toks))
l += len(toks)
self.column_word_len.append(l)
self.column_mask_plm = [1] * len(self.column_id) + [0]
self.column_id.append(t.sep_token_id)
self.input_id = self.question_id + self.table_id + self.column_id
self.segment_id = [0] * len(self.question_id) + [1] * (len(self.table_id) + len(self.column_id)) \
if Example.plm != 'grappa_large_jnt' and not Example.plm.startswith('roberta') \
else [0] * (len(self.question_id) + len(self.table_id) + len(self.column_id))
self.question_mask_plm = self.question_mask_plm + [0] * (len(self.table_id) + len(self.column_id))
self.table_mask_plm = [0] * len(self.question_id) + self.table_mask_plm + [0] * len(self.column_id)
self.column_mask_plm = [0] * (len(self.question_id) + len(self.table_id)) + self.column_mask_plm
self.graph = Example.graph_factory.graph_construction(ex, db)
# outputs
self.query = ' '.join(ex['query'].split('\t'))
self.ast = ex['ast']
self.tgt_action = ex['actions']
self.used_tables, self.used_columns = ex['used_tables'], ex['used_columns']
def load_examples(dataset, tables):
ex_list = []
for ex in dataset:
ex_list.append(Example(ex, tables[ex['db_id']]))
return ex_list | null |
163,541 | import sys, os, time, json, gc
from argparse import Namespace
from utils.args import init_args
from utils.hyperparams import hyperparam_path
from utils.initialization import *
from utils.example import Example
from utils.batch import Batch
from utils.optimization import set_optimizer
from model.model_utils import Registrable
from model.model_constructor import *
args = init_args(sys.argv[1:])
device = set_torch_device(args.device)
if args.read_model_path:
params = json.load(open(os.path.join(args.read_model_path, 'params.json')), object_hook=lambda d: Namespace(**d))
params.lazy_load = True
else:
params = args
train_dataset, dev_dataset = Example.load_dataset('train'), Example.load_dataset('dev')
args.word_vocab, args.relation_num = len(Example.word_vocab), len(Example.relation_vocab)
model = Registrable.by_name('text2sql')(params, sql_trans).to(device)
if args.read_model_path:
check_point = torch.load(open(os.path.join(args.read_model_path, 'model.bin'), 'rb'), map_location=device)
model.load_state_dict(check_point['model'])
logger.info("Load saved model from path: %s" % (args.read_model_path))
else:
json.dump(vars(params), open(os.path.join(exp_path, 'params.json'), 'w'), indent=4)
if params.plm is None:
ratio = Example.word2vec.load_embeddings(model.encoder.input_layer.word_embed, Example.word_vocab, device=device)
logger.info("Init model and word embedding layer with a coverage %.2f" % (ratio))
if not args.testing:
num_training_steps = ((len(train_dataset) + args.batch_size - 1) // args.batch_size) * args.max_epoch
num_warmup_steps = int(num_training_steps * args.warmup_ratio)
logger.info('Total training steps: %d;\t Warmup steps: %d' % (num_training_steps, num_warmup_steps))
optimizer, scheduler = set_optimizer(model, args, num_warmup_steps, num_training_steps)
start_epoch, nsamples, best_result = 0, len(train_dataset), {'dev_acc': 0.}
train_index, step_size = np.arange(nsamples), args.batch_size // args.grad_accumulate
if args.read_model_path and args.load_optimizer:
optimizer.load_state_dict(check_point['optim'])
scheduler.load_state_dict(check_point['scheduler'])
start_epoch = check_point['epoch'] + 1
logger.info('Start training ......')
for i in range(start_epoch, args.max_epoch):
start_time = time.time()
epoch_loss, epoch_gp_loss, count = 0, 0, 0
np.random.shuffle(train_index)
model.train()
for j in range(0, nsamples, step_size):
count += 1
cur_dataset = [train_dataset[k] for k in train_index[j: j + step_size]]
current_batch = Batch.from_example_list(cur_dataset, device, train=True, smoothing=args.smoothing)
loss, gp_loss = model(current_batch) # see utils/batch.py for batch elements
epoch_loss += loss.item()
epoch_gp_loss += gp_loss.item()
loss += gp_loss
loss.backward()
if count == args.grad_accumulate or j + step_size >= nsamples:
count = 0
model.pad_embedding_grad_zero()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
logger.info('Training: \tEpoch: %d\tTime: %.4f\tTraining loss: %.4f/%.4f' % (i, time.time() - start_time, epoch_loss, epoch_gp_loss))
torch.cuda.empty_cache()
gc.collect()
if i < args.eval_after_epoch: # avoid unnecessary evaluation
continue
start_time = time.time()
dev_acc = decode('dev', os.path.join(exp_path, 'dev.iter' + str(i)), acc_type='sql')
logger.info('Evaluation: \tEpoch: %d\tTime: %.4f\tDev acc: %.4f' % (i, time.time() - start_time, dev_acc))
if dev_acc > best_result['dev_acc']:
best_result['dev_acc'], best_result['iter'] = dev_acc, i
torch.save({
'epoch': i, 'model': model.state_dict(),
'optim': optimizer.state_dict(),
'scheduler': scheduler.state_dict()
}, open(os.path.join(exp_path, 'model.bin'), 'wb'))
logger.info('NEW BEST MODEL: \tEpoch: %d\tDev acc: %.4f' % (i, dev_acc))
logger.info('FINAL BEST RESULT: \tEpoch: %d\tDev acc: %.4f' % (best_result['iter'], best_result['dev_acc']))
else:
start_time = time.time()
dev_acc = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval'), acc_type='sql')
dev_acc_checker = decode('dev', output_path=os.path.join(args.read_model_path, 'dev.eval.checker'), acc_type='sql', use_checker=True)
logger.info("Evaluation costs %.2fs ; Dev dataset exact match/checker is %.4f/%.4f ." % (time.time() - start_time, dev_acc, dev_acc_checker))
class Batch():
def __init__(self, examples, device='cpu'):
super(Batch, self).__init__()
self.examples = examples
self.device = device
def from_example_list(cls, ex_list, device='cpu', train=True, method='text2sql', **kwargs):
method_dict = {
"text2sql": from_example_list_text2sql,
}
return method_dict[method](ex_list, device, train=train, **kwargs)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
def max_question_len(self):
return torch.max(self.question_lens).item()
def max_table_len(self):
return torch.max(self.table_lens).item()
def max_column_len(self):
return torch.max(self.column_lens).item()
def max_table_word_len(self):
return torch.max(self.table_word_lens).item()
def max_column_word_len(self):
return torch.max(self.column_word_lens).item()
def max_question_subword_len(self):
return torch.max(self.question_subword_lens).item()
def max_table_subword_len(self):
return torch.max(self.table_subword_lens).item()
def max_column_subword_len(self):
return torch.max(self.column_subword_lens).item()
""" Different types of nodes are seperated instead of concatenated together """
def mask(self):
return torch.cat([self.question_mask, self.table_mask, self.column_mask], dim=1)
def question_mask(self):
return lens2mask(self.question_lens)
def table_mask(self):
return lens2mask(self.table_lens)
def column_mask(self):
return lens2mask(self.column_lens)
def table_word_mask(self):
return lens2mask(self.table_word_lens)
def column_word_mask(self):
return lens2mask(self.column_word_lens)
def question_subword_mask(self):
return lens2mask(self.question_subword_lens)
def table_subword_mask(self):
return lens2mask(self.table_subword_lens)
def column_subword_mask(self):
return lens2mask(self.column_subword_lens)
def get_frontier_field_idx(self, t):
ids = []
for e in self.examples:
if t < len(e.tgt_action):
ids.append(Example.grammar.field2id[e.tgt_action[t].frontier_field])
# assert self.grammar.id2field[ids[-1]] == e.tgt_action[t].frontier_field
else:
ids.append(0)
return torch.tensor(ids, dtype=torch.long, device=self.device)
def get_frontier_prod_idx(self, t):
ids = []
for e in self.examples:
if t < len(e.tgt_action):
ids.append(Example.grammar.prod2id[e.tgt_action[t].frontier_prod])
# assert self.grammar.id2prod[ids[-1]] == e.tgt_action[t].frontier_prod
else:
ids.append(0)
return torch.tensor(ids, dtype=torch.long, device=self.device)
def get_frontier_field_type_idx(self, t):
ids = []
for e in self.examples:
if t < len(e.tgt_action):
ids.append(Example.grammar.type2id[e.tgt_action[t].frontier_field.type])
# assert self.grammar.id2type[ids[-1]] == e.tgt_action[t].frontier_field.type
else:
ids.append(0)
return torch.tensor(ids, dtype=torch.long, device=self.device)
def decode(choice, output_path, acc_type='sql', use_checker=False):
assert acc_type in ['beam', 'ast', 'sql'] and choice in ['train', 'dev']
model.eval()
dataset = train_dataset if choice == 'train' else dev_dataset
all_hyps = []
with torch.no_grad():
for i in range(0, len(dataset), args.batch_size):
current_batch = Batch.from_example_list(dataset[i: i + args.batch_size], device, train=False)
hyps = model.parse(current_batch, args.beam_size)
all_hyps.extend(hyps)
acc = evaluator.acc(all_hyps, dataset, output_path, acc_type=acc_type, etype='match', use_checker=use_checker)
torch.cuda.empty_cache()
gc.collect()
return acc | null |
163,542 | import torch
import torch.nn as nn
import torch.nn.functional as F
def cumsoftmax(x, dim=-1):
return torch.cumsum(F.softmax(x, dim=dim), dim=dim) | null |
163,543 | import dgl, math, torch
def src_dot_dst(src_field, dst_field, out_field):
def func(edges):
return {out_field: (edges.src[src_field] * edges.dst[dst_field]).sum(-1, keepdim=True)}
return func | null |
163,544 | import dgl, math, torch
def src_sum_edge_mul_dst(src_field, dst_field, e_field, out_field):
    """Build an edge UDF computing ((src + edge) · dst) with keepdim, i.e. the
    source feature plus the edge feature, dotted against the destination
    feature, stored under `out_field`."""
    def func(edges):
        biased_src = edges.src[src_field] + edges.data[e_field]
        score = (biased_src * edges.dst[dst_field]).sum(-1, keepdim=True)
        return {out_field: score}
    return func
163,545 | import dgl, math, torch
def scaled_exp(field, scale_constant):
    """Build an edge UDF that divides edges.data[field] by `scale_constant`
    and exponentiates it, writing the result back under the same field."""
    def func(edges):
        scaled = edges.data[field] / scale_constant
        # clamp before exp for softmax numerical stability
        return {field: torch.exp(scaled.clamp(-10, 10))}
    return func
163,546 | import dgl, math, torch
def src_sum_edge_mul_edge(src_field, e_field1, e_field2, out_field):
    """Build an edge UDF computing (src + edge_feature1) * edge_feature2
    element-wise (no reduction), stored under `out_field`."""
    def func(edges):
        biased_src = edges.src[src_field] + edges.data[e_field1]
        return {out_field: biased_src * edges.data[e_field2]}
    return func
163,547 | import dgl, math, torch
def div_by_z(in_field, norm_field, out_field):
    """Build a node UDF that normalizes nodes.data[in_field] by the
    accumulated partition value nodes.data[norm_field], storing the quotient
    under `out_field`."""
    def func(nodes):
        quotient = nodes.data[in_field] / nodes.data[norm_field]
        return {out_field: quotient}
    return func
163,548 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
The provided code snippet includes necessary dependencies for implementing the `clones` function. Write a Python function `def clones(module, N)` to solve the following problem:
Produce N identical layers.
Here is the function:
def clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of `module`."""
    layers = []
    for _ in range(N):
        layers.append(copy.deepcopy(module))
    return nn.ModuleList(layers)
163,549 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
def lens2mask(lens):
    """Turn a 1-D length tensor into a boolean padding mask of shape
    (bsize, max_len); True marks valid (non-padding) positions."""
    bsize, longest = lens.numel(), lens.max()
    positions = torch.arange(0, longest).type_as(lens).to(lens.device)
    masks = positions.unsqueeze(0).expand(bsize, -1).lt(lens.unsqueeze(1))
    # mask is a constant; make sure autograd never tracks it
    masks.requires_grad = False
    return masks
163,550 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
def mask2matrix(mask):
    """Expand a boolean mask of shape (..., L) to its pairwise outer product
    of shape (..., L, L): entry (i, j) is True iff positions i and j are both valid."""
    return mask.unsqueeze(-1) & mask.unsqueeze(-2)
163,551 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
The provided code snippet includes necessary dependencies for implementing the `tile` function. Write a Python function `def tile(x, count, dim=0)` to solve the following problem:
Tiles x on dimension dim count times. E.g. [1, 2, 3], count=2 ==> [1, 1, 2, 2, 3, 3] [[1, 2], [3, 4]], count=3, dim=1 ==> [[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]] Different from torch.repeat
Here is the function:
def tile(x, count, dim=0):
    """
    Tiles x on dimension dim count times.
    E.g. [1, 2, 3], count=2 ==> [1, 1, 2, 2, 3, 3]
    [[1, 2], [3, 4]], count=3, dim=1 ==> [[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]]
    Different from torch.repeat. None passes through; lists/tuples are tiled
    element-wise, preserving the container type.
    """
    if x is None:
        return x
    if type(x) in (list, tuple):
        return type(x)(tile(item, count, dim) for item in x)
    # bring the tiled dimension to the front (swap is its own inverse)
    perm = list(range(x.dim()))
    if dim != 0:
        perm[0], perm[dim] = perm[dim], perm[0]
        x = x.permute(perm).contiguous()
    tiled_shape = list(x.size())
    tiled_shape[0] *= count
    batch = x.size(0)
    x = (x.contiguous()
          .view(batch, -1)
          .transpose(0, 1)
          .repeat(count, 1)
          .transpose(0, 1)
          .contiguous()
          .view(*tiled_shape))
    if dim != 0:
        # undo the initial swap
        x = x.permute(perm).contiguous()
    return x
163,552 | import copy, math
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
The provided code snippet includes necessary dependencies for implementing the `rnn_wrapper` function. Write a Python function `def rnn_wrapper(encoder, inputs, lens, cell='lstm')` to solve the following problem:
@args: encoder(nn.Module): rnn series bidirectional encoder, batch_first=True inputs(torch.FloatTensor): rnn inputs, [bsize x max_seq_len x in_dim] lens(torch.LongTensor): seq len for each sample, allow length=0, padding with 0-vector, [bsize] @return: out(torch.FloatTensor): output of encoder, bsize x max_seq_len x hidden_dim*2 hidden_states([tuple of ]torch.FloatTensor): final hidden states, num_layers*2 x bsize x hidden_dim
Here is the function:
def rnn_wrapper(encoder, inputs, lens, cell='lstm'):
    """
    Run a (bidirectional, batch_first) RNN over variable-length inputs, tolerating
    zero-length samples: such rows are skipped by the encoder and come back as
    all-zero vectors in the output.
    @args:
        encoder(nn.Module): rnn series bidirectional encoder, batch_first=True
        inputs(torch.FloatTensor): rnn inputs, [bsize x max_seq_len x in_dim]
        lens(torch.LongTensor): seq len for each sample, allow length=0, padding with 0-vector, [bsize]
        cell(str): 'lstm' (hidden state is an (h, c) tuple) or any other RNN cell type
    @return:
        out(torch.FloatTensor): output of encoder, bsize x max_seq_len x hidden_dim*2
        hidden_states([tuple of ]torch.FloatTensor): final hidden states, num_layers*2 x bsize x hidden_dim
    """
    # rerank according to lens and remove empty inputs:
    # pack_padded_sequence requires descending lengths and cannot take length-0
    # rows, so only the first `nonzero_num` (longest) samples are encoded
    sorted_lens, sort_key = torch.sort(lens, descending=True)
    nonzero_num, total_num = torch.sum(sorted_lens > 0).item(), sorted_lens.size(0)
    sort_key = sort_key[:nonzero_num]
    sorted_inputs = torch.index_select(inputs, dim=0, index=sort_key)
    # forward non empty inputs
    packed_inputs = rnn_utils.pack_padded_sequence(sorted_inputs, sorted_lens[:nonzero_num].tolist(), batch_first=True)
    packed_out, sorted_h = encoder(packed_inputs)  # bsize x srclen x dim
    sorted_out, _ = rnn_utils.pad_packed_sequence(packed_out, batch_first=True)
    if cell.upper() == 'LSTM':
        sorted_h, sorted_c = sorted_h
    # rerank according to sort_key: scatter encoder outputs back to their original
    # batch positions; rows belonging to skipped (zero-length) samples stay all-zero
    out_shape = list(sorted_out.size())
    out_shape[0] = total_num
    out = sorted_out.new_zeros(*out_shape).scatter_(0, sort_key.unsqueeze(-1).unsqueeze(-1).repeat(1, *out_shape[1:]), sorted_out)
    # hidden states are (num_layers*directions, bsize, hidden), so scatter on dim 1
    h_shape = list(sorted_h.size())
    h_shape[1] = total_num
    h = sorted_h.new_zeros(*h_shape).scatter_(1, sort_key.unsqueeze(0).unsqueeze(-1).repeat(h_shape[0], 1, h_shape[-1]), sorted_h)
    if cell.upper() == 'LSTM':
        c = sorted_c.new_zeros(*h_shape).scatter_(1, sort_key.unsqueeze(0).unsqueeze(-1).repeat(h_shape[0], 1, h_shape[-1]), sorted_c)
        return out, (h.contiguous(), c.contiguous())
    return out, h.contiguous()
163,553 | import torch
import numpy as np
from utils.example import Example, get_position_ids
from utils.constants import PAD, UNK
from model.model_utils import lens2mask, cached_property
import torch.nn.functional as F
def from_example_list_base(ex_list, device='cpu', train=True):
    """
    Assemble the shared (encoder-side) fields of a Batch from a list of Examples.
    question_lens: torch.long, bsize
    questions: torch.long, bsize x max_question_len, include [CLS] if add_cls
    table_lens: torch.long, bsize, number of tables for each example
    table_word_lens: torch.long, number of words for each table name
    tables: torch.long, sum_of_tables x max_table_word_len
    column_lens: torch.long, bsize, number of columns for each example
    column_word_lens: torch.long, number of words for each column name
    columns: torch.long, sum_of_columns x max_column_word_len
    """
    batch = Batch(ex_list, device)
    plm = Example.plm
    # padding id differs: static word vocab (glove) vs. pretrained tokenizer
    pad_idx = Example.word_vocab[PAD] if plm is None else Example.tokenizer.pad_token_id

    # per-example and per-schema-item length tensors (used for masking downstream)
    question_lens = [len(ex.question) for ex in ex_list]
    batch.question_lens = torch.tensor(question_lens, dtype=torch.long, device=device)
    batch.table_lens = torch.tensor([len(ex.table) for ex in ex_list], dtype=torch.long, device=device)
    table_word_lens = [len(t) for ex in ex_list for t in ex.table]
    batch.table_word_lens = torch.tensor(table_word_lens, dtype=torch.long, device=device)
    batch.column_lens = torch.tensor([len(ex.column) for ex in ex_list], dtype=torch.long, device=device)
    column_word_lens = [len(c) for ex in ex_list for c in ex.column]
    batch.column_word_lens = torch.tensor(column_word_lens, dtype=torch.long, device=device)

    if plm is None: # glove.42B.300d
        # static-embedding path: pad each id sequence to its batch-wide maximum
        questions = [ex.question_id + [pad_idx] * (batch.max_question_len - len(ex.question_id)) for ex in ex_list]
        batch.questions = torch.tensor(questions, dtype=torch.long, device=device)
        tables = [t + [pad_idx] * (batch.max_table_word_len - len(t)) for ex in ex_list for t in ex.table_id]
        batch.tables = torch.tensor(tables, dtype=torch.long, device=device)
        columns = [c + [pad_idx] * (batch.max_column_word_len - len(c)) for ex in ex_list for c in ex.column_id]
        batch.columns = torch.tensor(columns, dtype=torch.long, device=device)
    else:
        # prepare inputs for pretrained models
        batch.inputs = {"input_ids": None, "attention_mask": None, "token_type_ids": None, "position_ids": None}
        input_lens = [len(ex.input_id) for ex in ex_list]
        max_len = max(input_lens)
        input_ids = [ex.input_id + [pad_idx] * (max_len - len(ex.input_id)) for ex in ex_list]
        batch.inputs["input_ids"] = torch.tensor(input_ids, dtype=torch.long, device=device)
        attention_mask = [[1] * l + [0] * (max_len - l) for l in input_lens]
        batch.inputs["attention_mask"] = torch.tensor(attention_mask, dtype=torch.float, device=device)
        token_type_ids = [ex.segment_id + [0] * (max_len - len(ex.segment_id)) for ex in ex_list]
        batch.inputs["token_type_ids"] = torch.tensor(token_type_ids, dtype=torch.long, device=device)
        # position ids are shuffled during training (schema-item order augmentation)
        position_ids = [get_position_ids(ex, shuffle=train) + [0] * (max_len - len(ex.input_id)) for ex in ex_list]
        batch.inputs["position_ids"] = torch.tensor(position_ids, dtype=torch.long, device=device)
        # extract representations after plm, remove [SEP]
        question_mask_plm = [ex.question_mask_plm + [0] * (max_len - len(ex.question_mask_plm)) for ex in ex_list]
        batch.question_mask_plm = torch.tensor(question_mask_plm, dtype=torch.bool, device=device)
        table_mask_plm = [ex.table_mask_plm + [0] * (max_len - len(ex.table_mask_plm)) for ex in ex_list]
        batch.table_mask_plm = torch.tensor(table_mask_plm, dtype=torch.bool, device=device)
        column_mask_plm = [ex.column_mask_plm + [0] * (max_len - len(ex.column_mask_plm)) for ex in ex_list]
        batch.column_mask_plm = torch.tensor(column_mask_plm, dtype=torch.bool, device=device)
        # subword aggregation: lengths needed to pool subword pieces back into words
        question_subword_lens = [l for ex in ex_list for l in ex.question_subword_len]
        batch.question_subword_lens = torch.tensor(question_subword_lens, dtype=torch.long, device=device)
        table_subword_lens = [l for ex in ex_list for l in ex.table_subword_len]
        batch.table_subword_lens = torch.tensor(table_subword_lens, dtype=torch.long, device=device)
        column_subword_lens = [l for ex in ex_list for l in ex.column_subword_len]
        batch.column_subword_lens = torch.tensor(column_subword_lens, dtype=torch.long, device=device)

    batch.question_unk_mask, batch.table_unk_mask, batch.column_unk_mask = None, None, None
    if not train and plm is None:
        # during evaluation, for words not in vocab but in glove vocab, extract its corresponding embedding
        word2vec, unk_idx = Example.word2vec, Example.word_vocab[UNK]
        question_unk_mask = (batch.questions == unk_idx).cpu()
        if question_unk_mask.any().item():
            raw_questions = np.array([ex.question + [PAD] * (batch.max_question_len - len(ex.question)) for ex in ex_list], dtype='<U100')
            unk_words = raw_questions[question_unk_mask.numpy()].tolist()
            unk_word_embeddings = [word2vec.emb(w) for w in unk_words]
            # oov_flag marks UNK tokens that DO have a glove vector available
            oov_flag = torch.tensor([True if e is not None else False for e in unk_word_embeddings], dtype=torch.bool)
            if oov_flag.any().item():
                batch.question_unk_mask = question_unk_mask.masked_scatter_(torch.clone(question_unk_mask), oov_flag).to(device)
                batch.question_unk_embeddings = torch.tensor([e for e in unk_word_embeddings if e is not None], dtype=torch.float, device=device)
        table_unk_mask = (batch.tables == unk_idx).cpu()
        if table_unk_mask.any().item():
            raw_tables = np.array([t + [PAD] * (batch.max_table_word_len - len(t)) for ex in ex_list for t in ex.table], dtype='<U100')
            unk_words = raw_tables[table_unk_mask.numpy()].tolist()
            unk_word_embeddings = [word2vec.emb(w) for w in unk_words]
            oov_flag = torch.tensor([True if e is not None else False for e in unk_word_embeddings], dtype=torch.bool)
            if oov_flag.any().item():
                batch.table_unk_mask = table_unk_mask.masked_scatter_(torch.clone(table_unk_mask), oov_flag).to(device)
                batch.table_unk_embeddings = torch.tensor([e for e in unk_word_embeddings if e is not None], dtype=torch.float, device=device)
        column_unk_mask = (batch.columns == unk_idx).cpu()
        if column_unk_mask.any().item():
            raw_columns = np.array([c + [PAD] * (batch.max_column_word_len - len(c)) for ex in ex_list for c in ex.column], dtype='<U100')
            unk_words = raw_columns[column_unk_mask.numpy()].tolist()
            unk_word_embeddings = [word2vec.emb(w) for w in unk_words]
            oov_flag = torch.tensor([True if e is not None else False for e in unk_word_embeddings], dtype=torch.bool)
            if oov_flag.any().item():
                batch.column_unk_mask = column_unk_mask.masked_scatter_(torch.clone(column_unk_mask), oov_flag).to(device)
                batch.column_unk_embeddings = torch.tensor([e for e in unk_word_embeddings if e is not None], dtype=torch.float, device=device)
    return batch
class Example():
    # One preprocessed text-to-SQL example plus class-level configuration shared
    # by the whole dataset (grammar, tokenizer/vocab, evaluator, graph factory).

    # NOTE(review): takes `cls` as first arg — presumably decorated @classmethod
    # in the original source; decorator appears lost in extraction — confirm.
    def configuration(cls, plm=None, method='lgesql', table_path='data/tables.json', tables='data/tables.bin', db_dir='data/database'):
        # Initialize all class-level singletons before loading any dataset.
        cls.plm, cls.method = plm, method
        cls.grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH)
        cls.trans = TransitionSystem.get_class_by_lang('sql')(cls.grammar)
        # `tables` may be a path to a pickle or an already-loaded dict
        cls.tables = pickle.load(open(tables, 'rb')) if type(tables) == str else tables
        cls.evaluator = Evaluator(cls.trans, table_path, db_dir)
        if plm is None:
            cls.word2vec = Word2vecUtils()
            cls.tokenizer = lambda x: x
            cls.word_vocab = Vocab(padding=True, unk=True, boundary=True, default=UNK,
                filepath='./pretrained_models/glove.42b.300d/vocab.txt', specials=SCHEMA_TYPES) # word vocab for glove.42B.300d
        else:
            cls.tokenizer = AutoTokenizer.from_pretrained(os.path.join('./pretrained_models', plm))
            cls.word_vocab = cls.tokenizer.get_vocab()
        cls.relation_vocab = Vocab(padding=False, unk=False, boundary=False, iterable=RELATIONS, default=None)
        cls.graph_factory = GraphFactory(cls.method, cls.relation_vocab)

    # NOTE(review): takes `cls` — same presumed @classmethod situation as above.
    def load_dataset(cls, choice, debug=False):
        # Load the pickled preprocessed split, dropping oversized training DBs.
        assert choice in ['train', 'dev']
        fp = os.path.join('data', choice + '.' + cls.method + '.bin')
        datasets = pickle.load(open(fp, 'rb'))
        # question_lens = [len(ex['processed_question_toks']) for ex in datasets]
        # print('Max/Min/Avg question length in %s dataset is: %d/%d/%.2f' % (choice, max(question_lens), min(question_lens), float(sum(question_lens))/len(question_lens)))
        # action_lens = [len(ex['actions']) for ex in datasets]
        # print('Max/Min/Avg action length in %s dataset is: %d/%d/%.2f' % (choice, max(action_lens), min(action_lens), float(sum(action_lens))/len(action_lens)))
        examples, outliers = [], 0
        for ex in datasets:
            # skip training examples whose schema has more than 100 columns
            if choice == 'train' and len(cls.tables[ex['db_id']]['column_names']) > 100:
                outliers += 1
                continue
            examples.append(cls(ex, cls.tables[ex['db_id']]))
            if debug and len(examples) >= 100:
                return examples
        if choice == 'train':
            print("Skip %d extremely large samples in training dataset ..." % (outliers))
        return examples

    def __init__(self, ex: dict, db: dict):
        # ex: one preprocessed example dict; db: its database schema dict.
        super(Example, self).__init__()
        self.ex = ex
        self.db = db

        """ Mapping word to corresponding index """
        if Example.plm is None:
            self.question = ex['processed_question_toks']
            self.question_id = [Example.word_vocab[w] for w in self.question]
            # each column name is prefixed with its type token, tables with 'table'
            self.column = [[db['column_types'][idx].lower()] + c for idx, c in enumerate(db['processed_column_toks'])]
            self.column_id = [[Example.word_vocab[w] for w in c] for c in self.column]
            self.table = [['table'] + t for t in db['processed_table_toks']]
            self.table_id = [[Example.word_vocab[w] for w in t] for t in self.table]
        else:
            t = Example.tokenizer
            self.question = [q.lower() for q in ex['raw_question_toks']]
            self.question_id = [t.cls_token_id] # map token to id
            self.question_mask_plm = [] # remove SEP token in our case
            self.question_subword_len = [] # subword len for each word, exclude SEP token
            for w in self.question:
                toks = t.convert_tokens_to_ids(t.tokenize(w))
                self.question_id.extend(toks)
                self.question_subword_len.append(len(toks))
            # mask out [CLS] at the front and the trailing [SEP]
            self.question_mask_plm = [0] + [1] * (len(self.question_id) - 1) + [0]
            self.question_id.append(t.sep_token_id)

            self.table = [['table'] + t.lower().split() for t in db['table_names']]
            self.table_id, self.table_mask_plm, self.table_subword_len = [], [], []
            self.table_word_len = []
            for s in self.table:
                l = 0
                for w in s:
                    toks = t.convert_tokens_to_ids(t.tokenize(w))
                    self.table_id.extend(toks)
                    self.table_subword_len.append(len(toks))
                    l += len(toks)
                self.table_word_len.append(l)
            self.table_mask_plm = [1] * len(self.table_id)

            self.column = [[db['column_types'][idx].lower()] + c.lower().split() for idx, (_, c) in enumerate(db['column_names'])]
            self.column_id, self.column_mask_plm, self.column_subword_len = [], [], []
            self.column_word_len = []
            for s in self.column:
                l = 0
                for w in s:
                    toks = t.convert_tokens_to_ids(t.tokenize(w))
                    self.column_id.extend(toks)
                    self.column_subword_len.append(len(toks))
                    l += len(toks)
                self.column_word_len.append(l)
            self.column_mask_plm = [1] * len(self.column_id) + [0]
            self.column_id.append(t.sep_token_id)

            # full PLM input: [CLS] question [SEP] tables columns [SEP]
            self.input_id = self.question_id + self.table_id + self.column_id
            # RoBERTa-style models do not use segment ids, hence all zeros
            self.segment_id = [0] * len(self.question_id) + [1] * (len(self.table_id) + len(self.column_id)) \
                if Example.plm != 'grappa_large_jnt' and not Example.plm.startswith('roberta') \
                else [0] * (len(self.question_id) + len(self.table_id) + len(self.column_id))
            # pad each section mask to cover the full concatenated input
            self.question_mask_plm = self.question_mask_plm + [0] * (len(self.table_id) + len(self.column_id))
            self.table_mask_plm = [0] * len(self.question_id) + self.table_mask_plm + [0] * len(self.column_id)
            self.column_mask_plm = [0] * (len(self.question_id) + len(self.table_id)) + self.column_mask_plm

        self.graph = Example.graph_factory.graph_construction(ex, db)

        # outputs
        self.query = ' '.join(ex['query'].split('\t'))
        self.ast = ex['ast']
        self.tgt_action = ex['actions']
        self.used_tables, self.used_columns = ex['used_tables'], ex['used_columns']
The provided code snippet includes necessary dependencies for implementing the `from_example_list_text2sql` function. Write a Python function `def from_example_list_text2sql(ex_list, device='cpu', train=True, **kwargs)` to solve the following problem:
New fields: batch.lens, batch.max_len, batch.relations, batch.relations_mask
Here is the function:
def from_example_list_text2sql(ex_list, device='cpu', train=True, **kwargs):
    """Build a text2sql batch: the shared base fields plus the batched graph.

    New fields: batch.lens, batch.max_len, batch.relations, batch.relations_mask;
    when training, also batch.max_action_num (longest target action sequence).
    """
    batch = from_example_list_base(ex_list, device, train)
    batch.graph = Example.graph_factory.batch_graphs(ex_list, device, train=train, **kwargs)
    if train:
        batch.max_action_num = max(len(ex.tgt_action) for ex in ex_list)
    return batch
163,554 | import logging
import re, math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
# Maps the --lr_schedule option name to its scheduler factory function.
schedule_dict = {
    "constant": get_constant_schedule,
    "linear": get_linear_schedule_with_warmup,
    "ratsql": get_ratsql_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
}
class AdamW(Optimizer):
    """ Implements Adam algorithm with weight decay fix (decoupled weight decay).
    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adams epsilon. Default: 1e-6
        weight_decay (float): Weight decay. Default: 0.0
        max_grad_norm (float): per-parameter gradient norm clipping threshold,
            applied inside step(); disabled when <= 0. Default: -1
        correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, max_grad_norm=-1, correct_bias=True):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm, correct_bias=correct_bias)
        super().__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]
                state["step"] += 1
                # Add grad clipping (per-parameter, before the moment update)
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group["eps"])
                step_size = group["lr"]
                if group["correct_bias"]:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state["step"]
                    bias_correction2 = 1.0 - beta2 ** state["step"]
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                # Add weight decay at the end (fixed version)
                if group["weight_decay"] > 0.0:
                    p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
        return loss
def set_optimizer(model, args, num_warmup_steps, num_training_steps, last_epoch=-1):
    # Build the AdamW optimizer (with parameter groups depending on
    # args.layerwise_decay) plus the lr scheduler named by args.lr_schedule.
    # Returns (optimizer, scheduler).
    #
    # layerwise_decay semantics (for PLM-based encoders):
    #   <= 0          : freeze all pretrained-model parameters
    #   (0, 0.5]      : single separate (scaled) lr for the whole PLM
    #   (0.5, 1)      : per-layer geometric lr decay inside the PLM
    #   otherwise     : one lr for everything
    plm = hasattr(model.encoder.input_layer, 'plm_model')
    if plm and args.layerwise_decay <= 0.:  # fix plm params
        for n, p in model.named_parameters():
            if 'plm_model' in n:
                p.requires_grad = False
    params = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
    # biases and LayerNorm weights are exempt from weight decay
    no_decay = ['bias', 'LayerNorm.weight']
    if plm and 0. < args.layerwise_decay <= 0.5:  # seperate lr for plm
        grouped_params = [
            {'params': list(set([p for n, p in params if 'plm_model' in n and not any(nd in n for nd in no_decay)])), 'lr': args.layerwise_decay * args.lr, 'weight_decay': args.l2},
            {'params': list(set([p for n, p in params if 'plm_model' in n and any(nd in n for nd in no_decay)])), 'lr': args.layerwise_decay * args.lr, 'weight_decay': 0.0},
            {'params': list(set([p for n, p in params if 'plm_model' not in n and not any(nd in n for nd in no_decay)])), 'weight_decay': args.l2},
            {'params': list(set([p for n, p in params if 'plm_model' not in n and any(nd in n for nd in no_decay)])), 'weight_decay': 0.0},
        ]
        print('Use seperate lr %f for pretrained model ...' % (args.lr * args.layerwise_decay))
    elif plm and 0.5 < args.layerwise_decay < 1.:  # lr decay layerwise for plm
        # extract the transformer layer index from parameter names like
        # "...encoder.layer.<idx>...."; embeddings count as depth 0,
        # everything outside the PLM as the deepest layer (full lr)
        pattern = r'encoder\.layer\.(.*?)\.'
        num_layers = int(model.encoder.input_layer.plm_model.config.num_hidden_layers)
        groups = {"decay": defaultdict(list), "no_decay": defaultdict(list)}  # record grouped params
        for n, p in params:
            res = re.search(pattern, n) if 'plm_model' in n else None
            depth = int(res.group(1)) if res is not None else 0 if 'plm_model' in n else num_layers
            if any(nd in n for nd in no_decay):
                groups["no_decay"][int(depth)].append(p)
            else:
                groups["decay"][int(depth)].append(p)
        grouped_params = []
        for d in groups["decay"]:
            # deeper (closer to output) layers get larger lr
            lr = args.lr * (args.layerwise_decay ** (num_layers - d))
            grouped_params.append({'params': list(set(groups["decay"][d])), 'lr': lr, 'weight_decay': args.l2})
        for d in groups["no_decay"]:
            lr = args.lr * (args.layerwise_decay ** (num_layers - d))
            grouped_params.append({'params': list(set(groups["no_decay"][d])), 'lr': lr, 'weight_decay': 0.0})
        print('Use layerwise decay (rate %f) lr %f for pretrained model ...' % (args.layerwise_decay, args.lr))
    else:  # the same lr for plm and other modules
        grouped_params = [
            {'params': list(set([p for n, p in params if not any(nd in n for nd in no_decay)])), 'weight_decay': args.l2},
            {'params': list(set([p for n, p in params if any(nd in n for nd in no_decay)])), 'weight_decay': 0.0},
        ]
        print('Use the same lr %f for all parameters ...' % (args.lr))
    optimizer = AdamW(grouped_params, lr=args.lr, max_grad_norm=args.max_norm)
    schedule_func = schedule_dict[args.lr_schedule]
    scheduler = schedule_func(optimizer, num_warmup_steps, num_training_steps, last_epoch=last_epoch)
    return optimizer, scheduler
163,555 | import logging
import re, math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
The provided code snippet includes necessary dependencies for implementing the `get_ratsql_schedule_with_warmup` function. Write a Python function `def get_ratsql_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1)` to solve the following problem:
Create a schedule with a learning rate that decreases according to the formula in the RATSQL model
Here is the function:
def get_ratsql_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule with linear warmup followed by the square-root decay
    used in the RATSQL model: factor = sqrt(remaining_steps / decay_steps).
    """
    def lr_lambda(step):
        if step < num_warmup_steps:
            return float(step) / float(max(1.0, num_warmup_steps))
        remaining = (num_training_steps - step) / float(num_training_steps - num_warmup_steps)
        return max(0.0, math.sqrt(remaining))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
163,556 | import logging
import re, math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
The provided code snippet includes necessary dependencies for implementing the `get_constant_schedule` function. Write a Python function `def get_constant_schedule(optimizer, *args, last_epoch=-1)` to solve the following problem:
Create a schedule with a constant learning rate.
Here is the function:
def get_constant_schedule(optimizer, *args, last_epoch=-1):
    """Create a schedule whose multiplicative factor is always 1, i.e. a
    constant learning rate. Extra positional args are accepted and ignored so
    it shares a call signature with the warmup schedules.
    """
    return LambdaLR(optimizer, lambda _step: 1, last_epoch=last_epoch)
163,557 | import logging
import re, math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
The provided code snippet includes necessary dependencies for implementing the `get_constant_schedule_with_warmup` function. Write a Python function `def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1)` to solve the following problem:
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and 1.
Here is the function:
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """Create a schedule with a constant learning rate preceded by a warmup
    period: the factor climbs linearly from 0 to 1 over `num_warmup_steps`,
    then stays at 1.
    """
    def lr_lambda(step):
        if step >= num_warmup_steps:
            return 1.0
        return float(step) / float(max(1.0, num_warmup_steps))
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
163,558 | import logging
import re, math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
The provided code snippet includes necessary dependencies for implementing the `get_linear_schedule_with_warmup` function. Write a Python function `def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1)` to solve the following problem:
Create a schedule with a learning rate that decreases linearly after linearly increasing during a warmup period.
Here is the function:
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule with a linear warmup from 0 to the base lr over
    `num_warmup_steps`, then a linear decay to 0 at `num_training_steps`.
    """
    def lr_lambda(step):
        if step < num_warmup_steps:
            return float(step) / float(max(1, num_warmup_steps))
        remaining = float(num_training_steps - step)
        decay_span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / decay_span)
    return LambdaLR(optimizer, lr_lambda, last_epoch)
163,559 | import logging
import re, math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
The provided code snippet includes necessary dependencies for implementing the `get_cosine_schedule_with_warmup` function. Write a Python function `def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1)` to solve the following problem:
Create a schedule with a learning rate that decreases following the values of the cosine function between 0 and `pi * cycles` after a warmup period during which it increases linearly between 0 and 1.
Here is the function:
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Create a schedule with a linear warmup followed by cosine decay: the
    factor follows cos over `num_cycles` full waves between warmup end and
    `num_training_steps` (0.5 cycles = one smooth decay from 1 to 0).
    """
    def lr_lambda(step):
        if step < num_warmup_steps:
            return float(step) / float(max(1, num_warmup_steps))
        progress = float(step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        wave = math.cos(math.pi * float(num_cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1.0 + wave))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
163,560 | import logging
import re, math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
The provided code snippet includes necessary dependencies for implementing the `get_cosine_with_hard_restarts_schedule_with_warmup` function. Write a Python function `def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=-1 )` to solve the following problem:
Create a schedule with a learning rate that decreases following the values of the cosine function with several hard restarts, after a warmup period during which it increases linearly between 0 and 1.
Here is the function:
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=-1
):
    """Create a schedule with a linear warmup followed by cosine decay with
    `num_cycles` hard restarts: each cycle decays from factor 1 to 0, then the
    factor jumps back to 1; past `num_training_steps` the factor stays 0.
    """
    def lr_lambda(step):
        if step < num_warmup_steps:
            return float(step) / float(max(1, num_warmup_steps))
        progress = float(step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        phase = (float(num_cycles) * progress) % 1.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * phase)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
163,561 | import sys, os
def hyperparam_path_text2sql(args):
task = 'task_%s__model_%s_view_%s' % (args.task, args.model, args.local_and_nonlocal)
task += '' if 'without' in args.output_model else '_gp_%s' % (args.smoothing)
# encoder params
exp_path = 'emb_%s' % (args.embed_size) if args.plm is None else 'plm_%s' % (args.plm)
exp_path += '__gnn_%s_x_%s' % (args.gnn_hidden_size, args.gnn_num_layers)
exp_path += '__share' if args.relation_share_layers else ''
exp_path += '__head_%s' % (args.num_heads)
exp_path += '__share' if args.relation_share_heads else ''
exp_path += '__dp_%s' % (args.dropout)
exp_path += '__dpa_%s' % (args.attn_drop)
exp_path += '__dpc_%s' % (args.drop_connect)
# decoder params
# exp_path += '__cell_%s_%s_x_%s' % (args.lstm, args.lstm_hidden_size, args.lstm_num_layers)
# exp_path += '_chunk_%s' % (args.chunk_size) if args.lstm == 'onlstm' else ''
# exp_path += '_no' if args.no_parent_state else ''
# exp_path += '__attvec_%s' % (args.att_vec_size)
# exp_path += '__sepcxt' if args.sep_cxt else '__jointcxt'
# exp_path += '_no' if args.no_context_feeding else ''
# exp_path += '__ae_%s' % (args.action_embed_size)
# exp_path += '_no' if args.no_parent_production_embed else ''
# exp_path += '__fe_%s' % ('no' if args.no_parent_field_embed else args.field_embed_size)
# exp_path += '__te_%s' % ('no' if args.no_parent_field_type_embed else args.type_embed_size)
# training params
exp_path += '__bs_%s' % (args.batch_size)
exp_path += '__lr_%s' % (args.lr) if args.plm is None else '__lr_%s_ld_%s' % (args.lr, args.layerwise_decay)
exp_path += '__l2_%s' % (args.l2)
exp_path += '__wp_%s' % (args.warmup_ratio)
exp_path += '__sd_%s' % (args.lr_schedule)
exp_path += '__me_%s' % (args.max_epoch)
exp_path += '__mn_%s' % (args.max_norm)
exp_path += '__bm_%s' % (args.beam_size)
exp_path += '__seed_%s' % (args.seed)
exp_path = os.path.join(EXP_PATH, task, exp_path)
return exp_path
def hyperparam_path(args):
if args.read_model_path and args.testing:
return args.read_model_path
exp_path = hyperparam_path_text2sql(args)
if not os.path.exists(exp_path):
os.makedirs(exp_path)
return exp_path | null |
163,562 | import argparse
import sys
def add_argument_base(arg_parser):
def add_argument_encoder(arg_parser):
def add_argument_decoder(arg_parser):
def init_args(params=sys.argv[1:]):
arg_parser = argparse.ArgumentParser()
arg_parser = add_argument_base(arg_parser)
arg_parser = add_argument_encoder(arg_parser)
arg_parser = add_argument_decoder(arg_parser)
opt = arg_parser.parse_args(params)
if opt.model == 'rgatsql' and opt.local_and_nonlocal == 'msde':
opt.local_and_nonlocal = 'global'
if opt.model == 'lgesql' and opt.local_and_nonlocal == 'global':
opt.local_and_nonlocal = 'msde'
return opt | null |
163,563 | import sys, os, logging
import random, torch, dgl
import numpy as np
def set_logger(exp_path, testing=False):
logFormatter = logging.Formatter('%(asctime)s - %(message)s') #('%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger('mylogger')
logger.setLevel(logging.DEBUG)
if testing:
fileHandler = logging.FileHandler('%s/log_test.txt' % (exp_path), mode='w')
else:
fileHandler = logging.FileHandler('%s/log_train.txt' % (exp_path), mode='w')
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
return logger | null |
163,564 | import sys, os, logging
import random, torch, dgl
import numpy as np
import random
random.seed(33)
def set_random_seed(random_seed=999):
random.seed(random_seed)
torch.manual_seed(random_seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(random_seed)
np.random.seed(random_seed)
dgl.random.seed(random_seed) | null |
163,565 | import sys, os, logging
import random, torch, dgl
import numpy as np
def set_torch_device(deviceId):
if deviceId < 0:
device = torch.device("cpu")
else:
assert torch.cuda.device_count() >= deviceId + 1
device = torch.device("cuda:%d" % (deviceId))
# os.environ['CUDA_LAUNCH_BLOCKING'] = "1" # used when debug
## These two sentences are used to ensure reproducibility with cudnnbacken
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
return device | null |
163,566 | import logging
import os
import time
import torch
import datasets
import transformers
from transformers import (
HfArgumentParser,
set_seed,
EarlyStoppingCallback,
)
from transformers.trainer_utils import get_last_checkpoint
from collections import OrderedDict
import utils.tool
from utils.configue import Configure
from utils.dataset import TokenizedDataset
from utils.trainer import EvaluateFriendlySeq2SeqTrainer
from utils.training_arguments import WrappedSeq2SeqTrainingArguments
import pickle
import pdb
def compute_params(model):
total_num = sum(p.numel() for p in model.parameters())
trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Total_num: {}, trainable_num:{}".format(total_num, trainable_num))
return | null |
163,567 | from typing import Dict, Any
from third_party.spider import evaluation as spider_evaluation
def compute_exact_match_metric(predictions, references) -> Dict[str, Any]:
foreign_key_maps = dict()
for reference in references:
if reference["db_id"] not in foreign_key_maps:
foreign_key_maps[reference["db_id"]] = spider_evaluation.build_foreign_key_map(
{
"table_names_original": reference["db_table_names"],
"column_names_original": list(
zip(
reference["db_column_names"]["table_id"],
reference["db_column_names"]["column_name"],
)
),
"foreign_keys": list(
zip(
reference["db_foreign_keys"]["column_id"],
reference["db_foreign_keys"]["other_column_id"],
)
),
}
)
evaluator = spider_evaluation.Evaluator(references[0]["db_path"], foreign_key_maps, "match")
for prediction, reference in zip(predictions, references):
turn_idx = reference.get("turn_idx", 0)
# skip final utterance-query pairs
if turn_idx < 0:
continue
_ = evaluator.evaluate_one(reference["db_id"], reference["query"], prediction)
evaluator.finalize()
return {
"exact_match": evaluator.scores["all"]["exact"],
} | null |
163,568 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import warnings
import multiprocessing as mp
from collections import OrderedDict
from func_timeout import func_timeout, FunctionTimedOut
def result_callback(result):
exec_result.append(result)
def execute_model(sql, db_place, idx):
try:
result = func_timeout(30.0, execute_sql, args=(sql, db_place))
except KeyboardInterrupt:
sys.exit(0)
except FunctionTimedOut:
result = [(f'timeout',)]
except Exception as e:
print('except:{}'.format(e))
result = [(f'error',)] # possibly len(query) > 512 or not executable
# print(result)
# result = str(set([ret[0] for ret in result]))
result = {'sql_idx': idx, 'results': result}
return result
def run_sql_parallel(sql, db_place, num_cpus=1):
pool = mp.Pool(processes=num_cpus)
pool.apply_async(execute_model, args=(sql, db_place), callback=result_callback)
pool.close()
pool.join() | null |
163,569 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import warnings
import multiprocessing as mp
from collections import OrderedDict
from func_timeout import func_timeout, FunctionTimedOut
def result_callback(result):
exec_result.append(result)
def execute_model(sql, db_place, idx):
try:
result = func_timeout(30.0, execute_sql, args=(sql, db_place))
except KeyboardInterrupt:
sys.exit(0)
except FunctionTimedOut:
result = [(f'timeout',)]
except Exception as e:
print('except:{}'.format(e))
result = [(f'error',)] # possibly len(query) > 512 or not executable
# print(result)
# result = str(set([ret[0] for ret in result]))
result = {'sql_idx': idx, 'results': result}
return result
def run_sqls_parallel(sqls, db_place, num_cpus=1):
pool = mp.Pool(processes=num_cpus)
for i, sql in enumerate(sqls):
# if i == 10:
# break
print('*************** processing {}th sql ***************'.format(i))
print(sql)
pool.apply_async(execute_model, args=(sql, db_place, i), callback=result_callback)
pool.close()
pool.join() | null |
163,570 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import warnings
import multiprocessing as mp
from collections import OrderedDict
from func_timeout import func_timeout, FunctionTimedOut
def package_sqls(sql_path, db_name, mode='codex'):
clean_sqls = []
if mode == 'codex':
sql_data = json.load(open(sql_path + db_name + '_sql.json', 'r'))
for idx, sql_str in sql_data.items():
clean_sqls.append(sql_str)
elif mode == 'gt':
sqls = open(sql_path + db_name + '.sql')
sql_txt = sqls.readlines()
sql_txt = [sql.split('\t')[0] for sql in sql_txt]
for idx, sql_str in enumerate(sql_txt):
clean_sqls.append(sql_str)
return clean_sqls | null |
163,571 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import warnings
import multiprocessing as mp
from collections import OrderedDict
from func_timeout import func_timeout, FunctionTimedOut
def export_sqls(sql_path, db_name):
cleaned_sqls = []
sql_data = json.load(open(sql_path + db_name + '.json', 'r'))
for idx, sql_item in enumerate(sql_data):
cleaned_sqls.append(sql_item['query'])
return cleaned_sqls | null |
163,572 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import warnings
import multiprocessing as mp
from collections import OrderedDict
from func_timeout import func_timeout, FunctionTimedOut
def sort_results(list_of_dicts):
return sorted(list_of_dicts, key=lambda x: x['sql_idx']) | null |
163,573 | import os
import pdb
import sys
import json
import numpy as np
import argparse
import sqlite3
import warnings
import multiprocessing as mp
from collections import OrderedDict
from func_timeout import func_timeout, FunctionTimedOut
def compute_execution_accuracy(gt_results, predict_results):
num_correct = 0
num_queries = len(gt_results)
mismatch_idx = []
for i, result in enumerate(gt_results):
if set(result['results']) == set(predict_results[i]['results']):
num_correct += 1
else:
mismatch_idx.append(i)
acc = (num_correct / num_queries) * 100
return acc | null |
163,575 | import os
import torch
import random
import re
from copy import deepcopy
from typing import List, Dict
from datasets.dataset_dict import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from third_party.miscs.bridge_content_encoder import get_database_matches
from tqdm import tqdm
def serialize_schema_natural_language(
question: str,
db_path: str,
db_id: str,
db_column_names: Dict[str, str],
db_table_names: List[str],
db_primary_keys,
db_foreign_keys,
schema_serialization_with_db_content: bool = False,
normalize_query: bool = True,
) -> str:
overall_description = f'{db_id} contains tables such as ' \
f'{", ".join([table_name.lower() if normalize_query else table_name for table_name in db_table_names])}.'
table_description_primary_key_template = lambda table_name, primary_key: \
f'{primary_key} is the primary key.'
table_description = lambda table_name, column_names: \
f'Table {table_name} has columns such as {", ".join(column_names)}.'
value_description = lambda column_value_pairs: \
f'{"".join(["The {} contains values such as {}.".format(column, value) for column, value in column_value_pairs])}'
foreign_key_description = lambda table_1, column_1, table_2, column_2: \
f'The {column_1} of {table_1} is the foreign key of {column_2} of {table_2}.'
db_primary_keys = db_primary_keys["column_id"]
db_foreign_keys = list(zip(db_foreign_keys["column_id"], db_foreign_keys["other_column_id"]))
descriptions = [overall_description]
db_table_name_strs = []
db_column_name_strs = []
value_sep = ", "
for table_id, table_name in enumerate(db_table_names):
table_name_str = table_name.lower() if normalize_query else table_name
db_table_name_strs.append(table_name_str)
columns = []
column_value_pairs = []
primary_keys = []
for column_id, (x, y) in enumerate(zip(db_column_names["table_id"], db_column_names["column_name"])):
if column_id == 0:
continue
column_str = y.lower() if normalize_query else y
db_column_name_strs.append(column_str)
if x == table_id:
columns.append(column_str)
if column_id in db_primary_keys:
primary_keys.append(column_str)
if schema_serialization_with_db_content:
matches = get_database_matches(
question=question,
table_name=table_name,
column_name=y,
db_path=(db_path + "/" + db_id + "/" + db_id + ".sqlite"),
)
if matches:
column_value_pairs.append((column_str, value_sep.join(matches)))
table_description_columns_str = table_description(table_name_str, columns)
descriptions.append(table_description_columns_str)
table_description_primary_key_str = table_description_primary_key_template(table_name_str, ", ".join(primary_keys))
descriptions.append(table_description_primary_key_str)
if len(column_value_pairs) > 0:
value_description_str = value_description(column_value_pairs)
descriptions.append(value_description_str)
for x, y in db_foreign_keys:
# get the table and column of x
x_table_name = db_table_name_strs[db_column_names["table_id"][x]]
x_column_name = db_column_name_strs[x]
# get the table and column of y
y_table_name = db_table_name_strs[db_column_names["table_id"][y]]
y_column_name = db_column_name_strs[y]
foreign_key_description_str = foreign_key_description(x_table_name, x_column_name, y_table_name, y_column_name)
descriptions.append(foreign_key_description_str)
return " ".join(descriptions)
def serialize_schema(
question: str,
db_path: str,
db_id: str,
db_column_names: Dict[str, str],
db_table_names: List[str],
schema_serialization_type: str = "peteshaw",
schema_serialization_randomized: bool = False,
schema_serialization_with_db_id: bool = True,
schema_serialization_with_db_content: bool = False,
normalize_query: bool = True,
) -> str:
if schema_serialization_type == "verbose":
db_id_str = "Database: {db_id}. "
table_sep = ". "
table_str = "Table: {table}. Columns: {columns}"
column_sep = ", "
column_str_with_values = "{column} ({values})"
column_str_without_values = "{column}"
value_sep = ", "
elif schema_serialization_type == "peteshaw":
# see https://github.com/google-research/language/blob/master/language/nqg/tasks/spider/append_schema.py#L42
db_id_str = " | {db_id}"
table_sep = ""
table_str = " | {table} : {columns}"
column_sep = " , "
column_str_with_values = "{column} ( {values} )"
column_str_without_values = "{column}"
value_sep = " , "
else:
raise NotImplementedError
def get_column_str(table_name: str, column_name: str) -> str:
column_name_str = column_name.lower() if normalize_query else column_name
if schema_serialization_with_db_content:
matches = get_database_matches(
question=question,
table_name=table_name,
column_name=column_name,
db_path=(db_path + "/" + db_id + "/" + db_id + ".sqlite"),
)
if matches:
return column_str_with_values.format(
column=column_name_str, values=value_sep.join(matches)
)
else:
return column_str_without_values.format(column=column_name_str)
else:
return column_str_without_values.format(column=column_name_str)
tables = [
table_str.format(
table=table_name.lower() if normalize_query else table_name,
columns=column_sep.join(
map(
lambda y: get_column_str(table_name=table_name, column_name=y[1]),
filter(
lambda y: y[0] == table_id,
zip(
db_column_names["table_id"],
db_column_names["column_name"],
),
),
)
),
)
for table_id, table_name in enumerate(db_table_names)
]
if schema_serialization_randomized:
random.shuffle(tables)
if schema_serialization_with_db_id:
serialized_schema = db_id_str.format(db_id=db_id) + table_sep.join(tables)
else:
serialized_schema = table_sep.join(tables)
return serialized_schema
def spider_add_serialized_schema(ex: dict, args) -> dict:
if getattr(args.seq2seq, "schema_serialization_with_nl"):
serialized_schema = serialize_schema_natural_language(
question=ex["question"],
db_path=ex["db_path"],
db_id=ex["db_id"],
db_column_names=ex["db_column_names"],
db_table_names=ex["db_table_names"],
db_primary_keys=ex["db_primary_keys"],
db_foreign_keys=ex["db_foreign_keys"],
schema_serialization_with_db_content=args.seq2seq.schema_serialization_with_db_content,
normalize_query=True,
)
else:
serialized_schema = serialize_schema(
question=ex["question"],
db_path=ex["db_path"],
db_id=ex["db_id"],
db_column_names=ex["db_column_names"],
db_table_names=ex["db_table_names"],
schema_serialization_type="peteshaw",
schema_serialization_randomized=False,
schema_serialization_with_db_id=True,
schema_serialization_with_db_content=args.seq2seq.schema_serialization_with_db_content,
normalize_query=True,
)
return {"serialized_schema": serialized_schema} | null |
163,576 | import os
import torch
import random
import re
from copy import deepcopy
from typing import List, Dict
from datasets.dataset_dict import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from third_party.miscs.bridge_content_encoder import get_database_matches
from tqdm import tqdm
def spider_get_input(
question: str,
serialized_schema: str,
prefix: str,
) -> str:
return prefix + question.strip() + " " + serialized_schema.strip()
def spider_get_target(
query: str,
db_id: str,
normalize_query: bool,
target_with_db_id: bool,
) -> str:
_normalize = normalize if normalize_query else (lambda x: x)
return f"{db_id} | {_normalize(query)}" if target_with_db_id else _normalize(query)
def spider_pre_process_function(batch: dict, args):
prefix = ""
inputs = [
spider_get_input(
question=question, serialized_schema=serialized_schema, prefix=prefix
)
for question, serialized_schema in zip(
batch["question"], batch["serialized_schema"]
)
]
targets = [
spider_get_target(
query=query,
db_id=db_id,
normalize_query=True,
target_with_db_id=args.seq2seq.target_with_db_id,
)
for db_id, query in zip(batch["db_id"], batch["query"])
]
return zip(inputs, targets) | null |
163,577 | import os
import torch
import random
import re
from copy import deepcopy
from typing import List, Dict
from datasets.dataset_dict import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from third_party.miscs.bridge_content_encoder import get_database_matches
from tqdm import tqdm
def spider_get_target(
query: str,
db_id: str,
normalize_query: bool,
target_with_db_id: bool,
) -> str:
_normalize = normalize if normalize_query else (lambda x: x)
return f"{db_id} | {_normalize(query)}" if target_with_db_id else _normalize(query)
def spider_pre_process_one_function(item: dict, args):
prefix = ""
seq_out = spider_get_target(
query=item["query"],
db_id=item["db_id"],
normalize_query=True,
target_with_db_id=args.seq2seq.target_with_db_id,
)
return prefix + item["question"].strip(), seq_out | null |
163,578 | import os
import torch
import random
import re
from copy import deepcopy
from typing import List, Dict
from datasets.dataset_dict import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from third_party.miscs.bridge_content_encoder import get_database_matches
from tqdm import tqdm
def _get_schemas(examples: Dataset) -> Dict[str, dict]:
schemas: Dict[str, dict] = dict()
for ex in examples:
if ex["db_id"] not in schemas:
schemas[ex["db_id"]] = {
"db_table_names": ex["db_table_names"],
"db_column_names": ex["db_column_names"],
"db_column_types": ex["db_column_types"],
"db_primary_keys": ex["db_primary_keys"],
"db_foreign_keys": ex["db_foreign_keys"],
}
return schemas | null |
163,579 | import os
import torch
import random
import re
from copy import deepcopy
from typing import List, Dict
from datasets.dataset_dict import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from third_party.miscs.bridge_content_encoder import get_database_matches
from tqdm import tqdm
def serialize_schema_natural_language(
question: str,
db_path: str,
db_id: str,
db_column_names: Dict[str, str],
db_table_names: List[str],
db_primary_keys,
db_foreign_keys,
schema_serialization_with_db_content: bool = False,
normalize_query: bool = True,
) -> str:
overall_description = f'{db_id} contains tables such as ' \
f'{", ".join([table_name.lower() if normalize_query else table_name for table_name in db_table_names])}.'
table_description_primary_key_template = lambda table_name, primary_key: \
f'{primary_key} is the primary key.'
table_description = lambda table_name, column_names: \
f'Table {table_name} has columns such as {", ".join(column_names)}.'
value_description = lambda column_value_pairs: \
f'{"".join(["The {} contains values such as {}.".format(column, value) for column, value in column_value_pairs])}'
foreign_key_description = lambda table_1, column_1, table_2, column_2: \
f'The {column_1} of {table_1} is the foreign key of {column_2} of {table_2}.'
db_primary_keys = db_primary_keys["column_id"]
db_foreign_keys = list(zip(db_foreign_keys["column_id"], db_foreign_keys["other_column_id"]))
descriptions = [overall_description]
db_table_name_strs = []
db_column_name_strs = []
value_sep = ", "
for table_id, table_name in enumerate(db_table_names):
table_name_str = table_name.lower() if normalize_query else table_name
db_table_name_strs.append(table_name_str)
columns = []
column_value_pairs = []
primary_keys = []
for column_id, (x, y) in enumerate(zip(db_column_names["table_id"], db_column_names["column_name"])):
if column_id == 0:
continue
column_str = y.lower() if normalize_query else y
db_column_name_strs.append(column_str)
if x == table_id:
columns.append(column_str)
if column_id in db_primary_keys:
primary_keys.append(column_str)
if schema_serialization_with_db_content:
matches = get_database_matches(
question=question,
table_name=table_name,
column_name=y,
db_path=(db_path + "/" + db_id + "/" + db_id + ".sqlite"),
)
if matches:
column_value_pairs.append((column_str, value_sep.join(matches)))
table_description_columns_str = table_description(table_name_str, columns)
descriptions.append(table_description_columns_str)
table_description_primary_key_str = table_description_primary_key_template(table_name_str, ", ".join(primary_keys))
descriptions.append(table_description_primary_key_str)
if len(column_value_pairs) > 0:
value_description_str = value_description(column_value_pairs)
descriptions.append(value_description_str)
for x, y in db_foreign_keys:
# get the table and column of x
x_table_name = db_table_name_strs[db_column_names["table_id"][x]]
x_column_name = db_column_name_strs[x]
# get the table and column of y
y_table_name = db_table_name_strs[db_column_names["table_id"][y]]
y_column_name = db_column_name_strs[y]
foreign_key_description_str = foreign_key_description(x_table_name, x_column_name, y_table_name, y_column_name)
descriptions.append(foreign_key_description_str)
return " ".join(descriptions)
def serialize_schema(
question: str,
db_path: str,
db_id: str,
db_column_names: Dict[str, str],
db_table_names: List[str],
schema_serialization_type: str = "peteshaw",
schema_serialization_randomized: bool = False,
schema_serialization_with_db_id: bool = True,
schema_serialization_with_db_content: bool = False,
normalize_query: bool = True,
) -> str:
if schema_serialization_type == "verbose":
db_id_str = "Database: {db_id}. "
table_sep = ". "
table_str = "Table: {table}. Columns: {columns}"
column_sep = ", "
column_str_with_values = "{column} ({values})"
column_str_without_values = "{column}"
value_sep = ", "
elif schema_serialization_type == "peteshaw":
# see https://github.com/google-research/language/blob/master/language/nqg/tasks/bird/append_schema.py#L42
db_id_str = " | {db_id}"
table_sep = ""
table_str = " | {table} : {columns}"
column_sep = " , "
column_str_with_values = "{column} ( {values} )"
column_str_without_values = "{column}"
value_sep = " , "
else:
raise NotImplementedError
def get_column_str(table_name: str, column_name: str) -> str:
column_name_str = column_name.lower() if normalize_query else column_name
if schema_serialization_with_db_content:
matches = get_database_matches(
question=question,
table_name=table_name,
column_name=column_name,
db_path=(db_path + "/" + db_id + "/" + db_id + ".sqlite"),
)
if matches:
return column_str_with_values.format(
column=column_name_str, values=value_sep.join(matches)
)
else:
return column_str_without_values.format(column=column_name_str)
else:
return column_str_without_values.format(column=column_name_str)
tables = [
table_str.format(
table=table_name.lower() if normalize_query else table_name,
columns=column_sep.join(
map(
lambda y: get_column_str(table_name=table_name, column_name=y[1]),
filter(
lambda y: y[0] == table_id,
zip(
db_column_names["table_id"],
db_column_names["column_name"],
),
),
)
),
)
for table_id, table_name in enumerate(db_table_names)
]
if schema_serialization_randomized:
random.shuffle(tables)
if schema_serialization_with_db_id:
serialized_schema = db_id_str.format(db_id=db_id) + table_sep.join(tables)
else:
serialized_schema = table_sep.join(tables)
return serialized_schema
def bird_add_serialized_schema(ex: dict, args) -> dict:
if getattr(args.seq2seq, "schema_serialization_with_nl"):
serialized_schema = serialize_schema_natural_language(
question=ex["question"],
db_path=ex["db_path"],
db_id=ex["db_id"],
db_column_names=ex["db_column_names"],
db_table_names=ex["db_table_names"],
db_primary_keys=ex["db_primary_keys"],
db_foreign_keys=ex["db_foreign_keys"],
schema_serialization_with_db_content=args.seq2seq.schema_serialization_with_db_content,
normalize_query=True,
)
else:
serialized_schema = serialize_schema(
question=ex["question"],
db_path=ex["db_path"],
db_id=ex["db_id"],
db_column_names=ex["db_column_names"],
db_table_names=ex["db_table_names"],
schema_serialization_type="peteshaw",
schema_serialization_randomized=False,
schema_serialization_with_db_id=True,
schema_serialization_with_db_content=args.seq2seq.schema_serialization_with_db_content,
normalize_query=True,
)
return {"serialized_schema": serialized_schema} | null |
163,580 | import os
import torch
import random
import re
from copy import deepcopy
from typing import List, Dict
from datasets.dataset_dict import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from third_party.miscs.bridge_content_encoder import get_database_matches
from tqdm import tqdm
def bird_get_input(
question: str,
serialized_schema: str,
prefix: str,
) -> str:
return prefix + question.strip() + " " + serialized_schema.strip()
def bird_get_target(
query: str,
db_id: str,
normalize_query: bool,
target_with_db_id: bool,
) -> str:
_normalize = normalize if normalize_query else (lambda x: x)
return f"{db_id} | {_normalize(query)}" if target_with_db_id else _normalize(query)
def bird_pre_process_function(batch: dict, args):
prefix = ""
inputs = [
bird_get_input(
question=question, serialized_schema=serialized_schema, prefix=prefix
)
for question, serialized_schema in zip(
batch["question"], batch["serialized_schema"]
)
]
targets = [
bird_get_target(
query=query,
db_id=db_id,
normalize_query=True,
target_with_db_id=args.seq2seq.target_with_db_id,
)
for db_id, query in zip(batch["db_id"], batch["query"])
]
return zip(inputs, targets) | null |
163,581 | import os
import torch
import random
import re
from copy import deepcopy
from typing import List, Dict
from datasets.dataset_dict import DatasetDict
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co
from third_party.miscs.bridge_content_encoder import get_database_matches
from tqdm import tqdm
def bird_get_target(
query: str,
db_id: str,
normalize_query: bool,
target_with_db_id: bool,
) -> str:
_normalize = normalize if normalize_query else (lambda x: x)
return f"{db_id} | {_normalize(query)}" if target_with_db_id else _normalize(query)
def bird_pre_process_one_function(item: dict, args):
prefix = ""
seq_out = bird_get_target(
query=item["query"],
db_id=item["db_id"],
normalize_query=True,
target_with_db_id=args.seq2seq.target_with_db_id,
)
return prefix + item["question"].strip(), seq_out | null |
163,583 | import copy
import math
import os
import warnings
import pdb
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from .rgat_tuning import RGAT_Layer
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
import torch.nn.functional as F
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_t5` function. Write a Python function `def load_tf_weights_in_t5(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
elif scope_names[0] == "self_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[0]
elif scope_names[0] == "enc_dec_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[1]
elif scope_names[0] == "dense_relu_dense":
pointer = getattr(pointer, "layer")
pointer = pointer[2]
elif scope_names[0] == "rms_norm":
if hasattr(pointer, "layer_norm"):
pointer = getattr(pointer, "layer_norm")
elif hasattr(pointer, "final_layer_norm"):
pointer = getattr(pointer, "final_layer_norm")
elif scope_names[0] == "scale":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
elif scope_names[0] == "decoder" and name[1] == "logits":
continue
elif scope_names[0] == "logits":
pointer = getattr(pointer, "lm_head")
elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
pointer = getattr(pointer, f"wi_{scope_names[1]}")
continue
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
return model | Load tf checkpoints in a pytorch model. |
163,589 | import copy
import math
import os
import warnings
import pdb
import pickle
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from .rgat_tuning import RGAT_Tuning, RGAT_Layer
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
import torch.nn.functional as F
import sys
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_t5` function. Write a Python function `def load_tf_weights_in_t5(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
elif scope_names[0] == "self_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[0]
elif scope_names[0] == "enc_dec_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[1]
elif scope_names[0] == "dense_relu_dense":
pointer = getattr(pointer, "layer")
pointer = pointer[2]
elif scope_names[0] == "rms_norm":
if hasattr(pointer, "layer_norm"):
pointer = getattr(pointer, "layer_norm")
elif hasattr(pointer, "final_layer_norm"):
pointer = getattr(pointer, "final_layer_norm")
elif scope_names[0] == "scale":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
elif scope_names[0] == "decoder" and name[1] == "logits":
continue
elif scope_names[0] == "logits":
pointer = getattr(pointer, "lm_head")
elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
pointer = getattr(pointer, f"wi_{scope_names[1]}")
continue
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
return model | Load tf checkpoints in a pytorch model. |
163,590 | import copy
import math
import os
import warnings
import pdb
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from .rgat_tuning import RGAT_Tuning, RGAT_Layer
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
import torch.nn.functional as F
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_t5` function. Write a Python function `def load_tf_weights_in_t5(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
elif scope_names[0] == "self_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[0]
elif scope_names[0] == "enc_dec_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[1]
elif scope_names[0] == "dense_relu_dense":
pointer = getattr(pointer, "layer")
pointer = pointer[2]
elif scope_names[0] == "rms_norm":
if hasattr(pointer, "layer_norm"):
pointer = getattr(pointer, "layer_norm")
elif hasattr(pointer, "final_layer_norm"):
pointer = getattr(pointer, "final_layer_norm")
elif scope_names[0] == "scale":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
elif scope_names[0] == "decoder" and name[1] == "logits":
continue
elif scope_names[0] == "logits":
pointer = getattr(pointer, "lm_head")
elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
pointer = getattr(pointer, f"wi_{scope_names[1]}")
continue
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
return model | Load tf checkpoints in a pytorch model. |
163,592 | import copy
import math
import os
import warnings
import pdb
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from .rgat_tuning import RGAT_Tuning
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
logger = logging.get_logger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_t5` function. Write a Python function `def load_tf_weights_in_t5(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
elif scope_names[0] == "self_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[0]
elif scope_names[0] == "enc_dec_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[1]
elif scope_names[0] == "dense_relu_dense":
pointer = getattr(pointer, "layer")
pointer = pointer[2]
elif scope_names[0] == "rms_norm":
if hasattr(pointer, "layer_norm"):
pointer = getattr(pointer, "layer_norm")
elif hasattr(pointer, "final_layer_norm"):
pointer = getattr(pointer, "final_layer_norm")
elif scope_names[0] == "scale":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
elif scope_names[0] == "decoder" and name[1] == "logits":
continue
elif scope_names[0] == "logits":
pointer = getattr(pointer, "lm_head")
elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
pointer = getattr(pointer, f"wi_{scope_names[1]}")
continue
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
return model | Load tf checkpoints in a pytorch model. |
163,596 | import pickle
import dgl
import pdb
from collections import defaultdict
graph = pickle.load(open('data/graph_pedia_total.bin', 'rb'))
def compute_relations(graph_pedia):
relation_count = defaultdict()
for idx, graph in graph_pedia.items():
relation_lst = graph['edges']
for e in relation_lst:
r = e[-1]
if r in relation_count:
relation_count[r] += 1
else:
relation_count[r] = 1
return relation_count | null |
163,597 | import dgl, math, torch
import pdb
def src_dot_dst(src_field, dst_field, out_field):
def func(edges):
return {out_field: (edges.src[src_field] * edges.dst[dst_field]).sum(-1, keepdim=True)}
pdb.set_trace()
return func | null |
163,598 | import dgl, math, torch
import pdb
def src_sum_edge_mul_dst(src_field, dst_field, e_field, out_field):
def func(edges):
return {out_field: ((edges.src[src_field] + edges.data[e_field]) * edges.dst[dst_field]).sum(-1, keepdim=True)}
return func | null |
163,599 | import dgl, math, torch
import pdb
def scaled_exp(field, scale_constant):
def func(edges):
# clamp for softmax numerical stability
return {field: torch.exp((edges.data[field] / scale_constant).clamp(-10, 10))}
return func | null |
163,600 | import dgl, math, torch
import pdb
def src_sum_edge_mul_edge(src_field, e_field1, e_field2, out_field):
def func(edges):
return {out_field: (edges.src[src_field] + edges.data[e_field1]) * edges.data[e_field2]}
return func | null |
163,601 | import dgl, math, torch
import pdb
def div_by_z(in_field, norm_field, out_field):
def func(nodes):
# print(nodes.data[norm_field])
return {out_field: nodes.data[in_field] / (nodes.data[norm_field] + 1e-10)}
# TODO: Jinyang
return func | null |
163,616 | import json
import argparse
import pdb
def fetch_sql(predicted_results, output_path=None):
final_sql = {}
invalid_result = []
for k, v in predicted_results.items():
idx = int(k)
print("------------------- processing {}th example -------------------".format(idx))
print(v)
try:
cot, sql = v.split(': SELECT')
clean_sql = 'SELECT' + sql
except Exception as e:
invalid_result.append(idx)
clean_sql = 0 # filter resutls without valid SQL, i.e., too long, etc.
final_sql[k] = clean_sql
if output_path:
json.dump(final_sql, open(output_path, 'w'), indent=4)
return final_sql, invalid_result | null |
163,617 | import argparse
import fnmatch
import json
import os
import pdb
import pickle
import re
import sqlite3
from typing import Dict, List, Tuple
import backoff
import openai
import pandas as pd
import sqlparse
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `get_db_schemas` function. Write a Python function `def get_db_schemas(bench_root: str, db_name: str) -> Dict[str, str]` to solve the following problem:
Read an sqlite file, and return the CREATE commands for each of the tables in the database.
Here is the function:
def get_db_schemas(bench_root: str, db_name: str) -> Dict[str, str]:
"""
Read an sqlite file, and return the CREATE commands for each of the tables in the database.
"""
asdf = 'database' if bench_root == 'spider' else 'databases'
with sqlite3.connect(f'file:{bench_root}/{asdf}/{db_name}/{db_name}.sqlite?mode=ro', uri=True) as conn:
# conn.text_factory = bytes
cursor = conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
schemas = {}
for table in tables:
cursor.execute("SELECT sql FROM sqlite_master WHERE type='table' AND name='{}';".format(table[0]))
schemas[table[0]] = cursor.fetchone()[0]
return schemas | Read an sqlite file, and return the CREATE commands for each of the tables in the database. |
163,618 | import argparse
import fnmatch
import json
import os
import pdb
import pickle
import re
import sqlite3
from typing import Dict, List, Tuple
import backoff
import openai
import pandas as pd
import sqlparse
from tqdm import tqdm
def few_shot():
ini_table = "CREATE TABLE singer\n(\n singer_id TEXT not null\n primary key,\n nation TEXT not null,\n sname TEXT null,\n dname TEXT null,\n cname TEXT null,\n age INTEGER not null,\n year INTEGER not null,\n birth_year INTEGER null,\n salary REAL null,\n city TEXT null,\n phone_number INTEGER null,\n-- tax REAL null,\n)"
ini_prompt = "-- External Knowledge: age = year - birth_year;\n-- Using valid SQLite and understading External Knowledge, answer the following questions for the tables provided above.\n-- How many singers in USA who is older than 27?\nThe final SQL is: Let's think step by step."
ini_cot_result = "1. referring to external knowledge, we need to filter singers 'by year' - 'birth_year' > 27; 2. we should find out the singers of step 1 in which nation = 'US', 3. use COUNT() to count how many singers. Finally the SQL is: SELECT COUNT(*) FROM singer WHERE year - birth_year > 27;</s>"
one_shot_demo = ini_table + '\n' + ini_prompt + '\n' + ini_cot_result
return one_shot_demo | null |
163,619 | import argparse
import fnmatch
import json
import os
import pdb
import pickle
import re
import sqlite3
from typing import Dict, List, Tuple
import backoff
import openai
import pandas as pd
import sqlparse
from tqdm import tqdm
def few_shot_no_kg():
ini_table = "CREATE TABLE singer\n(\n singer_id TEXT not null\n primary key,\n nation TEXT not null,\n sname TEXT null,\n dname TEXT null,\n cname TEXT null,\n age INTEGER not null,\n year INTEGER not null,\n age INTEGER null,\n salary REAL null,\n city TEXT null,\n phone_number INTEGER null,\n-- tax REAL null,\n)"
ini_prompt = "-- External Knowledge:\n-- Using valid SQLite and understading External Knowledge, answer the following questions for the tables provided above.\n-- How many singers in USA who is older than 27?\nThe final SQL is: Let's think step by step."
ini_cot_result = "1. 'older than 27' refers to age > 27 in SQL; 2. we should find out the singers of step 1 in which nation = 'US', 3. use COUNT() to count how many singers. Finally the SQL is: SELECT COUNT(*) FROM singer WHERE age > 27;</s>"
one_shot_demo = ini_table + '\n' + ini_prompt + '\n' + ini_cot_result
return one_shot_demo | null |
163,620 | import argparse
import fnmatch
import json
import os
import pdb
import pickle
import re
import sqlite3
from typing import Dict, List, Tuple
import backoff
import openai
import pandas as pd
import sqlparse
from tqdm import tqdm
openai.debug=True
def quota_giveup(e):
return isinstance(e, openai.error.RateLimitError) and "quota" in str(e) | null |
163,621 | import argparse
import fnmatch
import json
import os
import pdb
import pickle
import re
import sqlite3
from typing import Dict, List, Tuple
import backoff
import openai
import pandas as pd
import sqlparse
from tqdm import tqdm
openai.debug=True
def generate_combined_prompts_one(db_path, question, knowledge=None):
schema_prompt = generate_schema_prompt(db_path, num_rows=None) # This is the entry to collect values
comment_prompt = generate_comment_prompt(question, knowledge)
combined_prompts = schema_prompt + '\n\n' + comment_prompt + cot_wizard() + '\nSELECT '
# combined_prompts = few_shot() + '\n\n' + schema_prompt + '\n\n' + comment_prompt
# print(combined_prompts)
return combined_prompts
def connect_gpt(engine, prompt, max_tokens, temperature, stop):
# print(prompt)
try:
result = openai.Completion.create(engine=engine, prompt=prompt, max_tokens=max_tokens, temperature=temperature, stop=stop)
except Exception as e:
result = 'error:{}'.format(e)
return result
The provided code snippet includes necessary dependencies for implementing the `collect_response_from_gpt` function. Write a Python function `def collect_response_from_gpt(db_path_list, question_list, api_key, engine, knowledge_list=None)` to solve the following problem:
:param db_path: str :param question_list: [] :return: dict of responses collected from openai
Here is the function:
def collect_response_from_gpt(db_path_list, question_list, api_key, engine, knowledge_list=None):
'''
:param db_path: str
:param question_list: []
:return: dict of responses collected from openai
'''
responses_dict = {}
response_list = []
openai.api_key = api_key
for i, question in tqdm(enumerate(question_list)):
print('--------------------- processing {}th question ---------------------'.format(i))
print('the question is: {}'.format(question))
if knowledge_list:
cur_prompt = generate_combined_prompts_one(db_path=db_path_list[i], question=question, knowledge=knowledge_list[i])
else:
cur_prompt = generate_combined_prompts_one(db_path=db_path_list[i], question=question)
plain_result = connect_gpt(engine=engine, prompt=cur_prompt, max_tokens=256, temperature=0, stop=['--', '\n\n', ';', '#'])
# pdb.set_trace()
# plain_result = connect_gpt(engine=engine, prompt=cur_prompt, max_tokens=256, temperature=0, stop=['</s>'])
# determine wheter the sql is wrong
if type(plain_result) == str:
sql = plain_result
else:
sql = 'SELECT' + plain_result['choices'][0]['text']
# responses_dict[i] = sql
db_id = db_path_list[i].split('/')[-1].split('.sqlite')[0]
sql = sql + '\t----- bird -----\t' + db_id # to avoid unpredicted \t appearing in codex results
response_list.append(sql)
return response_list | :param db_path: str :param question_list: [] :return: dict of responses collected from openai |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.