id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
165,296 | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR
import numpy as np
import logging
import argparse
import os
from .dataset import NL2SQL_Dataset
from .model import ReRanker
from sklearn.metrics import confusion_matrix, accuracy_score
The provided code snippet includes necessary dependencies for implementing the `init` function. Write a Python function `def init(args)` to solve the following problem:
guaranteed reproducible
Here is the function:
def init(args):
    """
    Guarantee reproducible runs: pin the visible GPU and seed every RNG
    source used by the training code.

    Inputs:
        args (namespace): Needs `gpu` (int), `seed` (int) and
            `save_dir` (str) attributes.
    """
    import random  # local import: this module's header does not import random

    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    # Seed Python, NumPy and torch (CPU + all GPUs); the original seeded
    # only NumPy and torch-CPU, which is not enough for "guaranteed
    # reproducible" (see the sibling init_env in this repo).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # cuDNN autotuning picks nondeterministic kernels; force determinism.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # makedirs(exist_ok=True) avoids the check-then-create race and also
    # creates missing parent directories (os.mkdir would fail on both).
    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir, exist_ok=True)
        print("make dir : ", args.save_dir)
165,297 | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR
import numpy as np
import logging
import argparse
import os
from .dataset import NL2SQL_Dataset
from .model import ReRanker
from sklearn.metrics import confusion_matrix, accuracy_score
def evaluate2(args, output_flat, target_flat):
    """
    Binary-classification evaluation: threshold the scores and print
    overall accuracy plus per-class recall / precision.

    Inputs:
        args (namespace): Needs `threshold` (float) for binarization.
        output_flat (Tensor): Predicted scores.
        target_flat (Tensor): Gold 0/1 labels, same length as output_flat.
    """
    print("start evaluate ...")  # fixed "evalute" typo in the log message
    pred = torch.gt(output_flat, args.threshold).float()
    pred = pred.cpu().numpy()
    target = target_flat.cpu().numpy()
    # Pin labels to [0, 1] so the matrix is always 2x2 even when one class
    # is absent from this batch; the bare call would return a 1x1 matrix
    # and break the two-element unpacking below.
    ret = confusion_matrix(target, pred, labels=[0, 1])
    neg_recall, pos_recall = ret.diagonal() / ret.sum(axis=1)
    neg_acc, pos_acc = ret.diagonal() / ret.sum(axis=0)
    acc = accuracy_score(target, pred)
    print(" All Acc:\t {:.3f}%".format(100.0 * acc))
    print(" Neg Recall: \t {:.3f}% \t Pos Recall: \t {:.3f}% \n Neg Acc: \t {:.3f}% \t Pos Acc: \t {:.3f}% \n".format(100.0 * neg_recall, 100.0 * pos_recall, 100.0 * neg_acc, 100.0 * pos_acc))
165,298 | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR
import numpy as np
import logging
import argparse
import os
from .dataset import NL2SQL_Dataset
from .model import ReRanker
from sklearn.metrics import confusion_matrix, accuracy_score
def print_evalute(evalute_lst):
    """
    Print accumulated accuracy counters and return the overall accuracy.

    Inputs:
        evalute_lst (list of int): [all_cnt, all_acc, pos_cnt, pos_acc,
            neg_cnt, neg_acc], as produced by `evaluate`.

    Returns:
        float: Overall accuracy in percent (0.0 when no samples were seen).
    """
    all_cnt, all_acc, pos_cnt, pos_acc, neg_cnt, neg_acc = evalute_lst

    def _pct(correct, total):
        # Guard against ZeroDivisionError when a class never occurred.
        return 100. * correct / total if total else 0.0

    print("\n All Acc {}/{} ({:.2f}%) \t Pos Acc: {}/{} ({:.2f}%) \t Neg Acc: {}/{} ({:.2f}%) \n".format(
        all_acc, all_cnt, _pct(all_acc, all_cnt),
        pos_acc, pos_cnt, _pct(pos_acc, pos_cnt),
        neg_acc, neg_cnt, _pct(neg_acc, neg_cnt)))
    return _pct(all_acc, all_cnt)
def evaluate(args, output, target, evalute_lst):
    """
    Update running per-class accuracy counters with one batch of scores.

    Inputs:
        args (namespace): Needs `threshold` (float) for binarization.
        output (Tensor): Predicted scores, one per example.
        target (Tensor): Gold 0/1 labels, same length as output.
        evalute_lst (list of int): Counters in the order
            [all_cnt, all_acc, pos_cnt, pos_acc, neg_cnt, neg_acc].

    Returns:
        list of int: The updated counters, totals recomputed from the
        per-class values.
    """
    _, _, pos_cnt, pos_acc, neg_cnt, neg_acc = evalute_lst
    preds = torch.gt(output, args.threshold).float()
    for gold, guess in zip(target, preds):
        if gold == 1:
            pos_cnt += 1
            pos_acc += int(gold == guess)
        elif gold == 0:
            neg_cnt += 1
            neg_acc += int(gold == guess)
    # Overall counts are always the sum of the per-class counters.
    return [pos_cnt + neg_cnt, pos_acc + neg_acc,
            pos_cnt, pos_acc, neg_cnt, neg_acc]
def train(args, model, train_loader, epoch):
    """
    Run one training epoch of the re-ranker and report running accuracy.

    Inputs:
        args (namespace): Needs `threshold` for the accuracy bookkeeping.
        model: Re-ranker exposing `cls_trainer` / `bert_trainer` optimizers.
        train_loader (DataLoader): Yields (tokens, attention_mask, target).
        epoch (int): Epoch index, used only for logging.

    Returns:
        float: Overall training accuracy in percent (from print_evalute).
    """
    model.train()
    criterion = nn.BCELoss()
    seen = 0
    stats = [0] * 6
    for batch_idx, (tokens, attention_mask, target) in enumerate(train_loader):
        tokens = tokens.cuda()
        attention_mask = attention_mask.cuda()
        target = target.cuda().float()
        model.zero_grad()
        scores = model(input_ids=tokens, attention_mask=attention_mask)
        scores = scores.squeeze(dim=-1)
        loss = criterion(scores, target)
        loss.backward()
        # Step both optimizers: the classifier head and the BERT encoder.
        model.cls_trainer.step()
        model.bert_trainer.step()
        seen += len(target)
        stats = evaluate(args, scores, target, stats)
        if batch_idx % 50 == 0:
            print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                epoch, seen, len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    return print_evalute(stats)
165,299 | import os
import sys
import numpy as np
import random
from parse_args import interpret_args
import data_util
from data_util import atis_data
from model.schema_interaction_model import SchemaInteractionATISModel
from logger import Logger
from model.model import ATISModel
from model_util import Metrics, evaluate_utterance_sample, evaluate_interaction_sample, \
train_epoch_with_utterances, train_epoch_with_interactions, evaluate_using_predicted_queries
import torch
# Metrics reported during per-epoch evaluation: validation set and the
# gold-forced sample of the training set use the same three metrics.
VALID_EVAL_METRICS = [Metrics.LOSS, Metrics.TOKEN_ACCURACY, Metrics.STRING_ACCURACY]
TRAIN_EVAL_METRICS = [Metrics.LOSS, Metrics.TOKEN_ACCURACY, Metrics.STRING_ACCURACY]
class Logger():
    """Line-oriented experiment log with tab-separated key/value entries.

    Attributes:
        fileptr (file): File pointer for input/output.
        lines (list of str): Lines read from the log (read mode only).
    """

    def __init__(self, filename, option):
        self.fileptr = open(filename, option)
        # In read mode, snapshot the whole file so the find/contains
        # helpers can scan it; in write mode there is nothing to read.
        self.lines = self.fileptr.readlines() if option == "r" else []

    def put(self, string):
        """Writes one line to the file and flushes immediately."""
        self.fileptr.write(string + "\n")
        self.fileptr.flush()

    def close(self):
        """Closes the logger."""
        self.fileptr.close()

    def findlast(self, identifier, default=0.):
        """Returns the value of the last entry starting with `identifier`.

        Values are parsed as float or bool where possible, otherwise the
        raw string is returned; `default` when no entry matches.
        """
        for line in reversed(self.lines):
            if not line.lower().startswith(identifier):
                continue
            value = line.strip().split("\t")[1]
            if value.replace(".", "").isdigit():
                return float(value)
            lowered = value.lower()
            if lowered == "true":
                return True
            if lowered == "false":
                return False
            return value
        return default

    def contains(self, string):
        """Determines whether the string is present in the log."""
        needle = string.lower()
        return any(needle in line.lower() for line in reversed(self.lines))

    def findlast_log_before(self, before_str):
        """Finds the last entry in the log before another entry.

        Entries are separated by blank lines; returns the collected lines
        in file order.
        """
        collected = []
        seen_marker = False
        for line in reversed(self.lines):
            if line.startswith(before_str):
                seen_marker = True
            elif seen_marker:
                collected.append(line)
            if line.strip() == "" and seen_marker:
                return "".join(collected[::-1])
        return "".join(collected[::-1])
# NOTE(review): this class requires `from enum import Enum`, which is not
# present in the import block above — confirm it is imported where this
# class is actually defined.
class Metrics(Enum):
    """Definitions of simple metrics to compute."""
    LOSS = 1
    TOKEN_ACCURACY = 2
    STRING_ACCURACY = 3
    CORRECT_TABLES = 4
    STRICT_CORRECT_TABLES = 5
    SEMANTIC_QUERIES = 6
    SYNTACTIC_QUERIES = 7
def train_epoch_with_utterances(batches,
                                model,
                                randomize=True):
    """Trains model for a single epoch given batches of utterance data.

    Inputs:
        batches (list of UtteranceBatch): The batches to give to training.
        model (ATISModel): The model object.
        randomize (bool): Whether or not to randomize the order that the
            batches are seen.

    Returns:
        float: Mean training loss over the batches.
    """
    if randomize:
        random.shuffle(batches)
    progbar = get_progressbar("train ", len(batches))
    progbar.start()
    running_loss = 0.
    for step, batch in enumerate(batches):
        running_loss += model.train_step(batch)
        progbar.update(step)
    progbar.finish()
    return running_loss / len(batches)
def train_epoch_with_interactions(interaction_batches,
                                  params,
                                  model,
                                  randomize=True):
    """Trains model for single epoch given batches of interactions.

    Inputs:
        interaction_batches (list of InteractionBatch): The batches to train on.
        params (namespace): Parameters to run with.
        model (ATISModel): Model to train.
        randomize (bool): Whether or not to randomize the order that batches are seen.

    Returns:
        float: Mean training loss over the batches.
    """
    if randomize:
        random.shuffle(interaction_batches)
    progbar = get_progressbar("train ", len(interaction_batches))
    progbar.start()
    loss_sum = 0.
    for i, interaction_batch in enumerate(interaction_batches):
        # Bug fix: the original `print('i %d', i)` printed the literal
        # format string and the index as a tuple; interpolate instead.
        print('i %d' % i)
        # Interaction-level training always uses batch size 1.
        assert len(interaction_batch) == 1
        interaction = interaction_batch.items[0]
        # Skip a known-problematic ATIS example.
        if interaction.identifier == "raw/atis2/12-1.1/ATIS2/TEXT/TEST/NOV92/770/5":
            continue
        # Skip baseball_1 examples; this single check subsumes the original
        # pair of checks (one sparc-conditioned, one unconditional).
        if "baseball_1" in interaction.identifier:
            continue
        batch_loss = model.train_step(interaction, params.train_maximum_sql_length)
        loss_sum += batch_loss
        # Free cached GPU memory between long interactions.
        torch.cuda.empty_cache()
        progbar.update(i)
    progbar.finish()
    return loss_sum / len(interaction_batches)
def evaluate_utterance_sample(sample,
                              model,
                              max_generation_length,
                              name="",
                              gold_forcing=False,
                              metrics=None,
                              total_num=-1,
                              database_username="",
                              database_password="",
                              database_timeout=0,
                              write_results=False):
    """Evaluates a sample of utterance examples.
    Inputs:
        sample (list of Utterance): Examples to evaluate.
        model (ATISModel): Model to predict with.
        max_generation_length (int): Maximum length to generate.
        name (str): Name to log with.
        gold_forcing (bool): Whether to force the gold tokens during decoding.
        metrics (list of Metric): Metrics to evaluate with.
        total_num (int): Number to divide by when reporting results.
        database_username (str): Username to use for executing queries.
        database_password (str): Password to use when executing queries.
        database_timeout (float): Timeout on queries when executing.
        write_results (bool): Whether to write the results to a file.
    Returns:
        tuple: (dict mapping each metric to its average, None).
    """
    # The metric sums below are keyed on the metrics list, so it must be
    # provided and non-empty.
    assert metrics
    if total_num < 0:
        total_num = len(sample)
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with filename " + str(name) + "_predictions.json")
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    predictions = []
    for i, item in enumerate(sample):
        _, loss, predicted_seq = model.eval_step(
            item, max_generation_length, feed_gold_query=gold_forcing)
        # Normalize the loss by the length of the gold query.
        loss = loss / len(item.gold_query())
        predictions.append(predicted_seq)
        flat_sequence = item.flatten_sequence(predicted_seq)
        token_accuracy = torch_utils.per_token_accuracy(
            item.gold_query(), predicted_seq)
        if write_results:
            write_prediction(
                predictions_file,
                identifier=item.interaction.identifier,
                input_seq=item.input_sequence(),
                probability=0,
                prediction=predicted_seq,
                flat_prediction=flat_sequence,
                gold_query=item.gold_query(),
                flat_gold_queries=item.original_gold_queries(),
                gold_tables=item.gold_tables(),
                index_in_interaction=item.utterance_index,
                database_username=database_username,
                database_password=database_password,
                database_timeout=database_timeout)
        # Fold this example's results into the running metric sums.
        update_sums(metrics,
                    metrics_sums,
                    predicted_seq,
                    flat_sequence,
                    item.gold_query(),
                    item.original_gold_queries()[0],
                    gold_forcing,
                    loss,
                    token_accuracy,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout,
                    gold_table=item.gold_tables()[0])
        progbar.update(i)
    progbar.finish()
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), None
def evaluate_interaction_sample(sample,
                                model,
                                max_generation_length,
                                name="",
                                gold_forcing=False,
                                metrics=None,
                                total_num=-1,
                                database_username="",
                                database_password="",
                                database_timeout=0,
                                use_predicted_queries=False,
                                write_results=False,
                                use_gpu=False,
                                compute_metrics=False):
    """Evaluates a sample of interactions.

    Inputs:
        sample (list): Interactions to evaluate.
        model (ATISModel): Model to predict with.
        max_generation_length (int): Maximum length to generate.
        name (str): Prefix for the predictions file and progress bar.
        gold_forcing (bool): Whether to force gold tokens during decoding.
        metrics (list of Metric): Metrics to accumulate.
        total_num (int): Denominator when averaging; inferred from the
            number of utterances when negative.
        database_username (str): Username to use for executing queries.
        database_password (str): Password to use when executing queries.
        database_timeout (float): Timeout on queries when executing.
        use_predicted_queries (bool): Condition decoding on the model's
            own previous predictions instead of the gold queries.
        write_results (bool): Whether to write predictions to a file.
        use_gpu (bool): NOTE(review) — overwritten from sys.argv below,
            so the passed value is ignored; confirm intended.
        compute_metrics (bool): Forwarded to write_prediction.

    Returns:
        tuple: (dict mapping each metric to its average, list of
        per-utterance predictions).
    """
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with file " + str(name + "_predictions.json"))
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    num_utterances = 0
    """
    ignore_with_gpu = [line.strip() for line in open(
        "data/cpu_full_interactions.txt").readlines()]
    """
    predictions = []
    # Overrides the use_gpu parameter from the command line.
    use_gpu = not ("--no_gpus" in sys.argv or "--no_gpus=1" in sys.argv)
    model.eval()
    for i, interaction in enumerate(sample):
        try:
            with torch.no_grad():
                if use_predicted_queries:
                    example_preds = model.predict_with_predicted_queries(
                        interaction,
                        max_generation_length)
                else:
                    example_preds = model.predict_with_gold_queries(
                        interaction,
                        max_generation_length,
                        feed_gold_query=gold_forcing)
                torch.cuda.empty_cache()
        except RuntimeError as exception:
            # Report the failing interaction (e.g. CUDA OOM) and abort.
            print("Failed on interaction: " + str(interaction.identifier))
            print(exception)
            print("\n\n")
            exit()
        predictions.extend(example_preds)
        # One prediction per utterance, unless decoding produced nothing.
        assert len(example_preds) == len(
            interaction.interaction.utterances) or not example_preds
        for j, pred in enumerate(example_preds):
            num_utterances += 1
            sequence, loss, token_accuracy, _, decoder_results = pred
            if use_predicted_queries:
                # Gold data has to come from the original utterance, since
                # the processed one holds the model's own history.
                item = interaction.processed_utterances[j]
                original_utt = interaction.interaction.utterances[item.index]
                gold_query = original_utt.gold_query_to_use
                original_gold_query = original_utt.original_gold_query
                gold_table = original_utt.gold_sql_results
                gold_queries = [q[0] for q in original_utt.all_gold_queries]
                gold_tables = [q[1] for q in original_utt.all_gold_queries]
                index = item.index
            else:
                item = interaction.gold_utterances()[j]
                gold_query = item.gold_query()
                original_gold_query = item.original_gold_query()
                gold_table = item.gold_table()
                gold_queries = item.original_gold_queries()
                gold_tables = item.gold_tables()
                index = item.utterance_index
            if loss:
                # Normalize the loss by the length of the gold query.
                loss = loss / len(gold_query)
            flat_sequence = item.flatten_sequence(sequence)
            if write_results:
                # Scores are negated so that sorting ascending puts the
                # highest-scoring beam entries first.
                ori_beam = decoder_results.beam
                beam = []
                for x in ori_beam:
                    beam.append((-x[0], item.flatten_sequence(x[1].sequence)))
                beam.sort()
                write_prediction(
                    predictions_file,
                    identifier=interaction.identifier,
                    input_seq=item.input_sequence(),
                    probability=decoder_results.probability,
                    prediction=sequence,
                    flat_prediction=flat_sequence,
                    gold_query=gold_query,
                    flat_gold_queries=gold_queries,
                    gold_tables=gold_tables,
                    index_in_interaction=index,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout,
                    compute_metrics=compute_metrics,
                    beam = beam)
            update_sums(metrics,
                        metrics_sums,
                        sequence,
                        flat_sequence,
                        gold_query,
                        original_gold_query,
                        gold_forcing,
                        loss,
                        token_accuracy,
                        database_username=database_username,
                        database_password=database_password,
                        database_timeout=database_timeout,
                        gold_table=gold_table)
        progbar.update(i)
    progbar.finish()
    if total_num < 0:
        total_num = num_utterances
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), predictions
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(model, data, params)` to solve the following problem:
Trains a model. Inputs: model (ATISModel): The model to train. data (ATISData): The data that is used to train. params (namespace): Training parameters.
Here is the function:
def train(model, data, params):
    """ Trains a model.
    Inputs:
        model (ATISModel): The model to train.
        data (ATISData): The data that is used to train.
        params (namespace): Training parameters.
    Returns:
        str: Path of the last saved model checkpoint.
    """
    # Get the training batches.
    log = Logger(os.path.join(params.logdir, params.logfile), "w")
    num_train_original = atis_data.num_utterances(data.train_data)
    log.put("Original number of training utterances:\t"
            + str(num_train_original))
    # Select utterance-level or interaction-level data/eval functions.
    eval_fn = evaluate_utterance_sample
    trainbatch_fn = data.get_utterance_batches
    trainsample_fn = data.get_random_utterances
    validsample_fn = data.get_all_utterances
    batch_size = params.batch_size
    if params.interaction_level:
        # Interaction-level training processes one interaction at a time.
        batch_size = 1
        eval_fn = evaluate_interaction_sample
        trainbatch_fn = data.get_interaction_batches
        trainsample_fn = data.get_random_interactions
        validsample_fn = data.get_all_interactions
    maximum_output_length = params.train_maximum_sql_length
    train_batches = trainbatch_fn(batch_size,
                                  max_output_length=maximum_output_length,
                                  randomize=not params.deterministic)
    if params.num_train >= 0:
        # Optionally cap the number of training batches.
        train_batches = train_batches[:params.num_train]
    training_sample = trainsample_fn(params.train_evaluation_size,
                                     max_output_length=maximum_output_length)
    valid_examples = validsample_fn(data.valid_data,
                                    max_output_length=maximum_output_length)
    num_train_examples = sum([len(batch) for batch in train_batches])
    num_steps_per_epoch = len(train_batches)
    log.put(
        "Actual number of used training examples:\t" +
        str(num_train_examples))
    log.put("(Shortened by output limit of " +
            str(maximum_output_length) +
            ")")
    log.put("Number of steps per epoch:\t" + str(num_steps_per_epoch))
    log.put("Batch size:\t" + str(batch_size))
    print(
        "Kept " +
        str(num_train_examples) +
        "/" +
        str(num_train_original) +
        " examples")
    print(
        "Batch size of " +
        str(batch_size) +
        " gives " +
        str(num_steps_per_epoch) +
        " steps per epoch")
    # Keeping track of things during training.
    epochs = 0
    patience = params.initial_patience
    learning_rate_coefficient = 1.
    previous_epoch_loss = float('inf')
    # NOTE(review): maximum_validation_accuracy is never updated or read
    # again below — confirm whether it is dead state.
    maximum_validation_accuracy = 0.
    maximum_string_accuracy = 0.
    countdown = int(patience)
    if params.scheduler:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model.trainer, mode='min', )
    keep_training = True
    while keep_training:
        log.put("Epoch:\t" + str(epochs))
        model.set_dropout(params.dropout_amount)
        if not params.scheduler:
            # Manual decay: coefficient shrinks when validation loss rises.
            model.set_learning_rate(learning_rate_coefficient * params.initial_learning_rate)
        # Run a training step.
        if params.interaction_level:
            epoch_loss = train_epoch_with_interactions(
                train_batches,
                params,
                model,
                randomize=not params.deterministic)
        else:
            epoch_loss = train_epoch_with_utterances(
                train_batches,
                model,
                randomize=not params.deterministic)
        log.put("train epoch loss:\t" + str(epoch_loss))
        # Disable dropout for evaluation.
        model.set_dropout(0.)
        # Run an evaluation step on a sample of the training data.
        train_eval_results = eval_fn(training_sample,
                                     model,
                                     params.train_maximum_sql_length,
                                     name=os.path.join(params.logdir, "train-eval"),
                                     write_results=True,
                                     gold_forcing=True,
                                     metrics=TRAIN_EVAL_METRICS)[0]
        for name, value in train_eval_results.items():
            log.put(
                "train final gold-passing " +
                name.name +
                ":\t" +
                "%.2f" %
                value)
        # Run an evaluation step on the validation set.
        valid_eval_results = eval_fn(valid_examples,
                                     model,
                                     params.eval_maximum_sql_length,
                                     name=os.path.join(params.logdir, "valid-eval"),
                                     write_results=True,
                                     gold_forcing=True,
                                     metrics=VALID_EVAL_METRICS)[0]
        for name, value in valid_eval_results.items():
            log.put("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
            if name.name == "STRING_ACCURACY":
                print("vaild STRING_ACCURACY", value)
        valid_loss = valid_eval_results[Metrics.LOSS]
        valid_token_accuracy = valid_eval_results[Metrics.TOKEN_ACCURACY]
        string_accuracy = valid_eval_results[Metrics.STRING_ACCURACY]
        if params.scheduler:
            scheduler.step(valid_loss)
        if valid_loss > previous_epoch_loss:
            # Validation loss got worse: decay the manual learning rate.
            learning_rate_coefficient *= params.learning_rate_ratio
            log.put(
                "learning rate coefficient:\t" +
                str(learning_rate_coefficient))
        previous_epoch_loss = valid_loss
        saved = False
        if not saved and string_accuracy > maximum_string_accuracy:
            # New best string accuracy: extend patience and checkpoint.
            maximum_string_accuracy = string_accuracy
            patience = patience * params.patience_ratio
            countdown = int(patience)
            last_save_file = os.path.join(params.logdir, "save_" + str(epochs))
            model.save(last_save_file)
            log.put(
                "maximum string accuracy:\t" +
                str(maximum_string_accuracy))
            log.put("patience:\t" + str(patience))
            log.put("save file:\t" + str(last_save_file))
        if countdown <= 0:
            keep_training = False
        countdown -= 1
        log.put("countdown:\t" + str(countdown))
        log.put("")
        epochs += 1
    log.put("Finished training!")
    log.close()
    # NOTE(review): if no epoch ever improves string accuracy,
    # last_save_file is never assigned and this raises UnboundLocalError —
    # confirm whether that can occur in practice.
    return last_save_file
165,300 | import os
import sys
import numpy as np
import random
from parse_args import interpret_args
import data_util
from data_util import atis_data
from model.schema_interaction_model import SchemaInteractionATISModel
from logger import Logger
from model.model import ATISModel
from model_util import Metrics, evaluate_utterance_sample, evaluate_interaction_sample, \
train_epoch_with_utterances, train_epoch_with_interactions, evaluate_using_predicted_queries
import torch
# Metrics reported by the final (post-training) evaluation pass.
FINAL_EVAL_METRICS = [Metrics.STRING_ACCURACY, Metrics.TOKEN_ACCURACY]
def evaluate_utterance_sample(sample,
                              model,
                              max_generation_length,
                              name="",
                              gold_forcing=False,
                              metrics=None,
                              total_num=-1,
                              database_username="",
                              database_password="",
                              database_timeout=0,
                              write_results=False):
    """Evaluates a sample of utterance examples.
    Inputs:
        sample (list of Utterance): Examples to evaluate.
        model (ATISModel): Model to predict with.
        max_generation_length (int): Maximum length to generate.
        name (str): Name to log with.
        gold_forcing (bool): Whether to force the gold tokens during decoding.
        metrics (list of Metric): Metrics to evaluate with.
        total_num (int): Number to divide by when reporting results.
        database_username (str): Username to use for executing queries.
        database_password (str): Password to use when executing queries.
        database_timeout (float): Timeout on queries when executing.
        write_results (bool): Whether to write the results to a file.
    Returns:
        tuple: (dict mapping each metric to its average, None).
    """
    # The metric sums below are keyed on the metrics list, so it must be
    # provided and non-empty.
    assert metrics
    if total_num < 0:
        total_num = len(sample)
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with filename " + str(name) + "_predictions.json")
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    predictions = []
    for i, item in enumerate(sample):
        _, loss, predicted_seq = model.eval_step(
            item, max_generation_length, feed_gold_query=gold_forcing)
        # Normalize the loss by the length of the gold query.
        loss = loss / len(item.gold_query())
        predictions.append(predicted_seq)
        flat_sequence = item.flatten_sequence(predicted_seq)
        token_accuracy = torch_utils.per_token_accuracy(
            item.gold_query(), predicted_seq)
        if write_results:
            write_prediction(
                predictions_file,
                identifier=item.interaction.identifier,
                input_seq=item.input_sequence(),
                probability=0,
                prediction=predicted_seq,
                flat_prediction=flat_sequence,
                gold_query=item.gold_query(),
                flat_gold_queries=item.original_gold_queries(),
                gold_tables=item.gold_tables(),
                index_in_interaction=item.utterance_index,
                database_username=database_username,
                database_password=database_password,
                database_timeout=database_timeout)
        # Fold this example's results into the running metric sums.
        update_sums(metrics,
                    metrics_sums,
                    predicted_seq,
                    flat_sequence,
                    item.gold_query(),
                    item.original_gold_queries()[0],
                    gold_forcing,
                    loss,
                    token_accuracy,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout,
                    gold_table=item.gold_tables()[0])
        progbar.update(i)
    progbar.finish()
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), None
def evaluate_interaction_sample(sample,
                                model,
                                max_generation_length,
                                name="",
                                gold_forcing=False,
                                metrics=None,
                                total_num=-1,
                                database_username="",
                                database_password="",
                                database_timeout=0,
                                use_predicted_queries=False,
                                write_results=False,
                                use_gpu=False,
                                compute_metrics=False):
    """Evaluates a sample of interactions.

    Inputs:
        sample (list): Interactions to evaluate.
        model (ATISModel): Model to predict with.
        max_generation_length (int): Maximum length to generate.
        name (str): Prefix for the predictions file and progress bar.
        gold_forcing (bool): Whether to force gold tokens during decoding.
        metrics (list of Metric): Metrics to accumulate.
        total_num (int): Denominator when averaging; inferred from the
            number of utterances when negative.
        database_username (str): Username to use for executing queries.
        database_password (str): Password to use when executing queries.
        database_timeout (float): Timeout on queries when executing.
        use_predicted_queries (bool): Condition decoding on the model's
            own previous predictions instead of the gold queries.
        write_results (bool): Whether to write predictions to a file.
        use_gpu (bool): NOTE(review) — overwritten from sys.argv below,
            so the passed value is ignored; confirm intended.
        compute_metrics (bool): Forwarded to write_prediction.

    Returns:
        tuple: (dict mapping each metric to its average, list of
        per-utterance predictions).
    """
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with file " + str(name + "_predictions.json"))
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    num_utterances = 0
    """
    ignore_with_gpu = [line.strip() for line in open(
        "data/cpu_full_interactions.txt").readlines()]
    """
    predictions = []
    # Overrides the use_gpu parameter from the command line.
    use_gpu = not ("--no_gpus" in sys.argv or "--no_gpus=1" in sys.argv)
    model.eval()
    for i, interaction in enumerate(sample):
        try:
            with torch.no_grad():
                if use_predicted_queries:
                    example_preds = model.predict_with_predicted_queries(
                        interaction,
                        max_generation_length)
                else:
                    example_preds = model.predict_with_gold_queries(
                        interaction,
                        max_generation_length,
                        feed_gold_query=gold_forcing)
                torch.cuda.empty_cache()
        except RuntimeError as exception:
            # Report the failing interaction (e.g. CUDA OOM) and abort.
            print("Failed on interaction: " + str(interaction.identifier))
            print(exception)
            print("\n\n")
            exit()
        predictions.extend(example_preds)
        # One prediction per utterance, unless decoding produced nothing.
        assert len(example_preds) == len(
            interaction.interaction.utterances) or not example_preds
        for j, pred in enumerate(example_preds):
            num_utterances += 1
            sequence, loss, token_accuracy, _, decoder_results = pred
            if use_predicted_queries:
                # Gold data has to come from the original utterance, since
                # the processed one holds the model's own history.
                item = interaction.processed_utterances[j]
                original_utt = interaction.interaction.utterances[item.index]
                gold_query = original_utt.gold_query_to_use
                original_gold_query = original_utt.original_gold_query
                gold_table = original_utt.gold_sql_results
                gold_queries = [q[0] for q in original_utt.all_gold_queries]
                gold_tables = [q[1] for q in original_utt.all_gold_queries]
                index = item.index
            else:
                item = interaction.gold_utterances()[j]
                gold_query = item.gold_query()
                original_gold_query = item.original_gold_query()
                gold_table = item.gold_table()
                gold_queries = item.original_gold_queries()
                gold_tables = item.gold_tables()
                index = item.utterance_index
            if loss:
                # Normalize the loss by the length of the gold query.
                loss = loss / len(gold_query)
            flat_sequence = item.flatten_sequence(sequence)
            if write_results:
                # Scores are negated so that sorting ascending puts the
                # highest-scoring beam entries first.
                ori_beam = decoder_results.beam
                beam = []
                for x in ori_beam:
                    beam.append((-x[0], item.flatten_sequence(x[1].sequence)))
                beam.sort()
                write_prediction(
                    predictions_file,
                    identifier=interaction.identifier,
                    input_seq=item.input_sequence(),
                    probability=decoder_results.probability,
                    prediction=sequence,
                    flat_prediction=flat_sequence,
                    gold_query=gold_query,
                    flat_gold_queries=gold_queries,
                    gold_tables=gold_tables,
                    index_in_interaction=index,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout,
                    compute_metrics=compute_metrics,
                    beam = beam)
            update_sums(metrics,
                        metrics_sums,
                        sequence,
                        flat_sequence,
                        gold_query,
                        original_gold_query,
                        gold_forcing,
                        loss,
                        token_accuracy,
                        database_username=database_username,
                        database_password=database_password,
                        database_timeout=database_timeout,
                        gold_table=gold_table)
        progbar.update(i)
    progbar.finish()
    if total_num < 0:
        total_num = num_utterances
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), predictions
def evaluate_using_predicted_queries(sample,
                                     model,
                                     name="",
                                     gold_forcing=False,
                                     metrics=None,
                                     total_num=-1,
                                     database_username="",
                                     database_password="",
                                     database_timeout=0,
                                     snippet_keep_age=1):
    """Evaluates interactions utterance by utterance, feeding the model's
    own executable predictions back in as interaction history.

    Inputs:
        sample (list): Interactions to evaluate.
        model (ATISModel): Model to predict with.
        name (str): Prefix for the predictions file and progress bar.
        gold_forcing (bool): Must be False; this mode never feeds gold.
        metrics (list of Metric): Metrics to accumulate.
        total_num (int): Denominator when averaging; inferred when negative.
        database_username (str): Username to use for executing queries.
        database_password (str): Password to use when executing queries.
        database_timeout (float): Timeout on queries when executing.
        snippet_keep_age (int): Forwarded to item.next_utterance.

    Returns:
        tuple: (dict mapping each metric to its average, list of
        per-interaction prediction lists).
    """
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with file " + str(name + "_predictions.json"))
    # Gold forcing makes no sense when the history is predicted.
    assert not gold_forcing
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    # NOTE(review): num_utterances is never incremented below, so when
    # total_num < 0 the averages are divided by zero — confirm intended.
    num_utterances = 0
    predictions = []
    for i, item in enumerate(sample):
        int_predictions = []
        item.start_interaction()
        while not item.done():
            utterance = item.next_utterance(snippet_keep_age)
            predicted_sequence, loss, _, probability = model.eval_step(
                utterance)
            int_predictions.append((utterance, predicted_sequence))
            flat_sequence = utterance.flatten_sequence(predicted_sequence)
            # Keep the prediction as history only if it executes against
            # the database and the model is confident enough (0.24 is a
            # magic cutoff — presumably tuned on dev; TODO confirm).
            if sql_util.executable(
                    flat_sequence,
                    username=database_username,
                    password=database_password,
                    timeout=database_timeout) and probability >= 0.24:
                utterance.set_pred_query(
                    item.remove_snippets(predicted_sequence))
                item.add_utterance(utterance,
                                   item.remove_snippets(predicted_sequence),
                                   previous_snippets=utterance.snippets())
            else:
                # Add the /previous/ predicted query, guaranteed to be syntactically
                # correct
                seq = []
                utterance.set_pred_query(seq)
                item.add_utterance(
                    utterance, seq, previous_snippets=utterance.snippets())
            original_utt = item.interaction.utterances[utterance.index]
            write_prediction(
                predictions_file,
                identifier=item.interaction.identifier,
                input_seq=utterance.input_sequence(),
                probability=probability,
                prediction=predicted_sequence,
                flat_prediction=flat_sequence,
                gold_query=original_utt.gold_query_to_use,
                flat_gold_queries=[
                    q[0] for q in original_utt.all_gold_queries],
                gold_tables=[
                    q[1] for q in original_utt.all_gold_queries],
                index_in_interaction=utterance.index,
                database_username=database_username,
                database_password=database_password,
                database_timeout=database_timeout)
            update_sums(metrics,
                        metrics_sums,
                        predicted_sequence,
                        flat_sequence,
                        original_utt.gold_query_to_use,
                        original_utt.original_gold_query,
                        gold_forcing,
                        loss,
                        token_accuracy=0,
                        database_username=database_username,
                        database_password=database_password,
                        database_timeout=database_timeout,
                        gold_table=original_utt.gold_sql_results)
        predictions.append(int_predictions)
        progbar.update(i)
    progbar.finish()
    if total_num < 0:
        total_num = num_utterances
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), predictions
The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(model, data, params, last_save_file, split)` to solve the following problem:
Evaluates a pretrained model on a dataset. Inputs: model (ATISModel): Model class. data (ATISData): All of the data. params (namespace): Parameters for the model. last_save_file (str): Location where the model save file is.
Here is the function:
def evaluate(model, data, params, last_save_file, split):
    """Evaluates a pretrained model on a dataset.

    Inputs:
        model (ATISModel): Model class.
        data (ATISData): All of the data.
        params (namespace): Parameters for the model.
        last_save_file (str): Location where the model save file is.
        split (str): Which split to evaluate: 'dev'/'train'/'test'/'valid'.
    """
    # Load from the checkpoint produced by training, or fall back to the
    # explicitly configured save file.
    if last_save_file:
        model.load(last_save_file)
    else:
        if not params.save_file:
            raise ValueError(
                "Must provide a save file name if not training first.")
        model.load(params.save_file)

    # Keep the bare split name around; `split` is rebound to the data and
    # `filename` gets a suffix appended below.
    split_name = split
    if split_name == 'dev':
        split = data.dev_data
    elif split_name == 'train':
        split = data.train_data
    elif split_name == 'test':
        split = data.test_data
    elif split_name == 'valid':
        split = data.valid_data
    else:
        raise ValueError("Split not recognized: " + str(params.evaluate_split))

    filename = split_name
    if params.use_predicted_queries:
        filename += "_use_predicted_queries"
    else:
        filename += "_use_gold_queries"

    # Bug fix: the original compared `filename` to 'train' AFTER the
    # "_use_*_queries" suffix was appended, so the train branch was
    # unreachable; compare the bare split name instead.
    if split_name == 'train':
        full_name = os.path.join(params.logdir, filename) + params.results_note
    else:
        full_name = os.path.join("results", params.save_file.split('/')[-1]) + params.results_note

    if params.interaction_level or params.use_predicted_queries:
        examples = data.get_all_interactions(split)
        if params.interaction_level:
            valid_eval_results, _ = evaluate_interaction_sample(
                examples,
                model,
                name=full_name,
                metrics=FINAL_EVAL_METRICS,
                total_num=atis_data.num_utterances(split),
                database_username=params.database_username,
                database_password=params.database_password,
                database_timeout=params.database_timeout,
                use_predicted_queries=params.use_predicted_queries,
                max_generation_length=params.eval_maximum_sql_length,
                write_results=True,
                use_gpu=True,
                compute_metrics=params.compute_metrics)
        else:
            evaluate_using_predicted_queries(
                examples,
                model,
                name=full_name,
                metrics=FINAL_EVAL_METRICS,
                total_num=atis_data.num_utterances(split),
                database_username=params.database_username,
                database_password=params.database_password,
                database_timeout=params.database_timeout)
    else:
        examples = data.get_all_utterances(split)
        evaluate_utterance_sample(
            examples,
            model,
            name=full_name,
            gold_forcing=False,
            metrics=FINAL_EVAL_METRICS,
            total_num=atis_data.num_utterances(split),
            max_generation_length=params.eval_maximum_sql_length,
            database_username=params.database_username,
            database_password=params.database_password,
            database_timeout=params.database_timeout,
            write_results=True)
    #for name, value in valid_eval_results.items():
    #    print("valid gold-passing " + name.name + ":\t" + "%.2f" % value)
165,301 | import os
import sys
import numpy as np
import random
from parse_args import interpret_args
import data_util
from data_util import atis_data
from model.schema_interaction_model import SchemaInteractionATISModel
from logger import Logger
from model.model import ATISModel
from model_util import Metrics, evaluate_utterance_sample, evaluate_interaction_sample, \
train_epoch_with_utterances, train_epoch_with_interactions, evaluate_using_predicted_queries
import torch
The provided code snippet includes necessary dependencies for implementing the `init_env` function. Write a Python function `def init_env(params)` to solve the following problem:
seed manually to make runs reproducible
Here is the function:
def init_env(params):
    """
    Seed every RNG source manually so that runs are reproducible.

    Inputs:
        params (namespace): Needs a `seed` (int) attribute.
    """
    seed = params.seed
    # Python-level sources first: stdlib RNG, NumPy and hash randomization.
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Torch: CPU generator plus every CUDA device.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # cuDNN autotuning picks nondeterministic kernels; pin it down.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
165,302 | import argparse
import os
import sys
import pickle
import json
import shutil
import sqlparse
from postprocess_eval import get_candidate_tables
def write_interaction(interaction_list,split,output_dir):
    """Serialize one split's interactions as pretty JSON plus a pickle.

    The JSON file keeps the raw SQL strings (one indented object per
    interaction, newline separated). For the pickle, every utterance's
    "sql" string is replaced by a list containing a single
    (token_list, results) pair — the format downstream training expects.
    NOTE: the interaction dicts are mutated in place while building the
    pickle payload.
    """
    json_path = os.path.join(output_dir, split + '.json')
    pkl_path = os.path.join(output_dir, split + '.pkl')

    with open(json_path, 'w') as json_file:
        for interaction in interaction_list:
            json.dump(interaction, json_file, indent=4)
            json_file.write('\n')

    # Tokenize each SQL string and attach an (empty) execution-results slot.
    transformed = []
    for obj in interaction_list:
        rebuilt_turns = []
        for utterance in obj["interaction"]:
            raw_sql = utterance["sql"]
            utterance["sql"] = [(raw_sql.split(), [])]
            rebuilt_turns.append(utterance)
        obj["interaction"] = rebuilt_turns
        transformed.append(obj)

    with open(pkl_path, 'wb') as pkl_file:
        pickle.dump(transformed, pkl_file)
    return
def read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas_dict):
    """Load a tables.json schema file and index it per database id.

    Fills (and returns) the three accumulator dicts:
      schema_tokens[db_id]         -> lowercased "table.column" surface forms,
                                      the bare "*", plus a "table.*" per table
      column_names[db_id]          -> lowercased bare column names
      database_schemas_dict[db_id] -> the raw schema entry
    """
    with open(database_schema_filename) as schema_file:
        all_schemas = json.load(schema_file)

    def collect_tokens(table_schema):
        surface_forms = []
        bare_names = []
        original_tables = table_schema['table_names_original']
        for table_id, column_name in table_schema['column_names_original']:
            if table_id >= 0:
                surface = '{}.{}'.format(original_tables[table_id], column_name)
            else:
                # table_id == -1 marks the special "*" column with no table.
                surface = column_name
            surface_forms.append(surface.lower())
            bare_names.append(column_name.lower())
        # Every table also contributes a wildcard "table.*" token.
        surface_forms.extend('{}.*'.format(name.lower()) for name in original_tables)
        return surface_forms, bare_names

    for table_schema in all_schemas:
        db_id = table_schema['db_id']
        database_schemas_dict[db_id] = table_schema
        schema_tokens[db_id], column_names[db_id] = collect_tokens(table_schema)

    return schema_tokens, column_names, database_schemas_dict
def read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Read the Spider train and dev splits into one dict keyed by database id."""
    interactions = {}
    for split_file in ('train.json', 'dev.json'):
        split_path = os.path.join(spider_dir, split_file)
        interactions = read_spider_split(split_path, interactions, database_schemas,
                                         column_names, output_vocab, schema_tokens, remove_from)
    return interactions
def read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Read the SParC train and dev splits (value-stripped files) keyed by database id."""
    interactions = {}
    for split_file in ('train_no_value.json', 'dev_no_value.json'):
        split_path = os.path.join(sparc_dir, split_file)
        interactions = read_data_json(split_path, interactions, database_schemas,
                                      column_names, output_vocab, schema_tokens, remove_from)
    return interactions
def read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Read the CoSQL train and dev splits into one dict keyed by database id."""
    interactions = {}
    for split_file in ('train.json', 'dev.json'):
        split_path = os.path.join(cosql_dir, split_file)
        interactions = read_data_json(split_path, interactions, database_schemas,
                                      column_names, output_vocab, schema_tokens, remove_from)
    return interactions
def read_db_split(data_dir):
    """Return (train_db_ids, dev_db_ids) read from the split listing files in *data_dir*."""
    def read_ids(filename):
        # One database id per line; every line is kept (stripped).
        with open(os.path.join(data_dir, filename)) as listing:
            return [line.strip() for line in listing]

    return read_ids('train_db_ids.txt'), read_ids('dev_db_ids.txt')
def preprocess(dataset, remove_from=False):
    """Preprocess a text-to-SQL dataset ('spider', 'sparc' or 'cosql').

    Loads the database schemas and the raw train/dev files, partitions the
    interactions into train/dev by database id, and writes the processed
    splits (JSON + pickle, via write_interaction) plus a normalized
    tables.json into an output directory that is recreated on every run.

    dataset: one of 'spider', 'sparc', 'cosql'. NOTE(review): any other
        value leaves the path/split variables unbound and fails later with
        NameError — confirm callers validate the argument.
    remove_from: when True, use the reduced output vocabulary (FROM-clause
        tokens removed) and the *_removefrom output directories.
    """
    # Validate output_vocab
    # Full decoder vocabulary, including table aliases t1..t7 and FROM-clause tokens.
    output_vocab = ['_UNK', '_EOS', '.', 't1', 't2', '=', 'select', 'from', 'as', 'value', 'join', 'on', ')', '(', 'where', 't3', 'by', ',', 'count', 'group', 'order', 'distinct', 't4', 'and', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 't5', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!', 'union', 'between', 't6', '-', 't7', '+', '/']
    if remove_from:
        # Reduced vocabulary used when FROM clauses are stripped from targets.
        output_vocab = ['_UNK', '_EOS', '=', 'select', 'value', ')', '(', 'where', ',', 'count', 'group_by', 'order_by', 'distinct', 'and', 'limit_value', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!=', 'union', 'between', '-', '+', '/']
    print('size of output_vocab', len(output_vocab))
    print('output_vocab', output_vocab)
    print()

    # Resolve per-dataset input paths, schema file, and output directory.
    if dataset == 'spider':
        spider_dir = 'data/spider/'
        database_schema_filename = 'data/spider/tables.json'
        output_dir = 'data/spider_data'
        if remove_from:
            output_dir = 'data/spider_data_removefrom'
        train_database, dev_database = read_db_split(spider_dir)
    elif dataset == 'sparc':
        sparc_dir = 'data/sparc/'
        database_schema_filename = 'data/sparc/tables.json'
        output_dir = 'data/sparc_data'
        if remove_from:
            output_dir = 'data_process/sparc_data_removefrom'
        train_database, dev_database = read_db_split(sparc_dir)
    elif dataset == 'cosql':
        cosql_dir = 'data/cosql/'
        database_schema_filename = 'data/cosql/tables.json'
        output_dir = 'data/cosql_data'
        if remove_from:
            output_dir = 'data_process/cosql_data_removefrom'
        train_database, dev_database = read_db_split(cosql_dir)

    # Start from clean output directories (previous runs are deleted).
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    if os.path.isdir('data_process'):
        shutil.rmtree('data_process')
    os.mkdir('data_process')
    os.mkdir(output_dir)

    schema_tokens = {}
    column_names = {}
    database_schemas = {}

    print('Reading spider database schema file')
    schema_tokens, column_names, database_schemas = read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas)
    num_database = len(schema_tokens)
    print('num_database', num_database, len(train_database), len(dev_database))
    print('total number of schema_tokens / databases:', len(schema_tokens))

    # Re-emit the (normalized) schemas next to the processed splits.
    output_database_schema_filename = os.path.join(output_dir, 'tables.json')
    with open(output_database_schema_filename, 'w') as outfile:
        json.dump([v for k,v in database_schemas.items()], outfile, indent=4)

    if dataset == 'spider':
        interaction_list = read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif dataset == 'sparc':
        interaction_list = read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif dataset == 'cosql':
        interaction_list = read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    print('interaction_list length', len(interaction_list))

    # Train = every database NOT in the dev list; dev = the listed databases
    # in their listed order.
    train_interaction = []
    for database_id in interaction_list:
        if database_id not in dev_database:
            train_interaction += interaction_list[database_id]

    dev_interaction = []
    for database_id in dev_database:
        dev_interaction += interaction_list[database_id]

    print('train interaction: ', len(train_interaction))
    print('dev interaction: ', len(dev_interaction))

    write_interaction(train_interaction, 'train', output_dir)
    write_interaction(dev_interaction, 'dev', output_dir)
    return
165,303 | import os
import json
import copy
import torch
import sqlparse
import argparse
import subprocess
from collections import defaultdict
from reranker.predict import Ranker
import numpy
# Module-level reranker used by postprocess(); the RoBERTa-based model is
# loaded once at import time.
# NOTE(review): hard-coded checkpoint path — assumes the process runs from
# the repository root; confirm before deploying elsewhere.
ranker = Ranker("./submit_models/reranker_roberta.pt")
def postprocess_one(pred_sql, schema):
    """Turn one raw model prediction into an executable SQL string.

    Expands the merged training-vocabulary keywords, pretty-prints with
    sqlparse, then routes to the nested- or single-query post-processor.
    Returns "None" when nothing usable could be produced.
    """
    # Undo the vocabulary merges used at training time (order matters).
    cleaned = (pred_sql
               .replace('group_by', 'group by')
               .replace('order_by', 'order by')
               .replace('limit_value', 'limit 1')
               .replace('_EOS', '')
               .replace(' value ', ' 1 ')
               .replace('distinct', '')
               .strip(',')
               .strip())
    if cleaned.endswith('value'):
        cleaned = cleaned[:-len('value')] + '1'

    try:
        pretty = sqlparse.format(cleaned, reindent=True)
    except:
        # sqlparse choked: fall back to the cleaned string ("None" if empty).
        return "None" if len(cleaned) == 0 else cleaned

    single_spaced = normalize_space(pretty)
    # More than one SELECT means a nested / set-operation query.
    if single_spaced.count('select') > 1:
        final_sql = postprocess_nested(single_spaced, schema)
    else:
        final_sql, _ = postprocess_single(single_spaced, schema)
    return "None" if final_sql == "" else final_sql
def postprocess(predictions, database_schema, remove_from=False):
    """Post-process predicted SQL and rescore beams with the global reranker.

    predictions: list of prediction dicts (database_id, interaction_id,
        index_in_interaction, input_seq, flat_prediction, flat_gold_queries,
        beam) as produced by read_prediction.
    database_schema: {db_id: schema entry} as produced by read_schema.
    remove_from: when True, run postprocess_one on the top-1 prediction as
        well (beams are always post-processed).

    Returns {db_id: [(postprocessed_sql, interaction_id, turn_id,
    rescored_beam, question, postprocessed_gold), ...]}. Side effects:
    reads/periodically writes the reranker score cache './score_vis_en.json'
    and prints running progress plus the final exact-string accuracy.
    """
    import math
    use_reranker = True
    correct = 0
    total = 0
    postprocess_sqls = {}
    utterances = []
    Count = 0
    # Persistent cache of reranker scores keyed by "utterances#sql".
    score_vis = dict()
    if os.path.exists('./score_vis_en.json'):
        with open('./score_vis_en.json', 'r') as f:
            score_vis = json.load(f)
    for pred in predictions:
        Count += 1
        if Count % 10 == 0:
            print('Count', Count, "score_vis", len(score_vis))
        db_id = pred['database_id']
        schema = database_schema[db_id]
        # return_match is optional on older prediction files.
        # NOTE(review): it is read but never used afterwards — confirm it can go.
        try:
            return_match = pred['return_match']
        except:
            return_match = ''
        if db_id not in postprocess_sqls:
            postprocess_sqls[db_id] = []
        interaction_id = pred['interaction_id']
        turn_id = pred['index_in_interaction']
        total += 1
        # A new interaction starts at turn 0: reset the utterance history.
        if turn_id == 0:
            utterances = []
        question = ' '.join(pred['input_seq'])
        pred_sql_str = ' '.join(pred['flat_prediction'])
        utterances.append(question)
        beam_sql_strs = []
        for score, beam_sql_str in pred['beam']:
            beam_sql_strs.append( (score, ' '.join(beam_sql_str)))
        gold_sql_str = ' '.join(pred['flat_gold_queries'][0])
        if pred_sql_str == gold_sql_str:
            correct += 1
        postprocess_sql = pred_sql_str
        if remove_from:
            postprocess_sql = postprocess_one(pred_sql_str, schema)
        # Collect the beam SQLs whose reranker score is not cached yet,
        # deduplicated, so they can be scored in one batch.
        sqls = []
        key_idx = dict()
        for i in range(len(beam_sql_strs)):
            sql = postprocess_one(beam_sql_strs[i][1], schema)
            key = '&'.join(utterances)+'#'+sql
            if key not in score_vis:
                if key not in key_idx:
                    key_idx[key]=len(sqls)
                    sqls.append(sql.replace("value", "1"))
        if use_reranker and len(sqls) > 0:
            score_list = ranker.get_score_batch(utterances, sqls)
        # Rescore every beam entry: combine the original decoder score with
        # the (negative log) reranker score; the i*1e-4 term keeps the
        # original beam order as a tie-breaker.
        for i in range(len(beam_sql_strs)):
            score = beam_sql_strs[i][0]
            sql = postprocess_one(beam_sql_strs[i][1], schema)
            if use_reranker:
                key = '&'.join(utterances)+'#'+sql
                old_score = score
                if key in score_vis:
                    score = score_vis[key]
                else:
                    score = score_list[key_idx[key]]
                    score_vis[key] = score
                score += i * 1e-4
                score = -math.log(score)
                score += old_score
            beam_sql_strs[i] = (score, sql)
        # NOTE(review): this only holds when remove_from is True (otherwise
        # postprocess_sql is the raw prediction while beam entries are
        # post-processed) — confirm callers always pass remove_from=True.
        assert beam_sql_strs[0][1] == postprocess_sql
        # Periodically flush the score cache so reruns can resume.
        if use_reranker and Count % 100 == 0:
            with open('score_vis_en.json', 'w') as file:
                json.dump(score_vis, file)
        gold_sql_str = postprocess_one(gold_sql_str, schema)
        postprocess_sqls[db_id].append((postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_sql_str))
    print (correct, total, float(correct)/total)
    return postprocess_sqls
165,304 | import os
import json
import copy
import torch
import sqlparse
import argparse
import subprocess
from collections import defaultdict
from reranker.predict import Ranker
import numpy
def read_prediction(pred_file):
    """Load model predictions from one file, or merge beams from several.

    pred_file: path to a JSON-lines prediction file, or a comma-separated
        list of such paths. Each line's "beam" list is truncated to its top-5
        entries on load. With multiple files, the (truncated) beams of the
        i-th prediction from every extra file are concatenated onto the
        first file's i-th prediction.

    Returns the list of prediction dicts.
    Raises AssertionError if the files contain different numbers of predictions.
    """
    def load_one(path):
        # One JSON object per line; keep only the 5 best beam entries.
        print('Read prediction from', path)
        preds = []
        with open(path) as f:
            for line in f:
                pred = json.loads(line)
                pred["beam"] = pred["beam"][:5]
                preds.append(pred)
        print('Number of predictions', len(preds))
        return preds

    # Previously the single-file and multi-file branches duplicated the whole
    # parsing loop; both now share load_one().
    paths = pred_file.split(',')
    merged = load_one(paths[0])
    for path in paths[1:]:
        extra = load_one(path)
        assert len(merged) == len(extra)
        for base, add in zip(merged, extra):
            base['beam'] += add['beam']
    return merged
165,305 | import os
import json
import copy
import torch
import sqlparse
import argparse
import subprocess
from collections import defaultdict
from reranker.predict import Ranker
import numpy
def read_schema(table_schema_path):
    """Return {db_id: schema_entry} loaded from a tables.json file."""
    with open(table_schema_path) as schema_file:
        entries = json.load(schema_file)
    return {entry['db_id']: entry for entry in entries}
165,306 | import os
import json
import copy
import torch
import sqlparse
import argparse
import subprocess
from collections import defaultdict
from reranker.predict import Ranker
import numpy
def write_and_evaluate(postprocess_sqls, db_path, table_schema_path, gold_path, dataset):
    """Write predictions to 'output_temp.txt' and build the evaluation command.

    postprocess_sqls: {db_id: [prediction tuples]} as produced by postprocess.
    gold_path: TSV gold file; its second column gives the database ids and
        determines which databases (and in what order) are written.
    dataset: 'spider' writes one SQL per line; 'sparc'/'cosql' write three
        lines per turn (beam SQLs, beam scores, question) with a blank line
        between interactions.

    Returns the shell command string for the matching eval script (the
    command is built here but NOT executed).
    """
    # Collect the distinct database ids from the gold file, in first-seen order.
    db_list = []
    with open(gold_path) as f:
        for line in f:
            line_split = line.strip().split('\t')
            if len(line_split) != 2:
                continue
            db = line.strip().split('\t')[1]
            if db not in db_list:
                db_list.append(db)

    output_file = 'output_temp.txt'
    if dataset == 'spider':
        # NOTE(review): this branch unpacks 3-tuples while the sparc/cosql
        # branch expects 6-tuples — confirm what the producer emits for spider.
        with open(output_file, "w") as f:
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id in postprocess_sqls[db]:
                    f.write(postprocess_sql+'\n')
        command = 'python3 eval_scripts/evaluation.py --db {} --table {} --etype match --gold {} --pred {}'.format(db_path,
                                                                              table_schema_path,
                                                                              gold_path,
                                                                              os.path.abspath(output_file))
    elif dataset in ['sparc', 'cosql']:
        cnt = 0
        with open(output_file, "w") as f:
            # Interactions are separated by a blank line; last_id tracks the
            # (interaction_id, db) pair to detect interaction boundaries.
            last_id = None
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_str in postprocess_sqls[db]:
                    if last_id is not None and last_id != str(interaction_id)+db:
                        f.write('\n')
                    last_id = str(interaction_id) + db
                    f.write('{}\n'.format( '\t'.join( [x[1] for x in beam_sql_strs] ) ))
                    f.write('{}\n'.format( '\t'.join( [str(x[0]) for x in beam_sql_strs] )) )
                    f.write('{}\n'.format( question ))
                    cnt += 1
        # Dead code kept for reference: earlier variant that wrote only the
        # top-1 SQL per turn to 'predicted_sql.txt'.
        """
        predict_file = 'predicted_sql.txt'
        cnt = 0
        with open(predict_file, "w") as f:
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_str in postprocess_sqls[db]:
                    print(postprocess_sql)
                    print(beam_sql_strs)
                    print(question)
                    print(gold_str)
                    if turn_id == 0 and cnt > 0:
                        f.write('\n')
                    f.write('{}\n'.format(postprocess_sql))
                    cnt += 1
        """
        command = 'python3 eval_scripts/gen_final_en.py --db {} --table {} --etype match --gold {} --pred {}'.format(db_path,
                                                                              table_schema_path,
                                                                              gold_path,
                                                                              os.path.abspath(output_file))
    #command += '; rm output_temp.txt'
    print('begin command')
    return command
165,307 | import argparse
import os
import sqlite3 as db
import pprint
def process_schema(args):
    """Open <root_path>/<db>/<db>.sqlite and print each table with its column names."""
    schema_name = args.db
    schema_path = os.path.join(args.root_path, schema_name, schema_name + '.sqlite')
    print("load db data from", schema_path)

    connection = db.connect(schema_path)
    cursor = connection.cursor()

    # List all user tables in the database.
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
    table_name_lst = [row[0] for row in cursor.fetchall()]
    print(table_name_lst)

    for table_name in table_name_lst:
        # Select the table so cursor.description exposes its column names.
        cursor.execute("SELECT * FROM %s" % table_name)
        col_name_list = [desc[0] for desc in cursor.description]
        print(table_name, col_name_list)
        cursor.fetchall()  # drain the rows (originally kept for debugging)
165,308 | import json
import sqlite3
import sys
from nltk import word_tokenize
The provided code snippet includes necessary dependencies for implementing the `get_schema` function. Write a Python function `def get_schema(db)` to solve the following problem:
Get database's schema, which is a dict with table name as key and list of column names as value :param db: database path :return: schema dict
Here is the function:
def get_schema(db):
    """Map each table in the SQLite database at *db* to its column names.

    :param db: database path
    :return: {lowercased table name: [lowercased column names]}
    """
    connection = sqlite3.connect(db)
    cursor = connection.cursor()

    # All user tables, lowercased.
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
    table_names = [str(row[0].lower()) for row in cursor.fetchall()]

    # PRAGMA table_info yields (cid, name, type, ...) per column.
    return {
        table: [str(info[1].lower())
                for info in cursor.execute("PRAGMA table_info({})".format(table)).fetchall()]
        for table in table_names
    }
165,309 | import json
import sqlite3
import sys
from nltk import word_tokenize
def get_schema_from_json(fpath):
    """Build {table: [column, ...]} (all lowercased) from a JSON schema dump."""
    with open(fpath) as schema_file:
        entries = json.load(schema_file)
    return {
        str(entry['table'].lower()):
            [str(col['column_name'].lower()) for col in entry['col_data']]
        for entry in entries
    }
165,310 | import json
import sqlite3
import sys
from nltk import word_tokenize
def load_data(fpath):
    """Deserialize and return the JSON content of *fpath*."""
    with open(fpath) as handle:
        return json.loads(handle.read())
165,311 | import json
import sqlite3
import sys
from nltk import word_tokenize
def tokenize(string):
    """Tokenize a SQL query, keeping quoted string values as single tokens
    and merging the two-character operators !=, >=, <= into one token.
    """
    string = str(string)
    string = string.replace("\'", "\"")  # ensures all string values wrapped by "" problem??
    quote_idxs = [idx for idx, char in enumerate(string) if char == '"']
    assert len(quote_idxs) % 2 == 0, "Unexpected quote"

    # keep string value as token: replace each quoted span with a placeholder
    # key so word_tokenize cannot split it; remember the original value.
    # (Iterate right-to-left so earlier indices stay valid after replacement.)
    vals = {}
    for i in range(len(quote_idxs)-1, -1, -2):
        qidx1 = quote_idxs[i-1]
        qidx2 = quote_idxs[i]
        val = string[qidx1: qidx2+1]
        key = "__val_{}_{}__".format(qidx1, qidx2)
        string = string[:qidx1] + key + string[qidx2+1:]
        vals[key] = val

    toks = [word.lower() for word in word_tokenize(string)]
    # replace with string value token: swap the placeholders back.
    for i in range(len(toks)):
        if toks[i] in vals:
            toks[i] = vals[toks[i]]

    # find if there exists !=, >=, <= : word_tokenize splits them into two
    # tokens ('!', '='); merge each such pair back into one operator token.
    eq_idxs = [idx for idx, tok in enumerate(toks) if tok == "="]
    eq_idxs.reverse()
    prefix = ('!', '>', '<')
    for eq_idx in eq_idxs:
        pre_tok = toks[eq_idx-1]
        if pre_tok in prefix:
            toks = toks[:eq_idx-1] + [pre_tok + "="] + toks[eq_idx+1: ]

    return toks
def get_tables_with_alias(schema, toks):
    """Return the alias->table map scanned from *toks*, plus an identity
    entry for every table in *schema* (so bare table names resolve too).
    """
    alias_map = scan_alias(toks)
    for table in schema:
        assert table not in alias_map, "Alias {} has the same name in table".format(table)
        alias_map[table] = table
    return alias_map
def parse_sql(toks, start_idx, tables_with_alias, schema):
    """Recursively parse a (possibly parenthesized) SQL query from *toks*.

    Starts at *start_idx* and returns (next_idx, sql_dict) where sql_dict
    has the keys: from, select, where, groupBy, having, orderBy, limit,
    and one key per set operation in SQL_OPS (intersect/union/except),
    the latter holding a nested parse or None.
    """
    isBlock = False # indicate whether this is a block of sql/sub-sql
    len_ = len(toks)
    idx = start_idx

    sql = {}
    if toks[idx] == '(':
        isBlock = True
        idx += 1

    # parse from clause in order to get default tables
    # (FROM is scanned first, from start_idx, because the SELECT columns may
    # need the default tables it discovers.)
    from_end_idx, table_units, conds, default_tables = parse_from(toks, start_idx, tables_with_alias, schema)
    sql['from'] = {'table_units': table_units, 'conds': conds}
    # select clause — parsed from the current position; idx then jumps past
    # the FROM clause that was already consumed above.
    _, select_col_units = parse_select(toks, idx, tables_with_alias, schema, default_tables)
    idx = from_end_idx
    sql['select'] = select_col_units
    # where clause
    idx, where_conds = parse_where(toks, idx, tables_with_alias, schema, default_tables)
    sql['where'] = where_conds
    # group by clause
    idx, group_col_units = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['groupBy'] = group_col_units
    # having clause
    idx, having_conds = parse_having(toks, idx, tables_with_alias, schema, default_tables)
    sql['having'] = having_conds
    # order by clause
    idx, order_col_units = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['orderBy'] = order_col_units
    # limit clause
    idx, limit_val = parse_limit(toks, idx)
    sql['limit'] = limit_val

    idx = skip_semicolon(toks, idx)
    if isBlock:
        assert toks[idx] == ')'
        idx += 1  # skip ')'
    idx = skip_semicolon(toks, idx)

    # intersect/union/except clause
    for op in SQL_OPS:  # initialize IUE
        sql[op] = None
    if idx < len_ and toks[idx] in SQL_OPS:
        sql_op = toks[idx]
        idx += 1
        # The right-hand side of a set operation is a full query: recurse.
        idx, IUE_sql = parse_sql(toks, idx, tables_with_alias, schema)
        sql[sql_op] = IUE_sql

    return idx, sql
def get_sql(schema, query):
    """Parse *query* against *schema* into the structured SQL dict."""
    tokens = tokenize(query)
    alias_map = get_tables_with_alias(schema.schema, tokens)
    _, parsed = parse_sql(tokens, 0, alias_map, schema)
    return parsed
165,332 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
class Evaluator:
    """A simple evaluator for predicted-vs-gold parsed SQL dicts.

    Provides query-hardness bucketing, exact-match scoring, and per-clause
    partial-match scoring (the last partial result is kept on
    self.partial_scores).
    """
    def __init__(self):
        # Partial-match scores of the most recent eval_exact_match call.
        self.partial_scores = None

    def eval_hardness(self, sql):
        """Bucket *sql* into 'easy' / 'medium' / 'hard' / 'extra' based on
        the component counts computed by the count_* helpers."""
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)

        if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
            return "easy"
        elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
                (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
            return "medium"
        elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
                (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
                (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
            return "hard"
        else:
            return "extra"

    def eval_exact_match(self, pred, label):
        """Return 1/True iff every partial F1 is 1 and (when the gold query
        has FROM tables) the table sets match; 0 otherwise. Also stores the
        partial scores on self.partial_scores."""
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores

        for key, score in partial_scores.items():
            if score['f1'] != 1:
                return 0
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return label_tables == pred_tables
        return 1

    def eval_partial_match(self, pred, label):
        """Return per-clause {acc, rec, f1, label_total, pred_total} for
        select, where, group, order, and/or, IUEN, and keywords (with
        AGG-free / OP-free variants for select and where)."""
        res = {}

        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}

        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}

        label_total, pred_total, cnt = eval_group(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}

        label_total, pred_total, cnt = eval_having(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}

        label_total, pred_total, cnt = eval_order(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}

        label_total, pred_total, cnt = eval_and_or(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}

        label_total, pred_total, cnt = eval_IUEN(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}

        label_total, pred_total, cnt = eval_keywords(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        return res
def print_scores(scores, etype):
    """Pretty-print evaluation results per difficulty level and per turn.

    scores: dict keyed by level ('easy'..'joint_all') and by turn
        ('turn 1'..'turn >4'); each entry holds 'count', 'exact', 'exec'
        and (per level) a 'partial' sub-dict with acc/rec/f1 per clause.
    etype: which metrics to show — 'exec', 'match', or 'all'.
    """
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', "joint_all"]
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']

    # --- per-difficulty-level section ---
    print("{:20} {:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
    counts = [scores[level]['count'] for level in levels]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))

    if etype in ["all", "exec"]:
        print('===================== EXECUTION ACCURACY =====================')
        this_scores = [scores[level]['exec'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))

    if etype in ["all", "match"]:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))

    # --- per-turn section ---
    print("\n\n{:20} {:20} {:20} {:20} {:20} {:20}".format("", *turns))
    counts = [scores[turn]['count'] for turn in turns]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))

    if etype in ["all", "exec"]:
        # fixed typo: header previously read 'TRUN XECUTION ACCURACY'
        print('===================== TURN EXECUTION ACCURACY =====================')
        this_scores = [scores[turn]['exec'] for turn in turns]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))

    if etype in ["all", "match"]:
        # fixed typo: header previously read 'TRUN EXACT MATCHING ACCURACY'
        print('\n====================== TURN EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[turn]['exact'] for turn in turns]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    return 1 if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).

    db: path to the SQLite database file to execute against.
    p_str / g_str: predicted and gold SQL strings.
    pred / gold: the corresponding parsed SQL dicts (used to key result
        columns by their select val_units, so column order doesn't matter).
    Returns False when the predicted SQL fails to execute.
    """
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    # Best-effort: an invalid predicted query simply counts as a miss.
    try:
        cursor.execute(p_str)
        p_res = cursor.fetchall()
    except:
        return False

    cursor.execute(g_str)
    q_res = cursor.fetchall()

    def res_map(res, val_units):
        # Key each result column by its val_unit signature so comparison is
        # independent of the columns' textual order in the SELECT clause.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
def rebuild_sql_val(sql):
    """Recursively normalize condition values in *sql* (no-op unless
    DISABLE_VALUE is set). Mutates and returns the dict."""
    if sql is None or not DISABLE_VALUE:
        return sql

    # Conditions can carry literal values in three places.
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    for cond_key in ('having', 'where'):
        sql[cond_key] = rebuild_condition_val(sql[cond_key])

    # Recurse into the set-operation sub-queries.
    for set_op in ('intersect', 'except', 'union'):
        sql[set_op] = rebuild_sql_val(sql[set_op])

    return sql
def build_valid_col_units(table_units, schema):
    """Return the schema id-map values ('__table.col__' tokens) whose table
    prefix belongs to one of the given table_units."""
    table_ids = [unit[1] for unit in table_units if unit[0] == TABLE_TYPE['table_unit']]
    # Drop the trailing '__' so the ids can be compared against value prefixes.
    prefixes = [table_id[:-2] for table_id in table_ids]
    return [value for value in schema.idMap.values()
            if '.' in value and value[:value.index('.')] in prefixes]
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Recursively remap column references in every clause of *sql* through
    the foreign-key map *kmap*. Mutates and returns the dict."""
    if sql is None:
        return sql

    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    for cond_key in ('where', 'having'):
        sql[cond_key] = rebuild_condition_col(valid_col_units, sql[cond_key], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)

    # Set-operation sub-queries are full SQL dicts: recurse.
    for set_op in ('intersect', 'except', 'union'):
        sql[set_op] = rebuild_sql_col(valid_col_units, sql[set_op], kmap)

    return sql
class Schema:
    """
    Simple schema which maps table&column to a unique identifier
    """
    def __init__(self, schema):
        # schema: dict mapping table name -> list of column names.
        self._schema = schema
        self._idMap = self._map(self._schema)

    # Callers (e.g. get_sql) access these as attributes — schema.schema /
    # schema.idMap — so they must be properties, not plain methods.
    @property
    def schema(self):
        return self._schema

    @property
    def idMap(self):
        return self._idMap

    def _map(self, schema):
        """Build the identifier map: '*', 'table.column' and 'table' keys
        (all lowercased) map to '__...__' tokens."""
        idMap = {"*": "__all__"}
        id = 1
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + "." + val.lower()] = (
                    "__" + key.lower() + "." + val.lower() + "__"
                )
                id += 1
        for key in schema:
            idMap[key.lower()] = "__" + key.lower() + "__"
            id += 1
        return idMap
def get_schema(db):
    """Return {table_name: [column_name, ...]} (all lowercased) for the
    SQLite database file at *db*.
    """
    connection = sqlite3.connect(db)
    cursor = connection.cursor()

    # Enumerate the user tables first.
    rows = cursor.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall()

    schema = {}
    for (raw_name,) in rows:
        table = str(raw_name.lower())
        # PRAGMA table_info yields (cid, name, type, ...) per column.
        info = cursor.execute("PRAGMA table_info({})".format(table)).fetchall()
        schema[table] = [str(col[1].lower()) for col in info]
    return schema
def get_sql(schema, query):
    """Tokenize *query*, resolve table aliases, and parse it into the
    structured SQL dict."""
    query_toks = tokenize(query)
    aliases = get_tables_with_alias(schema.schema, query_toks)
    return parse_sql(query_toks, 0, aliases, schema)[1]
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Evaluate predicted SQL against gold for multi-turn interaction data.

    gold / predict: text files of tab-separated records grouped into
    interactions by blank lines (each gold record is "<SQL> TAB <db_id>").
    db_dir: directory containing <db_id>/<db_id>.sqlite databases.
    etype: which metrics to compute -- "exec", "match" or "all".
    kmaps: {db_id: foreign-key column map} used to normalize columns.
    Prints score tables (per difficulty, per turn, and joint over whole
    interactions) via print_scores; returns None.
    """
    # read gold interactions: one list of records per blank-line-separated group
    with open(gold) as f:
        glist = []
        gseq_one = []
        for l in f.readlines():
            if len(l.strip()) == 0:
                glist.append(gseq_one)
                gseq_one = []
            else:
                lstrip = l.strip().split('\t')
                gseq_one.append(lstrip)
    #glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # read predictions, grouped the same way
    with open(predict) as f:
        plist = []
        pseq_one = []
        for l in f.readlines():
            if len(l.strip()) == 0:
                plist.append(pseq_one)
                pseq_one = []
            else:
                pseq_one.append(l.strip().split('\t'))
    #plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # plist = [[("select product_type_code from products group by product_type_code order by count ( * ) desc limit value", "orchestra")]]
    # glist = [[("SELECT product_type_code FROM Products GROUP BY product_type_code ORDER BY count(*) DESC LIMIT 1", "customers_and_orders")]]
    evaluator = Evaluator()
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    # scores is keyed by both difficulty level and turn bucket
    scores = {}
    for turn in turns:
        scores[turn] = {'count': 0, 'exact': 0.}
        scores[turn]['exec'] = 0
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
    eval_err_num = 0
    for p, g in zip(plist, glist):
        scores['joint_all']['count'] += 1
        turn_scores = {"exec": [], "exact": []}
        # NOTE: p and g are rebound below to the per-turn pair, shadowing the
        # per-interaction loop variables.
        for idx, pg in enumerate(zip(p, g)):
            p, g = pg
            p_str = p[0]
            p_str = p_str.replace("value", "1")
            g_str, db = g
            db_name = db
            db = os.path.join(db_dir, db, db + ".sqlite")
            schema = Schema(get_schema(db))
            g_sql = get_sql(schema, g_str)
            # difficulty is always derived from the gold query
            hardness = evaluator.eval_hardness(g_sql)
            # map 0-based turn index to "turn 1".."turn 4" / "turn >4" buckets
            if idx > 3:
                idx = ">4"
            else:
                idx += 1
            turn_id = "turn " + str(idx)
            scores[turn_id]['count'] += 1
            scores[hardness]['count'] += 1
            scores['all']['count'] += 1
            try:
                p_sql = get_sql(schema, p_str)
            except:
                # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
                p_sql = {
                "except": None,
                "from": {
                    "conds": [],
                    "table_units": []
                },
                "groupBy": [],
                "having": [],
                "intersect": None,
                "limit": None,
                "orderBy": [],
                "select": [
                    False,
                    []
                ],
                "union": None,
                "where": []
                }
                eval_err_num += 1
                print("eval_err_num:{}".format(eval_err_num))
            # rebuild sql for value evaluation
            kmap = kmaps[db_name]
            g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
            g_sql = rebuild_sql_val(g_sql)
            g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
            p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
            p_sql = rebuild_sql_val(p_sql)
            p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
            if etype in ["all", "exec"]:
                exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
                if exec_score:
                    scores[hardness]['exec'] += 1
                    scores[turn_id]['exec'] += 1
                    turn_scores['exec'].append(1)
                else:
                    turn_scores['exec'].append(0)
            if etype in ["all", "match"]:
                exact_score = evaluator.eval_exact_match(p_sql, g_sql)
                partial_scores = evaluator.partial_scores
                if exact_score == 0:
                    turn_scores['exact'].append(0)
                    print("{} pred: {}".format(hardness,p_str))
                    print("{} gold: {}".format(hardness,g_str))
                    print("")
                else:
                    turn_scores['exact'].append(1)
                scores[turn_id]['exact'] += exact_score
                scores[hardness]['exact'] += exact_score
                scores['all']['exact'] += exact_score
                # accumulate per-component partial scores for this difficulty and overall
                for type_ in partial_types:
                    if partial_scores[type_]['pred_total'] > 0:
                        scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores[hardness]['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores[hardness]['partial'][type_]['rec_count'] += 1
                    scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                    if partial_scores[type_]['pred_total'] > 0:
                        scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores['all']['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores['all']['partial'][type_]['rec_count'] += 1
                    scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
                entries.append({
                    'predictSQL': p_str,
                    'goldSQL': g_str,
                    'hardness': hardness,
                    'exact': exact_score,
                    'partial': partial_scores
                })
        # an interaction is jointly correct only when every turn was correct
        # (NOTE: all() over an empty list is True, e.g. with etype == "match"
        # the exec list stays empty and joint exec is still incremented)
        if all(v == 1 for v in turn_scores["exec"]):
            scores['joint_all']['exec'] += 1
        if all(v == 1 for v in turn_scores["exact"]):
            scores['joint_all']['exact'] += 1
    # convert accumulated sums into per-count averages
    for turn in turns:
        if scores[turn]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[turn]['exec'] /= scores[turn]['count']
        if etype in ["all", "match"]:
            scores[turn]['exact'] /= scores[turn]['count']
    for level in levels:
        if scores[level]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[level]['exec'] /= scores[level]['count']
        if etype in ["all", "match"]:
            scores[level]['exact'] /= scores[level]['count']
            for type_ in partial_types:
                if scores[level]['partial'][type_]['acc_count'] == 0:
                    scores[level]['partial'][type_]['acc'] = 0
                else:
                    scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
                                                             scores[level]['partial'][type_]['acc_count'] * 1.0
                if scores[level]['partial'][type_]['rec_count'] == 0:
                    scores[level]['partial'][type_]['rec'] = 0
                else:
                    scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
                                                             scores[level]['partial'][type_]['rec_count'] * 1.0
                if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                    scores[level]['partial'][type_]['f1'] = 1
                else:
                    scores[level]['partial'][type_]['f1'] = \
                        2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
                        scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
    print_scores(scores, etype)
165,354 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
class Evaluator:
    """A simple evaluator for component and exact-match SQL comparison."""
    def __init__(self):
        # partial component scores of the most recent eval_exact_match call
        self.partial_scores = None

    def eval_hardness(self, sql):
        """Bucket a parsed SQL dict into easy/medium/hard/extra difficulty."""
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
            return "easy"
        elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
                (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
            return "medium"
        elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
                (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
                (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
            return "hard"
        else:
            return "extra"

    def eval_exact_match(self, pred, label):
        """Return 1/True iff every component matches and FROM tables agree.

        Side effect: stores the component breakdown in self.partial_scores.
        """
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        # fix: iterate values directly; the keys were fetched and discarded
        for score in partial_scores.values():
            if score['f1'] != 1:
                return 0
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return label_tables == pred_tables
        return 1

    def eval_partial_match(self, pred, label):
        """Score each SQL component; returns {component: {acc, rec, f1, totals}}."""
        def entry(cnt, pred_total, label_total):
            # shared result-dict shape for every component
            acc, rec, f1 = get_scores(cnt, pred_total, label_total)
            return {'acc': acc, 'rec': rec, 'f1': f1,
                    'label_total': label_total, 'pred_total': pred_total}

        res = {}
        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        res['select'] = entry(cnt, pred_total, label_total)
        res['select(no AGG)'] = entry(cnt_wo_agg, pred_total, label_total)
        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        res['where'] = entry(cnt, pred_total, label_total)
        res['where(no OP)'] = entry(cnt_wo_agg, pred_total, label_total)
        label_total, pred_total, cnt = eval_group(pred, label)
        res['group(no Having)'] = entry(cnt, pred_total, label_total)
        label_total, pred_total, cnt = eval_having(pred, label)
        res['group'] = entry(cnt, pred_total, label_total)
        label_total, pred_total, cnt = eval_order(pred, label)
        res['order'] = entry(cnt, pred_total, label_total)
        label_total, pred_total, cnt = eval_and_or(pred, label)
        res['and/or'] = entry(cnt, pred_total, label_total)
        label_total, pred_total, cnt = eval_IUEN(pred, label)
        res['IUEN'] = entry(cnt, pred_total, label_total)
        label_total, pred_total, cnt = eval_keywords(pred, label)
        res['keywords'] = entry(cnt, pred_total, label_total)
        return res
def print_scores(scores, etype):
    """Pretty-print the accumulated score tables per difficulty level.

    etype selects which sections to show: execution accuracy for
    "all"/"exec", exact/partial matching for "all"/"match".
    """
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    header_fmt = "{:20} {:20} {:20} {:20} {:20} {:20}"
    count_fmt = "{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}"
    row_fmt = "{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}"
    print(header_fmt.format("", *levels))
    print(count_fmt.format("count", *[scores[level]['count'] for level in levels]))
    if etype in ["all", "exec"]:
        print('===================== EXECUTION ACCURACY =====================')
        print(row_fmt.format("execution", *[scores[level]['exec'] for level in levels]))
    if etype in ["all", "match"]:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        print(row_fmt.format("exact match", *[scores[level]['exact'] for level in levels]))
        sections = [
            ('\n---------------------PARTIAL MATCHING ACCURACY----------------------', 'acc'),
            ('---------------------- PARTIAL MATCHING RECALL ----------------------', 'rec'),
            ('---------------------- PARTIAL MATCHING F1 --------------------------', 'f1'),
        ]
        for banner, metric in sections:
            print(banner)
            for type_ in partial_types:
                row = [scores[level]['partial'][type_][metric] for level in levels]
                print(row_fmt.format(type_, *row))
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    return True if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).

    db: path to the sqlite database; p_str/g_str: SQL strings; pred/gold:
    parsed SQL dicts (only their select val_units are used, to key columns).
    A prediction that fails to execute scores False; the gold query is
    assumed valid and will raise if it is not.
    """
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    try:
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # invalid prediction: no execution credit
            return False
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        # fix: the connection used to be leaked
        conn.close()

    def res_map(res, val_units):
        # map each selected val_unit to its column of fetched values
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
def rebuild_sql_val(sql):
    """Recursively normalize literal values in a parsed SQL dict in place.

    No-op (returns sql unchanged) when sql is None or value checking is
    enabled (i.e. DISABLE_VALUE is falsy).
    """
    if sql is None or not DISABLE_VALUE:
        return sql
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    for clause in ('having', 'where'):
        sql[clause] = rebuild_condition_val(sql[clause])
    # recurse into the set-operation sub-queries
    for set_op in ('intersect', 'except', 'union'):
        sql[set_op] = rebuild_sql_val(sql[set_op])
    return sql
def build_valid_col_units(table_units, schema):
    """Collect schema column identifiers that belong to the given tables."""
    table_type = TABLE_TYPE['table_unit']
    # "__tab__"[:-2] -> "__tab", i.e. the prefix shared with "__tab.col__"
    prefixes = {unit[1][:-2] for unit in table_units if unit[0] == table_type}
    return [
        ident
        for ident in schema.idMap.values()
        if '.' in ident and ident[:ident.index('.')] in prefixes
    ]
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Recursively rewrite column references of a parsed SQL dict via kmap."""
    if sql is None:
        return sql
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    for clause in ('where', 'having'):
        sql[clause] = rebuild_condition_col(valid_col_units, sql[clause], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    # recurse into the set-operation sub-queries
    for set_op in ('intersect', 'except', 'union'):
        sql[set_op] = rebuild_sql_col(valid_col_units, sql[set_op], kmap)
    return sql
class Schema:
    """
    Simple schema which maps table&column to a unique identifier

    idMap maps "*", "table" and "table.column" (lower-cased) to the
    "__...__"-wrapped identifiers used by the SQL parser.
    """
    def __init__(self, schema):
        # schema: {table_name: [column_name, ...]}
        self._schema = schema
        self._idMap = self._map(self._schema)

    @property
    def schema(self):
        # property, not a method: callers in this module read `schema.schema`
        # as an attribute (see get_sql)
        return self._schema

    @property
    def idMap(self):
        # property, not a method: callers read `schema.idMap.values()`
        # (see build_valid_col_units)
        return self._idMap

    def _map(self, schema):
        """Build the {name: "__name__"} identifier map from the raw schema."""
        # (a dead `id` counter that shadowed the builtin was removed)
        idMap = {"*": "__all__"}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + "." + val.lower()] = (
                    "__" + key.lower() + "." + val.lower() + "__"
                )
        for key in schema:
            idMap[key.lower()] = "__" + key.lower() + "__"
        return idMap
def get_schema(db):
    """
    Get database's schema, which is a dict with table name as key
    and list of column names as value
    :param db: database path
    :return: schema dict (all names lower-cased)
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        # fetch table names
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]
        # fetch column names per table
        for table in tables:
            cursor.execute("PRAGMA table_info({})".format(table))
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        # fix: the connection used to be leaked
        conn.close()
    return schema
def get_sql(schema, query):
    """Parse *query* into the parsed-SQL dict, resolving names via *schema*."""
    query_toks = tokenize(query)
    alias_map = get_tables_with_alias(schema.schema, query_toks)
    _, parsed = parse_sql(query_toks, 0, alias_map, schema)
    return parsed
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Evaluate predicted SQL against gold for single-turn (Spider-style) data.

    gold / predict: text files with one tab-separated record per line
    (each gold record is "<SQL> TAB <db_id>").
    db_dir: directory containing <db_id>/<db_id>.sqlite databases.
    etype: which metrics to compute -- "exec", "match" or "all".
    kmaps: {db_id: foreign-key column map} used to normalize columns.
    Prints per-difficulty score tables via print_scores; returns None.
    """
    with open(gold) as f:
        glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    with open(predict) as f:
        plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # plist = [("select max(Share),min(Share) from performance where Type != 'terminal'", "orchestra")]
    # glist = [("SELECT max(SHARE) , min(SHARE) FROM performance WHERE TYPE != 'Live final'", "orchestra")]
    evaluator = Evaluator()
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    scores = {}
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
    eval_err_num = 0
    for p, g in zip(plist, glist):
        p_str = p[0]
        g_str, db = g
        db_name = db
        db = os.path.join(db_dir, db, db + ".sqlite")
        schema = Schema(get_schema(db))
        g_sql = get_sql(schema, g_str)
        # difficulty is always derived from the gold query
        hardness = evaluator.eval_hardness(g_sql)
        scores[hardness]['count'] += 1
        scores['all']['count'] += 1
        try:
            p_sql = get_sql(schema, p_str)
        except:
            # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
            p_sql = {
            "except": None,
            "from": {
                "conds": [],
                "table_units": []
            },
            "groupBy": [],
            "having": [],
            "intersect": None,
            "limit": None,
            "orderBy": [],
            "select": [
                False,
                []
            ],
            "union": None,
            "where": []
            }
            eval_err_num += 1
            print("eval_err_num:{}".format(eval_err_num))
        # rebuild sql for value evaluation
        kmap = kmaps[db_name]
        g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
        g_sql = rebuild_sql_val(g_sql)
        g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
        p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
        p_sql = rebuild_sql_val(p_sql)
        p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
        if etype in ["all", "exec"]:
            exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
            if exec_score:
                scores[hardness]['exec'] += 1
        if etype in ["all", "match"]:
            exact_score = evaluator.eval_exact_match(p_sql, g_sql)
            partial_scores = evaluator.partial_scores
            if exact_score == 0:
                print("{} pred: {}".format(hardness,p_str))
                print("{} gold: {}".format(hardness,g_str))
                print("")
            scores[hardness]['exact'] += exact_score
            scores['all']['exact'] += exact_score
            # accumulate per-component partial scores for this difficulty and overall
            for type_ in partial_types:
                if partial_scores[type_]['pred_total'] > 0:
                    scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores[hardness]['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores[hardness]['partial'][type_]['rec_count'] += 1
                scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                if partial_scores[type_]['pred_total'] > 0:
                    scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    scores['all']['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    scores['all']['partial'][type_]['rec_count'] += 1
                scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
            entries.append({
                'predictSQL': p_str,
                'goldSQL': g_str,
                'hardness': hardness,
                'exact': exact_score,
                'partial': partial_scores
            })
    # convert accumulated sums into per-count averages
    for level in levels:
        if scores[level]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[level]['exec'] /= scores[level]['count']
        if etype in ["all", "match"]:
            scores[level]['exact'] /= scores[level]['count']
            for type_ in partial_types:
                if scores[level]['partial'][type_]['acc_count'] == 0:
                    scores[level]['partial'][type_]['acc'] = 0
                else:
                    scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
                                                             scores[level]['partial'][type_]['acc_count'] * 1.0
                if scores[level]['partial'][type_]['rec_count'] == 0:
                    scores[level]['partial'][type_]['rec'] = 0
                else:
                    scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
                                                             scores[level]['partial'][type_]['rec_count'] * 1.0
                if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                    scores[level]['partial'][type_]['f1'] = 1
                else:
                    scores[level]['partial'][type_]['f1'] = \
                        2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
                            scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
    print_scores(scores, etype)
165,356 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def condition_has_or(conds):
    """True if any connector between condition units is 'or'.

    conds is the flat [cond_unit, conj, cond_unit, ...] list, so the
    connectors live at the odd indices.
    """
    return any(conj == 'or' for conj in conds[1::2])
165,357 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
def condition_has_like(conds):
    """True if any condition unit (even slots of conds) uses the LIKE operator."""
    like_op = WHERE_OPS.index('like')
    return any(cond_unit[1] == like_op for cond_unit in conds[::2])
165,358 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def condition_has_sql(conds):
    """True if any condition unit embeds a nested SQL dict as a value.

    cond_unit[3] / cond_unit[4] are the two value slots of a condition.
    """
    return any(
        type(val) is dict
        for cond_unit in conds[::2]
        for val in (cond_unit[3], cond_unit[4])
        if val is not None
    )
165,359 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
UNIT_OPS = ('none', '-', '+', "*", '/')
def val_has_op(val_unit):
    """True when the value unit applies an arithmetic unit op (not 'none')."""
    none_op = UNIT_OPS.index('none')
    return val_unit[0] != none_op
165,360 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def accuracy(count, total):
    """All-or-nothing accuracy: 1 when every predicted item matched, else 0."""
    return 1 if count == total else 0
165,361 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def recall(count, total):
    """All-or-nothing recall: 1 when every gold item was retrieved, else 0."""
    return 1 if count == total else 0
165,362 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def F1(acc, rec):
    """Harmonic mean of accuracy and recall; 0 when both are 0."""
    denom = acc + rec
    if denom == 0:
        return 0
    return (2. * acc * rec) / denom
165,363 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def get_scores(count, pred_total, label_total):
    """All-or-nothing (acc, rec, f1): 1s only when pred and label fully agree."""
    if pred_total == label_total == count:
        return 1, 1, 1
    return 0, 0, 0
165,364 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_sel(pred, label):
    """Compare SELECT clauses of pred and label.

    Returns (label_total, pred_total, cnt, cnt_wo_agg) where cnt counts
    exact unit matches and cnt_wo_agg matches ignoring the aggregation.
    """
    pred_sel = pred['select'][1]
    # fix: copy before remove() -- the original aliased label['select'][1]
    # and mutated the caller's gold structure while matching
    label_sel = list(label['select'][1])
    label_wo_agg = [unit[1] for unit in label_sel]
    pred_total = len(pred_sel)
    label_total = len(label_sel)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_sel:
        if unit in label_sel:
            cnt += 1
            label_sel.remove(unit)
        if unit[1] in label_wo_agg:
            cnt_wo_agg += 1
            label_wo_agg.remove(unit[1])
    return label_total, pred_total, cnt, cnt_wo_agg
165,365 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_where(pred, label):
    """Compare WHERE condition units (even slots of the where list).

    Returns (label_total, pred_total, cnt, cnt_wo_agg) where cnt counts
    exact matches and cnt_wo_agg matches on the val_unit (slot 2) only.
    """
    pred_conds = list(pred['where'][::2])
    label_conds = list(label['where'][::2])
    remaining_vals = [cond[2] for cond in label_conds]
    pred_total, label_total = len(pred_conds), len(label_conds)
    cnt = 0
    cnt_wo_agg = 0
    for cond in pred_conds:
        if cond in label_conds:
            cnt += 1
            label_conds.remove(cond)
        if cond[2] in remaining_vals:
            cnt_wo_agg += 1
            remaining_vals.remove(cond[2])
    return label_total, pred_total, cnt, cnt_wo_agg
165,366 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_group(pred, label):
    """Compare GROUP BY columns, ignoring table prefixes ("t.col" -> "col")."""
    def bare(col):
        # drop the table qualifier when present
        return col.split(".")[1] if "." in col else col

    pred_cols = [bare(unit[1]) for unit in pred['groupBy']]
    label_cols = [bare(unit[1]) for unit in label['groupBy']]
    pred_total, label_total = len(pred_cols), len(label_cols)
    cnt = 0
    for col in pred_cols:
        if col in label_cols:
            cnt += 1
            label_cols.remove(col)
    return label_total, pred_total, cnt
165,367 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_having(pred, label):
    """Treat GROUP BY + HAVING as one unit: credit only for a full match."""
    pred_total = 1 if pred['groupBy'] else 0
    label_total = 1 if label['groupBy'] else 0
    cnt = 0
    if pred_total == label_total == 1:
        pred_cols = [unit[1] for unit in pred['groupBy']]
        label_cols = [unit[1] for unit in label['groupBy']]
        if pred_cols == label_cols and pred['having'] == label['having']:
            cnt = 1
    return label_total, pred_total, cnt
165,368 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_order(pred, label):
    """Compare ORDER BY clauses plus presence/absence of LIMIT."""
    pred_total = 1 if len(pred['orderBy']) > 0 else 0
    label_total = 1 if len(label['orderBy']) > 0 else 0
    # LIMIT only has to agree on being present or absent, not on its value
    limits_agree = (pred['limit'] is None) == (label['limit'] is None)
    cnt = 1 if label['orderBy'] and pred['orderBy'] == label['orderBy'] and limits_agree else 0
    return label_total, pred_total, cnt
165,369 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_and_or(pred, label):
    """Compare the AND/OR connector sets of the WHERE clauses."""
    pred_ops = set(pred['where'][1::2])
    label_ops = set(label['where'][1::2])
    if pred_ops == label_ops:
        return 1, 1, 1
    return len(pred_ops), len(label_ops), 0
165,370 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_nested(pred, label):
    """Compare one nested (IUEN) sub-query slot between pred and label."""
    pred_total = 0 if pred is None else 1
    label_total = 0 if label is None else 1
    cnt = 0
    if pred is not None and label is not None:
        # only an exact match of the nested queries earns credit
        cnt += Evaluator().eval_exact_match(pred, label)
    return label_total, pred_total, cnt
def eval_IUEN(pred, label):
    """Aggregate nested-query matches over INTERSECT/EXCEPT/UNION."""
    label_total = pred_total = cnt = 0
    for op in ('intersect', 'except', 'union'):
        lt, pt, c = eval_nested(pred[op], label[op])
        label_total += lt
        pred_total += pt
        cnt += c
    return label_total, pred_total, cnt
165,371 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def get_keywords(sql):
    """Collect the set of SQL keywords/operators used by a parsed query."""
    res = set()
    if sql['where']:
        res.add('where')
    if sql['groupBy']:
        res.add('group')
    if sql['having']:
        res.add('having')
    if sql['orderBy']:
        # orderBy[0] is the direction token ('asc'/'desc')
        res.add(sql['orderBy'][0])
        res.add('order')
    if sql['limit'] is not None:
        res.add('limit')
    for set_op in ('except', 'union', 'intersect'):
        if sql[set_op] is not None:
            res.add(set_op)
    # or keyword: connectors live at the odd slots of the condition lists
    connectors = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    if 'or' in connectors:
        res.add('or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    # not keyword (cond_unit[0] is the negation flag)
    if any(cond_unit[0] for cond_unit in cond_units):
        res.add('not')
    # in / like keywords (cond_unit[1] is the operator id)
    in_op = WHERE_OPS.index('in')
    like_op = WHERE_OPS.index('like')
    if any(cond_unit[1] == in_op for cond_unit in cond_units):
        res.add('in')
    if any(cond_unit[1] == like_op for cond_unit in cond_units):
        res.add('like')
    return res
def eval_keywords(pred, label):
    """Compare keyword sets; returns (label_total, pred_total, overlap)."""
    pred_keywords = get_keywords(pred)
    label_keywords = get_keywords(label)
    overlap = pred_keywords & label_keywords
    return len(label_keywords), len(pred_keywords), len(overlap)
165,372 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
def count_component1(sql):
    """Count "component 1" constructs (clauses, joins, OR, LIKE) for hardness."""
    count = 0
    count += sum(1 for clause in ('where', 'groupBy', 'orderBy') if len(sql[clause]) > 0)
    if sql['limit'] is not None:
        count += 1
    table_units = sql['from']['table_units']
    if len(table_units) > 0:  # each extra table implies a JOIN
        count += len(table_units) - 1
    connectors = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    count += connectors.count('or')
    like_op = WHERE_OPS.index('like')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    count += sum(1 for cond_unit in cond_units if cond_unit[1] == like_op)
    return count
165,373 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def get_nestedSQL(sql):
    """Collect all nested sub-query dicts (condition values and IUEN branches)."""
    nested = []
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    for cond_unit in cond_units:
        # slots 3 and 4 are the condition's value slots
        for val in (cond_unit[3], cond_unit[4]):
            if type(val) is dict:
                nested.append(val)
    for set_op in ('intersect', 'except', 'union'):
        if sql[set_op] is not None:
            nested.append(sql[set_op])
    return nested
def count_component2(sql):
    """Number of nested sub-queries (the "component 2" hardness signal)."""
    return len(get_nestedSQL(sql))
165,374 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def count_agg(units):
    """Count units that carry an aggregation (per has_agg)."""
    return sum(1 for unit in units if has_agg(unit))
def count_others(sql):
    """Count "other" complexity signals used by the hardness heuristic.

    One point each for: more than one aggregation overall, multi-column
    SELECT, multi-condition WHERE, multi-column GROUP BY.
    """
    count = 0
    # total number of aggregations across select/where/group/order/having
    agg_count = count_agg(sql['select'][1])
    agg_count += count_agg(sql['where'][::2])
    agg_count += count_agg(sql['groupBy'])
    if len(sql['orderBy']) > 0:
        order_units = [unit[1] for unit in sql['orderBy'][1] if unit[1]]
        order_units += [unit[2] for unit in sql['orderBy'][1] if unit[2]]
        agg_count += count_agg(order_units)
    agg_count += count_agg(sql['having'])
    if agg_count > 1:
        count += 1
    if len(sql['select'][1]) > 1:
        count += 1
    if len(sql['where']) > 1:
        count += 1
    if len(sql['groupBy']) > 1:
        count += 1
    return count
165,375 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def isValidSQL(sql, db):
    """Return True iff *sql* executes without error against the SQLite db at *db*.

    Fixes a resource leak: the connection is now always closed, and the
    bare `except:` was narrowed to Exception.
    """
    conn = sqlite3.connect(db)
    try:
        conn.cursor().execute(sql)
    except Exception:
        return False
    finally:
        conn.close()
    return True
165,376 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
# Tokens to ignore as non-schema words when processing decoded SQL: special
# tokens, punctuation, literals, and every SQL keyword/operator group.
# NOTE(review): CLAUSE_KEYWORDS, JOIN_KEYWORDS, AGG_OPS, COND_OPS, SQL_OPS and
# ORDER_OPS are assumed defined earlier in this module -- confirm.
stop_word = set(['_UNK', '_EOS', 'select', 'value', ')', '(', 'where', '=', ',', 'count', 'group_by', 'order_by', 'limit_value', 'desc', '>', 'distinct', 'and', 'avg', 'having', '<', 'in', 'sum', 'max', 'asc', 'not', 'or', 'like', 'min', 'intersect', 'except', '!=', 'union', 'between', '-', '+', '0'])
stop_word |= set(CLAUSE_KEYWORDS)
stop_word |= set(JOIN_KEYWORDS)
stop_word |= set(WHERE_OPS)
stop_word |= set(UNIT_OPS)
stop_word |= set(AGG_OPS)
stop_word |= set(COND_OPS)
stop_word |= set(SQL_OPS)
stop_word |= set(ORDER_OPS)
class Evaluator:
    """A simple evaluator"""
    def __init__(self):
        # Per-component scores of the most recent eval_exact_match() call.
        self.partial_scores = None
    def eval_hardness(self, sql):
        # Bucket a parsed SQL dict into Spider's easy/medium/hard/extra levels.
        # Relies on the module-level count_component1 / count_component2 /
        # count_others helpers (defined elsewhere in this file).
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
            return "easy"
        elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
                (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
            return "medium"
        elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
                (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
                (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
            return "hard"
        else:
            return "extra"
    def eval_exact_match(self, pred, label):
        # Exact match: every partial component must score f1 == 1 and, when the
        # gold query has FROM tables, the table sets must be identical.
        # Returns a bool in the table-comparison case, otherwise 0/1 — callers
        # only rely on truthiness (or `== True`).
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for key, score in partial_scores.items():
            if score['f1'] != 1:
                return 0
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return label_tables == pred_tables
        return 1
    def eval_partial_match(self, pred, label):
        # Component-wise comparison. Each entry records acc/rec/f1 plus the
        # gold ('label_total') and predicted ('pred_total') component counts.
        # The eval_* helpers are module-level functions defined elsewhere in
        # this file.
        res = {}
        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_group(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_having(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_order(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_and_or(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_IUEN(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_keywords(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        return res
def print_scores(scores, etype):
    """Pretty-print the accumulated evaluation score tables.

    Args:
        scores: dict keyed both by difficulty level ('easy'..'joint_all') and
            by turn bucket ('turn 1'..'turn >4'); each entry holds 'count',
            'exec', 'exact' and (per level) 'partial' sub-scores.
        etype: which sections to print — 'exec', 'match' or 'all'.
    """
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', "joint_all"]
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    # ---- per-difficulty tables ----
    print("{:20} {:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
    counts = [scores[level]['count'] for level in levels]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    if etype in ["all", "exec"]:
        print('===================== EXECUTION ACCURACY =====================')
        this_scores = [scores[level]['exec'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
    if etype in ["all", "match"]:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
    # ---- per-turn tables ----
    print("\n\n{:20} {:20} {:20} {:20} {:20} {:20}".format("", *turns))
    counts = [scores[turn]['count'] for turn in turns]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    if etype in ["all", "exec"]:
        # Header typo fixed: was 'TRUN XECUTION ACCURACY'.
        print('===================== TURN EXECUTION ACCURACY =====================')
        this_scores = [scores[turn]['exec'] for turn in turns]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
    if etype in ["all", "match"]:
        # Header typo fixed: was 'TRUN EXACT MATCHING ACCURACY'.
        print('\n====================== TURN EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[turn]['exact'] for turn in turns]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
def cmp(sql1, sql2, kmap, evaluator2, schema):
    """Return True iff the two parsed SQL dicts are an exact match.

    Both inputs are deep-copied and normalized (literal values stripped,
    columns mapped through the foreign-key map *kmap*) before the
    comparison, so the caller's dicts are left untouched.
    """
    left = copy.deepcopy(sql1)
    right = copy.deepcopy(sql2)
    # Normalize the second query (the reference side).
    right_cols = build_valid_col_units(right['from']['table_units'], schema)
    right = rebuild_sql_val(right)
    right = rebuild_sql_col(right_cols, right, kmap)
    # Normalize the first query (the candidate side) the same way.
    left_cols = build_valid_col_units(left['from']['table_units'], schema)
    left = rebuild_sql_val(left)
    left = rebuild_sql_col(left_cols, left, kmap)
    exact_score = evaluator2.eval_exact_match(left, right)
    return exact_score == True
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    return 1 if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).
    """
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    try:
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # An unexecutable prediction can never match the gold result.
            return False
        # The gold query is assumed valid; let any error propagate loudly.
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        # The original leaked one connection per call; always release it.
        conn.close()

    def res_map(res, val_units):
        # Key each selected val_unit so the comparison is independent of the
        # SELECT column order; the column of fetched values is the payload.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
def rebuild_sql_val(sql):
    """Strip literal values from every condition of *sql*, recursively.

    A no-op when *sql* is None or value normalization is turned off via the
    module-level DISABLE_VALUE flag; otherwise mutates and returns *sql*.
    """
    if sql is None or not DISABLE_VALUE:
        return sql
    # Normalize the condition lists of every clause that can hold values.
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    for clause in ('having', 'where'):
        sql[clause] = rebuild_condition_val(sql[clause])
    # Recurse into compound set-operation subqueries.
    for branch in ('intersect', 'except', 'union'):
        sql[branch] = rebuild_sql_val(sql[branch])
    return sql
def build_valid_col_units(table_units, schema):
    """Collect schema id-map identifiers whose table appears in *table_units*."""
    # Identifiers look like '__table__'; dropping the trailing '__' yields the
    # prefix shared by that table's '__table.column__' entries.
    prefixes = [
        unit[1][:-2]
        for unit in table_units
        if unit[0] == TABLE_TYPE['table_unit']
    ]
    return [
        ident
        for ident in schema.idMap.values()
        if '.' in ident and ident[:ident.index('.')] in prefixes
    ]
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Normalize every column reference of *sql* through the foreign-key map.

    Mutates and returns *sql* (None passes through). Recurses into
    intersect/except/union subqueries. Relies on the module-level
    rebuild_*_col helpers defined elsewhere in this file.
    """
    if sql is None:
        return sql
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)
    sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], kmap)
    sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap)
    sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap)
    return sql
class Schema:
    """
    Simple schema which maps table&column to a unique identifier
    """
    def __init__(self, schema):
        self._schema = schema
        self._idMap = self._map(self._schema)

    # Bug fix: schema/idMap were plain methods, but every caller in this file
    # uses attribute access (e.g. `schema.schema`, `schema.idMap.values()`),
    # which would hand them a bound method. Expose them as properties.
    @property
    def schema(self):
        # Raw {table: [column, ...]} mapping, names as originally given.
        return self._schema

    @property
    def idMap(self):
        # Lower-cased '*' / 'table.column' / 'table' -> '__identifier__' map.
        return self._idMap

    def _map(self, schema):
        """Build the identifier map: '*', every 'table.column', every 'table'."""
        idMap = {"*": "__all__"}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + "." + val.lower()] = (
                    "__" + key.lower() + "." + val.lower() + "__"
                )
        for key in schema:
            idMap[key.lower()] = "__" + key.lower() + "__"
        return idMap
def get_schema(db):
    """
    Get database's schema, which is a dict with table name as key
    and list of column names as value
    :param db: database path
    :return: schema dict
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        # fetch table names
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]
        # fetch table info
        # NOTE: the table name is interpolated into the PRAGMA; the names come
        # from sqlite_master of the same database, not from user input.
        for table in tables:
            cursor.execute("PRAGMA table_info({})".format(table))
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        # The original leaked one connection per call; always release it.
        conn.close()
    return schema
def get_sql(schema, query):
    """Parse *query* into the nested SQL dict used by the evaluator.

    NOTE(review): parse_sql is not among the names imported from process_sql
    in this chunk — presumably it is available at module level elsewhere in
    the file; verify, otherwise this raises NameError at call time.
    """
    toks = tokenize(query)
    tables_with_alias = get_tables_with_alias(schema.schema, toks)
    _, sql = parse_sql(toks, 0, tables_with_alias, schema)
    return sql
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Score multi-turn predicted SQL against gold SQL and print a report.

    Args:
        gold: path to the gold file; one tab-separated "sql  db_id" line per
            turn, interactions separated by blank lines.
        predict: path to the prediction file; three lines per turn —
            tab-separated candidate SQLs, tab-separated candidate costs
            (turned into scores via exp(-cost)), then the question.
        db_dir: directory holding <db_id>/<db_id>.sqlite databases.
        etype: which metrics to compute — 'exec', 'match' or 'all'.
        kmaps: db_id -> foreign-key map (see build_foreign_key_map).

    Side effects: prints per-question diagnostics plus the final score
    tables, and writes the chosen predictions to ./predicted_sql_en.txt.
    """
    # ---- read the gold file: list of interactions, each a list of [sql, db_id] ----
    with open(gold) as f:
        glist = []
        gseq_one = []
        # NOTE(review): an interaction is only flushed on a blank line, so a
        # gold file without a trailing blank line silently drops its last
        # interaction — confirm the file format guarantees one.
        for l in f.readlines():
            if len(l.strip()) == 0:
                glist.append(gseq_one)
                gseq_one = []
            else:
                lstrip = l.strip().split('\t')
                gseq_one.append(lstrip)
        #glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # ---- read the prediction file: candidates, scores and question per turn ----
    with open(predict) as f:
        plist = []
        pseq_one = []
        p_socre_list = []
        pseq_score_one = []
        question_list = []
        question_one = []
        while True:
            l = f.readline()
            if l == "":
                break
            if len(l.strip()) == 0:
                # Blank line closes the current interaction.
                plist.append(pseq_one)
                pseq_one = []
                p_socre_list.append(pseq_score_one)
                pseq_score_one = []
                question_list.append(question_one)
                question_one = []
            else:
                x = l.strip().split('\t')
                pseq_one.append(x)
                l2 = f.readline()
                y = l2.strip().split('\t')
                # Beam costs -> scores: a lower cost gives a higher exp(-cost).
                y = [math.exp(-float(s)) for s in y]
                assert len(x) == len(y)
                pseq_score_one.append(y)
                question_one.append(f.readline().strip())
        #print('len(x)', len(x))
    #plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # plist = [[("select product_type_code from products group by product_type_code order by count ( * ) desc limit value", "orchestra")]]
    # glist = [[("SELECT product_type_code FROM Products GROUP BY product_type_code ORDER BY count(*) DESC LIMIT 1", "customers_and_orders")]]
    evaluator = Evaluator()
    # Second instance used only for the candidate-equivalence checks in cmp().
    evaluator2 = Evaluator()
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    # Score accumulators, bucketed by turn index and by gold-SQL hardness.
    scores = {}
    for turn in turns:
        scores[turn] = {'count': 0, 'exact': 0.}
        scores[turn]['exec'] = 0
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
    eval_err_num = 0
    n1 = 0   # NOTE(review): n1/n2/n3 are never used below
    n2 = 0
    n3 = 0
    predict_file = open("./predicted_sql_en.txt", "w")
    for p, g, s, questions in zip(plist, glist, p_socre_list, question_list):
        scores['joint_all']['count'] += 1
        turn_scores = {"exec": [], "exact": []}
        predict_str = ''
        for idx, pgs in enumerate(zip(p, g, s, questions)):
            # Careful: this rebinds the outer p/g/s loop variables per turn.
            p, g, s, question = pgs
            #p_str = p[0]
            #p_str = p_str.replace("value", "1")
            g_str, db = g
            db_name = db
            db = os.path.join(db_dir, db, db + ".sqlite")
            schema = Schema(get_schema(db))
            try:
                g_sql = get_sql(schema, g_str)
            except:
                # Unparseable gold SQL: skip this turn entirely.
                continue
            hardness = evaluator.eval_hardness(g_sql)
            ori_idx = idx   # unused
            # Map the 0-based turn index to its score bucket name.
            if idx > 3:
                idx = ">4"
            else:
                idx += 1
            turn_id = "turn " + str(idx)
            scores[turn_id]['count'] += 1
            scores[hardness]['count'] += 1
            scores['all']['count'] += 1
            # ---- candidate selection: parse, filter and vote over the beam ----
            p_sql = None
            flag = False
            p_sql_socre = []   # list of (parsed_sql, (accumulated_score, sql_string))
            for p_str, s in zip(p, s):
                cur_s = s
                flag2 = False
                try:
                    p_str = p_str.replace("value", "1")
                    p_sql = get_sql(schema, p_str)
                    flag2 = True
                except:
                    pass
                if flag2:
                    # Heuristic filter 1: reject a candidate whose SELECT clause
                    # repeats a non-keyword token, unless an aggregate keyword
                    # appears anywhere in the candidate.
                    vis = set()
                    for ss in p_str.split(' '):
                        ss = ss.lower()
                        if ss == 'from':
                            break
                        if ss in stop_word:
                            continue
                        if ss in vis:
                            flag2 = False
                            for fk in ['none', 'max', 'min', 'count', 'sum', 'avg']:
                                if fk in p_str.lower():
                                    flag2 = True
                                    break
                            if flag2:
                                break
                            if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
                                pass
                            break
                        vis.add(ss)
                    if flag2 is False:
                        continue
                    # Heuristic filter 2: reject '<col> = 1' conditions on a
                    # column that already appeared in the SELECT clause.
                    slist = p_str.lower().split(' ')
                    for i in range(len(slist)-2):
                        ss = slist[i]
                        if slist[i+1] == '=' and slist[i+2] == '1':
                            if ss in vis:
                                if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
                                    pass
                                flag2 = False
                                break
                    if flag2 == False:
                        continue
                    # Voting: merge the score into an existing exact-match
                    # equivalent candidate, otherwise add a new entry.
                    flag = False
                    for i in range(len(p_sql_socre)):
                        sql1 = p_sql_socre[i][0]
                        if cmp(sql1, p_sql, kmaps[db_name], evaluator2, schema):
                            #print('+++')
                            p_sql_socre[i] = (sql1, (p_sql_socre[i][1][0]+cur_s, p_sql_socre[i][1][1]) )
                            flag = True
                            # Sanity checks: gold-equality must be consistent
                            # across equivalent candidates.
                            if cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema):
                                assert cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema)
                            if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
                                assert cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema)
                            break
                    if flag == False:
                        p_sql_socre.append( (p_sql, (cur_s, p_str) ) )
            # ---- pick the candidate group with the highest summed score ----
            p_sql = None
            max_socre = -100
            p_str = "error"
            for i in range(len(p_sql_socre)):
                sql1 = p_sql_socre[i][0]
                cur_s = p_sql_socre[i][1][0]
                cur_p_str = p_sql_socre[i][1][1]
                if p_sql == None or max_socre < cur_s:
                    p_sql = sql1
                    max_socre = cur_s
                    p_str = cur_p_str
            # Dead fallback (disabled via the `if False`): pick the longest
            # parseable candidate when nothing survived the filters.
            if False and p_sql is None:
                print('p', p)
                print('s', s)
                for pi in p:
                    if p_sql == None or len(p_str.split(' ')) < len(pi.split(' ')):
                        try:
                            pi = pi.replace("value", "1")
                            p_sql = get_sql(schema, pi)
                            p_str = pi
                        except:
                            pass
            if p_sql is None:
                # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
                p_sql = {
                    "except": None,
                    "from": {
                        "conds": [],
                        "table_units": []
                    },
                    "groupBy": [],
                    "having": [],
                    "intersect": None,
                    "limit": None,
                    "orderBy": [],
                    "select": [
                        False,
                        []
                    ],
                    "union": None,
                    "where": []
                }
                eval_err_num += 1
                print("eval_err_num:{}".format(eval_err_num))
            # rebuild sql for value evaluation
            kmap = kmaps[db_name]
            g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
            g_sql = rebuild_sql_val(g_sql)
            g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
            p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
            p_sql = rebuild_sql_val(p_sql)
            p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
            if etype in ["all", "exec"]:
                exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
                if exec_score:
                    scores[hardness]['exec'] += 1
                    scores[turn_id]['exec'] += 1
                    turn_scores['exec'].append(1)
                else:
                    turn_scores['exec'].append(0)
            if etype in ["all", "match"]:
                exact_score = evaluator.eval_exact_match(p_sql, g_sql)
                partial_scores = evaluator.partial_scores
                if exact_score == 0:
                    turn_scores['exact'].append(0)
                    print('question: {}'.format(question))
                    print("{} pred: {}".format(hardness, p_str))
                    print("{} gold: {}".format(hardness, g_str))
                    print('')
                else:
                    print("Right")
                    print('question', question)
                    print("{} pred: {}".format(hardness, p_str))
                    print("{} gold: {}".format(hardness, g_str))
                    print('')
                    turn_scores['exact'].append(1)
                # print(p_str)
                predict_str += p_str + '\n'
                scores[turn_id]['exact'] += exact_score
                scores[hardness]['exact'] += exact_score
                scores['all']['exact'] += exact_score
                # Accumulate partial scores; acc only counts when the component
                # was predicted, rec only when it exists in the gold.
                for type_ in partial_types:
                    if partial_scores[type_]['pred_total'] > 0:
                        scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores[hardness]['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores[hardness]['partial'][type_]['rec_count'] += 1
                    scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                    if partial_scores[type_]['pred_total'] > 0:
                        scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores['all']['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores['all']['partial'][type_]['rec_count'] += 1
                    scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
                entries.append({
                    'predictSQL': p_str,
                    'goldSQL': g_str,
                    'hardness': hardness,
                    'exact': exact_score,
                    'partial': partial_scores
                })
        # Interaction-level (joint) scores: every turn must be correct.
        if all(v == 1 for v in turn_scores["exec"]):
            scores['joint_all']['exec'] += 1
        if all(v == 1 for v in turn_scores["exact"]):
            scores['joint_all']['exact'] += 1
        predict_str += '\n'
        predict_file.write(predict_str)
    # ---- normalize the accumulated sums into averages ----
    for turn in turns:
        if scores[turn]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[turn]['exec'] /= scores[turn]['count']
        if etype in ["all", "match"]:
            scores[turn]['exact'] /= scores[turn]['count']
    for level in levels:
        if scores[level]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[level]['exec'] /= scores[level]['count']
        if etype in ["all", "match"]:
            scores[level]['exact'] /= scores[level]['count']
        for type_ in partial_types:
            if scores[level]['partial'][type_]['acc_count'] == 0:
                scores[level]['partial'][type_]['acc'] = 0
            else:
                scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
                    scores[level]['partial'][type_]['acc_count'] * 1.0
            if scores[level]['partial'][type_]['rec_count'] == 0:
                scores[level]['partial'][type_]['rec'] = 0
            else:
                scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
                    scores[level]['partial'][type_]['rec_count'] * 1.0
            # NOTE(review): f1 is set to 1 when both acc and rec are 0 — this
            # matches the reference Spider script but is surprising; verify.
            if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                scores[level]['partial'][type_]['f1'] = 1
            else:
                scores[level]['partial'][type_]['f1'] = \
                    2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
                    scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
    print_scores(scores, etype)
    predict_file.close()
import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def build_foreign_key_map(entry):
    """Map each column identifier to the canonical member of its FK group.

    *entry* is one database record from tables.json; the returned dict maps
    '__table.column__' identifiers (same form as Schema.idMap) of every
    foreign-key-linked column to the group's smallest-index column.
    """
    raw_cols = entry["column_names_original"]
    raw_tables = entry["table_names_original"]
    # Render each column in the '__table.column__' form used by Schema;
    # the star column (table index -1) becomes '__all__'.
    idents = []
    for tab_idx, col_name in raw_cols:
        if tab_idx < 0:
            idents.append("__all__")
        else:
            idents.append("__" + raw_tables[tab_idx].lower() + "." + col_name.lower() + "__")

    def find_group(a, b, groups):
        # Return the first existing group containing a or b, else start a new one.
        for grp in groups:
            if a in grp or b in grp:
                return grp
        grp = set()
        groups.append(grp)
        return grp

    # Cluster column indices that are linked by foreign keys.
    groups = []
    for a, b in entry["foreign_keys"]:
        grp = find_group(a, b, groups)
        grp.add(a)
        grp.add(b)

    # Point every member at the group's smallest column index.
    fk_map = {}
    for grp in groups:
        members = sorted(grp)
        canon = members[0]
        for col_idx in members:
            fk_map[idents[col_idx]] = idents[canon]
    return fk_map
def build_foreign_key_map_from_json(table):
    """Load tables.json at path *table* and return db_id -> foreign-key map."""
    with open(table) as handle:
        entries = json.load(handle)
    return {entry['db_id']: build_foreign_key_map(entry) for entry in entries}
import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
# Tokens that the candidate-filtering heuristics in evaluate() skip when
# scanning a predicted SQL string for suspicious duplicated tokens.
stop_word = set(['_UNK', '_EOS', 'select', 'value', ')', '(', 'where', '=', ',', 'count', 'group_by', 'order_by', 'limit_value', 'desc', '>', 'distinct', 'and', 'avg', 'having', '<', 'in', 'sum', 'max', 'asc', 'not', 'or', 'like', 'min', 'intersect', 'except', '!=', 'union', 'between', '-', '+', '0'])
# Also ignore every SQL keyword group used by the parser.
# NOTE(review): CLAUSE_KEYWORDS etc. are not imported in this chunk —
# presumably module-level constants defined elsewhere in this file; verify.
stop_word |= set(CLAUSE_KEYWORDS)
stop_word |= set(JOIN_KEYWORDS)
stop_word |= set(WHERE_OPS)
stop_word |= set(UNIT_OPS)
stop_word |= set(AGG_OPS)
stop_word |= set(COND_OPS)
stop_word |= set(SQL_OPS)
stop_word |= set(ORDER_OPS)
class Evaluator:
    """A simple evaluator"""
    def __init__(self):
        # Per-component scores of the most recent eval_exact_match() call.
        self.partial_scores = None
    def eval_hardness(self, sql):
        # Bucket a parsed SQL dict into Spider's easy/medium/hard/extra levels.
        # Relies on the module-level count_component1 / count_component2 /
        # count_others helpers (defined elsewhere in this file).
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
            return "easy"
        elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
                (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
            return "medium"
        elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
                (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
                (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
            return "hard"
        else:
            return "extra"
    def eval_exact_match(self, pred, label):
        # Exact match: every partial component must score f1 == 1 and, when the
        # gold query has FROM tables, the table sets must be identical.
        # Returns a bool in the table-comparison case, otherwise 0/1 — callers
        # only rely on truthiness (or `== True`).
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for key, score in partial_scores.items():
            if score['f1'] != 1:
                return 0
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return label_tables == pred_tables
        return 1
    def eval_partial_match(self, pred, label):
        # Component-wise comparison. Each entry records acc/rec/f1 plus the
        # gold ('label_total') and predicted ('pred_total') component counts.
        # The eval_* helpers are module-level functions defined elsewhere in
        # this file.
        res = {}
        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_group(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_having(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_order(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_and_or(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_IUEN(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_keywords(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        return res
def print_scores(scores, etype):
    """Pretty-print the accumulated evaluation score tables.

    Args:
        scores: dict keyed both by difficulty level ('easy'..'joint_all') and
            by turn bucket ('turn 1'..'turn >4'); each entry holds 'count',
            'exec', 'exact' and (per level) 'partial' sub-scores.
        etype: which sections to print — 'exec', 'match' or 'all'.
    """
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', "joint_all"]
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    # ---- per-difficulty tables ----
    print("{:20} {:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
    counts = [scores[level]['count'] for level in levels]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    if etype in ["all", "exec"]:
        print('===================== EXECUTION ACCURACY =====================')
        this_scores = [scores[level]['exec'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
    if etype in ["all", "match"]:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
    # ---- per-turn tables ----
    print("\n\n{:20} {:20} {:20} {:20} {:20} {:20}".format("", *turns))
    counts = [scores[turn]['count'] for turn in turns]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    if etype in ["all", "exec"]:
        # Header typo fixed: was 'TRUN XECUTION ACCURACY'.
        print('===================== TURN EXECUTION ACCURACY =====================')
        this_scores = [scores[turn]['exec'] for turn in turns]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
    if etype in ["all", "match"]:
        # Header typo fixed: was 'TRUN EXACT MATCHING ACCURACY'.
        print('\n====================== TURN EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[turn]['exact'] for turn in turns]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
def cmp(sql1, sql2, kmap, evaluator2, schema):
    """Return True iff the two parsed SQL dicts are an exact match.

    Both inputs are deep-copied and normalized (literal values stripped,
    columns mapped through the foreign-key map *kmap*) before the
    comparison, so the caller's dicts are left untouched.
    """
    left = copy.deepcopy(sql1)
    right = copy.deepcopy(sql2)
    # Normalize the second query (the reference side).
    right_cols = build_valid_col_units(right['from']['table_units'], schema)
    right = rebuild_sql_val(right)
    right = rebuild_sql_col(right_cols, right, kmap)
    # Normalize the first query (the candidate side) the same way.
    left_cols = build_valid_col_units(left['from']['table_units'], schema)
    left = rebuild_sql_val(left)
    left = rebuild_sql_col(left_cols, left, kmap)
    exact_score = evaluator2.eval_exact_match(left, right)
    return exact_score == True
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    return 1 if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).
    """
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    try:
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # An unexecutable prediction can never match the gold result.
            return False
        # The gold query is assumed valid; let any error propagate loudly.
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        # The original leaked one connection per call; always release it.
        conn.close()

    def res_map(res, val_units):
        # Key each selected val_unit so the comparison is independent of the
        # SELECT column order; the column of fetched values is the payload.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
def rebuild_sql_val(sql):
    """Strip literal values from every condition of *sql*, recursively.

    A no-op when *sql* is None or value normalization is turned off via the
    module-level DISABLE_VALUE flag; otherwise mutates and returns *sql*.
    """
    if sql is None or not DISABLE_VALUE:
        return sql
    # Normalize the condition lists of every clause that can hold values.
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    for clause in ('having', 'where'):
        sql[clause] = rebuild_condition_val(sql[clause])
    # Recurse into compound set-operation subqueries.
    for branch in ('intersect', 'except', 'union'):
        sql[branch] = rebuild_sql_val(sql[branch])
    return sql
def build_valid_col_units(table_units, schema):
    """Collect schema id-map identifiers whose table appears in *table_units*."""
    # Identifiers look like '__table__'; dropping the trailing '__' yields the
    # prefix shared by that table's '__table.column__' entries.
    prefixes = [
        unit[1][:-2]
        for unit in table_units
        if unit[0] == TABLE_TYPE['table_unit']
    ]
    return [
        ident
        for ident in schema.idMap.values()
        if '.' in ident and ident[:ident.index('.')] in prefixes
    ]
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Normalize every column reference of *sql* through the foreign-key map.

    Mutates and returns *sql* (None passes through). Recurses into
    intersect/except/union subqueries. Relies on the module-level
    rebuild_*_col helpers defined elsewhere in this file.
    """
    if sql is None:
        return sql
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)
    sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], kmap)
    sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap)
    sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap)
    return sql
class Schema:
    """
    Simple schema which maps table&column to a unique identifier
    """
    def __init__(self, schema):
        self._schema = schema
        self._idMap = self._map(self._schema)

    # Bug fix: schema/idMap were plain methods, but every caller in this file
    # uses attribute access (e.g. `schema.schema`, `schema.idMap.values()`),
    # which would hand them a bound method. Expose them as properties.
    @property
    def schema(self):
        # Raw {table: [column, ...]} mapping, names as originally given.
        return self._schema

    @property
    def idMap(self):
        # Lower-cased '*' / 'table.column' / 'table' -> '__identifier__' map.
        return self._idMap

    def _map(self, schema):
        """Build the identifier map: '*', every 'table.column', every 'table'."""
        idMap = {"*": "__all__"}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + "." + val.lower()] = (
                    "__" + key.lower() + "." + val.lower() + "__"
                )
        for key in schema:
            idMap[key.lower()] = "__" + key.lower() + "__"
        return idMap
def get_schema(db):
    """
    Get database's schema, which is a dict with table name as key
    and list of column names as value
    :param db: database path
    :return: schema dict
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        # fetch table names
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]
        # fetch table info
        # NOTE: the table name is interpolated into the PRAGMA; the names come
        # from sqlite_master of the same database, not from user input.
        for table in tables:
            cursor.execute("PRAGMA table_info({})".format(table))
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        # The original leaked one connection per call; always release it.
        conn.close()
    return schema
def get_sql(schema, query):
    """Parse *query* into the nested SQL dict used by the evaluator.

    NOTE(review): parse_sql is not among the names imported from process_sql
    in this chunk — presumably it is available at module level elsewhere in
    the file; verify, otherwise this raises NameError at call time.
    """
    toks = tokenize(query)
    tables_with_alias = get_tables_with_alias(schema.schema, toks)
    _, sql = parse_sql(toks, 0, tables_with_alias, schema)
    return sql
def evaluate(gold, predict, db_dir, etype, kmaps):
with open(gold) as f:
glist = []
gseq_one = []
for l in f.readlines():
if len(l.strip()) == 0:
glist.append(gseq_one)
gseq_one = []
else:
lstrip = l.strip().split('\t')
gseq_one.append(lstrip)
#glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
with open(predict) as f:
plist = []
pseq_one = []
p_socre_list = []
pseq_score_one = []
question_list = []
question_one = []
while True:
l = f.readline()
if l == "":
break
if len(l.strip()) == 0:
plist.append(pseq_one)
pseq_one = []
p_socre_list.append(pseq_score_one)
pseq_score_one = []
question_list.append(question_one)
question_one = []
else:
x = l.strip().split('\t')
pseq_one.append(x)
l2 = f.readline()
y = l2.strip().split('\t')
y = [math.exp(-float(s)) for s in y]
assert len(x) == len(y)
pseq_score_one.append(y)
question_one.append(f.readline().strip())
#print('len(x)', len(x))
#plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
# plist = [[("select product_type_code from products group by product_type_code order by count ( * ) desc limit value", "orchestra")]]
# glist = [[("SELECT product_type_code FROM Products GROUP BY product_type_code ORDER BY count(*) DESC LIMIT 1", "customers_and_orders")]]
evaluator = Evaluator()
evaluator2 = Evaluator()
turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
'group', 'order', 'and/or', 'IUEN', 'keywords']
entries = []
scores = {}
for turn in turns:
scores[turn] = {'count': 0, 'exact': 0.}
scores[turn]['exec'] = 0
for level in levels:
scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
scores[level]['exec'] = 0
for type_ in partial_types:
scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
eval_err_num = 0
n1 = 0
n2 = 0
n3 = 0
predict_file = open("./predict.txt", "w")
for p, g, s, questions in zip(plist, glist, p_socre_list, question_list):
scores['joint_all']['count'] += 1
turn_scores = {"exec": [], "exact": []}
predict_str = ''
for idx, pgs in enumerate(zip(p, g, s, questions)):
p, g, s, question = pgs
#p_str = p[0]
#p_str = p_str.replace("value", "1")
g_str, db = g
db_name = db
db = os.path.join(db_dir, db, db + ".sqlite")
schema = Schema(get_schema(db))
try:
g_sql = get_sql(schema, g_str)
except:
continue
hardness = evaluator.eval_hardness(g_sql)
ori_idx = idx
if idx > 3:
idx = ">4"
else:
idx += 1
turn_id = "turn " + str(idx)
scores[turn_id]['count'] += 1
scores[hardness]['count'] += 1
scores['all']['count'] += 1
p_sql = None
flag = False
p_sql_socre = []
for p_str, s in zip(p, s):
cur_s = s
flag2 = False
try:
p_str = p_str.replace("value", "1")
p_sql = get_sql(schema, p_str)
flag2 = True
except:
pass
if flag2:
vis = set()
for ss in p_str.split(' '):
ss = ss.lower()
if ss == 'from':
break
if ss in stop_word:
continue
if ss in vis:
flag2 = False
for fk in ['none', 'max', 'min', 'count', 'sum', 'avg']:
if fk in p_str.lower():
flag2 = True
break
if flag2:
break
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
pass
break
vis.add(ss)
if flag2 is False:
continue
slist = p_str.lower().split(' ')
for i in range(len(slist)-2):
ss = slist[i]
if slist[i+1] == '=' and slist[i+2] == '1':
if ss in vis:
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
pass
flag2 = False
break
if flag2 == False:
continue
flag = False
for i in range(len(p_sql_socre)):
sql1 = p_sql_socre[i][0]
if cmp(sql1, p_sql, kmaps[db_name], evaluator2, schema):
#print('+++')
p_sql_socre[i] = (sql1, (p_sql_socre[i][1][0]+cur_s, p_sql_socre[i][1][1]) )
flag = True
if cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema):
assert cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema)
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
assert cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema)
break
if flag == False:
p_sql_socre.append( (p_sql, (cur_s, p_str) ) )
p_sql = None
max_socre = -100
p_str = "error"
for i in range(len(p_sql_socre)):
sql1 = p_sql_socre[i][0]
cur_s = p_sql_socre[i][1][0]
cur_p_str = p_sql_socre[i][1][1]
if p_sql == None or max_socre < cur_s:
p_sql = sql1
max_socre = cur_s
p_str = cur_p_str
if False and p_sql is None:
print('p', p)
print('s', s)
for pi in p:
if p_sql == None or len(p_str.split(' ')) < len(pi.split(' ')):
try:
pi = pi.replace("value", "1")
p_sql = get_sql(schema, pi)
p_str = pi
except:
pass
if p_sql is None:
# If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
p_sql = {
"except": None,
"from": {
"conds": [],
"table_units": []
},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [
False,
[]
],
"union": None,
"where": []
}
eval_err_num += 1
print("eval_err_num:{}".format(eval_err_num))
# rebuild sql for value evaluation
kmap = kmaps[db_name]
g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
g_sql = rebuild_sql_val(g_sql)
g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
if etype in ["all", "exec"]:
exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
if exec_score:
scores[hardness]['exec'] += 1
scores[turn_id]['exec'] += 1
turn_scores['exec'].append(1)
else:
turn_scores['exec'].append(0)
if etype in ["all", "match"]:
exact_score = evaluator.eval_exact_match(p_sql, g_sql)
partial_scores = evaluator.partial_scores
if exact_score == 0:
turn_scores['exact'].append(0)
"""
print('question: {}'.format(question))
print("{} pred: {}".format(hardness, p_str))
print("{} gold: {}".format(hardness, g_str))
print('')
"""
else:
"""
print("Right")
print('question', question)
print("{} pred: {}".format(hardness, p_str))
print("{} gold: {}".format(hardness, g_str))
print('')
"""
turn_scores['exact'].append(1)
print(p_str)
predict_str += p_str + '\n'
scores[turn_id]['exact'] += exact_score
scores[hardness]['exact'] += exact_score
scores['all']['exact'] += exact_score
for type_ in partial_types:
if partial_scores[type_]['pred_total'] > 0:
scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores[hardness]['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores[hardness]['partial'][type_]['rec_count'] += 1
scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
if partial_scores[type_]['pred_total'] > 0:
scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores['all']['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores['all']['partial'][type_]['rec_count'] += 1
scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
entries.append({
'predictSQL': p_str,
'goldSQL': g_str,
'hardness': hardness,
'exact': exact_score,
'partial': partial_scores
})
if all(v == 1 for v in turn_scores["exec"]):
scores['joint_all']['exec'] += 1
if all(v == 1 for v in turn_scores["exact"]):
scores['joint_all']['exact'] += 1
predict_str += '\n'
predict_file.write(predict_str)
for turn in turns:
if scores[turn]['count'] == 0:
continue
if etype in ["all", "exec"]:
scores[turn]['exec'] /= scores[turn]['count']
if etype in ["all", "match"]:
scores[turn]['exact'] /= scores[turn]['count']
for level in levels:
if scores[level]['count'] == 0:
continue
if etype in ["all", "exec"]:
scores[level]['exec'] /= scores[level]['count']
if etype in ["all", "match"]:
scores[level]['exact'] /= scores[level]['count']
for type_ in partial_types:
if scores[level]['partial'][type_]['acc_count'] == 0:
scores[level]['partial'][type_]['acc'] = 0
else:
scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
scores[level]['partial'][type_]['acc_count'] * 1.0
if scores[level]['partial'][type_]['rec_count'] == 0:
scores[level]['partial'][type_]['rec'] = 0
else:
scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
scores[level]['partial'][type_]['rec_count'] * 1.0
if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
scores[level]['partial'][type_]['f1'] = 1
else:
scores[level]['partial'][type_]['f1'] = \
2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
print_scores(scores, etype)
predict_file.close() | null |
165,399 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def build_foreign_key_map(entry):
def build_foreign_key_map_from_json(table):
    """Load the Spider ``tables.json`` file at *table* and return a dict
    mapping each ``db_id`` to its foreign-key map (via build_foreign_key_map)."""
    with open(table) as handle:
        db_entries = json.load(handle)
    return {entry['db_id']: build_foreign_key_map(entry) for entry in db_entries}
165,420 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
# Vocabulary of SQL keywords/operators that the repeated-token heuristic in
# evaluate() ignores when scanning a predicted query for suspicious duplicates.
stop_word = set(['_UNK', '_EOS', 'select', 'value', ')', '(', 'where', '=', ',', 'count', 'group_by', 'order_by', 'limit_value', 'desc', '>', 'distinct', 'and', 'avg', 'having', '<', 'in', 'sum', 'max', 'asc', 'not', 'or', 'like', 'min', 'intersect', 'except', '!=', 'union', 'between', '-', '+', '0'])
# NOTE(review): CLAUSE_KEYWORDS, JOIN_KEYWORDS, WHERE_OPS, UNIT_OPS, AGG_OPS,
# COND_OPS, SQL_OPS and ORDER_OPS are not defined in this excerpt — presumably
# they come from the surrounding evaluation module; confirm they are in scope.
stop_word |= set(CLAUSE_KEYWORDS)
stop_word |= set(JOIN_KEYWORDS)
stop_word |= set(WHERE_OPS)
stop_word |= set(UNIT_OPS)
stop_word |= set(AGG_OPS)
stop_word |= set(COND_OPS)
stop_word |= set(SQL_OPS)
stop_word |= set(ORDER_OPS)
class Evaluator:
    """A simple evaluator"""
    # Scores exact-match and partial-match agreement between a predicted and a
    # gold parsed-SQL dict.  Relies on module-level helpers (count_component1,
    # eval_sel, eval_where, get_scores, ...) that are defined outside this
    # excerpt — presumably in the full evaluation module; confirm.

    def __init__(self):
        # Cache of the partial scores computed by the most recent
        # eval_exact_match / eval_partial_match call; read by evaluate().
        self.partial_scores = None

    def eval_hardness(self, sql):
        """Classify a parsed gold query as easy / medium / hard / extra
        based on counts of its structural components."""
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
            return "easy"
        elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
                (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
            return "medium"
        elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
                (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
                (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
            return "hard"
        else:
            return "extra"

    def eval_exact_match(self, pred, label):
        """Return a truthy value iff every partial component matches exactly.

        Side effect: stores the per-component scores in self.partial_scores.
        NOTE: the return value is 0, 1, or a bool (the table-list comparison),
        so callers should treat it as truthy/falsy rather than type-check it.
        """
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for key, score in partial_scores.items():
            if score['f1'] != 1:
                return 0
        # All components matched; additionally require the same FROM tables.
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return label_tables == pred_tables
        return 1

    def eval_partial_match(self, pred, label):
        """Compute acc/rec/f1 (plus label/pred totals) for each SQL component
        and return them keyed by component name."""
        res = {}
        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_group(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_having(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_order(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_and_or(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_IUEN(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        label_total, pred_total, cnt = eval_keywords(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
        return res
def print_scores(scores, etype):
    """Pretty-print the accumulated evaluation tables.

    Args:
        scores: the nested dict built by evaluate() — per-level and per-turn
            entries with 'count', 'exact', 'exec' and (per level) 'partial'.
        etype: 'all', 'exec' or 'match' — selects which tables are printed.

    Fix: the per-turn section headers were misspelled
    ("TRUN XECUTION" / "TRUN EXACT") — corrected to "TURN EXECUTION" /
    "TURN EXACT".
    """
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', "joint_all"]
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    # Per-hardness-level tables.
    print("{:20} {:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
    counts = [scores[level]['count'] for level in levels]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    if etype in ["all", "exec"]:
        print('===================== EXECUTION ACCURACY =====================')
        this_scores = [scores[level]['exec'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
    if etype in ["all", "match"]:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
    # Per-turn (dialogue position) tables.
    print("\n\n{:20} {:20} {:20} {:20} {:20} {:20}".format("", *turns))
    counts = [scores[turn]['count'] for turn in turns]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    if etype in ["all", "exec"]:
        print('===================== TURN EXECUTION ACCURACY =====================')
        this_scores = [scores[turn]['exec'] for turn in turns]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
    if etype in ["all", "match"]:
        print('\n====================== TURN EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[turn]['exact'] for turn in turns]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
def cmp(sql1, sql2, kmap, evaluator2, schema):
    """Return True iff the two parsed-SQL dicts are an exact match after
    value stripping and foreign-key column canonicalisation.

    Args:
        sql1, sql2: parsed SQL dicts (as produced by get_sql).
        kmap: foreign-key map for the database.
        evaluator2: a scratch Evaluator (kept separate so its partial_scores
            cache does not clobber the main evaluator's).
        schema: Schema for the database.

    NOTE: this function shadows the Python-2-era builtin name ``cmp``.
    """
    # Deep-copy both trees: rebuild_sql_val / rebuild_sql_col mutate their
    # argument in place and callers reuse the originals.
    p_sql = copy.deepcopy(sql1)
    g_sql = copy.deepcopy(sql2)
    g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
    g_sql = rebuild_sql_val(g_sql)
    g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
    p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
    p_sql = rebuild_sql_val(p_sql)
    p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
    exact_score = evaluator2.eval_exact_match(p_sql, g_sql)
    # eval_exact_match may return an int (0/1) or a bool; normalise instead of
    # the original's ``exact_score == True`` comparison-to-True anti-idiom.
    return bool(exact_score)
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    return 1 if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).

    Args:
        db: path to the sqlite database file.
        p_str, g_str: predicted / gold SQL strings to execute.
        pred, gold: the corresponding parsed-SQL dicts (used to key result
            columns by their select val_units).

    Returns False when the predicted SQL fails to execute; a failing gold
    query is allowed to raise (it signals a dataset problem, not a miss).

    Fix: the original leaked the sqlite connection on every call (including
    the early ``return False`` path); it is now closed in a ``finally``.
    """
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # Predicted SQL is not executable -> no execution match.
            return False
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        conn.close()

    def res_map(res, val_units):
        # Key each result column by its val_unit so column order is ignored.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
def rebuild_sql_val(sql):
    """Recursively normalise literal values in every condition of a parsed
    SQL dict (in place), descending into set-operation subqueries."""
    # Nothing to do for an absent subtree, or when value rewriting is off.
    if sql is None or not DISABLE_VALUE:
        return sql
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    for cond_key in ('having', 'where'):
        sql[cond_key] = rebuild_condition_val(sql[cond_key])
    for set_op in ('intersect', 'except', 'union'):
        sql[set_op] = rebuild_sql_val(sql[set_op])
    return sql
def build_valid_col_units(table_units, schema):
    """Return the schema column identifiers ("__table.col__") that belong to
    the tables referenced by *table_units*."""
    table_kind = TABLE_TYPE['table_unit']
    # A table id "__tab__" sliced with [:-2] gives "__tab", which is exactly
    # the prefix of that table's column ids "__tab.col__".
    prefixs = [unit[1][:-2] for unit in table_units if unit[0] == table_kind]
    return [
        ident
        for ident in schema.idMap.values()
        if '.' in ident and ident[:ident.index('.')] in prefixs
    ]
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Canonicalise column references in every clause of a parsed SQL dict
    (in place) using the foreign-key map, recursing into set operations."""
    if sql is None:
        return sql
    clause_rebuilders = (
        ('select', rebuild_select_col),
        ('from', rebuild_from_col),
        ('where', rebuild_condition_col),
        ('groupBy', rebuild_group_by_col),
        ('orderBy', rebuild_order_by_col),
        ('having', rebuild_condition_col),
    )
    for clause, rebuilder in clause_rebuilders:
        sql[clause] = rebuilder(valid_col_units, sql[clause], kmap)
    for set_op in ('intersect', 'except', 'union'):
        sql[set_op] = rebuild_sql_col(valid_col_units, sql[set_op], kmap)
    return sql
class Schema:
    """
    Simple schema which maps table&column to a unique identifier.

    ``idMap`` maps "table.column" / "table" / "*" (all lower-cased) to
    identifiers of the form "__table.column__" / "__table__" / "__all__".

    Fix: ``schema`` and ``idMap`` were plain methods, but every caller in this
    module uses attribute access (``schema.idMap.values()``,
    ``schema.schema``) — they are now properties. Also removed a dead ``id``
    counter that shadowed the builtin.
    """

    def __init__(self, schema):
        # schema: dict mapping table name -> list of column names.
        self._schema = schema
        self._idMap = self._map(self._schema)

    @property
    def schema(self):
        return self._schema

    @property
    def idMap(self):
        return self._idMap

    def _map(self, schema):
        """Build the lower-cased name -> "__...__" identifier mapping."""
        idMap = {"*": "__all__"}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + "." + val.lower()] = (
                    "__" + key.lower() + "." + val.lower() + "__"
                )
        for key in schema:
            idMap[key.lower()] = "__" + key.lower() + "__"
        return idMap
def get_schema(db):
    """
    Get database's schema, which is a dict with table name as key
    and list of column names as value
    :param db: database path
    :return: schema dict

    Fix: the sqlite connection was never closed; it is now released in a
    ``finally`` block.
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()

        # fetch table names
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]

        # fetch table info (PRAGMA does not support parameter binding, so the
        # table name — read from sqlite_master itself — is formatted in)
        for table in tables:
            cursor.execute("PRAGMA table_info({})".format(table))
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        conn.close()
    return schema
def get_sql(schema, query):
    """Tokenise *query* and parse it into the Spider SQL dict for *schema*."""
    # NOTE(review): parse_sql is not imported in this excerpt (only get_sql is
    # imported from process_sql at the top) — confirm it is in scope in the
    # full module.
    tokens = tokenize(query)
    alias_map = get_tables_with_alias(schema.schema, tokens)
    _, parsed_sql = parse_sql(tokens, 0, alias_map, schema)
    return parsed_sql
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Score reranked multi-candidate predictions against gold SQL.

    The gold file holds one "SQL\\tdb_id" line per turn, with blank lines
    separating interactions.  The predict file holds, per turn, three lines:
    tab-separated candidate SQLs, their negative-log scores, and the question;
    blank lines separate interactions.  Prints per-hardness / per-turn tables
    via print_scores and writes the chosen SQL per turn to
    ./predicted_sql.txt.

    etype selects what is scored: "exec", "match", or "all".
    kmaps maps db_id -> foreign-key map for column canonicalisation.
    """
    # --- read the gold file: list of interactions, each a list of turns ---
    with open(gold) as f:
        glist = []
        gseq_one = []
        for l in f.readlines():
            if len(l.strip()) == 0:
                glist.append(gseq_one)
                gseq_one = []
            else:
                lstrip = l.strip().split('\t')
                gseq_one.append(lstrip)
    #glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # --- read the prediction file: candidates, scores and question per turn ---
    with open(predict) as f:
        plist = []
        pseq_one = []
        p_socre_list = []
        pseq_score_one = []
        question_list = []
        question_one = []
        while True:
            l = f.readline()
            if l == "":
                break
            if len(l.strip()) == 0:
                # Blank line: close off the current interaction.
                plist.append(pseq_one)
                pseq_one = []
                p_socre_list.append(pseq_score_one)
                pseq_score_one = []
                question_list.append(question_one)
                question_one = []
            else:
                x = l.strip().split('\t')
                pseq_one.append(x)
                l2 = f.readline()
                y = l2.strip().split('\t')
                # Scores are negative log-probabilities; convert to probabilities.
                y = [math.exp(-float(s)) for s in y]
                assert len(x) == len(y)
                pseq_score_one.append(y)
                question_one.append(f.readline().strip())
                #print('len(x)', len(x))
    #plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # plist = [[("select product_type_code from products group by product_type_code order by count ( * ) desc limit value", "orchestra")]]
    # glist = [[("SELECT product_type_code FROM Products GROUP BY product_type_code ORDER BY count(*) DESC LIMIT 1", "customers_and_orders")]]
    evaluator = Evaluator()
    # Separate scratch evaluator for cmp() so partial_scores is not clobbered.
    evaluator2 = Evaluator()
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    scores = {}
    for turn in turns:
        scores[turn] = {'count': 0, 'exact': 0.}
        scores[turn]['exec'] = 0
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
    eval_err_num = 0
    n1 = 0
    n2 = 0
    n3 = 0
    predict_file = open("./predicted_sql.txt", "w")
    for p, g, s, questions in zip(plist, glist, p_socre_list, question_list):
        scores['joint_all']['count'] += 1
        turn_scores = {"exec": [], "exact": []}
        predict_str = ''
        for idx, pgs in enumerate(zip(p, g, s, questions)):
            # NOTE: this deliberately rebinds the outer loop variables p, g, s
            # to the current turn's candidate list / gold entry / score list.
            p, g, s, question = pgs
            #p_str = p[0]
            #p_str = p_str.replace("value", "1")
            g_str, db = g
            db_name = db
            db = os.path.join(db_dir, db, db + ".sqlite")
            schema = Schema(get_schema(db))
            try:
                g_sql = get_sql(schema, g_str)
            except:
                # Unparsable gold SQL: skip this turn entirely.
                continue
            hardness = evaluator.eval_hardness(g_sql)
            ori_idx = idx
            if idx > 3:
                idx = ">4"
            else:
                idx += 1
            turn_id = "turn " + str(idx)
            scores[turn_id]['count'] += 1
            scores[hardness]['count'] += 1
            scores['all']['count'] += 1
            p_sql = None
            flag = False
            # Accumulator of (parsed_sql, (summed_score, sql_string)) where
            # semantically-equal candidates pool their scores.
            p_sql_socre = []
            for p_str, s in zip(p, s):
                cur_s = s
                flag2 = False
                try:
                    p_str = p_str.replace("value", "1")
                    p_sql = get_sql(schema, p_str)
                    flag2 = True
                except:
                    pass
                if flag2:
                    # Heuristic filter: reject candidates that repeat a
                    # non-keyword token in the SELECT clause (before FROM),
                    # unless an aggregate keyword is present.
                    vis = set()
                    for ss in p_str.split(' '):
                        ss = ss.lower()
                        if ss == 'from':
                            break
                        if ss in stop_word:
                            continue
                        if ss in vis:
                            flag2 = False
                            for fk in ['none', 'max', 'min', 'count', 'sum', 'avg']:
                                if fk in p_str.lower():
                                    flag2 = True
                                    break
                            if flag2:
                                break
                            if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
                                pass
                            break
                        vis.add(ss)
                    if flag2 is False:
                        continue
                    # Second heuristic: reject "col = 1" where col already
                    # appeared in the SELECT clause.
                    slist = p_str.lower().split(' ')
                    for i in range(len(slist)-2):
                        ss = slist[i]
                        if slist[i+1] == '=' and slist[i+2] == '1':
                            if ss in vis:
                                if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
                                    pass
                                flag2 = False
                                break
                if flag2 == False:
                    continue
                # Merge this candidate into an existing semantically-equal
                # entry (summing scores), or append it as a new entry.
                flag = False
                for i in range(len(p_sql_socre)):
                    sql1 = p_sql_socre[i][0]
                    if cmp(sql1, p_sql, kmaps[db_name], evaluator2, schema):
                        p_sql_socre[i] = (sql1, (p_sql_socre[i][1][0]+cur_s, p_sql_socre[i][1][1]) )
                        flag = True
                        # Sanity checks: equality with gold must be consistent
                        # across the two equal candidates.
                        if cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema):
                            assert cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema)
                        if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
                            assert cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema)
                        break
                if flag == False:
                    p_sql_socre.append( (p_sql, (cur_s, p_str) ) )
            # Pick the candidate group with the highest pooled score.
            p_sql = None
            max_socre = -100
            p_str = "error"
            for i in range(len(p_sql_socre)):
                sql1 = p_sql_socre[i][0]
                cur_s = p_sql_socre[i][1][0]
                cur_p_str = p_sql_socre[i][1][1]
                if p_sql == None or max_socre < cur_s:
                    p_sql = sql1
                    max_socre = cur_s
                    p_str = cur_p_str
            # Disabled fallback: longest parsable raw candidate.
            if False and p_sql is None:
                print('p', p)
                print('s', s)
                for pi in p:
                    if p_sql == None or len(p_str.split(' ')) < len(pi.split(' ')):
                        try:
                            pi = pi.replace("value", "1")
                            p_sql = get_sql(schema, pi)
                            p_str = pi
                        except:
                            pass
            if p_sql is None:
                # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
                p_sql = {
                    "except": None,
                    "from": {
                        "conds": [],
                        "table_units": []
                    },
                    "groupBy": [],
                    "having": [],
                    "intersect": None,
                    "limit": None,
                    "orderBy": [],
                    "select": [
                        False,
                        []
                    ],
                    "union": None,
                    "where": []
                }
                eval_err_num += 1
                print("eval_err_num:{}".format(eval_err_num))
            # rebuild sql for value evaluation
            kmap = kmaps[db_name]
            g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
            g_sql = rebuild_sql_val(g_sql)
            g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
            p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
            p_sql = rebuild_sql_val(p_sql)
            p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
            if etype in ["all", "exec"]:
                exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
                if exec_score:
                    scores[hardness]['exec'] += 1
                    scores[turn_id]['exec'] += 1
                    turn_scores['exec'].append(1)
                else:
                    turn_scores['exec'].append(0)
            if etype in ["all", "match"]:
                exact_score = evaluator.eval_exact_match(p_sql, g_sql)
                partial_scores = evaluator.partial_scores
                if exact_score == 0:
                    turn_scores['exact'].append(0)
                    print('question: {}'.format(question))
                    print("{} pred: {}".format(hardness, p_str))
                    print("{} gold: {}".format(hardness, g_str))
                    print('')
                else:
                    print("Right")
                    print('question', question)
                    print("{} pred: {}".format(hardness, p_str))
                    print("{} gold: {}".format(hardness, g_str))
                    print('')
                    turn_scores['exact'].append(1)
                #print(p_str)
                predict_str += p_str + '\n'
                scores[turn_id]['exact'] += exact_score
                scores[hardness]['exact'] += exact_score
                scores['all']['exact'] += exact_score
                # Accumulate component-level scores for this hardness and 'all'.
                for type_ in partial_types:
                    if partial_scores[type_]['pred_total'] > 0:
                        scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores[hardness]['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores[hardness]['partial'][type_]['rec_count'] += 1
                    scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                    if partial_scores[type_]['pred_total'] > 0:
                        scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores['all']['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores['all']['partial'][type_]['rec_count'] += 1
                    scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
                entries.append({
                    'predictSQL': p_str,
                    'goldSQL': g_str,
                    'hardness': hardness,
                    'exact': exact_score,
                    'partial': partial_scores
                })
        # Interaction-level (joint) credit: every turn must be correct.
        if all(v == 1 for v in turn_scores["exec"]):
            scores['joint_all']['exec'] += 1
        if all(v == 1 for v in turn_scores["exact"]):
            scores['joint_all']['exact'] += 1
        predict_str += '\n'
        predict_file.write(predict_str)
    # --- normalise accumulated counts into averages ---
    for turn in turns:
        if scores[turn]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[turn]['exec'] /= scores[turn]['count']
        if etype in ["all", "match"]:
            scores[turn]['exact'] /= scores[turn]['count']
    for level in levels:
        if scores[level]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[level]['exec'] /= scores[level]['count']
        if etype in ["all", "match"]:
            scores[level]['exact'] /= scores[level]['count']
            for type_ in partial_types:
                if scores[level]['partial'][type_]['acc_count'] == 0:
                    scores[level]['partial'][type_]['acc'] = 0
                else:
                    scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
                        scores[level]['partial'][type_]['acc_count'] * 1.0
                if scores[level]['partial'][type_]['rec_count'] == 0:
                    scores[level]['partial'][type_]['rec'] = 0
                else:
                    scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
                        scores[level]['partial'][type_]['rec_count'] * 1.0
                if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                    scores[level]['partial'][type_]['f1'] = 1
                else:
                    scores[level]['partial'][type_]['f1'] = \
                        2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
                        scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
    print_scores(scores, etype)
    predict_file.close()
165,422 | import sys
args = sys.argv
import os
import argparse
import time
The provided code snippet includes necessary dependencies for implementing the `interpret_args` function. Write a Python function `def interpret_args()` to solve the following problem:
Interprets the command line arguments, and returns a dictionary.
Here is the function:
def interpret_args():
    """ Interprets the command line arguments, and returns a dictionary. """
    parser = argparse.ArgumentParser()
    # NOTE(review): every boolean flag below uses ``type=bool``, which is a
    # known argparse pitfall — bool("False") is True, so ANY non-empty value
    # (including "False") enables the flag. Left as-is because existing
    # launch scripts depend on the "--flag 1" calling convention.
    parser.add_argument("--no_gpus", type=bool, default=1)
    # Timestamp used to name this run's log and args files (shadows builtin id).
    id = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())

    ### Data parameters
    parser.add_argument(
        '--raw_train_filename',
        type=str,
        default='../atis_data/data/resplit/processed/train_with_tables.pkl')
    parser.add_argument(
        '--raw_dev_filename',
        type=str,
        default='../atis_data/data/resplit/processed/dev_with_tables.pkl')
    parser.add_argument(
        '--raw_validation_filename',
        type=str,
        default='../atis_data/data/resplit/processed/valid_with_tables.pkl')
    parser.add_argument(
        '--raw_test_filename',
        type=str,
        default='../atis_data/data/resplit/processed/test_with_tables.pkl')
    parser.add_argument('--data_directory', type=str, default='processed_data')
    parser.add_argument('--processed_train_filename', type=str, default='train.pkl')
    parser.add_argument('--processed_dev_filename', type=str, default='dev.pkl')
    parser.add_argument('--processed_validation_filename', type=str, default='validation.pkl')
    parser.add_argument('--processed_test_filename', type=str, default='test.pkl')
    parser.add_argument('--database_schema_filename', type=str, default=None)
    parser.add_argument('--embedding_filename', type=str, default=None)
    parser.add_argument('--input_vocabulary_filename', type=str, default='input_vocabulary.pkl')
    parser.add_argument('--output_vocabulary_filename',
                        type=str,
                        default='output_vocabulary.pkl')
    parser.add_argument('--input_key', type=str, default='nl_with_dates')
    parser.add_argument('--anonymize', type=bool, default=False)
    parser.add_argument('--anonymization_scoring', type=bool, default=False)
    parser.add_argument('--use_snippets', type=bool, default=False)
    parser.add_argument('--use_previous_query', type=bool, default=False)
    parser.add_argument('--maximum_queries', type=int, default=1)
    parser.add_argument('--use_copy_switch', type=bool, default=False)
    parser.add_argument('--use_query_attention', type=bool, default=False)
    parser.add_argument('--use_utterance_attention', type=bool, default=False)
    parser.add_argument('--freeze', type=bool, default=False)
    parser.add_argument('--scheduler', type=bool, default=False)
    # BERT-related options.
    parser.add_argument('--use_bert', type=bool, default=False)
    parser.add_argument("--bert_type_abb", type=str, help="Type of BERT model to load. e.g.) uS, uL, cS, cL, and mcS")
    parser.add_argument("--bert_input_version", type=str, default='v1')
    parser.add_argument('--fine_tune_bert', type=bool, default=False)
    parser.add_argument('--lr_bert', default=1e-5, type=float, help='BERT model learning rate.')

    ### Debugging/logging parameters
    parser.add_argument('--logdir', type=str, default='logs')
    parser.add_argument('--deterministic', type=bool, default=False)
    parser.add_argument('--num_train', type=int, default=-1)
    parser.add_argument('--logfile', type=str, default='log'+id+'.txt')
    parser.add_argument('--results_file', type=str, default='results.txt')

    ### Model architecture
    parser.add_argument('--input_embedding_size', type=int, default=300)
    parser.add_argument('--output_embedding_size', type=int, default=300)
    parser.add_argument('--encoder_state_size', type=int, default=300)
    parser.add_argument('--decoder_state_size', type=int, default=300)
    parser.add_argument('--encoder_num_layers', type=int, default=1)
    parser.add_argument('--decoder_num_layers', type=int, default=2)
    parser.add_argument('--snippet_num_layers', type=int, default=1)
    parser.add_argument('--maximum_utterances', type=int, default=5)
    parser.add_argument('--state_positional_embeddings', type=bool, default=False)
    parser.add_argument('--positional_embedding_size', type=int, default=50)
    parser.add_argument('--snippet_age_embedding', type=bool, default=False)
    parser.add_argument('--snippet_age_embedding_size', type=int, default=64)
    parser.add_argument('--max_snippet_age_embedding', type=int, default=4)
    parser.add_argument('--previous_decoder_snippet_encoding', type=bool, default=False)
    parser.add_argument('--discourse_level_lstm', type=bool, default=False)
    parser.add_argument('--use_schema_attention', type=bool, default=False)
    parser.add_argument('--use_encoder_attention', type=bool, default=False)
    parser.add_argument('--use_schema_encoder', type=bool, default=False)
    parser.add_argument('--use_schema_self_attention', type=bool, default=False)
    parser.add_argument('--use_schema_encoder_2', type=bool, default=False)

    ### Training parameters
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--train_maximum_sql_length', type=int, default=200)
    parser.add_argument('--train_evaluation_size', type=int, default=100)
    parser.add_argument('--dropout_amount', type=float, default=0.5)
    parser.add_argument('--initial_patience', type=float, default=10.)
    parser.add_argument('--patience_ratio', type=float, default=1.01)
    parser.add_argument('--initial_learning_rate', type=float, default=0.001)
    parser.add_argument('--learning_rate_ratio', type=float, default=0.8)
    parser.add_argument('--interaction_level', type=bool, default=False)
    parser.add_argument('--reweight_batch', type=bool, default=False)

    ### Setting
    parser.add_argument('--train', type=bool, default=False)
    parser.add_argument('--debug', type=bool, default=False)
    parser.add_argument('--evaluate', type=bool, default=False)
    parser.add_argument('--attention', type=bool, default=False)
    parser.add_argument('--save_file', type=str, default="")
    parser.add_argument('--enable_testing', type=bool, default=False)
    parser.add_argument('--use_predicted_queries', type=bool, default=False)
    parser.add_argument('--evaluate_split', type=str, default='dev')
    parser.add_argument('--evaluate_with_gold_forcing', type=bool, default=False)
    parser.add_argument('--eval_maximum_sql_length', type=int, default=1000)
    parser.add_argument('--results_note', type=str, default='')
    parser.add_argument('--compute_metrics', type=bool, default=False)
    parser.add_argument('--reference_results', type=str, default='')
    parser.add_argument('--interactive', type=bool, default=False)
    parser.add_argument('--database_username', type=str, default="aviarmy")
    parser.add_argument('--database_password', type=str, default="aviarmy")
    parser.add_argument('--database_timeout', type=int, default=2)
    parser.add_argument('--seed', type=int, default=141)

    ### ALiMe Plugin
    parser.add_argument('--use_transformer_relation', type=bool, default=False)

    args = parser.parse_args()

    # Side effects: ensure the log directory exists and, when training,
    # persist this run's arguments to a timestamped file.
    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)
    if not (args.train or args.evaluate or args.interactive or args.attention):
        raise ValueError('You need to be training or evaluating')
    if args.enable_testing and not args.evaluate:
        raise ValueError('You should evaluate the model if enabling testing')
    if args.train:
        args_file = args.logdir + '/args' + id +'.log'
        if os.path.exists(args_file):
            raise ValueError('Warning: arguments already exist in ' + str(args_file))
        with open(args_file, 'w') as infile:
            infile.write(str(args))
    return args
165,423 | import copy
import pymysql
import random
import signal
import sqlparse
from . import util
from .snippets import Snippet
from sqlparse import tokens as token_types
from sqlparse import sql as sql_types
def get_all_in_parens(sequence):
def split_by_conj(sequence):
def get_sql_snippets(sequence):
    """Enumerate candidate SQL snippets of ``sequence`` and print them.

    Candidates are every parenthesized subsequence plus the pieces obtained
    by splitting each such subsequence on conjunctions (AND/OR).  This is a
    debugging helper: it prints each candidate and terminates the process.
    """
    # Parenthesized subsequences seed the candidate list.
    candidates = []
    for span in get_all_in_parens(sequence):
        candidates.append(span)
        # Each span also contributes its conjunct pieces.
        candidates.extend(split_by_conj(span))
    for idx, span in enumerate(candidates):
        print(str(idx) + "\t" + " ".join(span))
    # NOTE: hard exit -- this function never returns.
    exit()
165,424 | import copy
import pymysql
import random
import signal
import sqlparse
from . import util
from .snippets import Snippet
from sqlparse import tokens as token_types
from sqlparse import sql as sql_types
def add_snippets_to_query(snippets, ignored_entities, query, prob_align=1.):
    """Collapse snippet subsequences of a gold query into snippet identifiers.

    Inputs:
        snippets (list of Snippet): Available snippets (each has .sequence/.name).
        ignored_entities (list of list of str): Entity token sequences; any
            snippet containing one of these is left unexpanded.
        query (list of str): The gold query tokens.
        prob_align (float): Probability of actually replacing each match.
    Returns:
        list of str: The query with matched snippet spans replaced by names.
    """
    working = copy.copy(query)
    # Replace the longest snippets first (sort ascending, then reverse, so
    # equal-length snippets keep the original tie order).
    longest_first = sorted(snippets, key=lambda s: len(s.sequence))[::-1]
    for snip in longest_first:
        snip_seq = snip.sequence
        # Snippets that mention an ignored entity are never substituted.
        if any(util.subsequence(entity, snip_seq) for entity in ignored_entities):
            continue
        span = len(snip_seq)
        # The scan range is fixed up front, matching the original semantics
        # even though `working` may shrink during the scan.
        for pos in range(len(working) - span + 1):
            if working[pos:pos + span] != snip_seq:
                continue
            # Randomly decide (per match) whether to align this occurrence.
            if random.random() < prob_align:
                before = len(working)
                # Put the snippet identifier at the match start, then drop
                # the tokens it absorbed.
                working[pos] = snip.name
                working = working[:pos + 1] + working[pos + span:]
                assert len(working) == before - (span - 1)
    return working
165,425 | import copy
import pymysql
import random
import signal
import sqlparse
from . import util
from .snippets import Snippet
from sqlparse import tokens as token_types
from sqlparse import sql as sql_types
def fix_parentheses(sequence):
    """Balance a token sequence that has more "(" than ")".

    Missing closing parentheses are inserted just before the final token;
    sequences that are balanced (or over-closed) are returned unchanged.
    """
    open_count = sequence.count("(")
    close_count = sequence.count(")")
    if close_count >= open_count:
        return sequence
    missing = open_count - close_count
    # Keep the last token (e.g. a terminator) in final position.
    return sequence[:-1] + [")"] * missing + [sequence[-1]]
165,426 | import nltk
import sqlparse
The provided code snippet includes necessary dependencies for implementing the `nl_tokenize` function. Write a Python function `def nl_tokenize(string)` to solve the following problem:
Tokenizes a natural language string into tokens. Inputs: string: the string to tokenize. Outputs: a list of tokens. Assumes data is space-separated (this is true of ZC07 data in ATIS2/3).
Here is the function:
def nl_tokenize(string):
    """Tokenizes a natural language string into tokens.
    Inputs:
        string: the string to tokenize.
    Outputs:
        a list of tokens.
    Assumes data is space-separated (this is true of ZC07 data in ATIS2/3).
    """
    # Delegates entirely to NLTK's default word tokenizer.
    return nltk.word_tokenize(string)
165,427 | import nltk
import sqlparse
The provided code snippet includes necessary dependencies for implementing the `sql_tokenize` function. Write a Python function `def sql_tokenize(string)` to solve the following problem:
Tokenizes a SQL statement into tokens. Inputs: string: string to tokenize. Outputs: a list of tokens.
Here is the function:
def sql_tokenize(string):
    """ Tokenizes a SQL statement into tokens.
    Inputs:
        string: string to tokenize.
    Outputs:
        a list of tokens.
    """
    tokens = []
    statements = sqlparse.parse(string)
    # SQLparse gives you a list of statements.
    for statement in statements:
        # Flatten the tokens in each statement and add to the tokens list.
        flat_tokens = sqlparse.sql.TokenList(statement.tokens).flatten()
        for token in flat_tokens:
            strip_token = str(token).strip()
            if len(strip_token) > 0:
                tokens.append(strip_token)
    # Second pass: sqlparse can emit "." as a standalone token; glue
    # "<prev> . <next>" back together into one dotted name (e.g. "t1.col").
    newtokens = []
    keep = True
    for i, token in enumerate(tokens):
        if token == ".":
            # Merge the previous emitted token, the dot, and the next raw token.
            # NOTE(review): assumes "." is never first/last -- a leading or
            # trailing dot would raise IndexError here; confirm inputs.
            newtoken = newtokens[-1] + "." + tokens[i + 1]
            newtokens = newtokens[:-1] + [newtoken]
            # The following token was consumed by the merge; skip it once.
            keep = False
        elif keep:
            newtokens.append(token)
        else:
            keep = True
    return newtokens
165,428 | import nltk
import sqlparse
The provided code snippet includes necessary dependencies for implementing the `lambda_tokenize` function. Write a Python function `def lambda_tokenize(string)` to solve the following problem:
Tokenizes a lambda-calculus statement into tokens. Inputs: string: a lambda-calculus string Outputs: a list of tokens.
Here is the function:
def lambda_tokenize(string):
    """ Tokenizes a lambda-calculus statement into tokens.
    Inputs:
        string: a lambda-calculus string
    Outputs:
        a list of tokens.
    """
    result = []
    # Split on single spaces first, then peel parentheses off each piece
    # so "(" and ")" become tokens of their own.
    for piece in string.split(" "):
        parts = []
        buffer = ""
        for ch in piece:
            if ch in "()":
                parts.append(buffer)
                parts.append(ch)
                buffer = ""
            else:
                buffer += ch
        parts.append(buffer)
        # Drop the empty fragments produced around parentheses.
        result.extend(part for part in parts if part)
    return result
165,429 | from . import anonymization as anon
from . import sql_util
from .snippets import expand_snippets
from .utterance import Utterance, OUTPUT_KEY, ANON_INPUT_KEY
import torch
class Schema:
    """In-memory view of one database schema (a tables.json entry).

    Builds two parallel, id-aligned vocabularies over the schema's columns
    (and tables):
      * ``column_names_surface_form`` / ``_to_id``: lowercase SQL-style
        identifiers, used to match decoder output.
      * ``column_names_embedder_input`` / ``_to_id``: natural-language
        strings, used to compute column embeddings.
    The same integer id indexes both views.
    """
    def __init__(self, table_schema, simple=False):
        # `simple` selects bare (unqualified) column names; otherwise
        # columns are qualified as "table.column" / "table . column".
        if simple:
            self.helper1(table_schema)
        else:
            self.helper2(table_schema)
    def helper1(self, table_schema):
        """Build the vocabularies using bare column and table names."""
        self.table_schema = table_schema
        column_names = table_schema['column_names']
        column_names_original = table_schema['column_names_original']
        table_names = table_schema['table_names']
        table_names_original = table_schema['table_names_original']
        assert len(column_names) == len(column_names_original) and len(table_names) == len(table_names_original)
        # Indices of columns that survived deduplication, so the
        # embedder-input view stays aligned with the surface-form view.
        column_keep_index = []
        self.column_names_surface_form = []
        self.column_names_surface_form_to_id = {}
        for i, (table_id, column_name) in enumerate(column_names_original):
            column_name_surface_form = column_name
            column_name_surface_form = column_name_surface_form.lower()
            if column_name_surface_form not in self.column_names_surface_form_to_id:
                self.column_names_surface_form.append(column_name_surface_form)
                self.column_names_surface_form_to_id[column_name_surface_form] = len(self.column_names_surface_form) - 1
                column_keep_index.append(i)
        # Table names are appended after the columns, deduplicated the same way.
        column_keep_index_2 = []
        for i, table_name in enumerate(table_names_original):
            column_name_surface_form = table_name.lower()
            if column_name_surface_form not in self.column_names_surface_form_to_id:
                self.column_names_surface_form.append(column_name_surface_form)
                self.column_names_surface_form_to_id[column_name_surface_form] = len(self.column_names_surface_form) - 1
                column_keep_index_2.append(i)
        self.column_names_embedder_input = []
        self.column_names_embedder_input_to_id = {}
        for i, (table_id, column_name) in enumerate(column_names):
            column_name_embedder_input = column_name
            if i in column_keep_index:
                self.column_names_embedder_input.append(column_name_embedder_input)
                self.column_names_embedder_input_to_id[column_name_embedder_input] = len(self.column_names_embedder_input) - 1
        for i, table_name in enumerate(table_names):
            column_name_embedder_input = table_name
            if i in column_keep_index_2:
                self.column_names_embedder_input.append(column_name_embedder_input)
                self.column_names_embedder_input_to_id[column_name_embedder_input] = len(self.column_names_embedder_input) - 1
        # Sanity check: ids are dense and the two vocabularies are aligned.
        max_id_1 = max(v for k,v in self.column_names_surface_form_to_id.items())
        max_id_2 = max(v for k,v in self.column_names_embedder_input_to_id.items())
        assert (len(self.column_names_surface_form) - 1) == max_id_2 == max_id_1
        self.num_col = len(self.column_names_surface_form)
    def helper2(self, table_schema):
        """Build the vocabularies using table-qualified names plus a
        "table.*" / "table . *" entry per table."""
        self.table_schema = table_schema
        column_names = table_schema['column_names']
        column_names_original = table_schema['column_names_original']
        table_names = table_schema['table_names']
        table_names_original = table_schema['table_names_original']
        assert len(column_names) == len(column_names_original) and len(table_names) == len(table_names_original)
        column_keep_index = []
        self.column_names_surface_form = []
        self.column_names_surface_form_to_id = {}
        for i, (table_id, column_name) in enumerate(column_names_original):
            # table_id < 0 marks schema-level columns (e.g. "*") with no table.
            if table_id >= 0:
                table_name = table_names_original[table_id]
                column_name_surface_form = '{}.{}'.format(table_name,column_name)
            else:
                column_name_surface_form = column_name
            column_name_surface_form = column_name_surface_form.lower()
            if column_name_surface_form not in self.column_names_surface_form_to_id:
                self.column_names_surface_form.append(column_name_surface_form)
                self.column_names_surface_form_to_id[column_name_surface_form] = len(self.column_names_surface_form) - 1
                column_keep_index.append(i)
        # One "table.*" wildcard entry per table, appended after the columns.
        start_i = len(self.column_names_surface_form_to_id)
        for i, table_name in enumerate(table_names_original):
            column_name_surface_form = '{}.*'.format(table_name.lower())
            self.column_names_surface_form.append(column_name_surface_form)
            self.column_names_surface_form_to_id[column_name_surface_form] = i + start_i
        self.column_names_embedder_input = []
        self.column_names_embedder_input_to_id = {}
        for i, (table_id, column_name) in enumerate(column_names):
            if table_id >= 0:
                table_name = table_names[table_id]
                column_name_embedder_input = table_name + ' . ' + column_name
            else:
                column_name_embedder_input = column_name
            if i in column_keep_index:
                self.column_names_embedder_input.append(column_name_embedder_input)
                self.column_names_embedder_input_to_id[column_name_embedder_input] = len(self.column_names_embedder_input) - 1
        start_i = len(self.column_names_embedder_input_to_id)
        for i, table_name in enumerate(table_names):
            column_name_embedder_input = table_name + ' . *'
            self.column_names_embedder_input.append(column_name_embedder_input)
            self.column_names_embedder_input_to_id[column_name_embedder_input] = i + start_i
        # Sanity check: both views have the same size and dense, aligned ids.
        assert len(self.column_names_surface_form) == len(self.column_names_surface_form_to_id) == len(self.column_names_embedder_input) == len(self.column_names_embedder_input_to_id)
        max_id_1 = max(v for k,v in self.column_names_surface_form_to_id.items())
        max_id_2 = max(v for k,v in self.column_names_embedder_input_to_id.items())
        assert (len(self.column_names_surface_form) - 1) == max_id_2 == max_id_1
        self.num_col = len(self.column_names_surface_form)
    def __len__(self):
        # Number of distinct column entries (including table/wildcard rows).
        return self.num_col
    def in_vocabulary(self, column_name, surface_form=False):
        """True if `column_name` exists in the selected vocabulary."""
        if surface_form:
            return column_name in self.column_names_surface_form_to_id
        else:
            return column_name in self.column_names_embedder_input_to_id
    def column_name_embedder_bow(self, column_name, surface_form=False, column_name_token_embedder=None):
        """Bag-of-words embedding: mean of the token embeddings of the
        column's natural-language name."""
        assert self.in_vocabulary(column_name, surface_form)
        if surface_form:
            # Map the SQL-style name to its natural-language counterpart.
            column_name_id = self.column_names_surface_form_to_id[column_name]
            column_name_embedder_input = self.column_names_embedder_input[column_name_id]
        else:
            column_name_embedder_input = column_name
        column_name_embeddings = [column_name_token_embedder(token) for token in column_name_embedder_input.split()]
        column_name_embeddings = torch.stack(column_name_embeddings, dim=0)
        return torch.mean(column_name_embeddings, dim=0)
    def set_column_name_embeddings(self, column_name_embeddings):
        """Attach precomputed embeddings, one per column id."""
        self.column_name_embeddings = column_name_embeddings
        assert len(self.column_name_embeddings) == self.num_col
    def column_name_embedder(self, column_name, surface_form=False):
        """Look up the precomputed embedding for `column_name`.
        Requires set_column_name_embeddings() to have been called."""
        assert self.in_vocabulary(column_name, surface_form)
        if surface_form:
            column_name_id = self.column_names_surface_form_to_id[column_name]
        else:
            column_name_id = self.column_names_embedder_input_to_id[column_name]
        return self.column_name_embeddings[column_name_id]
class Interaction:
    """ ATIS interaction class.
    Attributes:
        utterances (list of Utterance): The utterances in the interaction.
        snippets (list of Snippet): The snippets that appear through the interaction.
        anon_tok_to_ent: dict mapping anonymization tokens to entity info.
        identifier (str): Unique identifier for the interaction in the dataset.
    """
    def __init__(self,
                 utterances,
                 schema,
                 snippets,
                 anon_tok_to_ent,
                 identifier,
                 params):
        # NOTE: `params` is accepted but not stored; kept for interface
        # compatibility with callers.
        self.utterances = utterances
        self.schema = schema
        self.snippets = snippets
        self.anon_tok_to_ent = anon_tok_to_ent
        self.identifier = identifier
        # Ensure that each utterance's input and output sequences, when remapped
        # without anonymization or snippets, are the same as the original
        # version.
        for i, utterance in enumerate(self.utterances):
            deanon_input = self.deanonymize(utterance.input_seq_to_use,
                                            ANON_INPUT_KEY)
            assert deanon_input == utterance.original_input_seq, "Anonymized sequence [" \
                + " ".join(utterance.input_seq_to_use) + "] is not the same as [" \
                + " ".join(utterance.original_input_seq) + "] when deanonymized (is [" \
                + " ".join(deanon_input) + "] instead)"
            desnippet_gold = self.expand_snippets(utterance.gold_query_to_use)
            deanon_gold = self.deanonymize(desnippet_gold, OUTPUT_KEY)
            assert deanon_gold == utterance.original_gold_query, \
                "Anonymized and/or snippet'd query " \
                + " ".join(utterance.gold_query_to_use) + " is not the same as " \
                + " ".join(utterance.original_gold_query)
    def __str__(self):
        string = "Utterances:\n"
        for utterance in self.utterances:
            string += str(utterance) + "\n"
        string += "Anonymization dictionary:\n"
        for ent_tok, deanon in self.anon_tok_to_ent.items():
            string += ent_tok + "\t" + str(deanon) + "\n"
        return string
    def __len__(self):
        # An interaction's length is its number of utterances.
        return len(self.utterances)
    def deanonymize(self, sequence, key):
        """ Deanonymizes a predicted query or an input utterance.
        Inputs:
            sequence (list of str): The sequence to deanonymize.
            key (str): The key in the anonymization table, e.g. NL or SQL.
        """
        return anon.deanonymize(sequence, self.anon_tok_to_ent, key)
    def expand_snippets(self, sequence):
        """ Expands snippets for a sequence.
        Inputs:
            sequence (list of str): A SQL query.
        """
        return expand_snippets(sequence, self.snippets)
    def input_seqs(self):
        """Returns the (possibly anonymized) input sequence of each utterance."""
        in_seqs = []
        for utterance in self.utterances:
            in_seqs.append(utterance.input_seq_to_use)
        return in_seqs
    def output_seqs(self):
        """Returns the training-time gold query of each utterance."""
        out_seqs = []
        for utterance in self.utterances:
            out_seqs.append(utterance.gold_query_to_use)
        return out_seqs
class Utterance:
    """One user turn: the natural-language input plus its gold SQL query,
    after optional anonymization and snippet substitution.

    Instances with no usable input/output set ``self.keep`` to False and
    leave the derived attributes unset.
    """
    def process_input_seq(self,
                          anonymize,
                          anonymizer,
                          anon_tok_to_ent):
        """Set input_seq_to_use, anonymizing the raw tokens if requested."""
        assert not anon_tok_to_ent or anonymize
        assert not anonymize or anonymizer
        if anonymize:
            assert anonymizer
            # May add new anonymization tokens to anon_tok_to_ent.
            self.input_seq_to_use = anonymizer.anonymize(
                self.original_input_seq, anon_tok_to_ent, ANON_INPUT_KEY, add_new_anon_toks=True)
        else:
            self.input_seq_to_use = self.original_input_seq
    def process_gold_seq(self,
                         output_sequences,
                         nl_to_sql_dict,
                         available_snippets,
                         anonymize,
                         anonymizer,
                         anon_tok_to_ent):
        """Pick, anonymize, and snippet-compress the gold query."""
        # Get entities in the input sequence:
        #    anonymized entity types
        #    other recognized entities (this includes "flight")
        entities_in_input = [
            [tok] for tok in self.input_seq_to_use if tok in anon_tok_to_ent]
        entities_in_input.extend(
            nl_to_sql_dict.get_sql_entities(
                self.input_seq_to_use))
        # Get the shortest gold query (this is what we use to train)
        shortest_gold_and_results = min(output_sequences,
                                        key=lambda x: len(x[0]))
        self.original_gold_query = shortest_gold_and_results[0]
        self.gold_sql_results = shortest_gold_and_results[1]
        self.contained_entities = entities_in_input
        # Keep track of all gold queries and the resulting tables so that we can
        # give credit if it predicts a different correct sequence.
        self.all_gold_queries = output_sequences
        self.anonymized_gold_query = self.original_gold_query
        if anonymize:
            self.anonymized_gold_query = anonymizer.anonymize(
                self.original_gold_query, anon_tok_to_ent, OUTPUT_KEY, add_new_anon_toks=False)
        # Compress matching snippet spans, skipping spans that contain
        # entities from the input.
        self.gold_query_to_use = sql_util.add_snippets_to_query(
            available_snippets, entities_in_input, self.anonymized_gold_query)
    def __init__(self,
                 example,
                 available_snippets,
                 nl_to_sql_dict,
                 params,
                 anon_tok_to_ent=None,
                 anonymizer=None):
        # FIX: the default used to be a shared mutable dict ({}); since the
        # anonymizer mutates it (add_new_anon_toks=True), anonymization state
        # leaked across every Utterance built without an explicit table.
        # Use a fresh dict per call instead.
        if anon_tok_to_ent is None:
            anon_tok_to_ent = {}
        # Get output and input sequences from the dictionary representation.
        output_sequences = example[OUTPUT_KEY]
        # NOTE(review): `tokenizers` is not imported in this module chunk --
        # presumably the tokenizers module above; verify at integration.
        self.original_input_seq = tokenizers.nl_tokenize(example[params.input_key])
        self.available_snippets = available_snippets
        self.keep = False
        # Only keep this example if there is at least one output sequence
        # and a non-empty input.
        if len(output_sequences) > 0 and len(self.original_input_seq) > 0:
            self.keep = True
        if len(output_sequences) == 0 or len(self.original_input_seq) == 0:
            return
        # Process the input sequence
        self.process_input_seq(params.anonymize,
                               anonymizer,
                               anon_tok_to_ent)
        # Process the gold sequence
        self.process_gold_seq(output_sequences,
                              nl_to_sql_dict,
                              self.available_snippets,
                              params.anonymize,
                              anonymizer,
                              anon_tok_to_ent)
    def __str__(self):
        string = "Original input: " + " ".join(self.original_input_seq) + "\n"
        string += "Modified input: " + " ".join(self.input_seq_to_use) + "\n"
        string += "Original output: " + " ".join(self.original_gold_query) + "\n"
        string += "Modified output: " + " ".join(self.gold_query_to_use) + "\n"
        string += "Snippets:\n"
        for snippet in self.available_snippets:
            string += str(snippet) + "\n"
        return string
    def length_valid(self, input_limit, output_limit):
        """True when both the input and gold query are under their limits."""
        return (len(self.input_seq_to_use) < input_limit \
            and len(self.gold_query_to_use) < output_limit)
def load_function(parameters,
                  nl_to_sql_dict,
                  anonymizer,
                  database_schema=None):
    """Return a closure that converts one raw interaction dict into an
    Interaction, reporting whether any of its utterances were kept."""
    def fn(interaction_example):
        keep = False
        raw_utterances = interaction_example["interaction"]
        if "database_id" in interaction_example:
            database_id = interaction_example["database_id"]
            interaction_id = interaction_example["interaction_id"]
            identifier = database_id + '/' + str(interaction_id)
        else:
            identifier = interaction_example["id"]
        schema = None
        if database_schema:
            # NOTE(review): `database_id` is only bound in the branch above;
            # an example without "database_id" plus a non-None
            # database_schema would raise NameError here -- confirm inputs.
            if 'removefrom' not in parameters.data_directory:
                schema = Schema(database_schema[database_id], simple=True)
            else:
                schema = Schema(database_schema[database_id])
        snippet_bank = []
        utterance_examples = []
        anon_tok_to_ent = {}
        for utterance in raw_utterances:
            # Only snippets aged at most 1 turn are offered to the utterance.
            available_snippets = [
                snippet for snippet in snippet_bank if snippet.index <= 1]
            proc_utterance = Utterance(utterance,
                                       available_snippets,
                                       nl_to_sql_dict,
                                       parameters,
                                       anon_tok_to_ent,
                                       anonymizer)
            keep_utterance = proc_utterance.keep
            if schema:
                # Schema-backed datasets are expected to have no empty examples.
                assert keep_utterance
            if keep_utterance:
                keep = True
                utterance_examples.append(proc_utterance)
            # Update the snippet bank, and age each snippet in it.
            if parameters.use_snippets:
                if 'atis' in parameters.data_directory:
                    snippets = sql_util.get_subtrees(
                        proc_utterance.anonymized_gold_query,
                        proc_utterance.available_snippets)
                else:
                    snippets = sql_util.get_subtrees_simple(
                        proc_utterance.anonymized_gold_query,
                        proc_utterance.available_snippets)
                for snippet in snippets:
                    snippet.assign_id(len(snippet_bank))
                    snippet_bank.append(snippet)
            for snippet in snippet_bank:
                snippet.increase_age()
        interaction = Interaction(utterance_examples,
                                  schema,
                                  snippet_bank,
                                  anon_tok_to_ent,
                                  identifier,
                                  parameters)
        return interaction, keep
    return fn
165,430 | import copy
import json
from . import util
The provided code snippet includes necessary dependencies for implementing the `timeval` function. Write a Python function `def timeval(string)` to solve the following problem:
Returns the numeric version of a time. Inputs: string (str): String representing a time. Returns: String representing the absolute time.
Here is the function:
def timeval(string):
    """Returns the numeric version of a time.
    Inputs:
        string (str): String representing a time, e.g. "9am" or "130pm".
    Returns:
        String representing the absolute time (24-hour hundreds, e.g.
        "900", "1330"), or "" when the input is not a time.
    """
    # FIX: parenthesize the endswith checks.  The original condition
    # parsed as `am or (pm and digit)`, so any "...am" string reached
    # int() and non-numeric ones (e.g. "abcam") raised ValueError.
    if (string.endswith("am") or string.endswith("pm")) and string[:-2].isdigit():
        numval = int(string[:-2])
        if len(string) == 3 or len(string) == 4:
            # Bare hour like "9am"/"10pm": convert to hundreds (900, 1000).
            numval *= 100
        if string.endswith("pm"):
            # Shift afternoon times to the 24-hour clock.
            numval += 1200
        return str(numval)
    return ""
165,431 | import copy
import json
from . import util
The provided code snippet includes necessary dependencies for implementing the `is_time` function. Write a Python function `def is_time(string)` to solve the following problem:
Returns whether a string represents a time. Inputs: string (str): String to check. Returns: Whether the string represents a time.
Here is the function:
def is_time(string):
    """Returns whether a string represents a time.
    Inputs:
        string (str): String to check.
    Returns:
        Whether the string represents a time.
    """
    # A time is a digit string followed by an am/pm suffix, e.g. "900am".
    return string.endswith(("am", "pm")) and string[:-2].isdigit()
165,432 | import copy
import json
from . import util
The provided code snippet includes necessary dependencies for implementing the `deanonymize` function. Write a Python function `def deanonymize(sequence, ent_dict, key)` to solve the following problem:
Deanonymizes a sequence. Inputs: sequence (list of str): List of tokens to deanonymize. ent_dict (dict str->(dict str->str)): Maps from tokens to the entity dictionary. key (str): The key to use, in this case either natural language or SQL. Returns: Deanonymized sequence of tokens.
Here is the function:
def deanonymize(sequence, ent_dict, key):
    """Deanonymizes a sequence.
    Inputs:
        sequence (list of str): List of tokens to deanonymize.
        ent_dict (dict str->(dict str->str)): Maps from tokens to the entity dictionary.
        key (str): The key to use, in this case either natural language or SQL.
    Returns:
        Deanonymized sequence of tokens.
    """
    # Each anonymization token expands to its stored phrase under `key`;
    # every other token passes through as a one-element group.
    groups = [ent_dict[tok][key] if tok in ent_dict else [tok] for tok in sequence]
    return [item for group in groups for item in group]
def is_snippet(token):
    """ Determines whether a token is a snippet or not.
    Inputs:
        token (str): The token to check.
    Returns:
        bool, indicating whether it's a snippet.
    """
    # Snippet identifiers are recognized solely by the module-level prefix.
    has_prefix = token.startswith(SNIPPET_PREFIX)
    return has_prefix
The provided code snippet includes necessary dependencies for implementing the `expand_snippets` function. Write a Python function `def expand_snippets(sequence, snippets)` to solve the following problem:
Given a sequence and a list of snippets, expand the snippets in the sequence. Inputs: sequence (list of str): Query containing snippet references. snippets (list of Snippet): List of available snippets. Returns: a list of str representing the expanded sequence.
Here is the function:
def expand_snippets(sequence, snippets):
    """ Given a sequence and a list of snippets, expand the snippets in the sequence.
    Inputs:
        sequence (list of str): Query containing snippet references.
        snippets (list of Snippet): List of available snippets.
    Returns:
        list of str representing the expanded sequence.
    """
    # Index the available snippets by name; names must be unique.
    by_name = {}
    for snip in snippets:
        assert snip.name not in by_name
        by_name[snip.name] = snip
    expanded = []
    for tok in sequence:
        if tok in by_name:
            expanded.extend(by_name[tok].sequence)
        else:
            # Any token that looks like a snippet must have been resolvable.
            assert not is_snippet(tok)
            expanded.append(tok)
    return expanded
def is_snippet(token):
    """ Determines whether a token is a snippet or not.
    Inputs:
        token (str): The token to check.
    Returns:
        bool, indicating whether it's a snippet.
    """
    prefix = SNIPPET_PREFIX
    return token.startswith(prefix)
The provided code snippet includes necessary dependencies for implementing the `snippet_index` function. Write a Python function `def snippet_index(token)` to solve the following problem:
Returns the index of a snippet. Inputs: token (str): The snippet to check. Returns: integer, the index of the snippet.
Here is the function:
def snippet_index(token):
    """ Returns the index of a snippet.
    Inputs:
        token (str): The snippet to check.
    Returns:
        integer, the index of the snippet.
    """
    assert is_snippet(token)
    # The numeric index is the final "_"-separated field of the name.
    *_, index_field = token.split("_")
    return int(index_field)
165,435 | import os
import json
import copy
import torch
import sqlparse
import argparse
import subprocess
from collections import defaultdict
from reranker.predict import Ranker
import numpy
def postprocess_one(pred_sql, schema):
    """Turn one raw decoder output string into a (best-effort) executable
    SQL string for `schema`; returns "None" when nothing usable remains."""
    # Undo decoder-vocabulary artifacts.  Replacement order matters.
    for old, new in (('group_by', 'group by'),
                     ('order_by', 'order by'),
                     ('limit_value', 'limit 1'),
                     ('_EOS', ''),
                     (' value ', ' 1 '),
                     ('distinct', '')):
        pred_sql = pred_sql.replace(old, new)
    pred_sql = pred_sql.strip(',').strip()
    if pred_sql.endswith('value'):
        pred_sql = pred_sql[:-len('value')] + '1'
    # Bare except kept deliberately: any sqlparse failure falls back to the
    # partially-cleaned string.
    try:
        formatted = sqlparse.format(pred_sql, reindent=True)
    except:
        return "None" if len(pred_sql) == 0 else pred_sql
    squeezed = normalize_space(formatted)
    # More than one "select" means a nested query; route accordingly.
    if squeezed.count('select') > 1:
        final_sql = postprocess_nested(squeezed, schema)
    else:
        final_sql, _ = postprocess_single(squeezed, schema)
    return "None" if final_sql == "" else final_sql
class Ranker:
    """Thin wrapper around a trained ReRanker model: scores candidate SQL
    strings against a dialogue's utterances."""
    def __init__(self, model_path, base_model='roberta'):
        # ReRanker and RobertaTokenizer come from the surrounding module's
        # imports; only the 'roberta' base model gets a tokenizer here.
        self.model = ReRanker(base_model=base_model)
        self.model.load_state_dict(torch.load(model_path))
        if base_model == 'roberta':
            self.tokenizer = RobertaTokenizer.from_pretrained('roberta-base', max_len=128)
        print("load reranker model from ", model_path)
    def get_score(self, utterances, sql):
        """Score a single SQL candidate; returns a float."""
        # `preprocess` (defined elsewhere) builds the token/mask tensors.
        tokens_tensor, attention_mask_tensor = preprocess(utterances, sql, self.tokenizer)
        tokens_tensor = tokens_tensor.unsqueeze(0)
        attention_mask_tensor = attention_mask_tensor.unsqueeze(0)
        output = self.model(input_ids=tokens_tensor, attention_mask=attention_mask_tensor)
        output = output.squeeze(dim=-1)
        return output.item()
    def get_score_batch(self, utterances, sqls):
        """Score a list of SQL candidates in one forward pass; returns a
        list of floats aligned with `sqls`."""
        assert isinstance(sqls, list), "only support sql list input"
        # Seed the batch with the first candidate, then stack the rest.
        tokens_tensor_batch, attention_mask_tensor_batch = preprocess(utterances, sqls[0], self.tokenizer)
        tokens_tensor_batch = tokens_tensor_batch.unsqueeze(0)
        attention_mask_tensor_batch = attention_mask_tensor_batch.unsqueeze(0)
        if len(sqls) > 1:
            for s in sqls[1:]:
                new_token, new_mask = preprocess(utterances, s, self.tokenizer)
                tokens_tensor_batch = torch.cat([tokens_tensor_batch, new_token.unsqueeze(0)], dim=0)
                attention_mask_tensor_batch = torch.cat([attention_mask_tensor_batch, new_mask.unsqueeze(0)])
        output = self.model(input_ids=tokens_tensor_batch, attention_mask=attention_mask_tensor_batch)
        output = output.view(-1)
        ret = []
        for i in output:
            ret.append(i.item())
        return ret
def postprocess(predictions, database_schema, remove_from=False):
    """Post-process decoder predictions into SQL strings and rescore each
    beam entry with the RoBERTa reranker.

    Returns a dict db_id -> list of
    (postprocessed_sql, interaction_id, turn_id, rescored_beam, question,
    gold_sql) tuples.  Reranker scores are cached on disk in
    ./score_vis.json across runs.
    """
    import math
    use_reranker = True
    ranker = Ranker("./submit_models/reranker_roberta.pt")
    correct = 0
    total = 0
    postprocess_sqls = {}
    utterances = []
    Count = 0
    # Disk-backed cache: "<utterances>#<sql>" -> reranker score.
    score_vis = dict()
    if os.path.exists('./score_vis.json'):
        with open('./score_vis.json', 'r') as f:
            score_vis = json.load(f)
    for pred in predictions:
        Count += 1
        if Count % 10 == 0:
            print('Count', Count, "score_vis", len(score_vis))
        db_id = pred['database_id']
        schema = database_schema[db_id]
        # `return_match` is read defensively but unused below.
        try:
            return_match = pred['return_match']
        except:
            return_match = ''
        if db_id not in postprocess_sqls:
            postprocess_sqls[db_id] = []
        interaction_id = pred['interaction_id']
        turn_id = pred['index_in_interaction']
        total += 1
        # A new interaction starts at turn 0; reset the utterance history.
        if turn_id == 0:
            utterances = []
        question = ' '.join(pred['input_seq'])
        pred_sql_str = ' '.join(pred['flat_prediction'])
        utterances.append(question)
        beam_sql_strs = []
        for score, beam_sql_str in pred['beam']:
            beam_sql_strs.append( (score, ' '.join(beam_sql_str)))
        gold_sql_str = ' '.join(pred['flat_gold_queries'][0])
        # Exact string match against the (first) gold query.
        if pred_sql_str == gold_sql_str:
            correct += 1
        postprocess_sql = pred_sql_str
        if remove_from:
            postprocess_sql = postprocess_one(pred_sql_str, schema)
        # Collect the beam candidates whose score is not cached yet.
        sqls = []
        key_idx = dict()
        for i in range(len(beam_sql_strs)):
            sql = postprocess_one(beam_sql_strs[i][1], schema)
            key = '&'.join(utterances)+'#'+sql
            if key not in score_vis:
                if key not in key_idx:
                    key_idx[key]=len(sqls)
                    sqls.append(sql.replace("value", "1"))
        if use_reranker and len(sqls) > 0:
            score_list = ranker.get_score_batch(utterances, sqls)
        # Combine each beam score with the reranker score; a tiny i*1e-4
        # tie-breaker preserves the original beam ordering.
        for i in range(len(beam_sql_strs)):
            score = beam_sql_strs[i][0]
            sql = postprocess_one(beam_sql_strs[i][1], schema)
            if use_reranker:
                key = '&'.join(utterances)+'#'+sql
                old_score = score
                if key in score_vis:
                    score = score_vis[key]
                else:
                    score = score_list[key_idx[key]]
                    score_vis[key] = score
                score += i * 1e-4
                # Reranker output treated as a probability: lower -log is better.
                score = -math.log(score)
                score += old_score
            beam_sql_strs[i] = (score, sql)
        # NOTE(review): only holds when beam[0] was postprocessed the same
        # way as postprocess_sql (i.e. remove_from) -- confirm.
        assert beam_sql_strs[0][1] == postprocess_sql
        # Periodically flush the score cache to disk.
        if use_reranker and Count % 100 == 0:
            with open('score_vis.json', 'w') as file:
                json.dump(score_vis, file)
        gold_sql_str = postprocess_one(gold_sql_str, schema)
        postprocess_sqls[db_id].append((postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_sql_str))
    print (correct, total, float(correct)/total)
    return postprocess_sqls
165,438 | import os
import json
import copy
import torch
import sqlparse
import argparse
import subprocess
from collections import defaultdict
from reranker.predict import Ranker
import numpy
def write_and_evaluate(postprocess_sqls, db_path, table_schema_path, gold_path, dataset):
    """Write postprocessed predictions to output_temp.txt in the format the
    chosen dataset's evaluation script expects, and return the shell
    command string that runs that script."""
    # Collect the distinct database ids, in order, from the gold file
    # (tab-separated "<sql>\t<db>" lines).
    db_list = []
    with open(gold_path) as f:
        for line in f:
            line_split = line.strip().split('\t')
            if len(line_split) != 2:
                continue
            db = line.strip().split('\t')[1]
            if db not in db_list:
                db_list.append(db)
    output_file = 'output_temp.txt'
    if dataset == 'spider':
        # Spider: one predicted SQL per line.
        # NOTE(review): this unpacks 3-tuples, but postprocess() above emits
        # 6-tuples -- confirm which producer feeds this branch.
        with open(output_file, "w") as f:
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id in postprocess_sqls[db]:
                    f.write(postprocess_sql+'\n')
        command = 'python3 eval_scripts/evaluation.py --db {} --table {} --etype match --gold {} --pred {}'.format(db_path,
                                                                                                                  table_schema_path,
                                                                                                                  gold_path,
                                                                                                                  os.path.abspath(output_file))
    elif dataset in ['sparc', 'cosql']:
        # SParC/CoSQL: per turn, write the beam SQLs, their scores, and the
        # question; a blank line separates interactions.
        cnt = 0
        with open(output_file, "w") as f:
            last_id = None
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_str in postprocess_sqls[db]:
                    if last_id is not None and last_id != str(interaction_id)+db:
                        f.write('\n')
                    last_id = str(interaction_id) + db
                    f.write('{}\n'.format( '\t'.join( [x[1] for x in beam_sql_strs] ) ))
                    f.write('{}\n'.format( '\t'.join( [str(x[0]) for x in beam_sql_strs] )) )
                    f.write('{}\n'.format( question ))
                    cnt += 1
        # TODO postprocess in here
        """
        predict_file = 'predicted_sql.txt'
        cnt = 0
        with open(predict_file, "w") as f:
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_str in postprocess_sqls[db]:
                    print(postprocess_sql)
                    print(beam_sql_strs)
                    print(question)
                    print(gold_str)
                    if turn_id == 0 and cnt > 0:
                        f.write('\n')
                    f.write('{}\n'.format(postprocess_sql))
                    cnt += 1
        """
        command = 'python3 eval_scripts/gen_final.py --db {} --table {} --etype match --gold {} --pred {}'.format(db_path,
                                                                                                                 table_schema_path,
                                                                                                                 gold_path,
                                                                                                                 os.path.abspath(output_file))
    # NOTE(review): `command` is unbound for any other dataset value.
    #command += '; rm output_temp.txt'
    print('begin command')
    return command
165,439 | import argparse
import os
import sys
import pickle
import json
import shutil
import sqlparse
from postprocess_eval import get_candidate_tables
def write_interaction(interaction_list,split,output_dir):
def read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas_dict):
def read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
def read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
def read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
def read_db_split(data_dir):
def preprocess(datasets, remove_from=False):
    """Build the merged (spider + sparc + cosql) preprocessed dataset.

    Reads the requested datasets, merges their database schemas into
    ./data/merge_data_removefrom/tables.json, filters and merges training
    interactions, and writes train/dev interaction files via write_interaction.

    Args:
        datasets: collection of dataset names ('spider', 'sparc', 'cosql').
            NOTE(review): the merge step below indexes interaction_list1/2/3
            and dev_database unconditionally, so in practice all three
            datasets (at least 'sparc') must be present — confirm.
        remove_from: selects the FROM-clause-removed output vocabulary and
            output directories; must be True (asserted below).
    """
    # Validate output_vocab
    output_vocab = ['_UNK', '_EOS', '.', 't1', 't2', '=', 'select', 'from', 'as', 'value', 'join', 'on', ')', '(', 'where', 't3', 'by', ',', 'count', 'group', 'order', 'distinct', 't4', 'and', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 't5', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!', 'union', 'between', 't6', '-', 't7', '+', '/']
    if remove_from:
        # Smaller vocabulary: FROM/JOIN tokens removed, compound tokens added.
        output_vocab = ['_UNK', '_EOS', '=', 'select', 'value', ')', '(', 'where', ',', 'count', 'group_by', 'order_by', 'distinct', 'and', 'limit_value', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!=', 'union', 'between', '-', '+', '/']
    print('size of output_vocab', len(output_vocab))
    print('output_vocab', output_vocab)
    print()
    print('datasets', datasets)
    # Per-dataset paths. Note: database_schema_filename is overwritten by each
    # branch, so the LAST matching dataset's tables.json wins below.
    if 'spider' in datasets:
        spider_dir = 'data/spider/'
        database_schema_filename = 'data/spider/tables.json'
        output_dir = 'data/spider_data'
        if remove_from:
            output_dir = 'data/spider_data_removefrom'
        spider_train_database, _ = read_db_split(spider_dir)
    if 'sparc' in datasets:
        sparc_dir = 'data/sparc/'
        database_schema_filename = 'data/sparc/tables.json'
        output_dir = 'data/sparc_data'
        if remove_from:
            output_dir = 'data/sparc_data_removefrom'
        # dev_database is only defined here — 'sparc' must be in datasets.
        sparc_train_database, dev_database = read_db_split(sparc_dir)
    if 'cosql' in datasets:
        cosql_dir = 'data/cosql/'
        database_schema_filename = 'data/cosql/tables.json'
        output_dir = 'data/cosql_data'
        if remove_from:
            output_dir = 'data/cosql_data_removefrom'
        cosql_train_database, _ = read_db_split(cosql_dir)
    #print('sparc_train_database', sparc_train_database)
    #print('cosql_train_database', cosql_train_database)
    #print('spider_train_database', spider_train_database)
    #print('dev_database', dev_database)
    #for x in dev_database:
    #    if x in sparc_train_database:
    #        print('x1', x)
    #    if x in spider_train_database:
    #        print('x2', x)
    #    if x in cosql_train_database:
    #        print('x3', x)
    # Fresh merged output directory (removes any previous run's output).
    output_dir = './data/merge_data_removefrom'
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    schema_tokens = {}
    column_names = {}
    database_schemas = {}
    #assert False
    print('Reading spider database schema file')
    schema_tokens = dict()
    column_names = dict()
    database_schemas = dict()
    assert remove_from
    # NOTE(review): this loop lists 'data/sparc_data_removefrom' twice and
    # never uses `file` — each iteration re-reads the SAME schema file chosen
    # by the dataset branches above. Presumably one entry was meant to be
    # 'data/spider_data_removefrom'; confirm before relying on it.
    for file in ['data/sparc_data_removefrom', 'data/cosql_data_removefrom', 'data/sparc_data_removefrom']:
        s, c, d = read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas)
        schema_tokens.update(s)
        column_names.update(c)
        database_schemas.update(d)
    #x = 0
    #for k in schema_tokens.keys():
    #    x = k
    #print('schema_tokens', schema_tokens[x], len(schema_tokens) )
    #print('column_names', column_names[x], len(column_names) )
    #print('database_schemas', database_schemas[x], len(database_schemas) )
    #assert False
    num_database = len(schema_tokens)
    print('num_database', num_database)
    print('total number of schema_tokens / databases:', len(schema_tokens))
    #output_dir = 'data/merge_data_removefrom'
    # Dump the merged schemas for the downstream model/evaluation.
    output_database_schema_filename = os.path.join(output_dir, 'tables.json')
    #print("output_database_schema_filename", output_database_schema_filename)
    with open(output_database_schema_filename, 'w') as outfile:
        json.dump([v for k,v in database_schemas.items()], outfile, indent=4)
    #assert False
    # interaction_list*: dict database_id -> list of interactions.
    if 'spider' in datasets:
        interaction_list1 = read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    if 'sparc' in datasets:
        interaction_list2 = read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    if 'cosql' in datasets:
        interaction_list3 = read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    print('interaction_list1 length', len(interaction_list1))
    print('interaction_list2 length', len(interaction_list2))
    print('interaction_list3 length', len(interaction_list3))
    #assert False
    # Counters kept from earlier experiments; only n1 is still updated below.
    s1 = 1.0
    s2 = 1.0
    n1 = 1.0
    n2 = 1.0
    m1 = 1.0
    m2 = 1.0
    n3 = 1.0
    train_interaction = []
    vis = set()
    # Merge training interactions, excluding sparc dev databases. Spider and
    # sparc interactions are taken as-is; cosql ones are filtered (below).
    for database_id in interaction_list1:
        if database_id not in dev_database:
            #Len = len(interaction_list1[database_id])//2
            Len = len(interaction_list1[database_id])
            train_interaction += interaction_list1[database_id][:Len]
            #print('xxx', type(interaction_list1[database_id]))
            #print(interaction_list1[database_id][0])
            #print(interaction_list2[database_id][0])
            #print(interaction_list3[database_id][0])
            #assert False
            if database_id in interaction_list2:
                train_interaction += interaction_list2[database_id]
            #for x in interaction_list1[database_id]:
            #    if len( x['interaction'][0] ) > 300:
            #        continue
            #    train_interaction.append(x)
            #    n3+=1
            '''if database_id in interaction_list2:
                for x in interaction_list2[database_id]:
                #    continue
                    #n1 += 1
                    #m1 = max(m1, len(x['interaction']))
                    s = '&'.join([k['utterance'] for k in x['interaction']])
                    if s in vis:
                        print('s', s)
                    vis.add(s)
            '''
            if database_id in interaction_list3:
                for x in interaction_list3[database_id]:
                #    continue
                    #n1 += 1
                    #m1 = max(m1, len(x['interaction']))
                    #s = '&'.join([k['utterance'] for k in x['interaction']])
                    #if s in vis:
                    #    print('s1', s)
                    #vis.add(s)
                    #print(x['interaction'][0]['utterance'])
                    #print(x['interaction'][0]['utterance'])
                    # Drop cosql interactions with more than 4 turns or whose
                    # total utterance length exceeds 300 characters.
                    if len(x['interaction']) > 4:
                        #assert False
                        continue
                    sumlen = 0
                    for one in x['interaction']:
                        sumlen += len(one['utterance'])
                    if sumlen > 300:
                        continue
                    n1 += 1
                    train_interaction.append(x)
    print('n1', n1)
    print('n3', n3)
    # Dev split comes from sparc only.
    dev_interaction = []
    for database_id in dev_database:
        dev_interaction += interaction_list2[database_id]
    print('train interaction: ', len(train_interaction))
    print('dev interaction: ', len(dev_interaction))
    write_interaction(train_interaction, 'train', output_dir)
    write_interaction(dev_interaction, 'dev', output_dir)
    return
165,440 | from collections import namedtuple
import torch
import torch.nn.functional as F
from . import torch_utils
from .attention import Attention, AttentionResult
The provided code snippet includes necessary dependencies for implementing the `score_snippets` function. Write a Python function `def score_snippets(snippets, scorer)` to solve the following problem:
Scores snippets given a scorer. Inputs: snippets (list of Snippet): The snippets to score. scorer (dy.Expression): Dynet vector against which to score the snippets. Returns: dy.Expression, list of str, where the first is the scores and the second is the names of the snippets that were scored.
Here is the function:
def score_snippets(snippets, scorer):
    """Score every snippet against a scoring vector.

    Inputs:
        snippets (list of Snippet): Snippets to score; each exposes an
            `embedding` tensor and a `name`.
        scorer (Tensor): Column vector the snippet embeddings are scored
            against.

    Returns:
        (Tensor, list of str): per-snippet scores (num_snippets x 1) and the
        names of the scored snippets, in the same order.
    """
    # Stack embeddings as columns: emb_size x num_snippets.
    embeddings = torch.stack([s.embedding for s in snippets], dim=1)
    # (1 x emb) @ (emb x num_snippets), transposed -> num_snippets x 1.
    scores = torch.t(torch.mm(torch.t(scorer), embeddings))
    num_scored = scores.size()[0]
    if num_scored != len(snippets):
        raise ValueError("Got " + str(num_scored) + " scores for " + str(len(snippets)) + " snippets")
    names = [s.name for s in snippets]
    return scores, names
165,441 | from collections import namedtuple
import torch
import torch.nn.functional as F
from . import torch_utils
from .attention import Attention, AttentionResult
def score_schema_tokens(input_schema, schema_states, scorer):
    """Score every schema token against a scoring vector.

    schema_states has shape emb_dim x num_tokens; returns one score per
    schema token (num_tokens x 1) plus the schema's surface-form column names.
    """
    token_scores = torch.t(torch.mm(torch.t(scorer), schema_states))  # num_tokens x 1
    num_scored = token_scores.size()[0]
    if num_scored != len(input_schema):
        raise ValueError("Got " + str(num_scored) + " scores for " + str(len(input_schema)) + " schema tokens")
    return token_scores, input_schema.column_names_surface_form
165,442 | from collections import namedtuple
import torch
import torch.nn.functional as F
from . import torch_utils
from .attention import Attention, AttentionResult
def score_query_tokens(previous_query, previous_query_states, scorer):
    """Score every token of the previous query against a scoring vector.

    previous_query_states has shape emb_dim x num_tokens; returns one score
    per query token (num_tokens x 1) plus the query tokens themselves.
    """
    num_tokens = len(previous_query)
    scores = torch.mm(torch.t(scorer), previous_query_states)
    scores = torch.t(scores)  # num_tokens x 1
    if scores.size()[0] != num_tokens:
        raise ValueError("Got " + str(scores.size()[0]) + " scores for " + str(num_tokens) + " query tokens")
    return scores, previous_query
165,443 | from collections import namedtuple
import torch
import torch.nn.functional as F
from . import torch_utils
from .attention import Attention, AttentionResult
class SchemaTokenPredictor(TokenPredictor):
    """ Token predictor that scores vocabulary tokens, snippets, schema tokens,
    and (optionally) previous-query tokens at each decoding step.

    Attributes:
        snippet_weights (dy.Parameter): Weights for scoring snippets against some
            state.
    """
    def __init__(self, params, vocabulary, utterance_attention_key_size, schema_attention_key_size, snippet_size):
        TokenPredictor.__init__(self, params, vocabulary, utterance_attention_key_size)
        # Optional snippet scoring head.
        if params.use_snippets:
            if snippet_size <= 0:
                raise ValueError("Snippet size must be greater than zero; was " + str(snippet_size))
            self.snippet_weights = torch_utils.add_params((params.decoder_state_size, snippet_size), "weights-snippet")
        # Separate attention over the schema; the base class attention module
        # is reused as the utterance attention.
        if params.use_schema_attention:
            self.utterance_attention_module = self.attention_module
            self.schema_attention_module = Attention(params.decoder_state_size, schema_attention_key_size, schema_attention_key_size)
        # Attention over the previous query's encoder states, with a learned
        # start vector for the first turn (no previous query yet).
        if self.params.use_query_attention:
            self.query_attention_module = Attention(params.decoder_state_size, params.encoder_state_size, params.encoder_state_size)
            self.start_query_attention_vector = torch_utils.add_params((params.encoder_state_size,), "start_query_attention_vector")
        # State-transform input size depends on which attention vectors are
        # concatenated onto the decoder state in forward().
        if params.use_schema_attention and self.params.use_query_attention:
            self.state_transform_weights = torch_utils.add_params((params.decoder_state_size + utterance_attention_key_size + schema_attention_key_size + params.encoder_state_size, params.decoder_state_size), "weights-state-transform")
        elif params.use_schema_attention:
            self.state_transform_weights = torch_utils.add_params((params.decoder_state_size + utterance_attention_key_size + schema_attention_key_size, params.decoder_state_size), "weights-state-transform")
        # Use lstm schema encoder
        self.schema_token_weights = torch_utils.add_params((params.decoder_state_size, schema_attention_key_size), "weights-schema-token")
        if self.params.use_previous_query:
            self.query_token_weights = torch_utils.add_params((params.decoder_state_size, self.params.encoder_state_size), "weights-query-token")
        # Copy switch: a scalar gate deciding between generating and copying
        # from the previous query. NOTE(review): its parameter reuses the name
        # "weights-state-transform" — presumably a copy/paste of the label.
        if self.params.use_copy_switch:
            if self.params.use_query_attention:
                self.state2copyswitch_transform_weights = torch_utils.add_params((params.decoder_state_size + utterance_attention_key_size + schema_attention_key_size + params.encoder_state_size, 1), "weights-state-transform")
            else:
                self.state2copyswitch_transform_weights = torch_utils.add_params((params.decoder_state_size + utterance_attention_key_size + schema_attention_key_size, 1), "weights-state-transform")

    def _get_snippet_scorer(self, state):
        # Column-vector scorer for snippet embeddings.
        scorer = torch.t(torch_utils.linear_layer(state, self.snippet_weights))
        return scorer

    def _get_schema_token_scorer(self, state):
        # Column-vector scorer for schema token states.
        scorer = torch.t(torch_utils.linear_layer(state, self.schema_token_weights))
        return scorer

    def _get_query_token_scorer(self, state):
        # Column-vector scorer for previous-query token states.
        scorer = torch.t(torch_utils.linear_layer(state, self.query_token_weights))
        return scorer

    def _get_copy_switch(self, state):
        # Scalar in (0, 1): probability mass routed to copying.
        copy_switch = torch.sigmoid(torch_utils.linear_layer(state, self.state2copyswitch_transform_weights))
        return copy_switch.squeeze()

    def forward(self, prediction_input, dropout_amount=0.):
        """Produce token scores for one decoding step.

        Returns a TokenPrediction with concatenated scores over the
        vocabulary, snippets (optional), and schema tokens, plus attention
        results, copy switch, and previous-query token scores.
        """
        decoder_state = prediction_input.decoder_state
        input_hidden_states = prediction_input.input_hidden_states
        snippets = prediction_input.snippets
        input_schema = prediction_input.input_schema
        schema_states = prediction_input.schema_states
        # Attend over the utterance, and optionally the schema.
        if self.params.use_schema_attention:
            schema_attention_results = self.schema_attention_module(decoder_state, schema_states)
            utterance_attention_results = self.utterance_attention_module(decoder_state, input_hidden_states)
        else:
            utterance_attention_results = self.attention_module(decoder_state, input_hidden_states)
            schema_attention_results = None
        query_attention_results = None
        if self.params.use_query_attention:
            previous_query_states = prediction_input.previous_query_states
            if len(previous_query_states) > 0:
                # Attend over the most recent previous query's states.
                query_attention_results = self.query_attention_module(decoder_state, previous_query_states[-1])
            else:
                # First turn: fall back to the learned start vector.
                query_attention_results = self.start_query_attention_vector
                query_attention_results = AttentionResult(None, None, query_attention_results)
        # Concatenate the decoder state with whichever attention vectors are in use.
        if self.params.use_schema_attention and self.params.use_query_attention:
            state_and_attn = torch.cat([decoder_state, utterance_attention_results.vector, schema_attention_results.vector, query_attention_results.vector], dim=0)
        elif self.params.use_schema_attention:
            state_and_attn = torch.cat([decoder_state, utterance_attention_results.vector, schema_attention_results.vector], dim=0)
        else:
            state_and_attn = torch.cat([decoder_state, utterance_attention_results.vector], dim=0)
        intermediate_state = self._get_intermediate_state(state_and_attn, dropout_amount=dropout_amount)
        # Scores are accumulated as [vocabulary | snippets | schema tokens],
        # with aligned_tokens tracking which token each score position means.
        vocab_scores, vocab_tokens = self._score_vocabulary_tokens(intermediate_state)
        final_scores = vocab_scores
        aligned_tokens = []
        aligned_tokens.extend(vocab_tokens)
        if self.params.use_snippets and snippets:
            snippet_scores, snippet_tokens = score_snippets(snippets, self._get_snippet_scorer(intermediate_state))
            final_scores = torch.cat([final_scores, snippet_scores], dim=0)
            aligned_tokens.extend(snippet_tokens)
        schema_states = torch.stack(schema_states, dim=1)
        schema_scores, schema_tokens = score_schema_tokens(input_schema, schema_states, self._get_schema_token_scorer(intermediate_state))
        final_scores = torch.cat([final_scores, schema_scores], dim=0)
        aligned_tokens.extend(schema_tokens)
        # Previous Queries
        previous_queries = prediction_input.previous_queries
        previous_query_states = prediction_input.previous_query_states
        copy_switch = None
        query_scores = None
        query_tokens = None
        if self.params.use_previous_query and len(previous_queries) > 0:
            if self.params.use_copy_switch:
                copy_switch = self._get_copy_switch(state_and_attn)
            # NOTE(review): only the scores/tokens of the LAST previous query
            # survive this loop — earlier turns are overwritten. Confirm this
            # is intended.
            for turn, (previous_query, previous_query_state) in enumerate(zip(previous_queries, previous_query_states)):
                assert len(previous_query) == len(previous_query_state)
                previous_query_state = torch.stack(previous_query_state, dim=1)
                query_scores, query_tokens = score_query_tokens(previous_query, previous_query_state, self._get_query_token_scorer(intermediate_state))
                query_scores = query_scores.squeeze()
        final_scores = final_scores.squeeze()
        return TokenPrediction(final_scores, aligned_tokens, utterance_attention_results, schema_attention_results, query_attention_results, copy_switch, query_scores, query_tokens, decoder_state)
class SnippetAnonymizationTokenPredictor(SnippetTokenPredictor, AnonymizationTokenPredictor):
    """ Token predictor that both anonymizes and scores snippets."""
    def __init__(self, params, vocabulary, attention_key_size, snippet_size, anonymizer):
        # Initialize both parents explicitly; each contributes its own
        # parameters (anonymization scorer, snippet scorer).
        AnonymizationTokenPredictor.__init__(self, params, vocabulary, attention_key_size, anonymizer)
        SnippetTokenPredictor.__init__(self, params, vocabulary, attention_key_size, snippet_size)

    def forward(self, prediction_input, dropout_amount=0.):
        """Produce token scores for one decoding step.

        Scores are accumulated as [vocabulary | snippets | anonymized tokens],
        with aligned_tokens recording what each score position refers to.
        """
        decoder_state = prediction_input.decoder_state
        assert prediction_input.input_sequence
        snippets = prediction_input.snippets

        input_hidden_states = prediction_input.input_hidden_states

        attention_results = self.attention_module(decoder_state,
                                                  prediction_input.input_hidden_states)

        state_and_attn = torch.cat([decoder_state, attention_results.vector], dim=0)

        intermediate_state = self._get_intermediate_state(state_and_attn, dropout_amount=dropout_amount)

        # Vocabulary tokens
        final_scores, vocab_tokens = self._score_vocabulary_tokens(intermediate_state)

        aligned_tokens = []
        aligned_tokens.extend(vocab_tokens)

        # Snippets
        if snippets:
            snippet_scores, snippet_tokens = score_snippets(
                snippets,
                self._get_snippet_scorer(intermediate_state))
            final_scores = torch.cat([final_scores, snippet_scores], dim=0)
            aligned_tokens.extend(snippet_tokens)

        # Anonymized tokens (scored from the attention distribution over the
        # input sequence rather than from the intermediate state).
        anonymized_scores, anonymized_tokens = self._score_anonymized_tokens(
            prediction_input.input_sequence,
            attention_results.scores)

        if anonymized_scores is not None:
            final_scores = torch.cat([final_scores, anonymized_scores], dim=0)
            aligned_tokens.extend(anonymized_tokens)

        final_scores = final_scores.squeeze()

        return TokenPrediction(final_scores, aligned_tokens, attention_results, None, None, None, None, None, decoder_state)
The provided code snippet includes necessary dependencies for implementing the `construct_token_predictor` function. Write a Python function `def construct_token_predictor(params, vocabulary, utterance_attention_key_size, schema_attention_key_size, snippet_size, anonymizer=None)` to solve the following problem:
Constructs a token predictor given the parameters. Inputs: parameter_collection (dy.ParameterCollection): Contains the parameters. params (dictionary): Contains the command line parameters/hyperparameters. vocabulary (Vocabulary): Vocabulary object for output generation. attention_key_size (int): The size of the attention keys. anonymizer (Anonymizer): An anonymization object.
Here is the function:
def construct_token_predictor(params,
                              vocabulary,
                              utterance_attention_key_size,
                              schema_attention_key_size,
                              snippet_size,
                              anonymizer=None):
    """ Constructs a token predictor given the parameters.

    Inputs:
        params (dictionary): Contains the command line parameters/hyperparameters.
        vocabulary (Vocabulary): Vocabulary object for output generation.
        utterance_attention_key_size (int): Size of the utterance attention keys.
        schema_attention_key_size (int): Size of the schema attention keys.
        snippet_size (int): Size of snippet embeddings.
        anonymizer (Anonymizer): An anonymization object, or None.

    Returns:
        A SchemaTokenPredictor (no anonymizer) or a
        SnippetAnonymizationTokenPredictor; exits on unsupported combinations.
    """
    no_snippet_encoding = not params.previous_decoder_snippet_encoding
    if no_snippet_encoding and not anonymizer:
        print('using SchemaTokenPredictor')
        return SchemaTokenPredictor(params, vocabulary, utterance_attention_key_size, schema_attention_key_size, snippet_size)
    if no_snippet_encoding and anonymizer and params.use_snippets:
        print('using SnippetAnonymizationTokenPredictor')
        return SnippetAnonymizationTokenPredictor(params,
                                                  vocabulary,
                                                  utterance_attention_key_size,
                                                  snippet_size,
                                                  anonymizer)
    print('Unknown token_predictor')
    exit()
165,444 | import torch.nn as nn
import math
import torch
def gelu(x):
    """Exact GELU activation: x * Phi(x), where Phi is the standard normal
    CDF expressed through the error function."""
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
165,445 | import torch
import torch.nn.functional as F
import data_util.snippets as snippet_handler
import data_util.vocabulary as vocabulary_handler
The provided code snippet includes necessary dependencies for implementing the `bow_snippets` function. Write a Python function `def bow_snippets(token, snippets, output_embedder, input_schema)` to solve the following problem:
Bag of words embedding for snippets
Here is the function:
def bow_snippets(token, snippets, output_embedder, input_schema):
    """ Bag of words embedding for snippets"""
    assert snippet_handler.is_snippet(token) and snippets

    # Find the token sequence of the snippet whose name matches `token`.
    sequence = []
    for candidate in snippets:
        if candidate.name == token:
            sequence = candidate.sequence
            break
    assert sequence

    if input_schema:
        # Each snippet token is either a vocabulary token or a schema column.
        embeddings = []
        for output_token in sequence:
            assert output_embedder.in_vocabulary(output_token) or input_schema.in_vocabulary(output_token, surface_form=True)
            if output_embedder.in_vocabulary(output_token):
                embeddings.append(output_embedder(output_token))
            else:
                embeddings.append(input_schema.column_name_embedder(output_token, surface_form=True))
    else:
        embeddings = [output_embedder(subtoken) for subtoken in sequence]

    stacked = torch.stack(embeddings, dim=0)  # len(sequence) x emb_size
    return torch.mean(stacked, dim=0)  # emb_size
165,446 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def linear_layer(exp, weights, biases=None):
    """Affine transform of an input vector/matrix.

    exp: input of size size_1, or 1 x size_1
    weights: size_1 x size_2
    biases: optional, size_2

    Returns a 1 x size_2 (or n x size_2) result.
    """
    # Promote a bare vector to a 1-row matrix so torch.mm applies.
    if exp.dim() == 1:
        exp = exp.unsqueeze(0)
    assert exp.size()[1] == weights.size()[0]
    product = torch.mm(exp, weights)
    if biases is None:
        return product
    assert weights.size()[1] == biases.size()[0]
    return product + biases
165,447 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `compute_loss` function. Write a Python function `def compute_loss(gold_seq, scores, index_to_token_maps, gold_tok_to_id, noise=0.00000001)` to solve the following problem:
Computes the loss of a gold sequence given scores. Inputs: gold_seq (list of str): A sequence of gold tokens. scores (list of dy.Expression): Expressions representing the scores of potential output tokens for each token in gold_seq. index_to_token_maps (list of dict str->list of int): Maps from index in the sequence to a dictionary mapping from a string to a set of integers. gold_tok_to_id (lambda (str, str)->list of int): Maps from the gold token and some lookup function to the indices in the probability distribution where the gold token occurs. noise (float, optional): The amount of noise to add to the loss. Returns: dy.Expression representing the sum of losses over the sequence.
Here is the function:
def compute_loss(gold_seq,
                 scores,
                 index_to_token_maps,
                 gold_tok_to_id,
                 noise=0.00000001):
    """ Computes the loss of a gold sequence given scores.

    Inputs:
        gold_seq (list of str): A sequence of gold tokens.
        scores (list of Tensor): Probability distributions over output tokens,
            one per position in gold_seq.
        index_to_token_maps (list of dict str->list of int): Per position, maps
            token strings to their indices in the distribution.
        gold_tok_to_id (lambda (str, dict)->list of int): Maps the gold token
            (via the position's token map) to distribution indices.
        noise (float, optional): Additive smoothing before taking the log.

    Returns:
        Tensor scalar: summed negative log-likelihood over the sequence.
    """
    assert len(gold_seq) == len(scores) == len(index_to_token_maps)

    step_losses = []
    for probdist, token_map, gold_tok in zip(scores, index_to_token_maps, gold_seq):
        gold_indices = gold_tok_to_id(gold_tok, token_map)
        assert len(gold_indices) > 0
        # Total probability assigned to the gold token (it may appear at
        # several indices), smoothed so log never sees an exact zero.
        prob_of_tok = noise + torch.sum(probdist[gold_indices])
        step_losses.append(-torch.log(prob_of_tok))

    return torch.sum(torch.stack(step_losses))
165,448 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `get_seq_from_scores` function. Write a Python function `def get_seq_from_scores(scores, index_to_token_maps)` to solve the following problem:
Gets the argmax sequence from a set of scores. Inputs: scores (list of dy.Expression): Sequences of output scores. index_to_token_maps (list of list of str): For each output token, maps the index in the probability distribution to a string. Returns: list of str, representing the argmax sequence.
Here is the function:
def get_seq_from_scores(scores, index_to_token_maps):
    """Return the argmax token at every output position.

    Inputs:
        scores (list of Tensor): Per-position score distributions.
        index_to_token_maps (list of list of str): Per position, the token
            string for each index of the distribution.

    Returns:
        list of str: the highest-scoring token at each position.
    """
    output_seq = []
    for position_scores, tok_map in zip(scores, index_to_token_maps):
        # Move to host memory before taking the argmax with numpy.
        score_values = position_scores.cpu().data.numpy()
        assert position_scores.size()[0] == len(tok_map) == len(list(score_values))
        output_seq.append(tok_map[np.argmax(score_values)])
    return output_seq
165,449 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def forward_one_multilayer(rnns, lstm_input, layer_states, dropout_amount=0.):
    """ Advance a stacked LSTM by one time step.

    Inputs:
        rnns (list of LSTMCell): One cell per layer.
        lstm_input (Tensor): Input vector for the bottom layer.
        layer_states (list of (h, c)): Current state of each layer.
        dropout_amount (float, optional): Dropout applied between layers
            (never after the top layer).

    Returns:
        (cell memories per layer, hidden states per layer), the top layer's
        hidden state, and the updated per-layer (h, c) states.
    """
    new_states = []
    cell_states = []
    hidden_states = []
    state = lstm_input
    last_layer = len(layer_states) - 1
    for depth, (cell, prev_state) in enumerate(zip(rnns, layer_states)):
        # Cells expect a (1, input_size) batch.
        h, c = cell(torch.unsqueeze(state, 0), prev_state)
        new_states.append((h, c))
        h = h.squeeze()
        c = c.squeeze()
        state = h
        if depth < last_layer:
            # p is the probability of zeroing an activation (PyTorch semantics).
            state = F.dropout(state, p=dropout_amount)
        cell_states.append(c)
        hidden_states.append(h)
    return (cell_states, hidden_states), state, new_states
The provided code snippet includes necessary dependencies for implementing the `encode_sequence` function. Write a Python function `def encode_sequence(sequence, rnns, embedder, dropout_amount=0.)` to solve the following problem:
Encodes a sequence given RNN cells and an embedding function. Inputs: seq (list of str): The sequence to encode. rnns (list of dy._RNNBuilder): The RNNs to use. emb_fn (dict str->dy.Expression): Function that embeds strings to word vectors. size (int): The size of the RNN. dropout_amount (float, optional): The amount of dropout to apply. Returns: (list of dy.Expression, list of dy.Expression), list of dy.Expression, where the first pair is the (final cell memories, final cell states) of all layers, and the second list is a list of the final layer's cell state for all tokens in the sequence.
Here is the function:
def encode_sequence(sequence, rnns, embedder, dropout_amount=0.):
    """ Encode a token sequence with a stacked LSTM.

    Inputs:
        sequence (list): Tokens to encode (must be non-empty; the final
            cell/hidden states come from the last step).
        rnns (list of LSTMCell): One cell per layer.
        embedder (callable): Maps a token to its input vector.
        dropout_amount (float, optional): Dropout between layers.

    Returns:
        (final cell memories, final hidden states) across layers, and the
        list of the top layer's output at every position.
    """
    batch_size = 1

    def _zero_state(rnn):
        # (h_0, c_0), each of shape (batch, hidden_size), on the cell's device.
        hidden_size = rnn.weight_hh.size()[1]
        if rnn.weight_hh.is_cuda:
            return (torch.cuda.FloatTensor(batch_size, hidden_size).fill_(0),
                    torch.cuda.FloatTensor(batch_size, hidden_size).fill_(0))
        return (torch.zeros(batch_size, hidden_size),
                torch.zeros(batch_size, hidden_size))

    layer_states = [_zero_state(rnn) for rnn in rnns]

    outputs = []
    for token in sequence:
        (cell_states, hidden_states), output, layer_states = forward_one_multilayer(
            rnns, embedder(token), layer_states, dropout_amount)
        outputs.append(output)

    return (cell_states, hidden_states), outputs
165,450 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `create_multilayer_lstm_params` function. Write a Python function `def create_multilayer_lstm_params(num_layers, in_size, state_size, name="")` to solve the following problem:
Adds a multilayer LSTM to the model parameters. Inputs: num_layers (int): Number of layers to create. in_size (int): The input size to the first layer. state_size (int): The size of the states. model (dy.ParameterCollection): The parameter collection for the model. name (str, optional): The name of the multilayer LSTM.
Here is the function:
def create_multilayer_lstm_params(num_layers, in_size, state_size, name=""):
    """ Build a stack of LSTM cells as a ModuleList.

    Inputs:
        num_layers (int): Number of layers to create.
        in_size (int): Input size of the bottom layer.
        state_size (int): Hidden size of every layer (and the input size of
            every layer above the first).
        name (str, optional): Label for the stack (kept for API
            compatibility; not used).

    Returns:
        torch.nn.ModuleList of LSTMCell.
    """
    layers = []
    input_size = in_size
    for _ in range(num_layers):
        layers.append(torch.nn.LSTMCell(input_size=int(input_size), hidden_size=int(state_size), bias=True))
        # Every layer after the first consumes the previous layer's hidden state.
        input_size = state_size
    return torch.nn.ModuleList(layers)
165,451 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `add_params` function. Write a Python function `def add_params(size, name="")` to solve the following problem:
Adds parameters to the model. Inputs: model (dy.ParameterCollection): The parameter collection for the model. size (tuple of int): The size to create. name (str, optional): The name of the parameters. if len(size) == 1: print("vector " + name + ": " + str(size[0]) + "; uniform in [-0.1, 0.1]") else: print("matrix " + name + ": " + str(size[0]) + " x " + str(size[1]) + "; uniform in [-0.1, 0.1]")
Here is the function:
# Creates a single trainable parameter tensor initialized uniformly in
# [-0.1, 0.1]. NOTE(review): `name` is unused here (DyNet leftover).
def add_params(size, name=""):
""" Adds parameters to the model.
Inputs:
model (dy.ParameterCollection): The parameter collection for the model.
size (tuple of int): The size to create.
name (str, optional): The name of the parameters.
if len(size) == 1:
print("vector " + name + ": " + str(size[0]) + "; uniform in [-0.1, 0.1]")
else:
print("matrix " + name + ": " + str(size[0]) + " x " + str(size[1]) + "; uniform in [-0.1, 0.1]")
"""
# Coerce possibly non-int dimensions (e.g. numpy ints / floats) to ints.
size_int = tuple([int(ss) for ss in size])
return torch.nn.Parameter(torch.empty(size_int).uniform_(-0.1, 0.1)) | Adds parameters to the model. Inputs: model (dy.ParameterCollection): The parameter collection for the model. size (tuple of int): The size to create. name (str, optional): The name of the parameters. if len(size) == 1: print("vector " + name + ": " + str(size[0]) + "; uniform in [-0.1, 0.1]") else: print("matrix " + name + ": " + str(size[0]) + " x " + str(size[1]) + "; uniform in [-0.1, 0.1]")
165,452 | import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder
from .encoder import Encoder as Transoformer_Encoder
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
import pickle
UNK_TOK = "_UNK"
The provided code snippet includes necessary dependencies for implementing the `get_token_indices` function. Write a Python function `def get_token_indices(token, index_to_token)` to solve the following problem:
Maps from a gold token (string) to a list of indices. Inputs: token (string): String to look up. index_to_token (list of tokens): Ordered list of tokens. Returns: list of int, representing the indices of the token in the probability distribution.
Here is the function:
def get_token_indices(token, index_to_token):
""" Maps from a gold token (string) to a list of indices.
Inputs:
token (string): String to look up.
index_to_token (list of tokens): Ordered list of tokens.
Returns:
list of int, representing the indices of the token in the probability
distribution.
"""
if token in index_to_token:
if len(set(index_to_token)) == len(index_to_token): # no duplicates
return [index_to_token.index(token)]
else:
# Duplicates exist: collect every position holding this token.
indices = []
for index, other_token in enumerate(index_to_token):
if token == other_token:
indices.append(index)
assert len(indices) == len(set(indices))
return indices
else:
# Out-of-vocabulary tokens map to the UNK slot; list.index raises
# ValueError if UNK_TOK itself is missing from index_to_token.
return [index_to_token.index(UNK_TOK)] | Maps from a gold token (string) to a list of indices. Inputs: token (string): String to look up. index_to_token (list of tokens): Ordered list of tokens. Returns: list of int, representing the indices of the token in the probability distribution.
165,453 | import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder
from .encoder import Encoder as Transoformer_Encoder
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
import pickle
DEL_TOK = ";"
The provided code snippet includes necessary dependencies for implementing the `flatten_utterances` function. Write a Python function `def flatten_utterances(utterances)` to solve the following problem:
Gets a flat sequence from a sequence of utterances. Inputs: utterances (list of list of str): Utterances to concatenate. Returns: list of str, representing the flattened sequence with separating delimiter tokens.
Here is the function:
def flatten_utterances(utterances):
""" Gets a flat sequence from a sequence of utterances.
Inputs:
utterances (list of list of str): Utterances to concatenate.
Returns:
list of str, representing the flattened sequence with separating
delimiter tokens.
"""
sequence = []
for i, utterance in enumerate(utterances):
sequence.extend(utterance)
# Insert the delimiter between utterances, but not after the last one.
if i < len(utterances) - 1:
sequence.append(DEL_TOK)
return sequence | Gets a flat sequence from a sequence of utterances. Inputs: utterances (list of list of str): Utterances to concatenate. Returns: list of str, representing the flattened sequence with separating delimiter tokens.
165,454 | import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder
from .encoder import Encoder as Transoformer_Encoder
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
import pickle
The provided code snippet includes necessary dependencies for implementing the `encode_snippets_with_states` function. Write a Python function `def encode_snippets_with_states(snippets, states)` to solve the following problem:
Encodes snippets by using previous query states instead. Inputs: snippets (list of Snippet): Input snippets. states (list of dy.Expression): Previous hidden states to use. TODO: should this by dy.Expression or vector values?
Here is the function:
def encode_snippets_with_states(snippets, states):
""" Encodes snippets by using previous query states instead.
Inputs:
snippets (list of Snippet): Input snippets.
states (list of dy.Expression): Previous hidden states to use.
TODO: should this by dy.Expression or vector values?
"""
# Mutates each snippet in place: its embedding becomes the concatenation of
# the hidden states at the snippet's start and end positions. Returns the
# same list that was passed in.
for snippet in snippets:
snippet.set_embedding(torch.cat([states[snippet.startpos],states[snippet.endpos]], dim=0))
return snippets | Encodes snippets by using previous query states instead. Inputs: snippets (list of Snippet): Input snippets. states (list of dy.Expression): Previous hidden states to use. TODO: should this by dy.Expression or vector values?
165,455 | import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder
from .encoder import Encoder as Transoformer_Encoder
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
import pickle
# Loads 300-d GloVe vectors and builds one embedding matrix per vocabulary
# (input, output, and optionally output schema). OOV tokens keep zero rows.
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
print(output_vocabulary.inorder_tokens)
print()
#exit()
def read_glove_embedding(embedding_filename, embedding_size):
# NOTE(review): the '.pkl' branch ignores `embedding_filename` and reads a
# hardcoded '/dev/shm/glove_embeddings.pkl', while the cache below is
# written to './glove_embeddings.pkl' — confirm these paths are intended.
if '.pkl' in embedding_filename:
with open('/dev/shm/glove_embeddings.pkl', 'rb') as f2:
glove_embeddings = pickle.load(f2)
return glove_embeddings
else:
glove_embeddings = {}
with open(embedding_filename) as f:
cnt = 1
for line in f:
cnt += 1
# In debug / non-training mode only a 1000-vector prefix is read.
if params.debug or not params.train:
if cnt == 1000:
print('Read 1000 word embeddings')
break
l_split = line.split()
# The word may itself contain spaces; everything except the last
# `embedding_size` fields is the word.
word = " ".join(l_split[0:len(l_split) - embedding_size])
embedding = np.array([float(val) for val in l_split[-embedding_size:]])
glove_embeddings[word] = embedding
with open('./glove_embeddings.pkl', 'wb') as f2:
pickle.dump(glove_embeddings, f2)
return glove_embeddings
print('Loading Glove Embedding from', params.embedding_filename)
glove_embedding_size = 300
glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
print('Done')
input_embedding_size = glove_embedding_size
def create_word_embeddings(vocab):
vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
vocabulary_tokens = vocab.inorder_tokens
glove_oov = 0
# NOTE(review): `para_oov` is never used; `glove_oov` is counted but not reported.
para_oov = 0
for token in vocabulary_tokens:
token_id = vocab.token_to_id(token)
if token in glove_embeddings:
vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
else:
glove_oov += 1
return vocabulary_embeddings
input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
output_vocabulary_schema_embeddings = None
if output_vocabulary_schema:
output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size | null
165,456 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import argparse
import tensorflow as tf
import torch
import numpy as np
from modeling import BertConfig, BertModel
args = parser.parse_args()
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.

    Holds the transformer hyper-parameters (sizes, layer counts, dropout
    rates, ...) and provides dict/JSON (de)serialization helpers.
    """

    def __init__(self,
                 vocab_size,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=16,
                 initializer_range=0.02):
        """Constructs BertConfig.

        Args:
            vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention
                layer in the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string)
                in the encoder and pooler.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model
                might ever be used with (e.g. 512, 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed
                into `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer
                for initializing all weight matrices.
        """
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range

    def print_status(self):
        """Prints every configuration field (debugging aid added by Wonseok)."""
        print(f"vocab size: {self.vocab_size}")
        print(f"hidden_size: {self.hidden_size}")
        print(f"num_hidden_layer: {self.num_hidden_layers}")
        print(f"num_attention_heads: {self.num_attention_heads}")
        print(f"hidden_act: {self.hidden_act}")
        print(f"intermediate_size: {self.intermediate_size}")
        print(f"hidden_dropout_prob: {self.hidden_dropout_prob}")
        print(f"attention_probs_dropout_prob: {self.attention_probs_dropout_prob}")
        print(f"max_position_embeddings: {self.max_position_embeddings}")
        print(f"type_vocab_size: {self.type_vocab_size}")
        print(f"initializer_range: {self.initializer_range}")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Fix: this method takes `cls` but was missing the @classmethod
        # decorator, so `BertConfig.from_dict(d)` would have bound `d` to `cls`.
        config = BertConfig(vocab_size=None)
        # dict.items() works on both Python 2 and 3; the original relied on
        # `six.iteritems` although `six` is not imported in this module.
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        import json  # local import: `json` is not imported at module level here
        with open(json_file, "r") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        import copy  # local import: the `copy` module is not bound at module level
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        import json
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
# NOTE(review): BERTEmbeddings, BERTEncoder and BERTPooler are defined
# elsewhere in this module (not visible here).
class BertModel(nn.Module):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config: BertConfig):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
"""
super(BertModel, self).__init__()
self.embeddings = BERTEmbeddings(config)
self.encoder = BERTEncoder(config)
self.pooler = BERTPooler(config)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
# Returns (all_encoder_layers, pooled_output): the hidden states of every
# encoder layer and the pooled representation of the [CLS] position.
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.float()
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
all_encoder_layers = self.encoder(embedding_output, extended_attention_mask)
sequence_output = all_encoder_layers[-1]
pooled_output = self.pooler(sequence_output)
return all_encoder_layers, pooled_output
# Converts a TensorFlow BERT checkpoint into a PyTorch state dict by walking
# the TF variable names and assigning each array onto the matching submodule.
def convert():
# Initialise PyTorch model
config = BertConfig.from_json_file(args.bert_config_file)
model = BertModel(config)
# Load weights from TF model
path = args.tf_checkpoint_path
print("Converting TensorFlow checkpoint from {}".format(path))
init_vars = tf.train.list_variables(path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading {} with shape {}".format(name, shape))
array = tf.train.load_variable(path, name)
print("Numpy array shape {}".format(array.shape))
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name[5:]  # skip "bert/"
print("Loading {}".format(name))
name = name.split('/')
# NOTE(review): 'redictions' / 'eq_relationship' look like the tails of
# "cls/predictions" / "cls/seq_relationship" after the 5-char strip above;
# these pre-training heads are deliberately skipped — confirm.
if name[0] in ['redictions', 'eq_relationship']:
print("Skipping")
continue
pointer = model
for m_name in name:
# Names like "layer_11" are split into ("layer", 11) for indexing
# into ModuleLists; TF "kernel" maps to PyTorch "weight".
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
# TF dense kernels are transposed relative to PyTorch Linear weights.
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
pointer.data = torch.from_numpy(array)
# Save pytorch-model
torch.save(model.state_dict(), args.pytorch_dump_path) | null
165,458 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    On Python 3, `str` passes through unchanged and `bytes` are decoded as
    UTF-8 with undecodable bytes ignored. On Python 2, `str` (byte string)
    is decoded and `unicode` passes through.

    Raises:
        ValueError: if `text` is not a supported string type for the
            running interpreter.
    """
    import sys  # local import; replaces the third-party `six` version checks
    if sys.version_info[0] == 3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif sys.version_info[0] == 2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        elif isinstance(text, unicode):  # noqa: F821 - Python 2 only branch
            return text
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
# Maps token -> row index, preserving file order (one token per line).
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = convert_to_unicode(reader.readline())
# readline() returns "" at EOF, which stays falsy after conversion.
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab | Loads a vocabulary file into a dictionary.
165,459 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
# Empty / whitespace-only input yields an empty token list.
if not text:
return []
tokens = text.split()
return tokens | Runs basic whitespace cleaning and splitting on a piece of text.
165,463 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import math
import six
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem:
Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Here is the function:
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
# Exact (erf-based) GELU: x * Phi(x), where Phi is the standard normal CDF.
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) | Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
165,464 | import os, json
import random as rd
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .bert import tokenization as tokenization
from .bert.modeling import BertConfig, BertModel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.

    Holds the transformer hyper-parameters (sizes, layer counts, dropout
    rates, ...) and provides dict/JSON (de)serialization helpers.
    """

    def __init__(self,
                 vocab_size,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=16,
                 initializer_range=0.02):
        """Constructs BertConfig.

        Args:
            vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention
                layer in the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string)
                in the encoder and pooler.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model
                might ever be used with (e.g. 512, 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed
                into `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer
                for initializing all weight matrices.
        """
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range

    def print_status(self):
        """Prints every configuration field (debugging aid added by Wonseok)."""
        print(f"vocab size: {self.vocab_size}")
        print(f"hidden_size: {self.hidden_size}")
        print(f"num_hidden_layer: {self.num_hidden_layers}")
        print(f"num_attention_heads: {self.num_attention_heads}")
        print(f"hidden_act: {self.hidden_act}")
        print(f"intermediate_size: {self.intermediate_size}")
        print(f"hidden_dropout_prob: {self.hidden_dropout_prob}")
        print(f"attention_probs_dropout_prob: {self.attention_probs_dropout_prob}")
        print(f"max_position_embeddings: {self.max_position_embeddings}")
        print(f"type_vocab_size: {self.type_vocab_size}")
        print(f"initializer_range: {self.initializer_range}")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Fix: this method takes `cls` but was missing the @classmethod
        # decorator, so `BertConfig.from_dict(d)` would have bound `d` to `cls`.
        config = BertConfig(vocab_size=None)
        # dict.items() works on both Python 2 and 3; the original relied on
        # `six.iteritems` although `six` is not imported in this module.
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        import json  # local import keeps this method self-contained
        with open(json_file, "r") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        import copy  # local import: only `deepcopy` (not `copy`) is imported above
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        import json
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
# NOTE(review): BERTEmbeddings, BERTEncoder and BERTPooler are defined
# elsewhere in this module (not visible here).
class BertModel(nn.Module):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config: BertConfig):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
"""
super(BertModel, self).__init__()
self.embeddings = BERTEmbeddings(config)
self.encoder = BERTEncoder(config)
self.pooler = BERTPooler(config)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
# Returns (all_encoder_layers, pooled_output): the hidden states of every
# encoder layer and the pooled representation of the [CLS] position.
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.float()
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
all_encoder_layers = self.encoder(embedding_output, extended_attention_mask)
sequence_output = all_encoder_layers[-1]
pooled_output = self.pooler(sequence_output)
return all_encoder_layers, pooled_output
# Loads a pre-trained BERT model plus its tokenizer and config from a
# hardcoded local path, keyed by the abbreviation in params.bert_type_abb.
def get_bert(params):
BERT_PT_PATH = './model/bert/data/annotated_wikisql_and_PyTorch_bert_param'
map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12',
'uL': 'uncased_L-24_H-1024_A-16',
'cS': 'cased_L-12_H-768_A-12',
'cL': 'cased_L-24_H-1024_A-16',
'mcS': 'multi_cased_L-12_H-768_A-12'}
bert_type = map_bert_type_abb[params.bert_type_abb]
# Cased / multilingual-cased models must not lower-case the input.
if params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS':
do_lower_case = False
else:
do_lower_case = True
# Hardcoded: pre-trained weights are always loaded (never random init).
no_pretraining = False
bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json')
vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt')
init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin')
print('bert_config_file', bert_config_file)
print('vocab_file', vocab_file)
print('init_checkpoint', init_checkpoint)
bert_config = BertConfig.from_json_file(bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
bert_config.print_status()
model_bert = BertModel(bert_config)
if no_pretraining:
pass
else:
model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu'))
print("Load pre-trained parameters.")
model_bert.to(device)
return model_bert, tokenizer, bert_config | null
165,465 | import os, json
import random as rd
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .bert import tokenization as tokenization
from .bert.modeling import BertConfig, BertModel
# Runs BERT over (question, headers) inputs and extracts word embeddings for
# the question tokens (wemb_n) and header tokens (wemb_h).
# NOTE(review): get_bert_output, get_wemb_n and get_wemb_h are defined
# elsewhere in this module.
def get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n=1, num_out_layers_h=1):
# get contextual output of all tokens from bert
all_encoder_layer, pooled_output, tokens, i_nlu, i_hds,\
l_n, l_hpu, l_hs, \
nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds = get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length)
# all_encoder_layer: BERT outputs from all layers.
# pooled_output: output of [CLS] vec.
# tokens: BERT intput tokens
# i_nlu: start and end indices of question in tokens
# i_hds: start and end indices of headers
# get the wemb
wemb_n = get_wemb_n(i_nlu, l_n, bert_config.hidden_size, bert_config.num_hidden_layers, all_encoder_layer,
num_out_layers_n)
wemb_h = get_wemb_h(i_hds, l_hpu, l_hs, bert_config.hidden_size, bert_config.num_hidden_layers, all_encoder_layer,
num_out_layers_h)
return wemb_n, wemb_h, l_n, l_hpu, l_hs, \
nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds
# Greedily packs schema headers into groups so that each (question, headers)
# pair fits under max_seq_length after WordPiece tokenization; the question is
# repeated once per group. NOTE(review): generate_inputs is defined elsewhere.
def prepare_input(tokenizer, input_sequence, input_schema, max_seq_length):
nlu_t = []
hds = []
nlu_t1 = input_sequence
all_hds = input_schema.column_names_embedder_input
nlu_tt1 = []
for (i, token) in enumerate(nlu_t1):
nlu_tt1 += tokenizer.tokenize(token)
current_hds1 = []
for hds1 in all_hds:
new_hds1 = current_hds1 + [hds1]
tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1 = generate_inputs(tokenizer, nlu_tt1, new_hds1)
# Adding this header would overflow: flush the current group first.
if len(segment_ids1) > max_seq_length:
nlu_t.append(nlu_t1)
hds.append(current_hds1)
current_hds1 = [hds1]
else:
current_hds1 = new_hds1
# Flush the final (possibly partial) group.
if len(current_hds1) > 0:
nlu_t.append(nlu_t1)
hds.append(current_hds1)
return nlu_t, hds
# Variant of prepare_input: groups headers by table (prefix before '.') instead
# of by a fixed length budget, and returns the resulting max sequence length.
def prepare_input_v2(tokenizer, input_sequence, input_schema):
nlu_t = []
hds = []
max_seq_length = 0
nlu_t1 = input_sequence
all_hds = input_schema.column_names_embedder_input
nlu_tt1 = []
for (i, token) in enumerate(nlu_t1):
nlu_tt1 += tokenizer.tokenize(token)
current_hds1 = []
current_table = ''
for hds1 in all_hds:
hds1_table = hds1.split('.')[0].strip()
if hds1_table == current_table:
current_hds1.append(hds1)
else:
# New table: flush the previous table's headers as one group.
# NOTE(review): the very first iteration flushes an empty group only if
# the first header's table differs from '' — confirm that is intended.
tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1 = generate_inputs(tokenizer, nlu_tt1, current_hds1)
max_seq_length = max(max_seq_length, len(segment_ids1))
nlu_t.append(nlu_t1)
hds.append(current_hds1)
current_hds1 = [hds1]
current_table = hds1_table
if len(current_hds1) > 0:
tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1 = generate_inputs(tokenizer, nlu_tt1, current_hds1)
max_seq_length = max(max_seq_length, len(segment_ids1))
nlu_t.append(nlu_t1)
hds.append(current_hds1)
return nlu_t, hds, max_seq_length
# Produces one mean-pooled BERT vector per original (pre-WordPiece) input token
# and per schema-column token, by averaging over each token's subword span.
def get_bert_encoding(bert_config, model_bert, tokenizer, input_sequence, input_schema, bert_input_version='v1', max_seq_length=512, num_out_layers_n=1, num_out_layers_h=1):
if bert_input_version == 'v1':
nlu_t, hds = prepare_input(tokenizer, input_sequence, input_schema, max_seq_length)
elif bert_input_version == 'v2':
nlu_t, hds, max_seq_length = prepare_input_v2(tokenizer, input_sequence, input_schema)
wemb_n, wemb_h, l_n, l_hpu, l_hs, nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds = get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n, num_out_layers_h)
t_to_tt_idx = t_to_tt_idx[0]
assert len(t_to_tt_idx) == len(input_sequence)
assert sum(len(t_to_tt_idx_hds1) for t_to_tt_idx_hds1 in t_to_tt_idx_hds) == len(input_schema.column_names_embedder_input)
assert list(wemb_h.size())[0] == len(input_schema.column_names_embedder_input)
utterance_states = []
# Average the subword vectors belonging to each original input token.
for i in range(len(t_to_tt_idx)):
start = t_to_tt_idx[i]
if i == len(t_to_tt_idx)-1:
end = l_n[0]
else:
end = t_to_tt_idx[i+1]
utterance_states.append(torch.mean(wemb_n[:,start:end,:], dim=[0,1]))
assert len(utterance_states) == len(input_sequence)
schema_token_states = []
cnt = -1
# Same subword-averaging, per token of each schema column name.
for t_to_tt_idx_hds1 in t_to_tt_idx_hds:
for t_to_tt_idx_hds11 in t_to_tt_idx_hds1:
cnt += 1
schema_token_states1 = []
for i in range(len(t_to_tt_idx_hds11)):
start = t_to_tt_idx_hds11[i]
if i == len(t_to_tt_idx_hds11)-1:
end = l_hpu[cnt]
else:
end = t_to_tt_idx_hds11[i+1]
schema_token_states1.append(torch.mean(wemb_h[cnt,start:end,:], dim=0))
assert len(schema_token_states1) == len(input_schema.column_names_embedder_input[cnt].split())
schema_token_states.append(schema_token_states1)
assert len(schema_token_states) == len(input_schema.column_names_embedder_input)
return utterance_states, schema_token_states | null
165,466 | from collections import namedtuple
import copy
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import torch_utils
from .token_predictor import PredictionInput, PredictionInputWithSchema, PredictionStepInputWithSchema
from .beam_search import BeamSearch
import data_util.snippets as snippet_handler
from . import embedder
from data_util.vocabulary import EOS_TOK, UNK_TOK
import math
The provided code snippet includes necessary dependencies for implementing the `flatten_distribution` function. Write a Python function `def flatten_distribution(distribution_map, probabilities)` to solve the following problem:
Flattens a probability distribution given a map of "unique" values. All values in distribution_map with the same value should get the sum of the probabilities. Arguments: distribution_map (list of str): List of values to get the probability for. probabilities (np.ndarray): Probabilities corresponding to the values in distribution_map. Returns: list, np.ndarray of the same size where probabilities for duplicates in distribution_map are given the sum of the probabilities in probabilities.
Here is the function:
def flatten_distribution(distribution_map, probabilities):
    """ Flattens a probability distribution given a map of "unique" values.

    All values in distribution_map with the same value get the sum of the
    probabilities of every occurrence of that value.

    Fixes over the previous version:
      * the output order is now deterministic (first-occurrence order); the
        old implementation used ``set()`` difference, whose order is
        arbitrary between runs,
      * removed a dead branch (``if token_name not in new_dist_map``) that
        could never fire because ``new_dist_map`` already contained every
        unique token.

    Arguments:
        distribution_map (list of str): Values to get the probability for.
        probabilities (sequence of float): Probabilities corresponding to the
            values in distribution_map.

    Returns:
        (list, list/np.ndarray): Unique values in first-occurrence order and
        their summed probabilities.  When there are no duplicates, the inputs
        are returned unchanged.
    """
    assert len(distribution_map) == len(probabilities)

    # Fast path: nothing to merge.
    if len(distribution_map) == len(set(distribution_map)):
        return distribution_map, probabilities

    # Accumulate probability mass per unique token, preserving the order in
    # which each token first appears.
    summed = {}
    ordered_tokens = []
    for token, prob in zip(distribution_map, probabilities):
        if token not in summed:
            summed[token] = 0.
            ordered_tokens.append(token)
        summed[token] += prob

    new_probs = [summed[token] for token in ordered_tokens]
    assert len(ordered_tokens) == len(new_probs)
    return ordered_tokens, new_probs
165,472 | import os
import sys
import numpy as np
import random
from parse_args import interpret_args
import data_util
from data_util import atis_data
from model.schema_interaction_model import SchemaInteractionATISModel
from logger import Logger
from model.model import ATISModel
from model_util import Metrics, evaluate_utterance_sample, evaluate_interaction_sample, \
train_epoch_with_utterances, train_epoch_with_interactions, evaluate_using_predicted_queries
import torch
import time
VALID_EVAL_METRICS = [Metrics.LOSS, Metrics.TOKEN_ACCURACY, Metrics.STRING_ACCURACY]
class Logger():
    """Simple append/read logger over a plain text file.

    Attributes:
        fileptr (file): File pointer for input/output.
        lines (list of str): The lines read from the log (read mode only).
    """
    def __init__(self, filename, option):
        # option is a standard open() mode; "r" preloads lines for searching.
        self.fileptr = open(filename, option)
        if option == "r":
            self.lines = self.fileptr.readlines()
        else:
            self.lines = []

    def put(self, string):
        """Writes to the file."""
        self.fileptr.write(string + "\n")
        self.fileptr.flush()

    def close(self):
        """Closes the logger."""
        self.fileptr.close()

    def findlast(self, identifier, default=0.):
        """Finds the last line in the log with a certain value."""
        # Search backwards so the most recent entry wins.  The value is the
        # second tab-separated field and is coerced to float/bool/str.
        for line in self.lines[::-1]:
            if line.lower().startswith(identifier):
                string = line.strip().split("\t")[1]
                if string.replace(".", "").isdigit():
                    return float(string)
                elif string.lower() == "true":
                    return True
                elif string.lower() == "false":
                    return False
                else:
                    return string
        return default

    def contains(self, string):
        """Determines whether the string is present in the log."""
        # Case-insensitive substring match over all lines.
        for line in self.lines[::-1]:
            if string.lower() in line.lower():
                return True
        return False

    def findlast_log_before(self, before_str):
        """Finds the last entry in the log before another entry."""
        # Walk backwards; once before_str is seen, collect lines until a
        # blank line terminates the entry.
        loglines = []
        in_line = False
        for line in self.lines[::-1]:
            if line.startswith(before_str):
                in_line = True
            elif in_line:
                loglines.append(line)
            if line.strip() == "" and in_line:
                return "".join(loglines[::-1])
        return "".join(loglines[::-1])
class Metrics(Enum):
    """Definitions of simple metrics to compute."""
    LOSS = 1                    # average decoding loss
    TOKEN_ACCURACY = 2          # per-token accuracy against the gold query
    STRING_ACCURACY = 3         # exact-match accuracy of the full query
    CORRECT_TABLES = 4          # execution result (table) matches gold
    STRICT_CORRECT_TABLES = 5   # stricter table comparison
    SEMANTIC_QUERIES = 6        # queries equivalent by execution semantics
    SYNTACTIC_QUERIES = 7       # queries equivalent by syntax
def train_epoch_with_utterances(batches,
                                model,
                                randomize=True):
    """Runs one training epoch over utterance-level batches.

    Inputs:
        batches (list of UtteranceBatch): The batches to train on.
        model (ATISModel): The model object.
        randomize (bool): Whether to shuffle the batch order first.

    Returns:
        float: Mean training loss over all batches.
    """
    if randomize:
        random.shuffle(batches)

    progbar = get_progressbar("train ", len(batches))
    progbar.start()

    loss_sum = 0.
    for step, utterance_batch in enumerate(batches):
        loss_sum += model.train_step(utterance_batch)
        progbar.update(step)
    progbar.finish()

    return loss_sum / len(batches)
def train_epoch_with_interactions(interaction_batches,
                                  params,
                                  model,
                                  randomize=True):
    """Trains model for a single epoch given batches of interactions.

    Inputs:
        interaction_batches (list of InteractionBatch): The batches to train on.
        params (namespace): Parameters to run with.
        model (ATISModel): Model to train.
        randomize (bool): Whether or not to randomize the order that batches
            are seen.

    Returns:
        float: Mean training loss over the epoch.
    """
    if randomize:
        random.shuffle(interaction_batches)
    progbar = get_progressbar("train ", len(interaction_batches))
    progbar.start()
    loss_sum = 0.

    for i, interaction_batch in enumerate(interaction_batches):
        # Bug fix: the original `print('i %d', i)` printed the raw format
        # string and the index as two separate values; interpolate properly.
        print('i %d' % i)
        # Interaction-level training always uses batch size 1.
        assert len(interaction_batch) == 1
        interaction = interaction_batch.items[0]

        # Known-problematic examples skipped during training.
        if interaction.identifier == "raw/atis2/12-1.1/ATIS2/TEXT/TEST/NOV92/770/5":
            continue
        # The original had two redundant baseball_1 checks (one guarded by
        # 'sparc' in the data directory); a single substring test covers both.
        if "baseball_1" in interaction.identifier:
            continue

        batch_loss = model.train_step(interaction, params.train_maximum_sql_length)
        loss_sum += batch_loss
        # Free cached GPU memory between interactions to avoid OOM.
        torch.cuda.empty_cache()
        progbar.update(i)

    progbar.finish()
    return loss_sum / len(interaction_batches)
def evaluate_utterance_sample(sample,
                              model,
                              max_generation_length,
                              name="",
                              gold_forcing=False,
                              metrics=None,
                              total_num=-1,
                              database_username="",
                              database_password="",
                              database_timeout=0,
                              write_results=False):
    """Evaluates a sample of utterance examples.

    Inputs:
        sample (list of Utterance): Examples to evaluate.
        model (ATISModel): Model to predict with.
        max_generation_length (int): Maximum length to generate.
        name (str): Name to log with.
        gold_forcing (bool): Whether to force the gold tokens during decoding.
        metrics (list of Metric): Metrics to evaluate with.
        total_num (int): Number to divide by when reporting results; defaults
            to len(sample) when negative.
        database_username (str): Username to use for executing queries.
        database_password (str): Password to use when executing queries.
        database_timeout (float): Timeout on queries when executing.
        write_results (bool): Whether to write the results to a file.

    Returns:
        (dict, None): Averaged metric values from construct_averages; the
        second element is always None for this variant.
    """
    assert metrics
    if total_num < 0:
        total_num = len(sample)
    # One running sum per requested metric; averaged at the end.
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with filename " + str(name) + "_predictions.json")
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    predictions = []
    for i, item in enumerate(sample):
        _, loss, predicted_seq = model.eval_step(
            item, max_generation_length, feed_gold_query=gold_forcing)
        # Normalize the loss by the gold query length.
        loss = loss / len(item.gold_query())
        predictions.append(predicted_seq)
        # Flatten (e.g. expand snippets) before string-level comparison.
        flat_sequence = item.flatten_sequence(predicted_seq)
        token_accuracy = torch_utils.per_token_accuracy(
            item.gold_query(), predicted_seq)
        if write_results:
            write_prediction(
                predictions_file,
                identifier=item.interaction.identifier,
                input_seq=item.input_sequence(),
                probability=0,
                prediction=predicted_seq,
                flat_prediction=flat_sequence,
                gold_query=item.gold_query(),
                flat_gold_queries=item.original_gold_queries(),
                gold_tables=item.gold_tables(),
                index_in_interaction=item.utterance_index,
                database_username=database_username,
                database_password=database_password,
                database_timeout=database_timeout)
        update_sums(metrics,
                    metrics_sums,
                    predicted_seq,
                    flat_sequence,
                    item.gold_query(),
                    item.original_gold_queries()[0],
                    gold_forcing,
                    loss,
                    token_accuracy,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout,
                    gold_table=item.gold_tables()[0])
        progbar.update(i)
    progbar.finish()
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), None
def evaluate_interaction_sample(sample,
                                model,
                                max_generation_length,
                                name="",
                                gold_forcing=False,
                                metrics=None,
                                total_num=-1,
                                database_username="",
                                database_password="",
                                database_timeout=0,
                                use_predicted_queries=False,
                                write_results=False,
                                use_gpu=False,
                                compute_metrics=False):
    """Evaluates a sample of interactions.

    Decodes every utterance of every interaction, either conditioned on the
    gold history or on the model's own predictions, accumulates the requested
    metrics, and optionally writes per-utterance predictions (with the ranked
    beam) to <name>_predictions.json.

    Returns:
        (dict, list): Averaged metric values and the flat list of
        per-utterance prediction tuples.
    """
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with file " + str(name + "_predictions.json"))
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    num_utterances = 0
    """
    ignore_with_gpu = [line.strip() for line in open(
        "data/cpu_full_interactions.txt").readlines()]
    """
    predictions = []
    # NOTE(review): the use_gpu parameter is overwritten here from the
    # command line, so the passed-in value is ignored — confirm intended.
    use_gpu = not ("--no_gpus" in sys.argv or "--no_gpus=1" in sys.argv)
    model.eval()
    for i, interaction in enumerate(sample):
        try:
            with torch.no_grad():
                if use_predicted_queries:
                    example_preds = model.predict_with_predicted_queries(
                        interaction,
                        max_generation_length)
                else:
                    example_preds = model.predict_with_gold_queries(
                        interaction,
                        max_generation_length,
                        feed_gold_query=gold_forcing)
                torch.cuda.empty_cache()
        except RuntimeError as exception:
            # Presumably an OOM or CUDA error; abort rather than report
            # partial results.
            print("Failed on interaction: " + str(interaction.identifier))
            print(exception)
            print("\n\n")
            exit()
        predictions.extend(example_preds)
        # One prediction per utterance, unless decoding produced nothing.
        assert len(example_preds) == len(
            interaction.interaction.utterances) or not example_preds
        for j, pred in enumerate(example_preds):
            num_utterances += 1
            sequence, loss, token_accuracy, _, decoder_results = pred
            if use_predicted_queries:
                # Gold data comes from the raw utterances when decoding was
                # conditioned on the model's own previous predictions.
                item = interaction.processed_utterances[j]
                original_utt = interaction.interaction.utterances[item.index]
                gold_query = original_utt.gold_query_to_use
                original_gold_query = original_utt.original_gold_query
                gold_table = original_utt.gold_sql_results
                gold_queries = [q[0] for q in original_utt.all_gold_queries]
                gold_tables = [q[1] for q in original_utt.all_gold_queries]
                index = item.index
            else:
                item = interaction.gold_utterances()[j]
                gold_query = item.gold_query()
                original_gold_query = item.original_gold_query()
                gold_table = item.gold_table()
                gold_queries = item.original_gold_queries()
                gold_tables = item.gold_tables()
                index = item.utterance_index
            if loss:
                # Normalize the loss by the gold query length.
                loss = loss / len(gold_query)
            flat_sequence = item.flatten_sequence(sequence)
            if write_results:
                # Rank the beam by descending score (negated, then sorted
                # ascending) before writing.
                ori_beam = decoder_results.beam
                beam = []
                for x in ori_beam:
                    beam.append((-x[0], item.flatten_sequence(x[1].sequence)))
                beam.sort()
                write_prediction(
                    predictions_file,
                    identifier=interaction.identifier,
                    input_seq=item.input_sequence(),
                    probability=decoder_results.probability,
                    prediction=sequence,
                    flat_prediction=flat_sequence,
                    gold_query=gold_query,
                    flat_gold_queries=gold_queries,
                    gold_tables=gold_tables,
                    index_in_interaction=index,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout,
                    compute_metrics=compute_metrics,
                    beam = beam)  # ranked beam included in the written output
            update_sums(metrics,
                        metrics_sums,
                        sequence,
                        flat_sequence,
                        gold_query,
                        original_gold_query,
                        gold_forcing,
                        loss,
                        token_accuracy,
                        database_username=database_username,
                        database_password=database_password,
                        database_timeout=database_timeout,
                        gold_table=gold_table)
        progbar.update(i)
    progbar.finish()
    if total_num < 0:
        total_num = num_utterances
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), predictions
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(model, data, params)` to solve the following problem:
Trains a model. Inputs: model (ATISModel): The model to train. data (ATISData): The data that is used to train. params (namespace): Training parameters.
Here is the function:
def train(model, data, params):
    """ Trains a model with early stopping on validation string accuracy.

    Inputs:
        model (ATISModel): The model to train.
        data (ATISData): The data that is used to train.
        params (namespace): Training parameters.

    Returns:
        str: Path of the last checkpoint written ("" if none was ever saved).
    """
    # Get the training batches.
    log = Logger(os.path.join(params.logdir, params.logfile), "a+")
    num_train_original = atis_data.num_utterances(data.train_data)

    # Select utterance- or interaction-level data access / eval functions.
    eval_fn = evaluate_utterance_sample
    trainbatch_fn = data.get_utterance_batches
    trainsample_fn = data.get_random_utterances
    validsample_fn = data.get_all_utterances
    batch_size = params.batch_size
    if params.interaction_level:
        batch_size = 1
        eval_fn = evaluate_interaction_sample
        trainbatch_fn = data.get_interaction_batches
        trainsample_fn = data.get_random_interactions
        validsample_fn = data.get_all_interactions

    maximum_output_length = params.train_maximum_sql_length
    train_batches = trainbatch_fn(batch_size,
                                  max_output_length=maximum_output_length,
                                  randomize=not params.deterministic)
    if params.num_train >= 0:
        train_batches = train_batches[:params.num_train]
    # training_sample is kept (even though the gold-passing train eval was
    # disabled) so the data/RNG access pattern matches the original run.
    training_sample = trainsample_fn(params.train_evaluation_size,
                                     max_output_length=maximum_output_length)
    valid_examples = validsample_fn(data.valid_data,
                                    max_output_length=maximum_output_length)
    num_train_examples = sum([len(batch) for batch in train_batches])
    num_steps_per_epoch = len(train_batches)

    # Bookkeeping for early stopping / learning-rate decay.
    epochs = 0
    patience = params.initial_patience
    learning_rate_coefficient = 1.
    previous_epoch_loss = float('inf')
    maximum_string_accuracy = 0.
    countdown = int(patience)
    # Bug fix: initialized up front so a run that never improves still
    # returns a defined value (the original could raise UnboundLocalError
    # at the final return).
    last_save_file = ""

    if params.scheduler:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model.trainer, mode='min', )

    keep_training = True
    while keep_training:
        log.put("Epoch:\t" + str(epochs))
        model.set_dropout(params.dropout_amount)

        if not params.scheduler:
            model.set_learning_rate(learning_rate_coefficient * params.initial_learning_rate)

        # Run a training step.
        if params.interaction_level:
            epoch_loss = train_epoch_with_interactions(
                train_batches,
                params,
                model,
                randomize=not params.deterministic)
        else:
            epoch_loss = train_epoch_with_utterances(
                train_batches,
                model,
                randomize=not params.deterministic)

        log.put("train epoch loss:\t" + str(epoch_loss))
        model.set_dropout(0.)

        # Run an evaluation step on the validation set (gold forcing).
        valid_eval_results = eval_fn(valid_examples,
                                     model,
                                     params.eval_maximum_sql_length,
                                     name=os.path.join(params.logdir, "valid-eval"),
                                     write_results=True,
                                     gold_forcing=True,
                                     metrics=VALID_EVAL_METRICS)[0]
        for name, value in valid_eval_results.items():
            log.put("valid gold-passing " + name.name + ":\t" + "%.2f" % value)

        valid_loss = valid_eval_results[Metrics.LOSS]
        valid_token_accuracy = valid_eval_results[Metrics.TOKEN_ACCURACY]
        string_accuracy = valid_eval_results[Metrics.STRING_ACCURACY]
        # Bug fix: removed `assert string_accuracy >= 20`, which aborted any
        # run whose early validation accuracy was below 20%.

        if params.scheduler:
            scheduler.step(valid_loss)

        # Manual learning-rate decay when validation loss regresses.
        if valid_loss > previous_epoch_loss:
            learning_rate_coefficient *= params.learning_rate_ratio
            log.put(
                "learning rate coefficient:\t" +
                str(learning_rate_coefficient))
        previous_epoch_loss = valid_loss

        # Save a checkpoint and extend patience on a new best accuracy.
        # (The original guarded this with a `saved` flag that was always
        # False; the flag was redundant and has been removed.)
        if string_accuracy > maximum_string_accuracy:
            maximum_string_accuracy = string_accuracy
            patience = patience * params.patience_ratio
            countdown = int(patience)
            last_save_file = os.path.join(params.logdir, "save_" + str(epochs))
            model.save(last_save_file)

            log.put(
                "maximum string accuracy:\t" +
                str(maximum_string_accuracy))
            log.put("patience:\t" + str(patience))
            log.put("save file:\t" + str(last_save_file))

        if countdown <= 0:
            keep_training = False

        countdown -= 1
        log.put("countdown:\t" + str(countdown))
        log.put("")

        epochs += 1

    log.put("Finished training!")
    log.close()

    return last_save_file
165,473 | import os
import sys
import numpy as np
import random
from parse_args import interpret_args
import data_util
from data_util import atis_data
from model.schema_interaction_model import SchemaInteractionATISModel
from logger import Logger
from model.model import ATISModel
from model_util import Metrics, evaluate_utterance_sample, evaluate_interaction_sample, \
train_epoch_with_utterances, train_epoch_with_interactions, evaluate_using_predicted_queries
import torch
import time
FINAL_EVAL_METRICS = [Metrics.STRING_ACCURACY, Metrics.TOKEN_ACCURACY]
def evaluate_utterance_sample(sample,
                              model,
                              max_generation_length,
                              name="",
                              gold_forcing=False,
                              metrics=None,
                              total_num=-1,
                              database_username="",
                              database_password="",
                              database_timeout=0,
                              write_results=False):
    """Evaluates a sample of utterance examples.

    Inputs:
        sample (list of Utterance): Examples to evaluate.
        model (ATISModel): Model to predict with.
        max_generation_length (int): Maximum length to generate.
        name (str): Name to log with.
        gold_forcing (bool): Whether to force the gold tokens during decoding.
        metrics (list of Metric): Metrics to evaluate with.
        total_num (int): Number to divide by when reporting results; defaults
            to len(sample) when negative.
        database_username (str): Username to use for executing queries.
        database_password (str): Password to use when executing queries.
        database_timeout (float): Timeout on queries when executing.
        write_results (bool): Whether to write the results to a file.

    Returns:
        (dict, None): Averaged metric values from construct_averages; the
        second element is always None for this variant.
    """
    assert metrics
    if total_num < 0:
        total_num = len(sample)
    # One running sum per requested metric; averaged at the end.
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with filename " + str(name) + "_predictions.json")
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    predictions = []
    for i, item in enumerate(sample):
        _, loss, predicted_seq = model.eval_step(
            item, max_generation_length, feed_gold_query=gold_forcing)
        # Normalize the loss by the gold query length.
        loss = loss / len(item.gold_query())
        predictions.append(predicted_seq)
        # Flatten (e.g. expand snippets) before string-level comparison.
        flat_sequence = item.flatten_sequence(predicted_seq)
        token_accuracy = torch_utils.per_token_accuracy(
            item.gold_query(), predicted_seq)
        if write_results:
            write_prediction(
                predictions_file,
                identifier=item.interaction.identifier,
                input_seq=item.input_sequence(),
                probability=0,
                prediction=predicted_seq,
                flat_prediction=flat_sequence,
                gold_query=item.gold_query(),
                flat_gold_queries=item.original_gold_queries(),
                gold_tables=item.gold_tables(),
                index_in_interaction=item.utterance_index,
                database_username=database_username,
                database_password=database_password,
                database_timeout=database_timeout)
        update_sums(metrics,
                    metrics_sums,
                    predicted_seq,
                    flat_sequence,
                    item.gold_query(),
                    item.original_gold_queries()[0],
                    gold_forcing,
                    loss,
                    token_accuracy,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout,
                    gold_table=item.gold_tables()[0])
        progbar.update(i)
    progbar.finish()
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), None
def evaluate_interaction_sample(sample,
                                model,
                                max_generation_length,
                                name="",
                                gold_forcing=False,
                                metrics=None,
                                total_num=-1,
                                database_username="",
                                database_password="",
                                database_timeout=0,
                                use_predicted_queries=False,
                                write_results=False,
                                use_gpu=False,
                                compute_metrics=False):
    """Evaluates a sample of interactions.

    Decodes every utterance of every interaction, either conditioned on the
    gold history or on the model's own predictions, accumulates the requested
    metrics, and optionally writes per-utterance predictions (with the ranked
    beam) to <name>_predictions.json.

    Returns:
        (dict, list): Averaged metric values and the flat list of
        per-utterance prediction tuples.
    """
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with file " + str(name + "_predictions.json"))
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    num_utterances = 0
    """
    ignore_with_gpu = [line.strip() for line in open(
        "data/cpu_full_interactions.txt").readlines()]
    """
    predictions = []
    # NOTE(review): the use_gpu parameter is overwritten here from the
    # command line, so the passed-in value is ignored — confirm intended.
    use_gpu = not ("--no_gpus" in sys.argv or "--no_gpus=1" in sys.argv)
    model.eval()
    for i, interaction in enumerate(sample):
        try:
            with torch.no_grad():
                if use_predicted_queries:
                    example_preds = model.predict_with_predicted_queries(
                        interaction,
                        max_generation_length)
                else:
                    example_preds = model.predict_with_gold_queries(
                        interaction,
                        max_generation_length,
                        feed_gold_query=gold_forcing)
                torch.cuda.empty_cache()
        except RuntimeError as exception:
            # Presumably an OOM or CUDA error; abort rather than report
            # partial results.
            print("Failed on interaction: " + str(interaction.identifier))
            print(exception)
            print("\n\n")
            exit()
        predictions.extend(example_preds)
        # One prediction per utterance, unless decoding produced nothing.
        assert len(example_preds) == len(
            interaction.interaction.utterances) or not example_preds
        for j, pred in enumerate(example_preds):
            num_utterances += 1
            sequence, loss, token_accuracy, _, decoder_results = pred
            if use_predicted_queries:
                # Gold data comes from the raw utterances when decoding was
                # conditioned on the model's own previous predictions.
                item = interaction.processed_utterances[j]
                original_utt = interaction.interaction.utterances[item.index]
                gold_query = original_utt.gold_query_to_use
                original_gold_query = original_utt.original_gold_query
                gold_table = original_utt.gold_sql_results
                gold_queries = [q[0] for q in original_utt.all_gold_queries]
                gold_tables = [q[1] for q in original_utt.all_gold_queries]
                index = item.index
            else:
                item = interaction.gold_utterances()[j]
                gold_query = item.gold_query()
                original_gold_query = item.original_gold_query()
                gold_table = item.gold_table()
                gold_queries = item.original_gold_queries()
                gold_tables = item.gold_tables()
                index = item.utterance_index
            if loss:
                # Normalize the loss by the gold query length.
                loss = loss / len(gold_query)
            flat_sequence = item.flatten_sequence(sequence)
            if write_results:
                # Rank the beam by descending score (negated, then sorted
                # ascending) before writing.
                ori_beam = decoder_results.beam
                beam = []
                for x in ori_beam:
                    beam.append((-x[0], item.flatten_sequence(x[1].sequence)))
                beam.sort()
                write_prediction(
                    predictions_file,
                    identifier=interaction.identifier,
                    input_seq=item.input_sequence(),
                    probability=decoder_results.probability,
                    prediction=sequence,
                    flat_prediction=flat_sequence,
                    gold_query=gold_query,
                    flat_gold_queries=gold_queries,
                    gold_tables=gold_tables,
                    index_in_interaction=index,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout,
                    compute_metrics=compute_metrics,
                    beam = beam)  # ranked beam included in the written output
            update_sums(metrics,
                        metrics_sums,
                        sequence,
                        flat_sequence,
                        gold_query,
                        original_gold_query,
                        gold_forcing,
                        loss,
                        token_accuracy,
                        database_username=database_username,
                        database_password=database_password,
                        database_timeout=database_timeout,
                        gold_table=gold_table)
        progbar.update(i)
    progbar.finish()
    if total_num < 0:
        total_num = num_utterances
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), predictions
def evaluate_using_predicted_queries(sample,
                                     model,
                                     name="",
                                     gold_forcing=False,
                                     metrics=None,
                                     total_num=-1,
                                     database_username="",
                                     database_password="",
                                     database_timeout=0,
                                     snippet_keep_age=1):
    """Evaluates interactions by feeding the model its own predictions.

    Each utterance is decoded in turn; a prediction is kept as interaction
    history only when it executes and is sufficiently probable, otherwise an
    empty query is recorded.  Writes one JSON entry per utterance to
    <name>_predictions.json.

    Returns:
        (dict, list): Averaged metric values and per-interaction lists of
        (utterance, predicted_sequence) tuples.
    """
    predictions_file = open(name + "_predictions.json", "w")
    print("Predicting with file " + str(name + "_predictions.json"))
    # Gold forcing makes no sense when conditioning on predicted queries.
    assert not gold_forcing
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    # NOTE(review): num_utterances is never incremented below, so when
    # total_num < 0 the final averaging divides by 0 — confirm callers
    # always pass an explicit total_num.
    num_utterances = 0
    predictions = []
    for i, item in enumerate(sample):
        int_predictions = []
        item.start_interaction()
        while not item.done():
            utterance = item.next_utterance(snippet_keep_age)
            predicted_sequence, loss, _, probability = model.eval_step(
                utterance)
            int_predictions.append((utterance, predicted_sequence))
            flat_sequence = utterance.flatten_sequence(predicted_sequence)
            # Only trust the prediction as context when it executes and the
            # model is confident enough (0.24 probability threshold).
            if sql_util.executable(
                    flat_sequence,
                    username=database_username,
                    password=database_password,
                    timeout=database_timeout) and probability >= 0.24:
                utterance.set_pred_query(
                    item.remove_snippets(predicted_sequence))
                item.add_utterance(utterance,
                                   item.remove_snippets(predicted_sequence),
                                   previous_snippets=utterance.snippets())
            else:
                # Add the /previous/ predicted query, guaranteed to be syntactically
                # correct
                seq = []
                utterance.set_pred_query(seq)
                item.add_utterance(
                    utterance, seq, previous_snippets=utterance.snippets())
            original_utt = item.interaction.utterances[utterance.index]
            write_prediction(
                predictions_file,
                identifier=item.interaction.identifier,
                input_seq=utterance.input_sequence(),
                probability=probability,
                prediction=predicted_sequence,
                flat_prediction=flat_sequence,
                gold_query=original_utt.gold_query_to_use,
                flat_gold_queries=[
                    q[0] for q in original_utt.all_gold_queries],
                gold_tables=[
                    q[1] for q in original_utt.all_gold_queries],
                index_in_interaction=utterance.index,
                database_username=database_username,
                database_password=database_password,
                database_timeout=database_timeout)
            update_sums(metrics,
                        metrics_sums,
                        predicted_sequence,
                        flat_sequence,
                        original_utt.gold_query_to_use,
                        original_utt.original_gold_query,
                        gold_forcing,
                        loss,
                        token_accuracy=0,
                        database_username=database_username,
                        database_password=database_password,
                        database_timeout=database_timeout,
                        gold_table=original_utt.gold_sql_results)
        predictions.append(int_predictions)
        progbar.update(i)
    progbar.finish()
    if total_num < 0:
        total_num = num_utterances
    predictions_file.close()
    return construct_averages(metrics_sums, total_num), predictions
The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(model, data, params, last_save_file, split)` to solve the following problem:
Evaluates a pretrained model on a dataset. Inputs: model (ATISModel): Model class. data (ATISData): All of the data. params (namespace): Parameters for the model. last_save_file (str): Location where the model save file is.
Here is the function:
def evaluate(model, data, params, last_save_file, split):
    """Evaluates a pretrained model on a dataset.

    Inputs:
        model (ATISModel): Model class.
        data (ATISData): All of the data.
        params (namespace): Parameters for the model.
        last_save_file (str): Location where the model save file is.
        split (str): Which split to evaluate: 'train', 'dev', 'test', 'valid'.

    Raises:
        ValueError: If no save file is available or the split is unknown.
    """
    if last_save_file:
        model.load(last_save_file)
    else:
        if not params.save_file:
            raise ValueError(
                "Must provide a save file name if not training first.")
        model.load(params.save_file)

    filename = split
    if filename == 'dev':
        split = data.dev_data
    elif filename == 'train':
        split = data.train_data
    elif filename == 'test':
        split = data.test_data
    elif filename == 'valid':
        split = data.valid_data
    else:
        raise ValueError("Split not recognized: " + str(params.evaluate_split))

    # Bug fix: remember the raw split name now.  The original compared the
    # decorated filename (after the "_use_*_queries" suffix was appended)
    # to 'train', so that branch could never be taken.
    is_train_split = filename == 'train'

    if params.use_predicted_queries:
        filename += "_use_predicted_queries"
    else:
        filename += "_use_gold_queries"

    if is_train_split:
        full_name = os.path.join(params.logdir, filename) + params.results_note
    else:
        full_name = os.path.join("results", params.save_file.split('/')[-1]) + params.results_note

    if params.interaction_level or params.use_predicted_queries:
        examples = data.get_all_interactions(split)
        if params.interaction_level:
            evaluate_interaction_sample(
                examples,
                model,
                name=full_name,
                metrics=FINAL_EVAL_METRICS,
                total_num=atis_data.num_utterances(split),
                database_username=params.database_username,
                database_password=params.database_password,
                database_timeout=params.database_timeout,
                use_predicted_queries=params.use_predicted_queries,
                max_generation_length=params.eval_maximum_sql_length,
                write_results=True,
                use_gpu=True,
                compute_metrics=params.compute_metrics)
        else:
            evaluate_using_predicted_queries(
                examples,
                model,
                name=full_name,
                metrics=FINAL_EVAL_METRICS,
                total_num=atis_data.num_utterances(split),
                database_username=params.database_username,
                database_password=params.database_password,
                database_timeout=params.database_timeout)
    else:
        examples = data.get_all_utterances(split)
        evaluate_utterance_sample(
            examples,
            model,
            name=full_name,
            gold_forcing=False,
            metrics=FINAL_EVAL_METRICS,
            total_num=atis_data.num_utterances(split),
            max_generation_length=params.eval_maximum_sql_length,
            database_username=params.database_username,
            database_password=params.database_password,
            database_timeout=params.database_timeout,
            write_results=True)
165,474 | import os
import sys
import numpy as np
import random
from parse_args import interpret_args
import data_util
from data_util import atis_data
from model.schema_interaction_model import SchemaInteractionATISModel
from logger import Logger
from model.model import ATISModel
from model_util import Metrics, evaluate_utterance_sample, evaluate_interaction_sample, \
train_epoch_with_utterances, train_epoch_with_interactions, evaluate_using_predicted_queries
import torch
import time
def init_env(params):
    """Seeds every RNG (torch, CUDA, python, numpy) for reproducibility.

    A seed of 0 is replaced by a time-derived seed (mod 1000), which is
    written back to params.seed.  Also pins cuDNN to deterministic mode and
    fixes the Python hash seed.
    """
    if params.seed == 0:
        params.seed = int(time.time()) % 1000
    seed_value = params.seed

    # Torch CPU and (all) GPU generators.
    torch.manual_seed(seed_value)
    torch.cuda.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Python / numpy generators and the interpreter hash seed.
    random.seed(seed_value)
    np.random.seed(seed_value)
    os.environ["PYTHONHASHSEED"] = str(seed_value)
165,475 | import argparse
import os
import sys
import pickle
import json
import shutil
import sqlparse
from postprocess_eval import get_candidate_tables
def write_interaction(interaction_list, split, output_dir):
    """Serializes interactions to <split>.json and <split>.pkl.

    The JSON file holds one pretty-printed object per interaction; the pickle
    holds the same interactions with each utterance's SQL string replaced by
    a single-element list of (tokens, empty-results) pairs.  Note: the input
    objects are mutated in place when building the pickle.
    """
    json_path = os.path.join(output_dir, split + '.json')
    pkl_path = os.path.join(output_dir, split + '.pkl')

    # Human-readable dump.
    with open(json_path, 'w') as json_file:
        for interaction in interaction_list:
            json.dump(interaction, json_file, indent=4)
            json_file.write('\n')

    # Pickled dump with tokenized SQL.
    pickled_objs = []
    for obj in interaction_list:
        rebuilt = []
        for utterance in obj["interaction"]:
            utterance["sql"] = [(utterance["sql"].split(), [])]
            rebuilt.append(utterance)
        obj["interaction"] = rebuilt
        pickled_objs.append(obj)

    with open(pkl_path, 'wb') as pkl_file:
        pickle.dump(pickled_objs, pkl_file)
    return
def read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas_dict):
    """Loads a tables.json schema file and fills the three provided maps.

    For every database: schema_tokens[db_id] gets the lowercased surface
    forms ("table.column", bare "*", plus one "table.*" per table),
    column_names[db_id] gets the lowercased plain column names, and
    database_schemas_dict[db_id] gets the raw schema object.  All three maps
    are mutated in place and also returned.
    """
    with open(database_schema_filename) as schema_file:
        database_schemas = json.load(schema_file)

    def tokens_for(table_schema):
        # Build the surface-form and plain-name lists for one database.
        surface_forms = []
        plain_names = []
        table_names_original = table_schema['table_names_original']
        for table_id, column_name in table_schema['column_names_original']:
            if table_id >= 0:
                surface = '{}.{}'.format(table_names_original[table_id], column_name)
            else:
                # table_id == -1 is the '*' pseudo-column.
                surface = column_name
            surface_forms.append(surface.lower())
            plain_names.append(column_name.lower())
        # Every table also contributes a "table.*" wildcard.
        for table_name in table_names_original:
            surface_forms.append('{}.*'.format(table_name.lower()))
        return surface_forms, plain_names

    for table_schema in database_schemas:
        database_id = table_schema['db_id']
        database_schemas_dict[database_id] = table_schema
        schema_tokens[database_id], column_names[database_id] = tokens_for(table_schema)

    return schema_tokens, column_names, database_schemas_dict
def read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Reads the Spider train and dev splits into one interaction map."""
    interaction_list = {}
    for split_file in ('train.json', 'dev.json'):
        split_path = os.path.join(spider_dir, split_file)
        interaction_list = read_spider_split(split_path, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    return interaction_list
def read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Reads the SParC train and dev (no-value) splits into one interaction map."""
    interaction_list = {}
    for split_file in ('train_no_value.json', 'dev_no_value.json'):
        split_path = os.path.join(sparc_dir, split_file)
        interaction_list = read_data_json(split_path, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    return interaction_list
def read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Load the CoSQL train and dev splits into one interaction map.

    Returns the db_id -> interactions dict accumulated by ``read_data_json``
    over ``train.json`` followed by ``dev.json``.
    """
    interaction_list = {}
    for split_filename in ('train.json', 'dev.json'):
        split_path = os.path.join(cosql_dir, split_filename)
        interaction_list = read_data_json(
            split_path, interaction_list, database_schemas, column_names,
            output_vocab, schema_tokens, remove_from)
    return interaction_list
def read_db_split(data_dir):
    """Read the train/dev database-id lists of a corpus directory.

    Expects ``train_db_ids.txt`` and ``dev_db_ids.txt`` inside *data_dir*,
    one database id per line. Returns ``(train_ids, dev_ids)`` as lists of
    stripped lines (order preserved).
    """
    def _read_ids(filename):
        # One id per line; strip() also drops the trailing newline.
        with open(os.path.join(data_dir, filename)) as handle:
            return [line.strip() for line in handle]

    return _read_ids('train_db_ids.txt'), _read_ids('dev_db_ids.txt')
def preprocess(dataset, remove_from=False):
    """Preprocess one text-to-SQL corpus into the editsql data format.

    Reads the corpus split files and database schemas, then writes
    ``tables.json`` plus train/dev interaction files into the corpus'
    output directory (which is wiped and recreated).

    Args:
        dataset: one of 'spider', 'sparc', 'cosql'.
        remove_from: if True, use the reduced output vocabulary (FROM-clause
            tokens removed) and the ``*_removefrom`` output directory.

    Raises:
        ValueError: if *dataset* is not one of the supported corpora.
    """
    # Decoder output vocabulary; the remove_from variant drops FROM/JOIN/alias
    # tokens and uses merged keywords such as 'group_by' / 'limit_value'.
    output_vocab = ['_UNK', '_EOS', '.', 't1', 't2', '=', 'select', 'from', 'as', 'value', 'join', 'on', ')', '(', 'where', 't3', 'by', ',', 'count', 'group', 'order', 'distinct', 't4', 'and', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 't5', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!', 'union', 'between', 't6', '-', 't7', '+', '/']
    if remove_from:
        output_vocab = ['_UNK', '_EOS', '=', 'select', 'value', ')', '(', 'where', ',', 'count', 'group_by', 'order_by', 'distinct', 'and', 'limit_value', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!=', 'union', 'between', '-', '+', '/']
    print('size of output_vocab', len(output_vocab))
    print('output_vocab', output_vocab)
    print()

    # Resolve corpus-specific paths and the train/dev database split.
    if dataset == 'spider':
        spider_dir = 'data/spider/'
        database_schema_filename = 'data/spider/tables.json'
        output_dir = 'data/spider_data'
        if remove_from:
            output_dir = 'data/spider_data_removefrom'
        train_database, dev_database = read_db_split(spider_dir)
    elif dataset == 'sparc':
        sparc_dir = 'data/sparc/'
        database_schema_filename = 'data/sparc/tables.json'
        output_dir = 'data/sparc_data'
        if remove_from:
            output_dir = 'data/sparc_data_removefrom'
        train_database, dev_database = read_db_split(sparc_dir)
    elif dataset == 'cosql':
        cosql_dir = 'data/cosql/'
        database_schema_filename = 'data/cosql/tables.json'
        output_dir = 'data/cosql_data'
        if remove_from:
            output_dir = 'data/cosql_data_removefrom'
        train_database, dev_database = read_db_split(cosql_dir)
    else:
        # BUG FIX: an unknown dataset used to fall through and crash later
        # with a NameError on train_database; fail fast with a clear error.
        raise ValueError("unknown dataset: {!r} (expected 'spider', 'sparc' or 'cosql')".format(dataset))

    # Start from a clean output directory.
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)

    schema_tokens = {}
    column_names = {}
    database_schemas = {}

    print('Reading spider database schema file')
    schema_tokens, column_names, database_schemas = read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas)
    num_database = len(schema_tokens)
    print('num_database', num_database, len(train_database), len(dev_database))
    print('total number of schema_tokens / databases:', len(schema_tokens))

    # Re-serialize the (possibly normalised) schemas next to the outputs.
    output_database_schema_filename = os.path.join(output_dir, 'tables.json')
    with open(output_database_schema_filename, 'w') as outfile:
        json.dump([v for k, v in database_schemas.items()], outfile, indent=4)

    if dataset == 'spider':
        interaction_list = read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif dataset == 'sparc':
        interaction_list = read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif dataset == 'cosql':
        interaction_list = read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    print('interaction_list length', len(interaction_list))

    # Split interactions by database id: anything not in the dev split trains.
    train_interaction = []
    for database_id in interaction_list:
        if database_id not in dev_database:
            train_interaction += interaction_list[database_id]
    dev_interaction = []
    for database_id in dev_database:
        dev_interaction += interaction_list[database_id]
    print('train interaction: ', len(train_interaction))
    print('dev interaction: ', len(dev_interaction))

    write_interaction(train_interaction, 'train', output_dir)
    write_interaction(dev_interaction, 'dev', output_dir)
    return
165,520 | import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def count_agg(units):
def count_others(sql):
    """Count 'other' complexity indicators of a parsed SQL tree.

    One point each for: more than one aggregation anywhere, more than one
    SELECT column, more than one WHERE condition entry, and more than one
    GROUP BY clause. Used by the hardness bucketing.
    """
    # Aggregations across SELECT, WHERE (cond units sit at even indices),
    # GROUP BY, ORDER BY value units and HAVING.
    agg_count = count_agg(sql['select'][1])
    agg_count += count_agg(sql['where'][::2])
    agg_count += count_agg(sql['groupBy'])
    if sql['orderBy']:
        order_units = sql['orderBy'][1]
        agg_count += count_agg([u[1] for u in order_units if u[1]] +
                               [u[2] for u in order_units if u[2]])
    agg_count += count_agg(sql['having'])

    indicators = (
        agg_count > 1,
        len(sql['select'][1]) > 1,
        len(sql['where']) > 1,
        len(sql['groupBy']) > 1,
    )
    return sum(1 for hit in indicators if hit)
165,544 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
stop_word = set(['_UNK', '_EOS', 'select', 'value', ')', '(', 'where', '=', ',', 'count', 'group_by', 'order_by', 'limit_value', 'desc', '>', 'distinct', 'and', 'avg', 'having', '<', 'in', 'sum', 'max', 'asc', 'not', 'or', 'like', 'min', 'intersect', 'except', '!=', 'union', 'between', '-', '+', '0'])
stop_word |= set(CLAUSE_KEYWORDS)
stop_word |= set(JOIN_KEYWORDS)
stop_word |= set(WHERE_OPS)
stop_word |= set(UNIT_OPS)
stop_word |= set(AGG_OPS)
stop_word |= set(COND_OPS)
stop_word |= set(SQL_OPS)
stop_word |= set(ORDER_OPS)
class Evaluator:
def __init__(self):
def eval_hardness(self, sql):
def eval_exact_match(self, pred, label):
def eval_partial_match(self, pred, label):
def print_scores(scores, etype):
def cmp(sql1, sql2, kmap, evaluator2, schema):
def eval_exec_match(db, p_str, g_str, pred, gold):
def rebuild_sql_val(sql):
def build_valid_col_units(table_units, schema):
def rebuild_sql_col(valid_col_units, sql, kmap):
class Schema:
def __init__(self, schema):
def schema(self):
def idMap(self):
def _map(self, schema):
def get_schema(db):
def get_sql(schema, query):
def evaluate(gold, predict, db_dir, etype, kmaps):
with open(gold) as f:
glist = []
gseq_one = []
for l in f.readlines():
if len(l.strip()) == 0:
glist.append(gseq_one)
gseq_one = []
else:
lstrip = l.strip().split('\t')
gseq_one.append(lstrip)
#glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
with open(predict) as f:
plist = []
pseq_one = []
p_socre_list = []
pseq_score_one = []
question_list = []
question_one = []
while True:
l = f.readline()
if l == "":
break
if len(l.strip()) == 0:
plist.append(pseq_one)
pseq_one = []
p_socre_list.append(pseq_score_one)
pseq_score_one = []
question_list.append(question_one)
question_one = []
else:
x = l.strip().split('\t')
pseq_one.append(x)
l2 = f.readline()
y = l2.strip().split('\t')
y = [math.exp(-float(s)) for s in y]
assert len(x) == len(y)
pseq_score_one.append(y)
question_one.append(f.readline().strip())
#print('len(x)', len(x))
#plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
# plist = [[("select product_type_code from products group by product_type_code order by count ( * ) desc limit value", "orchestra")]]
# glist = [[("SELECT product_type_code FROM Products GROUP BY product_type_code ORDER BY count(*) DESC LIMIT 1", "customers_and_orders")]]
evaluator = Evaluator()
evaluator2 = Evaluator()
turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
'group', 'order', 'and/or', 'IUEN', 'keywords']
entries = []
scores = {}
for turn in turns:
scores[turn] = {'count': 0, 'exact': 0.}
scores[turn]['exec'] = 0
for level in levels:
scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
scores[level]['exec'] = 0
for type_ in partial_types:
scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
eval_err_num = 0
n1 = 0
n2 = 0
n3 = 0
predict_file = open("./predict_en.txt", "w")
for p, g, s, questions in zip(plist, glist, p_socre_list, question_list):
scores['joint_all']['count'] += 1
turn_scores = {"exec": [], "exact": []}
predict_str = ''
for idx, pgs in enumerate(zip(p, g, s, questions)):
p, g, s, question = pgs
#p_str = p[0]
#p_str = p_str.replace("value", "1")
g_str, db = g
db_name = db
db = os.path.join(db_dir, db, db + ".sqlite")
schema = Schema(get_schema(db))
try:
g_sql = get_sql(schema, g_str)
except:
continue
hardness = evaluator.eval_hardness(g_sql)
ori_idx = idx
if idx > 3:
idx = ">4"
else:
idx += 1
turn_id = "turn " + str(idx)
scores[turn_id]['count'] += 1
scores[hardness]['count'] += 1
scores['all']['count'] += 1
p_sql = None
flag = False
p_sql_socre = []
for p_str, s in zip(p, s):
cur_s = s
flag2 = False
try:
p_str = p_str.replace("value", "1")
p_sql = get_sql(schema, p_str)
flag2 = True
except:
pass
if flag2:
vis = set()
for ss in p_str.split(' '):
ss = ss.lower()
if ss == 'from':
break
if ss in stop_word:
continue
if ss in vis:
flag2 = False
for fk in ['none', 'max', 'min', 'count', 'sum', 'avg']:
if fk in p_str.lower():
flag2 = True
break
if flag2:
break
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
pass
break
vis.add(ss)
if flag2 is False:
continue
slist = p_str.lower().split(' ')
for i in range(len(slist)-2):
ss = slist[i]
if slist[i+1] == '=' and slist[i+2] == '1':
if ss in vis:
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
pass
flag2 = False
break
if flag2 == False:
continue
flag = False
for i in range(len(p_sql_socre)):
sql1 = p_sql_socre[i][0]
if cmp(sql1, p_sql, kmaps[db_name], evaluator2, schema):
#print('+++')
p_sql_socre[i] = (sql1, (p_sql_socre[i][1][0]+cur_s, p_sql_socre[i][1][1]) )
flag = True
if cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema):
assert cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema)
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
assert cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema)
break
if flag == False:
p_sql_socre.append( (p_sql, (cur_s, p_str) ) )
p_sql = None
max_socre = -100
p_str = "error"
for i in range(len(p_sql_socre)):
sql1 = p_sql_socre[i][0]
cur_s = p_sql_socre[i][1][0]
cur_p_str = p_sql_socre[i][1][1]
if p_sql == None or max_socre < cur_s:
p_sql = sql1
max_socre = cur_s
p_str = cur_p_str
if False and p_sql is None:
print('p', p)
print('s', s)
for pi in p:
if p_sql == None or len(p_str.split(' ')) < len(pi.split(' ')):
try:
pi = pi.replace("value", "1")
p_sql = get_sql(schema, pi)
p_str = pi
except:
pass
if p_sql is None:
# If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
p_sql = {
"except": None,
"from": {
"conds": [],
"table_units": []
},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [
False,
[]
],
"union": None,
"where": []
}
eval_err_num += 1
print("eval_err_num:{}".format(eval_err_num))
# rebuild sql for value evaluation
kmap = kmaps[db_name]
g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
g_sql = rebuild_sql_val(g_sql)
g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
if etype in ["all", "exec"]:
exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
if exec_score:
scores[hardness]['exec'] += 1
scores[turn_id]['exec'] += 1
turn_scores['exec'].append(1)
else:
turn_scores['exec'].append(0)
if etype in ["all", "match"]:
exact_score = evaluator.eval_exact_match(p_sql, g_sql)
partial_scores = evaluator.partial_scores
if exact_score == 0:
turn_scores['exact'].append(0)
print('question: {}'.format(question))
print("{} pred: {}".format(hardness, p_str))
print("{} gold: {}".format(hardness, g_str))
print('')
else:
print("Right")
print('question', question)
print("{} pred: {}".format(hardness, p_str))
print("{} gold: {}".format(hardness, g_str))
print('')
turn_scores['exact'].append(1)
# print(p_str)
predict_str += p_str + '\n'
scores[turn_id]['exact'] += exact_score
scores[hardness]['exact'] += exact_score
scores['all']['exact'] += exact_score
for type_ in partial_types:
if partial_scores[type_]['pred_total'] > 0:
scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores[hardness]['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores[hardness]['partial'][type_]['rec_count'] += 1
scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
if partial_scores[type_]['pred_total'] > 0:
scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores['all']['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores['all']['partial'][type_]['rec_count'] += 1
scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
entries.append({
'predictSQL': p_str,
'goldSQL': g_str,
'hardness': hardness,
'exact': exact_score,
'partial': partial_scores
})
if all(v == 1 for v in turn_scores["exec"]):
scores['joint_all']['exec'] += 1
if all(v == 1 for v in turn_scores["exact"]):
scores['joint_all']['exact'] += 1
predict_str += '\n'
predict_file.write(predict_str)
for turn in turns:
if scores[turn]['count'] == 0:
continue
if etype in ["all", "exec"]:
scores[turn]['exec'] /= scores[turn]['count']
if etype in ["all", "match"]:
scores[turn]['exact'] /= scores[turn]['count']
for level in levels:
if scores[level]['count'] == 0:
continue
if etype in ["all", "exec"]:
scores[level]['exec'] /= scores[level]['count']
if etype in ["all", "match"]:
scores[level]['exact'] /= scores[level]['count']
for type_ in partial_types:
if scores[level]['partial'][type_]['acc_count'] == 0:
scores[level]['partial'][type_]['acc'] = 0
else:
scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
scores[level]['partial'][type_]['acc_count'] * 1.0
if scores[level]['partial'][type_]['rec_count'] == 0:
scores[level]['partial'][type_]['rec'] = 0
else:
scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
scores[level]['partial'][type_]['rec_count'] * 1.0
if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
scores[level]['partial'][type_]['f1'] = 1
else:
scores[level]['partial'][type_]['f1'] = \
2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
print_scores(scores, etype)
predict_file.close() | null |
165,560 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
def eval_nested(pred, label):
def eval_IUEN(pred, label):
    """Accumulate nested-query match counts for INTERSECT/EXCEPT/UNION.

    Sums the (label_total, pred_total, matched) triples produced by
    ``eval_nested`` over the three set-operation clauses.
    """
    label_total = pred_total = cnt = 0
    for clause in ('intersect', 'except', 'union'):
        lt, pt, matched = eval_nested(pred[clause], label[clause])
        label_total += lt
        pred_total += pt
        cnt += matched
    return label_total, pred_total, cnt
165,566 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
stop_word = set(['_UNK', '_EOS', 'select', 'value', ')', '(', 'where', '=', ',', 'count', 'group_by', 'order_by', 'limit_value', 'desc', '>', 'distinct', 'and', 'avg', 'having', '<', 'in', 'sum', 'max', 'asc', 'not', 'or', 'like', 'min', 'intersect', 'except', '!=', 'union', 'between', '-', '+', '0'])
stop_word |= set(CLAUSE_KEYWORDS)
stop_word |= set(JOIN_KEYWORDS)
stop_word |= set(WHERE_OPS)
stop_word |= set(UNIT_OPS)
stop_word |= set(AGG_OPS)
stop_word |= set(COND_OPS)
stop_word |= set(SQL_OPS)
stop_word |= set(ORDER_OPS)
class Evaluator:
def __init__(self):
def eval_hardness(self, sql):
def eval_exact_match(self, pred, label):
def eval_partial_match(self, pred, label):
def print_scores(scores, etype):
def cmp(sql1, sql2, kmap, evaluator2, schema):
def eval_exec_match(db, p_str, g_str, pred, gold):
def rebuild_sql_val(sql):
def build_valid_col_units(table_units, schema):
def rebuild_sql_col(valid_col_units, sql, kmap):
class Schema:
def __init__(self, schema):
def schema(self):
def idMap(self):
def _map(self, schema):
def get_schema(db):
def get_sql(schema, query):
def evaluate(gold, predict, db_dir, etype, kmaps):
train_f = open("cosql_rerank_train_data_1.json", "w")
with open(gold) as f:
glist = []
gseq_one = []
for l in f.readlines():
if len(l.strip()) == 0:
glist.append(gseq_one)
gseq_one = []
else:
lstrip = l.strip().split('\t')
gseq_one.append(lstrip)
#glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
with open(predict) as f:
plist = []
pseq_one = []
p_socre_list = []
pseq_score_one = []
question_list = []
question_one = []
while True:
l = f.readline()
if l == "":
break
if len(l.strip()) == 0:
plist.append(pseq_one)
pseq_one = []
p_socre_list.append(pseq_score_one)
pseq_score_one = []
question_list.append(question_one)
question_one = []
else:
x = l.strip().split('\t')
pseq_one.append(x)
l2 = f.readline()
y = l2.strip().split('\t')
y = [math.exp(-float(s)) for s in y]
assert len(x) == len(y)
pseq_score_one.append(y)
question_one.append(f.readline().strip())
#print('len(x)', len(x))
#plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
# plist = [[("select product_type_code from products group by product_type_code order by count ( * ) desc limit value", "orchestra")]]
# glist = [[("SELECT product_type_code FROM Products GROUP BY product_type_code ORDER BY count(*) DESC LIMIT 1", "customers_and_orders")]]
evaluator = Evaluator()
evaluator2 = Evaluator()
turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
'group', 'order', 'and/or', 'IUEN', 'keywords']
entries = []
scores = {}
for turn in turns:
scores[turn] = {'count': 0, 'exact': 0.}
scores[turn]['exec'] = 0
for level in levels:
scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
scores[level]['exec'] = 0
for type_ in partial_types:
scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
eval_err_num = 0
n1 = 0
n2 = 0
n3 = 0
predict_file = open("./predict.txt", "w")
for p, g, s, questions in zip(plist, glist, p_socre_list, question_list):
scores['joint_all']['count'] += 1
turn_scores = {"exec": [], "exact": []}
predict_str = ''
for idx, pgs in enumerate(zip(p, g, s, questions)):
p, g, s, question = pgs
#p_str = p[0]
#p_str = p_str.replace("value", "1")
g_str, db = g
db_name = db
db = os.path.join(db_dir, db, db + ".sqlite")
schema = Schema(get_schema(db))
try:
g_sql = get_sql(schema, g_str)
except:
continue
hardness = evaluator.eval_hardness(g_sql)
ori_idx = idx
if idx > 3:
idx = ">4"
else:
idx += 1
turn_id = "turn " + str(idx)
scores[turn_id]['count'] += 1
scores[hardness]['count'] += 1
scores['all']['count'] += 1
p_sql = None
flag = False
p_sql_socre = []
for p_str, s in zip(p, s):
cur_s = s
flag2 = False
try:
p_str = p_str.replace("value", "1")
p_sql = get_sql(schema, p_str)
flag2 = True
one_data = dict()
one_data['utterances'] = questions
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
one_data["match"] = True
#flag2 = True
else:
one_data["match"] = False
#flag2 = False
one_data["legitimate"] = True
one_data["pred"] = p_str
one_data["golden"] = g_str
one_data["pred_score"] = cur_s
one_data["turn_id"] = ori_idx
train_f.write(json.dumps(one_data)+'\n')
except:
one_data = dict()
one_data['utterances'] = questions
one_data["pred"] = p_str
one_data["legitimate"] = False
one_data["match"] = False
one_data["golden"] = g_str
one_data["pred_score"] = cur_s
one_data["turn_id"] = ori_idx
train_f.write(json.dumps(one_data)+'\n')
pass
if flag2:
vis = set()
for ss in p_str.split(' '):
ss = ss.lower()
if ss == 'from':
break
if ss in stop_word:
continue
if ss in vis:
flag2 = False
for fk in ['none', 'max', 'min', 'count', 'sum', 'avg']:
if fk in p_str.lower():
flag2 = True
break
if flag2:
break
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
pass
break
vis.add(ss)
if flag2 is False:
continue
slist = p_str.lower().split(' ')
for i in range(len(slist)-2):
ss = slist[i]
if slist[i+1] == '=' and slist[i+2] == '1':
if ss in vis:
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
pass
flag2 = False
break
if flag2 == False:
continue
flag = False
for i in range(len(p_sql_socre)):
sql1 = p_sql_socre[i][0]
if cmp(sql1, p_sql, kmaps[db_name], evaluator2, schema):
#print('+++')
p_sql_socre[i] = (sql1, (p_sql_socre[i][1][0]+cur_s, p_sql_socre[i][1][1]) )
flag = True
if cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema):
assert cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema)
if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
assert cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema)
break
if flag == False:
p_sql_socre.append( (p_sql, (cur_s, p_str) ) )
p_sql = None
max_socre = -100
p_str = "error"
for i in range(len(p_sql_socre)):
sql1 = p_sql_socre[i][0]
cur_s = p_sql_socre[i][1][0]
cur_p_str = p_sql_socre[i][1][1]
if p_sql == None or max_socre < cur_s:
p_sql = sql1
max_socre = cur_s
p_str = cur_p_str
if False and p_sql is None:
print('p', p)
print('s', s)
for pi in p:
if p_sql == None or len(p_str.split(' ')) < len(pi.split(' ')):
try:
pi = pi.replace("value", "1")
p_sql = get_sql(schema, pi)
p_str = pi
except:
pass
if p_sql is None:
# If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
p_sql = {
"except": None,
"from": {
"conds": [],
"table_units": []
},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [
False,
[]
],
"union": None,
"where": []
}
eval_err_num += 1
print("eval_err_num:{}".format(eval_err_num))
# rebuild sql for value evaluation
kmap = kmaps[db_name]
g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
g_sql = rebuild_sql_val(g_sql)
g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
if etype in ["all", "exec"]:
exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
if exec_score:
scores[hardness]['exec'] += 1
scores[turn_id]['exec'] += 1
turn_scores['exec'].append(1)
else:
turn_scores['exec'].append(0)
if etype in ["all", "match"]:
exact_score = evaluator.eval_exact_match(p_sql, g_sql)
partial_scores = evaluator.partial_scores
if exact_score == 0:
turn_scores['exact'].append(0)
"""
print('question: {}'.format(question))
print("{} pred: {}".format(hardness, p_str))
print("{} gold: {}".format(hardness, g_str))
print('')
"""
else:
"""
print("Right")
print('question', question)
print("{} pred: {}".format(hardness, p_str))
print("{} gold: {}".format(hardness, g_str))
print('')
"""
turn_scores['exact'].append(1)
print(p_str)
predict_str += p_str + '\n'
scores[turn_id]['exact'] += exact_score
scores[hardness]['exact'] += exact_score
scores['all']['exact'] += exact_score
for type_ in partial_types:
if partial_scores[type_]['pred_total'] > 0:
scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores[hardness]['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores[hardness]['partial'][type_]['rec_count'] += 1
scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
if partial_scores[type_]['pred_total'] > 0:
scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores['all']['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores['all']['partial'][type_]['rec_count'] += 1
scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
entries.append({
'predictSQL': p_str,
'goldSQL': g_str,
'hardness': hardness,
'exact': exact_score,
'partial': partial_scores
})
if all(v == 1 for v in turn_scores["exec"]):
scores['joint_all']['exec'] += 1
if all(v == 1 for v in turn_scores["exact"]):
scores['joint_all']['exact'] += 1
predict_str += '\n'
predict_file.write(predict_str)
for turn in turns:
if scores[turn]['count'] == 0:
continue
if etype in ["all", "exec"]:
scores[turn]['exec'] /= scores[turn]['count']
if etype in ["all", "match"]:
scores[turn]['exact'] /= scores[turn]['count']
for level in levels:
if scores[level]['count'] == 0:
continue
if etype in ["all", "exec"]:
scores[level]['exec'] /= scores[level]['count']
if etype in ["all", "match"]:
scores[level]['exact'] /= scores[level]['count']
for type_ in partial_types:
if scores[level]['partial'][type_]['acc_count'] == 0:
scores[level]['partial'][type_]['acc'] = 0
else:
scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
scores[level]['partial'][type_]['acc_count'] * 1.0
if scores[level]['partial'][type_]['rec_count'] == 0:
scores[level]['partial'][type_]['rec'] = 0
else:
scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
scores[level]['partial'][type_]['rec_count'] * 1.0
if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
scores[level]['partial'][type_]['f1'] = 1
else:
scores[level]['partial'][type_]['f1'] = \
2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
print_scores(scores, etype)
predict_file.close()
train_f.close() | null |
165,588 | import os, sys, copy
import json
import math
import sqlite3
import traceback
import argparse
import tqdm
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
# Tokens that carry no schema information when scanning a predicted SQL
# string: decoder specials ('_UNK', '_EOS'), SQL keywords/operators and
# placeholder values, extended with the evaluation grammar's keyword tables.
stop_word = set(['_UNK', '_EOS', 'select', 'value', ')', '(', 'where', '=', ',', 'count', 'group_by', 'order_by', 'limit_value', 'desc', '>', 'distinct', 'and', 'avg', 'having', '<', 'in', 'sum', 'max', 'asc', 'not', 'or', 'like', 'min', 'intersect', 'except', '!=', 'union', 'between', '-', '+', '0'])
# NOTE(review): CLAUSE_KEYWORDS ... ORDER_OPS are module-level constants
# defined elsewhere in this file (Spider-evaluation keyword tables) — confirm
# they are in scope before this point.
stop_word |= set(CLAUSE_KEYWORDS)
stop_word |= set(JOIN_KEYWORDS)
stop_word |= set(WHERE_OPS)
stop_word |= set(UNIT_OPS)
stop_word |= set(AGG_OPS)
stop_word |= set(COND_OPS)
stop_word |= set(SQL_OPS)
stop_word |= set(ORDER_OPS)
class Evaluator:
    """Scores a predicted SQL tree against a gold tree (exact + partial match)."""

    def __init__(self):
        # Updated by eval_exact_match with the most recent partial scores.
        self.partial_scores = None

    def eval_hardness(self, sql):
        """Bucket a gold query into 'easy' / 'medium' / 'hard' / 'extra'."""
        comp1 = count_component1(sql)
        comp2 = count_component2(sql)
        others = count_others(sql)
        if comp1 <= 1 and others == 0 and comp2 == 0:
            return "easy"
        if comp2 == 0 and ((others <= 2 and comp1 <= 1) or
                           (comp1 <= 2 and others < 2)):
            return "medium"
        if ((others > 2 and comp1 <= 2 and comp2 == 0)
                or (2 < comp1 <= 3 and others <= 2 and comp2 == 0)
                or (comp1 <= 1 and others == 0 and comp2 <= 1)):
            return "hard"
        return "extra"

    def eval_exact_match(self, pred, label):
        """Exact set match: every partial F1 perfect, plus matching FROM tables."""
        scores = self.eval_partial_match(pred, label)
        self.partial_scores = scores
        if any(entry['f1'] != 1 for entry in scores.values()):
            return 0
        gold_tables = label['from']['table_units']
        if len(gold_tables) > 0:
            # Table units must match as sets (order-insensitive).
            return sorted(gold_tables) == sorted(pred['from']['table_units'])
        return 1

    def eval_partial_match(self, pred, label):
        """Per-clause acc/rec/f1 scores keyed by clause name."""
        def scored(cnt, pred_total, label_total):
            acc, rec, f1 = get_scores(cnt, pred_total, label_total)
            return {'acc': acc, 'rec': rec, 'f1': f1,
                    'label_total': label_total, 'pred_total': pred_total}

        res = {}
        # SELECT (with and without aggregation operators).
        lt, pt, cnt, cnt_wo_agg = eval_sel(pred, label)
        res['select'] = scored(cnt, pt, lt)
        res['select(no AGG)'] = scored(cnt_wo_agg, pt, lt)
        # WHERE (with and without comparison operators).
        lt, pt, cnt, cnt_wo_agg = eval_where(pred, label)
        res['where'] = scored(cnt, pt, lt)
        res['where(no OP)'] = scored(cnt_wo_agg, pt, lt)
        # Remaining clause-level components.
        lt, pt, cnt = eval_group(pred, label)
        res['group(no Having)'] = scored(cnt, pt, lt)
        lt, pt, cnt = eval_having(pred, label)
        res['group'] = scored(cnt, pt, lt)
        lt, pt, cnt = eval_order(pred, label)
        res['order'] = scored(cnt, pt, lt)
        lt, pt, cnt = eval_and_or(pred, label)
        res['and/or'] = scored(cnt, pt, lt)
        lt, pt, cnt = eval_IUEN(pred, label)
        res['IUEN'] = scored(cnt, pt, lt)
        lt, pt, cnt = eval_keywords(pred, label)
        res['keywords'] = scored(cnt, pt, lt)
        return res
def print_scores(scores, etype):
    # Pretty-print the aggregated evaluation tables to stdout.
    # `scores` is the nested dict built by evaluate(); `etype` selects which
    # sections appear: 'exec', 'match', or 'all' for both.
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', "joint_all"]
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
    'group', 'order', 'and/or', 'IUEN', 'keywords']
    # Per-hardness-level tables: counts, then the requested accuracy sections.
    print("{:20} {:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
    counts = [scores[level]['count'] for level in levels]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    if etype in ["all", "exec"]:
    print('===================== EXECUTION ACCURACY =====================')
    this_scores = [scores[level]['exec'] for level in levels]
    print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
    if etype in ["all", "match"]:
    print('\n====================== EXACT MATCHING ACCURACY =====================')
    exact_scores = [scores[level]['exact'] for level in levels]
    print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
    # Partial-match breakdown: accuracy, recall and F1 per clause type.
    print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
    for type_ in partial_types:
    this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
    print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
    print('---------------------- PARTIAL MATCHING RECALL ----------------------')
    for type_ in partial_types:
    this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
    print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
    print('---------------------- PARTIAL MATCHING F1 --------------------------')
    for type_ in partial_types:
    this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
    print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
    # Per-turn tables (conversation position); five columns instead of six.
    print("\n\n{:20} {:20} {:20} {:20} {:20} {:20}".format("", *turns))
    counts = [scores[turn]['count'] for turn in turns]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
    if etype in ["all", "exec"]:
    # NOTE(review): "TRUN XECUTION" is a typo for "TURN EXECUTION" in this
    # banner (runtime string left unchanged here).
    print('===================== TRUN XECUTION ACCURACY =====================')
    this_scores = [scores[turn]['exec'] for turn in turns]
    print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
    if etype in ["all", "match"]:
    print('\n====================== TRUN EXACT MATCHING ACCURACY =====================')
    exact_scores = [scores[turn]['exact'] for turn in turns]
    print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
def cmp(sql1, sql2, kmap, evaluator2, schema):
    """Return True iff two parsed SQL dicts are an exact match after normalization.

    Both inputs are deep-copied so the callers' dicts are never mutated; each
    copy has its values stripped and its columns remapped onto canonical
    (foreign-key) columns before the exact-match comparison.
    """
    pred = copy.deepcopy(sql1)
    gold = copy.deepcopy(sql2)

    gold_cols = build_valid_col_units(gold['from']['table_units'], schema)
    gold = rebuild_sql_col(gold_cols, rebuild_sql_val(gold), kmap)

    pred_cols = build_valid_col_units(pred['from']['table_units'], schema)
    pred = rebuild_sql_col(pred_cols, rebuild_sql_val(pred), kmap)

    return evaluator2.eval_exact_match(pred, gold) == True
def eval_exec_match(db, p_str, g_str, pred, gold):
    """
    return 1 if the values between prediction and gold are matching
    in the corresponding index. Currently not support multiple col_unit(pairs).

    db: path to the sqlite database file.
    p_str / g_str: predicted and gold SQL strings to execute.
    pred / gold: parsed SQL dicts; only their 'select' clause is used to key
        the result columns.
    Returns False if the predicted SQL fails to execute, otherwise whether the
    per-column result sets of the two queries are equal.
    """
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        try:
            cursor.execute(p_str)
            p_res = cursor.fetchall()
        except Exception:
            # Predicted SQL is not executable -> cannot match.
            return False
        # Gold SQL is assumed valid; any failure here propagates as before.
        cursor.execute(g_str)
        q_res = cursor.fetchall()
    finally:
        # BUG FIX: the connection was previously never closed (leaked on
        # every call, including the early `return False` path).
        conn.close()

    def res_map(res, val_units):
        # Key each result column by its val_unit so column order differences
        # between the two queries do not matter.
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
def rebuild_sql_val(sql):
    """Normalize literal values throughout *sql* in place.

    No-op (returns *sql* unchanged) when it is None or value checking is
    enabled (DISABLE_VALUE falsy).  Otherwise every condition list is passed
    through rebuild_condition_val and nested set-operation queries are
    processed recursively.
    """
    if sql is None or not DISABLE_VALUE:
        return sql

    # Condition lists that may contain literal values, in the original order.
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    for clause in ('having', 'where'):
        sql[clause] = rebuild_condition_val(sql[clause])

    # Recurse into INTERSECT / EXCEPT / UNION sub-queries.
    for clause in ('intersect', 'except', 'union'):
        sql[clause] = rebuild_sql_val(sql[clause])

    return sql
def build_valid_col_units(table_units, schema):
    """Collect schema column identifiers belonging to the tables used in FROM.

    table_units: list of (type, id) pairs from a parsed FROM clause; only
        entries of TABLE_TYPE['table_unit'] carry a '__table__' identifier.
    schema: Schema whose idMap maps names to '__table.col__' identifiers.
    Returns the list of '__table.col__' values (in idMap order) whose table
    prefix appears among *table_units*.
    """
    col_ids = [table_unit[1] for table_unit in table_units
               if table_unit[0] == TABLE_TYPE['table_unit']]
    # '__table__'[:-2] -> '__table', matching value[:value.index('.')] below.
    # Use a set: membership is tested once per idMap entry.
    prefixs = {col_id[:-2] for col_id in col_ids}
    return [value for value in schema.idMap.values()
            if '.' in value and value[:value.index('.')] in prefixs]
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Remap every column reference in *sql* onto its canonical column via *kmap*.

    Mutates and returns *sql*; returns None unchanged.  Each clause is handed
    to its dedicated rebuild helper, then set-operation sub-queries are
    processed recursively.
    """
    if sql is None:
        return sql

    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)

    # Recurse into INTERSECT / EXCEPT / UNION sub-queries.
    for nested in ('intersect', 'except', 'union'):
        sql[nested] = rebuild_sql_col(valid_col_units, sql[nested], kmap)

    return sql
class Schema:
    """
    Simple schema which maps table&column to a unique identifier
    """
    def __init__(self, schema):
        # schema: dict mapping table name -> list of column names.
        self._schema = schema
        self._idMap = self._map(self._schema)

    @property
    def schema(self):
        """The raw {table: [column, ...]} mapping this Schema was built from."""
        # BUG FIX: restored @property — call sites access `schema.schema`
        # and `schema.idMap` as attributes, not as methods.
        return self._schema

    @property
    def idMap(self):
        """Mapping of '*', 'table' and 'table.column' to '__...__' identifiers."""
        return self._idMap

    def _map(self, schema):
        # '*' always maps to the special __all__ identifier; every
        # table.column pair and table name gets a lowercased '__name__' id.
        idMap = {"*": "__all__"}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + "." + val.lower()] = (
                    "__" + key.lower() + "." + val.lower() + "__"
                )
        for key in schema:
            idMap[key.lower()] = "__" + key.lower() + "__"
        return idMap
def get_schema(db):
    """
    Get database's schema, which is a dict with table name as key
    and list of column names as value
    :param db: database path
    :return: schema dict (all names lowercased)
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()

        # fetch table names
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]

        # fetch table info (column name is field 1 of each PRAGMA row)
        for table in tables:
            cursor.execute("PRAGMA table_info({})".format(table))
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        # BUG FIX: the connection was previously never closed (leaked per call).
        conn.close()

    return schema
def get_sql(schema, query):
    """Parse a SQL string into its structured dict form using *schema*.

    Tokenizes *query*, resolves table aliases against the schema, and runs the
    recursive-descent parser from token 0.  Returns only the parsed SQL dict.
    """
    tokens = tokenize(query)
    alias_map = get_tables_with_alias(schema.schema, tokens)
    _, parsed = parse_sql(tokens, 0, alias_map, schema)
    return parsed
def evaluate(gold, predict, db_dir, etype, kmaps):
    """Score beam-search predictions against gold SQL, turn by turn.

    gold: path to a file of 'SQL\\tdb_id' lines; blank lines separate interactions.
    predict: path to a file where each turn is three lines — tab-separated beam
        SQLs, tab-separated negative-log scores, then the question — with blank
        lines separating interactions.
    db_dir: directory containing '<db_id>/<db_id>.sqlite' databases.
    etype: 'all', 'exec', or 'match' — which metrics to compute.
    kmaps: per-database foreign-key maps used for column canonicalization.

    Side effects: prints per-example diagnostics and summary tables, and writes
    the chosen prediction per turn to ./predict.txt.
    """
    # ---- Load gold SQL, grouped into interactions by blank lines. ----
    with open(gold) as f:
        glist = []
        gseq_one = []
        for l in f.readlines():
            if len(l.strip()) == 0:
                glist.append(gseq_one)
                gseq_one = []
            else:
                lstrip = l.strip().split('\t')
                gseq_one.append(lstrip)
        #glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # ---- Load predictions: per turn, a line of beam SQLs, a line of scores
    # (converted via exp(-score) to probabilities), then the question. ----
    with open(predict) as f:
        plist = []
        pseq_one = []
        p_socre_list = []
        pseq_score_one = []
        question_list = []
        question_one = []
        while True:
            l = f.readline()
            if l == "":
                break
            if len(l.strip()) == 0:
                plist.append(pseq_one)
                pseq_one = []
                p_socre_list.append(pseq_score_one)
                pseq_score_one = []
                question_list.append(question_one)
                question_one = []
            else:
                x = l.strip().split('\t')
                pseq_one.append(x)
                l2 = f.readline()
                y = l2.strip().split('\t')
                y = [math.exp(-float(s)) for s in y]
                assert len(x) == len(y)
                pseq_score_one.append(y)
                question_one.append(f.readline().strip())
                #print('len(x)', len(x))
        #plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    # plist = [[("select product_type_code from products group by product_type_code order by count ( * ) desc limit value", "orchestra")]]
    # glist = [[("SELECT product_type_code FROM Products GROUP BY product_type_code ORDER BY count(*) DESC LIMIT 1", "customers_and_orders")]]
    # ---- Initialize score accumulators per turn index and per hardness level. ----
    evaluator = Evaluator()
    evaluator2 = Evaluator()
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    entries = []
    scores = {}
    for turn in turns:
        scores[turn] = {'count': 0, 'exact': 0.}
        scores[turn]['exec'] = 0
    for level in levels:
        scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
        scores[level]['exec'] = 0
        for type_ in partial_types:
            scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
    eval_err_num = 0
    n1 = 0
    n2 = 0
    n3 = 0
    predict_file = open("./predict.txt", "w")
    # ---- Main loop: one iteration per interaction. ----
    for p, g, s, questions in zip(plist, glist, p_socre_list, question_list):
        scores['joint_all']['count'] += 1
        turn_scores = {"exec": [], "exact": []}
        predict_str = ''
        # NOTE: the loop variables p/g/s below shadow the per-interaction
        # p/g/s above — preserved as-is.
        for idx, pgs in enumerate(zip(p, g, s, questions)):
            p, g, s, question = pgs
            #p_str = p[0]
            #p_str = p_str.replace("value", "1")
            g_str, db = g
            db_name = db
            db = os.path.join(db_dir, db, db + ".sqlite")
            schema = Schema(get_schema(db))
            try:
                g_sql = get_sql(schema, g_str)
            except:
                # Unparseable gold SQL: skip this turn entirely.
                continue
            hardness = evaluator.eval_hardness(g_sql)
            ori_idx = idx
            if idx > 3:
                idx = ">4"
            else:
                idx += 1
            turn_id = "turn " + str(idx)
            scores[turn_id]['count'] += 1
            scores[hardness]['count'] += 1
            scores['all']['count'] += 1
            p_sql = None
            flag = False
            # Accumulated (parsed_sql, (summed_score, sql_string)) candidates;
            # semantically-equal beams are merged and their scores summed.
            p_sql_socre = []
            for p_str, s in zip(p, s):
                cur_s = s
                flag2 = False
                try:
                    p_str = p_str.replace("value", "1")
                    p_sql = get_sql(schema, p_str)
                    flag2 = True
                except:
                    pass
                if flag2:
                    # Heuristic filter: reject candidates whose SELECT clause
                    # repeats a column (unless an aggregate is present).
                    vis = set()
                    for ss in p_str.split(' '):
                        ss = ss.lower()
                        if ss == 'from':
                            break
                        if ss in stop_word:
                            continue
                        if ss in vis:
                            flag2 = False
                            for fk in ['none', 'max', 'min', 'count', 'sum', 'avg']:
                                if fk in p_str.lower():
                                    flag2 = True
                                    break
                            if flag2:
                                break
                            if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
                                pass
                            break
                        vis.add(ss)
                    if flag2 is False:
                        continue
                    # Heuristic filter: reject "col = 1" conditions on a
                    # column already selected (likely a hallucinated value).
                    slist = p_str.lower().split(' ')
                    for i in range(len(slist)-2):
                        ss = slist[i]
                        if slist[i+1] == '=' and slist[i+2] == '1':
                            if ss in vis:
                                if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
                                    pass
                                flag2 = False
                                break
                    if flag2 == False:
                        continue
                    # Merge this candidate into an existing semantically-equal
                    # one (summing scores) or append it as a new candidate.
                    flag = False
                    for i in range(len(p_sql_socre)):
                        sql1 = p_sql_socre[i][0]
                        if cmp(sql1, p_sql, kmaps[db_name], evaluator2, schema):
                            p_sql_socre[i] = (sql1, (p_sql_socre[i][1][0]+cur_s, p_sql_socre[i][1][1]) )
                            flag = True
                            if cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema):
                                assert cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema)
                            if cmp(p_sql, g_sql, kmaps[db_name], evaluator2, schema):
                                assert cmp(sql1, g_sql, kmaps[db_name], evaluator2, schema)
                            break
                    if flag == False:
                        p_sql_socre.append( (p_sql, (cur_s, p_str) ) )
            # ---- Pick the candidate with the highest accumulated score. ----
            p_sql = None
            max_socre = -100
            p_str = "error"
            for i in range(len(p_sql_socre)):
                sql1 = p_sql_socre[i][0]
                cur_s = p_sql_socre[i][1][0]
                cur_p_str = p_sql_socre[i][1][1]
                if p_sql == None or max_socre < cur_s:
                    p_sql = sql1
                    max_socre = cur_s
                    p_str = cur_p_str
            if False and p_sql is None:
                # Disabled debug output.
                print('p', p)
                print('s', s)
            # Fallback: if nothing parsed, try raw beams again preferring
            # longer strings.
            for pi in p:
                if p_sql == None or len(p_str.split(' ')) < len(pi.split(' ')):
                    try:
                        pi = pi.replace("value", "1")
                        p_sql = get_sql(schema, pi)
                        p_str = pi
                    except:
                        pass
            if p_sql is None:
                # If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
                p_sql = {
                    "except": None,
                    "from": {
                        "conds": [],
                        "table_units": []
                    },
                    "groupBy": [],
                    "having": [],
                    "intersect": None,
                    "limit": None,
                    "orderBy": [],
                    "select": [
                        False,
                        []
                    ],
                    "union": None,
                    "where": []
                }
                eval_err_num += 1
                print("eval_err_num:{}".format(eval_err_num))
            # rebuild sql for value evaluation
            kmap = kmaps[db_name]
            g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
            g_sql = rebuild_sql_val(g_sql)
            g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
            p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
            p_sql = rebuild_sql_val(p_sql)
            p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
            # ---- Execution accuracy. ----
            if etype in ["all", "exec"]:
                exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
                if exec_score:
                    scores[hardness]['exec'] += 1
                    scores[turn_id]['exec'] += 1
                    turn_scores['exec'].append(1)
                else:
                    turn_scores['exec'].append(0)
            # ---- Exact-match and partial-match accuracy. ----
            if etype in ["all", "match"]:
                exact_score = evaluator.eval_exact_match(p_sql, g_sql)
                partial_scores = evaluator.partial_scores
                if exact_score == 0:
                    turn_scores['exact'].append(0)
                    print('question: {}'.format(question))
                    print("{} pred: {}".format(hardness, p_str))
                    print("{} gold: {}".format(hardness, g_str))
                    print('')
                else:
                    print("Right")
                    print('question', question)
                    print("{} pred: {}".format(hardness, p_str))
                    print("{} gold: {}".format(hardness, g_str))
                    print('')
                    turn_scores['exact'].append(1)
                #print(p_str)
                predict_str += p_str + '\n'
                scores[turn_id]['exact'] += exact_score
                scores[hardness]['exact'] += exact_score
                scores['all']['exact'] += exact_score
                for type_ in partial_types:
                    if partial_scores[type_]['pred_total'] > 0:
                        scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores[hardness]['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores[hardness]['partial'][type_]['rec_count'] += 1
                    scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                    if partial_scores[type_]['pred_total'] > 0:
                        scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                        scores['all']['partial'][type_]['acc_count'] += 1
                    if partial_scores[type_]['label_total'] > 0:
                        scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                        scores['all']['partial'][type_]['rec_count'] += 1
                    scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
                entries.append({
                    'predictSQL': p_str,
                    'goldSQL': g_str,
                    'hardness': hardness,
                    'exact': exact_score,
                    'partial': partial_scores
                })
        # An interaction counts for 'joint_all' only if every turn is correct.
        if all(v == 1 for v in turn_scores["exec"]):
            scores['joint_all']['exec'] += 1
        if all(v == 1 for v in turn_scores["exact"]):
            scores['joint_all']['exact'] += 1
        predict_str += '\n'
        predict_file.write(predict_str)
    # ---- Convert accumulated counts into averages. ----
    for turn in turns:
        if scores[turn]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[turn]['exec'] /= scores[turn]['count']
        if etype in ["all", "match"]:
            scores[turn]['exact'] /= scores[turn]['count']
    for level in levels:
        if scores[level]['count'] == 0:
            continue
        if etype in ["all", "exec"]:
            scores[level]['exec'] /= scores[level]['count']
        if etype in ["all", "match"]:
            scores[level]['exact'] /= scores[level]['count']
            for type_ in partial_types:
                if scores[level]['partial'][type_]['acc_count'] == 0:
                    scores[level]['partial'][type_]['acc'] = 0
                else:
                    scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
                                                            scores[level]['partial'][type_]['acc_count'] * 1.0
                if scores[level]['partial'][type_]['rec_count'] == 0:
                    scores[level]['partial'][type_]['rec'] = 0
                else:
                    scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
                                                            scores[level]['partial'][type_]['rec_count'] * 1.0
                if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                    scores[level]['partial'][type_]['f1'] = 1
                else:
                    scores[level]['partial'][type_]['f1'] = \
                        2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
                        scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
    print_scores(scores, etype)
    predict_file.close()
165,590 | import sys
args = sys.argv
import os
import argparse
The provided code snippet includes necessary dependencies for implementing the `interpret_args` function. Write a Python function `def interpret_args()` to solve the following problem:
Interprets the command line arguments, and returns a dictionary.
Here is the function:
def interpret_args():
    """ Interprets the command line arguments, and returns a dictionary.

    Also creates the log directory if missing, validates that at least one of
    train/evaluate/interactive/attention is requested, and (when training)
    writes the full argument namespace to <logdir>/args.log, refusing to
    overwrite an existing one.

    NOTE(review): argparse `type=bool` treats ANY non-empty string as True
    (bool('False') is True). These flags therefore only work as '--flag 1' or
    by omission; changing them to store_true would alter the CLI contract, so
    they are left as-is and documented here instead.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--no_gpus", type=bool, default=1)
    parser.add_argument("--seed", type=int, default=141)

    ### Data parameters
    parser.add_argument(
        '--raw_train_filename',
        type=str,
        default='../atis_data/data/resplit/processed/train_with_tables.pkl')
    parser.add_argument(
        '--raw_dev_filename',
        type=str,
        default='../atis_data/data/resplit/processed/dev_with_tables.pkl')
    parser.add_argument(
        '--raw_validation_filename',
        type=str,
        default='../atis_data/data/resplit/processed/valid_with_tables.pkl')
    parser.add_argument(
        '--raw_test_filename',
        type=str,
        default='../atis_data/data/resplit/processed/test_with_tables.pkl')
    parser.add_argument('--data_directory', type=str, default='processed_data')
    parser.add_argument('--processed_train_filename', type=str, default='train.pkl')
    parser.add_argument('--processed_dev_filename', type=str, default='dev.pkl')
    parser.add_argument('--processed_validation_filename', type=str, default='validation.pkl')
    parser.add_argument('--processed_test_filename', type=str, default='test.pkl')
    parser.add_argument('--database_schema_filename', type=str, default=None)
    parser.add_argument('--embedding_filename', type=str, default=None)
    parser.add_argument('--input_vocabulary_filename', type=str, default='input_vocabulary.pkl')
    parser.add_argument('--output_vocabulary_filename',
                        type=str,
                        default='output_vocabulary.pkl')
    parser.add_argument('--input_key', type=str, default='nl_with_dates')
    parser.add_argument('--anonymize', type=bool, default=False)
    parser.add_argument('--anonymization_scoring', type=bool, default=False)
    parser.add_argument('--use_snippets', type=bool, default=False)
    parser.add_argument('--use_previous_query', type=bool, default=False)
    parser.add_argument('--maximum_queries', type=int, default=1)
    parser.add_argument('--use_copy_switch', type=bool, default=False)
    parser.add_argument('--use_query_attention', type=bool, default=False)
    parser.add_argument('--use_utterance_attention', type=bool, default=False)
    parser.add_argument('--freeze', type=bool, default=False)
    parser.add_argument('--scheduler', type=bool, default=False)

    ### BERT parameters
    parser.add_argument('--use_bert', type=bool, default=False)
    parser.add_argument("--bert_type_abb", type=str, help="Type of BERT model to load. e.g.) uS, uL, cS, cL, and mcS")
    parser.add_argument("--bert_input_version", type=str, default='v1')
    parser.add_argument('--fine_tune_bert', type=bool, default=False)
    parser.add_argument('--lr_bert', default=1e-5, type=float, help='BERT model learning rate.')

    ### Debugging/logging parameters
    parser.add_argument('--logdir', type=str, default='logs')
    parser.add_argument('--deterministic', type=bool, default=False)
    parser.add_argument('--num_train', type=int, default=-1)
    parser.add_argument('--logfile', type=str, default='log.txt')
    parser.add_argument('--results_file', type=str, default='results.txt')

    ### Model architecture
    parser.add_argument('--input_embedding_size', type=int, default=300)
    parser.add_argument('--output_embedding_size', type=int, default=300)
    parser.add_argument('--encoder_state_size', type=int, default=300)
    parser.add_argument('--decoder_state_size', type=int, default=300)
    parser.add_argument('--encoder_num_layers', type=int, default=1)
    parser.add_argument('--decoder_num_layers', type=int, default=2)
    parser.add_argument('--snippet_num_layers', type=int, default=1)
    parser.add_argument('--maximum_utterances', type=int, default=5)
    parser.add_argument('--state_positional_embeddings', type=bool, default=False)
    parser.add_argument('--positional_embedding_size', type=int, default=50)
    parser.add_argument('--snippet_age_embedding', type=bool, default=False)
    parser.add_argument('--snippet_age_embedding_size', type=int, default=64)
    parser.add_argument('--max_snippet_age_embedding', type=int, default=4)
    parser.add_argument('--previous_decoder_snippet_encoding', type=bool, default=False)
    parser.add_argument('--discourse_level_lstm', type=bool, default=False)
    parser.add_argument('--use_schema_attention', type=bool, default=False)
    parser.add_argument('--use_encoder_attention', type=bool, default=False)
    parser.add_argument('--use_schema_encoder', type=bool, default=False)
    parser.add_argument('--use_schema_self_attention', type=bool, default=False)
    parser.add_argument('--use_schema_encoder_2', type=bool, default=False)

    ### Training parameters
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--train_maximum_sql_length', type=int, default=200)
    parser.add_argument('--train_evaluation_size', type=int, default=100)
    parser.add_argument('--dropout_amount', type=float, default=0.5)
    parser.add_argument('--initial_patience', type=float, default=10.)
    parser.add_argument('--patience_ratio', type=float, default=1.01)
    parser.add_argument('--initial_learning_rate', type=float, default=0.001)
    parser.add_argument('--learning_rate_ratio', type=float, default=0.8)
    parser.add_argument('--interaction_level', type=bool, default=False)
    parser.add_argument('--reweight_batch', type=bool, default=False)

    ### Setting
    parser.add_argument('--train', type=bool, default=False)
    parser.add_argument('--debug', type=bool, default=False)
    parser.add_argument('--evaluate', type=bool, default=False)
    parser.add_argument('--attention', type=bool, default=False)
    parser.add_argument('--save_file', type=str, default="")
    parser.add_argument('--enable_testing', type=bool, default=False)
    parser.add_argument('--use_predicted_queries', type=bool, default=False)
    parser.add_argument('--evaluate_split', type=str, default='dev')
    parser.add_argument('--evaluate_with_gold_forcing', type=bool, default=False)
    parser.add_argument('--eval_maximum_sql_length', type=int, default=1000)
    parser.add_argument('--results_note', type=str, default='')
    parser.add_argument('--compute_metrics', type=bool, default=False)
    parser.add_argument('--reference_results', type=str, default='')
    parser.add_argument('--interactive', type=bool, default=False)
    parser.add_argument('--database_username', type=str, default="aviarmy")
    parser.add_argument('--database_password', type=str, default="aviarmy")
    parser.add_argument('--database_timeout', type=int, default=2)
    parser.add_argument('--use_transformer_relation', type=bool, default=True)
    parser.add_argument('--table_schema_path', type=str, default="data/cosql/tables.json")
    ## replace it for colab
    # parser.add_argument('--table_schema_path', type=str, default="data/table.json")
    args = parser.parse_args()

    # Ensure the log directory exists before anything tries to write into it.
    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)

    # At least one run mode must be selected.
    if not (args.train or args.evaluate or args.interactive or args.attention):
        raise ValueError('You need to be training or evaluating')
    if args.enable_testing and not args.evaluate:
        raise ValueError('You should evaluate the model if enabling testing')

    # Persist the full configuration; refuse to clobber a previous run's args.
    if args.train:
        args_file = args.logdir + '/args.log'
        if os.path.exists(args_file):
            raise ValueError('Warning: arguments already exist in ' + str(args_file))
        with open(args_file, 'w') as infile:
            infile.write(str(args))

    return args
165,593 | import copy
import pymysql
import random
import signal
import sqlparse
from . import util
from .snippets import Snippet
from sqlparse import tokens as token_types
from sqlparse import sql as sql_types
def execution_results(query, username, password, timeout=3):
    """Execute *query* against the MySQL 'atis3' database with a SIGALRM timeout.

    Returns a (syntactic, semantic, sorted_rows) triple:
      - syntactic False: the query failed to parse (ProgrammingError);
      - semantic False: it parsed but could not run (InternalError);
      - sorted_rows: fetched rows, sorted; empty on any failure or timeout.

    NOTE(review): uses SIGALRM, so this only works on Unix and only from the
    main thread — confirm before reusing elsewhere.
    """
    connection = pymysql.connect(user=username, password=password)

    # Local exception type raised by the alarm handler to abort a slow query.
    class TimeoutException(Exception):
        pass

    def timeout_handler(signum, frame):
        raise TimeoutException

    signal.signal(signal.SIGALRM, timeout_handler)

    syntactic = True
    semantic = True

    table = []

    with connection.cursor() as cursor:
        signal.alarm(timeout)  # arm the watchdog before executing
        try:
            cursor.execute("SET sql_mode='IGNORE_SPACE';")
            cursor.execute("use atis3;")
            cursor.execute(query)
            table = cursor.fetchall()
            cursor.close()
        except TimeoutException:
            # Query exceeded *timeout* seconds; treat as executable but empty.
            signal.alarm(0)
            cursor.close()
        except pymysql.err.ProgrammingError:
            # Syntax error: neither syntactically nor semantically valid.
            syntactic = False
            semantic = False
            cursor.close()
        except pymysql.err.InternalError:
            # Parses but cannot execute (e.g. unknown column).
            semantic = False
            cursor.close()
        except Exception as e:
            # Any other failure: just disarm the alarm and move on.
            signal.alarm(0)
            signal.alarm(0)
            cursor.close()
        signal.alarm(0)  # always disarm before leaving the cursor block
    connection.close()

    return (syntactic, semantic, sorted(table))
def executable(query, username, password, timeout=2):
    """Return whether *query* executes without semantic errors (see execution_results)."""
    _syntactic, semantic, _rows = execution_results(query, username, password, timeout)
    return semantic
165,595 | import os
import random
import json
from . import anonymization as anon
from . import atis_batch
from . import dataset_split as ds
from .interaction import load_function
from .entities import NLtoSQLDict
from .atis_vocab import ATISVocabulary
The provided code snippet includes necessary dependencies for implementing the `num_utterances` function. Write a Python function `def num_utterances(dataset)` to solve the following problem:
Returns the total number of utterances in the dataset.
Here is the function:
def num_utterances(dataset):
    """Returns the total number of utterances in the dataset."""
    # Each example is an interaction; its length is its utterance count.
    return sum(len(interaction) for interaction in dataset.examples)
165,596 | import nltk
nltk.data.path.append('./nltk_data/')
import sqlparse
The provided code snippet includes necessary dependencies for implementing the `nl_tokenize` function. Write a Python function `def nl_tokenize(string)` to solve the following problem:
Tokenizes a natural language string into tokens. Inputs: string: the string to tokenize. Outputs: a list of tokens. Assumes data is space-separated (this is true of ZC07 data in ATIS2/3).
Here is the function:
def nl_tokenize(string):
    """Tokenize a natural-language string with NLTK.

    Inputs:
        string: the string to tokenize.
    Outputs:
        a list of tokens.

    Assumes data is space-separated (this is true of ZC07 data in ATIS2/3).
    """
    tokens = nltk.word_tokenize(string)
    return tokens
165,605 | import os
import json
import copy
import torch
import sqlparse
import argparse
import subprocess
from collections import defaultdict
from reranker.predict import Ranker
import numpy
ranker = Ranker("./submit_models/cosql_reranker_roberta.pt")
def postprocess_one(pred_sql, schema):
    """Turn one decoded SQL token string into an executable SQL string.

    Undoes the decoder's token conventions, pretty-prints via sqlparse, and
    dispatches to the nested/single-query resolver depending on how many
    SELECTs appear.  Returns "None" when nothing usable remains.
    """
    # Replacement order matters; these mirror the decoder's output vocabulary.
    for old, new in (('group_by', 'group by'),
                     ('order_by', 'order by'),
                     ('limit_value', 'limit 1'),
                     ('_EOS', ''),
                     (' value ', ' 1 '),
                     ('distinct', '')):
        pred_sql = pred_sql.replace(old, new)
    pred_sql = pred_sql.strip(',').strip()
    # A trailing bare 'value' token becomes the literal 1.
    if pred_sql.endswith('value'):
        pred_sql = pred_sql[:-len('value')] + '1'

    try:
        format_sql = sqlparse.format(pred_sql, reindent=True)
    except:
        return "None" if len(pred_sql) == 0 else pred_sql

    normalized = normalize_space(format_sql)
    if normalized.count('select') > 1:
        final_sql = postprocess_nested(normalized, schema)
    else:
        final_sql, _ = postprocess_single(normalized, schema)
    return "None" if final_sql == "" else final_sql
def postprocess(predictions, database_schema, remove_from=False):
    """Postprocess decoder predictions and rescore their beams with the reranker.

    predictions: iterable of per-turn dicts (database_id, interaction_id,
        index_in_interaction, input_seq, flat_prediction, beam,
        flat_gold_queries, ...).
    database_schema: db_id -> schema, passed through to postprocess_one.
    remove_from: when True, also postprocess the top-1 prediction string.

    Returns db_id -> list of (postprocessed_sql, interaction_id, turn_id,
    rescored_beam, question, gold_sql) tuples.  Side effects: prints progress
    and exact-string accuracy; reads/periodically writes ./score_vis.json as a
    persistent reranker-score cache.
    """
    import math
    use_reranker = True
    correct = 0
    total = 0
    postprocess_sqls = {}
    utterances = []
    Count = 0
    # Persistent cache: '<utterances>#<sql>' -> reranker score.
    score_vis = dict()
    if os.path.exists('./score_vis.json'):
        with open('./score_vis.json', 'r') as f:
            score_vis = json.load(f)
    for pred in predictions:
        Count += 1
        if Count % 10 == 0:
            print('Count', Count, "score_vis", len(score_vis))
        db_id = pred['database_id']
        schema = database_schema[db_id]
        try:
            return_match = pred['return_match']
        except:
            return_match = ''
        if db_id not in postprocess_sqls:
            postprocess_sqls[db_id] = []
        interaction_id = pred['interaction_id']
        turn_id = pred['index_in_interaction']
        total += 1
        # A new interaction starts: reset the dialogue history.
        if turn_id == 0:
            utterances = []
        question = ' '.join(pred['input_seq'])
        pred_sql_str = ' '.join(pred['flat_prediction'])
        utterances.append(question)
        beam_sql_strs = []
        for score, beam_sql_str in pred['beam']:
            beam_sql_strs.append( (score, ' '.join(beam_sql_str)))
        gold_sql_str = ' '.join(pred['flat_gold_queries'][0])
        if pred_sql_str == gold_sql_str:
            correct += 1
        postprocess_sql = pred_sql_str
        if remove_from:
            postprocess_sql = postprocess_one(pred_sql_str, schema)
        # Gather the beam SQLs whose reranker score is not cached yet, so they
        # can be scored in a single batch call.
        sqls = []
        key_idx = dict()
        for i in range(len(beam_sql_strs)):
            sql = postprocess_one(beam_sql_strs[i][1], schema)
            key = '&'.join(utterances)+'#'+sql
            if key not in score_vis:
                if key not in key_idx:
                    key_idx[key]=len(sqls)
                    sqls.append(sql.replace("value", "1"))
        if use_reranker and len(sqls) > 0:
            score_list = ranker.get_score_batch(utterances, sqls)
        # Combine the decoder score with the (cached or fresh) reranker score;
        # the i * 1e-4 term breaks ties in favor of earlier beam entries.
        for i in range(len(beam_sql_strs)):
            score = beam_sql_strs[i][0]
            sql = postprocess_one(beam_sql_strs[i][1], schema)
            if use_reranker:
                key = '&'.join(utterances)+'#'+sql
                old_score = score
                if key in score_vis:
                    score = score_vis[key]
                else:
                    score = score_list[key_idx[key]]
                    score_vis[key] = score
                score += i * 1e-4
                score = -math.log(score)
                score += old_score
            beam_sql_strs[i] = (score, sql)
        assert beam_sql_strs[0][1] == postprocess_sql
        # Flush the score cache periodically so reruns can reuse it.
        if use_reranker and Count % 100 == 0:
            with open('score_vis.json', 'w') as file:
                json.dump(score_vis, file)
        gold_sql_str = postprocess_one(gold_sql_str, schema)
        postprocess_sqls[db_id].append((postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_sql_str))
    print (correct, total, float(correct)/total)
    return postprocess_sqls
165,608 | import os
import json
import copy
import torch
import sqlparse
import argparse
import subprocess
from collections import defaultdict
from reranker.predict import Ranker
import numpy
def write_and_evaluate(postprocess_sqls, db_path, table_schema_path, gold_path, dataset):
    """Write predictions to disk and build the shell command that evaluates them.

    postprocess_sqls: db_id -> list of (sql, interaction_id, turn_id,
        beam_sql_strs, question, gold_str) tuples from postprocess().
    gold_path: gold file of 'SQL\\tdb_id' lines; when None, a fake gold file is
        generated from the predictions themselves.
    dataset: 'spider' (flat format) or 'sparc'/'cosql' (3 lines per turn:
        beam SQLs, beam scores, question).
    Returns the evaluation command string; the caller is expected to run it.
    """
    # Collect the db ids in gold-file order (or from the predictions when no
    # gold file is given, writing ./fake_gold.txt as a stand-in).
    db_list = []
    if gold_path is not None:
        with open(gold_path) as f:
            for line in f:
                line_split = line.strip().split('\t')
                if len(line_split) != 2:
                    continue
                db = line.strip().split('\t')[1]
                if db not in db_list:
                    db_list.append(db)
    else:
        gold_path = './fake_gold.txt'
        with open(gold_path, "w") as file:
            db_list = list(postprocess_sqls.keys())
            last_id = None
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_str in postprocess_sqls[db]:
                    # Blank line between interactions.
                    if last_id is not None and last_id != str(interaction_id)+db:
                        file.write('\n')
                    last_id = str(interaction_id)+db
                    file.write(gold_str+'\t'+db+'\n')

    output_file = 'output_temp.txt'
    if dataset == 'spider':
        # One predicted SQL per line, no interaction structure.
        with open(output_file, "w") as f:
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id in postprocess_sqls[db]:
                    f.write(postprocess_sql+'\n')
        command = 'python3 eval_scripts/evaluation.py --db {} --table {} --etype match --gold {} --pred {}'.format(db_path,
                                                                                                                   table_schema_path,
                                                                                                                   gold_path,
                                                                                                                   os.path.abspath(output_file))
    elif dataset in ['sparc', 'cosql']:
        # Per turn: beam SQLs (tab-separated), their scores, then the question;
        # interactions separated by a blank line.
        cnt = 0
        with open(output_file, "w") as f:
            last_id = None
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_str in postprocess_sqls[db]:
                    if last_id is not None and last_id != str(interaction_id)+db:
                        f.write('\n')
                    last_id = str(interaction_id) + db
                    f.write('{}\n'.format( '\t'.join( [x[1] for x in beam_sql_strs] ) ))
                    f.write('{}\n'.format( '\t'.join( [str(x[0]) for x in beam_sql_strs] )) )
                    f.write('{}\n'.format( question ))
                    cnt += 1
        """
        predict_file = 'predicted_sql.txt'
        cnt = 0
        with open(predict_file, "w") as f:
            for db in db_list:
                for postprocess_sql, interaction_id, turn_id, beam_sql_strs, question, gold_str in postprocess_sqls[db]:
                    print(postprocess_sql)
                    print(beam_sql_strs)
                    print(question)
                    print(gold_str)
                    if turn_id == 0 and cnt > 0:
                        f.write('\n')
                    f.write('{}\n'.format(postprocess_sql))
                    cnt += 1
        """
        command = 'python3 eval_scripts/gen_final.py --db {} --table {} --etype match --gold {} --pred {}'.format(db_path,table_schema_path,gold_path,os.path.abspath(output_file))
        #command = 'python3 eval_scripts/evaluation_sqa.py --db {} --table {} --etype match --gold {} --pred {}'.format(db_path,table_schema_path,gold_path,os.path.abspath(output_file))
        #command += '; rm output_temp.txt'
    print('begin command')
    return command
165,609 | from enum import Enum
import random
import sys
import json
import progressbar
import model.torch_utils
import data_util.sql_util
import torch
def get_progressbar(name, size):
    """Gets a progress bar object given a name and the total size.

    Inputs:
        name (str): The name to display on the side.
        size (int): The maximum size of the progress bar.
    """
    # Rendered as: "<name> [=====    ]  42% ETA:  0:00:10"
    widgets = [
        name,
        progressbar.Bar('=', '[', ']'),
        ' ',
        progressbar.Percentage(),
        ' ',
        progressbar.ETA(),
    ]
    return progressbar.ProgressBar(maxval=size, widgets=widgets)
The provided code snippet includes necessary dependencies for implementing the `train_epoch_with_utterances` function. Write a Python function `def train_epoch_with_utterances(batches, model, randomize=True)` to solve the following problem:
Trains model for a single epoch given batches of utterance data. Inputs: batches (UtteranceBatch): The batches to give to training. model (ATISModel): The model object. learning_rate (float): The learning rate to use during training. dropout_amount (float): Amount of dropout to set in the model. randomize (bool): Whether or not to randomize the order that the batches are seen.
Here is the function:
def train_epoch_with_utterances(batches,
                                model,
                                randomize=True):
    """Trains model for a single epoch given batches of utterance data.

    Inputs:
        batches (UtteranceBatch): The batches to give to training.
        model (ATISModel): The model object.
        randomize (bool): Whether or not to randomize the order that the batches are seen.

    Returns:
        float, the mean per-batch training loss (0. for an empty batch list).
    """
    # BUG FIX: guard against ZeroDivisionError when there are no batches.
    if not batches:
        return 0.

    if randomize:
        random.shuffle(batches)
    progbar = get_progressbar("train ", len(batches))
    progbar.start()

    loss_sum = 0.
    for i, batch in enumerate(batches):
        batch_loss = model.train_step(batch)
        loss_sum += batch_loss
        progbar.update(i)
    progbar.finish()

    total_loss = loss_sum / len(batches)
    return total_loss
165,610 | from enum import Enum
import random
import sys
import json
import progressbar
import model.torch_utils
import data_util.sql_util
import torch
def get_progressbar(name, size):
    """Gets a progress bar object given a name and the total size.
    Inputs:
        name (str): The name to display on the side.
        size (int): The maximum size of the progress bar.
    """
    # Rendered as: "<name> [=====    ]  42% ETA:  0:00:10"
    return progressbar.ProgressBar(maxval=size,
                                   widgets=[name,
                                            progressbar.Bar('=', '[', ']'),
                                            ' ',
                                            progressbar.Percentage(),
                                            ' ',
                                            progressbar.ETA()])
The provided code snippet includes necessary dependencies for implementing the `train_epoch_with_interactions` function. Write a Python function `def train_epoch_with_interactions(interaction_batches, params, model, randomize=True)` to solve the following problem:
Trains model for single epoch given batches of interactions. Inputs: interaction_batches (list of InteractionBatch): The batches to train on. params (namespace): Parameters to run with. model (ATISModel): Model to train. randomize (bool): Whether or not to randomize the order that batches are seen.
Here is the function:
def train_epoch_with_interactions(interaction_batches,
                                  params,
                                  model,
                                  randomize=True,
                                  skip_databases=None):
    """Trains model for single epoch given batches of interactions.

    Inputs:
        interaction_batches (list of InteractionBatch): The batches to train on.
        params (namespace): Parameters to run with.
        model (ATISModel): Model to train.
        randomize (bool): Whether or not to randomize the order that batches are seen.
        skip_databases (iterable of str, optional): Database ids whose
            interactions are skipped during training. Defaults to the
            previously hard-coded exclusion list.

    Returns:
        float: Loss summed over trained batches divided by the total number of
        batches. NOTE(review): the denominator includes skipped batches, as in
        the original code — confirm this is intended before changing it.
    """
    if skip_databases is None:
        # Default exclusion list, previously hard-coded inline.
        skip_databases = ("baseball_1", "soccer_1", "formula_1",
                          "cre_Drama_Workshop_Groups", "sakila_1")
    skip_databases = set(skip_databases)

    if randomize:
        random.shuffle(interaction_batches)

    progbar = get_progressbar("train     ", len(interaction_batches))
    progbar.start()

    loss_sum = 0.
    for i, interaction_batch in enumerate(interaction_batches):
        # Interaction-level training assumes one interaction per batch.
        assert len(interaction_batch) == 1
        interaction = interaction_batch.items[0]

        database_id = interaction.identifier.split('/')[0]
        if database_id in skip_databases:
            continue
        print(i, interaction.identifier)

        batch_loss = model.train_step(
            interaction, params.train_maximum_sql_length)
        print(batch_loss)
        loss_sum += batch_loss
        # Free cached GPU memory between interactions to avoid OOM on long
        # interactions.
        torch.cuda.empty_cache()

        progbar.update(i)

    progbar.finish()

    total_loss = loss_sum / len(interaction_batches)
    return total_loss
165,611 | from enum import Enum
import random
import sys
import json
import progressbar
import model.torch_utils
import data_util.sql_util
import torch
def write_prediction(fileptr,
                     identifier,
                     input_seq,
                     probability,
                     prediction,
                     flat_prediction,
                     gold_query,
                     flat_gold_queries,
                     gold_tables,
                     index_in_interaction,
                     database_username,
                     database_password,
                     database_timeout,
                     compute_metrics=True,
                     beam=None):
    """Writes one prediction (plus optional execution metrics) as a JSON line.

    Inputs:
        fileptr (file): Open file-like object the JSON line is appended to.
        identifier (str): "<database_id>/<interaction_id>", or a bare id
            (treated as an ATIS interaction id).
        input_seq (list of str): The input utterance tokens.
        probability (float): Probability assigned to the prediction.
        prediction (list): The raw predicted sequence.
        flat_prediction (list of str): Flattened predicted query tokens.
        gold_query (list): The gold query.
        flat_gold_queries (list of list of str): Flattened gold query options.
        gold_tables (list): Gold execution results, one per gold query.
        index_in_interaction (int): Position of the utterance in its interaction.
        database_username (str): Username used when executing queries.
        database_password (str): Password used when executing queries.
        database_timeout (float): Timeout on query execution.
        compute_metrics (bool): Whether to compute string/execution metrics.
        beam (optional): Beam candidates to record, if any.
    """
    pred_obj = {}

    pred_obj["identifier"] = identifier
    if len(identifier.split('/')) == 2:
        database_id, interaction_id = identifier.split('/')
    else:
        database_id = 'atis'
        interaction_id = identifier
    pred_obj["database_id"] = database_id
    pred_obj["interaction_id"] = interaction_id
    pred_obj["input_seq"] = input_seq
    pred_obj["probability"] = probability
    pred_obj["prediction"] = prediction
    pred_obj["flat_prediction"] = flat_prediction
    pred_obj["gold_query"] = gold_query
    pred_obj["flat_gold_queries"] = flat_gold_queries
    pred_obj["index_in_interaction"] = index_in_interaction
    pred_obj["gold_tables"] = str(gold_tables)
    pred_obj["beam"] = beam

    # Now compute the metrics we want.
    if compute_metrics:
        # First metric: whether flat predicted query is in the gold query set.
        correct_string = " ".join(flat_prediction) in [
            " ".join(q) for q in flat_gold_queries]
        pred_obj["correct_string"] = correct_string

        # Database metrics: only execute the SQL if the string match failed.
        if not correct_string:
            syntactic, semantic, pred_table = sql_util.execution_results(
                " ".join(flat_prediction), database_username,
                database_password, database_timeout)
            pred_table = sorted(pred_table)

            best_prec = 0.
            best_rec = 0.
            best_f1 = 0.

            for gold_table in gold_tables:
                num_overlap = float(len(set(pred_table) & set(gold_table)))

                # NOTE(review): these two look swapped relative to the usual
                # definitions (prec divides by the gold set, rec by the
                # predicted set); kept as-is to preserve reported numbers.
                if len(set(gold_table)) > 0:
                    prec = num_overlap / len(set(gold_table))
                else:
                    prec = 1.

                if len(set(pred_table)) > 0:
                    rec = num_overlap / len(set(pred_table))
                else:
                    rec = 1.

                if prec > 0. and rec > 0.:
                    f1 = (2 * (prec * rec)) / (prec + rec)
                else:
                    # Bug fix: F1 is 0 when either precision or recall is 0.
                    # The original assigned 1. here, which (via the max below)
                    # inflated table_f1 whenever any gold table had no overlap
                    # with the prediction.
                    f1 = 0.

                best_prec = max(best_prec, prec)
                best_rec = max(best_rec, rec)
                best_f1 = max(best_f1, f1)
        else:
            # Exact string match: treat execution as trivially correct.
            syntactic = True
            semantic = True
            pred_table = []
            best_prec = 1.
            best_rec = 1.
            best_f1 = 1.

        assert best_prec <= 1.
        assert best_rec <= 1.
        assert best_f1 <= 1.

        pred_obj["syntactic"] = syntactic
        pred_obj["semantic"] = semantic

        correct_table = (pred_table in gold_tables) or correct_string
        pred_obj["correct_table"] = correct_table
        pred_obj["strict_correct_table"] = correct_table and syntactic

        pred_obj["pred_table"] = str(pred_table)
        pred_obj["table_prec"] = best_prec
        pred_obj["table_rec"] = best_rec
        pred_obj["table_f1"] = best_f1

    fileptr.write(json.dumps(pred_obj) + "\n")
def get_progressbar(name, size):
    """Create a named progress bar capped at ``size`` steps.

    Inputs:
        name (str): The name to display on the side.
        size (int): The maximum size of the progress bar.
    """
    return progressbar.ProgressBar(
        maxval=size,
        widgets=[name, progressbar.Bar('=', '[', ']'), ' ',
                 progressbar.Percentage(), ' ', progressbar.ETA()])
def update_sums(metrics,
                metrics_sums,
                predicted_sequence,
                flat_sequence,
                gold_query,
                original_gold_query,
                gold_forcing=False,
                loss=None,
                token_accuracy=0.,
                database_username="",
                database_password="",
                database_timeout=0,
                gold_table=None):
    """Accumulates per-example metric values into ``metrics_sums``.

    TODO: don't use sums, just keep the raw value.
    """
    if Metrics.LOSS in metrics:
        metrics_sums[Metrics.LOSS] += loss.item()

    if Metrics.TOKEN_ACCURACY in metrics:
        if gold_forcing:
            # Token accuracy was already computed during gold-forced decoding.
            metrics_sums[Metrics.TOKEN_ACCURACY] += token_accuracy
        else:
            matches = sum(
                1. for position, token in enumerate(gold_query)
                if position < len(predicted_sequence)
                and predicted_sequence[position] == token)
            metrics_sums[Metrics.TOKEN_ACCURACY] += matches / len(gold_query)

    if Metrics.STRING_ACCURACY in metrics:
        metrics_sums[Metrics.STRING_ACCURACY] += int(
            flat_sequence == original_gold_query)

    if Metrics.CORRECT_TABLES in metrics:
        assert database_username, "You did not provide a database username"
        assert database_password, "You did not provide a database password"
        assert database_timeout > 0, "Database timeout is 0 seconds"

        # Execute the predicted SQL only when it differs from the gold query;
        # on an exact match the gold results can be reused directly.
        if flat_sequence != original_gold_query:
            syntactic, semantic, table = sql_util.execution_results(
                " ".join(flat_sequence), database_username,
                database_password, database_timeout)
        else:
            syntactic = True
            semantic = True
            table = gold_table

        metrics_sums[Metrics.CORRECT_TABLES] += int(table == gold_table)
        if Metrics.SYNTACTIC_QUERIES in metrics:
            metrics_sums[Metrics.SYNTACTIC_QUERIES] += int(syntactic)
        if Metrics.SEMANTIC_QUERIES in metrics:
            metrics_sums[Metrics.SEMANTIC_QUERIES] += int(semantic)
        if Metrics.STRICT_CORRECT_TABLES in metrics:
            metrics_sums[Metrics.STRICT_CORRECT_TABLES] += int(
                table == gold_table and syntactic)
def construct_averages(metrics_sums, total_num):
    """Computes the averages for metrics.

    Inputs:
        metrics_sums (dict Metric -> float): Sums for a metric.
        total_num (int): Number to divide by (average).

    Returns:
        dict Metric -> float: Averaged metrics; everything except the loss is
        reported as a percentage.
    """
    metrics_averages = {}
    for metric, value in metrics_sums.items():
        metrics_averages[metric] = value / total_num
        # Bug fix: the original compared `metric != "loss"`, but the keys are
        # Metrics enum members, which never equal a plain string — so the loss
        # was also (incorrectly) scaled by 100.
        if metric != Metrics.LOSS:
            metrics_averages[metric] *= 100.
    return metrics_averages
The provided code snippet includes necessary dependencies for implementing the `evaluate_utterance_sample` function. Write a Python function `def evaluate_utterance_sample(sample, model, max_generation_length, name="", gold_forcing=False, metrics=None, total_num=-1, database_username="", database_password="", database_timeout=0, write_results=False)` to solve the following problem:
Evaluates a sample of utterance examples. Inputs: sample (list of Utterance): Examples to evaluate. model (ATISModel): Model to predict with. max_generation_length (int): Maximum length to generate. name (str): Name to log with. gold_forcing (bool): Whether to force the gold tokens during decoding. metrics (list of Metric): Metrics to evaluate with. total_num (int): Number to divide by when reporting results. database_username (str): Username to use for executing queries. database_password (str): Password to use when executing queries. database_timeout (float): Timeout on queries when executing. write_results (bool): Whether to write the results to a file.
Here is the function:
def evaluate_utterance_sample(sample,
                              model,
                              max_generation_length,
                              name="",
                              gold_forcing=False,
                              metrics=None,
                              total_num=-1,
                              database_username="",
                              database_password="",
                              database_timeout=0,
                              write_results=False):
    """Evaluates a sample of utterance examples.

    Inputs:
        sample (list of Utterance): Examples to evaluate.
        model (ATISModel): Model to predict with.
        max_generation_length (int): Maximum length to generate.
        name (str): Name to log with; also prefixes the predictions file.
        gold_forcing (bool): Whether to force the gold tokens during decoding.
        metrics (list of Metric): Metrics to evaluate with.
        total_num (int): Number to divide by when reporting results; defaults
            to the sample size when negative.
        database_username (str): Username to use for executing queries.
        database_password (str): Password to use when executing queries.
        database_timeout (float): Timeout on queries when executing.
        write_results (bool): Whether to write the results to a file.

    Returns:
        tuple: (dict Metric -> float of averaged metrics, None).
    """
    assert metrics

    if total_num < 0:
        total_num = len(sample)

    metrics_sums = {metric: 0. for metric in metrics}

    print("Predicting with filename " + str(name) + "_predictions.json")

    progbar = get_progressbar(name, len(sample))
    progbar.start()

    predictions = []
    # Bug fix: use a context manager so the predictions file is closed even
    # if an eval step raises (the original leaked the handle on error).
    with open(name + "_predictions.json", "w") as predictions_file:
        for i, item in enumerate(sample):
            _, loss, predicted_seq = model.eval_step(
                item, max_generation_length, feed_gold_query=gold_forcing)
            # Normalize the loss by the gold query length.
            loss = loss / len(item.gold_query())
            predictions.append(predicted_seq)

            flat_sequence = item.flatten_sequence(predicted_seq)
            token_accuracy = torch_utils.per_token_accuracy(
                item.gold_query(), predicted_seq)

            if write_results:
                write_prediction(
                    predictions_file,
                    identifier=item.interaction.identifier,
                    input_seq=item.input_sequence(),
                    probability=0,
                    prediction=predicted_seq,
                    flat_prediction=flat_sequence,
                    gold_query=item.gold_query(),
                    flat_gold_queries=item.original_gold_queries(),
                    gold_tables=item.gold_tables(),
                    index_in_interaction=item.utterance_index,
                    database_username=database_username,
                    database_password=database_password,
                    database_timeout=database_timeout)

            update_sums(metrics,
                        metrics_sums,
                        predicted_seq,
                        flat_sequence,
                        item.gold_query(),
                        item.original_gold_queries()[0],
                        gold_forcing,
                        loss,
                        token_accuracy,
                        database_username=database_username,
                        database_password=database_password,
                        database_timeout=database_timeout,
                        gold_table=item.gold_tables()[0])

            progbar.update(i)

    progbar.finish()

    return construct_averages(metrics_sums, total_num), None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.