code
stringlengths
17
6.64M
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
    """Map the column id inside a col_unit through the foreign-key map.

    Returns ``col_unit`` untouched when it is None; otherwise a new
    (agg_id, col_id, distinct) triple where col_id is rewritten via ``kmap``
    when it is both mapped and listed in ``valid_col_units``.
    """
    if col_unit is None:
        return col_unit
    agg, cid, flag = col_unit
    if cid in valid_col_units and cid in kmap:
        cid = kmap[cid]
    # Under DISABLE_DISTINCT the distinct flag is normalized away entirely.
    if DISABLE_DISTINCT:
        flag = None
    return (agg, cid, flag)
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
    """Rewrite both column units inside a val_unit via the foreign-key map."""
    if val_unit is None:
        return val_unit
    op, left, right = val_unit
    return (op,
            rebuild_col_unit_col(valid_col_units, left, kmap),
            rebuild_col_unit_col(valid_col_units, right, kmap))
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
    """Rewrite the column reference of a table_unit when it is a col_unit tuple."""
    if table_unit is None:
        return table_unit
    ttype, payload = table_unit
    # Only plain col_unit tuples are rewritten; nested SQL dicts pass through.
    if isinstance(payload, tuple):
        payload = rebuild_col_unit_col(valid_col_units, payload, kmap)
    return (ttype, payload)
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
    """Rewrite the val_unit embedded in a single condition unit."""
    if cond_unit is None:
        return cond_unit
    not_op, op_id, val_unit, val1, val2 = cond_unit
    rebuilt = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
    return (not_op, op_id, rebuilt, val1, val2)
def rebuild_condition_col(valid_col_units, condition, kmap):
    """Rewrite every cond_unit in a condition list in place.

    Condition lists alternate cond_unit / conjunction ('and'/'or'), so only
    even indices hold cond_units.
    """
    for pos, item in enumerate(condition):
        if pos % 2 == 0:
            condition[pos] = rebuild_cond_unit_col(valid_col_units, item, kmap)
    return condition
def rebuild_select_col(valid_col_units, sel, kmap):
    """Rewrite every val_unit in a SELECT clause via the foreign-key map."""
    if sel is None:
        return sel
    distinct, items = sel
    rebuilt = [(agg, rebuild_val_unit_col(valid_col_units, vu, kmap))
               for agg, vu in items]
    # Under DISABLE_DISTINCT the distinct flag is normalized away entirely.
    if DISABLE_DISTINCT:
        distinct = None
    return (distinct, rebuilt)
def rebuild_from_col(valid_col_units, from_, kmap):
    """Rewrite the table units and join conditions of a FROM clause in place."""
    if from_ is None:
        return from_
    from_['table_units'] = [
        rebuild_table_unit_col(valid_col_units, tu, kmap)
        for tu in from_['table_units']
    ]
    from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)
    return from_
def rebuild_group_by_col(valid_col_units, group_by, kmap):
    """Rewrite each col_unit of a GROUP BY clause via the foreign-key map."""
    if group_by is None:
        return group_by
    return [rebuild_col_unit_col(valid_col_units, unit, kmap)
            for unit in group_by]
def rebuild_order_by_col(valid_col_units, order_by, kmap):
    """Rewrite each val_unit of an ORDER BY clause, keeping its direction."""
    if not order_by:
        # None and empty clauses are returned as-is (list stays a list).
        return order_by
    direction, units = order_by
    rebuilt = [rebuild_val_unit_col(valid_col_units, vu, kmap) for vu in units]
    return (direction, rebuilt)
def rebuild_sql_col(valid_col_units, sql, kmap):
    """Recursively rewrite every column reference in a parsed SQL dict in place."""
    if sql is None:
        return sql
    sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
    sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
    for clause in ('where', 'having'):
        sql[clause] = rebuild_condition_col(valid_col_units, sql[clause], kmap)
    sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
    sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
    # Set operations hold nested full SQL dicts; recurse into each.
    for op in ('intersect', 'except', 'union'):
        sql[op] = rebuild_sql_col(valid_col_units, sql[op], kmap)
    return sql
def build_foreign_key_map(entry):
    """Build a map from every column identifier to the canonical member of
    its foreign-key group.

    Column identifiers take the ``__table.column__`` form ('__all__' for the
    table-less '*'); the column with the smallest index in each group is the
    canonical target.
    """
    tables = entry['table_names_original']
    names = []
    for tab_idx, col_name in entry['column_names_original']:
        if tab_idx < 0:
            # The special '*' column has no owning table.
            names.append('__all__')
        else:
            names.append('__' + tables[tab_idx].lower() + '.' + col_name.lower() + '__')
    # Group foreign-key endpoints into shared sets.  NOTE(review): a pair
    # whose two keys already sit in two *different* sets joins only the
    # first set found, mirroring the original grouping behavior.
    groups = []
    for key1, key2 in entry['foreign_keys']:
        for group in groups:
            if key1 in group or key2 in group:
                break
        else:
            group = set()
            groups.append(group)
        group.update((key1, key2))
    mapping = {}
    for group in groups:
        members = sorted(group)
        canonical = names[members[0]]
        for idx in members:
            mapping[names[idx]] = canonical
    return mapping
def build_foreign_key_map_from_json(table):
    """Load a tables JSON file and build a foreign-key map per database id."""
    with open(table) as handle:
        entries = json.load(handle)
    return {entry['db_id']: build_foreign_key_map(entry) for entry in entries}
class Schema:
    """Simple schema which maps table&column to a unique identifier.

    The mapping covers '*', every 'table.column' pair, and every bare table
    name, each wrapped as '__name__' (all lower-cased).
    """

    def __init__(self, schema):
        # schema: dict mapping table name -> list of column names.
        self._schema = schema
        self._idMap = self._map(self._schema)

    @property
    def schema(self):
        return self._schema

    @property
    def idMap(self):
        return self._idMap

    def _map(self, schema):
        """Build the identifier map from the raw schema dict.

        The original version also incremented an unused counter named `id`
        (shadowing the builtin); that dead code has been removed.
        """
        idMap = {'*': '__all__'}
        for key, vals in schema.items():
            for val in vals:
                idMap[key.lower() + '.' + val.lower()] = \
                    '__' + key.lower() + '.' + val.lower() + '__'
        for key in schema:
            idMap[key.lower()] = '__' + key.lower() + '__'
        return idMap
def get_schema(db):
    """Get a database's schema, which is a dict with table name as key
    and list of column names as value (all lower-cased).

    :param db: database path
    :return: schema dict
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]
        # PRAGMA table_info row layout: (cid, name, type, notnull, dflt, pk);
        # index 1 is the column name.
        for table in tables:
            cursor.execute('PRAGMA table_info({})'.format(table))
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        # The original leaked the connection; close it explicitly.
        conn.close()
    return schema
def get_schema_from_json(fpath):
    """Load a schema dict {table: [column names]} from a JSON description file."""
    with open(fpath) as handle:
        entries = json.load(handle)
    schema = {}
    for entry in entries:
        columns = [str(col['column_name'].lower()) for col in entry['col_data']]
        schema[str(entry['table'].lower())] = columns
    return schema
def tokenize(string):
    """Tokenize a SQL query string, protecting quoted literals and merging
    two-character comparison operators (!=, >=, <=)."""
    text = str(string).replace("'", '"')
    quote_positions = [pos for pos, ch in enumerate(text) if ch == '"']
    assert len(quote_positions) % 2 == 0, 'Unexpected quote'
    # Replace each quoted literal (right to left, so earlier offsets stay
    # valid) with a placeholder key, remembering the original value.
    placeholders = {}
    for i in range(len(quote_positions) - 1, -1, -2):
        start = quote_positions[i - 1]
        end = quote_positions[i]
        key = '__val_{}_{}__'.format(start, end)
        placeholders[key] = text[start:end + 1]
        text = text[:start] + key + text[end + 1:]
    tokens = [word.lower() for word in word_tokenize(text)]
    # Restore the protected literals.
    tokens = [placeholders.get(tok, tok) for tok in tokens]
    # Fuse '!'/'>'/'<' followed by '=' into one operator token, walking
    # backwards so earlier indices are unaffected by the splice.
    prefix = ('!', '>', '<')
    for eq_idx in reversed([pos for pos, tok in enumerate(tokens) if tok == '=']):
        before = tokens[eq_idx - 1]
        if before in prefix:
            tokens = tokens[:eq_idx - 1] + [before + '='] + tokens[eq_idx + 1:]
    return tokens
def scan_alias(toks):
    """Scan for 'as' tokens and map each alias to the table name before it."""
    alias = {}
    for pos, tok in enumerate(toks):
        if tok == 'as':
            alias[toks[pos + 1]] = toks[pos - 1]
    return alias
def get_tables_with_alias(schema, toks):
    """Return an alias->table mapping, with every real table mapping to itself."""
    tables = scan_alias(toks)
    for key in schema:
        # A table name may not collide with an alias used in the query.
        assert key not in tables, 'Alias {} has the same name in table'.format(key)
        tables[key] = key
    return tables
def parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse one column token.

    :returns: (next idx, column id)
    """
    tok = toks[start_idx]
    if tok == '*':
        return start_idx + 1, schema.idMap[tok]
    if '.' in tok:
        # Explicitly qualified column: alias.column
        alias, col = tok.split('.')
        key = tables_with_alias[alias] + '.' + col
        return start_idx + 1, schema.idMap[key]
    assert default_tables is not None and len(default_tables) > 0, \
        'Default tables should not be None or empty'
    # Unqualified column: resolve against the tables currently in scope.
    for alias in default_tables:
        table = tables_with_alias[alias]
        if tok in schema.schema[table]:
            return start_idx + 1, schema.idMap[table + '.' + tok]
    assert False, 'Error col: {}'.format(tok)
def parse_col_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse one column unit.

    :returns: next idx, (agg_op id, col_id, isDistinct)
    """
    idx = start_idx
    len_ = len(toks)
    isBlock = False
    isDistinct = False
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    if (toks[idx] in AGG_OPS):
        # Aggregated column: agg_op '(' [distinct] col ')'
        agg_id = AGG_OPS.index(toks[idx])
        idx += 1
        assert ((idx < len_) and (toks[idx] == '('))
        idx += 1
        if (toks[idx] == 'distinct'):
            idx += 1
            isDistinct = True
        (idx, col_id) = parse_col(toks, idx, tables_with_alias, schema, default_tables)
        assert ((idx < len_) and (toks[idx] == ')'))
        idx += 1
        # NOTE(review): when isBlock is set, this branch returns without
        # consuming the outer ')' — confirm intended.
        return (idx, (agg_id, col_id, isDistinct))
    # Plain (possibly distinct) column without an aggregation.
    if (toks[idx] == 'distinct'):
        idx += 1
        isDistinct = True
    agg_id = AGG_OPS.index('none')
    (idx, col_id) = parse_col(toks, idx, tables_with_alias, schema, default_tables)
    if isBlock:
        assert (toks[idx] == ')')
        idx += 1
    return (idx, (agg_id, col_id, isDistinct))
def parse_val_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse a value unit: a col_unit optionally combined with a second one
    through an arithmetic operator.

    :returns: (next idx, (unit_op, col_unit1, col_unit2))
    """
    idx = start_idx
    total = len(toks)
    in_block = toks[idx] == '('
    if in_block:
        idx += 1
    unit_op = UNIT_OPS.index('none')
    second = None
    idx, first = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
    if idx < total and toks[idx] in UNIT_OPS:
        # Arithmetic combination of two column units.
        unit_op = UNIT_OPS.index(toks[idx])
        idx += 1
        idx, second = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
    if in_block:
        assert toks[idx] == ')'
        idx += 1
    return idx, (unit_op, first, second)
def parse_table_unit(toks, start_idx, tables_with_alias, schema):
    """Parse one table reference.

    :returns: (next idx, table id, table name)
    """
    idx = start_idx
    key = tables_with_alias[toks[idx]]
    # Skip an 'as <alias>' suffix when present.
    if idx + 1 < len(toks) and toks[idx + 1] == 'as':
        idx += 3
    else:
        idx += 1
    return idx, schema.idMap[key], key
def parse_value(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse a literal value, a nested SELECT, or a bare column reference.

    :returns: (next idx, value)
    """
    idx = start_idx
    len_ = len(toks)
    isBlock = False
    if toks[idx] == '(':
        isBlock = True
        idx += 1
    if toks[idx] == 'select':
        # Nested subquery used as a value.
        idx, val = parse_sql(toks, idx, tables_with_alias, schema)
    elif '"' in toks[idx]:
        # Quoted string literal (kept verbatim by tokenize()).
        val = toks[idx]
        idx += 1
    else:
        try:
            val = float(toks[idx])
            idx += 1
        except ValueError:
            # Not a number: treat the span up to the next delimiter/keyword
            # as a column unit.  (The original used a bare `except:`, which
            # also swallowed unrelated errors such as KeyboardInterrupt.)
            end_idx = idx
            while (end_idx < len_ and toks[end_idx] != ','
                   and toks[end_idx] != ')' and toks[end_idx] != 'and'
                   and toks[end_idx] not in CLAUSE_KEYWORDS
                   and toks[end_idx] not in JOIN_KEYWORDS):
                end_idx += 1
            idx, val = parse_col_unit(toks[start_idx:end_idx], 0,
                                      tables_with_alias, schema, default_tables)
            idx = end_idx
    if isBlock:
        assert toks[idx] == ')'
        idx += 1
    return idx, val
def parse_condition(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse a condition list: cond_units separated by and/or conjunctions.

    :returns: (next idx, conds) where conds alternates cond_unit tuples and
        conjunction tokens.
    """
    idx = start_idx
    len_ = len(toks)
    conds = []
    while (idx < len_):
        (idx, val_unit) = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        not_op = False
        if (toks[idx] == 'not'):
            not_op = True
            idx += 1
        assert ((idx < len_) and (toks[idx] in WHERE_OPS)), 'Error condition: idx: {}, tok: {}'.format(idx, toks[idx])
        op_id = WHERE_OPS.index(toks[idx])
        idx += 1
        val1 = val2 = None
        if (op_id == WHERE_OPS.index('between')):
            # BETWEEN takes two values joined by 'and'.
            (idx, val1) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
            assert (toks[idx] == 'and')
            idx += 1
            (idx, val2) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
        else:
            (idx, val1) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
            val2 = None
        conds.append((not_op, op_id, val_unit, val1, val2))
        # Stop at a clause boundary, closing paren/semicolon, or join keyword.
        if ((idx < len_) and ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';')) or (toks[idx] in JOIN_KEYWORDS))):
            break
        # Otherwise expect an and/or conjunction and continue parsing.
        if ((idx < len_) and (toks[idx] in COND_OPS)):
            conds.append(toks[idx])
            idx += 1
    return (idx, conds)
def parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse a SELECT clause.

    :returns: (next idx, (isDistinct, [(agg_id, val_unit), ...]))
    """
    idx = start_idx
    total = len(toks)
    assert toks[idx] == 'select', "'select' not found"
    idx += 1
    distinct = False
    if idx < total and toks[idx] == 'distinct':
        idx += 1
        distinct = True
    items = []
    while idx < total and toks[idx] not in CLAUSE_KEYWORDS:
        # Optional aggregation operator in front of the value unit.
        agg_id = AGG_OPS.index('none')
        if toks[idx] in AGG_OPS:
            agg_id = AGG_OPS.index(toks[idx])
            idx += 1
        idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        items.append((agg_id, val_unit))
        if idx < total and toks[idx] == ',':
            idx += 1  # skip the separator between select items
    return idx, (distinct, items)
def parse_from(toks, start_idx, tables_with_alias, schema):
    """Parse the FROM clause.

    Assume in the from clause, all table units are combined with join.

    :returns: (next idx, table_units, conds, default_tables)
    """
    assert ('from' in toks[start_idx:]), "'from' not found"
    len_ = len(toks)
    idx = (toks.index('from', start_idx) + 1)
    default_tables = []
    table_units = []
    conds = []
    while (idx < len_):
        isBlock = False
        if (toks[idx] == '('):
            isBlock = True
            idx += 1
        if (toks[idx] == 'select'):
            # Subquery used as a table.
            (idx, sql) = parse_sql(toks, idx, tables_with_alias, schema)
            table_units.append((TABLE_TYPE['sql'], sql))
        else:
            if ((idx < len_) and (toks[idx] == 'join')):
                idx += 1
            (idx, table_unit, table_name) = parse_table_unit(toks, idx, tables_with_alias, schema)
            table_units.append((TABLE_TYPE['table_unit'], table_unit))
            # Tables seen here become the scope for unqualified columns.
            default_tables.append(table_name)
        if ((idx < len_) and (toks[idx] == 'on')):
            idx += 1
            (idx, this_conds) = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
            if (len(conds) > 0):
                # Chain ON conditions from successive joins with 'and'.
                conds.append('and')
            conds.extend(this_conds)
        if isBlock:
            assert (toks[idx] == ')')
            idx += 1
        if ((idx < len_) and ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';')))):
            break
    return (idx, table_units, conds, default_tables)
def parse_where(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional WHERE clause; returns (idx, []) when absent."""
    idx = start_idx
    if idx >= len(toks) or toks[idx] != 'where':
        return idx, []
    idx, conds = parse_condition(toks, idx + 1, tables_with_alias, schema, default_tables)
    return idx, conds
def parse_group_by(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional GROUP BY clause into a list of col_units."""
    idx = start_idx
    total = len(toks)
    col_units = []
    if idx >= total or toks[idx] != 'group':
        return idx, col_units
    idx += 1
    assert toks[idx] == 'by'
    idx += 1
    while idx < total and toks[idx] not in CLAUSE_KEYWORDS and toks[idx] not in (')', ';'):
        idx, col_unit = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
        col_units.append(col_unit)
        if idx < total and toks[idx] == ',':
            idx += 1  # more grouping columns follow
        else:
            break
    return idx, col_units
def parse_order_by(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional ORDER BY clause.

    Returns (idx, []) when absent (a bare list, matching the original),
    otherwise (idx, (order_type, val_units)).
    """
    idx = start_idx
    total = len(toks)
    val_units = []
    order_type = 'asc'  # default sort direction
    if idx >= total or toks[idx] != 'order':
        return idx, val_units
    idx += 1
    assert toks[idx] == 'by'
    idx += 1
    while idx < total and toks[idx] not in CLAUSE_KEYWORDS and toks[idx] not in (')', ';'):
        idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        val_units.append(val_unit)
        if idx < total and toks[idx] in ORDER_OPS:
            order_type = toks[idx]
            idx += 1
        if idx < total and toks[idx] == ',':
            idx += 1  # more sort keys follow
        else:
            break
    return idx, (order_type, val_units)
def parse_having(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional HAVING clause; returns (idx, []) when absent."""
    idx = start_idx
    if idx >= len(toks) or toks[idx] != 'having':
        return idx, []
    idx, conds = parse_condition(toks, idx + 1, tables_with_alias, schema, default_tables)
    return idx, conds
def parse_limit(toks, start_idx):
    """Parse an optional LIMIT clause.

    Returns (idx, None) when no limit is present.  NOTE(review): tokenized
    input holds strings, so the `type(...) != int` test always fires and the
    parsed limit collapses to the placeholder 1; the int branch looks
    unreachable for tokenized input — confirm intended.
    """
    idx = start_idx
    if idx >= len(toks) or toks[idx] != 'limit':
        return idx, None
    idx += 2  # consume 'limit' and its value token
    if type(toks[idx - 1]) != int:
        return idx, 1
    return idx, int(toks[idx - 1])
def parse_sql(toks, start_idx, tables_with_alias, schema):
    """Parse a full (possibly parenthesized) SELECT statement into a dict.

    :returns: (next idx, sql dict) with keys select/from/where/groupBy/
        having/orderBy/limit plus intersect/except/union.
    """
    isBlock = False
    len_ = len(toks)
    idx = start_idx
    sql = {}
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    # FROM is parsed first so its tables become the default scope for
    # resolving unqualified columns in the other clauses.
    (from_end_idx, table_units, conds, default_tables) = parse_sql_from(toks, start_idx, tables_with_alias, schema) if False else parse_from(toks, start_idx, tables_with_alias, schema)
    sql['from'] = {'table_units': table_units, 'conds': conds}
    (_, select_col_units) = parse_select(toks, idx, tables_with_alias, schema, default_tables)
    idx = from_end_idx
    sql['select'] = select_col_units
    (idx, where_conds) = parse_where(toks, idx, tables_with_alias, schema, default_tables)
    sql['where'] = where_conds
    (idx, group_col_units) = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['groupBy'] = group_col_units
    (idx, having_conds) = parse_having(toks, idx, tables_with_alias, schema, default_tables)
    sql['having'] = having_conds
    (idx, order_col_units) = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['orderBy'] = order_col_units
    (idx, limit_val) = parse_limit(toks, idx)
    sql['limit'] = limit_val
    idx = skip_semicolon(toks, idx)
    if isBlock:
        assert (toks[idx] == ')')
        idx += 1
    idx = skip_semicolon(toks, idx)
    # Set-operation clauses default to None, then a trailing
    # intersect/union/except statement is parsed recursively if present.
    for op in SQL_OPS:
        sql[op] = None
    if ((idx < len_) and (toks[idx] in SQL_OPS)):
        sql_op = toks[idx]
        idx += 1
        (idx, IUE_sql) = parse_sql(toks, idx, tables_with_alias, schema)
        sql[sql_op] = IUE_sql
    return (idx, sql)
def load_data(fpath):
    """Read and return the JSON content of *fpath*."""
    with open(fpath) as handle:
        return json.load(handle)
def get_sql(schema, query):
    """Tokenize *query* and parse it into the structured SQL dict."""
    tokens = tokenize(query)
    alias_map = get_tables_with_alias(schema.schema, tokens)
    _, parsed = parse_sql(tokens, 0, alias_map, schema)
    return parsed
def skip_semicolon(toks, start_idx):
    """Advance past any run of ';' tokens starting at *start_idx*."""
    idx = start_idx
    total = len(toks)
    while idx < total and toks[idx] == ';':
        idx += 1
    return idx
class Logger():
    """Simple line-oriented log-file wrapper.

    Attributes:
        fileptr (file): File pointer for input/output.
        lines (list of str): The lines read from the log (read mode only).
    """

    def __init__(self, filename, option):
        # option is a plain open() mode; 'r' preloads the file's lines.
        self.fileptr = open(filename, option)
        if (option == 'r'):
            self.lines = self.fileptr.readlines()
        else:
            self.lines = []

    def put(self, string):
        """Writes a line to the file and flushes immediately."""
        self.fileptr.write((string + '\n'))
        self.fileptr.flush()

    def close(self):
        """Closes the logger."""
        self.fileptr.close()

    def findlast(self, identifier, default=0.0):
        """Finds the last line starting with *identifier* and returns its value.

        The value is the second tab-separated field, coerced to float/bool
        when possible; *default* is returned when no line matches.
        """
        for line in self.lines[::(- 1)]:
            if line.lower().startswith(identifier):
                string = line.strip().split('\t')[1]
                # Dot-stripped digit check treats e.g. '1.5' as a float;
                # NOTE(review): negative numbers fall through to the raw
                # string return — confirm acceptable for this log format.
                if string.replace('.', '').isdigit():
                    return float(string)
                elif (string.lower() == 'true'):
                    return True
                elif (string.lower() == 'false'):
                    return False
                else:
                    return string
        return default

    def contains(self, string):
        """Determines whether the string is present in the log (case-insensitive)."""
        for line in self.lines[::(- 1)]:
            if (string.lower() in line.lower()):
                return True
        return False

    def findlast_log_before(self, before_str):
        """Finds the last entry in the log before another entry.

        Collects the lines preceding the last line that starts with
        *before_str*, stopping at the first blank line, and returns them
        joined in original order.
        """
        loglines = []
        in_line = False
        for line in self.lines[::(- 1)]:
            if line.startswith(before_str):
                in_line = True
            elif in_line:
                loglines.append(line)
            if ((line.strip() == '') and in_line):
                return ''.join(loglines[::(- 1)])
        return ''.join(loglines[::(- 1)])
class AttentionResult(namedtuple('AttentionResult', ('scores', 'distribution', 'vector'))):
    """Stores the result of an attention calculation.

    Fields:
        scores: raw (pre-softmax) attention scores.
        distribution: softmax-normalized attention weights.
        vector: the attention-weighted context vector.
    """
    __slots__ = ()
class Attention(torch.nn.Module):
    """Attention mechanism class. Stores parameters for and computes attention.

    Attributes:
        key_size (int): The size of the key vectors.
        value_size (int): The size of the value vectors.
        query_weights: Weight matrix projecting the query to key_size before
            scoring.
    """

    def __init__(self, query_size, key_size, value_size):
        super().__init__()
        self.key_size = key_size
        self.value_size = value_size
        # Projects a query of query_size to key_size so it can be scored
        # against the keys.
        self.query_weights = torch_utils.add_params((query_size, self.key_size), 'weights-attention-q')

    def transform_arguments(self, query, keys, values):
        """Transforms the query/key/value inputs before attention calculations.

        Arguments:
            query: Vector representing the query (e.g., hidden state).
            keys: List of vectors representing the key values.
            values: List of vectors representing the values.

        Returns:
            Triple of (transformed query, stacked keys, stacked values); the
            keys/values are stacked along dim 1, so dim 0 must equal
            key_size/value_size respectively.
        """
        assert (len(keys) == len(values))
        all_keys = torch.stack(keys, dim=1)
        all_values = torch.stack(values, dim=1)
        assert (all_keys.size()[0] == self.key_size), ((('Expected key size of ' + str(self.key_size)) + ' but got ') + str(all_keys.size()[0]))
        assert (all_values.size()[0] == self.value_size)
        query = torch_utils.linear_layer(query, self.query_weights)
        return (query, all_keys, all_values)

    def forward(self, query, keys, values=None):
        # When no separate values are given, attend over the keys themselves.
        if (not values):
            values = keys
        (query_t, keys_t, values_t) = self.transform_arguments(query, keys, values)
        # One raw score per key position.
        scores = torch.t(torch.mm(query_t, keys_t))
        distribution = F.softmax(scores, dim=0)
        # Weighted sum of the values under the attention distribution.
        context_vector = torch.mm(values_t, distribution).squeeze()
        return AttentionResult(scores, distribution, context_vector)
def convert():
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict.

    Reads paths and config from the module-level `args` and writes the
    converted state dict to `args.pytorch_dump_path`.
    """
    config = BertConfig.from_json_file(args.bert_config_file)
    model = BertModel(config)
    path = args.tf_checkpoint_path
    print('Converting TensorFlow checkpoint from {}'.format(path))
    init_vars = tf.train.list_variables(path)
    names = []
    arrays = []
    for (name, shape) in init_vars:
        print('Loading {} with shape {}'.format(name, shape))
        array = tf.train.load_variable(path, name)
        print('Numpy array shape {}'.format(array.shape))
        names.append(name)
        arrays.append(array)
    for (name, array) in zip(names, arrays):
        # Strip a 5-character scope prefix (presumably 'bert/'); NOTE(review):
        # for 'cls/predictions…' names this yields 'redictions…', which the
        # skip test below matches — confirm against the checkpoint's naming.
        name = name[5:]
        print('Loading {}'.format(name))
        name = name.split('/')
        if (name[0] in ['redictions', 'eq_relationship']):
            # Pre-training head variables are not part of BertModel; skip.
            print('Skipping')
            continue
        pointer = model
        for m_name in name:
            # Scope components like 'layer_11' index into a module list.
            if re.fullmatch('[A-Za-z]+_\\d+', m_name):
                l = re.split('_(\\d+)', m_name)
            else:
                l = [m_name]
            if (l[0] == 'kernel'):
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if (len(l) >= 2):
                num = int(l[1])
                pointer = pointer[num]
        if (m_name[(- 11):] == '_embeddings'):
            pointer = getattr(pointer, 'weight')
        elif (m_name == 'kernel'):
            # TF stores dense kernels transposed relative to torch Linear.
            array = np.transpose(array)
        try:
            assert (pointer.shape == array.shape)
        except AssertionError as e:
            # Surface the mismatching shapes in the raised error.
            e.args += (pointer.shape, array.shape)
            raise
        pointer.data = torch.from_numpy(array)
    torch.save(model.state_dict(), args.pytorch_dump_path)
def input_fn_builder(features, seq_length, drop_remainder):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""
    # Flatten the feature objects into parallel per-field lists up front so
    # the closure can build constant tensors from them.
    all_unique_ids = []
    all_input_ids = []
    all_input_mask = []
    all_segment_ids = []
    all_start_positions = []
    all_end_positions = []
    for feature in features:
        all_unique_ids.append(feature.unique_id)
        all_input_ids.append(feature.input_ids)
        all_input_mask.append(feature.input_mask)
        all_segment_ids.append(feature.segment_ids)
        all_start_positions.append(feature.start_position)
        all_end_positions.append(feature.end_position)

    def input_fn(params):
        """The actual input function."""
        batch_size = params['batch_size']
        num_examples = len(features)
        # The whole dataset is materialized as in-memory constant tensors.
        feature_map = {'unique_ids': tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32), 'input_ids': tf.constant(all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), 'input_mask': tf.constant(all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), 'segment_ids': tf.constant(all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), 'start_positions': tf.constant(all_start_positions, shape=[num_examples], dtype=tf.int32), 'end_positions': tf.constant(all_end_positions, shape=[num_examples], dtype=tf.int32)}
        d = tf.data.Dataset.from_tensor_slices(feature_map)
        # Repeat forever; the estimator controls the number of steps.
        d = d.repeat()
        d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
        return d
    return input_fn
def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, labels, mode, params):
        """The `model_fn` for TPUEstimator."""
        tf.logging.info('*** Features ***')
        for name in sorted(features.keys()):
            tf.logging.info((' name = %s, shape = %s' % (name, features[name].shape)))
        unique_ids = features['unique_ids']
        input_ids = features['input_ids']
        input_mask = features['input_mask']
        segment_ids = features['segment_ids']
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        # Build the span-prediction model; returns per-token start/end logits.
        (start_logits, end_logits) = create_model(bert_config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings)
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            # Restore pretrained weights; on TPU this must go via a scaffold.
            (assignment_map, initialized_variable_names) = modeling_tensorflow.get_assigment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:

                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info('**** Trainable Variables ****')
        for var in tvars:
            init_string = ''
            if (var.name in initialized_variable_names):
                init_string = ', *INIT_FROM_CKPT*'
            tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
        output_spec = None
        if (mode == tf.estimator.ModeKeys.TRAIN):
            seq_length = modeling_tensorflow.get_shape_list(input_ids)[1]

            def compute_loss(logits, positions):
                # Cross-entropy of the gold position under the logits.
                one_hot_positions = tf.one_hot(positions, depth=seq_length, dtype=tf.float32)
                log_probs = tf.nn.log_softmax(logits, axis=(- 1))
                loss = (- tf.reduce_mean(tf.reduce_sum((one_hot_positions * log_probs), axis=(- 1))))
                return loss
            start_positions = features['start_positions']
            end_positions = features['end_positions']
            start_loss = compute_loss(start_logits, start_positions)
            end_loss = compute_loss(end_logits, end_positions)
            total_loss = ((start_loss + end_loss) / 2.0)
            train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
        elif (mode == tf.estimator.ModeKeys.PREDICT):
            batch_size = modeling_tensorflow.get_shape_list(start_logits)[0]
            seq_length = modeling_tensorflow.get_shape_list(input_ids)[1]

            # NOTE(review): compute_loss is duplicated from the TRAIN branch
            # so predictions can also report per-example losses.
            def compute_loss(logits, positions):
                one_hot_positions = tf.one_hot(positions, depth=seq_length, dtype=tf.float32)
                log_probs = tf.nn.log_softmax(logits, axis=(- 1))
                loss = (- tf.reduce_mean(tf.reduce_sum((one_hot_positions * log_probs), axis=(- 1))))
                return loss
            start_positions = features['start_positions']
            end_positions = features['end_positions']
            start_loss = compute_loss(start_logits, start_positions)
            end_loss = compute_loss(end_logits, end_positions)
            total_loss = ((start_loss + end_loss) / 2.0)
            predictions = {'unique_ids': unique_ids, 'start_logits': start_logits, 'end_logits': end_logits, 'total_loss': tf.reshape(total_loss, [batch_size, 1]), 'start_loss': tf.reshape(start_loss, [batch_size, 1]), 'end_loss': tf.reshape(end_loss, [batch_size, 1])}
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
        else:
            raise ValueError(('Only TRAIN and PREDICT modes are supported: %s' % mode))
        return output_spec
    return model_fn
def _get_best_indexes(logits, n_best_size): 'Get the n-best logits from a list.' index_and_score = sorted(enumerate(logits), key=(lambda x: x[1]), reverse=True) best_indexes = [] for i in range(len(index_and_score)): if (i >= n_best_size): break best_indexes.append(index_and_score[i][0]) return best_indexes
def _compute_softmax(scores): 'Compute softmax probability over raw logits.' if (not scores): return [] max_score = None for score in scores: if ((max_score is None) or (score > max_score)): max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp((score - max_score)) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append((score / total_sum)) return probs
def compute_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case):
    """Compute final predictions.

    Returns (all_predictions, all_nbest_json): the best answer text per
    example id, and the full n-best list with probabilities.
    """
    # Group features by the example they were generated from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_logit', 'end_logit'])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # Consider every start/end pair and filter out invalid spans.
            for start_index in start_indexes:
                for end_index in end_indexes:
                    if (start_index >= len(feature.tokens)):
                        continue
                    if (end_index >= len(feature.tokens)):
                        continue
                    if (start_index not in feature.token_to_orig_map):
                        continue
                    if (end_index not in feature.token_to_orig_map):
                        continue
                    if (not feature.token_is_max_context.get(start_index, False)):
                        continue
                    if (end_index < start_index):
                        continue
                    length = ((end_index - start_index) + 1)
                    if (length > max_answer_length):
                        continue
                    prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index]))
        # Rank candidate spans by combined start+end logit.
        prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_logit + x.end_logit)), reverse=True)
        _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_logit', 'end_logit'])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if (len(nbest) >= n_best_size):
                break
            feature = features[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            # De-tokenize the wordpieces and normalize whitespace.
            tok_text = ' '.join(tok_tokens)
            tok_text = tok_text.replace(' ##', '')
            tok_text = tok_text.replace('##', '')
            tok_text = tok_text.strip()
            tok_text = ' '.join(tok_text.split())
            orig_text = ' '.join(orig_tokens)
            # Align the predicted span back onto the original text.
            final_text = get_final_text(tok_text, orig_text, do_lower_case)
            if (final_text in seen_predictions):
                continue
            seen_predictions[final_text] = True
            nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
        if (not nbest):
            # Guarantee at least one (dummy) prediction per example.
            nbest.append(_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
        assert (len(nbest) >= 1)
        total_scores = []
        for entry in nbest:
            total_scores.append((entry.start_logit + entry.end_logit))
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output['text'] = entry.text
            output['probability'] = probs[i]
            output['start_logit'] = entry.start_logit
            output['end_logit'] = entry.end_logit
            nbest_json.append(output)
        assert (len(nbest_json) >= 1)
        all_predictions[example.qas_id] = nbest_json[0]['text']
        all_nbest_json[example.qas_id] = nbest_json
    return (all_predictions, all_nbest_json)
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode('utf-8', 'ignore')
        raise ValueError(('Unsupported string type: %s' % type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode('utf-8', 'ignore')
        if isinstance(text, unicode):
            return text
        raise ValueError(('Unsupported string type: %s' % type(text)))
    raise ValueError('Not running on Python2 or Python 3?')
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode('utf-8', 'ignore')
        raise ValueError(('Unsupported string type: %s' % type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text
        if isinstance(text, unicode):
            return text.encode('utf-8')
        raise ValueError(('Unsupported string type: %s' % type(text)))
    raise ValueError('Not running on Python2 or Python 3?')
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary mapping token -> id.

    Tokens are numbered in file order, one token per line; lines are
    stripped of surrounding whitespace before insertion.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        next_id = 0
        # readline() returns '' exactly once, at EOF, which ends the iteration
        # (same stop condition as the original truthiness check).
        for line in iter(reader.readline, ''):
            token = convert_to_unicode(line)
            vocab[token.strip()] = next_id
            next_id += 1
    return vocab
def convert_tokens_to_ids(vocab, tokens):
    """Converts a sequence of tokens into ids using the vocab."""
    # Every token must be present in `vocab`; a missing token raises KeyError,
    # exactly as the original explicit loop did.
    return [vocab[token] for token in tokens]
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    # str.split() with no argument already discards leading/trailing
    # whitespace, collapses runs, and returns [] for empty/blank input,
    # so the explicit strip-and-check of the original is unnecessary.
    return text.split()
class FullTokenizer(object):
    """Runs end-to-end tokenization: basic split, then WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        # Basic tokenization first; each resulting token is then split
        # into its wordpieces.
        return [sub_token
                for token in self.basic_tokenizer.tokenize(text)
                for sub_token in self.wordpiece_tokenizer.tokenize(token)]

    def convert_tokens_to_ids(self, tokens):
        return convert_tokens_to_ids(self.vocab, tokens)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # Pad every CJK character with spaces so it becomes its own token.
        text = self._tokenize_chinese_chars(text)
        split_tokens = []
        for raw_token in whitespace_tokenize(text):
            if self.do_lower_case:
                raw_token = self._run_strip_accents(raw_token.lower())
            split_tokens.extend(self._run_split_on_punc(raw_token))
        return whitespace_tokenize(' '.join(split_tokens))

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        decomposed = unicodedata.normalize('NFD', text)
        # Drop combining marks (category 'Mn') left over from decomposition.
        return ''.join(ch for ch in decomposed
                       if unicodedata.category(ch) != 'Mn')

    def _run_split_on_punc(self, text):
        """Splits punctuation off into single-character pieces."""
        pieces = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                # Punctuation always forms its own one-character piece and
                # terminates any word in progress.
                pieces.append([ch])
                word_open = False
            else:
                if not word_open:
                    pieces.append([])
                    word_open = True
                pieces[-1].append(ch)
        return [''.join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        padded = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                padded.extend((' ', ch, ' '))
            else:
                padded.append(ch)
        return ''.join(padded)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # CJK Unified Ideographs blocks, extensions A-E, and the two
        # compatibility-ideograph blocks (same ranges as the original
        # decimal bounds, written in hex).
        cjk_ranges = (
            (0x4E00, 0x9FFF),
            (0x3400, 0x4DBF),
            (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F),
            (0x2B740, 0x2B81F),
            (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF),
            (0x2F800, 0x2FA1F),
        )
        return any(low <= cp <= high for (low, high) in cjk_ranges)

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        cleaned = []
        for ch in text:
            cp = ord(ch)
            # Drop NUL, the replacement character, and control characters.
            if cp == 0 or cp == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(' ' if _is_whitespace(ch) else ch)
        return ''.join(cleaned)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token='[UNK]', max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            # Overly long tokens become a single unknown token.
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            sub_tokens = self._greedy_split(chars)
            if sub_tokens is None:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens

    def _greedy_split(self, chars):
        """Longest-match-first split of `chars`; None when no split exists."""
        sub_tokens = []
        start = 0
        while start < len(chars):
            match = None
            match_end = start
            # Try the longest remaining substring first, shrinking from the
            # right; non-initial pieces carry the '##' continuation prefix.
            for end in range(len(chars), start, -1):
                candidate = ''.join(chars[start:end])
                if start > 0:
                    candidate = '##' + candidate
                if candidate in self.vocab:
                    match = candidate
                    match_end = end
                    break
            if match is None:
                return None
            sub_tokens.append(match)
            start = match_end
        return sub_tokens
def _is_whitespace(char): 'Checks whether `chars` is a whitespace character.' if ((char == ' ') or (char == '\t') or (char == '\n') or (char == '\r')): return True cat = unicodedata.category(char) if (cat == 'Zs'): return True return False
def _is_control(char): 'Checks whether `chars` is a control character.' if ((char == '\t') or (char == '\n') or (char == '\r')): return False cat = unicodedata.category(char) if cat.startswith('C'): return True return False
def _is_punctuation(char): 'Checks whether `chars` is a punctuation character.' cp = ord(char) if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))): return True cat = unicodedata.category(char) if cat.startswith('P'): return True return False
def flatten_distribution(distribution_map, probabilities):
    """Flattens a probability distribution given a map of "unique" values.

    All values in distribution_map with the same value should get the sum
    of the probabilities.

    Arguments:
        distribution_map (list of str): List of values to get the probability for.
        probabilities (np.ndarray): Probabilities corresponding to the values in
            distribution_map.

    Returns:
        list, np.ndarray of the same size where probabilities for duplicates
        in distribution_map are given the sum of the probabilities in
        probabilities.
    """
    assert (len(distribution_map) == len(probabilities))
    if (len(distribution_map) != len(set(distribution_map))):
        # Find the position of the first duplicated token; everything before
        # it is already unique and keeps its original order/probability.
        idx_first_dup = 0
        seen_set = set()
        for (i, tok) in enumerate(distribution_map):
            if (tok in seen_set):
                idx_first_dup = i
                break
            seen_set.add(tok)
        # Deduplicated map: the unique prefix, then the remaining unique
        # tokens. NOTE(review): the appended tail comes from a set
        # difference, so its ordering is set-iteration dependent; the
        # (token, probability) pairing below stays consistent because
        # probabilities are accumulated by index lookup, but the output
        # ordering itself is not deterministic across runs — confirm callers
        # do not rely on it.
        new_dist_map = (distribution_map[:idx_first_dup] + list((set(distribution_map) - set(distribution_map[:idx_first_dup]))))
        assert (len(new_dist_map) == len(set(new_dist_map)))
        # Probabilities: keep the prefix as-is, zero-init the rest, then sum
        # each remaining token's probability into its slot.
        new_probs = np.array((probabilities[:idx_first_dup] + [0.0 for _ in range((len(set(distribution_map)) - idx_first_dup))]))
        assert (len(new_probs) == len(new_dist_map))
        for (i, token_name) in enumerate(distribution_map[idx_first_dup:]):
            # NOTE(review): new_dist_map already contains every unique token,
            # so this append looks unreachable — confirm.
            if (token_name not in new_dist_map):
                new_dist_map.append(token_name)
            new_index = new_dist_map.index(token_name)
            new_probs[new_index] += probabilities[(i + idx_first_dup)]
        new_probs = new_probs.tolist()
    else:
        # No duplicates: return the inputs unchanged.
        new_dist_map = distribution_map
        new_probs = probabilities
    assert (len(new_dist_map) == len(new_probs))
    return (new_dist_map, new_probs)
class SQLPrediction(namedtuple('SQLPrediction', ('predictions', 'sequence', 'probability'))):
    """Contains the prediction for one sequence: per-step predictor outputs,
    the generated token sequence, and its overall probability."""
    __slots__ = ()

    def __str__(self):
        # Rendered as "<probability>\t<space-joined tokens>".
        joined_sequence = ' '.join(self.sequence)
        return '{}\t{}'.format(self.probability, joined_sequence)
class SequencePredictorWithSchema(torch.nn.Module):
    """ Predicts a sequence.

    Attributes:
        lstms (list of dy.RNNBuilder): The RNN used.
        token_predictor (TokenPredictor): Used to actually predict tokens.
    """

    def __init__(self, params, input_size, output_embedder, column_name_token_embedder, token_predictor):
        super().__init__()
        # Multi-layer decoder LSTM; each step's input is the previous token
        # embedding concatenated with attention context vector(s).
        self.lstms = torch_utils.create_multilayer_lstm_params(params.decoder_num_layers, input_size, params.decoder_state_size, 'LSTM-d')
        self.token_predictor = token_predictor
        self.output_embedder = output_embedder
        self.column_name_token_embedder = column_name_token_embedder
        # Learned embedding fed as the "previous token" at generation step 0.
        self.start_token_embedding = torch_utils.add_params((params.output_embedding_size,), 'y-0')
        self.input_size = input_size
        self.params = params

    def _initialize_decoder_lstm(self, encoder_state):
        # Seed each decoder layer's (h, c) from the encoder's final state.
        # `encoder_state` is a (cell memories, hidden states) pair indexed
        # by layer.
        decoder_lstm_states = []
        for (i, lstm) in enumerate(self.lstms):
            encoder_layer_num = 0
            # Multi-layer encoder: pair layers one-to-one; single-layer
            # encoder: reuse layer 0 for every decoder layer.
            if (len(encoder_state[0]) > 1):
                encoder_layer_num = i
            c_0 = encoder_state[0][encoder_layer_num].view(1, (- 1))
            h_0 = encoder_state[1][encoder_layer_num].view(1, (- 1))
            decoder_lstm_states.append((h_0, c_0))
        return decoder_lstm_states

    def get_output_token_embedding(self, output_token, input_schema, snippets):
        # Embedding of the previously generated token: snippet tokens get a
        # bag-of-words embedding over their expansion; otherwise use the
        # output vocabulary or, failing that, the schema's column-name
        # embedder (surface-form lookup).
        if (self.params.use_snippets and snippet_handler.is_snippet(output_token)):
            output_token_embedding = embedder.bow_snippets(output_token, snippets, self.output_embedder, input_schema)
        elif input_schema:
            assert (self.output_embedder.in_vocabulary(output_token) or input_schema.in_vocabulary(output_token, surface_form=True))
            if self.output_embedder.in_vocabulary(output_token):
                output_token_embedding = self.output_embedder(output_token)
            else:
                output_token_embedding = input_schema.column_name_embedder(output_token, surface_form=True)
        else:
            output_token_embedding = self.output_embedder(output_token)
        return output_token_embedding

    def get_decoder_input(self, output_token_embedding, prediction):
        # Next decoder input = [token embedding; attention contexts]; which
        # context vectors are appended depends on the attention flags, and
        # must match the `input_size` the LSTM was built with.
        if (self.params.use_schema_attention and self.params.use_query_attention):
            decoder_input = torch.cat([output_token_embedding, prediction.utterance_attention_results.vector, prediction.schema_attention_results.vector, prediction.query_attention_results.vector], dim=0)
        elif self.params.use_schema_attention:
            decoder_input = torch.cat([output_token_embedding, prediction.utterance_attention_results.vector, prediction.schema_attention_results.vector], dim=0)
        else:
            decoder_input = torch.cat([output_token_embedding, prediction.utterance_attention_results.vector], dim=0)
        return decoder_input

    def forward(self, final_encoder_state, encoder_states, schema_states, max_generation_length, snippets=None, gold_sequence=None, input_sequence=None, previous_queries=None, previous_query_states=None, input_schema=None, dropout_amount=0.0):
        """ Generates a sequence.

        With `gold_sequence` supplied this teacher-forces (training); without
        it, it decodes greedily until EOS_TOK or `max_generation_length`.
        Returns an SQLPrediction of (per-step predictions, token sequence,
        sequence probability).
        """
        index = 0
        # Size of the non-embedding part of the decoder input, i.e. the
        # attention context vector(s).
        context_vector_size = (self.input_size - self.params.output_embedding_size)
        predictions = []
        sequence = []
        probability = 1.0
        decoder_states = self._initialize_decoder_lstm(final_encoder_state)
        # Step-0 input: start-token embedding plus a zero context vector.
        if self.start_token_embedding.is_cuda:
            decoder_input = torch.cat([self.start_token_embedding, torch.cuda.FloatTensor(context_vector_size).fill_(0)], dim=0)
        else:
            decoder_input = torch.cat([self.start_token_embedding, torch.zeros(context_vector_size)], dim=0)
        continue_generating = True
        while continue_generating:
            if ((len(sequence) == 0) or (sequence[(- 1)] != EOS_TOK)):
                (_, decoder_state, decoder_states) = torch_utils.forward_one_multilayer(self.lstms, decoder_input, decoder_states, dropout_amount)
                prediction_input = PredictionInputWithSchema(decoder_state=decoder_state, input_hidden_states=encoder_states, schema_states=schema_states, snippets=snippets, input_sequence=input_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema)
                prediction = self.token_predictor(prediction_input, dropout_amount=dropout_amount)
                predictions.append(prediction)
                if gold_sequence:
                    # Teacher forcing: feed the gold token back in and stop
                    # once the gold sequence is exhausted.
                    output_token = gold_sequence[index]
                    output_token_embedding = self.get_output_token_embedding(output_token, input_schema, snippets)
                    decoder_input = self.get_decoder_input(output_token_embedding, prediction)
                    sequence.append(gold_sequence[index])
                    if (index >= (len(gold_sequence) - 1)):
                        continue_generating = False
                else:
                    # Greedy decoding over the (possibly copy-augmented)
                    # output distribution.
                    assert (prediction.scores.dim() == 1)
                    probabilities = F.softmax(prediction.scores, dim=0).cpu().data.numpy().tolist()
                    distribution_map = prediction.aligned_tokens
                    assert (len(probabilities) == len(distribution_map))
                    if (self.params.use_previous_query and self.params.use_copy_switch and (len(previous_queries) > 0)):
                        # Mix the generation distribution with the copy-from-
                        # previous-query distribution using the predicted
                        # copy switch.
                        assert (prediction.query_scores.dim() == 1)
                        query_token_probabilities = F.softmax(prediction.query_scores, dim=0).cpu().data.numpy().tolist()
                        query_token_distribution_map = prediction.query_tokens
                        assert (len(query_token_probabilities) == len(query_token_distribution_map))
                        copy_switch = prediction.copy_switch.cpu().data.numpy()
                        probabilities = ((np.array(probabilities) * (1 - copy_switch)).tolist() + (np.array(query_token_probabilities) * copy_switch).tolist())
                        distribution_map = (distribution_map + query_token_distribution_map)
                        assert (len(probabilities) == len(distribution_map))
                    # Merge duplicate tokens' probabilities before the argmax.
                    (distribution_map, probabilities) = flatten_distribution(distribution_map, probabilities)
                    # Never generate the unknown token.
                    probabilities[distribution_map.index(UNK_TOK)] = 0.0
                    argmax_index = int(np.argmax(probabilities))
                    argmax_token = distribution_map[argmax_index]
                    sequence.append(argmax_token)
                    output_token_embedding = self.get_output_token_embedding(argmax_token, input_schema, snippets)
                    decoder_input = self.get_decoder_input(output_token_embedding, prediction)
                    probability *= probabilities[argmax_index]
                    # Keep generating unless EOS was produced or the length
                    # budget is exhausted.
                    continue_generating = False
                    if ((index < max_generation_length) and (argmax_token != EOS_TOK)):
                        continue_generating = True
                index += 1
        return SQLPrediction(predictions, sequence, probability)
class Embedder(torch.nn.Module):
    """ Embeds tokens.

    The id space is defined either by a `vocabulary` object (string tokens)
    or by a fixed `num_tokens` count (integer token ids) — exactly one of
    the two must be provided.
    """

    def __init__(self, embedding_size, name='', initializer=None, vocabulary=None, num_tokens=(- 1), anonymizer=None, freeze=False, use_unk=True):
        super().__init__()
        if vocabulary:
            # String-token mode: membership and id lookup go through the
            # vocabulary object.
            assert (num_tokens < 0), ('Specified a vocabulary but also set number of tokens to ' + str(num_tokens))
            self.in_vocabulary = (lambda token: (token in vocabulary.tokens))
            self.vocab_token_lookup = (lambda token: vocabulary.token_to_id(token))
            if use_unk:
                self.unknown_token_id = vocabulary.token_to_id(vocabulary_handler.UNK_TOK)
            else:
                self.unknown_token_id = (- 1)
            self.vocabulary_size = len(vocabulary)
        else:
            # Integer-id mode: ids index the table directly.
            def check_vocab(index):
                """ Makes sure the index is in the vocabulary."""
                assert (index < num_tokens), ((('Passed token ID ' + str(index)) + '; expecting something less than ') + str(num_tokens))
                return (index < num_tokens)
            self.in_vocabulary = check_vocab
            self.vocab_token_lookup = (lambda x: x)
            # NOTE(review): unknown_token_id == num_tokens would index one
            # past the embedding table, but check_vocab asserts before that
            # branch can be reached — confirm this path is intentional.
            self.unknown_token_id = num_tokens
            self.vocabulary_size = num_tokens
        self.anonymizer = anonymizer
        emb_name = (name + '-tokens')
        print(((((('Creating token embedder called ' + emb_name) + ' of size ') + str(self.vocabulary_size)) + ' x ') + str(embedding_size)))
        if (initializer is not None):
            # Pretrained initialization (e.g. GloVe); `freeze` controls
            # whether the table is finetuned.
            word_embeddings_tensor = torch.FloatTensor(initializer)
            self.token_embedding_matrix = torch.nn.Embedding.from_pretrained(word_embeddings_tensor, freeze=freeze)
        else:
            # Random uniform initialization in [-0.1, 0.1], always trainable.
            init_tensor = torch.empty(self.vocabulary_size, embedding_size).uniform_((- 0.1), 0.1)
            self.token_embedding_matrix = torch.nn.Embedding.from_pretrained(init_tensor, freeze=False)
        if self.anonymizer:
            # Separate embedding table for anonymized entity types.
            emb_name = (name + '-entities')
            entity_size = len(self.anonymizer.entity_types)
            print(((((('Creating entity embedder called ' + emb_name) + ' of size ') + str(entity_size)) + ' x ') + str(embedding_size)))
            init_tensor = torch.empty(entity_size, embedding_size).uniform_((- 0.1), 0.1)
            self.entity_embedding_matrix = torch.nn.Embedding.from_pretrained(init_tensor, freeze=False)

    def forward(self, token):
        # Returns a 1-D embedding vector for a single flat token.
        assert (isinstance(token, int) or (not snippet_handler.is_snippet(token))), 'embedder should only be called on flat tokens; use snippet_bow if you are trying to encode snippets'
        if self.in_vocabulary(token):
            # In-vocabulary token: look up its row in the token table.
            index_list = torch.LongTensor([self.vocab_token_lookup(token)])
            if self.token_embedding_matrix.weight.is_cuda:
                index_list = index_list.cuda()
            return self.token_embedding_matrix(index_list).squeeze()
        elif (self.anonymizer and self.anonymizer.is_anon_tok(token)):
            # Anonymized entity token: use the entity-type table instead.
            index_list = torch.LongTensor([self.anonymizer.get_anon_id(token)])
            if self.token_embedding_matrix.weight.is_cuda:
                index_list = index_list.cuda()
            return self.entity_embedding_matrix(index_list).squeeze()
        else:
            # Out-of-vocabulary token: fall back to the unknown-token row.
            index_list = torch.LongTensor([self.unknown_token_id])
            if self.token_embedding_matrix.weight.is_cuda:
                index_list = index_list.cuda()
            return self.token_embedding_matrix(index_list).squeeze()
def bow_snippets(token, snippets, output_embedder, input_schema):
    """Bag-of-words embedding for a snippet token: the mean of the
    embeddings of the tokens in the snippet's expansion."""
    assert (snippet_handler.is_snippet(token) and snippets)

    # Locate the expansion sequence for the named snippet; [] if absent,
    # which the assert below rejects.
    snippet_sequence = next(
        (snippet.sequence for snippet in snippets if snippet.name == token), [])
    assert snippet_sequence

    if input_schema:
        snippet_embeddings = []
        for output_token in snippet_sequence:
            in_output_vocab = output_embedder.in_vocabulary(output_token)
            # Each token must resolve via the output vocabulary or the
            # schema's surface-form column names.
            assert (in_output_vocab
                    or input_schema.in_vocabulary(output_token, surface_form=True))
            if in_output_vocab:
                embedding = output_embedder(output_token)
            else:
                embedding = input_schema.column_name_embedder(output_token, surface_form=True)
            snippet_embeddings.append(embedding)
    else:
        snippet_embeddings = [output_embedder(piece) for piece in snippet_sequence]

    # Average the token embeddings into a single vector.
    return torch.mean(torch.stack(snippet_embeddings, dim=0), dim=0)
class Encoder(torch.nn.Module):
    """ Encodes an input sequence. """

    def __init__(self, num_layers, input_size, state_size):
        super().__init__()
        self.num_layers = num_layers
        # Bidirectional: separate forward/backward stacks, each producing
        # half of `state_size` so their concatenation has the full size.
        # NOTE(review): (state_size / 2) is float division; presumably the
        # helper casts to int — confirm behavior for odd state sizes.
        self.forward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, (state_size / 2), 'LSTM-ef')
        self.backward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, (state_size / 2), 'LSTM-eb')

    def forward(self, sequence, embedder, dropout_amount=0.0):
        """ Encodes a sequence forward and backward.

        Inputs:
            sequence (list): Tokens to encode, embedded via `embedder`.
            embedder: Embedding function for tokens in the sequence.
            dropout_amount (float, optional): The amount of dropout to apply.

        Returns:
            (list, list), list — the first pair is the (final cell memories,
            final cell states) of all layers (forward and backward halves
            concatenated per layer), and the second list holds the final
            layer's concatenated output for every token in input order.
        """
        # Run the sequence in both directions.
        (forward_state, forward_outputs) = encode_sequence(sequence, self.forward_lstms, embedder, dropout_amount=dropout_amount)
        (backward_state, backward_outputs) = encode_sequence(sequence[::(- 1)], self.backward_lstms, embedder, dropout_amount=dropout_amount)
        # Concatenate per-layer final states from both directions.
        cell_memories = []
        hidden_states = []
        for i in range(self.num_layers):
            cell_memories.append(torch.cat([forward_state[0][i], backward_state[0][i]], dim=0))
            hidden_states.append(torch.cat([forward_state[1][i], backward_state[1][i]], dim=0))
        assert (len(forward_outputs) == len(backward_outputs))
        # Re-reverse the backward outputs so both directions align with the
        # original token order before concatenation.
        backward_outputs = backward_outputs[::(- 1)]
        final_outputs = []
        for i in range(len(sequence)):
            final_outputs.append(torch.cat([forward_outputs[i], backward_outputs[i]], dim=0))
        return ((cell_memories, hidden_states), final_outputs)
def get_token_indices(token, index_to_token):
    """Maps from a gold token (string) to a list of indices.

    Inputs:
        token (string): String to look up.
        index_to_token (list of tokens): Ordered list of tokens.

    Returns:
        list of int, representing the indices of the token in the
        probability distribution.
    """
    if token not in index_to_token:
        # Out-of-vocabulary gold tokens map to the unknown-token slot.
        return [index_to_token.index(UNK_TOK)]
    if len(set(index_to_token)) == len(index_to_token):
        # No duplicates anywhere: a single position suffices.
        return [index_to_token.index(token)]
    # Duplicates possible: collect every position holding this token.
    matches = [position for (position, candidate) in enumerate(index_to_token)
               if candidate == token]
    assert len(matches) == len(set(matches))
    return matches
def flatten_utterances(utterances):
    """Gets a flat sequence from a sequence of utterances.

    Inputs:
        utterances (list of list of str): Utterances to concatenate.

    Returns:
        list of str, representing the flattened sequence with separating
        delimiter tokens.
    """
    sequence = []
    last_index = len(utterances) - 1
    for (position, utterance) in enumerate(utterances):
        sequence.extend(utterance)
        # Delimit consecutive utterances; no trailing delimiter.
        if position < last_index:
            sequence.append(DEL_TOK)
    return sequence
def encode_snippets_with_states(snippets, states):
    """Encodes snippets by using previous query states instead.

    Inputs:
        snippets (list of Snippet): Input snippets; each gets its embedding
            set in place.
        states (list): Previous hidden states to use, indexed by position in
            the previous query.
    """
    for snippet in snippets:
        # A snippet is represented by the states at its two boundaries.
        boundary_embedding = torch.cat(
            [states[snippet.startpos], states[snippet.endpos]], dim=0)
        snippet.set_embedding(boundary_embedding)
    return snippets
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
    """Loads GloVe embeddings for the input/output (and optional schema)
    vocabularies.

    Returns:
        (input embeddings, output embeddings, schema embeddings or None,
        embedding size) — each embedding matrix is a float32 np.ndarray of
        shape (vocab size, 300), zero-filled for out-of-GloVe tokens.
    """
    print(output_vocabulary.inorder_tokens)
    print()

    def read_glove_embedding(embedding_filename, embedding_size):
        # Parses a GloVe text file into {word: np.ndarray}. Words may contain
        # spaces, so the word is everything except the last `embedding_size`
        # whitespace-separated fields.
        glove_embeddings = {}
        with open(embedding_filename) as f:
            cnt = 1
            for line in f:
                cnt += 1
                # Debug / non-training runs only read a prefix of the file.
                # NOTE(review): cnt starts at 1 and is incremented before the
                # check, so this stops after 998 embeddings despite the
                # message — confirm intent.
                if (params.debug or (not params.train)):
                    if (cnt == 1000):
                        print('Read 1000 word embeddings')
                        break
                l_split = line.split()
                word = ' '.join(l_split[0:(len(l_split) - embedding_size)])
                embedding = np.array([float(val) for val in l_split[(- embedding_size):]])
                glove_embeddings[word] = embedding
        return glove_embeddings

    print('Loading Glove Embedding from', params.embedding_filename)
    glove_embedding_size = 300
    glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
    print('Done')
    input_embedding_size = glove_embedding_size

    def create_word_embeddings(vocab):
        # Builds the (len(vocab), 300) matrix; tokens missing from GloVe keep
        # their zero rows and are counted as OOV.
        vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
        vocabulary_tokens = vocab.inorder_tokens
        glove_oov = 0
        # NOTE(review): para_oov is never incremented — presumably a leftover
        # from a paraphrase-embedding variant.
        para_oov = 0
        for token in vocabulary_tokens:
            token_id = vocab.token_to_id(token)
            if (token in glove_embeddings):
                vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
            else:
                glove_oov += 1
        print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))
        return vocabulary_embeddings

    input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
    output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
    output_vocabulary_schema_embeddings = None
    if output_vocabulary_schema:
        output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
    return (input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size)
class ATISModel(torch.nn.Module):
    """ Sequence-to-sequence model for predicting a SQL query given an utterance
    and an interaction prefix.
    """

    def __init__(self, params, input_vocabulary, output_vocabulary, output_vocabulary_schema, anonymizer):
        super().__init__()
        self.params = params
        if params.use_bert:
            (self.model_bert, self.tokenizer, self.bert_config) = utils_bert.get_bert(params)
        if ('atis' not in params.data_directory):
            # Non-ATIS datasets: initialize embedders from pretrained GloVe.
            if params.use_bert:
                # BERT supplies the input-side representations, so only the
                # output embedder is created here.
                (input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size) = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
                self.output_embedder = Embedder(params.output_embedding_size, name='output-embedding', initializer=output_vocabulary_embeddings, vocabulary=output_vocabulary, anonymizer=anonymizer, freeze=False)
                self.column_name_token_embedder = None
            else:
                (input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size) = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
                # The GloVe size overrides whatever was configured.
                params.input_embedding_size = input_embedding_size
                self.params.input_embedding_size = input_embedding_size
                self.input_embedder = Embedder(params.input_embedding_size, name='input-embedding', initializer=input_vocabulary_embeddings, vocabulary=input_vocabulary, anonymizer=anonymizer, freeze=params.freeze)
                self.output_embedder = Embedder(params.output_embedding_size, name='output-embedding', initializer=output_vocabulary_embeddings, vocabulary=output_vocabulary, anonymizer=anonymizer, freeze=False)
                self.column_name_token_embedder = Embedder(params.input_embedding_size, name='schema-embedding', initializer=output_vocabulary_schema_embeddings, vocabulary=output_vocabulary_schema, anonymizer=anonymizer, freeze=params.freeze)
        else:
            # ATIS: randomly initialized embedders, no schema embedder.
            self.input_embedder = Embedder(params.input_embedding_size, name='input-embedding', vocabulary=input_vocabulary, anonymizer=anonymizer, freeze=False)
            self.output_embedder = Embedder(params.output_embedding_size, name='output-embedding', vocabulary=output_vocabulary, anonymizer=anonymizer, freeze=False)
            self.column_name_token_embedder = None
        # Utterance encoder: input is the token embedding (or BERT hidden
        # size), optionally extended with the discourse state.
        encoder_input_size = params.input_embedding_size
        encoder_output_size = params.encoder_state_size
        if params.use_bert:
            encoder_input_size = self.bert_config.hidden_size
        if params.discourse_level_lstm:
            # NOTE(review): float division — presumably downstream casts to
            # int; confirm for odd encoder_state_size.
            encoder_input_size += (params.encoder_state_size / 2)
        self.utterance_encoder = Encoder(params.encoder_num_layers, encoder_input_size, encoder_output_size)
        # Attention keys over encoder states, optionally augmented with
        # positional (utterance-index) embeddings.
        attention_key_size = params.encoder_state_size
        self.schema_attention_key_size = attention_key_size
        if params.state_positional_embeddings:
            attention_key_size += params.positional_embedding_size
            self.positional_embedder = Embedder(params.positional_embedding_size, name='positional-embedding', num_tokens=params.maximum_utterances)
        self.utterance_attention_key_size = attention_key_size
        if params.discourse_level_lstm:
            # Turn-level (discourse) LSTM and its learned initial state.
            self.discourse_lstms = torch_utils.create_multilayer_lstm_params(1, params.encoder_state_size, (params.encoder_state_size / 2), 'LSTM-t')
            self.initial_discourse_state = torch_utils.add_params(tuple([(params.encoder_state_size / 2)]), 'V-turn-state-0')
        # Snippet encoder: encodes previous-query snippets, optionally
        # reserving part of the embedding for a snippet-age signal.
        final_snippet_size = 0
        if (params.use_snippets and (not params.previous_decoder_snippet_encoding)):
            snippet_encoding_size = int((params.encoder_state_size / 2))
            final_snippet_size = params.encoder_state_size
            if params.snippet_age_embedding:
                snippet_encoding_size -= int((params.snippet_age_embedding_size / 4))
                self.snippet_age_embedder = Embedder(params.snippet_age_embedding_size, name='snippet-age-embedding', num_tokens=params.max_snippet_age_embedding)
                final_snippet_size = (params.encoder_state_size + (params.snippet_age_embedding_size / 2))
            self.snippet_encoder = Encoder(params.snippet_num_layers, params.output_embedding_size, snippet_encoding_size)
        if params.use_previous_query:
            self.query_encoder = Encoder(params.encoder_num_layers, params.output_embedding_size, params.encoder_state_size)
        self.final_snippet_size = final_snippet_size
        # Dropout is set per phase via set_dropout().
        self.dropout = 0.0

    def _encode_snippets(self, previous_query, snippets, input_schema):
        """ Computes a single vector representation for each snippet.

        Inputs:
            previous_query (list of str): Previous query in the interaction.
            snippets (list of Snippet): Snippets extracted from the previous
                query; embeddings are set in place.

        Returns:
            list of Snippets, where the embedding is set to a vector.
        """
        startpoints = [snippet.startpos for snippet in snippets]
        endpoints = [snippet.endpos for snippet in snippets]
        assert ((len(startpoints) == 0) or (min(startpoints) >= 0))
        # With a schema, endpos is an exclusive bound; without, inclusive.
        if input_schema:
            assert ((len(endpoints) == 0) or (max(endpoints) <= len(previous_query)))
        else:
            assert ((len(endpoints) == 0) or (max(endpoints) < len(previous_query)))
        # NOTE(review): get_query_token_embedding is not defined in this
        # class — presumably supplied by a subclass; confirm.
        snippet_embedder = (lambda query_token: self.get_query_token_embedding(query_token, input_schema))
        if (previous_query and snippets):
            (_, previous_outputs) = self.snippet_encoder(previous_query, snippet_embedder, dropout_amount=self.dropout)
            assert (len(previous_outputs) == len(previous_query))
            for snippet in snippets:
                # A snippet embedding is the concatenation of the encoder
                # outputs at its two boundary positions.
                if input_schema:
                    embedding = torch.cat([previous_outputs[snippet.startpos], previous_outputs[(snippet.endpos - 1)]], dim=0)
                else:
                    embedding = torch.cat([previous_outputs[snippet.startpos], previous_outputs[snippet.endpos]], dim=0)
                if self.params.snippet_age_embedding:
                    # Clamp the age to the maximum embedded age.
                    embedding = torch.cat([embedding, self.snippet_age_embedder(min(snippet.age, (self.params.max_snippet_age_embedding - 1)))], dim=0)
                snippet.set_embedding(embedding)
        return snippets

    def _initialize_discourse_states(self):
        # Returns the learned initial discourse state and zeroed (h, c)
        # pairs for each discourse LSTM, on the right device.
        discourse_state = self.initial_discourse_state
        discourse_lstm_states = []
        for lstm in self.discourse_lstms:
            hidden_size = lstm.weight_hh.size()[1]
            if lstm.weight_hh.is_cuda:
                h_0 = torch.cuda.FloatTensor(1, hidden_size).fill_(0)
                c_0 = torch.cuda.FloatTensor(1, hidden_size).fill_(0)
            else:
                h_0 = torch.zeros(1, hidden_size)
                c_0 = torch.zeros(1, hidden_size)
            discourse_lstm_states.append((h_0, c_0))
        return (discourse_state, discourse_lstm_states)

    def _add_positional_embeddings(self, hidden_states, utterances, group=False):
        # Appends an utterance-index positional embedding to each hidden
        # state, keeping only the most recent `maximum_utterances`
        # utterances. Index 0 is the most recent utterance.
        grouped_states = []
        start_index = 0
        # Partition the flat state list back into per-utterance groups.
        for utterance in utterances:
            grouped_states.append(hidden_states[start_index:(start_index + len(utterance))])
            start_index += len(utterance)
        assert (len(hidden_states) == sum([len(seq) for seq in grouped_states]) == sum([len(utterance) for utterance in utterances]))
        new_states = []
        flat_sequence = []
        num_utterances_to_keep = min(self.params.maximum_utterances, len(utterances))
        for (i, (states, utterance)) in enumerate(zip(grouped_states[(- num_utterances_to_keep):], utterances[(- num_utterances_to_keep):])):
            positional_sequence = []
            index = ((num_utterances_to_keep - i) - 1)
            for state in states:
                positional_sequence.append(torch.cat([state, self.positional_embedder(index)], dim=0))
            assert (len(positional_sequence) == len(utterance)), (((('Expected utterance and state sequence length to be the same, ' + 'but they were ') + str(len(utterance))) + ' and ') + str(len(positional_sequence)))
            # `group` keeps the per-utterance nesting; otherwise flatten.
            if group:
                new_states.append(positional_sequence)
            else:
                new_states.extend(positional_sequence)
            flat_sequence.extend(utterance)
        return (new_states, flat_sequence)

    def build_optim(self):
        # Separate optimizers: BERT parameters get their own learning rate
        # (and are only trained when fine_tune_bert is set).
        params_trainer = []
        params_bert_trainer = []
        for (name, param) in self.named_parameters():
            if param.requires_grad:
                if ('model_bert' in name):
                    params_bert_trainer.append(param)
                else:
                    params_trainer.append(param)
        self.trainer = torch.optim.Adam(params_trainer, lr=self.params.initial_learning_rate)
        if self.params.fine_tune_bert:
            self.bert_trainer = torch.optim.Adam(params_bert_trainer, lr=self.params.lr_bert)

    def set_dropout(self, value):
        """ Sets the dropout to a specified value.

        Inputs:
            value (float): Value to set dropout to.
        """
        self.dropout = value

    def set_learning_rate(self, value):
        """ Sets the learning rate for the trainer.

        Inputs:
            value (float): The new learning rate.
        """
        for param_group in self.trainer.param_groups:
            param_group['lr'] = value

    def save(self, filename):
        """ Saves the model to the specified filename.

        Inputs:
            filename (str): The filename to save to.
        """
        torch.save(self.state_dict(), filename)

    def load(self, filename):
        """ Loads saved parameters into the parameter collection.

        Inputs:
            filename (str): Name of file containing parameters.
        """
        self.load_state_dict(torch.load(filename))
        print(('Loaded model from file ' + filename))
class SchemaInteractionATISModel(ATISModel):
    """Interaction ATIS model, where an interaction is processed all at once."""

    def __init__(self, params, input_vocabulary, output_vocabulary, output_vocabulary_schema, anonymizer):
        ATISModel.__init__(self, params, input_vocabulary, output_vocabulary, output_vocabulary_schema, anonymizer)
        if self.params.use_schema_encoder:
            # Encoder over the tokens of each schema column name.
            schema_encoder_num_layer = 1
            schema_encoder_input_size = params.input_embedding_size
            schema_encoder_state_size = params.encoder_state_size
            if params.use_bert:
                schema_encoder_input_size = self.bert_config.hidden_size
            self.schema_encoder = Encoder(schema_encoder_num_layer, schema_encoder_input_size, schema_encoder_state_size)
        if self.params.use_schema_self_attention:
            # Schema-to-schema attention, applied residually in encode_schema_self_attention.
            self.schema2schema_attention_module = Attention(self.schema_attention_key_size, self.schema_attention_key_size, self.schema_attention_key_size)
        if self.params.use_utterance_attention:
            # Attention over the final states of previous utterances (get_utterance_attention).
            self.utterance_attention_module = Attention(self.params.encoder_state_size, self.params.encoder_state_size, self.params.encoder_state_size)
        if params.use_encoder_attention:
            # Cross-attention between utterance token states and schema states;
            # the attended vector is concatenated on, so both key sizes grow to
            # schema_attention_key_size + utterance_attention_key_size.
            self.utterance2schema_attention_module = Attention(self.schema_attention_key_size, self.utterance_attention_key_size, self.utterance_attention_key_size)
            self.schema2utterance_attention_module = Attention(self.utterance_attention_key_size, self.schema_attention_key_size, self.schema_attention_key_size)
            new_attention_key_size = (self.schema_attention_key_size + self.utterance_attention_key_size)
            self.schema_attention_key_size = new_attention_key_size
            self.utterance_attention_key_size = new_attention_key_size
            if self.params.use_schema_encoder_2:
                # Second-pass encoders over the attention-augmented states.
                # NOTE(review): schema_encoder_num_layer is only defined when
                # use_schema_encoder is also set — confirm the flags are used together.
                self.schema_encoder_2 = Encoder(schema_encoder_num_layer, self.schema_attention_key_size, self.schema_attention_key_size)
                self.utterance_encoder_2 = Encoder(params.encoder_num_layers, self.utterance_attention_key_size, self.utterance_attention_key_size)
        self.token_predictor = construct_token_predictor(params, output_vocabulary, self.utterance_attention_key_size, self.schema_attention_key_size, self.final_snippet_size, anonymizer)
        # Decoder input = previous output-token embedding plus whichever
        # attention context vectors are enabled.
        if (params.use_schema_attention and params.use_query_attention):
            decoder_input_size = (((params.output_embedding_size + self.utterance_attention_key_size) + self.schema_attention_key_size) + params.encoder_state_size)
        elif params.use_schema_attention:
            decoder_input_size = ((params.output_embedding_size + self.utterance_attention_key_size) + self.schema_attention_key_size)
        else:
            decoder_input_size = (params.output_embedding_size + self.utterance_attention_key_size)
        self.decoder = SequencePredictorWithSchema(params, decoder_input_size, self.output_embedder, self.column_name_token_embedder, self.token_predictor)

    def predict_turn(self, utterance_final_state, input_hidden_states, schema_states, max_generation_length, gold_query=None, snippets=None, input_sequence=None, previous_queries=None, previous_query_states=None, input_schema=None, feed_gold_tokens=False, training=False):
        """Gets a prediction for a single turn -- calls decoder and updates loss, etc.

        TODO: this can probably be split into two methods, one that just predicts
        and another that computes the loss.

        Returns:
            (predicted_sequence, loss, token_accuracy, decoder_states, decoder_results)
        """
        predicted_sequence = []
        fed_sequence = []
        loss = None
        token_accuracy = 0.0
        if self.params.use_encoder_attention:
            # Augment each schema state with attention over the utterance states
            # and vice versa, then (optionally) re-encode both sequences.
            schema_attention = self.utterance2schema_attention_module(torch.stack(schema_states, dim=0), input_hidden_states).vector
            utterance_attention = self.schema2utterance_attention_module(torch.stack(input_hidden_states, dim=0), schema_states).vector
            if (schema_attention.dim() == 1):
                schema_attention = schema_attention.unsqueeze(1)
            if (utterance_attention.dim() == 1):
                utterance_attention = utterance_attention.unsqueeze(1)
            new_schema_states = torch.cat([torch.stack(schema_states, dim=1), schema_attention], dim=0)
            schema_states = list(torch.split(new_schema_states, split_size_or_sections=1, dim=1))
            schema_states = [schema_state.squeeze() for schema_state in schema_states]
            new_input_hidden_states = torch.cat([torch.stack(input_hidden_states, dim=1), utterance_attention], dim=0)
            input_hidden_states = list(torch.split(new_input_hidden_states, split_size_or_sections=1, dim=1))
            input_hidden_states = [input_hidden_state.squeeze() for input_hidden_state in input_hidden_states]
            if self.params.use_schema_encoder_2:
                (final_schema_state, schema_states) = self.schema_encoder_2(schema_states, (lambda x: x), dropout_amount=self.dropout)
                (final_utterance_state, input_hidden_states) = self.utterance_encoder_2(input_hidden_states, (lambda x: x), dropout_amount=self.dropout)
        if feed_gold_tokens:
            # Teacher forcing: decode along the gold query and compute the loss.
            decoder_results = self.decoder(utterance_final_state, input_hidden_states, schema_states, max_generation_length, gold_sequence=gold_query, input_sequence=input_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, snippets=snippets, dropout_amount=self.dropout)
            all_scores = []
            all_alignments = []
            for prediction in decoder_results.predictions:
                scores = F.softmax(prediction.scores, dim=0)
                alignments = prediction.aligned_tokens
                if (self.params.use_previous_query and self.params.use_copy_switch and (len(previous_queries) > 0)):
                    # Mix generation scores with copy-from-previous-query scores.
                    query_scores = F.softmax(prediction.query_scores, dim=0)
                    copy_switch = prediction.copy_switch
                    scores = torch.cat([(scores * (1 - copy_switch)), (query_scores * copy_switch)], dim=0)
                    alignments = (alignments + prediction.query_tokens)
                all_scores.append(scores)
                all_alignments.append(alignments)
            gold_sequence = gold_query
            loss = torch_utils.compute_loss(gold_sequence, all_scores, all_alignments, get_token_indices)
            if (not training):
                predicted_sequence = torch_utils.get_seq_from_scores(all_scores, all_alignments)
                token_accuracy = torch_utils.per_token_accuracy(gold_sequence, predicted_sequence)
            fed_sequence = gold_sequence
        else:
            # Free decoding (inference).
            decoder_results = self.decoder(utterance_final_state, input_hidden_states, schema_states, max_generation_length, input_sequence=input_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, snippets=snippets, dropout_amount=self.dropout)
            predicted_sequence = decoder_results.sequence
            fed_sequence = predicted_sequence
        decoder_states = [pred.decoder_state for pred in decoder_results.predictions]
        # Expand decoder states so each snippet token contributes one state per
        # token of the snippet's underlying sequence (used for snippet re-encoding).
        for (token, state) in zip(fed_sequence[:(- 1)], decoder_states[1:]):
            if snippet_handler.is_snippet(token):
                snippet_length = 0
                for snippet in snippets:
                    if (snippet.name == token):
                        snippet_length = len(snippet.sequence)
                        break
                assert (snippet_length > 0)
                decoder_states.extend([state for _ in range(snippet_length)])
            else:
                decoder_states.append(state)
        return (predicted_sequence, loss, token_accuracy, decoder_states, decoder_results)

    def encode_schema_bow_simple(self, input_schema):
        """Embed each column name as a bag of words and cache the result on the schema."""
        schema_states = []
        for column_name in input_schema.column_names_embedder_input:
            schema_states.append(input_schema.column_name_embedder_bow(column_name, surface_form=False, column_name_token_embedder=self.column_name_token_embedder))
        input_schema.set_column_name_embeddings(schema_states)
        return schema_states

    def encode_schema_self_attention(self, schema_states):
        """Add a schema-to-schema self-attention vector residually to each schema state."""
        schema_self_attention = self.schema2schema_attention_module(torch.stack(schema_states, dim=0), schema_states).vector
        if (schema_self_attention.dim() == 1):
            schema_self_attention = schema_self_attention.unsqueeze(1)
        residual_schema_states = list(torch.split(schema_self_attention, split_size_or_sections=1, dim=1))
        residual_schema_states = [schema_state.squeeze() for schema_state in residual_schema_states]
        new_schema_states = [(schema_state + residual_schema_state) for (schema_state, residual_schema_state) in zip(schema_states, residual_schema_states)]
        return new_schema_states

    def encode_schema(self, input_schema, dropout=False):
        """Encode each column name with the schema encoder; keep the final hidden state.

        Inputs:
            input_schema: Schema providing column_names_embedder_input.
            dropout (bool): Whether to apply dropout during encoding.
        """
        schema_states = []
        for column_name_embedder_input in input_schema.column_names_embedder_input:
            tokens = column_name_embedder_input.split()
            if dropout:
                (final_schema_state_one, schema_states_one) = self.schema_encoder(tokens, self.column_name_token_embedder, dropout_amount=self.dropout)
            else:
                (final_schema_state_one, schema_states_one) = self.schema_encoder(tokens, self.column_name_token_embedder)
            # final_schema_state_one[1][-1]: last layer's final hidden state.
            schema_states.append(final_schema_state_one[1][(- 1)])
        input_schema.set_column_name_embeddings(schema_states)
        if self.params.use_schema_self_attention:
            schema_states = self.encode_schema_self_attention(schema_states)
        return schema_states

    def get_bert_encoding(self, input_sequence, input_schema, discourse_state, dropout):
        """Encode the utterance and schema jointly with BERT, then run the
        utterance and schema encoders on top of the BERT outputs.

        Returns:
            (final_utterance_state, utterance_states, schema_states)
        """
        (utterance_states, schema_token_states) = utils_bert.get_bert_encoding(self.bert_config, self.model_bert, self.tokenizer, input_sequence, input_schema, bert_input_version=self.params.bert_input_version, num_out_layers_n=1, num_out_layers_h=1)
        if self.params.discourse_level_lstm:
            # Append the running discourse state to every token embedding.
            utterance_token_embedder = (lambda x: torch.cat([x, discourse_state], dim=0))
        else:
            utterance_token_embedder = (lambda x: x)
        if dropout:
            (final_utterance_state, utterance_states) = self.utterance_encoder(utterance_states, utterance_token_embedder, dropout_amount=self.dropout)
        else:
            (final_utterance_state, utterance_states) = self.utterance_encoder(utterance_states, utterance_token_embedder)
        schema_states = []
        for schema_token_states1 in schema_token_states:
            if dropout:
                (final_schema_state_one, schema_states_one) = self.schema_encoder(schema_token_states1, (lambda x: x), dropout_amount=self.dropout)
            else:
                (final_schema_state_one, schema_states_one) = self.schema_encoder(schema_token_states1, (lambda x: x))
            schema_states.append(final_schema_state_one[1][(- 1)])
        input_schema.set_column_name_embeddings(schema_states)
        if self.params.use_schema_self_attention:
            schema_states = self.encode_schema_self_attention(schema_states)
        return (final_utterance_state, utterance_states, schema_states)

    def get_query_token_embedding(self, output_token, input_schema):
        """Embed one previous-query token, falling back from the output vocabulary
        to the schema column embeddings, then to the literal 'value' token."""
        if input_schema:
            if (not (self.output_embedder.in_vocabulary(output_token) or input_schema.in_vocabulary(output_token, surface_form=True))):
                output_token = 'value'
            if self.output_embedder.in_vocabulary(output_token):
                output_token_embedding = self.output_embedder(output_token)
            else:
                output_token_embedding = input_schema.column_name_embedder(output_token, surface_form=True)
        else:
            output_token_embedding = self.output_embedder(output_token)
        return output_token_embedding

    def get_utterance_attention(self, final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep):
        """Attend over the final (c, h) states of the kept utterances and add the
        attention vector residually to the newest state.

        Returns:
            (final_utterance_states_c, final_utterance_states_h, final_utterance_state)
            with the histories truncated to num_utterances_to_keep.
        """
        final_utterance_states_c.append(final_utterance_state[0][0])
        final_utterance_states_h.append(final_utterance_state[1][0])
        final_utterance_states_c = final_utterance_states_c[(- num_utterances_to_keep):]
        final_utterance_states_h = final_utterance_states_h[(- num_utterances_to_keep):]
        attention_result = self.utterance_attention_module(final_utterance_states_c[(- 1)], final_utterance_states_c)
        final_utterance_state_attention_c = (final_utterance_states_c[(- 1)] + attention_result.vector.squeeze())
        attention_result = self.utterance_attention_module(final_utterance_states_h[(- 1)], final_utterance_states_h)
        final_utterance_state_attention_h = (final_utterance_states_h[(- 1)] + attention_result.vector.squeeze())
        final_utterance_state = ([final_utterance_state_attention_c], [final_utterance_state_attention_h])
        return (final_utterance_states_c, final_utterance_states_h, final_utterance_state)

    def get_previous_queries(self, previous_queries, previous_query_states, previous_query, input_schema):
        """Append and encode the latest previous query, keeping at most
        self.params.maximum_queries query histories."""
        previous_queries.append(previous_query)
        num_queries_to_keep = min(self.params.maximum_queries, len(previous_queries))
        previous_queries = previous_queries[(- num_queries_to_keep):]
        query_token_embedder = (lambda query_token: self.get_query_token_embedding(query_token, input_schema))
        (_, previous_outputs) = self.query_encoder(previous_query, query_token_embedder, dropout_amount=self.dropout)
        assert (len(previous_outputs) == len(previous_query))
        previous_query_states.append(previous_outputs)
        previous_query_states = previous_query_states[(- num_queries_to_keep):]
        return (previous_queries, previous_query_states)

    def train_step(self, interaction, max_generation_length, snippet_alignment_probability=1.0):
        """Trains the interaction-level model on a single interaction.

        Inputs:
            interaction (Interaction): The interaction to train on.
            max_generation_length (int): Turns whose gold or previous query is
                longer than this are skipped.
            snippet_alignment_probability (float): The probability that a snippet
                will be used in constructing the gold sequence.

        Returns:
            float: The (possibly batch-reweighted) loss, or 0.0 if no turn was trained.
        """
        losses = []
        total_gold_tokens = 0
        input_hidden_states = []
        input_sequences = []
        final_utterance_states_c = []
        final_utterance_states_h = []
        previous_query_states = []
        previous_queries = []
        decoder_states = []
        discourse_state = None
        if self.params.discourse_level_lstm:
            (discourse_state, discourse_lstm_states) = self._initialize_discourse_states()
        discourse_states = []
        input_schema = interaction.get_schema()
        schema_states = []
        if (input_schema and (not self.params.use_bert)):
            schema_states = self.encode_schema_bow_simple(input_schema)
        for (utterance_index, utterance) in enumerate(interaction.gold_utterances()):
            # Some interactions are truncated to a maximum turn index.
            if ((interaction.identifier in LIMITED_INTERACTIONS) and (utterance_index > LIMITED_INTERACTIONS[interaction.identifier])):
                break
            input_sequence = utterance.input_sequence()
            available_snippets = utterance.snippets()
            previous_query = utterance.previous_query()
            # Optionally substitute snippets into the gold query.
            if (snippet_alignment_probability < 1.0):
                gold_query = (sql_util.add_snippets_to_query(available_snippets, utterance.contained_entities(), utterance.anonymized_gold_query(), prob_align=snippet_alignment_probability) + [vocab.EOS_TOK])
            else:
                gold_query = utterance.gold_query()
            # Encode the utterance (and, with BERT, the schema as well).
            if (not self.params.use_bert):
                if self.params.discourse_level_lstm:
                    utterance_token_embedder = (lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0))
                else:
                    utterance_token_embedder = self.input_embedder
                (final_utterance_state, utterance_states) = self.utterance_encoder(input_sequence, utterance_token_embedder, dropout_amount=self.dropout)
            else:
                (final_utterance_state, utterance_states, schema_states) = self.get_bert_encoding(input_sequence, input_schema, discourse_state, dropout=True)
            input_hidden_states.extend(utterance_states)
            input_sequences.append(input_sequence)
            num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
            if self.params.discourse_level_lstm:
                # Advance the discourse LSTM with this turn's final hidden state.
                (_, discourse_state, discourse_lstm_states) = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states, self.dropout)
            if self.params.use_utterance_attention:
                (final_utterance_states_c, final_utterance_states_h, final_utterance_state) = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
            if self.params.state_positional_embeddings:
                (utterance_states, flat_sequence) = self._add_positional_embeddings(input_hidden_states, input_sequences)
            else:
                flat_sequence = []
                for utt in input_sequences[(- num_utterances_to_keep):]:
                    flat_sequence.extend(utt)
            snippets = None
            if self.params.use_snippets:
                if self.params.previous_decoder_snippet_encoding:
                    snippets = encode_snippets_with_states(available_snippets, decoder_states)
                else:
                    snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
            if (self.params.use_previous_query and (len(previous_query) > 0)):
                (previous_queries, previous_query_states) = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
            # Skip turns whose gold/previous query exceed the generation budget.
            if ((len(gold_query) <= max_generation_length) and (len(previous_query) <= max_generation_length)):
                prediction = self.predict_turn(final_utterance_state, utterance_states, schema_states, max_generation_length, gold_query=gold_query, snippets=snippets, input_sequence=flat_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, feed_gold_tokens=True, training=True)
                loss = prediction[1]
                decoder_states = prediction[3]
                total_gold_tokens += len(gold_query)
                losses.append(loss)
            else:
                # Snippet encoding from decoder states cannot continue after a skip.
                if self.params.previous_decoder_snippet_encoding:
                    break
                continue
            torch.cuda.empty_cache()
        if losses:
            # Per-token average loss over the interaction.
            average_loss = (torch.sum(torch.stack(losses)) / total_gold_tokens)
            normalized_loss = average_loss
            if self.params.reweight_batch:
                normalized_loss = ((len(losses) * average_loss) / float(self.params.batch_size))
            normalized_loss.backward()
            self.trainer.step()
            if self.params.fine_tune_bert:
                self.bert_trainer.step()
            self.zero_grad()
            loss_scalar = normalized_loss.item()
        else:
            loss_scalar = 0.0
        return loss_scalar

    def predict_with_predicted_queries(self, interaction, max_generation_length, syntax_restrict=True):
        """Predicts an interaction, using the predicted queries to get snippets.

        NOTE(review): syntax_restrict is immediately overwritten to False below,
        so the parameter currently has no effect — confirm this is intentional.
        """
        syntax_restrict = False
        predictions = []
        input_hidden_states = []
        input_sequences = []
        final_utterance_states_c = []
        final_utterance_states_h = []
        previous_query_states = []
        previous_queries = []
        discourse_state = None
        if self.params.discourse_level_lstm:
            (discourse_state, discourse_lstm_states) = self._initialize_discourse_states()
        discourse_states = []
        input_schema = interaction.get_schema()
        schema_states = []
        if (input_schema and (not self.params.use_bert)):
            schema_states = self.encode_schema_bow_simple(input_schema)
        interaction.start_interaction()
        while (not interaction.done()):
            utterance = interaction.next_utterance()
            available_snippets = utterance.snippets()
            previous_query = utterance.previous_query()
            input_sequence = utterance.input_sequence()
            if (not self.params.use_bert):
                if self.params.discourse_level_lstm:
                    utterance_token_embedder = (lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0))
                else:
                    utterance_token_embedder = self.input_embedder
                (final_utterance_state, utterance_states) = self.utterance_encoder(input_sequence, utterance_token_embedder)
            else:
                (final_utterance_state, utterance_states, schema_states) = self.get_bert_encoding(input_sequence, input_schema, discourse_state, dropout=False)
            input_hidden_states.extend(utterance_states)
            input_sequences.append(input_sequence)
            num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
            if self.params.discourse_level_lstm:
                (_, discourse_state, discourse_lstm_states) = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states)
            if self.params.use_utterance_attention:
                (final_utterance_states_c, final_utterance_states_h, final_utterance_state) = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
            if self.params.state_positional_embeddings:
                (utterance_states, flat_sequence) = self._add_positional_embeddings(input_hidden_states, input_sequences)
            else:
                flat_sequence = []
                for utt in input_sequences[(- num_utterances_to_keep):]:
                    flat_sequence.extend(utt)
            snippets = None
            if self.params.use_snippets:
                snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
            if (self.params.use_previous_query and (len(previous_query) > 0)):
                (previous_queries, previous_query_states) = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
            results = self.predict_turn(final_utterance_state, utterance_states, schema_states, max_generation_length, input_sequence=flat_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, snippets=snippets)
            predicted_sequence = results[0]
            predictions.append(results)
            # Strip the EOS token; if there is none, fall back to a trivial query.
            anonymized_sequence = utterance.remove_snippets(predicted_sequence)
            if (EOS_TOK in anonymized_sequence):
                anonymized_sequence = anonymized_sequence[:(- 1)]
            else:
                anonymized_sequence = ['select', '*', 'from', 't1']
            if (not syntax_restrict):
                utterance.set_predicted_query(interaction.remove_snippets(predicted_sequence))
                if input_schema:
                    interaction.add_utterance(utterance, anonymized_sequence, previous_snippets=utterance.snippets(), simple=True)
                else:
                    interaction.add_utterance(utterance, anonymized_sequence, previous_snippets=utterance.snippets(), simple=False)
            else:
                utterance.set_predicted_query(utterance.previous_query())
                interaction.add_utterance(utterance, utterance.previous_query(), previous_snippets=utterance.snippets())
        return predictions

    def predict_with_gold_queries(self, interaction, max_generation_length, feed_gold_query=False):
        """Predicts SQL queries for an interaction.

        Inputs:
            interaction (Interaction): Interaction to predict for.
            feed_gold_query (bool): Whether or not to feed the gold token to the
                generation step.
        """
        predictions = []
        input_hidden_states = []
        input_sequences = []
        final_utterance_states_c = []
        final_utterance_states_h = []
        previous_query_states = []
        previous_queries = []
        decoder_states = []
        discourse_state = None
        if self.params.discourse_level_lstm:
            (discourse_state, discourse_lstm_states) = self._initialize_discourse_states()
        discourse_states = []
        input_schema = interaction.get_schema()
        schema_states = []
        if (input_schema and (not self.params.use_bert)):
            schema_states = self.encode_schema_bow_simple(input_schema)
        for utterance in interaction.gold_utterances():
            input_sequence = utterance.input_sequence()
            available_snippets = utterance.snippets()
            previous_query = utterance.previous_query()
            if (not self.params.use_bert):
                if self.params.discourse_level_lstm:
                    utterance_token_embedder = (lambda token: torch.cat([self.input_embedder(token), discourse_state], dim=0))
                else:
                    utterance_token_embedder = self.input_embedder
                # NOTE(review): dropout is applied here even though this is a
                # prediction path — confirm intended.
                (final_utterance_state, utterance_states) = self.utterance_encoder(input_sequence, utterance_token_embedder, dropout_amount=self.dropout)
            else:
                (final_utterance_state, utterance_states, schema_states) = self.get_bert_encoding(input_sequence, input_schema, discourse_state, dropout=True)
            input_hidden_states.extend(utterance_states)
            input_sequences.append(input_sequence)
            num_utterances_to_keep = min(self.params.maximum_utterances, len(input_sequences))
            if self.params.discourse_level_lstm:
                (_, discourse_state, discourse_lstm_states) = torch_utils.forward_one_multilayer(self.discourse_lstms, final_utterance_state[1][0], discourse_lstm_states, self.dropout)
            if self.params.use_utterance_attention:
                (final_utterance_states_c, final_utterance_states_h, final_utterance_state) = self.get_utterance_attention(final_utterance_states_c, final_utterance_states_h, final_utterance_state, num_utterances_to_keep)
            if self.params.state_positional_embeddings:
                (utterance_states, flat_sequence) = self._add_positional_embeddings(input_hidden_states, input_sequences)
            else:
                flat_sequence = []
                for utt in input_sequences[(- num_utterances_to_keep):]:
                    flat_sequence.extend(utt)
            snippets = None
            if self.params.use_snippets:
                if self.params.previous_decoder_snippet_encoding:
                    snippets = encode_snippets_with_states(available_snippets, decoder_states)
                else:
                    snippets = self._encode_snippets(previous_query, available_snippets, input_schema)
            if (self.params.use_previous_query and (len(previous_query) > 0)):
                (previous_queries, previous_query_states) = self.get_previous_queries(previous_queries, previous_query_states, previous_query, input_schema)
            prediction = self.predict_turn(final_utterance_state, utterance_states, schema_states, max_generation_length, gold_query=utterance.gold_query(), snippets=snippets, input_sequence=flat_sequence, previous_queries=previous_queries, previous_query_states=previous_query_states, input_schema=input_schema, feed_gold_tokens=feed_gold_query)
            decoder_states = prediction[3]
            predictions.append(prediction)
        return predictions
class PredictionInput(namedtuple('PredictionInput',
                                 ('decoder_state',
                                  'input_hidden_states',
                                  'snippets',
                                  'input_sequence'))):
    """Bundle of inputs handed to the token predictor for one decoding step."""

    __slots__ = ()
class PredictionInputWithSchema(namedtuple('PredictionInputWithSchema',
                                           ('decoder_state',
                                            'input_hidden_states',
                                            'schema_states',
                                            'snippets',
                                            'input_sequence',
                                            'previous_queries',
                                            'previous_query_states',
                                            'input_schema'))):
    """Token-predictor inputs that additionally carry schema and query history."""

    __slots__ = ()
class TokenPrediction(namedtuple('TokenPrediction',
                                 ('scores',
                                  'aligned_tokens',
                                  'utterance_attention_results',
                                  'schema_attention_results',
                                  'query_attention_results',
                                  'copy_switch',
                                  'query_scores',
                                  'query_tokens',
                                  'decoder_state'))):
    """A token prediction."""

    __slots__ = ()
def score_snippets(snippets, scorer):
    """Score each snippet's embedding against a scorer vector.

    Inputs:
        snippets (list of Snippet): The snippets to score; each provides an
            `embedding` tensor and a `name`.
        scorer (Tensor): Column vector against which to score the snippets.

    Returns:
        (Tensor, list of str): One score per snippet, and the corresponding
        snippet names in the same order.

    Raises:
        ValueError: If the score count does not match the snippet count.
    """
    embedding_matrix = torch.stack([snip.embedding for snip in snippets], dim=1)
    scores = torch.mm(torch.t(scorer), embedding_matrix).t()
    if scores.size()[0] != len(snippets):
        raise ValueError('Got ' + str(scores.size()[0]) + ' scores for ' + str(len(snippets)) + ' snippets')
    names = [snip.name for snip in snippets]
    return (scores, names)
def score_schema_tokens(input_schema, schema_states, scorer):
    """Score every schema column against a scorer vector.

    Inputs:
        input_schema: Schema object; its length is the number of columns and
            `column_names_surface_form` gives their surface-form names.
        schema_states (Tensor): Column-stacked schema states (one column per column name).
        scorer (Tensor): Column vector to score against.

    Returns:
        (Tensor, list of str): One score per column, plus the surface-form names.

    Raises:
        ValueError: If the score count does not match the schema size.
    """
    scores = torch.mm(torch.t(scorer), schema_states).t()
    if scores.size()[0] != len(input_schema):
        raise ValueError('Got ' + str(scores.size()[0]) + ' scores for ' + str(len(input_schema)) + ' schema tokens')
    return (scores, input_schema.column_names_surface_form)
def score_query_tokens(previous_query, previous_query_states, scorer):
    """Score each token of a previous query against a scorer vector.

    Inputs:
        previous_query (list of str): The previous query's tokens.
        previous_query_states (Tensor): Column-stacked encoder states, one per token.
        scorer (Tensor): Column vector to score against.

    Returns:
        (Tensor, list of str): One score per query token, plus the tokens themselves.

    Raises:
        ValueError: If the score count does not match the query length.
    """
    scores = torch.mm(torch.t(scorer), previous_query_states).t()
    if scores.size()[0] != len(previous_query):
        raise ValueError('Got ' + str(scores.size()[0]) + ' scores for ' + str(len(previous_query)) + ' query tokens')
    return (scores, previous_query)
class TokenPredictor(torch.nn.Module):
    """Predicts a token given a (decoder) state.

    Attributes:
        vocabulary (Vocabulary): A vocabulary object for the output.
        attention_module (Attention): Attends over the input hidden states with
            the decoder state as the query.
        state_transform_weights: Transforms [decoder state; attention vector]
            into the intermediate prediction state.
        vocabulary_weights: Final layer weights.
        vocabulary_biases: Final layer biases.
    """

    def __init__(self, params, vocabulary, attention_key_size):
        super().__init__()
        self.params = params
        self.vocabulary = vocabulary
        self.attention_module = Attention(params.decoder_state_size, attention_key_size, attention_key_size)
        self.state_transform_weights = torch_utils.add_params(((params.decoder_state_size + attention_key_size), params.decoder_state_size), 'weights-state-transform')
        self.vocabulary_weights = torch_utils.add_params((params.decoder_state_size, len(vocabulary)), 'weights-vocabulary')
        self.vocabulary_biases = torch_utils.add_params(tuple([len(vocabulary)]), 'biases-vocabulary')

    def _get_intermediate_state(self, state, dropout_amount=0.0):
        """Apply the tanh state transform, then dropout."""
        intermediate_state = torch.tanh(torch_utils.linear_layer(state, self.state_transform_weights))
        return F.dropout(intermediate_state, dropout_amount)

    def _score_vocabulary_tokens(self, state):
        """Score every output-vocabulary token against the intermediate state.

        Raises:
            ValueError: If the score count does not match the vocabulary size.
        """
        scores = torch.t(torch_utils.linear_layer(state, self.vocabulary_weights, self.vocabulary_biases))
        if (scores.size()[0] != len(self.vocabulary.inorder_tokens)):
            raise ValueError((((('Got ' + str(scores.size()[0])) + ' scores for ') + str(len(self.vocabulary.inorder_tokens))) + ' vocabulary items'))
        return (scores, self.vocabulary.inorder_tokens)

    def forward(self, prediction_input, dropout_amount=0.0):
        """Score the output vocabulary for one decoding step.

        Inputs:
            prediction_input (PredictionInput): Decoder state plus encoder hidden states.
            dropout_amount (float): Dropout applied to the intermediate state.

        Returns:
            TokenPrediction: Vocabulary scores and their aligned tokens.
        """
        decoder_state = prediction_input.decoder_state
        input_hidden_states = prediction_input.input_hidden_states
        attention_results = self.attention_module(decoder_state, input_hidden_states)
        state_and_attn = torch.cat([decoder_state, attention_results.vector], dim=0)
        intermediate_state = self._get_intermediate_state(state_and_attn, dropout_amount=dropout_amount)
        (vocab_scores, vocab_tokens) = self._score_vocabulary_tokens(intermediate_state)
        # Bug fix: TokenPrediction is a 9-field namedtuple with no defaults; the
        # original call passed only 4 positional arguments, which raises a
        # TypeError and would have put decoder_state into the
        # schema_attention_results slot. Fill the unused fields with None, the
        # same convention SnippetTokenPredictor uses.
        return TokenPrediction(vocab_scores, vocab_tokens, attention_results, None, None, None, None, None, decoder_state)
class SchemaTokenPredictor(TokenPredictor):
    """Token predictor that can also score snippets, schema columns, and
    previous-query tokens, depending on the enabled params flags.

    Attributes:
        snippet_weights: Weights for scoring snippets against the intermediate state.
    """

    def __init__(self, params, vocabulary, utterance_attention_key_size, schema_attention_key_size, snippet_size):
        TokenPredictor.__init__(self, params, vocabulary, utterance_attention_key_size)
        if params.use_snippets:
            if (snippet_size <= 0):
                raise ValueError(('Snippet size must be greater than zero; was ' + str(snippet_size)))
            self.snippet_weights = torch_utils.add_params((params.decoder_state_size, snippet_size), 'weights-snippet')
        if params.use_schema_attention:
            # Reuse the base attention module for utterances; add one for the schema.
            self.utterance_attention_module = self.attention_module
            self.schema_attention_module = Attention(params.decoder_state_size, schema_attention_key_size, schema_attention_key_size)
        if self.params.use_query_attention:
            self.query_attention_module = Attention(params.decoder_state_size, params.encoder_state_size, params.encoder_state_size)
            # Used in forward() when there is no previous query yet.
            self.start_query_attention_vector = torch_utils.add_params((params.encoder_state_size,), 'start_query_attention_vector')
        # Widen the state transform to accept the extra attention vectors
        # (overrides the weights created by TokenPredictor.__init__).
        if (params.use_schema_attention and self.params.use_query_attention):
            self.state_transform_weights = torch_utils.add_params(((((params.decoder_state_size + utterance_attention_key_size) + schema_attention_key_size) + params.encoder_state_size), params.decoder_state_size), 'weights-state-transform')
        elif params.use_schema_attention:
            self.state_transform_weights = torch_utils.add_params((((params.decoder_state_size + utterance_attention_key_size) + schema_attention_key_size), params.decoder_state_size), 'weights-state-transform')
        self.schema_token_weights = torch_utils.add_params((params.decoder_state_size, schema_attention_key_size), 'weights-schema-token')
        if self.params.use_previous_query:
            self.query_token_weights = torch_utils.add_params((params.decoder_state_size, self.params.encoder_state_size), 'weights-query-token')
        if self.params.use_copy_switch:
            # NOTE(review): these parameters reuse the name 'weights-state-transform'
            # already used above — confirm add_params tolerates duplicate names.
            if self.params.use_query_attention:
                self.state2copyswitch_transform_weights = torch_utils.add_params(((((params.decoder_state_size + utterance_attention_key_size) + schema_attention_key_size) + params.encoder_state_size), 1), 'weights-state-transform')
            else:
                self.state2copyswitch_transform_weights = torch_utils.add_params((((params.decoder_state_size + utterance_attention_key_size) + schema_attention_key_size), 1), 'weights-state-transform')

    def _get_snippet_scorer(self, state):
        # Project the intermediate state into snippet-embedding space.
        scorer = torch.t(torch_utils.linear_layer(state, self.snippet_weights))
        return scorer

    def _get_schema_token_scorer(self, state):
        # Project the intermediate state into schema-state space.
        scorer = torch.t(torch_utils.linear_layer(state, self.schema_token_weights))
        return scorer

    def _get_query_token_scorer(self, state):
        # Project the intermediate state into query-encoder-state space.
        scorer = torch.t(torch_utils.linear_layer(state, self.query_token_weights))
        return scorer

    def _get_copy_switch(self, state):
        # Scalar in (0, 1): probability mass given to copying from the previous query.
        copy_switch = torch.sigmoid(torch_utils.linear_layer(state, self.state2copyswitch_transform_weights))
        return copy_switch.squeeze()

    def forward(self, prediction_input, dropout_amount=0.0):
        """Score vocabulary tokens plus (per flags) snippets, schema columns,
        and previous-query tokens for one decoding step.

        Returns:
            TokenPrediction: All nine fields populated as applicable.
        """
        decoder_state = prediction_input.decoder_state
        input_hidden_states = prediction_input.input_hidden_states
        snippets = prediction_input.snippets
        input_schema = prediction_input.input_schema
        schema_states = prediction_input.schema_states
        if self.params.use_schema_attention:
            schema_attention_results = self.schema_attention_module(decoder_state, schema_states)
            utterance_attention_results = self.utterance_attention_module(decoder_state, input_hidden_states)
        else:
            utterance_attention_results = self.attention_module(decoder_state, input_hidden_states)
            schema_attention_results = None
        query_attention_results = None
        if self.params.use_query_attention:
            previous_query_states = prediction_input.previous_query_states
            if (len(previous_query_states) > 0):
                # Attend over the most recent previous query's token states.
                query_attention_results = self.query_attention_module(decoder_state, previous_query_states[(- 1)])
            else:
                # No previous query yet: use the learned start vector.
                query_attention_results = self.start_query_attention_vector
                query_attention_results = AttentionResult(None, None, query_attention_results)
        # Concatenate decoder state with the enabled attention vectors.
        if (self.params.use_schema_attention and self.params.use_query_attention):
            state_and_attn = torch.cat([decoder_state, utterance_attention_results.vector, schema_attention_results.vector, query_attention_results.vector], dim=0)
        elif self.params.use_schema_attention:
            state_and_attn = torch.cat([decoder_state, utterance_attention_results.vector, schema_attention_results.vector], dim=0)
        else:
            state_and_attn = torch.cat([decoder_state, utterance_attention_results.vector], dim=0)
        intermediate_state = self._get_intermediate_state(state_and_attn, dropout_amount=dropout_amount)
        (vocab_scores, vocab_tokens) = self._score_vocabulary_tokens(intermediate_state)
        final_scores = vocab_scores
        aligned_tokens = []
        aligned_tokens.extend(vocab_tokens)
        if (self.params.use_snippets and snippets):
            (snippet_scores, snippet_tokens) = score_snippets(snippets, self._get_snippet_scorer(intermediate_state))
            final_scores = torch.cat([final_scores, snippet_scores], dim=0)
            aligned_tokens.extend(snippet_tokens)
        # Schema columns are always scored.
        schema_states = torch.stack(schema_states, dim=1)
        (schema_scores, schema_tokens) = score_schema_tokens(input_schema, schema_states, self._get_schema_token_scorer(intermediate_state))
        final_scores = torch.cat([final_scores, schema_scores], dim=0)
        aligned_tokens.extend(schema_tokens)
        previous_queries = prediction_input.previous_queries
        previous_query_states = prediction_input.previous_query_states
        copy_switch = None
        query_scores = None
        query_tokens = None
        if (self.params.use_previous_query and (len(previous_queries) > 0)):
            if self.params.use_copy_switch:
                copy_switch = self._get_copy_switch(state_and_attn)
            # NOTE(review): each iteration overwrites query_scores/query_tokens,
            # so only the last (most recent) previous query survives — confirm intended.
            for (turn, (previous_query, previous_query_state)) in enumerate(zip(previous_queries, previous_query_states)):
                assert (len(previous_query) == len(previous_query_state))
                previous_query_state = torch.stack(previous_query_state, dim=1)
                (query_scores, query_tokens) = score_query_tokens(previous_query, previous_query_state, self._get_query_token_scorer(intermediate_state))
                query_scores = query_scores.squeeze()
        final_scores = final_scores.squeeze()
        return TokenPrediction(final_scores, aligned_tokens, utterance_attention_results, schema_attention_results, query_attention_results, copy_switch, query_scores, query_tokens, decoder_state)
class SnippetTokenPredictor(TokenPredictor):
    """Token predictor that can also copy snippets of previous queries.

    Attributes:
        snippet_weights: Parameter matrix used to score snippet encodings
            against the intermediate decoder state.
    """

    def __init__(self, params, vocabulary, attention_key_size, snippet_size):
        TokenPredictor.__init__(self, params, vocabulary, attention_key_size)
        if snippet_size <= 0:
            raise ValueError('Snippet size must be greater than zero; was ' + str(snippet_size))
        self.snippet_weights = torch_utils.add_params((params.decoder_state_size, snippet_size), 'weights-snippet')

    def _get_snippet_scorer(self, state):
        # Project the intermediate state into snippet space; transpose so each
        # snippet encoding can be scored with a single matrix product.
        return torch.t(torch_utils.linear_layer(state, self.snippet_weights))

    def forward(self, prediction_input, dropout_amount=0.0):
        """Scores vocabulary tokens plus, when present, snippet tokens."""
        decoder_state = prediction_input.decoder_state
        snippets = prediction_input.snippets
        attention_results = self.attention_module(decoder_state, prediction_input.input_hidden_states)
        state_and_attn = torch.cat([decoder_state, attention_results.vector], dim=0)
        intermediate_state = self._get_intermediate_state(state_and_attn, dropout_amount=dropout_amount)
        final_scores, vocab_tokens = self._score_vocabulary_tokens(intermediate_state)
        aligned_tokens = list(vocab_tokens)
        if snippets:
            snippet_scores, snippet_tokens = score_snippets(snippets, self._get_snippet_scorer(intermediate_state))
            final_scores = torch.cat([final_scores, snippet_scores], dim=0)
            aligned_tokens.extend(snippet_tokens)
        return TokenPrediction(final_scores.squeeze(), aligned_tokens, attention_results, None, None, None, None, None, decoder_state)
class AnonymizationTokenPredictor(TokenPredictor):
    """ Token predictor that also predicts anonymization tokens.

    Attributes:
        anonymizer (Anonymizer): The anonymization object.
    """

    def __init__(self, params, vocabulary, attention_key_size, anonymizer):
        TokenPredictor.__init__(self, params, vocabulary, attention_key_size)
        if not anonymizer:
            raise ValueError('Expected an anonymizer, but was None')
        self.anonymizer = anonymizer

    def _score_anonymized_tokens(self, input_sequence, attention_scores):
        """Scores each anonymization token in the input by its attention score.

        Inputs:
            input_sequence (list of str): The input tokens.
            attention_scores: Per-input-token attention scores, indexable by
                input position.

        Returns:
            (scores, tokens): a score tensor with one column, or None when the
            input contains no anonymization tokens, and the matching tokens.
        """
        scores = []
        tokens = []
        for i, token in enumerate(input_sequence):
            if self.anonymizer.is_anon_tok(token):
                scores.append(attention_scores[i])
                tokens.append(token)
        if len(scores) > 0:
            # Defensive check; scores/tokens are appended in lockstep above.
            if len(scores) != len(tokens):
                raise ValueError('Got ' + str(len(scores)) + ' scores for ' + str(len(tokens)) + ' anonymized tokens')
            anonymized_scores = torch.cat(scores, dim=0)
            if anonymized_scores.dim() == 1:
                anonymized_scores = anonymized_scores.unsqueeze(1)
            return (anonymized_scores, tokens)
        else:
            return (None, [])

    def forward(self, prediction_input, dropout_amount=0.0):
        """Scores vocabulary tokens plus any anonymization tokens in the input."""
        decoder_state = prediction_input.decoder_state
        input_hidden_states = prediction_input.input_hidden_states
        input_sequence = prediction_input.input_sequence
        assert input_sequence
        attention_results = self.attention_module(decoder_state, input_hidden_states)
        state_and_attn = torch.cat([decoder_state, attention_results.vector], dim=0)
        intermediate_state = self._get_intermediate_state(state_and_attn, dropout_amount=dropout_amount)
        (vocab_scores, vocab_tokens) = self._score_vocabulary_tokens(intermediate_state)
        final_scores = vocab_scores
        aligned_tokens = []
        aligned_tokens.extend(vocab_tokens)
        (anonymized_scores, anonymized_tokens) = self._score_anonymized_tokens(input_sequence, attention_results.scores)
        # Bug fix: the old `if anonymized_scores:` evaluated tensor truthiness,
        # which raises "Boolean value of Tensor with more than one element is
        # ambiguous" whenever more than one anonymization token is present.
        # Compare against None instead, matching
        # SnippetAnonymizationTokenPredictor.forward.
        if anonymized_scores is not None:
            final_scores = torch.cat([final_scores, anonymized_scores], dim=0)
            aligned_tokens.extend(anonymized_tokens)
        final_scores = final_scores.squeeze()
        return TokenPrediction(final_scores, aligned_tokens, attention_results, None, None, None, None, None, decoder_state)
class SnippetAnonymizationTokenPredictor(SnippetTokenPredictor, AnonymizationTokenPredictor):
    """ Token predictor that both anonymizes and scores snippets."""

    def __init__(self, params, vocabulary, attention_key_size, snippet_size, anonymizer):
        # Initialize both parents explicitly; each registers its own parameters
        # (the anonymizer object and the snippet scoring weights).
        AnonymizationTokenPredictor.__init__(self, params, vocabulary, attention_key_size, anonymizer)
        SnippetTokenPredictor.__init__(self, params, vocabulary, attention_key_size, snippet_size)

    def forward(self, prediction_input, dropout_amount=0.0):
        # Builds one output distribution by concatenating, in order:
        # vocabulary scores, snippet scores (if any), anonymization-token scores.
        decoder_state = prediction_input.decoder_state
        assert prediction_input.input_sequence
        snippets = prediction_input.snippets
        # NOTE(review): `input_hidden_states` is assigned but the attention call
        # below reads prediction_input.input_hidden_states directly.
        input_hidden_states = prediction_input.input_hidden_states
        attention_results = self.attention_module(decoder_state, prediction_input.input_hidden_states)
        state_and_attn = torch.cat([decoder_state, attention_results.vector], dim=0)
        intermediate_state = self._get_intermediate_state(state_and_attn, dropout_amount=dropout_amount)
        (final_scores, vocab_tokens) = self._score_vocabulary_tokens(intermediate_state)
        aligned_tokens = []
        aligned_tokens.extend(vocab_tokens)
        if snippets:
            # Snippets are scored against the same intermediate state.
            (snippet_scores, snippet_tokens) = score_snippets(snippets, self._get_snippet_scorer(intermediate_state))
            final_scores = torch.cat([final_scores, snippet_scores], dim=0)
            aligned_tokens.extend(snippet_tokens)
        # Anonymization tokens are scored by their attention weights; the helper
        # returns (None, []) when the input has no such tokens.
        (anonymized_scores, anonymized_tokens) = self._score_anonymized_tokens(prediction_input.input_sequence, attention_results.scores)
        if (anonymized_scores is not None):
            final_scores = torch.cat([final_scores, anonymized_scores], dim=0)
            aligned_tokens.extend(anonymized_tokens)
        final_scores = final_scores.squeeze()
        return TokenPrediction(final_scores, aligned_tokens, attention_results, None, None, None, None, None, decoder_state)
def construct_token_predictor(params, vocabulary, utterance_attention_key_size, schema_attention_key_size, snippet_size, anonymizer=None):
    """ Constructs a token predictor given the parameters.

    Inputs:
        params (namespace): Command line parameters/hyperparameters.
        vocabulary (Vocabulary): Vocabulary object for output generation.
        utterance_attention_key_size (int): Size of the utterance attention keys.
        schema_attention_key_size (int): Size of the schema attention keys.
        snippet_size (int): Size of the snippet encodings.
        anonymizer (Anonymizer, optional): An anonymization object.

    Returns:
        A TokenPredictor subclass instance; exits on unsupported configurations.
    """
    no_snippet_encoding = not params.previous_decoder_snippet_encoding
    if not anonymizer and no_snippet_encoding:
        print('using SchemaTokenPredictor')
        return SchemaTokenPredictor(params, vocabulary, utterance_attention_key_size, schema_attention_key_size, snippet_size)
    if params.use_snippets and anonymizer and no_snippet_encoding:
        print('using SnippetAnonymizationTokenPredictor')
        return SnippetAnonymizationTokenPredictor(params, vocabulary, utterance_attention_key_size, snippet_size, anonymizer)
    print('Unknown token_predictor')
    exit()
def linear_layer(exp, weights, biases=None):
    """Applies an affine transform ``exp @ weights (+ biases)``.

    A 1-D input is promoted to a single-row matrix first, so the result
    always has a leading batch dimension of at least 1.
    """
    if exp.dim() == 1:
        exp = exp.unsqueeze(0)
    # Inner dimensions must agree for the matrix product.
    assert exp.size()[1] == weights.size()[0]
    result = torch.mm(exp, weights)
    if biases is not None:
        assert weights.size()[1] == biases.size()[0]
        result = result + biases
    return result
def compute_loss(gold_seq, scores, index_to_token_maps, gold_tok_to_id, noise=1e-08):
    """ Computes the loss of a gold sequence given scores.

    Inputs:
        gold_seq (list of str): A sequence of gold tokens.
        scores (list of Tensor): Probability distributions over output tokens,
            one per position in gold_seq.
        index_to_token_maps (list of dict str->list of int): Maps from index in
            the sequence to a dictionary mapping a string to a set of integers.
        gold_tok_to_id (lambda (str, dict)->list of int): Maps the gold token
            (via the lookup map) to the indices in the distribution where the
            gold token occurs.
        noise (float, optional): Smoothing added when the gold token maps to
            several indices.

    Returns:
        Tensor: the summed negative log-likelihood over the sequence.
    """
    assert len(gold_seq) == len(scores) == len(index_to_token_maps)
    losses = []
    for gold_tok, probdist, token_map in zip(gold_seq, scores, index_to_token_maps):
        gold_indices = gold_tok_to_id(gold_tok, token_map)
        assert len(gold_indices) > 0
        # A unique gold index needs no smoothing; ambiguity gets `noise` added.
        smoothing = 0 if len(gold_indices) == 1 else noise
        prob_of_tok = smoothing + torch.sum(probdist[gold_indices])
        losses.append(-torch.log(prob_of_tok))
    return torch.sum(torch.stack(losses))
def get_seq_from_scores(scores, index_to_token_maps):
    """Greedy-decodes the argmax token at each output position.

    Inputs:
        scores (list of Tensor): One score vector per output position.
        index_to_token_maps (list of list of str): For each position, maps an
            index in the score vector to its token string.

    Returns:
        list of str: the argmax token sequence.
    """
    predicted = []
    for position_scores, token_map in zip(scores, index_to_token_maps):
        values = position_scores.cpu().data.numpy()
        # Each score must correspond to exactly one candidate token.
        assert position_scores.size()[0] == len(token_map) == len(list(values))
        predicted.append(token_map[np.argmax(values)])
    return predicted
def per_token_accuracy(gold_seq, pred_seq):
    """ Returns the per-token accuracy comparing two sequences (recall).

    Inputs:
        gold_seq (list of str): A list of gold tokens.
        pred_seq (list of str): A list of predicted tokens.

    Returns:
        float, the fraction of gold positions whose predicted token matches.
    """
    # zip truncates at the shorter sequence, so positions beyond the end of
    # pred_seq simply count as incorrect.
    matches = sum(1 for gold_token, pred_token in zip(gold_seq, pred_seq) if gold_token == pred_token)
    return float(matches) / len(gold_seq)
def forward_one_multilayer(rnns, lstm_input, layer_states, dropout_amount=0.0):
    """ Runs one step of a stacked RNN cell.

    Inputs:
        rnns (list of LSTMCell): One cell per layer.
        lstm_input (Tensor): Input vector for the bottom layer.
        layer_states (list): The (h, c) state pair for each layer.
        dropout_amount (float, optional): Dropout applied between layers
            (never to the stored outputs or the final layer's output).

    Returns:
        ((cell memories, hidden states), final hidden state, updated states).
    """
    new_states = []
    cell_states = []
    hidden_states = []
    layer_input = lstm_input
    last_layer = len(layer_states) - 1
    for depth, (cell, previous_state) in enumerate(zip(rnns, layer_states)):
        # Cells expect a batch dimension; we run with batch size 1.
        h, c = cell(layer_input.unsqueeze(0), previous_state)
        new_states.append((h, c))
        h = h.squeeze()
        c = c.squeeze()
        hidden_states.append(h)
        cell_states.append(c)
        # Dropout only on the input to the next layer, not on saved outputs.
        layer_input = h
        if depth < last_layer:
            layer_input = F.dropout(layer_input, p=dropout_amount)
    return (cell_states, hidden_states), layer_input, new_states
def encode_sequence(sequence, rnns, embedder, dropout_amount=0.0):
    """ Encodes a token sequence with a stack of RNN cells.

    Inputs:
        sequence (list of str): The tokens to encode.
        rnns (list of LSTMCell): One cell per layer.
        embedder: Function mapping a token to its input vector.
        dropout_amount (float, optional): Dropout applied between layers.

    Returns:
        ((final cell memories, final hidden states), per-token outputs) where
        the outputs are the top layer's hidden state for each token.
    """
    batch_size = 1
    # Zero-initialize an (h, c) pair per layer, on the same device as the cell
    # weights (CUDA tensors when the cells live on the GPU).
    layer_states = []
    for rnn in rnns:
        hidden_size = rnn.weight_hh.size()[1]
        if rnn.weight_hh.is_cuda:
            h_0 = torch.cuda.FloatTensor(batch_size, hidden_size).fill_(0)
            c_0 = torch.cuda.FloatTensor(batch_size, hidden_size).fill_(0)
        else:
            h_0 = torch.zeros(batch_size, hidden_size)
            c_0 = torch.zeros(batch_size, hidden_size)
        layer_states.append((h_0, c_0))
    outputs = []
    for token in sequence:
        ((cell_states, hidden_states), token_output, layer_states) = forward_one_multilayer(rnns, embedder(token), layer_states, dropout_amount)
        outputs.append(token_output)
    # Note: cell_states/hidden_states come from the final loop iteration, so an
    # empty sequence raises NameError (matching the original behavior).
    return ((cell_states, hidden_states), outputs)
def create_multilayer_lstm_params(num_layers, in_size, state_size, name=''):
    """ Builds a stack of LSTM cells as a ModuleList.

    Inputs:
        num_layers (int): Number of layers to create.
        in_size (int): The input size to the first layer; subsequent layers
            take the previous layer's state size as input.
        state_size (int): The size of the hidden states.
        name (str, optional): The name prefix used when logging each layer.
    """
    lstm_layers = []
    for layer_index in range(num_layers):
        layer_name = f'{name}-{layer_index}'
        print(f'LSTM {layer_name}: {in_size} x {state_size}; default Dynet initialization of hidden weights')
        lstm_layers.append(torch.nn.LSTMCell(input_size=int(in_size), hidden_size=int(state_size), bias=True))
        # Every layer above the first consumes the previous layer's state.
        in_size = state_size
    return torch.nn.ModuleList(lstm_layers)
def add_params(size, name=''):
    """ Creates a trainable parameter tensor, uniformly initialized in [-0.1, 0.1].

    Inputs:
        size (tuple of int): The shape to create.
        name (str, optional): The name used when logging the parameter.
    """
    if len(size) == 1:
        print(f'vector {name}: {size[0]}; uniform in [-0.1, 0.1]')
    else:
        print(f'matrix {name}: {size[0]} x {size[1]}; uniform in [-0.1, 0.1]')
    # Coerce every dimension to int in case sizes arrive as numpy/float values.
    dims = tuple(int(dim) for dim in size)
    return torch.nn.Parameter(torch.empty(dims).uniform_(-0.1, 0.1))
def write_prediction(fileptr, identifier, input_seq, probability, prediction, flat_prediction, gold_query, flat_gold_queries, gold_tables, index_in_interaction, database_username, database_password, database_timeout, compute_metrics=True):
    """Writes one predicted query (plus optional execution metrics) as a JSON line.

    When compute_metrics is True and the prediction is not an exact string
    match, the predicted SQL is executed against the database to compare
    result tables with the gold tables.
    """
    pred_obj = {}
    pred_obj['identifier'] = identifier
    # Identifiers look like "<database_id>/<interaction_id>"; anything else is
    # treated as an ATIS identifier.
    if (len(identifier.split('/')) == 2):
        (database_id, interaction_id) = identifier.split('/')
    else:
        database_id = 'atis'
        interaction_id = identifier
    pred_obj['database_id'] = database_id
    pred_obj['interaction_id'] = interaction_id
    pred_obj['input_seq'] = input_seq
    pred_obj['probability'] = probability
    pred_obj['prediction'] = prediction
    pred_obj['flat_prediction'] = flat_prediction
    pred_obj['gold_query'] = gold_query
    pred_obj['flat_gold_queries'] = flat_gold_queries
    pred_obj['index_in_interaction'] = index_in_interaction
    pred_obj['gold_tables'] = str(gold_tables)
    if compute_metrics:
        # Exact match: the flattened prediction equals any gold query string.
        correct_string = (' '.join(flat_prediction) in [' '.join(q) for q in flat_gold_queries])
        pred_obj['correct_string'] = correct_string
        if (not correct_string):
            # Execute the predicted SQL to compare its result table to gold.
            (syntactic, semantic, pred_table) = sql_util.execution_results(' '.join(flat_prediction), database_username, database_password, database_timeout)
            pred_table = sorted(pred_table)
            best_prec = 0.0
            best_rec = 0.0
            best_f1 = 0.0
            # Keep the best precision/recall/F1 over all gold tables.
            for gold_table in gold_tables:
                num_overlap = float(len((set(pred_table) & set(gold_table))))
                # NOTE(review): `prec` divides by the gold-table size and `rec`
                # by the predicted-table size — the conventional definitions are
                # the other way around; confirm intent.
                if (len(set(gold_table)) > 0):
                    prec = (num_overlap / len(set(gold_table)))
                else:
                    prec = 1.0
                if (len(set(pred_table)) > 0):
                    rec = (num_overlap / len(set(pred_table)))
                else:
                    rec = 1.0
                if ((prec > 0.0) and (rec > 0.0)):
                    f1 = ((2 * (prec * rec)) / (prec + rec))
                else:
                    # NOTE(review): falling back to 1.0 when either prec or rec
                    # is zero looks like it should be 0.0 — confirm.
                    f1 = 1.0
                best_prec = max(best_prec, prec)
                best_rec = max(best_rec, rec)
                best_f1 = max(best_f1, f1)
        else:
            # Exact string match: treat execution as trivially correct.
            syntactic = True
            semantic = True
            pred_table = []
            best_prec = 1.0
            best_rec = 1.0
            best_f1 = 1.0
        assert (best_prec <= 1.0)
        assert (best_rec <= 1.0)
        assert (best_f1 <= 1.0)
        pred_obj['syntactic'] = syntactic
        pred_obj['semantic'] = semantic
        correct_table = ((pred_table in gold_tables) or correct_string)
        pred_obj['correct_table'] = correct_table
        pred_obj['strict_correct_table'] = (correct_table and syntactic)
        pred_obj['pred_table'] = str(pred_table)
        pred_obj['table_prec'] = best_prec
        pred_obj['table_rec'] = best_rec
        pred_obj['table_f1'] = best_f1
    fileptr.write((json.dumps(pred_obj) + '\n'))
class Metrics(Enum):
    """Definitions of simple metrics to compute."""
    # Summed/averaged model loss (see update_sums / construct_averages).
    LOSS = 1
    # Fraction of predicted tokens matching the gold query position-by-position.
    TOKEN_ACCURACY = 2
    # Exact match between the flattened prediction and the gold query string.
    STRING_ACCURACY = 3
    # Execution result (table) matches the gold table.
    CORRECT_TABLES = 4
    # Table matches AND the query was syntactically valid when executed.
    STRICT_CORRECT_TABLES = 5
    # Query executed without semantic errors (tallied in update_sums).
    SEMANTIC_QUERIES = 6
    # Query executed without syntactic errors (tallied in update_sums).
    SYNTACTIC_QUERIES = 7
def get_progressbar(name, size):
    """Builds a console progress bar labeled with `name` over `size` steps.

    Inputs:
        name (str): The label displayed next to the bar.
        size (int): The maximum value of the progress bar.
    """
    widgets = [name, progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(), ' ', progressbar.ETA()]
    return progressbar.ProgressBar(maxval=size, widgets=widgets)
def train_epoch_with_utterances(batches, model, randomize=True):
    """Trains the model for a single epoch of utterance batches.

    Inputs:
        batches (list of UtteranceBatch): The batches to train on.
        model (ATISModel): The model object; its train_step returns the loss.
        randomize (bool): Whether to shuffle the batch order in place.

    Returns:
        float, the mean loss over all batches.
    """
    if randomize:
        random.shuffle(batches)
    progbar = get_progressbar('train ', len(batches))
    progbar.start()
    loss_sum = 0.0
    for batch_index, batch in enumerate(batches):
        loss_sum += model.train_step(batch)
        progbar.update(batch_index)
    progbar.finish()
    return loss_sum / len(batches)
def train_epoch_with_interactions(interaction_batches, params, model, randomize=True):
    """Trains the model for a single epoch of interaction batches.

    Inputs:
        interaction_batches (list of InteractionBatch): Batches of size one.
        params (namespace): Run parameters (data directory, max SQL length).
        model (ATISModel): The model; train_step returns the loss.
        randomize (bool): Whether to shuffle the batch order in place.

    Returns:
        float, the mean loss over all interaction batches.
    """
    if randomize:
        random.shuffle(interaction_batches)
    progbar = get_progressbar('train ', len(interaction_batches))
    progbar.start()
    loss_sum = 0.0
    for batch_index, interaction_batch in enumerate(interaction_batches):
        assert len(interaction_batch) == 1
        interaction = interaction_batch.items[0]
        # Known-problematic examples are skipped outright.
        if interaction.identifier == 'raw/atis2/12-1.1/ATIS2/TEXT/TEST/NOV92/770/5':
            continue
        if 'sparc' in params.data_directory and 'baseball_1' in interaction.identifier:
            continue
        loss_sum += model.train_step(interaction, params.train_maximum_sql_length)
        # Free cached GPU memory between interactions.
        torch.cuda.empty_cache()
        progbar.update(batch_index)
    progbar.finish()
    return loss_sum / len(interaction_batches)
def update_sums(metrics, metrics_sums, predicted_sequence, flat_sequence, gold_query, original_gold_query, gold_forcing=False, loss=None, token_accuracy=0.0, database_username='', database_password='', database_timeout=0, gold_table=None):
    """ Updates summing for metrics in an aggregator.

    TODO: don't use sums, just keep the raw value.
    """
    if (Metrics.LOSS in metrics):
        metrics_sums[Metrics.LOSS] += loss.item()
    if (Metrics.TOKEN_ACCURACY in metrics):
        if gold_forcing:
            # With teacher forcing the caller already computed token accuracy.
            metrics_sums[Metrics.TOKEN_ACCURACY] += token_accuracy
        else:
            # Position-by-position match of the prediction against the gold.
            num_tokens_correct = 0.0
            for (j, token) in enumerate(gold_query):
                if ((len(predicted_sequence) > j) and (predicted_sequence[j] == token)):
                    num_tokens_correct += 1
            metrics_sums[Metrics.TOKEN_ACCURACY] += (num_tokens_correct / len(gold_query))
    if (Metrics.STRING_ACCURACY in metrics):
        metrics_sums[Metrics.STRING_ACCURACY] += int((flat_sequence == original_gold_query))
    if (Metrics.CORRECT_TABLES in metrics):
        assert database_username, 'You did not provide a database username'
        assert database_password, 'You did not provide a database password'
        assert (database_timeout > 0), 'Database timeout is 0 seconds'
        if (flat_sequence != original_gold_query):
            # Execute the predicted SQL; an exact string match is trivially correct.
            (syntactic, semantic, table) = sql_util.execution_results(' '.join(flat_sequence), database_username, database_password, database_timeout)
        else:
            syntactic = True
            semantic = True
            table = gold_table
        metrics_sums[Metrics.CORRECT_TABLES] += int((table == gold_table))
        # These depend on `syntactic`/`semantic`/`table` computed above, so they
        # can only be tallied when CORRECT_TABLES was also requested.
        if (Metrics.SYNTACTIC_QUERIES in metrics):
            metrics_sums[Metrics.SYNTACTIC_QUERIES] += int(syntactic)
        if (Metrics.SEMANTIC_QUERIES in metrics):
            metrics_sums[Metrics.SEMANTIC_QUERIES] += int(semantic)
        if (Metrics.STRICT_CORRECT_TABLES in metrics):
            metrics_sums[Metrics.STRICT_CORRECT_TABLES] += int(((table == gold_table) and syntactic))
def construct_averages(metrics_sums, total_num):
    """ Computes the averages for metrics.

    Inputs:
        metrics_sums (dict Metric -> float): Sums for a metric.
        total_num (int): Number to divide by (average).

    Returns:
        dict Metric -> float: per-example averages; every metric except the
        loss is scaled to a percentage.
    """
    metrics_averages = {}
    for metric, value in metrics_sums.items():
        metrics_averages[metric] = value / total_num
        # Bug fix: keys are Metrics enum members, so the old check
        # `metric != 'loss'` was always true and the loss was also multiplied
        # by 100. Compare by the member's name so the raw loss stays unscaled
        # (plain non-enum keys are still scaled, as before).
        if getattr(metric, 'name', None) != 'LOSS':
            metrics_averages[metric] *= 100.0
    return metrics_averages
def evaluate_utterance_sample(sample, model, max_generation_length, name='', gold_forcing=False, metrics=None, total_num=(- 1), database_username='', database_password='', database_timeout=0, write_results=False):
    """Evaluates a sample of utterance examples.

    Inputs:
        sample (list of Utterance): Examples to evaluate.
        model (ATISModel): Model to predict with.
        max_generation_length (int): Maximum length to generate.
        name (str): Name to log with; also prefixes the predictions file.
        gold_forcing (bool): Whether to force the gold tokens during decoding.
        metrics (list of Metric): Metrics to evaluate with.
        total_num (int): Number to divide by when reporting results; defaults
            to the sample size when negative.
        database_username (str): Username to use for executing queries.
        database_password (str): Password to use when executing queries.
        database_timeout (float): Timeout on queries when executing.
        write_results (bool): Whether to write the results to a file.

    Returns:
        (dict Metric -> float, None): the averaged metrics.
    """
    assert metrics
    if (total_num < 0):
        total_num = len(sample)
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.0
    predictions_file = open((name + '_predictions.json'), 'w')
    print((('Predicting with filename ' + str(name)) + '_predictions.json'))
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    predictions = []
    for (i, item) in enumerate(sample):
        (_, loss, predicted_seq) = model.eval_step(item, max_generation_length, feed_gold_query=gold_forcing)
        # Normalize the loss by the gold query length.
        loss = (loss / len(item.gold_query()))
        predictions.append(predicted_seq)
        flat_sequence = item.flatten_sequence(predicted_seq)
        token_accuracy = torch_utils.per_token_accuracy(item.gold_query(), predicted_seq)
        if write_results:
            write_prediction(predictions_file, identifier=item.interaction.identifier, input_seq=item.input_sequence(), probability=0, prediction=predicted_seq, flat_prediction=flat_sequence, gold_query=item.gold_query(), flat_gold_queries=item.original_gold_queries(), gold_tables=item.gold_tables(), index_in_interaction=item.utterance_index, database_username=database_username, database_password=database_password, database_timeout=database_timeout)
        update_sums(metrics, metrics_sums, predicted_seq, flat_sequence, item.gold_query(), item.original_gold_queries()[0], gold_forcing, loss, token_accuracy, database_username=database_username, database_password=database_password, database_timeout=database_timeout, gold_table=item.gold_tables()[0])
        progbar.update(i)
    progbar.finish()
    predictions_file.close()
    return (construct_averages(metrics_sums, total_num), None)
def evaluate_interaction_sample(sample, model, max_generation_length, name='', gold_forcing=False, metrics=None, total_num=(- 1), database_username='', database_password='', database_timeout=0, use_predicted_queries=False, write_results=False, use_gpu=False, compute_metrics=False):
    """ Evaluates a sample of interactions.

    Returns (averaged metrics, flat list of per-utterance predictions).
    """
    predictions_file = open((name + '_predictions.json'), 'w')
    print(('Predicting with file ' + str((name + '_predictions.json'))))
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.0
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    num_utterances = 0
    # NOTE(review): computed but never used below — looks like dead code; confirm.
    ignore_with_gpu = [line.strip() for line in open('data/cpu_full_interactions.txt').readlines()]
    predictions = []
    # NOTE(review): this clobbers the `use_gpu` parameter by re-deriving it
    # from sys.argv — confirm the parameter is intentionally ignored.
    use_gpu = (not (('--no_gpus' in sys.argv) or ('--no_gpus=1' in sys.argv)))
    model.eval()
    for (i, interaction) in enumerate(sample):
        try:
            with torch.no_grad():
                if use_predicted_queries:
                    example_preds = model.predict_with_predicted_queries(interaction, max_generation_length)
                else:
                    example_preds = model.predict_with_gold_queries(interaction, max_generation_length, feed_gold_query=gold_forcing)
                torch.cuda.empty_cache()
        except RuntimeError as exception:
            # Typically an out-of-memory failure; abort the whole evaluation.
            print(('Failed on interaction: ' + str(interaction.identifier)))
            print(exception)
            print('\n\n')
            exit()
        predictions.extend(example_preds)
        # One prediction per utterance (or none at all for this interaction).
        assert ((len(example_preds) == len(interaction.interaction.utterances)) or (not example_preds))
        for (j, pred) in enumerate(example_preds):
            num_utterances += 1
            (sequence, loss, token_accuracy, _, decoder_results) = pred
            # Gather the gold data for this utterance, from either the
            # dynamically processed utterances or the gold interaction.
            if use_predicted_queries:
                item = interaction.processed_utterances[j]
                original_utt = interaction.interaction.utterances[item.index]
                gold_query = original_utt.gold_query_to_use
                original_gold_query = original_utt.original_gold_query
                gold_table = original_utt.gold_sql_results
                gold_queries = [q[0] for q in original_utt.all_gold_queries]
                gold_tables = [q[1] for q in original_utt.all_gold_queries]
                index = item.index
            else:
                item = interaction.gold_utterances()[j]
                gold_query = item.gold_query()
                original_gold_query = item.original_gold_query()
                gold_table = item.gold_table()
                gold_queries = item.original_gold_queries()
                gold_tables = item.gold_tables()
                index = item.utterance_index
            if loss:
                # Normalize the loss by the gold query length.
                loss = (loss / len(gold_query))
            flat_sequence = item.flatten_sequence(sequence)
            if write_results:
                write_prediction(predictions_file, identifier=interaction.identifier, input_seq=item.input_sequence(), probability=decoder_results.probability, prediction=sequence, flat_prediction=flat_sequence, gold_query=gold_query, flat_gold_queries=gold_queries, gold_tables=gold_tables, index_in_interaction=index, database_username=database_username, database_password=database_password, database_timeout=database_timeout, compute_metrics=compute_metrics)
            update_sums(metrics, metrics_sums, sequence, flat_sequence, gold_query, original_gold_query, gold_forcing, loss, token_accuracy, database_username=database_username, database_password=database_password, database_timeout=database_timeout, gold_table=gold_table)
        progbar.update(i)
    progbar.finish()
    if (total_num < 0):
        total_num = num_utterances
    predictions_file.close()
    return (construct_averages(metrics_sums, total_num), predictions)
def evaluate_using_predicted_queries(sample, model, name='', gold_forcing=False, metrics=None, total_num=(- 1), database_username='', database_password='', database_timeout=0, snippet_keep_age=1):
    """Evaluates interactions where each utterance conditions on the model's
    own previous predictions (no gold forcing allowed).

    Returns (averaged metrics, list of per-interaction prediction lists).
    """
    predictions_file = open((name + '_predictions.json'), 'w')
    print(('Predicting with file ' + str((name + '_predictions.json'))))
    assert (not gold_forcing)
    metrics_sums = {}
    for metric in metrics:
        metrics_sums[metric] = 0.0
    progbar = get_progressbar(name, len(sample))
    progbar.start()
    # NOTE(review): num_utterances is never incremented below, so when
    # total_num < 0 the final division uses 0 — confirm this path is unused.
    num_utterances = 0
    predictions = []
    for (i, item) in enumerate(sample):
        int_predictions = []
        item.start_interaction()
        while (not item.done()):
            utterance = item.next_utterance(snippet_keep_age)
            (predicted_sequence, loss, _, probability) = model.eval_step(utterance)
            int_predictions.append((utterance, predicted_sequence))
            flat_sequence = utterance.flatten_sequence(predicted_sequence)
            # Only keep a prediction as context if it executes and the model is
            # confident enough; otherwise feed an empty query forward.
            # NOTE(review): 0.24 is a magic confidence threshold — confirm origin.
            if (sql_util.executable(flat_sequence, username=database_username, password=database_password, timeout=database_timeout) and (probability >= 0.24)):
                utterance.set_pred_query(item.remove_snippets(predicted_sequence))
                item.add_utterance(utterance, item.remove_snippets(predicted_sequence), previous_snippets=utterance.snippets())
            else:
                seq = []
                utterance.set_pred_query(seq)
                item.add_utterance(utterance, seq, previous_snippets=utterance.snippets())
            original_utt = item.interaction.utterances[utterance.index]
            write_prediction(predictions_file, identifier=item.interaction.identifier, input_seq=utterance.input_sequence(), probability=probability, prediction=predicted_sequence, flat_prediction=flat_sequence, gold_query=original_utt.gold_query_to_use, flat_gold_queries=[q[0] for q in original_utt.all_gold_queries], gold_tables=[q[1] for q in original_utt.all_gold_queries], index_in_interaction=utterance.index, database_username=database_username, database_password=database_password, database_timeout=database_timeout)
            update_sums(metrics, metrics_sums, predicted_sequence, flat_sequence, original_utt.gold_query_to_use, original_utt.original_gold_query, gold_forcing, loss, token_accuracy=0, database_username=database_username, database_password=database_password, database_timeout=database_timeout, gold_table=original_utt.gold_sql_results)
        predictions.append(int_predictions)
        progbar.update(i)
    progbar.finish()
    if (total_num < 0):
        total_num = num_utterances
    predictions_file.close()
    return (construct_averages(metrics_sums, total_num), predictions)
def str2bool(value):
    """Parses a command-line boolean flag value.

    Bug fix helper: the original parser used `type=bool`, which treats ANY
    non-empty string as True (e.g. `--train False` enabled training). This
    accepts the usual truthy/falsy spellings and rejects anything else.

    Args:
        value: The raw string from the command line (or an actual bool).

    Returns:
        The parsed boolean.

    Raises:
        argparse.ArgumentTypeError: If the value is not a recognized boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = str(value).lower()
    if lowered in ('1', 'true', 't', 'yes', 'y'):
        return True
    if lowered in ('0', 'false', 'f', 'no', 'n', ''):
        return False
    raise argparse.ArgumentTypeError('Expected a boolean value, got %r' % value)


def interpret_args():
    """Interprets the command line arguments, and returns a namespace.

    Returns:
        The parsed `argparse.Namespace` of run options.

    Raises:
        ValueError: If no run mode (train/evaluate/interactive/attention) is
            selected, if testing is enabled without evaluation, or if an
            args.log already exists in the log directory when training.
    """
    parser = argparse.ArgumentParser()

    # Hardware.
    parser.add_argument('--no_gpus', type=str2bool, default=1)

    # Raw and processed data locations.
    parser.add_argument('--raw_train_filename', type=str,
                        default='../atis_data/data/resplit/processed/train_with_tables.pkl')
    parser.add_argument('--raw_dev_filename', type=str,
                        default='../atis_data/data/resplit/processed/dev_with_tables.pkl')
    parser.add_argument('--raw_validation_filename', type=str,
                        default='../atis_data/data/resplit/processed/valid_with_tables.pkl')
    parser.add_argument('--raw_test_filename', type=str,
                        default='../atis_data/data/resplit/processed/test_with_tables.pkl')
    parser.add_argument('--data_directory', type=str, default='processed_data')
    parser.add_argument('--processed_train_filename', type=str, default='train.pkl')
    parser.add_argument('--processed_dev_filename', type=str, default='dev.pkl')
    parser.add_argument('--processed_validation_filename', type=str, default='validation.pkl')
    parser.add_argument('--processed_test_filename', type=str, default='test.pkl')
    parser.add_argument('--database_schema_filename', type=str, default=None)
    parser.add_argument('--embedding_filename', type=str, default=None)
    parser.add_argument('--input_vocabulary_filename', type=str, default='input_vocabulary.pkl')
    parser.add_argument('--output_vocabulary_filename', type=str, default='output_vocabulary.pkl')
    parser.add_argument('--input_key', type=str, default='nl_with_dates')

    # Anonymization and snippet handling.
    parser.add_argument('--anonymize', type=str2bool, default=False)
    parser.add_argument('--anonymization_scoring', type=str2bool, default=False)
    parser.add_argument('--use_snippets', type=str2bool, default=False)
    parser.add_argument('--use_previous_query', type=str2bool, default=False)
    parser.add_argument('--maximum_queries', type=int, default=1)

    # Attention / copy options.
    parser.add_argument('--use_copy_switch', type=str2bool, default=False)
    parser.add_argument('--use_query_attention', type=str2bool, default=False)
    parser.add_argument('--use_utterance_attention', type=str2bool, default=False)

    # Training regime.
    parser.add_argument('--freeze', type=str2bool, default=False)
    parser.add_argument('--scheduler', type=str2bool, default=False)

    # BERT options.
    parser.add_argument('--use_bert', type=str2bool, default=False)
    parser.add_argument('--bert_type_abb', type=str,
                        help='Type of BERT model to load. e.g.) uS, uL, cS, cL, and mcS')
    parser.add_argument('--bert_input_version', type=str, default='v1')
    parser.add_argument('--fine_tune_bert', type=str2bool, default=False)
    parser.add_argument('--lr_bert', default=1e-05, type=float,
                        help='BERT model learning rate.')

    # Logging and bookkeeping.
    parser.add_argument('--logdir', type=str, default='logs')
    parser.add_argument('--deterministic', type=str2bool, default=False)
    parser.add_argument('--num_train', type=int, default=-1)
    parser.add_argument('--logfile', type=str, default='log.txt')
    parser.add_argument('--results_file', type=str, default='results.txt')

    # Model architecture.
    parser.add_argument('--input_embedding_size', type=int, default=300)
    parser.add_argument('--output_embedding_size', type=int, default=300)
    parser.add_argument('--encoder_state_size', type=int, default=300)
    parser.add_argument('--decoder_state_size', type=int, default=300)
    parser.add_argument('--encoder_num_layers', type=int, default=1)
    parser.add_argument('--decoder_num_layers', type=int, default=2)
    parser.add_argument('--snippet_num_layers', type=int, default=1)
    parser.add_argument('--maximum_utterances', type=int, default=5)
    parser.add_argument('--state_positional_embeddings', type=str2bool, default=False)
    parser.add_argument('--positional_embedding_size', type=int, default=50)
    parser.add_argument('--snippet_age_embedding', type=str2bool, default=False)
    parser.add_argument('--snippet_age_embedding_size', type=int, default=64)
    parser.add_argument('--max_snippet_age_embedding', type=int, default=4)
    parser.add_argument('--previous_decoder_snippet_encoding', type=str2bool, default=False)
    parser.add_argument('--discourse_level_lstm', type=str2bool, default=False)
    parser.add_argument('--use_schema_attention', type=str2bool, default=False)
    parser.add_argument('--use_encoder_attention', type=str2bool, default=False)
    parser.add_argument('--use_schema_encoder', type=str2bool, default=False)
    parser.add_argument('--use_schema_self_attention', type=str2bool, default=False)
    parser.add_argument('--use_schema_encoder_2', type=str2bool, default=False)

    # Optimization.
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--train_maximum_sql_length', type=int, default=200)
    parser.add_argument('--train_evaluation_size', type=int, default=100)
    parser.add_argument('--dropout_amount', type=float, default=0.5)
    parser.add_argument('--initial_patience', type=float, default=10.0)
    parser.add_argument('--patience_ratio', type=float, default=1.01)
    parser.add_argument('--initial_learning_rate', type=float, default=0.001)
    parser.add_argument('--learning_rate_ratio', type=float, default=0.8)
    parser.add_argument('--interaction_level', type=str2bool, default=False)
    parser.add_argument('--reweight_batch', type=str2bool, default=False)

    # Run modes.
    parser.add_argument('--train', type=str2bool, default=False)
    parser.add_argument('--debug', type=str2bool, default=False)
    parser.add_argument('--evaluate', type=str2bool, default=False)
    parser.add_argument('--attention', type=str2bool, default=False)
    parser.add_argument('--save_file', type=str, default='')
    parser.add_argument('--enable_testing', type=str2bool, default=False)
    parser.add_argument('--use_predicted_queries', type=str2bool, default=False)
    parser.add_argument('--evaluate_split', type=str, default='dev')
    parser.add_argument('--evaluate_with_gold_forcing', type=str2bool, default=False)
    parser.add_argument('--eval_maximum_sql_length', type=int, default=1000)
    parser.add_argument('--results_note', type=str, default='')
    parser.add_argument('--compute_metrics', type=str2bool, default=False)
    parser.add_argument('--reference_results', type=str, default='')
    parser.add_argument('--interactive', type=str2bool, default=False)

    # Database execution credentials.
    parser.add_argument('--database_username', type=str, default='aviarmy')
    parser.add_argument('--database_password', type=str, default='aviarmy')
    parser.add_argument('--database_timeout', type=int, default=2)

    args = parser.parse_args()

    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)

    if not (args.train or args.evaluate or args.interactive or args.attention):
        raise ValueError('You need to be training or evaluating')
    if args.enable_testing and not args.evaluate:
        raise ValueError('You should evaluate the model if enabling testing')

    if args.train:
        # Refuse to clobber an existing run's recorded arguments.
        args_file = args.logdir + '/args.log'
        if os.path.exists(args_file):
            raise ValueError('Warning: arguments already exist in ' + str(args_file))
        with open(args_file, 'w') as outfile:
            outfile.write(str(args))

    return args
def find_shortest_path(start, end, graph):
    """Finds a shortest path from `start` to `end` in `graph`.

    Bug fix: the original used `stack.pop()` (LIFO), i.e. depth-first
    search, which can return an arbitrarily long path despite the name.
    A FIFO queue (breadth-first search) guarantees a minimum-hop path,
    which keeps the generated join chains in `gen_from` as short as
    possible.

    Args:
        start: Start node (table index).
        end: Target node (table index).
        graph: Mapping from node to a list of (neighbor, edge_info) pairs.

    Returns:
        A list of (node, edge_info) pairs describing the hops from `start`
        to `end` (empty list if start == end), or None if `end` is
        unreachable.
    """
    from collections import deque

    queue = deque([(start, [])])
    visited = {start}
    while queue:
        node, history = queue.popleft()
        if node == end:
            return history
        for neighbor, edge_info in graph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append((neighbor, history + [(neighbor, edge_info)]))
    return None
def gen_from(candidate_tables, schema):
    """Generates a SQL FROM clause joining `candidate_tables` via foreign keys.

    Args:
        candidate_tables: Collection of table indices to include.
        schema: Schema dict with 'table_names_original', 'column_names',
            'column_names_original', 'foreign_keys', and 'db_id'.

    Returns:
        Tuple of (table_alias_dict, from_clause_string). The alias dict maps
        table index -> alias number (T1, T2, ...); it is empty when zero or
        one table is involved (no aliasing needed).
    """
    if len(candidate_tables) <= 1:
        if len(candidate_tables) == 1:
            ret = 'from {}'.format(
                schema['table_names_original'][list(candidate_tables)[0]])
        else:
            # No candidates at all: fall back to the first table.
            ret = 'from {}'.format(schema['table_names_original'][0])
        return {}, ret

    table_alias_dict = {}
    idx = 1

    # Build an undirected foreign-key graph: each FK contributes edges in
    # both directions, labeled with the (from_col, to_col) pair.
    graph = defaultdict(list)
    for acol, bcol in schema['foreign_keys']:
        t1 = schema['column_names'][acol][0]
        t2 = schema['column_names'][bcol][0]
        graph[t1].append((t2, (acol, bcol)))
        graph[t2].append((t1, (bcol, acol)))

    candidate_tables = list(candidate_tables)
    start = candidate_tables[0]
    table_alias_dict[start] = idx
    idx += 1
    ret = 'from {} as T1'.format(schema['table_names_original'][start])

    try:
        for end in candidate_tables[1:]:
            if end in table_alias_dict:
                continue
            path = find_shortest_path(start, end, graph)
            prev_table = start
            if not path:
                # No FK path: emit a bare (cross) join for this table.
                table_alias_dict[end] = idx
                idx += 1
                ret = '{} join {} as T{}'.format(
                    ret, schema['table_names_original'][end],
                    table_alias_dict[end])
                continue
            for node, (acol, bcol) in path:
                if node in table_alias_dict:
                    prev_table = node
                    continue
                table_alias_dict[node] = idx
                idx += 1
                ret = '{} join {} as T{} on T{}.{} = T{}.{}'.format(
                    ret,
                    schema['table_names_original'][node],
                    table_alias_dict[node],
                    table_alias_dict[prev_table],
                    schema['column_names_original'][acol][1],
                    table_alias_dict[node],
                    schema['column_names_original'][bcol][1])
                prev_table = node
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; best-effort: log and return what was built so far.
        traceback.print_exc()
        print('db:{}'.format(schema['db_id']))

    return table_alias_dict, ret