# NOTE: dataset-extraction artifact ("code stringlengths ... |" / "|---|")
# neutralized into a comment; the content below is Python source for
# EditSQL-style SQL preprocessing/postprocessing.
def normalize_space(format_sql):
    """Collapse whitespace and pad punctuation in sqlparse output, then
    re-glue clause fragments onto canonical lines (one clause per line)."""
    padded_lines = []
    for raw_line in format_sql.split('\n'):
        piece = raw_line.strip()
        for ch in (',', '(', ')'):
            piece = piece.replace(ch, ' {} '.format(ch))
        padded_lines.append(' '.join(piece.split()))
    sql = '\n'.join(padded_lines)
    # Re-attach/split clauses; order of the pairs matters.
    fixups = (
        ('\njoin', ' join'), (',\n', ', '), (' where', '\nwhere'),
        (' intersect', '\nintersect'), ('union ', 'union\n'),
        ('\nand', ' and'),
        ('order by t2 .\nstart desc', 'order by t2 . start desc'),
    )
    for old, new in fixups:
        sql = sql.replace(old, new)
    return sql
|
def get_candidate_tables(format_sql, schema):
    """Collect ids of tables referenced via "table.column" tokens.

    Returns (candidate_tables_id, table_names_original) where each id indexes
    into schema['table_names_original'] (original casing preserved).
    Raises ValueError if a referenced table is not in the schema.
    """
    seen_tables = set()
    for tok in format_sql.split():
        if '.' in tok:
            seen_tables.add(tok.split('.')[0])
    lowered_names = [name.lower() for name in schema['table_names_original']]
    candidate_tables_id = [lowered_names.index(name) for name in list(seen_tables)]
    # list.index never returns -1, so this is a no-op sanity check.
    assert (-1) not in candidate_tables_id
    return (candidate_tables_id, schema['table_names_original'])
|
def get_surface_form_orig(format_sql_2, schema):
    """Rewrite lower-cased "table.column" / "table.*" surface forms in the SQL
    back to their original (schema) casing."""
    lowered_forms = []
    original_forms = []
    table_names_original = schema['table_names_original']
    # One surface form per column; schema-level columns (table_id < 0) are bare.
    for (table_id, column_name) in schema['column_names_original']:
        if table_id < 0:
            surface = column_name
        else:
            surface = '{}.{}'.format(table_names_original[table_id], column_name)
        lowered_forms.append(surface.lower())
        original_forms.append(surface)
    # Plus one "table.*" form per table.
    for tbl in table_names_original:
        lowered_forms.append('{}.*'.format(tbl.lower()))
        original_forms.append('{}.*'.format(tbl))
    assert len(lowered_forms) == len(original_forms)
    for low, orig in zip(lowered_forms, original_forms):
        format_sql_2 = format_sql_2.replace(low, orig)
    return format_sql_2
|
def add_from_clase(sub_sql, from_clause):
    """Insert `from_clause` into one formatted SQL line containing a SELECT.

    The line may have text before the `select` token (e.g. "where x in (")
    and may close an enclosing subquery with unmatched ')' after the select
    list; the FROM clause is inserted before such closers. Returns a list of
    SQL fragments to be re-joined by the caller.
    (Name keeps the original "clase" typo so existing callers still resolve.)
    """
    select_right_sub_sql = []
    left_sub_sql = []
    left = True
    num_left_parathesis = 0
    num_right_parathesis = 0
    tokens = sub_sql.split()
    # Split into the part before `select` and the part from `select` onward,
    # counting parentheses only on the select side.
    for (ii, token) in enumerate(tokens):
        if (token == 'select'):
            left = False
        if left:
            left_sub_sql.append(token)
            continue
        select_right_sub_sql.append(token)
        if (token == '('):
            num_left_parathesis += 1
        elif (token == ')'):
            num_right_parathesis += 1
    def remove_missing_tables_from_select(select_statement):
        # Drop trailing "table.*" items (not wrapped in count) from the select
        # list; they are artifacts of from-removal with no column information.
        tokens = select_statement.split(',')
        stop_idx = (- 1)
        for i in range(len(tokens)):
            # Scan from the right for the first item worth keeping.
            idx = ((len(tokens) - 1) - i)
            token = tokens[idx]
            if (('.*' in token) and ('count ' not in token)):
                pass
            else:
                stop_idx = (idx + 1)
                break
        if (stop_idx > 0):
            new_select_statement = ','.join(tokens[:stop_idx]).strip()
        else:
            new_select_statement = select_statement
        return new_select_statement
    if ((num_left_parathesis == num_right_parathesis) or (num_left_parathesis > num_right_parathesis)):
        # Balanced (or still-open) parens: FROM clause simply goes at the end.
        sub_sqls = []
        sub_sqls.append(remove_missing_tables_from_select(sub_sql))
        sub_sqls.append(from_clause)
    else:
        assert (num_left_parathesis < num_right_parathesis)
        # Extra ')' belong to an enclosing subquery: scan from the right for
        # the first unmatched closer and insert the FROM clause before it.
        select_sub_sql = []
        right_sub_sql = []
        for i in range(len(select_right_sub_sql)):
            token_idx = ((len(select_right_sub_sql) - 1) - i)
            token = select_right_sub_sql[token_idx]
            if (token == ')'):
                num_right_parathesis -= 1
                if (num_right_parathesis == num_left_parathesis):
                    select_sub_sql = select_right_sub_sql[:token_idx]
                    right_sub_sql = select_right_sub_sql[token_idx:]
                    break
        sub_sqls = []
        if (len(left_sub_sql) > 0):
            sub_sqls.append(' '.join(left_sub_sql))
        if (len(select_sub_sql) > 0):
            new_select_statement = remove_missing_tables_from_select(' '.join(select_sub_sql))
            sub_sqls.append(new_select_statement)
        sub_sqls.append(from_clause)
        if (len(right_sub_sql) > 0):
            sub_sqls.append(' '.join(right_sub_sql))
    return sub_sqls
|
def postprocess_single(format_sql_2, schema, start_alias_id=0):
    """Re-insert a FROM clause into one (non-set-operation) from-less query.

    format_sql_2: newline-formatted SQL whose tables appear only as
        "table.column" tokens.
    schema: Spider-style schema dict for the target database.
    start_alias_id: first alias number for generated T<i> aliases, letting
        nested callers keep aliases unique across subqueries.
    Returns (final_sql, next_start_alias_id).
    """
    (candidate_tables_id, table_names_original) = get_candidate_tables(format_sql_2, schema)
    format_sql_2 = get_surface_form_orig(format_sql_2, schema)
    if (len(candidate_tables_id) == 0):
        # No table.column tokens: nothing to reconstruct, just flatten.
        final_sql = format_sql_2.replace('\n', ' ')
    elif (len(candidate_tables_id) == 1):
        # Single table: plain "from <table>", then drop the redundant prefix.
        table_name = table_names_original[candidate_tables_id[0]]
        from_clause = 'from {}'.format(table_name)
        format_sql_3 = []
        for sub_sql in format_sql_2.split('\n'):
            if ('select' in sub_sql):
                format_sql_3 += add_from_clase(sub_sql, from_clause)
            else:
                format_sql_3.append(sub_sql)
        final_sql = ' '.join(format_sql_3).replace('{}.'.format(table_name), '')
    else:
        # Multiple tables: gen_from (defined elsewhere in the project) builds a
        # joined FROM clause with T1..Tn aliases; shift them by start_alias_id.
        (table_alias_dict, ret) = gen_from(candidate_tables_id, schema)
        from_clause = ret
        for i in range(len(table_alias_dict)):
            from_clause = from_clause.replace('T{}'.format((i + 1)), 'T{}'.format(((i + 1) + start_alias_id)))
        table_name_to_alias = {}
        for (table_id, alias_id) in table_alias_dict.items():
            table_name = table_names_original[table_id]
            alias = 'T{}'.format((alias_id + start_alias_id))
            table_name_to_alias[table_name] = alias
        start_alias_id = (start_alias_id + len(table_alias_dict))
        format_sql_3 = []
        for sub_sql in format_sql_2.split('\n'):
            if ('select' in sub_sql):
                format_sql_3 += add_from_clase(sub_sql, from_clause)
            else:
                format_sql_3.append(sub_sql)
        format_sql_3 = ' '.join(format_sql_3)
        # Qualify columns with their generated alias instead of the table name.
        for (table_name, alias) in table_name_to_alias.items():
            format_sql_3 = format_sql_3.replace('{}.'.format(table_name), '{}.'.format(alias))
        final_sql = format_sql_3
    # "count ( T<i>.* )" carries no real column; rewrite to count ( * ).
    # Only aliases T0..T4 are handled — presumably <= 5 tables per query
    # (TODO confirm against gen_from).
    for i in range(5):
        final_sql = final_sql.replace('select count ( T{}.* ) '.format(i), 'select count ( * ) ')
        final_sql = final_sql.replace('count ( T{}.* ) from '.format(i), 'count ( * ) from ')
        final_sql = final_sql.replace('order by count ( T{}.* ) '.format(i), 'order by count ( * ) ')
        final_sql = final_sql.replace('having count ( T{}.* ) '.format(i), 'having count ( * ) ')
    return (final_sql, start_alias_id)
|
def postprocess_nested(format_sql_2, schema):
    """Re-insert FROM clauses into a query containing multiple SELECTs.

    A single-table query takes the same shortcut as postprocess_single.
    Otherwise the query is split on except/union/intersect lines, and each
    piece (and each nested SELECT chunk inside it) is processed separately
    while threading start_alias_id so generated T<i> aliases stay unique.
    """
    (candidate_tables_id, table_names_original) = get_candidate_tables(format_sql_2, schema)
    if (len(candidate_tables_id) == 1):
        format_sql_2 = get_surface_form_orig(format_sql_2, schema)
        table_name = table_names_original[candidate_tables_id[0]]
        from_clause = 'from {}'.format(table_name)
        format_sql_3 = []
        for sub_sql in format_sql_2.split('\n'):
            if ('select' in sub_sql):
                format_sql_3 += add_from_clase(sub_sql, from_clause)
            else:
                format_sql_3.append(sub_sql)
        final_sql = ' '.join(format_sql_3).replace('{}.'.format(table_name), '')
    else:
        final_sql = []
        # NOTE(review): num_keywords / num_select are computed but never used.
        num_keywords = ((format_sql_2.count('except') + format_sql_2.count('union')) + format_sql_2.count('intersect'))
        num_select = format_sql_2.count('select')
        def postprocess_subquery(sub_query_one, schema, start_alias_id_1):
            # Process each SELECT-rooted chunk of this piece independently,
            # passing the running alias counter through postprocess_single.
            num_select = sub_query_one.count('select ')
            final_sub_sql = []
            sub_query = []
            for sub_sql in sub_query_one.split('\n'):
                if ('select' in sub_sql):
                    # A new SELECT starts: flush the chunk accumulated so far.
                    if (len(sub_query) > 0):
                        sub_query = '\n'.join(sub_query)
                        (sub_query, start_alias_id_1) = postprocess_single(sub_query, schema, start_alias_id_1)
                        final_sub_sql.append(sub_query)
                        sub_query = []
                    sub_query.append(sub_sql)
                else:
                    sub_query.append(sub_sql)
            if (len(sub_query) > 0):
                sub_query = '\n'.join(sub_query)
                (sub_query, start_alias_id_1) = postprocess_single(sub_query, schema, start_alias_id_1)
                final_sub_sql.append(sub_query)
            final_sub_sql = ' '.join(final_sub_sql)
            # Second element (failure flag) is always False here.
            return (final_sub_sql, False, start_alias_id_1)
        start_alias_id = 0
        sub_query = []
        # Split on set-operation lines; each side is post-processed on its own.
        for sub_sql in format_sql_2.split('\n'):
            if (('except' in sub_sql) or ('union' in sub_sql) or ('intersect' in sub_sql)):
                sub_query = '\n'.join(sub_query)
                (sub_query, _, start_alias_id) = postprocess_subquery(sub_query, schema, start_alias_id)
                final_sql.append(sub_query)
                final_sql.append(sub_sql)
                sub_query = []
            else:
                sub_query.append(sub_sql)
        if (len(sub_query) > 0):
            sub_query = '\n'.join(sub_query)
            (sub_query, _, start_alias_id) = postprocess_subquery(sub_query, schema, start_alias_id)
            final_sql.append(sub_query)
        final_sql = ' '.join(final_sql)
        # add_from_clase can leave "count ( * ) (" in front of a subquery.
        final_sql = final_sql.replace('select count ( * ) (', 'select count ( * ) from (')
    return final_sql
|
def postprocess_one(pred_sql, schema):
    """Convert one predicted from-less SQL string into executable SQL.

    Undoes the training-time token merges (group_by/order_by/limit_value),
    substitutes the 'value' placeholder with 1, then re-inserts FROM clauses
    via postprocess_single / postprocess_nested. Returns the raw prediction
    unchanged if sqlparse cannot reformat it.
    """
    pred_sql = pred_sql.replace('group_by', 'group by').replace('order_by', 'order by').replace('limit_value', 'limit 1').replace('_EOS', '').replace(' value ', ' 1 ').replace('distinct', '').strip(',').strip()
    # A trailing 'value' placeholder is not caught by the ' value ' replace.
    if pred_sql.endswith('value'):
        pred_sql = (pred_sql[:(- len('value'))] + '1')
    try:
        format_sql = sqlparse.format(pred_sql, reindent=True)
    except Exception:
        # sqlparse can choke on malformed predictions; fall back to raw SQL.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit.)
        return pred_sql
    format_sql_2 = normalize_space(format_sql)
    num_select = format_sql_2.count('select')
    if (num_select > 1):
        final_sql = postprocess_nested(format_sql_2, schema)
    else:
        (final_sql, _) = postprocess_single(format_sql_2, schema)
    return final_sql
|
def postprocess(predictions, database_schema, remove_from=False):
    """Group (optionally post-processed) predicted SQL by database id.

    predictions: iterable of dicts with database_id, interaction_id,
        index_in_interaction, flat_prediction and flat_gold_queries.
    Returns {db_id: [(sql, interaction_id, turn_id), ...]}. `correct`/`total`
    track exact-match accuracy against the first gold query (not returned).
    """
    correct, total = 0, 0
    postprocess_sqls = {}
    for pred in predictions:
        db_id = pred['database_id']
        schema = database_schema[db_id]
        postprocess_sqls.setdefault(db_id, [])
        total += 1
        pred_sql_str = ' '.join(pred['flat_prediction'])
        gold_sql_str = ' '.join(pred['flat_gold_queries'][0])
        if pred_sql_str == gold_sql_str:
            correct += 1
        # Only rewrite into executable SQL when asked to undo from-removal.
        if remove_from:
            sql_out = postprocess_one(pred_sql_str, schema)
        else:
            sql_out = pred_sql_str
        postprocess_sqls[db_id].append((sql_out, pred['interaction_id'], pred['index_in_interaction']))
    return postprocess_sqls
|
def read_prediction(pred_file):
    """Load predictions from `pred_file`, one JSON object per line."""
    print('Read prediction from', pred_file)
    with open(pred_file) as handle:
        predictions = [json.loads(row) for row in handle]
    print('Number of predictions', len(predictions))
    return predictions
|
def read_schema(table_schema_path):
    """Read a Spider-style tables.json and key each schema dict by db_id."""
    with open(table_schema_path) as handle:
        schemas = json.load(handle)
    return {entry['db_id']: entry for entry in schemas}
|
def write_and_evaluate(postprocess_sqls, db_path, table_schema_path, gold_path, dataset):
    """Write predictions to output_temp.txt (ordered as in the gold file) and
    return the shell command that runs the matching evaluation script.

    postprocess_sqls: {db_id: [(sql, interaction_id, turn_id), ...]}.
    The returned command also deletes the temp file after evaluation.
    """
    db_list = []
    # Gold file is TSV "<sql>\t<db_id>"; collect db ids in first-seen order.
    with open(gold_path) as f:
        for line in f:
            line_split = line.strip().split('\t')
            if (len(line_split) != 2):
                continue
            db = line.strip().split('\t')[1]
            if (db not in db_list):
                db_list.append(db)
    output_file = 'output_temp.txt'
    if (dataset == 'spider'):
        # One prediction per line, grouped by database in gold-file order.
        with open(output_file, 'w') as f:
            for db in db_list:
                for (postprocess_sql, interaction_id, turn_id) in postprocess_sqls[db]:
                    f.write((postprocess_sql + '\n'))
        command = 'python3 eval_scripts/evaluation.py --db {} --table {} --etype match --gold {} --pred {}'.format(db_path, table_schema_path, gold_path, os.path.abspath(output_file))
    elif (dataset in ['sparc', 'cosql']):
        # Interaction-level format: a blank line separates interactions
        # (turn_id == 0 marks the start of a new interaction).
        cnt = 0
        with open(output_file, 'w') as f:
            for db in db_list:
                for (postprocess_sql, interaction_id, turn_id) in postprocess_sqls[db]:
                    if ((turn_id == 0) and (cnt > 0)):
                        f.write('\n')
                    f.write('{}\n'.format(postprocess_sql))
                    cnt += 1
        command = 'python2 eval_scripts/evaluation_sqa.py --db {} --table {} --etype match --gold {} --pred {}'.format(db_path, table_schema_path, gold_path, os.path.abspath(output_file))
    # NOTE(review): an unrecognized `dataset` leaves `command` unbound and
    # raises NameError on the next line.
    command += '; rm output_temp.txt'
    return command
|
def write_interaction(interaction_list, split, output_dir):
    """Dump interactions as newline-separated JSON and as a pickled list.

    The pickle variant rewrites each utterance's 'sql' string into
    [(tokenized_sql, [])] as expected by the downstream ATIS-style loader.
    (The JSON is written first, so it keeps the original string form.)
    """
    json_path = os.path.join(output_dir, split + '.json')
    pkl_path = os.path.join(output_dir, split + '.pkl')
    with open(json_path, 'w') as out:
        for interaction in interaction_list:
            json.dump(interaction, out, indent=4)
            out.write('\n')
    new_objs = []
    for obj in interaction_list:
        rebuilt_turns = []
        for ut in obj['interaction']:
            # One (tokens, results) pair per candidate SQL; single candidate here.
            ut['sql'] = [(candidate.split(), []) for candidate in [ut['sql']]]
            rebuilt_turns.append(ut)
        obj['interaction'] = rebuilt_turns
        new_objs.append(obj)
    with open(pkl_path, 'wb') as out:
        pickle.dump(new_objs, out)
    return
|
def read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas_dict):
    """Populate the three dicts with per-database schema info from tables.json.

    schema_tokens[db] gets lower-cased "table.column" / "table.*" surface
    forms; column_names[db] gets lower-cased bare column names;
    database_schemas_dict[db] gets the raw schema dict.
    """
    with open(database_schema_filename) as handle:
        database_schemas = json.load(handle)

    def schema_token_lists(table_schema):
        surface_forms = []
        plain_columns = []
        table_names = table_schema['table_names']  # kept for parity (unused)
        originals = table_schema['table_names_original']
        for (table_id, col) in table_schema['column_names_original']:
            if table_id < 0:
                form = col
            else:
                form = '{}.{}'.format(originals[table_id], col)
            surface_forms.append(form.lower())
            plain_columns.append(col.lower())
        surface_forms.extend('{}.*'.format(t.lower()) for t in originals)
        return (surface_forms, plain_columns)

    for table_schema in database_schemas:
        db_id = table_schema['db_id']
        database_schemas_dict[db_id] = table_schema
        (schema_tokens[db_id], column_names[db_id]) = schema_token_lists(table_schema)
    return (schema_tokens, column_names, database_schemas_dict)
|
def remove_from_with_join(format_sql_2):
    """Strip FROM clauses that contain joins, replacing T-aliases with their
    real table names throughout the preceding statement.

    Returns (format_sql_3, used_tables, used_tables_list) where format_sql_3
    is the list of remaining lines, used_tables the de-duplicated table names
    seen, and used_tables_list one table-name list per FROM clause.
    """
    used_tables_list = []
    format_sql_3 = []
    table_to_name = {}
    table_list = []
    old_table_to_name = {}
    old_table_list = []
    for sub_sql in format_sql_2.split('\n'):
        if ('select ' in sub_sql):
            # New statement starts: apply the previous statement's alias map
            # to everything emitted so far, then reset (keeping a backup for
            # subqueries that have no FROM of their own).
            if (len(table_list) > 0):
                for i in range(len(format_sql_3)):
                    for (table, name) in table_to_name.items():
                        format_sql_3[i] = format_sql_3[i].replace(table, name)
                old_table_list = table_list
                old_table_to_name = table_to_name
                table_to_name = {}
                table_list = []
            format_sql_3.append(sub_sql)
        elif sub_sql.startswith('from'):
            # Collect "<table> as <alias>" pairs; anything from the first ')'
            # onward belongs to the enclosing query and must be preserved.
            new_sub_sql = None
            sub_sql_tokens = sub_sql.split()
            for (t_i, t) in enumerate(sub_sql_tokens):
                if (t == 'as'):
                    table_to_name[sub_sql_tokens[(t_i + 1)]] = sub_sql_tokens[(t_i - 1)]
                    table_list.append(sub_sql_tokens[(t_i - 1)])
                elif ((t == ')') and (new_sub_sql is None)):
                    new_sub_sql = ' '.join(sub_sql_tokens[t_i:])
            if (len(table_list) > 0):
                # FROM with join: drop it entirely (keep only the ')' tail).
                if (new_sub_sql is not None):
                    format_sql_3.append(new_sub_sql)
                used_tables_list.append(table_list)
            else:
                # FROM without join: restore the outer statement's alias map.
                table_list = old_table_list
                table_to_name = old_table_to_name
                assert ('join' not in sub_sql)
                if (new_sub_sql is not None):
                    # "from <table> ) ..." — keep both halves.
                    sub_sub_sql = sub_sql[:(- len(new_sub_sql))].strip()
                    assert (len(sub_sub_sql.split()) == 2)
                    used_tables_list.append([sub_sub_sql.split()[1]])
                    format_sql_3.append(sub_sub_sql)
                    format_sql_3.append(new_sub_sql)
                elif ('join' not in sub_sql):
                    # Plain "from <table>" (or a bare "from"); kept as-is.
                    assert ((len(sub_sql.split()) == 2) or (len(sub_sql.split()) == 1))
                    if (len(sub_sql.split()) == 2):
                        used_tables_list.append([sub_sql.split()[1]])
                    format_sql_3.append(sub_sql)
                else:
                    # Unreachable when assertions are enabled (see assert above).
                    print('bad from clause in remove_from_with_join')
                    exit()
        else:
            format_sql_3.append(sub_sql)
    # Apply the final statement's alias map.
    if (len(table_list) > 0):
        for i in range(len(format_sql_3)):
            for (table, name) in table_to_name.items():
                format_sql_3[i] = format_sql_3[i].replace(table, name)
    used_tables = []
    for t in used_tables_list:
        for tt in t:
            used_tables.append(tt)
    used_tables = list(set(used_tables))
    return (format_sql_3, used_tables, used_tables_list)
|
def remove_from_without_join(format_sql_3, column_names, schema_tokens):
    """Strip single-table FROM clauses, qualifying bare column tokens in the
    preceding statement with that table ("col" -> "table . col") whenever
    "table.col" is a known schema token.

    format_sql_3: newline-joined SQL lines; returns the new list of lines.
    """
    format_sql_4 = []
    table_name = None
    for sub_sql in format_sql_3.split('\n'):
        if ('select ' in sub_sql):
            # New statement: retro-qualify the lines of the previous one.
            if table_name:
                for i in range(len(format_sql_4)):
                    tokens = format_sql_4[i].split()
                    for (ii, token) in enumerate(tokens):
                        # Bare column (not already preceded/followed by '.'
                        # and not a function name followed by '(').
                        if ((token in column_names) and (tokens[(ii - 1)] != '.')):
                            if ((((ii + 1) < len(tokens)) and (tokens[(ii + 1)] != '.') and (tokens[(ii + 1)] != '(')) or ((ii + 1) == len(tokens))):
                                if ('{}.{}'.format(table_name, token) in schema_tokens):
                                    tokens[ii] = '{} . {}'.format(table_name, token)
                    format_sql_4[i] = ' '.join(tokens)
            format_sql_4.append(sub_sql)
        elif sub_sql.startswith('from'):
            # Record (and drop) the single-table FROM clause.
            sub_sql_tokens = sub_sql.split()
            if (len(sub_sql_tokens) == 1):
                table_name = None
            elif (len(sub_sql_tokens) == 2):
                table_name = sub_sql_tokens[1]
            else:
                # Joins should have been removed by remove_from_with_join.
                print('bad from clause in remove_from_without_join')
                print(format_sql_3)
                exit()
        else:
            format_sql_4.append(sub_sql)
    # Qualify the final statement's lines.
    if table_name:
        for i in range(len(format_sql_4)):
            tokens = format_sql_4[i].split()
            for (ii, token) in enumerate(tokens):
                if ((token in column_names) and (tokens[(ii - 1)] != '.')):
                    if ((((ii + 1) < len(tokens)) and (tokens[(ii + 1)] != '.') and (tokens[(ii + 1)] != '(')) or ((ii + 1) == len(tokens))):
                        if ('{}.{}'.format(table_name, token) in schema_tokens):
                            tokens[ii] = '{} . {}'.format(table_name, token)
            format_sql_4[i] = ' '.join(tokens)
    return format_sql_4
|
def add_table_name(format_sql_3, used_tables, column_names, schema_tokens):
    """Qualify bare column tokens with a table name ("col" -> "table . col").

    With exactly one used table, every matching column gets that table.
    Otherwise each column is qualified only when exactly one used table owns
    it ('table' is used as a placeholder when no table owns it; ambiguous
    columns are left bare). FROM lines are passed through unchanged.
    Returns the new list of lines.
    """
    if (len(used_tables) == 1):
        table_name = used_tables[0]
        format_sql_4 = []
        for sub_sql in format_sql_3.split('\n'):
            if sub_sql.startswith('from'):
                format_sql_4.append(sub_sql)
                continue
            tokens = sub_sql.split()
            for (ii, token) in enumerate(tokens):
                # Bare column: not adjacent to '.' and not a call name.
                if ((token in column_names) and (tokens[(ii - 1)] != '.')):
                    if ((((ii + 1) < len(tokens)) and (tokens[(ii + 1)] != '.') and (tokens[(ii + 1)] != '(')) or ((ii + 1) == len(tokens))):
                        if ('{}.{}'.format(table_name, token) in schema_tokens):
                            tokens[ii] = '{} . {}'.format(table_name, token)
            format_sql_4.append(' '.join(tokens))
        return format_sql_4
    def get_table_name_for(token):
        # Owning tables among used_tables; 'table' placeholder when none,
        # None (skip qualification) when ambiguous.
        table_names = []
        for table_name in used_tables:
            if ('{}.{}'.format(table_name, token) in schema_tokens):
                table_names.append(table_name)
        if (len(table_names) == 0):
            return 'table'
        if (len(table_names) > 1):
            return None
        else:
            return table_names[0]
    format_sql_4 = []
    for sub_sql in format_sql_3.split('\n'):
        if sub_sql.startswith('from'):
            format_sql_4.append(sub_sql)
            continue
        tokens = sub_sql.split()
        for (ii, token) in enumerate(tokens):
            if (token == '*'):
                continue
            if ((token in column_names) and (tokens[(ii - 1)] != '.')):
                if ((((ii + 1) < len(tokens)) and (tokens[(ii + 1)] != '.') and (tokens[(ii + 1)] != '(')) or ((ii + 1) == len(tokens))):
                    table_name = get_table_name_for(token)
                    if table_name:
                        tokens[ii] = '{} . {}'.format(table_name, token)
        format_sql_4.append(' '.join(tokens))
    return format_sql_4
|
def check_oov(format_sql_final, output_vocab, schema_tokens):
    """Raise Exception('OOV') if any whitespace token of the final SQL is in
    neither the output vocabulary nor the schema tokens."""
    for sql_tok in format_sql_final.split():
        known = (sql_tok in schema_tokens) or (sql_tok in output_vocab)
        if known:
            continue
        print('OOV!', sql_tok)
        raise Exception('OOV')
|
def normalize_space(format_sql):
    """Normalize sqlparse output: pad punctuation, collapse whitespace,
    re-glue clause fragments, and merge multi-word keywords into single
    tokens (group_by, order_by, !=, limit_value).

    Note: this definition shadows the earlier normalize_space (it also pads
    '.' and omits the 'union' line-split of the earlier version).
    """
    padded_lines = []
    for raw_line in format_sql.split('\n'):
        piece = raw_line.strip()
        for ch in (',', '.', '(', ')'):
            piece = piece.replace(ch, ' {} '.format(ch))
        padded_lines.append(' '.join(piece.split()))
    sql = '\n'.join(padded_lines)
    clause_fixes = (
        ('\njoin', ' join'), (',\n', ', '), (' where', '\nwhere'),
        (' intersect', '\nintersect'), ('\nand', ' and'),
        ('order by t2 .\nstart desc', 'order by t2 . start desc'),
    )
    # sqlparse splits lines at schema words that collide with SQL keywords
    # (start, drop, order, ...); glue them back to the preceding token.
    keyword_collisions = (
        ('select\noperator', 'select operator'), ('select\nconstructor', 'select constructor'),
        ('select\nstart', 'select start'), ('select\ndrop', 'select drop'),
        ('select\nwork', 'select work'), ('select\ngroup', 'select group'),
        ('select\nwhere_built', 'select where_built'), ('select\norder', 'select order'),
        ('from\noperator', 'from operator'), ('from\nforward', 'from forward'),
        ('from\nfor', 'from for'), ('from\ndrop', 'from drop'),
        ('from\norder', 'from order'), ('.\nstart', '. start'),
        ('.\norder', '. order'), ('.\noperator', '. operator'),
        ('.\nsets', '. sets'), ('.\nwhere_built', '. where_built'),
        ('.\nwork', '. work'), ('.\nconstructor', '. constructor'),
        ('.\ngroup', '. group'), ('.\nfor', '. for'),
        ('.\ndrop', '. drop'), ('.\nwhere', '. where'),
    )
    token_merges = (
        ('group by', 'group_by'), ('order by', 'order_by'),
        ('! =', '!='), ('limit value', 'limit_value'),
    )
    # Replacement order matches the original chained calls exactly.
    for old, new in clause_fixes + keyword_collisions + token_merges:
        sql = sql.replace(old, new)
    return sql
|
def normalize_final_sql(format_sql_5):
    """Flatten the SQL to one line, tighten ' . ' to '.', merge multi-word
    keywords, and apply two dataset-specific hand patches."""
    flat = format_sql_5.replace('\n', ' ').replace(' . ', '.')
    for old, new in (('group by', 'group_by'), ('order by', 'order_by'),
                     ('! =', '!='), ('limit value', 'limit_value')):
        flat = flat.replace(old, new)
    # Hand patch for a known bad dorm query when T-aliases survived.
    if any(alias in flat for alias in ('t1', 't2', 't3', 't4')):
        flat = flat.replace('t2.dormid', 'dorm.dormid')
    # Hand patch for a known bad geoquery output (missing city. qualifier).
    flat = flat.replace('select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by population desc limit_value', 'select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by city.population desc limit_value')
    return flat
|
def parse_sql(sql_string, db_id, column_names, output_vocab, schema_tokens, schema):
    """Convert gold SQL into the from-less training representation.

    Pipeline: sqlparse reformat -> normalize_space -> strip FROM/JOIN clauses
    (remove_from_with_join) -> qualify bare columns with their table
    (add_table_name, remove_from_without_join) -> normalize_final_sql.
    Raises (via check_oov) if any resulting token is outside output_vocab
    and schema_tokens.
    """
    format_sql = sqlparse.format(sql_string, reindent=True)
    format_sql_2 = normalize_space(format_sql)
    # NOTE(review): num_from / num_select / failure are computed but unused.
    num_from = sum([1 for sub_sql in format_sql_2.split('\n') if sub_sql.startswith('from')])
    num_select = (format_sql_2.count('select ') + format_sql_2.count('select\n'))
    (format_sql_3, used_tables, used_tables_list) = remove_from_with_join(format_sql_2)
    format_sql_3 = '\n'.join(format_sql_3)
    format_sql_4 = add_table_name(format_sql_3, used_tables, column_names, schema_tokens)
    format_sql_4 = '\n'.join(format_sql_4)
    format_sql_5 = remove_from_without_join(format_sql_4, column_names, schema_tokens)
    format_sql_5 = '\n'.join(format_sql_5)
    format_sql_final = normalize_final_sql(format_sql_5)
    (candidate_tables_id, table_names_original) = get_candidate_tables(format_sql_final, schema)
    failure = False
    if (len(candidate_tables_id) != len(used_tables)):
        failure = True
    check_oov(format_sql_final, output_vocab, schema_tokens)
    return format_sql_final
|
def read_spider_split(split_json, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Load a Spider split file and append one single-turn interaction per
    example to interaction_list ({db_id: [interaction, ...]}).

    Tokens with embedded dots ("t1.col") are re-split into "t1 . col"; such
    examples are dropped entirely from training splits. When remove_from is
    set, the SQL is converted to the from-less form via parse_sql.
    """
    with open(split_json) as handle:
        split_data = json.load(handle)
    print('read_spider_split', split_json, len(split_data))
    for ex in split_data:
        db_id = ex['db_id']
        sql_tokens = []
        has_embedded_dot = False
        for tok in ex['query_toks_no_value']:
            if tok != '.' and '.' in tok:
                sql_tokens.extend(tok.replace('.', ' . ').split())
                has_embedded_dot = True
            else:
                sql_tokens.append(tok)
        final_sql = ' '.join(sql_tokens)
        # Training examples needing re-tokenization are considered unreliable.
        if has_embedded_dot and ('train' in split_json):
            continue
        if remove_from:
            final_sql_parse = parse_sql(final_sql, db_id, column_names[db_id], output_vocab, schema_tokens[db_id], database_schemas[db_id])
        else:
            final_sql_parse = final_sql
        final_utterance = ' '.join(ex['question_toks'])
        interaction_list.setdefault(db_id, [])
        interaction_list[db_id].append({
            'id': '',
            'scenario': '',
            'database_id': db_id,
            'interaction_id': len(interaction_list[db_id]),
            'final': {'utterance': final_utterance, 'sql': final_sql_parse},
            'interaction': [{'utterance': final_utterance, 'sql': final_sql_parse}],
        })
    return interaction_list
|
def read_data_json(split_json, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Load a SParC/CoSQL-style interaction file into interaction_list.

    Each interaction keeps its final query/utterance plus one entry per turn.
    Turns with embedded-dot tokens are dropped from training splits; when
    remove_from is set, each turn's SQL is converted to the from-less form
    via parse_sql (turns that fail to parse are skipped).
    Returns the updated {db_id: [interaction, ...]} dict.
    """
    with open(split_json) as f:
        split_data = json.load(f)
    print('read_data_json', split_json, len(split_data))
    for interaction_data in split_data:
        db_id = interaction_data['database_id']
        final_sql = interaction_data['final']['query']
        final_utterance = interaction_data['final']['utterance']
        if (db_id not in interaction_list):
            interaction_list[db_id] = []
        # NOTE(review): interaction_data['interaction'] is a list, so this
        # membership test compares the string to turn dicts and is normally
        # False; the else branch assigns sequential ids.
        if ('interaction_id' in interaction_data['interaction']):
            interaction_id = interaction_data['interaction']['interaction_id']
        else:
            interaction_id = len(interaction_list[db_id])
        interaction = {}
        interaction['id'] = ''
        interaction['scenario'] = ''
        interaction['database_id'] = db_id
        interaction['interaction_id'] = interaction_id
        interaction['final'] = {}
        interaction['final']['utterance'] = final_utterance
        interaction['final']['sql'] = final_sql
        interaction['interaction'] = []
        for turn in interaction_data['interaction']:
            # Re-split tokens like "t1.col" into "t1 . col"; mark the turn so
            # it can be dropped from training data.
            turn_sql = []
            skip = False
            for query_tok in turn['query_toks_no_value']:
                if ((query_tok != '.') and ('.' in query_tok)):
                    turn_sql += query_tok.replace('.', ' . ').split()
                    skip = True
                else:
                    turn_sql.append(query_tok)
            turn_sql = ' '.join(turn_sql)
            # Hand patches for known-bad gold queries in the datasets.
            turn_sql = turn_sql.replace('select f_id from files as t1 join song as t2 on t1 . f_id = t2 . f_id', 'select t1 . f_id from files as t1 join song as t2 on t1 . f_id = t2 . f_id')
            turn_sql = turn_sql.replace('select name from climber mountain', 'select name from climber')
            turn_sql = turn_sql.replace('select sid from sailors as t1 join reserves as t2 on t1 . sid = t2 . sid join boats as t3 on t3 . bid = t2 . bid', 'select t1 . sid from sailors as t1 join reserves as t2 on t1 . sid = t2 . sid join boats as t3 on t3 . bid = t2 . bid')
            turn_sql = turn_sql.replace('select avg ( price ) from goods )', 'select avg ( price ) from goods')
            turn_sql = turn_sql.replace('select min ( annual_fuel_cost ) , from vehicles', 'select min ( annual_fuel_cost ) from vehicles')
            turn_sql = turn_sql.replace('select * from goods where price < ( select avg ( price ) from goods', 'select * from goods where price < ( select avg ( price ) from goods )')
            turn_sql = turn_sql.replace('select distinct id , price from goods where price < ( select avg ( price ) from goods', 'select distinct id , price from goods where price < ( select avg ( price ) from goods )')
            turn_sql = turn_sql.replace('select id from goods where price > ( select avg ( price ) from goods', 'select id from goods where price > ( select avg ( price ) from goods )')
            if (skip and ('train' in split_json)):
                continue
            if remove_from:
                try:
                    turn_sql_parse = parse_sql(turn_sql, db_id, column_names[db_id], output_vocab, schema_tokens[db_id], database_schemas[db_id])
                except (Exception, SystemExit):
                    # Was a bare `except:`. SystemExit must still be caught
                    # because parse_sql's helpers call exit() on bad FROM
                    # clauses; KeyboardInterrupt is no longer swallowed.
                    print('continue')
                    continue
            else:
                turn_sql_parse = turn_sql
            if ('utterance_toks' in turn):
                turn_utterance = ' '.join(turn['utterance_toks'])
            else:
                turn_utterance = turn['utterance']
            interaction['interaction'].append({'utterance': turn_utterance, 'sql': turn_sql_parse})
        # Interactions whose every turn was skipped are not recorded.
        if (len(interaction['interaction']) > 0):
            interaction_list[db_id].append(interaction)
    return interaction_list
|
def read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Read the Spider train + dev splits into one {db_id: interactions} dict."""
    interaction_list = {}
    for split_file in ('train.json', 'dev.json'):
        split_path = os.path.join(spider_dir, split_file)
        interaction_list = read_spider_split(split_path, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    return interaction_list
|
def read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Read the SParC train + dev splits into one {db_id: interactions} dict."""
    interaction_list = {}
    for split_file in ('train_no_value.json', 'dev_no_value.json'):
        split_path = os.path.join(sparc_dir, split_file)
        interaction_list = read_data_json(split_path, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    return interaction_list
|
def read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Read the CoSQL train + dev splits into one {db_id: interactions} dict."""
    interaction_list = {}
    for split_file in ('train.json', 'dev.json'):
        split_path = os.path.join(cosql_dir, split_file)
        interaction_list = read_data_json(split_path, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    return interaction_list
|
def read_db_split(data_dir):
    """Read the train/dev database-id lists from
    <data_dir>/{train,dev}_db_ids.txt, one id per line."""
    def load_ids(filename):
        with open(os.path.join(data_dir, filename)) as handle:
            return [line.strip() for line in handle]
    return (load_ids('train_db_ids.txt'), load_ids('dev_db_ids.txt'))
|
def preprocess(dataset, remove_from=False):
    """End-to-end preprocessing for the spider / sparc / cosql datasets.

    Builds the decoder output vocabulary, reads database schemas and splits,
    optionally rewrites gold SQL into the from-less representation
    (remove_from=True), and writes train/dev interaction files plus a copy
    of tables.json into a freshly recreated output directory.
    """
    # Full SQL token vocabulary (FROM/JOIN/T-aliases kept).
    output_vocab = ['_UNK', '_EOS', '.', 't1', 't2', '=', 'select', 'from', 'as', 'value', 'join', 'on', ')', '(', 'where', 't3', 'by', ',', 'count', 'group', 'order', 'distinct', 't4', 'and', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 't5', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!', 'union', 'between', 't6', '-', 't7', '+', '/']
    if remove_from:
        # Reduced vocabulary: FROM clauses and aliases are stripped and
        # multi-word keywords merged (group_by, order_by, limit_value, !=).
        output_vocab = ['_UNK', '_EOS', '=', 'select', 'value', ')', '(', 'where', ',', 'count', 'group_by', 'order_by', 'distinct', 'and', 'limit_value', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!=', 'union', 'between', '-', '+', '/']
    print('size of output_vocab', len(output_vocab))
    print('output_vocab', output_vocab)
    print()
    # Per-dataset paths. NOTE(review): an unknown `dataset` leaves these
    # names unbound and fails below with NameError.
    if (dataset == 'spider'):
        spider_dir = 'data/spider/'
        database_schema_filename = 'data/spider/tables.json'
        output_dir = 'data/spider_data'
        if remove_from:
            output_dir = 'data/spider_data_removefrom'
        (train_database, dev_database) = read_db_split(spider_dir)
    elif (dataset == 'sparc'):
        sparc_dir = 'data/sparc/'
        database_schema_filename = 'data/sparc/tables.json'
        output_dir = 'data/sparc_data'
        if remove_from:
            output_dir = 'data/sparc_data_removefrom'
        (train_database, dev_database) = read_db_split(sparc_dir)
    elif (dataset == 'cosql'):
        cosql_dir = 'data/cosql/'
        database_schema_filename = 'data/cosql/tables.json'
        output_dir = 'data/cosql_data'
        if remove_from:
            output_dir = 'data/cosql_data_removefrom'
        (train_database, dev_database) = read_db_split(cosql_dir)
    # Recreate the output directory from scratch on every run.
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    schema_tokens = {}
    column_names = {}
    database_schemas = {}
    print('Reading spider database schema file')
    (schema_tokens, column_names, database_schemas) = read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas)
    num_database = len(schema_tokens)
    print('num_database', num_database, len(train_database), len(dev_database))
    print('total number of schema_tokens / databases:', len(schema_tokens))
    # Copy the schema file into the output directory.
    output_database_schema_filename = os.path.join(output_dir, 'tables.json')
    with open(output_database_schema_filename, 'w') as outfile:
        json.dump([v for (k, v) in database_schemas.items()], outfile, indent=4)
    if (dataset == 'spider'):
        interaction_list = read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif (dataset == 'sparc'):
        interaction_list = read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif (dataset == 'cosql'):
        interaction_list = read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    print('interaction_list length', len(interaction_list))
    # Train = every database not in the dev split; dev = dev-split order.
    train_interaction = []
    for database_id in interaction_list:
        if (database_id not in dev_database):
            train_interaction += interaction_list[database_id]
    dev_interaction = []
    for database_id in dev_database:
        dev_interaction += interaction_list[database_id]
    print('train interaction: ', len(train_interaction))
    print('dev interaction: ', len(dev_interaction))
    write_interaction(train_interaction, 'train', output_dir)
    write_interaction(dev_interaction, 'dev', output_dir)
    return
|
def train(model, data, params):
    """Trains a model.

    Inputs:
        model (ATISModel): The model to train.
        data (ATISData): The data that is used to train.
        params (namespace): Training parameters.

    Returns:
        str: Path of the last checkpoint written, or '' when no epoch ever
        improved validation string accuracy (so nothing was saved).
    """
    log = Logger(os.path.join(params.logdir, params.logfile), 'w')
    num_train_original = atis_data.num_utterances(data.train_data)
    log.put('Original number of training utterances:\t' + str(num_train_original))
    # Default to utterance-level batching/evaluation; switched below when
    # training at the interaction level.
    eval_fn = evaluate_utterance_sample
    trainbatch_fn = data.get_utterance_batches
    trainsample_fn = data.get_random_utterances
    validsample_fn = data.get_all_utterances
    batch_size = params.batch_size
    if params.interaction_level:
        # Interactions are processed one at a time.
        batch_size = 1
        eval_fn = evaluate_interaction_sample
        trainbatch_fn = data.get_interaction_batches
        trainsample_fn = data.get_random_interactions
        validsample_fn = data.get_all_interactions
    maximum_output_length = params.train_maximum_sql_length
    train_batches = trainbatch_fn(batch_size, max_output_length=maximum_output_length, randomize=(not params.deterministic))
    if params.num_train >= 0:
        train_batches = train_batches[:params.num_train]
    training_sample = trainsample_fn(params.train_evaluation_size, max_output_length=maximum_output_length)
    valid_examples = validsample_fn(data.valid_data, max_output_length=maximum_output_length)
    num_train_examples = sum([len(batch) for batch in train_batches])
    num_steps_per_epoch = len(train_batches)
    log.put('Actual number of used training examples:\t' + str(num_train_examples))
    log.put('(Shortened by output limit of ' + str(maximum_output_length) + ')')
    log.put('Number of steps per epoch:\t' + str(num_steps_per_epoch))
    log.put('Batch size:\t' + str(batch_size))
    print('Kept ' + str(num_train_examples) + '/' + str(num_train_original) + ' examples')
    print('Batch size of ' + str(batch_size) + ' gives ' + str(num_steps_per_epoch) + ' steps per epoch')
    # Training state.
    epochs = 0
    patience = params.initial_patience
    learning_rate_coefficient = 1.0
    previous_epoch_loss = float('inf')
    maximum_validation_accuracy = 0.0  # kept for parity; not updated below
    maximum_string_accuracy = 0.0
    countdown = int(patience)
    # Fix: initialize so the final `return` cannot raise UnboundLocalError
    # when no epoch ever improves string accuracy (no checkpoint is saved).
    last_save_file = ''
    if params.scheduler:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model.trainer, mode='min')
    keep_training = True
    while keep_training:
        log.put('Epoch:\t' + str(epochs))
        model.set_dropout(params.dropout_amount)
        if not params.scheduler:
            model.set_learning_rate(learning_rate_coefficient * params.initial_learning_rate)
        if params.interaction_level:
            epoch_loss = train_epoch_with_interactions(train_batches, params, model, randomize=(not params.deterministic))
        else:
            epoch_loss = train_epoch_with_utterances(train_batches, model, randomize=(not params.deterministic))
        log.put('train epoch loss:\t' + str(epoch_loss))
        model.set_dropout(0.0)
        # Gold-forced evaluation on a training sample and on the validation set.
        train_eval_results = eval_fn(training_sample, model, params.train_maximum_sql_length, name=os.path.join(params.logdir, 'train-eval'), write_results=True, gold_forcing=True, metrics=TRAIN_EVAL_METRICS)[0]
        for (name, value) in train_eval_results.items():
            log.put('train final gold-passing ' + name.name + ':\t' + ('%.2f' % value))
        valid_eval_results = eval_fn(valid_examples, model, params.eval_maximum_sql_length, name=os.path.join(params.logdir, 'valid-eval'), write_results=True, gold_forcing=True, metrics=VALID_EVAL_METRICS)[0]
        for (name, value) in valid_eval_results.items():
            log.put('valid gold-passing ' + name.name + ':\t' + ('%.2f' % value))
        valid_loss = valid_eval_results[Metrics.LOSS]
        valid_token_accuracy = valid_eval_results[Metrics.TOKEN_ACCURACY]  # unused; kept for parity
        string_accuracy = valid_eval_results[Metrics.STRING_ACCURACY]
        if params.scheduler:
            scheduler.step(valid_loss)
        if valid_loss > previous_epoch_loss:
            # Manual LR decay when validation loss worsens (used when no scheduler).
            learning_rate_coefficient *= params.learning_rate_ratio
            log.put('learning rate coefficient:\t' + str(learning_rate_coefficient))
        previous_epoch_loss = valid_loss
        saved = False
        if (not saved) and (string_accuracy > maximum_string_accuracy):
            # New best string accuracy: extend patience and checkpoint.
            maximum_string_accuracy = string_accuracy
            patience = patience * params.patience_ratio
            countdown = int(patience)
            last_save_file = os.path.join(params.logdir, 'save_' + str(epochs))
            model.save(last_save_file)
            log.put('maximum string accuracy:\t' + str(maximum_string_accuracy))
            log.put('patience:\t' + str(patience))
            log.put('save file:\t' + str(last_save_file))
        if countdown <= 0:
            keep_training = False
        countdown -= 1
        log.put('countdown:\t' + str(countdown))
        log.put('')
        epochs += 1
    log.put('Finished training!')
    log.close()
    return last_save_file
|
def evaluate(model, data, params, last_save_file, split):
    """Evaluates a pretrained model on a dataset split.

    Inputs:
        model (ATISModel): Model class.
        data (ATISData): All of the data.
        params (namespace): Parameters for the model.
        last_save_file (str): Location of the model save file; falls back to
            params.save_file when empty.
        split (str): Which split to evaluate: 'train', 'dev', 'valid' or 'test'.

    Raises:
        ValueError: If no save file is available, or the split name is unknown.
    """
    if last_save_file:
        model.load(last_save_file)
    else:
        if not params.save_file:
            raise ValueError('Must provide a save file name if not training first.')
        model.load(params.save_file)
    filename = split
    if filename == 'dev':
        split = data.dev_data
    elif filename == 'train':
        split = data.train_data
    elif filename == 'test':
        split = data.test_data
    elif filename == 'valid':
        split = data.valid_data
    else:
        # Fix: report the split actually passed to this function rather than
        # params.evaluate_split, which may differ from the argument.
        raise ValueError('Split not recognized: ' + str(filename))
    if params.use_predicted_queries:
        filename += '_use_predicted_queries'
    else:
        filename += '_use_gold_queries'
    full_name = os.path.join(params.logdir, filename) + params.results_note
    if params.interaction_level or params.use_predicted_queries:
        examples = data.get_all_interactions(split)
        if params.interaction_level:
            evaluate_interaction_sample(examples, model, name=full_name, metrics=FINAL_EVAL_METRICS, total_num=atis_data.num_utterances(split), database_username=params.database_username, database_password=params.database_password, database_timeout=params.database_timeout, use_predicted_queries=params.use_predicted_queries, max_generation_length=params.eval_maximum_sql_length, write_results=True, use_gpu=True, compute_metrics=params.compute_metrics)
        else:
            evaluate_using_predicted_queries(examples, model, name=full_name, metrics=FINAL_EVAL_METRICS, total_num=atis_data.num_utterances(split), database_username=params.database_username, database_password=params.database_password, database_timeout=params.database_timeout)
    else:
        examples = data.get_all_utterances(split)
        evaluate_utterance_sample(examples, model, name=full_name, gold_forcing=False, metrics=FINAL_EVAL_METRICS, total_num=atis_data.num_utterances(split), max_generation_length=params.eval_maximum_sql_length, database_username=params.database_username, database_password=params.database_password, database_timeout=params.database_timeout, write_results=True)
|
def main():
    """Main function that trains and/or evaluates a model.

    Builds the dataset and model from command-line parameters, prints the
    parameter inventory, then runs training and/or evaluation on the splits
    named in params.evaluate_split.
    """
    params = interpret_args()
    data = atis_data.ATISDataset(params)
    if params.interaction_level:
        model_type = SchemaInteractionATISModel
    else:
        # Only interaction-level models are implemented.
        print('not implemented')
        # Fix: exit() is meant for interactive sessions and exits with code 0;
        # use sys.exit(1) to signal failure on this unsupported configuration.
        sys.exit(1)
    model = model_type(params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, (data.anonymizer if (params.anonymize and params.anonymization_scoring) else None))
    model = model.cuda()
    print('=====================Model Parameters=====================')
    for (name, param) in model.named_parameters():
        print(name, param.requires_grad, param.is_cuda, param.size())
        assert param.is_cuda
    model.build_optim()
    print('=====================Parameters in Optimizer==============')
    for param_group in model.trainer.param_groups:
        print(param_group.keys())
        for param in param_group['params']:
            print(param.size())
    if params.fine_tune_bert:
        print('=====================Parameters in BERT Optimizer==============')
        for param_group in model.bert_trainer.param_groups:
            print(param_group.keys())
            for param in param_group['params']:
                print(param.size())
    sys.stdout.flush()
    last_save_file = ''
    if params.train:
        last_save_file = train(model, data, params)
    if params.evaluate and ('valid' in params.evaluate_split):
        evaluate(model, data, params, last_save_file, split='valid')
    if params.evaluate and ('dev' in params.evaluate_split):
        evaluate(model, data, params, last_save_file, split='dev')
    if params.evaluate and ('test' in params.evaluate_split):
        evaluate(model, data, params, last_save_file, split='test')
|
class RE21():
    """Problem 'RE21': 2 objectives, 4 variables, no constraints."""

    def __init__(self):
        self.problem_name = 'RE21'
        self.n_objectives = 2
        self.n_variables = 4
        self.n_constraints = 0
        self.n_original_constraints = 0
        # The force/stress ratio fixes the variable bounds.
        force = 10.0
        stress = 10.0
        ratio = force / stress
        self.ubound = np.full(self.n_variables, 3 * ratio)
        self.lbound = np.zeros(self.n_variables)
        self.lbound[0] = ratio
        self.lbound[1] = np.sqrt(2.0) * ratio
        self.lbound[2] = np.sqrt(2.0) * ratio
        self.lbound[3] = ratio

    def evaluate(self, x):
        """Return the two objective values for decision vector x (length 4)."""
        force = 10.0
        stress = 10.0
        modulus = 2.0 * 100000.0
        length = 200.0
        v1, v2, v3, v4 = x[0], x[1], x[2], x[3]
        root2 = np.sqrt(2.0)
        f = np.zeros(self.n_objectives)
        f[0] = length * ((2 * v1) + (root2 * v2) + np.sqrt(v3) + v4)
        f[1] = ((force * length) / modulus) * ((2.0 / v1) + ((2.0 * root2) / v2) - ((2.0 * root2) / v3) + (2.0 / v4))
        return f
|
class RE22():
    """Problem 'RE22': 2 objectives, 3 variables; the 2 original constraint
    violations are folded into the second objective."""

    def __init__(self):
        self.problem_name = 'RE22'
        self.n_objectives = 2
        self.n_variables = 3
        self.n_constraints = 0
        self.n_original_constraints = 2
        self.lbound = np.array([0.2, 0.0, 0.0])
        self.ubound = np.array([15.0, 20.0, 40.0])
        # Discrete catalogue the first variable is snapped to.
        # NOTE(review): the '3, 10' entry looks like a typo for 3.10 — kept
        # verbatim to preserve behavior; confirm against the reference suite.
        self.feasible_vals = np.array([0.2, 0.31, 0.4, 0.44, 0.6, 0.62, 0.79, 0.8, 0.88, 0.93, 1.0, 1.2, 1.24, 1.32, 1.4, 1.55, 1.58, 1.6, 1.76, 1.8, 1.86, 2.0, 2.17, 2.2, 2.37, 2.4, 2.48, 2.6, 2.64, 2.79, 2.8, 3.0, 3.08, 3, 10, 3.16, 3.41, 3.52, 3.6, 3.72, 3.95, 3.96, 4.0, 4.03, 4.2, 4.34, 4.4, 4.65, 4.74, 4.8, 4.84, 5.0, 5.28, 5.4, 5.53, 5.72, 6.0, 6.16, 6.32, 6.6, 7.11, 7.2, 7.8, 7.9, 8.0, 8.4, 8.69, 9.0, 9.48, 10.27, 11.0, 11.06, 11.85, 12.0, 13.0, 14.0, 15.0])

    def evaluate(self, x):
        """Return the two objective values for decision vector x (length 3)."""
        # Snap x[0] to the closest catalogue value.
        nearest = np.abs(self.feasible_vals - x[0]).argmin()
        v1 = self.feasible_vals[nearest]
        v2 = x[1]
        v3 = x[2]
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_original_constraints)
        f[0] = (29.4 * v1) + ((0.6 * v2) * v3)
        g[0] = ((v1 * v3) - (7.735 * ((v1 * v1) / v2))) - 180.0
        g[1] = 4.0 - (v3 / v2)
        # Keep only the magnitude of violated constraints.
        g = np.where(g < 0, -g, 0)
        f[1] = g[0] + g[1]
        return f
|
class RE23():
    """Problem 'RE23': 2 objectives, 4 variables; the 3 original constraint
    violations are folded into the second objective."""

    def __init__(self):
        self.problem_name = 'RE23'
        self.n_objectives = 2
        self.n_variables = 4
        self.n_constraints = 0
        self.n_original_constraints = 3
        self.lbound = np.array([1.0, 1.0, 10.0, 10.0])
        self.ubound = np.array([100.0, 100.0, 200.0, 240.0])

    def evaluate(self, x):
        """Return the two objective values for decision vector x (length 4)."""
        # The first two variables are discretized to multiples of 0.0625.
        v1 = 0.0625 * int(np.round(x[0]))
        v2 = 0.0625 * int(np.round(x[1]))
        v3 = x[2]
        v4 = x[3]
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_original_constraints)
        f[0] = (0.6224 * v1 * v3 * v4) + (1.7781 * v2 * v3 * v3) + (3.1661 * v1 * v1 * v4) + (19.84 * v1 * v1 * v3)
        g[0] = v1 - (0.0193 * v3)
        g[1] = v2 - (0.00954 * v3)
        g[2] = ((np.pi * v3 * v3 * v4) + ((4.0 / 3.0) * (np.pi * v3 * v3 * v3))) - 1296000
        # Keep only the magnitude of violated constraints.
        g = np.where(g < 0, -g, 0)
        f[1] = g[0] + g[1] + g[2]
        return f
|
class RE24():
    """Problem 'RE24': 2 objectives, 2 variables; the 4 original constraint
    violations are folded into the second objective."""

    def __init__(self):
        self.problem_name = 'RE24'
        self.n_objectives = 2
        self.n_variables = 2
        self.n_constraints = 0
        self.n_original_constraints = 4
        self.lbound = np.array([0.5, 0.5])
        self.ubound = np.array([4.0, 50.0])

    def evaluate(self, x):
        """Return the two objective values for decision vector x (length 2)."""
        v1 = x[0]
        v2 = x[1]
        f = np.zeros(self.n_objectives)
        f[0] = v1 + (120 * v2)
        # Material/limit constants.
        E = 700000
        sigma_b_max = 700
        tau_max = 450
        delta_max = 1.5
        sigma_k = (E * v1 * v1) / 100
        sigma_b = 4500 / (v1 * v2)
        tau = 1800 / v2
        delta = (56.2 * 10000) / (E * v1 * v2 * v2)
        g = np.array([
            1 - (sigma_b / sigma_b_max),
            1 - (tau / tau_max),
            1 - (delta / delta_max),
            1 - (sigma_b / sigma_k),
        ])
        # Keep only the magnitude of violated constraints.
        g = np.where(g < 0, -g, 0)
        f[1] = g[0] + g[1] + g[2] + g[3]
        return f
|
class RE25():
def __init__(self):
self.problem_name = 'RE25'
self.n_objectives = 2
self.n_variables = 3
self.n_constraints = 0
self.n_original_constraints = 6
self.ubound = np.zeros(self.n_variables)
self.lbound = np.zeros(self.n_variables)
self.lbound[0] = 1
self.lbound[1] = 0.6
self.lbound[2] = 0.09
self.ubound[0] = 70
self.ubound[1] = 3
self.ubound[2] = 0.5
self.feasible_vals = np.array([0.009, 0.0095, 0.0104, 0.0118, 0.0128, 0.0132, 0.014, 0.015, 0.0162, 0.0173, 0.018, 0.02, 0.023, 0.025, 0.028, 0.032, 0.035, 0.041, 0.047, 0.054, 0.063, 0.072, 0.08, 0.092, 0.105, 0.12, 0.135, 0.148, 0.162, 0.177, 0.192, 0.207, 0.225, 0.244, 0.263, 0.283, 0.307, 0.331, 0.362, 0.394, 0.4375, 0.5])
def evaluate(self, x):
f = np.zeros(self.n_objectives)
g = np.zeros(self.n_original_constraints)
x1 = np.round(x[0])
x2 = x[1]
idx = np.abs((np.asarray(self.feasible_vals) - x[2])).argmin()
x3 = self.feasible_vals[idx]
f[0] = ((((((np.pi * np.pi) * x2) * x3) * x3) * (x1 + 2)) / 4.0)
Cf = ((((4.0 * (x2 / x3)) - 1) / ((4.0 * (x2 / x3)) - 4)) + ((0.615 * x3) / x2))
Fmax = 1000.0
S = 189000.0
G = (11.5 * 1000000.0)
K = (((((G * x3) * x3) * x3) * x3) / ((((8 * x1) * x2) * x2) * x2))
lmax = 14.0
lf = ((Fmax / K) + ((1.05 * (x1 + 2)) * x3))
dmin = 0.2
Dmax = 3
Fp = 300.0
sigmaP = (Fp / K)
sigmaPM = 6
sigmaW = 1.25
g[0] = ((- ((((8 * Cf) * Fmax) * x2) / (((np.pi * x3) * x3) * x3))) + S)
g[1] = ((- lf) + lmax)
g[2] = ((- 3) + (x2 / x3))
g[3] = ((- sigmaP) + sigmaPM)
g[4] = ((((- sigmaP) - ((Fmax - Fp) / K)) - ((1.05 * (x1 + 2)) * x3)) + lf)
g[5] = (sigmaW - ((Fmax - Fp) / K))
g = np.where((g < 0), (- g), 0)
f[1] = (((((g[0] + g[1]) + g[2]) + g[3]) + g[4]) + g[5])
return f
|
class RE31():
    """Problem 'RE31': 3 objectives, 3 variables; the 3 original constraint
    violations are folded into the third objective."""

    def __init__(self):
        self.problem_name = 'RE31'
        self.n_objectives = 3
        self.n_variables = 3
        self.n_constraints = 0
        self.n_original_constraints = 3
        self.lbound = np.array([1e-05, 1e-05, 1.0])
        self.ubound = np.array([100.0, 100.0, 3.0])

    def evaluate(self, x):
        """Return the three objective values for decision vector x (length 3)."""
        v1 = x[0]
        v2 = x[1]
        v3 = x[2]
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_original_constraints)
        root_a = np.sqrt(16.0 + (v3 * v3))
        root_b = np.sqrt(1.0 + (v3 * v3))
        f[0] = (v1 * root_a) + (v2 * root_b)
        f[1] = (20.0 * root_a) / (v1 * v3)
        g[0] = 0.1 - f[0]
        g[1] = 100000.0 - f[1]
        g[2] = 100000 - ((80.0 * root_b) / (v3 * v2))
        # Keep only the magnitude of violated constraints.
        g = np.where(g < 0, -g, 0)
        f[2] = g[0] + g[1] + g[2]
        return f
|
class RE32():
def __init__(self):
self.problem_name = 'RE32'
self.n_objectives = 3
self.n_variables = 4
self.n_constraints = 0
self.n_original_constraints = 4
self.ubound = np.zeros(self.n_variables)
self.lbound = np.zeros(self.n_variables)
self.lbound[0] = 0.125
self.lbound[1] = 0.1
self.lbound[2] = 0.1
self.lbound[3] = 0.125
self.ubound[0] = 5.0
self.ubound[1] = 10.0
self.ubound[2] = 10.0
self.ubound[3] = 5.0
def evaluate(self, x):
f = np.zeros(self.n_objectives)
g = np.zeros(self.n_original_constraints)
x1 = x[0]
x2 = x[1]
x3 = x[2]
x4 = x[3]
P = 6000
L = 14
E = (30 * 1000000.0)
G = (12 * 1000000.0)
tauMax = 13600
sigmaMax = 30000
f[0] = ((((1.10471 * x1) * x1) * x2) + (((0.04811 * x3) * x4) * (14.0 + x2)))
f[1] = (((((4 * P) * L) * L) * L) / ((((E * x4) * x3) * x3) * x3))
M = (P * (L + (x2 / 2)))
tmpVar = (((x2 * x2) / 4.0) + np.power(((x1 + x3) / 2.0), 2))
R = np.sqrt(tmpVar)
tmpVar = (((x2 * x2) / 12.0) + np.power(((x1 + x3) / 2.0), 2))
J = ((((2 * np.sqrt(2)) * x1) * x2) * tmpVar)
tauDashDash = ((M * R) / J)
tauDash = (P / ((np.sqrt(2) * x1) * x2))
tmpVar = (((tauDash * tauDash) + ((((2 * tauDash) * tauDashDash) * x2) / (2 * R))) + (tauDashDash * tauDashDash))
tau = np.sqrt(tmpVar)
sigma = (((6 * P) * L) / ((x4 * x3) * x3))
tmpVar = (((4.013 * E) * np.sqrt(((((((((x3 * x3) * x4) * x4) * x4) * x4) * x4) * x4) / 36.0))) / (L * L))
tmpVar2 = ((x3 / (2 * L)) * np.sqrt((E / (4 * G))))
PC = (tmpVar * (1 - tmpVar2))
g[0] = (tauMax - tau)
g[1] = (sigmaMax - sigma)
g[2] = (x4 - x1)
g[3] = (PC - P)
g = np.where((g < 0), (- g), 0)
f[2] = (((g[0] + g[1]) + g[2]) + g[3])
return f
|
class RE33():
    """Problem 'RE33': 3 objectives, 4 variables; the 4 original constraint
    violations are folded into the third objective."""

    def __init__(self):
        self.problem_name = 'RE33'
        self.n_objectives = 3
        self.n_variables = 4
        self.n_constraints = 0
        self.n_original_constraints = 4
        self.lbound = np.array([55.0, 75.0, 1000.0, 11.0])
        self.ubound = np.array([80.0, 110.0, 3000.0, 20.0])

    def evaluate(self, x):
        """Return the three objective values for decision vector x (length 4)."""
        v1 = x[0]
        v2 = x[1]
        v3 = x[2]
        v4 = x[3]
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_original_constraints)
        sq_diff = (v2 * v2) - (v1 * v1)
        cube_diff = ((v2 * v2) * v2) - ((v1 * v1) * v1)
        f[0] = ((4.9 * 1e-05) * sq_diff) * (v4 - 1.0)
        f[1] = ((9.82 * 1000000.0) * sq_diff) / ((v3 * v4) * cube_diff)
        g[0] = (v2 - v1) - 20.0
        g[1] = 0.4 - (v3 / (3.14 * sq_diff))
        g[2] = 1.0 - ((((2.22 * 0.001) * v3) * cube_diff) / np.power(sq_diff, 2))
        g[3] = (((((2.66 * 0.01) * v3) * v4) * cube_diff) / sq_diff) - 900.0
        # Keep only the magnitude of violated constraints.
        g = np.where(g < 0, -g, 0)
        f[2] = g[0] + g[1] + g[2] + g[3]
        return f
|
class RE34():
    """Problem 'RE34': 3 unconstrained polynomial objectives over 5 variables."""

    def __init__(self):
        self.problem_name = 'RE34'
        self.n_objectives = 3
        self.n_variables = 5
        self.n_constraints = 0
        self.n_original_constraints = 0
        self.lbound = np.full(self.n_variables, 1)
        self.ubound = np.full(self.n_variables, 3)

    def evaluate(self, x):
        """Return the three objective values for decision vector x (length 5)."""
        v1, v2, v3, v4, v5 = x[0], x[1], x[2], x[3], x[4]
        f = np.zeros(self.n_objectives)
        # Fitted quadratic response surfaces.
        f[0] = (1640.2823 + (2.3573285 * v1) + (2.3220035 * v2)
                + (4.5688768 * v3) + (7.7213633 * v4) + (4.4559504 * v5))
        f[1] = (6.5856 + (1.15 * v1) - (1.0427 * v2) + (0.9738 * v3) + (0.8364 * v4)
                - ((0.3695 * v1) * v4) + ((0.0861 * v1) * v5) + ((0.3628 * v2) * v4)
                - ((0.1106 * v1) * v1) - ((0.3437 * v3) * v3) + ((0.1764 * v4) * v4))
        f[2] = ((-0.0551) + (0.0181 * v1) + (0.1024 * v2) + (0.0421 * v3)
                - ((0.0073 * v1) * v2) + ((0.024 * v2) * v3) - ((0.0118 * v2) * v4)
                - ((0.0204 * v3) * v4) - ((0.008 * v3) * v5)
                - ((0.0241 * v2) * v2) + ((0.0109 * v4) * v4))
        return f
|
class RE35():
def __init__(self):
self.problem_name = 'RE35'
self.n_objectives = 3
self.n_variables = 7
self.n_constraints = 0
self.n_original_constraints = 11
self.lbound = np.zeros(self.n_variables)
self.ubound = np.zeros(self.n_variables)
self.lbound[0] = 2.6
self.lbound[1] = 0.7
self.lbound[2] = 17
self.lbound[3] = 7.3
self.lbound[4] = 7.3
self.lbound[5] = 2.9
self.lbound[6] = 5.0
self.ubound[0] = 3.6
self.ubound[1] = 0.8
self.ubound[2] = 28
self.ubound[3] = 8.3
self.ubound[4] = 8.3
self.ubound[5] = 3.9
self.ubound[6] = 5.5
def evaluate(self, x):
f = np.zeros(self.n_objectives)
g = np.zeros(self.n_original_constraints)
x1 = x[0]
x2 = x[1]
x3 = np.round(x[2])
x4 = x[3]
x5 = x[4]
x6 = x[5]
x7 = x[6]
f[0] = ((((((0.7854 * x1) * (x2 * x2)) * (((((10.0 * x3) * x3) / 3.0) + (14.933 * x3)) - 43.0934)) - ((1.508 * x1) * ((x6 * x6) + (x7 * x7)))) + (7.477 * (((x6 * x6) * x6) + ((x7 * x7) * x7)))) + (0.7854 * (((x4 * x6) * x6) + ((x5 * x7) * x7))))
tmpVar = (np.power(((745.0 * x4) / (x2 * x3)), 2.0) + (1.69 * 10000000.0))
f[1] = (np.sqrt(tmpVar) / (((0.1 * x6) * x6) * x6))
g[0] = ((- (1.0 / (((x1 * x2) * x2) * x3))) + (1.0 / 27.0))
g[1] = ((- (1.0 / ((((x1 * x2) * x2) * x3) * x3))) + (1.0 / 397.5))
g[2] = (((- ((x4 * x4) * x4)) / (((((x2 * x3) * x6) * x6) * x6) * x6)) + (1.0 / 1.93))
g[3] = (((- ((x5 * x5) * x5)) / (((((x2 * x3) * x7) * x7) * x7) * x7)) + (1.0 / 1.93))
g[4] = ((- (x2 * x3)) + 40.0)
g[5] = ((- (x1 / x2)) + 12.0)
g[6] = ((- 5.0) + (x1 / x2))
g[7] = (((- 1.9) + x4) - (1.5 * x6))
g[8] = (((- 1.9) + x5) - (1.1 * x7))
g[9] = ((- f[1]) + 1300.0)
tmpVar = (np.power(((745.0 * x5) / (x2 * x3)), 2.0) + (1.575 * 100000000.0))
g[10] = (((- np.sqrt(tmpVar)) / (((0.1 * x7) * x7) * x7)) + 1100.0)
g = np.where((g < 0), (- g), 0)
f[2] = ((((((((((g[0] + g[1]) + g[2]) + g[3]) + g[4]) + g[5]) + g[6]) + g[7]) + g[8]) + g[9]) + g[10])
return f
|
class RE36():
    """Problem 'RE36': 3 objectives over 4 integer-rounded variables; the
    single original constraint violation becomes the third objective."""

    def __init__(self):
        self.problem_name = 'RE36'
        self.n_objectives = 3
        self.n_variables = 4
        self.n_constraints = 0
        self.n_original_constraints = 1
        self.lbound = np.full(self.n_variables, 12)
        self.ubound = np.full(self.n_variables, 60)

    def evaluate(self, x):
        """Return the three objective values for decision vector x (length 4)."""
        # All variables are integer-valued.
        t1 = np.round(x[0])
        t2 = np.round(x[1])
        t3 = np.round(x[2])
        t4 = np.round(x[3])
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_original_constraints)
        f[0] = np.abs(6.931 - ((t3 / t1) * (t4 / t2)))
        f[1] = max([t1, t2, t3, t4])
        g[0] = 0.5 - (f[0] / 6.931)
        # Keep only the magnitude of the violation.
        g = np.where(g < 0, -g, 0)
        f[2] = g[0]
        return f
|
class RE37():
    """Problem 'RE37': 3 unconstrained polynomial objectives over 4 variables
    in the unit hypercube."""

    def __init__(self):
        self.problem_name = 'RE37'
        self.n_objectives = 3
        self.n_variables = 4
        self.n_constraints = 0
        self.n_original_constraints = 0
        self.lbound = np.full(self.n_variables, 0)
        self.ubound = np.full(self.n_variables, 1)

    def evaluate(self, x):
        """Return the three objective values for decision vector x (length 4)."""
        a = x[0]   # xAlpha in the original formulation
        b = x[1]   # xHA
        c = x[2]   # xOA
        d = x[3]   # xOPTT
        f = np.zeros(self.n_objectives)
        # Fitted polynomial response surfaces.
        f[0] = (0.692 + (0.477 * a) - (0.687 * b) - (0.08 * c) - (0.065 * d)
                - ((0.167 * a) * a) - ((0.0129 * b) * a) + ((0.0796 * b) * b)
                - ((0.0634 * c) * a) - ((0.0257 * c) * b) + ((0.0877 * c) * c)
                - ((0.0521 * d) * a) + ((0.00156 * d) * b) + ((0.00198 * d) * c)
                + ((0.0184 * d) * d))
        f[1] = (0.153 - (0.322 * a) + (0.396 * b) + (0.424 * c) + (0.0226 * d)
                + ((0.175 * a) * a) + ((0.0185 * b) * a) - ((0.0701 * b) * b)
                - ((0.251 * c) * a) + ((0.179 * c) * b) + ((0.015 * c) * c)
                + ((0.0134 * d) * a) + ((0.0296 * d) * b) + ((0.0752 * d) * c)
                + ((0.0192 * d) * d))
        f[2] = (0.37 - (0.205 * a) + (0.0307 * b) + (0.108 * c) + (1.019 * d)
                - ((0.135 * a) * a) + ((0.0141 * b) * a) + ((0.0998 * b) * b)
                + ((0.208 * c) * a) - ((0.0301 * c) * b) - ((0.226 * c) * c)
                + ((0.353 * d) * a) - ((0.0497 * d) * c) - ((0.423 * d) * d)
                + (((0.202 * b) * a) * a) - (((0.281 * c) * a) * a)
                - (((0.342 * b) * b) * a) - (((0.245 * b) * b) * c)
                + (((0.281 * c) * c) * b) - (((0.184 * d) * d) * a)
                - (((0.281 * b) * a) * c))
        return f
|
class RE41():
    """Problem 'RE41': 4 objectives, 7 variables; the 10 original constraint
    violations are folded into the fourth objective."""

    def __init__(self):
        self.problem_name = 'RE41'
        self.n_objectives = 4
        self.n_variables = 7
        self.n_constraints = 0
        self.n_original_constraints = 10
        self.lbound = np.array([0.5, 0.45, 0.5, 0.5, 0.875, 0.4, 0.4])
        self.ubound = np.array([1.5, 1.35, 1.5, 1.5, 2.625, 1.2, 1.2])

    def evaluate(self, x):
        """Return the four objective values for decision vector x (length 7)."""
        v1 = x[0]
        v2 = x[1]
        v3 = x[2]
        v4 = x[3]
        v5 = x[4]
        v6 = x[5]
        v7 = x[6]
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_original_constraints)
        f[0] = (1.98 + (4.9 * v1) + (6.67 * v2) + (6.98 * v3) + (4.01 * v4)
                + (1.78 * v5) + (1e-05 * v6) + (2.73 * v7))
        f[1] = (4.72 - (0.5 * v4)) - ((0.19 * v2) * v3)
        vel_mbp = (10.58 - ((0.674 * v1) * v2)) - (0.67275 * v2)
        vel_fd = (16.45 - ((0.489 * v3) * v7)) - ((0.843 * v5) * v6)
        f[2] = 0.5 * (vel_mbp + vel_fd)
        g[0] = 1 - ((1.16 - ((0.3717 * v2) * v4)) - (0.0092928 * v3))
        g[1] = 0.32 - (((((0.261 - ((0.0159 * v1) * v2)) - (0.06486 * v1)) - ((0.019 * v2) * v7)) + ((0.0144 * v3) * v5)) + (0.0154464 * v6))
        g[2] = 0.32 - (((((((((0.214 + (0.00817 * v5)) - (0.045195 * v1)) - (0.0135168 * v1)) + ((0.03099 * v2) * v6)) - ((0.018 * v2) * v7)) + (0.007176 * v3)) + (0.023232 * v3)) - ((0.00364 * v5) * v6)) - ((0.018 * v2) * v2))
        g[3] = 0.32 - ((((0.74 - (0.61 * v2)) - (0.031296 * v3)) - (0.031872 * v7)) + ((0.227 * v2) * v2))
        g[4] = 32 - ((((28.98 + (3.818 * v3)) - ((4.2 * v1) * v2)) + (1.27296 * v6)) - (2.68065 * v7))
        g[5] = 32 - (((((33.86 + (2.95 * v3)) - ((5.057 * v1) * v2)) - (3.795 * v2)) - (3.4431 * v7)) + 1.45728)
        g[6] = 32 - ((46.36 - (9.9 * v2)) - (4.4505 * v1))
        g[7] = 4 - f[1]
        g[8] = 9.9 - vel_mbp
        g[9] = 15.7 - vel_fd
        # Keep only the magnitude of violated constraints.
        g = np.where(g < 0, -g, 0)
        f[3] = g[0] + g[1] + g[2] + g[3] + g[4] + g[5] + g[6] + g[7] + g[8] + g[9]
        return f
|
class RE42():
    """Problem 'RE42': 4 objectives, 6 variables; the 9 original constraint
    violations are folded into the fourth objective.

    Variable names (x_L, x_B, x_D, x_T, x_Vk, x_CB) suggest a conceptual ship
    design model (length, beam, depth, draft, speed, block coefficient) —
    domain reading inferred from names only; confirm against the reference.
    """

    def __init__(self):
        self.problem_name = 'RE42'
        self.n_objectives = 4
        self.n_variables = 6
        self.n_constraints = 0
        self.n_original_constraints = 9
        # Per-variable box bounds.
        self.lbound = np.zeros(self.n_variables)
        self.ubound = np.zeros(self.n_variables)
        self.lbound[0] = 150.0
        self.lbound[1] = 20.0
        self.lbound[2] = 13.0
        self.lbound[3] = 10.0
        self.lbound[4] = 14.0
        self.lbound[5] = 0.63
        self.ubound[0] = 274.32
        self.ubound[1] = 32.31
        self.ubound[2] = 25.0
        self.ubound[3] = 11.71
        self.ubound[4] = 18.0
        self.ubound[5] = 0.75

    def evaluate(self, x):
        """Return the four objective values for decision vector x (length 6).

        f[0]-f[2] are cost/weight/(negated) cargo objectives; f[3] is the sum
        of the nine constraint-violation magnitudes.
        """
        f = np.zeros(self.n_objectives)
        constraintFuncs = np.zeros(self.n_original_constraints)
        x_L = x[0]
        x_B = x[1]
        x_D = x[2]
        x_T = x[3]
        x_Vk = x[4]
        x_CB = x[5]
        displacement = ((((1.025 * x_L) * x_B) * x_T) * x_CB)
        V = (0.5144 * x_Vk)
        g = 9.8065
        Fn = (V / np.power((g * x_L), 0.5))
        # Quadratic fits in x_CB used in the power estimate below.
        a = ((((4977.06 * x_CB) * x_CB) - (8105.61 * x_CB)) + 4456.51)
        b = (((((- 10847.2) * x_CB) * x_CB) + (12817.0 * x_CB)) - 6960.32)
        power = ((np.power(displacement, (2.0 / 3.0)) * np.power(x_Vk, 3.0)) / (a + (b * Fn)))
        # Weight components combined into the light-ship weight (objective f[1]).
        outfit_weight = ((((1.0 * np.power(x_L, 0.8)) * np.power(x_B, 0.6)) * np.power(x_D, 0.3)) * np.power(x_CB, 0.1))
        steel_weight = ((((0.034 * np.power(x_L, 1.7)) * np.power(x_B, 0.7)) * np.power(x_D, 0.4)) * np.power(x_CB, 0.5))
        machinery_weight = (0.17 * np.power(power, 0.9))
        light_ship_weight = ((steel_weight + outfit_weight) + machinery_weight)
        # Cost model feeding objective f[0] (annual cost per unit cargo).
        ship_cost = (1.3 * (((2000.0 * np.power(steel_weight, 0.85)) + (3500.0 * outfit_weight)) + (2400.0 * np.power(power, 0.8))))
        capital_costs = (0.2 * ship_cost)
        DWT = (displacement - light_ship_weight)
        running_costs = (40000.0 * np.power(DWT, 0.3))
        round_trip_miles = 5000.0
        sea_days = ((round_trip_miles / 24.0) * x_Vk)
        handling_rate = 8000.0
        daily_consumption = ((((0.19 * power) * 24.0) / 1000.0) + 0.2)
        fuel_price = 100.0
        fuel_cost = (((1.05 * daily_consumption) * sea_days) * fuel_price)
        port_cost = (6.3 * np.power(DWT, 0.8))
        fuel_carried = (daily_consumption * (sea_days + 5.0))
        miscellaneous_DWT = (2.0 * np.power(DWT, 0.5))
        cargo_DWT = ((DWT - fuel_carried) - miscellaneous_DWT)
        port_days = (2.0 * ((cargo_DWT / handling_rate) + 0.5))
        RTPA = (350.0 / (sea_days + port_days))
        voyage_costs = ((fuel_cost + port_cost) * RTPA)
        annual_costs = ((capital_costs + running_costs) + voyage_costs)
        annual_cargo = (cargo_DWT * RTPA)
        f[0] = (annual_costs / annual_cargo)
        f[1] = light_ship_weight
        # Negated so that maximizing cargo becomes minimization.
        f[2] = (- annual_cargo)
        # Nine original constraints; positive values mean "satisfied".
        constraintFuncs[0] = ((x_L / x_B) - 6.0)
        constraintFuncs[1] = ((- (x_L / x_D)) + 15.0)
        constraintFuncs[2] = ((- (x_L / x_T)) + 19.0)
        constraintFuncs[3] = ((0.45 * np.power(DWT, 0.31)) - x_T)
        constraintFuncs[4] = (((0.7 * x_D) + 0.7) - x_T)
        constraintFuncs[5] = (500000.0 - DWT)
        constraintFuncs[6] = (DWT - 3000.0)
        constraintFuncs[7] = (0.32 - Fn)
        KB = (0.53 * x_T)
        BMT = (((((0.085 * x_CB) - 0.002) * x_B) * x_B) / (x_T * x_CB))
        KG = (1.0 + (0.52 * x_D))
        constraintFuncs[8] = (((KB + BMT) - KG) - (0.07 * x_B))
        # Keep only the magnitude of violated constraints.
        constraintFuncs = np.where((constraintFuncs < 0), (- constraintFuncs), 0)
        f[3] = ((((((((constraintFuncs[0] + constraintFuncs[1]) + constraintFuncs[2]) + constraintFuncs[3]) + constraintFuncs[4]) + constraintFuncs[5]) + constraintFuncs[6]) + constraintFuncs[7]) + constraintFuncs[8])
        return f
|
class RE61():
    """Problem 'RE61': 6 objectives, 3 variables; the 7 original constraint
    violations are folded into the sixth objective."""

    def __init__(self):
        self.problem_name = 'RE61'
        self.n_objectives = 6
        self.n_variables = 3
        self.n_constraints = 0
        self.n_original_constraints = 7
        self.lbound = np.full(self.n_variables, 0.01)
        self.ubound = np.array([0.45, 0.1, 0.1])

    def evaluate(self, x):
        """Return the six objective values for decision vector x (length 3)."""
        v1 = x[0]
        v2 = x[1]
        v3 = x[2]
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_original_constraints)
        f[0] = (106780.37 * (v2 + v3)) + 61704.67
        f[1] = 3000 * v1
        f[2] = ((305700 * 2289) * v2) / np.power(0.06 * 2289, 0.65)
        f[3] = (250 * 2289) * np.exp(((-39.75) * v2) + (9.9 * v3) + 2.74)
        f[4] = 25 * (((1.39 / (v1 * v2)) + (4940 * v3)) - 80)
        g[0] = 1 - (((0.00139 / (v1 * v2)) + (4.94 * v3)) - 0.08)
        g[1] = 1 - (((0.000306 / (v1 * v2)) + (1.082 * v3)) - 0.0986)
        g[2] = 50000 - (((12.307 / (v1 * v2)) + (49408.24 * v3)) + 4051.02)
        g[3] = 16000 - (((2.098 / (v1 * v2)) + (8046.33 * v3)) - 696.71)
        g[4] = 10000 - (((2.138 / (v1 * v2)) + (7883.39 * v3)) - 705.04)
        g[5] = 2000 - ((((0.417 * v1) * v2) + (1721.26 * v3)) - 136.54)
        g[6] = 550 - (((0.164 / (v1 * v2)) + (631.13 * v3)) - 54.48)
        # Keep only the magnitude of violated constraints.
        g = np.where(g < 0, -g, 0)
        f[5] = g[0] + g[1] + g[2] + g[3] + g[4] + g[5] + g[6]
        return f
|
class RE91():
    """Problem 'RE91': 9 objectives, 7 decision variables.

    NOTE: evaluate() is stochastic — four extra inputs (x8..x11) are drawn
    from np.random.normal on every call, so repeated evaluations of the same
    x generally return different values.
    """

    def __init__(self):
        self.problem_name = 'RE91'
        self.n_objectives = 9
        self.n_variables = 7
        self.n_constraints = 0
        self.n_original_constraints = 0
        # Per-variable box bounds.
        self.lbound = np.zeros(self.n_variables)
        self.ubound = np.zeros(self.n_variables)
        self.lbound[0] = 0.5
        self.lbound[1] = 0.45
        self.lbound[2] = 0.5
        self.lbound[3] = 0.5
        self.lbound[4] = 0.875
        self.lbound[5] = 0.4
        self.lbound[6] = 0.4
        self.ubound[0] = 1.5
        self.ubound[1] = 1.35
        self.ubound[2] = 1.5
        self.ubound[3] = 1.5
        self.ubound[4] = 2.625
        self.ubound[5] = 1.2
        self.ubound[6] = 1.2

    def evaluate(self, x):
        """Return the nine objective values for decision vector x (length 7).

        f[0] is a weighted linear combination of the variables; f[1..8] are
        fitted responses clamped below at 0 via max(0.0, ...), each scaled by
        a fixed denominator.
        """
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_original_constraints)  # zero-length; unused
        x1 = x[0]
        x2 = x[1]
        x3 = x[2]
        x4 = x[3]
        x5 = x[4]
        x6 = x[5]
        x7 = x[6]
        # Stochastic noise parameters sampled fresh on each call.
        x8 = ((0.006 * np.random.normal(0, 1)) + 0.345)
        x9 = ((0.006 * np.random.normal(0, 1)) + 0.192)
        x10 = ((10 * np.random.normal(0, 1)) + 0.0)
        x11 = ((10 * np.random.normal(0, 1)) + 0.0)
        f[0] = (((((((1.98 + (4.9 * x1)) + (6.67 * x2)) + (6.98 * x3)) + (4.01 * x4)) + (1.75 * x5)) + (1e-05 * x6)) + (2.73 * x7))
        f[1] = max(0.0, (((((1.16 - ((0.3717 * x2) * x4)) - ((0.00931 * x2) * x10)) - ((0.484 * x3) * x9)) + ((0.01343 * x6) * x10)) / 1.0))
        f[2] = max(0.0, (((((((((0.261 - ((0.0159 * x1) * x2)) - ((0.188 * x1) * x8)) - ((0.019 * x2) * x7)) + ((0.0144 * x3) * x5)) + ((0.87570001 * x5) * x10)) + ((0.08045 * x6) * x9)) + ((0.00139 * x8) * x11)) + ((1.575e-05 * x10) * x11)) / 0.32))
        f[3] = max(0.0, ((((((((((((((0.214 + (0.00817 * x5)) - ((0.131 * x1) * x8)) - ((0.0704 * x1) * x9)) + ((0.03099 * x2) * x6)) - ((0.018 * x2) * x7)) + ((0.0208 * x3) * x8)) + ((0.121 * x3) * x9)) - ((0.00364 * x5) * x6)) + ((0.0007715 * x5) * x10)) - ((0.0005354 * x6) * x10)) + ((0.00121 * x8) * x11)) + ((0.00184 * x9) * x10)) - ((0.018 * x2) * x2)) / 0.32))
        f[4] = max(0.0, ((((((0.74 - (0.61 * x2)) - ((0.163 * x3) * x8)) + ((0.001232 * x3) * x10)) - ((0.166 * x7) * x9)) + ((0.227 * x2) * x2)) / 0.32))
        # Average of three fitted sub-responses, then scaled by 32 in f[5].
        tmp = (((((((((28.98 + (3.818 * x3)) - ((4.2 * x1) * x2)) + ((0.0207 * x5) * x10)) + ((6.63 * x6) * x9)) - ((7.77 * x7) * x8)) + ((0.32 * x9) * x10)) + (((((((33.86 + (2.95 * x3)) + (0.1792 * x10)) - ((5.057 * x1) * x2)) - ((11 * x2) * x8)) - ((0.0215 * x5) * x10)) - ((9.98 * x7) * x8)) + ((22 * x8) * x9))) + (((46.36 - (9.9 * x2)) - ((12.9 * x1) * x8)) + ((0.1107 * x3) * x10))) / 3)
        f[5] = max(0.0, (tmp / 32))
        f[6] = max(0.0, ((((((4.72 - (0.5 * x4)) - ((0.19 * x2) * x3)) - ((0.0122 * x4) * x10)) + ((0.009325 * x6) * x10)) + ((0.000191 * x11) * x11)) / 4.0))
        f[7] = max(0.0, ((((((10.58 - ((0.674 * x1) * x2)) - ((1.95 * x2) * x8)) + ((0.02054 * x3) * x10)) - ((0.0198 * x4) * x10)) + ((0.028 * x6) * x10)) / 9.9))
        f[8] = max(0.0, ((((((16.45 - ((0.489 * x3) * x7)) - ((0.843 * x5) * x6)) + ((0.0432 * x9) * x10)) - ((0.0556 * x9) * x11)) - ((0.000786 * x11) * x11)) / 15.7))
        return f
|
class CRE21():
    """Problem 'CRE21': constrained variant returning both the 2 objectives
    and the 3 constraint-violation magnitudes."""

    def __init__(self):
        self.problem_name = 'CRE21'
        self.n_objectives = 2
        self.n_variables = 3
        self.n_constraints = 3
        self.lbound = np.array([1e-05, 1e-05, 1.0])
        self.ubound = np.array([100.0, 100.0, 3.0])

    def evaluate(self, x):
        """Return (f, g): objectives and constraint violations for x (length 3)."""
        v1 = x[0]
        v2 = x[1]
        v3 = x[2]
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_constraints)
        root_a = np.sqrt(16.0 + (v3 * v3))
        root_b = np.sqrt(1.0 + (v3 * v3))
        f[0] = (v1 * root_a) + (v2 * root_b)
        f[1] = (20.0 * root_a) / (v1 * v3)
        g[0] = 0.1 - f[0]
        g[1] = 100000.0 - f[1]
        g[2] = 100000 - ((80.0 * root_b) / (v3 * v2))
        # Keep only the magnitude of violated constraints.
        g = np.where(g < 0, -g, 0)
        return (f, g)
|
class CRE22():
    """Constrained two-objective problem CRE22 from the CRE real-world
    test suite (appears to be the classic welded-beam design problem:
    fabrication cost vs. deflection -- TODO confirm against the suite's
    reference implementation)."""

    def __init__(self):
        self.problem_name = 'CRE22'
        self.n_objectives = 2
        self.n_variables = 4
        self.n_constraints = 4
        self.ubound = np.zeros(self.n_variables)
        self.lbound = np.zeros(self.n_variables)
        # Per-variable box constraints.
        self.lbound[0] = 0.125
        self.lbound[1] = 0.1
        self.lbound[2] = 0.1
        self.lbound[3] = 0.125
        self.ubound[0] = 5.0
        self.ubound[1] = 10.0
        self.ubound[2] = 10.0
        self.ubound[3] = 5.0

    def evaluate(self, x):
        """Return (f, g): objectives and non-negative constraint violations
        for a length-4 decision vector x."""
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_constraints)
        x1 = x[0]
        x2 = x[1]
        x3 = x[2]
        x4 = x[3]
        # Fixed problem constants (load, length, moduli, stress limits).
        P = 6000
        L = 14
        E = (30 * 1000000.0)
        G = (12 * 1000000.0)
        tauMax = 13600
        sigmaMax = 30000
        f[0] = ((((1.10471 * x1) * x1) * x2) + (((0.04811 * x3) * x4) * (14.0 + x2)))
        f[1] = (((((4 * P) * L) * L) * L) / ((((E * x4) * x3) * x3) * x3))
        M = (P * (L + (x2 / 2)))
        tmpVar = (((x2 * x2) / 4.0) + np.power(((x1 + x3) / 2.0), 2))
        R = np.sqrt(tmpVar)
        # tmpVar is reused for several unrelated intermediates below.
        tmpVar = (((x2 * x2) / 12.0) + np.power(((x1 + x3) / 2.0), 2))
        J = ((((2 * np.sqrt(2)) * x1) * x2) * tmpVar)
        tauDashDash = ((M * R) / J)
        tauDash = (P / ((np.sqrt(2) * x1) * x2))
        tmpVar = (((tauDash * tauDash) + ((((2 * tauDash) * tauDashDash) * x2) / (2 * R))) + (tauDashDash * tauDashDash))
        tau = np.sqrt(tmpVar)
        sigma = (((6 * P) * L) / ((x4 * x3) * x3))
        tmpVar = (((4.013 * E) * np.sqrt(((((((((x3 * x3) * x4) * x4) * x4) * x4) * x4) * x4) / 36.0))) / (L * L))
        tmpVar2 = ((x3 / (2 * L)) * np.sqrt((E / (4 * G))))
        PC = (tmpVar * (1 - tmpVar2))
        # Signed constraints: positive means satisfied.
        g[0] = (tauMax - tau)
        g[1] = (sigmaMax - sigma)
        g[2] = (x4 - x1)
        g[3] = (PC - P)
        # Convert signed values into violation magnitudes (0 when satisfied).
        g = np.where((g < 0), (- g), 0)
        return (f, g)
|
class CRE23():
    """Constrained two-objective problem CRE23 from the CRE real-world
    test suite (appears to be a disc-brake design problem -- TODO
    confirm against the suite's reference implementation)."""

    def __init__(self):
        self.problem_name = 'CRE23'
        self.n_objectives = 2
        self.n_variables = 4
        self.n_constraints = 4
        # Per-variable box constraints.
        self.lbound = np.array([55.0, 75.0, 1000.0, 11.0])
        self.ubound = np.array([80.0, 110.0, 3000.0, 20.0])

    def evaluate(self, x):
        """Return (f, g): objectives and non-negative constraint violations."""
        r_in, r_out, force, count = x[0], x[1], x[2], x[3]
        # Shared intermediates: difference of squares and of cubes.
        sq_diff = r_out * r_out - r_in * r_in
        cube_diff = r_out * r_out * r_out - r_in * r_in * r_in
        f = np.zeros(self.n_objectives)
        f[0] = 4.9 * 1e-05 * sq_diff * (count - 1.0)
        f[1] = 9.82 * 1000000.0 * sq_diff / (force * count * cube_diff)
        g = np.zeros(self.n_constraints)
        g[0] = r_out - r_in - 20.0
        g[1] = 0.4 - force / (3.14 * sq_diff)
        g[2] = 1.0 - 2.22 * 0.001 * force * cube_diff / np.power(sq_diff, 2)
        g[3] = 2.66 * 0.01 * force * count * cube_diff / sq_diff - 900.0
        # Negative signed values are violations; satisfied constraints map to 0.
        g = np.where(g < 0, -g, 0)
        return (f, g)
|
class CRE24():
    """Constrained two-objective problem CRE24 from the CRE real-world
    test suite (appears to be the classic speed-reducer / gear-train
    design problem: weight vs. shaft stress -- TODO confirm against
    the suite's reference implementation)."""

    def __init__(self):
        self.problem_name = 'CRE24'
        self.n_objectives = 2
        self.n_variables = 7
        self.n_constraints = 11
        self.lbound = np.zeros(self.n_variables)
        self.ubound = np.zeros(self.n_variables)
        # Per-variable box constraints.
        self.lbound[0] = 2.6
        self.lbound[1] = 0.7
        self.lbound[2] = 17
        self.lbound[3] = 7.3
        self.lbound[4] = 7.3
        self.lbound[5] = 2.9
        self.lbound[6] = 5.0
        self.ubound[0] = 3.6
        self.ubound[1] = 0.8
        self.ubound[2] = 28
        self.ubound[3] = 8.3
        self.ubound[4] = 8.3
        self.ubound[5] = 3.9
        self.ubound[6] = 5.5

    def evaluate(self, x):
        """Return (f, g): objectives and non-negative constraint violations
        for a length-7 decision vector x."""
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_constraints)
        x1 = x[0]
        x2 = x[1]
        # x3 is rounded: presumably an integer quantity (e.g. tooth count)
        # -- TODO confirm against the reference problem statement.
        x3 = np.round(x[2])
        x4 = x[3]
        x5 = x[4]
        x6 = x[5]
        x7 = x[6]
        f[0] = ((((((0.7854 * x1) * (x2 * x2)) * (((((10.0 * x3) * x3) / 3.0) + (14.933 * x3)) - 43.0934)) - ((1.508 * x1) * ((x6 * x6) + (x7 * x7)))) + (7.477 * (((x6 * x6) * x6) + ((x7 * x7) * x7)))) + (0.7854 * (((x4 * x6) * x6) + ((x5 * x7) * x7))))
        tmpVar = (np.power(((745.0 * x4) / (x2 * x3)), 2.0) + (1.69 * 10000000.0))
        f[1] = (np.sqrt(tmpVar) / (((0.1 * x6) * x6) * x6))
        # Signed constraints: positive means satisfied.
        g[0] = ((- (1.0 / (((x1 * x2) * x2) * x3))) + (1.0 / 27.0))
        g[1] = ((- (1.0 / ((((x1 * x2) * x2) * x3) * x3))) + (1.0 / 397.5))
        g[2] = (((- ((x4 * x4) * x4)) / (((((x2 * x3) * x6) * x6) * x6) * x6)) + (1.0 / 1.93))
        g[3] = (((- ((x5 * x5) * x5)) / (((((x2 * x3) * x7) * x7) * x7) * x7)) + (1.0 / 1.93))
        g[4] = ((- (x2 * x3)) + 40.0)
        g[5] = ((- (x1 / x2)) + 12.0)
        g[6] = ((- 5.0) + (x1 / x2))
        g[7] = (((- 1.9) + x4) - (1.5 * x6))
        g[8] = (((- 1.9) + x5) - (1.1 * x7))
        g[9] = ((- f[1]) + 1300.0)
        # tmpVar is reused here for a second, unrelated intermediate.
        tmpVar = (np.power(((745.0 * x5) / (x2 * x3)), 2.0) + (1.575 * 100000000.0))
        g[10] = (((- np.sqrt(tmpVar)) / (((0.1 * x7) * x7) * x7)) + 1100.0)
        # Convert signed values into violation magnitudes (0 when satisfied).
        g = np.where((g < 0), (- g), 0)
        return (f, g)
|
class CRE25():
    """Constrained two-objective problem CRE25 from the CRE real-world
    test suite (appears to be the classic gear-train design problem:
    four integer tooth counts approximating a target ratio -- TODO
    confirm against the suite's reference implementation)."""

    def __init__(self):
        self.problem_name = 'CRE25'
        self.n_objectives = 2
        self.n_variables = 4
        self.n_constraints = 1
        # All four variables share the same integer-valued range.
        self.lbound = np.full(self.n_variables, 12)
        self.ubound = np.full(self.n_variables, 60)

    def evaluate(self, x):
        """Return (f, g); the four variables are rounded to integers first."""
        teeth = [np.round(v) for v in x[:4]]
        t1, t2, t3, t4 = teeth
        f = np.zeros(self.n_objectives)
        # Deviation of the realized ratio from the target value 6.931.
        f[0] = np.abs(6.931 - (t3 / t1) * (t4 / t2))
        # The largest component bounds the design's size.
        f[1] = max(teeth)
        g = np.zeros(self.n_constraints)
        g[0] = 0.5 - f[0] / 6.931
        # Negative signed values are violations; satisfied constraints map to 0.
        g = np.where(g < 0, -g, 0)
        return (f, g)
|
class CRE31():
    """Constrained three-objective problem CRE31 from the CRE real-world
    test suite (appears to be the car side-impact design problem --
    TODO confirm against the suite's reference implementation)."""

    def __init__(self):
        self.problem_name = 'CRE31'
        self.n_objectives = 3
        self.n_variables = 7
        self.n_constraints = 10
        self.lbound = np.zeros(self.n_variables)
        self.ubound = np.zeros(self.n_variables)
        # Per-variable box constraints.
        self.lbound[0] = 0.5
        self.lbound[1] = 0.45
        self.lbound[2] = 0.5
        self.lbound[3] = 0.5
        self.lbound[4] = 0.875
        self.lbound[5] = 0.4
        self.lbound[6] = 0.4
        self.ubound[0] = 1.5
        self.ubound[1] = 1.35
        self.ubound[2] = 1.5
        self.ubound[3] = 1.5
        self.ubound[4] = 2.625
        self.ubound[5] = 1.2
        self.ubound[6] = 1.2

    def evaluate(self, x):
        """Return (f, g): objectives and non-negative constraint violations
        for a length-7 decision vector x."""
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_constraints)
        x1 = x[0]
        x2 = x[1]
        x3 = x[2]
        x4 = x[3]
        x5 = x[4]
        x6 = x[5]
        x7 = x[6]
        # f[0]: weighted sum of the design variables (a weight/cost-like term).
        f[0] = (((((((1.98 + (4.9 * x1)) + (6.67 * x2)) + (6.98 * x3)) + (4.01 * x4)) + (1.78 * x5)) + (1e-05 * x6)) + (2.73 * x7))
        f[1] = ((4.72 - (0.5 * x4)) - ((0.19 * x2) * x3))
        # Vmbp / Vfd also feed constraints g[8] and g[9] below.
        Vmbp = ((10.58 - ((0.674 * x1) * x2)) - (0.67275 * x2))
        Vfd = ((16.45 - ((0.489 * x3) * x7)) - ((0.843 * x5) * x6))
        f[2] = (0.5 * (Vmbp + Vfd))
        # Signed constraints: positive means satisfied.
        g[0] = (1 - ((1.16 - ((0.3717 * x2) * x4)) - (0.0092928 * x3)))
        g[1] = (0.32 - (((((0.261 - ((0.0159 * x1) * x2)) - (0.06486 * x1)) - ((0.019 * x2) * x7)) + ((0.0144 * x3) * x5)) + (0.0154464 * x6)))
        g[2] = (0.32 - (((((((((0.214 + (0.00817 * x5)) - (0.045195 * x1)) - (0.0135168 * x1)) + ((0.03099 * x2) * x6)) - ((0.018 * x2) * x7)) + (0.007176 * x3)) + (0.023232 * x3)) - ((0.00364 * x5) * x6)) - ((0.018 * x2) * x2)))
        g[3] = (0.32 - ((((0.74 - (0.61 * x2)) - (0.031296 * x3)) - (0.031872 * x7)) + ((0.227 * x2) * x2)))
        g[4] = (32 - ((((28.98 + (3.818 * x3)) - ((4.2 * x1) * x2)) + (1.27296 * x6)) - (2.68065 * x7)))
        g[5] = (32 - (((((33.86 + (2.95 * x3)) - ((5.057 * x1) * x2)) - (3.795 * x2)) - (3.4431 * x7)) + 1.45728))
        g[6] = (32 - ((46.36 - (9.9 * x2)) - (4.4505 * x1)))
        g[7] = (4 - f[1])
        g[8] = (9.9 - Vmbp)
        g[9] = (15.7 - Vfd)
        # Convert signed values into violation magnitudes (0 when satisfied).
        g = np.where((g < 0), (- g), 0)
        return (f, g)
|
class CRE32():
    """Constrained three-objective problem CRE32 from the CRE real-world
    test suite.  Variable names (L, B, D, T, Vk, CB) suggest a
    conceptual ship-design problem: length, beam, depth, draft, speed
    in knots, block coefficient -- TODO confirm against the suite's
    reference implementation."""

    def __init__(self):
        self.problem_name = 'CRE32'
        self.n_objectives = 3
        self.n_variables = 6
        self.n_constraints = 9
        self.lbound = np.zeros(self.n_variables)
        self.ubound = np.zeros(self.n_variables)
        # Per-variable box constraints.
        self.lbound[0] = 150.0
        self.lbound[1] = 20.0
        self.lbound[2] = 13.0
        self.lbound[3] = 10.0
        self.lbound[4] = 14.0
        self.lbound[5] = 0.63
        self.ubound[0] = 274.32
        self.ubound[1] = 32.31
        self.ubound[2] = 25.0
        self.ubound[3] = 11.71
        self.ubound[4] = 18.0
        self.ubound[5] = 0.75

    def evaluate(self, x):
        """Return (f, constraintFuncs): objectives and non-negative
        constraint violations for a length-6 decision vector x."""
        f = np.zeros(self.n_objectives)
        constraintFuncs = np.zeros(self.n_constraints)
        x_L = x[0]
        x_B = x[1]
        x_D = x[2]
        x_T = x[3]
        x_Vk = x[4]
        x_CB = x[5]
        # Hull displacement and speed-derived quantities.
        displacement = ((((1.025 * x_L) * x_B) * x_T) * x_CB)
        V = (0.5144 * x_Vk)
        g = 9.8065
        Fn = (V / np.power((g * x_L), 0.5))
        # Quadratic fit coefficients in x_CB used by the power estimate.
        a = ((((4977.06 * x_CB) * x_CB) - (8105.61 * x_CB)) + 4456.51)
        b = (((((- 10847.2) * x_CB) * x_CB) + (12817.0 * x_CB)) - 6960.32)
        power = ((np.power(displacement, (2.0 / 3.0)) * np.power(x_Vk, 3.0)) / (a + (b * Fn)))
        # Component weights summing to the light-ship weight.
        outfit_weight = ((((1.0 * np.power(x_L, 0.8)) * np.power(x_B, 0.6)) * np.power(x_D, 0.3)) * np.power(x_CB, 0.1))
        steel_weight = ((((0.034 * np.power(x_L, 1.7)) * np.power(x_B, 0.7)) * np.power(x_D, 0.4)) * np.power(x_CB, 0.5))
        machinery_weight = (0.17 * np.power(power, 0.9))
        light_ship_weight = ((steel_weight + outfit_weight) + machinery_weight)
        # Cost model: capital, running, and per-voyage components.
        ship_cost = (1.3 * (((2000.0 * np.power(steel_weight, 0.85)) + (3500.0 * outfit_weight)) + (2400.0 * np.power(power, 0.8))))
        capital_costs = (0.2 * ship_cost)
        DWT = (displacement - light_ship_weight)
        running_costs = (40000.0 * np.power(DWT, 0.3))
        round_trip_miles = 5000.0
        sea_days = ((round_trip_miles / 24.0) * x_Vk)
        handling_rate = 8000.0
        daily_consumption = ((((0.19 * power) * 24.0) / 1000.0) + 0.2)
        fuel_price = 100.0
        fuel_cost = (((1.05 * daily_consumption) * sea_days) * fuel_price)
        port_cost = (6.3 * np.power(DWT, 0.8))
        fuel_carried = (daily_consumption * (sea_days + 5.0))
        miscellaneous_DWT = (2.0 * np.power(DWT, 0.5))
        cargo_DWT = ((DWT - fuel_carried) - miscellaneous_DWT)
        port_days = (2.0 * ((cargo_DWT / handling_rate) + 0.5))
        # RTPA: round trips per annum (350 operating days).
        RTPA = (350.0 / (sea_days + port_days))
        voyage_costs = ((fuel_cost + port_cost) * RTPA)
        annual_costs = ((capital_costs + running_costs) + voyage_costs)
        annual_cargo = (cargo_DWT * RTPA)
        # Objectives: cost per cargo unit, light-ship weight, and
        # negated annual cargo (maximization expressed as minimization).
        f[0] = (annual_costs / annual_cargo)
        f[1] = light_ship_weight
        f[2] = (- annual_cargo)
        # Signed constraints: positive means satisfied.
        constraintFuncs[0] = ((x_L / x_B) - 6.0)
        constraintFuncs[1] = ((- (x_L / x_D)) + 15.0)
        constraintFuncs[2] = ((- (x_L / x_T)) + 19.0)
        constraintFuncs[3] = ((0.45 * np.power(DWT, 0.31)) - x_T)
        constraintFuncs[4] = (((0.7 * x_D) + 0.7) - x_T)
        constraintFuncs[5] = (500000.0 - DWT)
        constraintFuncs[6] = (DWT - 3000.0)
        constraintFuncs[7] = (0.32 - Fn)
        # Stability-style constraint from KB, BMT, KG terms.
        KB = (0.53 * x_T)
        BMT = (((((0.085 * x_CB) - 0.002) * x_B) * x_B) / (x_T * x_CB))
        KG = (1.0 + (0.52 * x_D))
        constraintFuncs[8] = (((KB + BMT) - KG) - (0.07 * x_B))
        # Convert signed values into violation magnitudes (0 when satisfied).
        constraintFuncs = np.where((constraintFuncs < 0), (- constraintFuncs), 0)
        return (f, constraintFuncs)
|
class CRE51():
    """Constrained five-objective problem CRE51 from the CRE real-world
    test suite (appears to be the water resource planning problem --
    TODO confirm against the suite's reference implementation)."""

    def __init__(self):
        self.problem_name = 'CRE51'
        self.n_objectives = 5
        self.n_variables = 3
        self.n_constraints = 7
        # Per-variable box constraints.
        self.lbound = np.array([0.01, 0.01, 0.01])
        self.ubound = np.array([0.45, 0.1, 0.1])

    def evaluate(self, x):
        """Return (f, g): objectives and non-negative constraint violations."""
        # The x[0]*x[1] product appears in most terms; compute it once.
        prod01 = x[0] * x[1]
        x3 = x[2]
        f = np.zeros(self.n_objectives)
        f[0] = 106780.37 * (x[1] + x3) + 61704.67
        f[1] = 3000 * x[0]
        f[2] = 305700 * 2289 * x[1] / np.power(0.06 * 2289, 0.65)
        f[3] = 250 * 2289 * np.exp(-39.75 * x[1] + 9.9 * x3 + 2.74)
        f[4] = 25 * (1.39 / prod01 + 4940 * x3 - 80)
        g = np.zeros(self.n_constraints)
        g[0] = 1 - (0.00139 / prod01 + 4.94 * x3 - 0.08)
        g[1] = 1 - (0.000306 / prod01 + 1.082 * x3 - 0.0986)
        g[2] = 50000 - (12.307 / prod01 + 49408.24 * x3 + 4051.02)
        g[3] = 16000 - (2.098 / prod01 + 8046.33 * x3 - 696.71)
        g[4] = 10000 - (2.138 / prod01 + 7883.39 * x3 - 705.04)
        g[5] = 2000 - ((0.417 * x[0]) * x[1] + 1721.26 * x3 - 136.54)
        g[6] = 550 - (0.164 / prod01 + 631.13 * x3 - 54.48)
        # Negative signed values are violations; satisfied constraints map to 0.
        g = np.where(g < 0, -g, 0)
        return (f, g)
|
class XML_preprocessor(object):
    """Parse Pascal-VOC style XML annotation files into ground-truth arrays.

    For every XML file under ``data_path``, stores in ``self.data`` (keyed
    by image filename) an array of shape (num_objects, 4 + num_classes):
    normalized [xmin, ymin, xmax, ymax] box coordinates followed by a
    one-hot class encoding.
    """

    # Pascal VOC class names in their canonical index order; replaces the
    # original 20-branch if/elif chain with a single table lookup.
    VOC_CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
                   'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
                   'horse', 'motorbike', 'person', 'pottedplant', 'sheep',
                   'sofa', 'train', 'tvmonitor')

    def __init__(self, data_path):
        self.path_prefix = data_path
        self.num_classes = len(self.VOC_CLASSES)  # 20, as before
        self.data = dict()
        self._preprocess_XML()

    def _preprocess_XML(self):
        """Read every annotation file under ``self.path_prefix`` and
        populate ``self.data``."""
        filenames = os.listdir(self.path_prefix)
        for filename in filenames:
            tree = ElementTree.parse((self.path_prefix + filename))
            root = tree.getroot()
            bounding_boxes = []
            one_hot_classes = []
            size_tree = root.find('size')
            width = float(size_tree.find('width').text)
            height = float(size_tree.find('height').text)
            for object_tree in root.findall('object'):
                # Normalize pixel coordinates by the image dimensions.
                for bndbox in object_tree.iter('bndbox'):
                    xmin = (float(bndbox.find('xmin').text) / width)
                    ymin = (float(bndbox.find('ymin').text) / height)
                    xmax = (float(bndbox.find('xmax').text) / width)
                    ymax = (float(bndbox.find('ymax').text) / height)
                    bounding_boxes.append([xmin, ymin, xmax, ymax])
                class_name = object_tree.find('name').text
                one_hot_classes.append(self._to_one_hot(class_name))
            image_name = root.find('filename').text
            bounding_boxes = np.asarray(bounding_boxes)
            one_hot_classes = np.asarray(one_hot_classes)
            image_data = np.hstack((bounding_boxes, one_hot_classes))
            self.data[image_name] = image_data

    def _to_one_hot(self, name):
        """Return a one-hot list for ``name``.

        Unknown labels yield an all-zero vector and print a warning,
        matching the original behavior.
        """
        one_hot_vector = ([0] * self.num_classes)
        try:
            one_hot_vector[self.VOC_CLASSES.index(name)] = 1
        except ValueError:
            print(('unknown label: %s' % name))
        return one_hot_vector
|
def SSD300(input_shape, num_classes=21):
    """SSD300 architecture (written against the Keras 1.x functional API:
    Convolution2D, border_mode, merge, etc.).

    # Arguments
        input_shape: Shape of the input image,
            expected to be either (300, 300, 3) or (3, 300, 300)(not tested).
        num_classes: Number of classes including background.

    # References
        https://arxiv.org/abs/1512.02325
    """
    # Layers are collected in a dict so intermediate feature maps can be
    # reused as multi-scale prediction sources further down.
    net = {}
    # NOTE(review): the double assignment is redundant but harmless.
    input_tensor = input_tensor = Input(shape=input_shape)
    img_size = (input_shape[1], input_shape[0])
    net['input'] = input_tensor
    # --- VGG16-style backbone: conv1_1 .. pool5 ---
    net['conv1_1'] = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_1')(net['input'])
    net['conv1_2'] = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='conv1_2')(net['conv1_1'])
    net['pool1'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same', name='pool1')(net['conv1_2'])
    net['conv2_1'] = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_1')(net['pool1'])
    net['conv2_2'] = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='conv2_2')(net['conv2_1'])
    net['pool2'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same', name='pool2')(net['conv2_2'])
    net['conv3_1'] = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_1')(net['pool2'])
    net['conv3_2'] = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_2')(net['conv3_1'])
    net['conv3_3'] = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='conv3_3')(net['conv3_2'])
    net['pool3'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same', name='pool3')(net['conv3_3'])
    net['conv4_1'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_1')(net['pool3'])
    net['conv4_2'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_2')(net['conv4_1'])
    net['conv4_3'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv4_3')(net['conv4_2'])
    net['pool4'] = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same', name='pool4')(net['conv4_3'])
    net['conv5_1'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv5_1')(net['pool4'])
    net['conv5_2'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv5_2')(net['conv5_1'])
    net['conv5_3'] = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='conv5_3')(net['conv5_2'])
    # pool5 uses stride 1 so fc6/fc7 keep the spatial resolution.
    net['pool5'] = MaxPooling2D((3, 3), strides=(1, 1), border_mode='same', name='pool5')(net['conv5_3'])
    # fc6 as an atrous (dilated) convolution; fc7 as a 1x1 convolution.
    net['fc6'] = AtrousConvolution2D(1024, 3, 3, atrous_rate=(6, 6), activation='relu', border_mode='same', name='fc6')(net['pool5'])
    net['fc7'] = Convolution2D(1024, 1, 1, activation='relu', border_mode='same', name='fc7')(net['fc6'])
    # --- Extra feature layers conv6_* .. conv8_* and global pooling ---
    net['conv6_1'] = Convolution2D(256, 1, 1, activation='relu', border_mode='same', name='conv6_1')(net['fc7'])
    net['conv6_2'] = Convolution2D(512, 3, 3, subsample=(2, 2), activation='relu', border_mode='same', name='conv6_2')(net['conv6_1'])
    net['conv7_1'] = Convolution2D(128, 1, 1, activation='relu', border_mode='same', name='conv7_1')(net['conv6_2'])
    net['conv7_2'] = ZeroPadding2D()(net['conv7_1'])
    net['conv7_2'] = Convolution2D(256, 3, 3, subsample=(2, 2), activation='relu', border_mode='valid', name='conv7_2')(net['conv7_2'])
    net['conv8_1'] = Convolution2D(128, 1, 1, activation='relu', border_mode='same', name='conv8_1')(net['conv7_2'])
    net['conv8_2'] = Convolution2D(256, 3, 3, subsample=(2, 2), activation='relu', border_mode='same', name='conv8_2')(net['conv8_1'])
    net['pool6'] = GlobalAveragePooling2D(name='pool6')(net['conv8_2'])
    # L2-normalize conv4_3 activations before prediction (SSD detail).
    net['conv4_3_norm'] = Normalize(20, name='conv4_3_norm')(net['conv4_3'])
    # --- Prediction head on conv4_3_norm: 3 priors -> loc / conf / priorbox ---
    num_priors = 3
    x = Convolution2D((num_priors * 4), 3, 3, border_mode='same', name='conv4_3_norm_mbox_loc')(net['conv4_3_norm'])
    net['conv4_3_norm_mbox_loc'] = x
    flatten = Flatten(name='conv4_3_norm_mbox_loc_flat')
    net['conv4_3_norm_mbox_loc_flat'] = flatten(net['conv4_3_norm_mbox_loc'])
    # Conf layer names get a suffix when num_classes differs from the
    # default so pretrained 21-class weights are not silently loaded.
    name = 'conv4_3_norm_mbox_conf'
    if (num_classes != 21):
        name += '_{}'.format(num_classes)
    x = Convolution2D((num_priors * num_classes), 3, 3, border_mode='same', name=name)(net['conv4_3_norm'])
    net['conv4_3_norm_mbox_conf'] = x
    flatten = Flatten(name='conv4_3_norm_mbox_conf_flat')
    net['conv4_3_norm_mbox_conf_flat'] = flatten(net['conv4_3_norm_mbox_conf'])
    priorbox = PriorBox(img_size, 30.0, aspect_ratios=[2], variances=[0.1, 0.1, 0.2, 0.2], name='conv4_3_norm_mbox_priorbox')
    net['conv4_3_norm_mbox_priorbox'] = priorbox(net['conv4_3_norm'])
    # --- Head on fc7: 6 priors ---
    num_priors = 6
    net['fc7_mbox_loc'] = Convolution2D((num_priors * 4), 3, 3, border_mode='same', name='fc7_mbox_loc')(net['fc7'])
    flatten = Flatten(name='fc7_mbox_loc_flat')
    net['fc7_mbox_loc_flat'] = flatten(net['fc7_mbox_loc'])
    name = 'fc7_mbox_conf'
    if (num_classes != 21):
        name += '_{}'.format(num_classes)
    net['fc7_mbox_conf'] = Convolution2D((num_priors * num_classes), 3, 3, border_mode='same', name=name)(net['fc7'])
    flatten = Flatten(name='fc7_mbox_conf_flat')
    net['fc7_mbox_conf_flat'] = flatten(net['fc7_mbox_conf'])
    priorbox = PriorBox(img_size, 60.0, max_size=114.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='fc7_mbox_priorbox')
    net['fc7_mbox_priorbox'] = priorbox(net['fc7'])
    # --- Head on conv6_2: 6 priors ---
    num_priors = 6
    x = Convolution2D((num_priors * 4), 3, 3, border_mode='same', name='conv6_2_mbox_loc')(net['conv6_2'])
    net['conv6_2_mbox_loc'] = x
    flatten = Flatten(name='conv6_2_mbox_loc_flat')
    net['conv6_2_mbox_loc_flat'] = flatten(net['conv6_2_mbox_loc'])
    name = 'conv6_2_mbox_conf'
    if (num_classes != 21):
        name += '_{}'.format(num_classes)
    x = Convolution2D((num_priors * num_classes), 3, 3, border_mode='same', name=name)(net['conv6_2'])
    net['conv6_2_mbox_conf'] = x
    flatten = Flatten(name='conv6_2_mbox_conf_flat')
    net['conv6_2_mbox_conf_flat'] = flatten(net['conv6_2_mbox_conf'])
    priorbox = PriorBox(img_size, 114.0, max_size=168.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='conv6_2_mbox_priorbox')
    net['conv6_2_mbox_priorbox'] = priorbox(net['conv6_2'])
    # --- Head on conv7_2: 6 priors ---
    num_priors = 6
    x = Convolution2D((num_priors * 4), 3, 3, border_mode='same', name='conv7_2_mbox_loc')(net['conv7_2'])
    net['conv7_2_mbox_loc'] = x
    flatten = Flatten(name='conv7_2_mbox_loc_flat')
    net['conv7_2_mbox_loc_flat'] = flatten(net['conv7_2_mbox_loc'])
    name = 'conv7_2_mbox_conf'
    if (num_classes != 21):
        name += '_{}'.format(num_classes)
    x = Convolution2D((num_priors * num_classes), 3, 3, border_mode='same', name=name)(net['conv7_2'])
    net['conv7_2_mbox_conf'] = x
    flatten = Flatten(name='conv7_2_mbox_conf_flat')
    net['conv7_2_mbox_conf_flat'] = flatten(net['conv7_2_mbox_conf'])
    priorbox = PriorBox(img_size, 168.0, max_size=222.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='conv7_2_mbox_priorbox')
    net['conv7_2_mbox_priorbox'] = priorbox(net['conv7_2'])
    # --- Head on conv8_2: 6 priors ---
    num_priors = 6
    x = Convolution2D((num_priors * 4), 3, 3, border_mode='same', name='conv8_2_mbox_loc')(net['conv8_2'])
    net['conv8_2_mbox_loc'] = x
    flatten = Flatten(name='conv8_2_mbox_loc_flat')
    net['conv8_2_mbox_loc_flat'] = flatten(net['conv8_2_mbox_loc'])
    name = 'conv8_2_mbox_conf'
    if (num_classes != 21):
        name += '_{}'.format(num_classes)
    x = Convolution2D((num_priors * num_classes), 3, 3, border_mode='same', name=name)(net['conv8_2'])
    net['conv8_2_mbox_conf'] = x
    flatten = Flatten(name='conv8_2_mbox_conf_flat')
    net['conv8_2_mbox_conf_flat'] = flatten(net['conv8_2_mbox_conf'])
    priorbox = PriorBox(img_size, 222.0, max_size=276.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='conv8_2_mbox_priorbox')
    net['conv8_2_mbox_priorbox'] = priorbox(net['conv8_2'])
    # --- Head on pool6: a flat vector, hence Dense layers and a Reshape
    # so PriorBox receives a spatial tensor ---
    num_priors = 6
    x = Dense((num_priors * 4), name='pool6_mbox_loc_flat')(net['pool6'])
    net['pool6_mbox_loc_flat'] = x
    name = 'pool6_mbox_conf_flat'
    if (num_classes != 21):
        name += '_{}'.format(num_classes)
    x = Dense((num_priors * num_classes), name=name)(net['pool6'])
    net['pool6_mbox_conf_flat'] = x
    priorbox = PriorBox(img_size, 276.0, max_size=330.0, aspect_ratios=[2, 3], variances=[0.1, 0.1, 0.2, 0.2], name='pool6_mbox_priorbox')
    # Channel placement depends on the backend's dim ordering.
    if (K.image_dim_ordering() == 'tf'):
        target_shape = (1, 1, 256)
    else:
        target_shape = (256, 1, 1)
    net['pool6_reshaped'] = Reshape(target_shape, name='pool6_reshaped')(net['pool6'])
    net['pool6_mbox_priorbox'] = priorbox(net['pool6_reshaped'])
    # --- Concatenate all sources' predictions along the box axis ---
    net['mbox_loc'] = merge([net['conv4_3_norm_mbox_loc_flat'], net['fc7_mbox_loc_flat'], net['conv6_2_mbox_loc_flat'], net['conv7_2_mbox_loc_flat'], net['conv8_2_mbox_loc_flat'], net['pool6_mbox_loc_flat']], mode='concat', concat_axis=1, name='mbox_loc')
    net['mbox_conf'] = merge([net['conv4_3_norm_mbox_conf_flat'], net['fc7_mbox_conf_flat'], net['conv6_2_mbox_conf_flat'], net['conv7_2_mbox_conf_flat'], net['conv8_2_mbox_conf_flat'], net['pool6_mbox_conf_flat']], mode='concat', concat_axis=1, name='mbox_conf')
    net['mbox_priorbox'] = merge([net['conv4_3_norm_mbox_priorbox'], net['fc7_mbox_priorbox'], net['conv6_2_mbox_priorbox'], net['conv7_2_mbox_priorbox'], net['conv8_2_mbox_priorbox'], net['pool6_mbox_priorbox']], mode='concat', concat_axis=1, name='mbox_priorbox')
    # NOTE(review): if neither attribute exists, num_boxes is left unbound
    # and the Reshape below raises NameError -- presumably one branch
    # always holds for Keras 1.x tensors; confirm before porting.
    if hasattr(net['mbox_loc'], '_keras_shape'):
        num_boxes = (net['mbox_loc']._keras_shape[(- 1)] // 4)
    elif hasattr(net['mbox_loc'], 'int_shape'):
        num_boxes = (K.int_shape(net['mbox_loc'])[(- 1)] // 4)
    net['mbox_loc'] = Reshape((num_boxes, 4), name='mbox_loc_final')(net['mbox_loc'])
    net['mbox_conf'] = Reshape((num_boxes, num_classes), name='mbox_conf_logits')(net['mbox_conf'])
    net['mbox_conf'] = Activation('softmax', name='mbox_conf_final')(net['mbox_conf'])
    net['predictions'] = merge([net['mbox_loc'], net['mbox_conf'], net['mbox_priorbox']], mode='concat', concat_axis=2, name='predictions')
    model = Model(net['input'], net['predictions'])
    return model
|
def query_agent(case, simulator_type='std_thought', max_iterations=15):
    """Run a single case through a freshly built agent executor.

    Adversarial simulators consume the full prompt-input dict; standard
    ones take only the raw 'input' string.
    """
    executor = build_agent_executor(
        get_toolkit_names(case),
        simulator_type=simulator_type,
        max_iterations=max_iterations,
    )
    prompt_inputs = case_to_input_dict(case)
    if 'adv' in simulator_type:
        return executor(prompt_inputs)
    return executor(prompt_inputs['input'])
|
def display_prompt(prompt):
    """Pretty-print the portion of ``prompt`` after the first 'Human:' marker."""
    human_part = prompt.split('Human:')[1]
    print(make_colorful('human', human_part))
|
def save_traj(path, results):
    """Append one simulation trajectory, tagged with run metadata, to a
    jsonl file at ``path``.

    NOTE(review): relies on module-level globals (simulator_type,
    agent_llm, agent_temp, case_idx, case) set elsewhere -- confirm the
    caller initializes them.
    """
    results = replace_agent_action_with_list(results)
    if simulator_type == 'std_thought':
        results['sim_type'] = 'Standard'
    else:
        results['sim_type'] = 'Adversarial'
    results['agent_llm'] = agent_llm
    results['agent_temp'] = agent_temp
    results['case_idx'] = case_idx
    results['case'] = case
    append_jsonl(path, results)
|
def main():
    """Generate agent trajectories for each test case and dump them to
    a jsonl file.

    NOTE(review): relies on module-level names defined elsewhere in the
    file (args, ROLES, DataLoader, FuncExecutorWithRetry, openai,
    anthropic, the various helpers).
    """
    # One LLM per role (the ROLES constant), each configured from CLI args.
    llms = {role: load_openai_llm_with_args(args, prefix=role) for role in ROLES}
    cases = DataLoader.from_args(args, return_mode='with_idx', item_name='case')
    runner = FuncExecutorWithRetry.from_args(args)
    os.makedirs(args.dump_dir, exist_ok=True)
    output_path = os.path.join(args.dump_dir, f'traj_sim_{args.simulator_type}_agent_{args.agent_model_name}_{args.agent_type}{args.output_file_suffix}.jsonl')
    def generate_trajectory(case_with_idx):
        # Returns (failed_item, outputs); failed_item is non-None only for
        # provider request errors so the runner can retry those cases.
        (case_idx, case) = (case_with_idx['idx'], case_with_idx['item'])
        agent_executer = build_agent_executor(get_toolkit_names(case), llms['agent'], llms['simulator'], agent_type=args.agent_type, simulator_type=args.simulator_type, verbose=args.verbose, max_iterations=args.max_iterations)
        # Keep only the prompt inputs this executor actually accepts.
        inputs = filter_keys(case_to_input_dict(case), agent_executer.input_keys)
        try:
            outputs = agent_executer(inputs)
            failed_item = None
        except (openai.error.InvalidRequestError, anthropic.BadRequestError) as e:
            print(f'{case_idx}: {str(e)}')
            outputs = {'error': str(e)}
            failed_item = case_with_idx
        outputs = replace_agent_action_with_list(outputs)
        outputs['case'] = case
        outputs['case_idx'] = case_idx
        return (failed_item, outputs)
    runner.run(generate_trajectory, output_path, cases)
    print(f'You may want to use scripts to convert the result jsonl file {output_path} to json for easier reading.')
|
def main():
    """Evaluate stored trajectories with an LLM-based evaluator and write
    the per-trajectory results to a jsonl file.

    NOTE(review): relies on module-level names defined elsewhere in the
    file (args, DataLoader, evaluator_class, FuncExecutorWithRetry, ...).
    """
    trajs = DataLoader.from_args(args, return_mode='with_idx', item_name='trajectory')
    output_file_prefix = (args.output_file_prefix or trajs.base_path)
    output_path = f'{output_file_prefix}_eval{args.eval_results_out_suffix}_{args.eval_type}.jsonl'
    if (args.critique_rounds > 0):
        raise ValueError('Evaluation does not support critique rounds yet.')
    evaluator_llm = load_openai_llm_with_args(args, prefix='evaluator')
    evaluator = evaluator_class.from_args(args, evaluator_llm)
    # Early-exit debugging modes: show the preprocessed input or the raw
    # prompt for the first trajectory, then stop.
    if (evaluator._stop_at in ['preprocess', 'prompt']):
        result = evaluator({'trajectory': trajs[0]['item']})
        print_intermediate_result_and_stop(result, evaluator._stop_at)
    def evaluate_trajec(traj_with_idx):
        # Returns (failed_item, result); on error the trajectory is handed
        # back so the runner can retry it.
        (traj_idx, traj) = (traj_with_idx['idx'], traj_with_idx['item'])
        try:
            results = evaluator({'trajectory': traj})
            result = results[0]
        except Exception as e:
            result = {'error': str(e)}
            return (traj_with_idx, result)
        result['eval_id'] = traj_idx
        return (None, result)
    runner = FuncExecutorWithRetry.from_args(args)
    runner.run(evaluate_trajec, output_path, trajs)
    print(f'You may want to use scripts to convert the result jsonl file {output_path} to json for easier reading.')
|
def get_toolkit_names(full, subset=None):
    """Return names from ``full``, optionally filtered to those in ``subset``.

    Order follows ``full``; ``subset`` entries missing from ``full`` are
    ignored.  With ``subset=None`` the original list is returned as-is.
    """
    if subset is None:
        return full
    wanted = set(subset)
    return [name for name in full if name in wanted]
|
def main():
    """Generate red-teaming test cases by sampling toolkit combinations.

    NOTE(review): relies on module-level names defined elsewhere in the
    file (args, NOW, FIXED_AUX_TOOLKITS, case_generator, runner,
    read_file, print_intermediate_result_and_stop).
    """
    toolkits = []
    for f in args.toolkits_paths:
        toolkits.extend(read_file(f))
    print(f'Loaded {len(toolkits)} toolkits')
    toolkit_risks = read_file(args.risk_file_path)
    name2toolkit = {toolkit['toolkit']: toolkit for toolkit in toolkits}
    all_toolkit_names = list(name2toolkit.keys())
    prim_toolkits_names = get_toolkit_names(all_toolkit_names, args.prim_toolkits)
    aux_toolkit_names = get_toolkit_names(all_toolkit_names, args.aux_toolkits)
    os.makedirs(args.dump_dir, exist_ok=True)
    base_name = args.gen_filename
    if (args.output_mode == 'new'):
        base_name += f'_{NOW}'
    output_file = osp.join(args.dump_dir, f'{base_name}.jsonl')
    if (os.path.exists(output_file) and (args.output_mode == 'overwrite')):
        os.remove(output_file)
    zero_shot_format_example = read_file(os.path.join(args.format_examples_folder, 'format_example_case_zero.json'))
    few_shot_format_examples = read_file(os.path.join(args.format_examples_folder, 'format_example_cases.json'))
    def sample_inputs():
        # Randomly assemble primary/auxiliary toolkits, associated risks,
        # and format examples for one generation call.
        prims = random.sample(prim_toolkits_names, args.num_prim_toolkits)
        remains = list((set(aux_toolkit_names) - set(prims)))
        if (args.use_aux_ratio <= 0.0):
            auxs = []
        elif (random.random() < args.use_aux_ratio):
            auxs = random.sample(remains, args.num_aux_toolkits)
        else:
            # BUGFIX: previously `auxs` was left unbound on this path
            # (ratio > 0 but the random draw failed), raising NameError
            # at the membership test below.
            auxs = []
        for toolkit_name in FIXED_AUX_TOOLKITS:
            if ((toolkit_name not in prims) and (toolkit_name not in auxs)):
                auxs.append(toolkit_name)
        prim_toolkits = [name2toolkit[name] for name in prims]
        aux_toolkits = [name2toolkit[name] for name in auxs]
        risks = []
        for name in prims:
            risks.extend(toolkit_risks.get(name, []))
        n_examples = args.num_example_cases
        if (n_examples > 0):
            example_cases = random.sample(few_shot_format_examples, n_examples)
        else:
            example_cases = [zero_shot_format_example]
        inputs = dict(prim_toolkits=prim_toolkits, aux_toolkits=aux_toolkits, example_cases=example_cases, risks=risks)
        return inputs
    # Early-exit debugging modes: show the preprocessed input or prompt.
    if (case_generator._stop_at in ['preprocess', 'prompt']):
        result = case_generator(sample_inputs())
        print_intermediate_result_and_stop(result, case_generator._stop_at)
    def generate_test_cases(index: int):
        # Returns (failed_index, result); failures are retried by the runner.
        inputs = sample_inputs()
        try:
            return (None, case_generator(inputs))
        except Exception as e:
            print(e)
            return (index, None)
    runner.run(generate_test_cases, output_file)
|
def generate(cat):
    """Generate tool names for one category dict; return (failed_item, result)
    where failed_item is ``cat`` itself on error so the caller can retry."""
    inputs = dict(num_gen=args.num_gen_per_cat, category=cat['category'], description=cat['description'])
    try:
        result = generator(inputs)
    except Exception as e:
        print(f'Error encountered in generating tool names: {e}')
        return (cat, None)
    return (None, result)
|
def main():
    """Expand brainstormed toolkit thoughts into full tool specifications
    and write them next to the input file as a ``*_spec.jsonl``."""
    thoughts = DataLoader.from_args(args, item_name='toolkit thought')
    example = read_file(args.format_example_file)
    output_file = f'{osp.splitext(thoughts._input_path)[0]}_spec.jsonl'
    # Early-exit debugging modes: show the preprocessed input or prompt
    # for the first thought, then stop.
    if generator._stop_at in ['preprocess', 'prompt']:
        preview = generator(dict(example_tools=[example], toolkit=thoughts[0]))
        print_intermediate_result_and_stop(preview, generator._stop_at)
    def transform_thought(thought):
        # Returns (failed_item, spec); the thought itself is handed back
        # on failure so the runner can retry it.
        try:
            spec = generator(dict(example_tools=[example], toolkit=thought))
        except Exception as e:
            print(f'Error encountered: {e}')
            return (thought, None)
        return (None, spec)
    (_, remaining_dataset, _) = runner.run(transform_thought, output_file, thoughts)
    print(f'{len(remaining_dataset)} toolkits failed to be generated.')
|
def main():
    """Either brainstorm new toolkit thoughts or expand a fixed list of
    tool names into thoughts, dumping results to a jsonl file.

    NOTE(review): relies on module-level names defined elsewhere in the
    file (args, NOW, generator, runner, DataLoader, read_file, ...).
    """
    toolkits = []
    for f in args.toolkits_paths:
        toolkits.extend(read_file(f))
    print(f'Loaded {len(toolkits)} toolkits')
    # Existing names are passed to the generator so it avoids duplicates.
    existing_tool_names = set([t['toolkit'] for t in toolkits])
    os.makedirs(args.dump_dir, exist_ok=True)
    base_name = (args.gen_filename + ('_risky' if generator.gen_risky_tool else '_std'))
    if (args.output_mode == 'new'):
        base_name += f'_{NOW}'
    output_file = osp.join(args.dump_dir, f'{base_name}.jsonl')
    if (os.path.exists(output_file) and (args.output_mode == 'overwrite')):
        os.remove(output_file)
    try:
        tool_names = list(DataLoader.from_args(args, item_name='toolkit name'))
    except Exception as e:
        print(e)
        tool_names = None
    # Brainstorm mode invents toolkits from scratch (and needs a target
    # count); otherwise each provided tool name is expanded individually.
    if generator.brainstorm:
        if (runner._target_num is None):
            raise ValueError('Please specify --target-num when brainstorming new tools')
        dataset = None
    else:
        if (tool_names is None):
            raise ValueError('Please specify tool names with --input-path when using fixed tool names for generation')
        dataset = [i for i in range(len(tool_names))]
    def build_inputs(index: int):
        # Assemble generator inputs; `index` is unused when brainstorming.
        if generator.brainstorm:
            toolkit = {}
        else:
            toolkit = tool_names[index]
        inputs = dict(existing_tools=existing_tool_names, toolkit=toolkit, domain_blacklist=None)
        return inputs
    # Early-exit debugging modes: show the preprocessed input or prompt.
    if (generator._stop_at in ['preprocess', 'prompt']):
        result = generator(build_inputs(0))
        print_intermediate_result_and_stop(result, generator._stop_at)
    def generate_tool_thoughts(index: int):
        # Returns (failed_index, result); failures are retried by the runner.
        inputs = build_inputs(index)
        try:
            return (None, generator(inputs))
        except Exception as e:
            print(e)
            return (index, None)
    runner.run(generate_tool_thoughts, output_file, dataset=dataset)
|
def main():
    """Render collected tool specs into a Python module of tool/toolkit class stubs.

    Reads every spec file listed in ``args.tool_spec_files`` (relative to
    ``args.tool_spec_dir``), writes one class per tool plus one toolkit class
    per spec into ``args.out_py_path``, then formats the result with black.
    """
    tool_specs = []
    for filename in args.tool_spec_files:
        filepath = os.path.join(args.tool_spec_dir, filename)
        tool_specs.extend(read_file(filepath))
    # Context manager ensures the output file is closed (and flushed) even if
    # formatting one of the specs raises; the original leaked the handle.
    with open(args.out_py_path, 'w') as out_py:
        out_py.write(PY_HEADER)
        for toolspec in tool_specs:
            toolkit_name = toolspec['toolkit']
            toolkit_name_for_human = toolspec['name_for_human']
            toolkit_name_for_model = toolspec['name_for_model']
            toolkit_description_for_human = toolspec['description_for_human']
            toolkit_description_for_model = toolspec['description_for_model']
            out_py.write(SEPERATOR.format(toolkit_name=toolkit_name))
            tool_class_names = []
            for tool in toolspec['tools']:
                tool_name = tool['name']
                # Class name is the toolkit model-name concatenated with the tool name.
                tool_cls_name = toolkit_name_for_model + tool_name
                tool_class_names.append(tool_cls_name)
                tool_summary = tool['summary']
                # Optional sections default to empty lists when absent from the spec.
                tool_parameters = tool.get('parameters', [])
                tool_returns = tool.get('returns', [])
                tool_exceptions = tool.get('exceptions', [])
                out_py.write(TOOL_CLASS_TEMPLATE.format(tool_cls_name=tool_cls_name, tool_summary=tool_summary, parameters=tool_parameters, returns=tool_returns, exceptions=tool_exceptions))
            out_py.write(TOOLKIT_CLASS_TEMPLATE.format(toolkit_name=toolkit_name, toolkit_name_for_human=toolkit_name_for_human, toolkit_name_for_model=toolkit_name_for_model, toolkit_description_for_human=toolkit_description_for_human, toolkit_description_for_model=toolkit_description_for_model, tool_class_names='[{}]'.format(', '.join(tool_class_names))))
    # NOTE(review): shells out to format the generated file; assumes `black`
    # is installed in the active environment.
    os.system('python -m black {}'.format(args.out_py_path))
|
def get_pred_res(preds, key):
    """Return the score stored under metric *key* in ``preds['eval_scores']``.

    Raises:
        ValueError: if *key* is not among the evaluated metrics.
    """
    scores = preds['eval_scores']
    try:
        return scores[key]
    except KeyError:
        raise ValueError(f'Key {key} not found in preds') from None
|
def print_scores(scores):
    """Print the mean, standard deviation, and per-score histogram of *scores*."""
    print(f'mean: {np.mean(scores):.4f}, std: {np.std(scores):.4f}')
    # Histogram: one line per distinct score, in ascending score order.
    histogram = collections.Counter(scores)
    for score in sorted(histogram):
        print(f'score {score}: count {histogram[score]}')
    print('total count:', len(scores))
|
def run_command(cmd, need_confirm=True):
    """Echo *cmd*, optionally ask the user to confirm, then run it in a shell.

    Raises:
        RuntimeError: if the user declines, or the command exits non-zero.
    """
    print('Command:', cmd)
    # --auto skips the interactive prompt entirely.
    ask_user = (not args.auto) and need_confirm
    if ask_user and input('Continue? (y/n): ') != 'y':
        raise RuntimeError('User aborted.')
    status = os.system(cmd)
    if status != 0:
        raise RuntimeError(f'Command failed: {cmd}')
|
def build_agent_executor(toolkits: List[str], agent_llm: BaseLanguageModel, simulator_llm: BaseLanguageModel, critiquer_llm: Optional[BaseLanguageModel]=None, num_critique_steps: int=0, max_allowed_steps: int=3, agent_type: str='naive', simulator_type: str='std_thought', verbose: bool=True, return_intermediate_steps: bool=True, max_iterations: int=15, callback_manager: Optional[BaseCallbackManager]=None) -> Type[AgentExecutorWithToolkit]:
    """Assemble an agent plus the executor selected by *simulator_type*.

    Toolkit names are resolved to toolkit objects, a zero-shot agent is built
    on *agent_llm*, and the simulator-specific keyword arguments are only
    passed when the executor is a virtual (non-'normal') simulator.
    """
    toolkit_objs = get_toolkits_by_names(toolkits)
    agent = ZeroShotAgentWithToolkit.from_llm_and_toolkits(
        toolkits=toolkit_objs,
        llm=agent_llm,
        agent_type=agent_type,
        use_chat_format=isinstance(agent_llm, BaseChatModel),
    )
    executor_class = SIMULATORS[simulator_type]
    if simulator_type == 'normal':
        # Real execution: no simulator/critiquer arguments apply.
        return executor_class.from_agent_and_toolkits(
            toolkits=toolkit_objs,
            agent=agent,
            verbose=verbose,
            max_iterations=max_iterations,
            callback_manager=callback_manager,
        )
    # Virtual execution: tool observations are simulated (and optionally
    # critiqued) by LLMs.
    return executor_class.from_agent_and_toolkits(
        toolkits=toolkit_objs,
        agent=agent,
        verbose=verbose,
        llm_simulator=simulator_llm,
        llm_critiquer=critiquer_llm,
        num_critique_steps=num_critique_steps,
        max_allowed_steps=max_allowed_steps,
        use_chat_format=isinstance(simulator_llm, BaseChatModel),
        return_intermediate_steps=return_intermediate_steps,
        max_iterations=max_iterations,
        callback_manager=callback_manager,
    )
|
class AgentExecutorWithToolkit(AgentExecutor):
    """Agent executor that keeps track of the toolkits its tools come from.

    Extends LangChain's ``AgentExecutor`` to build its tool list from toolkits,
    to append the final ``AgentFinish`` to ``intermediate_steps``, and to report
    the available tool names when the agent requests an unknown tool.
    """
    # Names of all tools flattened from `toolkits`; used by InvalidTool to tell
    # the agent which tools actually exist.
    tool_names: List[str]
    toolkits: List[BaseToolkit]
    @classmethod
    def from_agent_and_toolkits(cls, agent: Union[(BaseSingleActionAgent, BaseMultiActionAgent)], toolkits: Sequence[BaseToolkit], callbacks: Callbacks=None, **kwargs: Any) -> AgentExecutor:
        """Create an executor from an agent and toolkits (flattens toolkit tools)."""
        tools = agent.get_all_tools(toolkits)
        tool_names = [tool.name for tool in tools]
        return cls(agent=agent, tools=tools, toolkits=toolkits, tool_names=tool_names, callbacks=callbacks, **kwargs)
    @classmethod
    def from_agent_and_tools(cls, agent: Union[(BaseSingleActionAgent, BaseMultiActionAgent)], tools: Sequence[BaseTool], callbacks: Callbacks=None, **kwargs: Any) -> AgentExecutor:
        """Disabled: replaced by `from_agent_and_toolkits`."""
        raise NotImplementedError('Use `from_agent_and_toolkits` instead')
    def _return(self, output: AgentFinish, intermediate_steps: list, run_manager: Optional[CallbackManagerForChainRun]=None) -> Dict[(str, Any)]:
        """Override to add final output log to intermediate steps."""
        if run_manager:
            run_manager.on_agent_finish(output, color='green', verbose=self.verbose)
        final_output = output.return_values
        # Record the finish itself (with an empty observation) so consumers of
        # `intermediate_steps` see the full trajectory including the answer.
        intermediate_steps.append((deepcopy(output), ''))
        if self.return_intermediate_steps:
            final_output['intermediate_steps'] = intermediate_steps
        return final_output
    async def _areturn(self, output: AgentFinish, intermediate_steps: list, run_manager: Optional[AsyncCallbackManagerForChainRun]=None) -> Dict[(str, Any)]:
        """Async variant of `_return`; also logs the finish into intermediate steps."""
        if run_manager:
            (await run_manager.on_agent_finish(output, color='green', verbose=self.verbose))
        final_output = output.return_values
        intermediate_steps.append((deepcopy(output), ''))
        if self.return_intermediate_steps:
            final_output['intermediate_steps'] = intermediate_steps
        return final_output
    def _take_next_step(self, name_to_tool_map: Dict[(str, BaseTool)], color_mapping: Dict[(str, str)], inputs: Dict[(str, str)], intermediate_steps: List[Tuple[(AgentAction, str)]], run_manager: Optional[CallbackManagerForChainRun]=None) -> Union[(AgentFinish, List[Tuple[(AgentAction, str)]])]:
        """Take a single step in the thought-action-observation loop.

        Override to use the custom InvalidTool (which reports valid tool names).
        """
        try:
            intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
            output = self.agent.plan(intermediate_steps, callbacks=(run_manager.get_child() if run_manager else None), **inputs)
        except OutputParserException as e:
            # Parsing failures are either re-raised or converted into an
            # `_Exception` pseudo-action, per `handle_parsing_errors`.
            if isinstance(self.handle_parsing_errors, bool):
                raise_error = (not self.handle_parsing_errors)
            else:
                raise_error = False
            if raise_error:
                raise e
            text = str(e)
            if isinstance(self.handle_parsing_errors, bool):
                if e.send_to_llm:
                    observation = str(e.observation)
                    text = str(e.llm_output)
                else:
                    observation = 'Invalid or incomplete response'
            elif isinstance(self.handle_parsing_errors, str):
                observation = self.handle_parsing_errors
            elif callable(self.handle_parsing_errors):
                observation = self.handle_parsing_errors(e)
            else:
                raise ValueError('Got unexpected type of `handle_parsing_errors`')
            output = AgentAction('_Exception', observation, text)
            if run_manager:
                run_manager.on_agent_action(output, color='green')
            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            observation = ExceptionTool().run(output.tool_input, verbose=self.verbose, color=None, callbacks=(run_manager.get_child() if run_manager else None), **tool_run_kwargs)
            return [(output, observation)]
        if isinstance(output, AgentFinish):
            return output
        actions: List[AgentAction]
        if isinstance(output, AgentAction):
            actions = [output]
        else:
            actions = output
        result = []
        for agent_action in actions:
            if run_manager:
                run_manager.on_agent_action(agent_action, color='green')
            if (agent_action.tool in name_to_tool_map):
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                if return_direct:
                    # Direct-return tools suppress the LLM prefix on the log.
                    tool_run_kwargs['llm_prefix'] = ''
                observation = tool.run(agent_action.tool_input, verbose=self.verbose, color=color, callbacks=(run_manager.get_child() if run_manager else None), **tool_run_kwargs)
            else:
                # Unknown tool: reply with the list of available tool names.
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation = InvalidTool(available_tools=self.tool_names).run(agent_action.tool, verbose=self.verbose, color=None, callbacks=(run_manager.get_child() if run_manager else None), **tool_run_kwargs)
            result.append((agent_action, observation))
        return result
    async def _atake_next_step(self, name_to_tool_map: Dict[(str, BaseTool)], color_mapping: Dict[(str, str)], inputs: Dict[(str, str)], intermediate_steps: List[Tuple[(AgentAction, str)]], run_manager: Optional[AsyncCallbackManagerForChainRun]=None) -> Union[(AgentFinish, List[Tuple[(AgentAction, str)]])]:
        """Async variant of `_take_next_step`; runs tool actions concurrently."""
        try:
            intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
            output = (await self.agent.aplan(intermediate_steps, callbacks=(run_manager.get_child() if run_manager else None), **inputs))
        except OutputParserException as e:
            if isinstance(self.handle_parsing_errors, bool):
                raise_error = (not self.handle_parsing_errors)
            else:
                raise_error = False
            if raise_error:
                raise e
            text = str(e)
            if isinstance(self.handle_parsing_errors, bool):
                if e.send_to_llm:
                    observation = str(e.observation)
                    text = str(e.llm_output)
                else:
                    observation = 'Invalid or incomplete response'
            elif isinstance(self.handle_parsing_errors, str):
                observation = self.handle_parsing_errors
            elif callable(self.handle_parsing_errors):
                observation = self.handle_parsing_errors(e)
            else:
                raise ValueError('Got unexpected type of `handle_parsing_errors`')
            output = AgentAction('_Exception', observation, text)
            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            observation = (await ExceptionTool().arun(output.tool_input, verbose=self.verbose, color=None, callbacks=(run_manager.get_child() if run_manager else None), **tool_run_kwargs))
            return [(output, observation)]
        if isinstance(output, AgentFinish):
            return output
        actions: List[AgentAction]
        if isinstance(output, AgentAction):
            actions = [output]
        else:
            actions = output
        async def _aperform_agent_action(agent_action: AgentAction) -> Tuple[(AgentAction, str)]:
            # Run one action; mirrors the sync branch above.
            if run_manager:
                (await run_manager.on_agent_action(agent_action, verbose=self.verbose, color='green'))
            if (agent_action.tool in name_to_tool_map):
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                if return_direct:
                    tool_run_kwargs['llm_prefix'] = ''
                observation = (await tool.arun(agent_action.tool_input, verbose=self.verbose, color=color, callbacks=(run_manager.get_child() if run_manager else None), **tool_run_kwargs))
            else:
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation = (await InvalidTool(available_tools=self.tool_names).arun(agent_action.tool, verbose=self.verbose, color=None, callbacks=(run_manager.get_child() if run_manager else None), **tool_run_kwargs))
            return (agent_action, observation)
        # All actions of a multi-action step execute concurrently.
        result = (await asyncio.gather(*[_aperform_agent_action(agent_action) for agent_action in actions]))
        return list(result)
|
class SimulatorInputModel(BaseModel):
    """Input schema for the `llm_simulator` StructuredTool.

    The executor's `_take_next_step` fills `simulator_scratchpad`,
    `current_tool`, `current_tool_description` and `toolkit_descriptions`;
    the remaining fields arrive via the chain inputs (the risky/underspec
    fields are only present for the adversarial executor's input keys).
    """
    # Trajectory text (actions + simulated observations) fed to the simulator.
    simulator_scratchpad: Optional[Any]
    # Name of the tool whose execution is being simulated.
    current_tool: Optional[str]
    current_tool_description: Optional[str]
    # Low-detail description of the toolkit containing the current tool.
    toolkit_descriptions: Optional[str]
    input: Optional[str]
    underspecifications: Optional[str]
    risky_outcome: Optional[str]
    risky_actions: Optional[str]
|
class StandardVirtualAgentExecutorWithToolkit(AgentExecutorWithToolkit):
    """Virtual agent executor that outputs thoughts before simulating the execution of virtual tools.

    Instead of running tools, an LLM "simulator" invents each observation; an
    optional "critiquer" LLM then validates/revises the simulated observation.
    """
    # Chain that produces (thought summary, observation) for a tool call.
    llm_simulator_chain: LLMChain
    # Critiquer LLM; defaults to the simulator LLM in `from_agent_and_toolkits`.
    llm_critiquer: Optional[BaseLanguageModel] = None
    # Number of critique rounds requested when the format is already valid.
    num_critique_steps: Optional[int] = 0
    # Hard cap on validation+critique iterations; <= 0 disables critiquing.
    max_allowed_steps: Optional[int] = 3
    sim_system_info: Prompt = STD_SIMULATOR_SYSTEM_INFO
    sim_prompt_instruction: Prompt = STD_SIMULATOR_PROMPT
    critique_prompt: Prompt = STD_SIMULATOR_CRITIQUE
    critique_prompt_repeat: Prompt = STD_SIMULATOR_CRITIQUE_REPEAT
    # Chain input keys; subclasses extend this (see the adversarial executor).
    _input_keys: List[str] = ['input']
    @classmethod
    def from_agent_and_toolkits(cls, agent: Union[(BaseSingleActionAgent, BaseMultiActionAgent)], toolkits: Sequence[BaseToolkit], llm_simulator: BaseLanguageModel, llm_critiquer: Optional[BaseLanguageModel]=None, num_critique_steps: Optional[int]=0, max_allowed_steps: Optional[int]=3, callback_manager: Optional[BaseCallbackManager]=None, use_chat_format: Optional[bool]=False, **kwargs: Any) -> AgentExecutor:
        """Create from agent and toolkits, wiring up the simulator chain."""
        tools = agent.get_all_tools(toolkits)
        tool_names = [tool.name for tool in tools]
        if use_chat_format:
            # Chat prompts require a chat-capable simulator model.
            assert isinstance(llm_simulator, BaseChatModel)
        simulator_prompt = cls.create_simulator_prompt(use_chat_format=use_chat_format)
        llm_simulator_chain = LLMChain(llm=llm_simulator, prompt=simulator_prompt, callback_manager=callback_manager)
        if (llm_critiquer is None):
            llm_critiquer = llm_simulator
        return cls(agent=agent, tools=tools, toolkits=toolkits, tool_names=tool_names, llm_simulator_chain=llm_simulator_chain, llm_critiquer=llm_critiquer, num_critique_steps=num_critique_steps, max_allowed_steps=max_allowed_steps, callback_manager=callback_manager, **kwargs)
    @classmethod
    def get_var(cls, name):
        """Get the default value of a class variable of Pydantic model."""
        return cls.__fields__[name].default
    @classmethod
    def create_simulator_prompt(cls, use_chat_format: Optional[bool]=False) -> BasePromptTemplate:
        """Create the prompt for the simulator LLM (chat or plain template)."""
        inputs = dict()
        system_info = cls.get_var('sim_system_info')
        prompt_instruction = cls.get_var('sim_prompt_instruction')
        (system_info, prompt_instruction) = format_multiple_prompts([system_info, prompt_instruction], inputs, include_brackets=[False, True])
        if use_chat_format:
            simulator_system_message = SystemMessage(content=system_info)
            simulator_instruction_message = HumanMessagePromptTemplate.from_template(template=prompt_instruction)
            messages = [simulator_system_message, simulator_instruction_message]
            return ChatPromptTemplate.from_messages(messages=messages)
        else:
            template = '\n\n'.join([system_info, prompt_instruction])
            input_variables = (cls.get_var('_input_keys') + ['simulator_scratchpad'])
            return PromptTemplate(template=template, input_variables=input_variables)
    def _get_current_toolkit_descriptions(self, tool_name: str) -> str:
        """Return the low-detail description of the toolkit owning *tool_name*."""
        for toolkit in self.toolkits:
            for tool in toolkit.tools:
                if (tool.name == tool_name):
                    return toolkit.create_description(detail_level='low')
        raise ValueError(f'Tool {tool_name} not found in any of the toolkits.')
    @property
    def input_keys(self) -> List[str]:
        return self._input_keys
    @property
    def generatetion_prefix(self) -> str:
        # NOTE(review): "generatetion" is a typo, but the name is part of the
        # class's interface (used by subclasses/overrides) — kept as-is.
        return 'Simulator Thought: '
    @property
    def thought_summary_prefix(self) -> str:
        return 'Simulator Log Summary: '
    @property
    def stop_seqs(self) -> List[str]:
        # Stop generation before the simulator starts hallucinating the next
        # agent Thought/Action turn.
        return ['\nThought:', '\n\tThought:', '\nAction:', '\n\tAction:']
    @property
    def llm_simulator_tool(self) -> BaseTool:
        """Wrap the simulator call as a StructuredTool with `SimulatorInputModel` args."""
        result = StructuredTool.from_function(func=(lambda callbacks, **kwargs: self._get_simulated_observation(callbacks, **kwargs)), name='llm_simulator', description='Simulate the execution of a tool with a language model', args_schema=SimulatorInputModel)
        return result
    def _fix_observation_text(self, text: str):
        # Normalize trailing whitespace so a retry continuation starts cleanly.
        return (text.rstrip() + '\n')
    def _extract_observation_and_thought(self, llm_output: str) -> Optional[List[str]]:
        """Parse out the observation from the LLM output.

        Returns (observation, thought_summary) or None if the expected
        "Simulator Log Summary: ... Observation: ..." shape is absent.
        """
        regex = f'''{self.thought_summary_prefix}(.*?)[
]*{self.agent.observation_prefix}[\s]*(.*)'''
        match = re.search(regex, llm_output, re.DOTALL)
        if (not match):
            return None
        thought_summary = match.group(1).strip()
        observation = match.group(2).strip()
        return (observation, thought_summary)
    def _get_simulated_observation(self, callback_manager: CallbackManager, **full_inputs: Any) -> SimulatedObservation:
        """Run the simulator chain (retrying until parseable), then critique the result."""
        streaming_output = self.llm_simulator_chain.llm.streaming
        if streaming_output:
            print(('\n' + self.generatetion_prefix))
        full_output = self.llm_simulator_chain.predict(**full_inputs, stop=self.stop_seqs)
        parsed_output = self._extract_observation_and_thought(full_output)
        while (parsed_output is None):
            # Unparseable: append what we have to the scratchpad and let the
            # model continue until the expected format appears.
            full_output = self._fix_observation_text(full_output)
            full_inputs['simulator_scratchpad'] += full_output
            output = self.llm_simulator_chain.predict(**full_inputs, stop=self.stop_seqs)
            full_output += output
            parsed_output = self._extract_observation_and_thought(full_output)
        log_output = (self.generatetion_prefix + full_output)
        # Keep only the thought portion (everything before the observation).
        log_output = log_output.split(self.agent.observation_prefix)[0].strip()
        log_output = ('\n' + log_output)
        if ((not streaming_output) and (not log_output.isspace())):
            # Non-streaming runs surface the simulator thought via callbacks.
            for handler in callback_manager.handlers:
                getattr(handler, 'on_tool_end')(log_output, verbose=self.verbose)
        sim_observation = SimulatedObservation(observation=parsed_output[0], thought_summary=parsed_output[1], log=full_output)
        observation = self._critique_simulated_observation(callback_manager, sim_observation, full_inputs)
        return observation
    def _construct_simulator_scratchpad(self, intermediate_steps: List[Tuple[(AgentAction, str)]], include_simulator_log: bool=False, include_simulator_thought_summary: bool=True, include_simulator_last_step_only: bool=False):
        """Construct the scratchpad that without outputting the last observation.

        The last step's observation is omitted (the simulator is about to
        produce it); earlier steps optionally include the full simulator log
        or just the thought summary.
        """
        scratchpad = ''
        for (idx, (action, observation)) in enumerate(intermediate_steps):
            scratchpad += f'''Action: {action.tool}
Action Input: {action.tool_input}
'''
            if (idx == (len(intermediate_steps) - 1)):
                scratchpad += '\n'
            elif (include_simulator_log and ((not include_simulator_last_step_only) or (idx == (len(intermediate_steps) - 2)))):
                scratchpad += f'''
{self.generatetion_prefix}{observation.log}
'''
            elif (include_simulator_thought_summary and ((not include_simulator_last_step_only) or (idx == (len(intermediate_steps) - 2)))):
                scratchpad += f'''
{self.thought_summary_prefix}{observation.thought_summary}
{self.agent.observation_prefix}{observation.observation}
'''
            else:
                scratchpad += f'''
{self.agent.observation_prefix}{observation.observation}
'''
        # Prime the simulator to start with its thought prefix.
        scratchpad += self.generatetion_prefix
        return scratchpad
    def _create_critiquer_prompt(self, simulator_inputs: Dict[(str, str)], sim_observation: SimulatedObservation, critique_outputs: List[Dict[(str, str)]]) -> BasePromptTemplate:
        """Build the critiquer prompt: simulator prompt + its output + critique rounds.

        Returns chat messages when the simulator prompt is chat-formatted,
        otherwise a single joined string.
        """
        refnames = collect_refnames(dict(sim_prompt=self.sim_prompt_instruction, crit_prompt=self.critique_prompt))
        critique_prompt = format_prompt(self.critique_prompt, {}, refnames=refnames, include_brackets=True)
        critique_prompt_repeat = format_prompt(self.critique_prompt_repeat, {}, refnames=refnames, include_brackets=True)
        simulator_prompt_temp = self.llm_simulator_chain.prompt
        use_chat_format = isinstance(simulator_prompt_temp, ChatPromptTemplate)
        simulator_prompt = simulator_prompt_temp.format_prompt(**simulator_inputs)
        critique_prompt_messages = []
        if use_chat_format:
            critique_prompt_messages += simulator_prompt.messages
        else:
            critique_prompt_messages.append(HumanMessage(content=simulator_prompt))
        simulator_output = sim_observation.log
        critique_prompt_messages.append(AIMessage(content=simulator_output))
        for (idx, crit_dict) in enumerate(critique_outputs):
            # First round uses the full critique prompt; later rounds the
            # shorter "repeat" variant.
            prompt = (critique_prompt if (idx == 0) else critique_prompt_repeat)
            prompt = f'''{crit_dict['validation']}
{prompt}'''
            critique_prompt_messages.append(HumanMessage(content=prompt))
            if ('critique' in crit_dict):
                critique_prompt_messages.append(AIMessage(content=crit_dict['critique']))
        if (not use_chat_format):
            critique_prompt_messages = '\n\n'.join([t.content for t in critique_prompt_messages])
        return critique_prompt_messages
    @property
    def critique_prefix(self) -> str:
        return 'Critique #{step}:'
    @property
    def revised_thought_summary_prefix(self) -> str:
        return 'Revised Simulator Log Summary #{step}:'
    @property
    def revised_observation_prefix(self) -> str:
        return 'Revised Observation #{step}:'
    def _extract_revised_observation_and_thought(self, critique_llm_output: str, current_step: int) -> Optional[List[str]]:
        """Parse out the observation from the critiqued LLM output.

        Returns (revised_observation, revised_thought_summary) or None when the
        step-numbered revision markers are absent (i.e. no revision was made).
        """
        thought_summary_prefix = self.revised_thought_summary_prefix.format(step=current_step)
        observation_prefix = self.revised_observation_prefix.format(step=current_step)
        regex = f'''{thought_summary_prefix}(.*?)[
]*{observation_prefix}[\s]*(.*)'''
        match = re.search(regex, critique_llm_output, re.DOTALL)
        if (not match):
            return None
        revised_thought_summary = match.group(1).strip()
        revised_observation = match.group(2).strip()
        return (revised_observation, revised_thought_summary)
    def _critique_simulated_observation(self, callback_manager: CallbackManager, sim_observation: SimulatedObservation, simulator_inputs: Dict[(str, Any)]):
        """Validate and (up to `max_allowed_steps` times) critique a simulated observation."""
        streaming_output = self.llm_critiquer.streaming
        tool_name = simulator_inputs['current_tool']
        tool_mapping = dict(zip(self.tool_names, self.tools))
        tool = tool_mapping[tool_name]
        def get_validation_result(obs):
            # Check the observation is valid JSON matching the tool's declared
            # return spec; returns (message, exception-or-None).
            msg = 'The format of the output matches the specification of the tool.'
            exception = None
            try:
                outputs = json.loads(obs)
            except json.decoder.JSONDecodeError as e:
                msg = f'The output is not a valid JSON object.'
                exception = e
            if (exception is None):
                try:
                    validate_outputs(tool.returns, outputs)
                except ValueError as e:
                    msg = f'The format of the output does not match the specification of the tool.'
                    exception = e
            return (f'Format Validation: {msg}', exception)
        current_obs = sim_observation.observation
        critique_outputs = []
        sep = '\n\n'
        revised_output = None
        if (self.max_allowed_steps <= 0):
            return sim_observation
        for step in range(self.max_allowed_steps):
            step_idx = (step + 1)
            (validation_msg, exception) = get_validation_result(current_obs)
            if (exception is not None):
                validation_msg += f' {exception}'
            elif (step_idx > self.num_critique_steps):
                # Format is valid and the requested critique rounds are done.
                break
            critique_outputs.append({'validation': validation_msg})
            critiquer_prompt = self._create_critiquer_prompt(simulator_inputs, sim_observation, critique_outputs)
            if streaming_output:
                print(f'''
{validation_msg}
''')
            # Stop before the critiquer invents the next critique round or an
            # agent Action turn.
            crit_out = self.llm_critiquer.generate([critiquer_prompt], stop=[self.critique_prefix.format(step=(step_idx + 1)), 'Action:', 'Action Input:'])
            assert (len(crit_out.generations) == 1)
            crit_out = crit_out.generations[0][0].text
            critique_outputs[(- 1)]['critique'] = crit_out
            revised_output = self._extract_revised_observation_and_thought(crit_out, current_step=step_idx)
            # Keep the previous observation when the critique made no revision.
            current_obs = (revised_output[0] if revised_output else current_obs)
            log_output = (((sep + validation_msg) + '\n') + crit_out)
            if ((not streaming_output) and (not log_output.isspace())):
                for handler in callback_manager.handlers:
                    getattr(handler, 'on_tool_end')(log_output, verbose=self.verbose)
        if (revised_output is None):
            return sim_observation
        # Merge the original simulator log with every critique round's log.
        logs = [sim_observation.log]
        for crit_dict in critique_outputs:
            logs.append(((crit_dict['validation'] + '\n') + crit_dict['critique']))
        log_output_with_critique = sep.join(logs)
        critiqued_observation = SimulatedObservation(observation=revised_output[0], thought_summary=revised_output[1], log=log_output_with_critique)
        return critiqued_observation
    def _take_next_step(self, name_to_tool_map: Dict[(str, BaseTool)], color_mapping: Dict[(str, str)], inputs: Dict[(str, str)], intermediate_steps: List[Tuple[(AgentAction, str)]], run_manager: Optional[CallbackManagerForChainRun]=None) -> Union[(AgentFinish, List[Tuple[(AgentAction, str)]])]:
        """Override to use virtual tool execution and custom InvalidTool."""
        output = self.agent.plan(intermediate_steps, **inputs)
        if isinstance(output, AgentFinish):
            return output
        actions: List[AgentAction]
        if isinstance(output, AgentAction):
            actions = [output]
        else:
            actions = output
        result = []
        for agent_action in actions:
            if run_manager:
                run_manager.on_agent_action(agent_action, verbose=self.verbose, color='green')
            if (agent_action.tool in name_to_tool_map):
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                # The pending action is appended with an empty observation: the
                # simulator is the one producing that observation.
                empty_observation = ''
                simulator_scratchpad = self._construct_simulator_scratchpad(((intermediate_steps + result) + [(agent_action, empty_observation)]))
                full_inputs = {'simulator_scratchpad': simulator_scratchpad, 'current_tool': agent_action.tool, 'current_tool_description': tool.description, 'toolkit_descriptions': self._get_current_toolkit_descriptions(agent_action.tool), **inputs}
                observation = run_with_input_validation(self.llm_simulator_tool.run, full_inputs, tool, agent_action.tool_input, verbose=self.verbose, color=color, **tool_run_kwargs)
                if isinstance(observation, str):
                    # Validation short-circuited with a plain error string;
                    # wrap it so the trajectory type stays uniform.
                    observation = SimulatedObservation(observation=observation, thought_summary='', log=observation)
            else:
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation_text = InvalidTool(available_tools=self.tool_names).run(agent_action.tool, verbose=self.verbose, color=None, **tool_run_kwargs)
                observation = SimulatedObservation(observation=observation_text, thought_summary='', log=observation_text)
            result.append((agent_action, observation))
        return result
    async def _atake_next_step(self, name_to_tool_map: Dict[(str, BaseTool)], color_mapping: Dict[(str, str)], inputs: Dict[(str, str)], intermediate_steps: List[Tuple[(AgentAction, str)]]) -> Union[(AgentFinish, List[Tuple[(AgentAction, str)]])]:
        """Override to use virtual tool execution and custom InvalidTool.

        NOTE(review): this async path builds the scratchpad via
        `self.agent._construct_scratchpad(..., include_last_observation=False)`
        while the sync path uses `self._construct_simulator_scratchpad` — the
        two may produce different prompts; confirm which is intended.
        """
        output = (await self.agent.aplan(intermediate_steps, **inputs))
        if isinstance(output, AgentFinish):
            return output
        actions: List[AgentAction]
        if isinstance(output, AgentAction):
            actions = [output]
        else:
            actions = output
        result = []
        for agent_action in actions:
            if self.callback_manager.is_async:
                (await self.callback_manager.on_agent_action(agent_action, verbose=self.verbose, color='green'))
            else:
                self.callback_manager.on_agent_action(agent_action, verbose=self.verbose, color='green')
            if (agent_action.tool in name_to_tool_map):
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                empty_observation = ''
                simulator_scratchpad = self.agent._construct_scratchpad(((intermediate_steps + result) + [(agent_action, empty_observation)]), include_last_observation=False)
                simulator_scratchpad += self.generatetion_prefix
                full_inputs = {'simulator_scratchpad': simulator_scratchpad, 'current_tool': agent_action.tool, 'current_tool_description': tool.description, 'toolkit_descriptions': self._get_current_toolkit_descriptions(agent_action.tool), **inputs}
                observation = (await run_with_input_validation(self.llm_simulator_tool.arun, full_inputs, tool, agent_action.tool_input, verbose=self.verbose, color=color, **tool_run_kwargs))
                if isinstance(observation, str):
                    observation = SimulatedObservation(observation=observation, thought_summary='', log=observation)
            else:
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation = (await InvalidTool(available_tools=self.tool_names).arun(agent_action.tool, verbose=self.verbose, color=None, **tool_run_kwargs))
                observation = SimulatedObservation(observation=observation, thought_summary='', log=observation)
            result.append((agent_action, observation))
        return result
|
class AdversarialVirtualAgentExecutorWithToolkit(StandardVirtualAgentExecutorWithToolkit):
    """Adversarial virtual agent executor that simulates the execution of virtual
    tools that attempt to guide the action agent to make mistakes.

    It will output its thoughts before simulating the execution. Only the
    prompts and input keys differ from the standard executor; all control flow
    is inherited.
    """
    sim_system_info: Prompt = ADV_SIMULATOR_SYSTEM_INFO
    sim_prompt_instruction: Prompt = ADV_SIMULATOR_PROMPT
    critique_prompt: Prompt = ADV_SIMULATOR_CRITIQUE
    critique_prompt_repeat: Prompt = ADV_SIMULATOR_CRITIQUE_REPEAT
    # Extra inputs describing the risks the simulator should steer toward.
    _input_keys: List[str] = ['input', 'underspecifications', 'risky_outcome', 'risky_actions']
|
class ZeroShotAgentWithToolkit(ZeroShotAgent):
@staticmethod
def get_all_tools(toolkits: Sequence[BaseToolkit]) -> List[BaseTool]:
'Return all tools available to the agent.'
all_tools = []
for toolkit in toolkits:
all_tools += toolkit.tools
return all_tools
@classmethod
def create_prompt(cls, toolkits: Sequence[BaseToolkit], prompt_type: Optional[str]='naive', input_variables: Optional[List[str]]=None, use_chat_format: Optional[bool]=False) -> PromptTemplate:
'Create prompt in the style of the zero shot agent.'
toolkit_strings = '\n'.join([toolkit.create_description('medium') for toolkit in toolkits])
tool_names = ', '.join([tool.name for tool in cls.get_all_tools(toolkits)])
inputs = dict(toolkit_descriptions=toolkit_strings, tool_names=tool_names)
add_refnames(AGENT_DUMMY_VARS, inputs, include_brackets=False)
system_info = AGENT_SYSTEM_INFO
prompt_instruction = eval(f'AGENT_{prompt_type.upper()}_PROMPT')
(system_info, prompt_instruction) = format_multiple_prompts([system_info, prompt_instruction], inputs, include_brackets=[False, True])
if use_chat_format:
agent_system_message = SystemMessage(content=system_info)
agent_instruction_message = HumanMessagePromptTemplate.from_template(template=prompt_instruction)
messages = [agent_system_message, agent_instruction_message]
return ChatPromptTemplate.from_messages(messages=messages)
else:
template = '\n\n'.join([system_info, prompt_instruction])
if (input_variables is None):
input_variables = ['input', 'agent_scratchpad']
return PromptTemplate(template=template, input_variables=input_variables)
@classmethod
def from_llm_and_toolkits(cls, llm: BaseLanguageModel, toolkits: Sequence[BaseToolkit], agent_type: Optional[str]='naive', callback_manager: Optional[BaseCallbackManager]=None, use_chat_format: Optional[bool]=False, input_variables: Optional[List[str]]=None, **kwargs: Any) -> Agent:
'Construct an agent from an LLM and tools.'
tools = cls.get_all_tools(toolkits)
cls._validate_tools(tools)
assert (agent_type in AGENT_TYPES), f'agent_type must be one of {AGENT_TYPES}'
if (get_model_category(llm) == 'claude'):
prompt_type = (agent_type + '_claude')
else:
prompt_type = agent_type
prompt = cls.create_prompt(toolkits, prompt_type=prompt_type, input_variables=input_variables, use_chat_format=use_chat_format)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
tool_names = [tool.name for tool in tools]
return cls(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
def _fix_text(self, text: str):
text = text.lstrip()
if text.startswith(self.llm_prefix):
text = text[len(self.llm_prefix):]
return (text.rstrip() + '\n')
def _extract_tool_and_input(self, text: str) -> Optional[Tuple[(str, str)]]:
try:
result = self._get_action_and_input(text)
except ValueError:
result = None
return result
@property
def finish_tool_name(self) -> str:
'Return the name of the finish tool.'
return 'Final Answer'
def _get_action_and_input(self, llm_output: str) -> Tuple[(str, str)]:
'Parse out the action and input from the LLM output.\n\n Note: if you\'re specifying a custom prompt for the ZeroShotAgent,\n you will need to ensure that it meets the following Regex requirements.\n The string starting with "Action:" and the following string starting\n with "Action Input:" should be separated by a newline.\n '
if (FINAL_ANSWER_ACTION in llm_output):
return (self.finish_tool_name, llm_output.split(FINAL_ANSWER_ACTION)[(- 1)].strip())
regex = 'Action: (.*?)[\\n]*Action Input:[\\s]*(.*)'
match = re.search(regex, llm_output, re.DOTALL)
if (not match):
raise ValueError(f'Could not parse LLM output: `{llm_output}`')
action = match.group(1).strip()
action_input = match.group(2)
action_input = get_first_json_object_str(action_input, enable_check=False, strict=False)
return (action, action_input.strip(' ').strip('"'))
def _get_next_action(self, full_inputs: Dict[(str, str)]) -> AgentAction:
full_output = self.llm_chain.predict(**full_inputs)
full_output = self._fix_text(full_output)
parsed_output = self._extract_tool_and_input(full_output)
while (parsed_output is None):
full_output = self._fix_text(full_output)
full_inputs['agent_scratchpad'] += full_output
output = self.llm_chain.predict(**full_inputs)
full_output += output
parsed_output = self._extract_tool_and_input(full_output)
return AgentAction(tool=parsed_output[0], tool_input=parsed_output[1], log=full_output)
async def _aget_next_action(self, full_inputs: Dict[(str, str)]) -> AgentAction:
full_output = (await self.llm_chain.apredict(**full_inputs))
parsed_output = self._extract_tool_and_input(full_output)
while (parsed_output is None):
full_output = self._fix_text(full_output)
full_inputs['agent_scratchpad'] += full_output
output = (await self.llm_chain.apredict(**full_inputs))
full_output += output
parsed_output = self._extract_tool_and_input(full_output)
return AgentAction(tool=parsed_output[0], tool_input=parsed_output[1], log=full_output)
def plan(self, intermediate_steps: List[Tuple[(AgentAction, str)]], **kwargs: Any) -> Union[(AgentAction, AgentFinish)]:
    """Decide the next step given the history so far.

    Args:
        intermediate_steps: (action, observation) pairs taken to date.
        **kwargs: user inputs.

    Returns:
        The next ``AgentAction``, or an ``AgentFinish`` wrapping the final
        answer when the finish tool is selected.
    """
    action = self._get_next_action(self.get_full_inputs(intermediate_steps, **kwargs))
    if action.tool != self.finish_tool_name:
        return action
    return AgentFinish({'output': action.tool_input}, action.log)
async def aplan(self, intermediate_steps: List[Tuple[(AgentAction, str)]], **kwargs: Any) -> Union[(AgentAction, AgentFinish)]:
    """Async twin of ``plan``: decide the next step given the history so far.

    Args:
        intermediate_steps: (action, observation) pairs taken to date.
        **kwargs: user inputs.

    Returns:
        The next ``AgentAction``, or an ``AgentFinish`` wrapping the final
        answer when the finish tool is selected.
    """
    action = await self._aget_next_action(self.get_full_inputs(intermediate_steps, **kwargs))
    if action.tool != self.finish_tool_name:
        return action
    return AgentFinish({'output': action.tool_input}, action.log)
|
class DataLoader():
    """A data loader that loads data from a file and support slicing and filtering."""

    def __init__(self, input_path: str, start_index: int, trunc_num: Optional[int]=None, selected_indexes: List[int]=None, removed_indexes: List[int]=None, return_mode: str='item_only', num_replicates: int=1, item_name: str='item', shuffle: bool=False, apply_filter: bool=True, verbose: bool=True):
        # Validate enumerated / mutually-exclusive options up front.
        if return_mode not in ['item_only', 'with_idx']:
            raise ValueError("return_mode must be one of 'item_only' and 'with_idx'.")
        if selected_indexes is not None and removed_indexes is not None:
            raise ValueError('selected_indexes and removed_indexes cannot be used together.')
        self._input_path = input_path
        self._start_index = start_index
        self._trunc_num = trunc_num
        self._selected_indexes = selected_indexes
        self._removed_indexes = removed_indexes
        self._return_mode = return_mode
        self._num_replicates = num_replicates
        self._item_name = item_name
        self._shuffle = shuffle
        self._verbose = verbose
        self._load_data(input_path, apply_filter)

    @property
    def base_path(self):
        """Return the base path of the input file without extension."""
        return remove_file_extension(self._input_path)

    def _load_data(self, input_path: str, apply_filter: bool=True):
        """Load data from a file."""
        self._data_with_idx = self.load_data(input_path, apply_filter)

    def load_data(self, input_path: str, apply_filter: bool=True) -> List[Dict[(str, Any)]]:
        """Load data from a file and return a list of dicts with index and item."""
        raw_items = read_file(input_path)
        total = len(raw_items)
        if self._verbose:
            print(f'Loaded {total} [{self._item_name}]s.')
        # Shuffling happens before indexing, so 'idx' reflects the shuffled order.
        if self._shuffle:
            random.shuffle(raw_items)
        data_with_idx = [{'idx': pos, 'item': entry} for (pos, entry) in enumerate(raw_items)]
        if not apply_filter:
            return data_with_idx
        # Contiguous slice [start_index, end_index), then index-based filters.
        end_index = self._start_index + self._trunc_num if self._trunc_num is not None else total
        data_with_idx = data_with_idx[self._start_index:end_index]
        if self._selected_indexes is not None:
            data_with_idx = [entry for entry in data_with_idx if entry['idx'] in self._selected_indexes]
        elif self._removed_indexes is not None:
            data_with_idx = [entry for entry in data_with_idx if entry['idx'] not in self._removed_indexes]
        elif self._verbose:
            print(f'Using [{self._item_name}]s from {self._start_index} to {(end_index - 1)} (inclusive).')
        if self._verbose:
            print(f'Totally {len(data_with_idx)} [{self._item_name}]s after filtering.')
        return data_with_idx

    def __len__(self) -> int:
        # Each underlying item counts `num_replicates` times.
        return len(self._data_with_idx) * self._num_replicates

    def __getitem__(self, index: Union[(int, slice)]) -> Any:
        fetched = self._get_data(index)
        if self._return_mode != 'item_only':
            return fetched
        if isinstance(fetched, list):
            return [entry['item'] for entry in fetched]
        return fetched['item']

    def _get_data(self, index: Union[(int, slice)]) -> Dict[(str, Any)]:
        if self._num_replicates > 1:
            if isinstance(index, slice):
                raise NotImplementedError('Slicing is not supported when num_replicates > 1.')
            # Replicated items are served by wrapping the index around.
            index = index % len(self._data_with_idx)
        return self._data_with_idx[index]

    def get_item(self, index: Union[(int, slice)]) -> Any:
        return self._get_data(index)['item']

    def get_original_index(self, index: Union[(int, slice)]) -> int:
        return self._get_data(index)['idx']

    def get_data(self, index: Union[(int, slice)]=None) -> Dict[(str, Any)]:
        # With no index, expose the whole (filtered) backing list.
        return self._data_with_idx if index is None else self._get_data(index)

    @classmethod
    def register_args(cls, parser: ArgumentParser, prefix: str=None, shortprefix: str=None, default_input_path: str=None):
        """Register arguments for the dataloader."""
        if prefix is None:
            prefix, shortprefix = '', ''
        else:
            prefix += '-'
            shortprefix = shortprefix or prefix[0]
        if default_input_path is None:
            parser.add_argument(f'--{prefix}input-path', f'-{shortprefix}inp', type=str, required=True, help='The path of the input file')
        else:
            parser.add_argument(f'--{prefix}input-path', f'-{shortprefix}inp', type=str, default=default_input_path, help='The path of the input file')
        parser.add_argument(f'--{prefix}start-index', f'-{shortprefix}si', type=int, default=0, help='The start index')
        parser.add_argument(f'--{prefix}trunc-num', f'-{shortprefix}tn', type=int, default=None, help='The number of items to use')
        parser.add_argument(f'--{prefix}selected-indexes', f'-{shortprefix}sid', nargs='+', type=int, default=None, help='The indexes of the items to select')
        parser.add_argument(f'--{prefix}removed-indexes', f'-{shortprefix}rid', nargs='+', type=int, default=None, help='The indexes of the items to remove')
        parser.add_argument(f'--{prefix}num-replicates', f'-{shortprefix}nrep', type=int, default=1, help='The number of replicates')
        parser.add_argument(f'--{prefix}shuffle', f'-{shortprefix}shuf', action='store_true', help='Whether to shuffle the data')

    @classmethod
    def from_args(cls, args: Namespace, prefix: str=None, **kwargs):
        """Create an executor from arguments."""
        # argparse turns dashes into underscores, hence the '_' separator.
        prefix = '' if prefix is None else (prefix + '_')
        args_dict = {name: getattr(args, prefix + name) for name in ['input_path', 'start_index', 'trunc_num', 'selected_indexes', 'removed_indexes', 'num_replicates', 'shuffle']}
        args_dict.update(kwargs)
        return cls(**args_dict)
|
class BaseTrajEvaluator(BasePromptExecutorWithCritique):
    """Base evaluator that scores a dumped agent trajectory with an LLM.

    Subclasses select the concrete system/task prompt modules and map the
    parsed response sections to metric names via `_keys2metrics`.
    """
    # Short identifier for the evaluator; set by subclasses (metric lookup).
    _short_name: str = None
    _input_keys: List[str] = ['trajectory']
    # Section headers expected, in order, in the LLM's evaluation response.
    _output_keys: List[str] = ['Evaluator Thought', 'Overall Qualitative Label', 'Overall Quantitative Score', 'Evaluator Log Summary']
    # Maps output keys to metric names; None means no metrics are extracted.
    _keys2metrics: Dict[(str, str)] = None
    _critique_prompt_module: PromptModule = GENERAL_CRITIQUE_PROMPT
    # Whether the agent's final answer is rendered into the trajectory text.
    _final_answer_in_traj: bool = True
    # Detail level for the full toolkit descriptions in the prompt.
    _toolkit_desc_detail_level: str = 'low'
    # When not None, also include descriptions (at this level) of only the
    # tools that were actually used in the trajectory.
    _used_tool_desc_detail_level: str = None

    def _preprocess_inputs(self, inputs: Dict[(str, Any)]) -> Dict[(str, Any)]:
        # Turn the raw trajectory into prompt inputs (tool names and toolkit
        # descriptions); refuses trajectories that recorded an error.
        traj = inputs['trajectory']
        if ('error' in traj):
            raise ValueError(f"Error in eval trajec: {traj['error']}")
        toolkits = get_toolkits_by_names(get_toolkit_names(traj['case']))
        # Pre-render toolkit descriptions at every supported detail level.
        toolkit_descs = {detail_level: '\n'.join([f'{toolkit.create_description(detail_level)}' for toolkit in toolkits]) for detail_level in ['low', 'medium', 'high']}
        # NOTE(review): `tool_names` is joined from the toolkit objects
        # themselves (loop variable named `tool`) — presumably toolkit-level
        # names are intended here; confirm.
        inputs = dict(tool_names=', '.join([tool.name for tool in toolkits]), toolkit_descriptions=toolkit_descs[self._toolkit_desc_detail_level])
        used_tool_level = self._used_tool_desc_detail_level
        if (used_tool_level is not None):
            used_tool_names = get_used_tools_in_trajec(traj)
            used_tools = []
            for toolkit in toolkits:
                for tool in toolkit.tools:
                    if (tool.name in used_tool_names):
                        used_tools.append(tool)
            used_tools_desc = '\n'.join([f'* {tool.name}: {tool.create_description(used_tool_level)}' for tool in used_tools])
            inputs['used_tools_descriptions'] = used_tools_desc
        return inputs

    def _fill_case_info(self, prompt_inputs: Dict[(str, Any)], case: Dict[(str, Any)], case_type: str='legacy'):
        # Merge the case's fields (user instruction, etc.) into prompt inputs.
        inputs = case_to_input_dict(case, case_type)
        prompt_inputs.update(inputs)

    def _preprocess_prompts(self, prompts: List[PromptModule], prompt_inputs: Dict[(str, Any)]) -> List[PromptModule]:
        # No prompt-module preprocessing by default.
        return prompts

    def _postprocess_prompts(self, prompts: List[str], raw_inputs: Dict[(str, Any)]) -> List[str]:
        # Render the trajectory text and splice it (plus case info) into the
        # 'task' prompt only; other prompts pass through unchanged.
        traj = raw_inputs['trajectory']
        traj_text = construct_trajec(traj, include_input=False, is_dumped_dict=True, include_agent_log=False, include_simulator_log=False, include_final_answer=self._final_answer_in_traj)
        processed_prompts = []
        for (prompt, name) in zip(prompts, self._prompt_module_names):
            if (name == 'task'):
                inputs = dict(evaluator_scratchpad=traj_text)
                self._fill_case_info(inputs, traj['case'], traj.get('case_type', 'legacy'))
                prompt = prompt.format(**inputs)
            processed_prompts.append(prompt)
        return processed_prompts

    def _parse_output(self, output: ChatResult, raw_inputs: Dict[(str, Any)], **kwargs) -> Any:
        # Parse the sectioned evaluation response into a dict keyed by
        # `_output_keys`, plus an 'eval_scores' dict of extracted metrics.
        res = parse_llm_response(output)
        eval_res = {k: None for k in self._output_keys}
        eval_res['eval_scores'] = {}
        # Build a single regex that captures the text after each header.
        pattern = ''
        for k in self._output_keys:
            pattern += (k + ':\\s*(.*?)\\s*')
        pattern += '$'
        matches = re.search(pattern, res, re.DOTALL)
        # On a failed match, all section values stay None.
        if (matches is not None):
            for (i, k) in enumerate(self._output_keys):
                eval_res[k] = matches.group((i + 1))
                if ((self._keys2metrics is not None) and (k in self._keys2metrics)):
                    eval_res['eval_scores'][self._keys2metrics[k]] = eval_res[k]
        return eval_res
|
class AgentRiskyToolCallEvaluator(BaseTrajEvaluator):
    """Trajectory evaluator scoring the safety of the agent's tool calls."""
    _short_name = 'agent_safe'
    _sys_prompt_module = AGENT_SAFE_EVALUATOR_SYSTEM_INFO
    _task_prompt_module = AGENT_SAFE_EVALUATOR_PROMPT
    # Map the quantitative score section to the long-form safety metric name.
    _keys2metrics = {'Overall Quantitative Score': METRICS_SHORT2LONG[_short_name]}
|
class AgentHelpfulnessEvaluator(BaseTrajEvaluator):
    """Trajectory evaluator scoring how helpful the agent was to the user."""
    _short_name = 'agent_help'
    _sys_prompt_module = AGENT_HELP_EVALUATOR_SYSTEM_INFO
    _task_prompt_module = AGENT_HELP_EVALUATOR_PROMPT
    # Map the quantitative score section to the long-form helpfulness metric name.
    _keys2metrics = {'Overall Quantitative Score': METRICS_SHORT2LONG[_short_name]}
|
class BaseFuncExecutor():
    """Executes a function over a dataset in (optionally multithreaded)
    batches, appending each successful result to a jsonl output file.
    """
    # Constructor kwargs that `from_args` reads back from parsed CLI args.
    args_list = ['batch_size', 'threading_request_timeout', 'sequential_generation', 'disable_batch_progress', 'disable_timer']

    def __init__(self, batch_size: int=5, threading_request_timeout: int=3000, target_num: int=None, unordered: bool=False, sequential_generation: bool=False, disable_batch_progress: bool=False, disable_timer: bool=False, multiple_results: bool=False, verbose: bool=True):
        # batch_size: number of parallel executions (<= 1 disables threading).
        # target_num: when set, stop once this many successes are produced.
        # multiple_results: func produces a list of results per input.
        self._batch_size = batch_size
        self._threading_request_timeout = threading_request_timeout
        self._target_num = target_num
        self._unordered = unordered
        self._sequential_generation = sequential_generation
        self._disable_batch_progress = disable_batch_progress
        self._disable_timer = disable_timer
        self._multiple_results = multiple_results
        self._verbose = verbose

    def _write_to_file(self, output_file: str, results: Any):
        'Write the results to the output file.'
        # With multiple_results, each element becomes its own jsonl line.
        if self._multiple_results:
            for res in results:
                append_jsonl(output_file, res)
        else:
            append_jsonl(output_file, results)

    def _run_on_dataset(self, func: Callable, output_file: str, dataset: List[Any], result_parse_func: Callable=None) -> Tuple[(List[Any], List[Any], List[Any])]:
        'Run the function on the dataset.'
        # Returns (successful results, inputs that failed, failed results).
        # Each per-item result is interpreted as (failed_item, result):
        # failed_item is None on success, otherwise it is the input to retry.
        batched_dataset = batchify(dataset, self._batch_size)
        succ_results = []
        remaining_dataset = []
        failed_results = []
        # NOTE(review): a target_num of 0 falls back to len(dataset) here.
        total = (self._target_num or len(dataset))
        current = 0
        with tqdm.tqdm(total=total) as pbar:
            for batch_inputs in batched_dataset:
                if (self._batch_size <= 1):
                    # Run inline to avoid threading overhead for batch size 1.
                    results = [func(batch_inputs[0])]
                else:
                    results = thread_pool_executor(func, batch_inputs, unordered=self._unordered, sequential_generation=self._sequential_generation, show_progress=(not self._disable_batch_progress), num_threads=self._batch_size, request_timeout=self._threading_request_timeout, enable_timer=(not self._disable_timer))
                success_count = 0
                for res in results:
                    if (result_parse_func is not None):
                        (failed_item, result) = result_parse_func(res)
                    else:
                        (failed_item, result) = res
                    if (failed_item is None):
                        # Persist successes immediately so partial runs are kept.
                        self._write_to_file(output_file, result)
                        succ_results.append(result)
                        success_count += 1
                    else:
                        remaining_dataset.append(failed_item)
                        failed_results.append(result)
                if self._verbose:
                    print(f'Saved {success_count} successful execution results (out of {len(results)}) to {output_file}.')
                if (self._target_num is None):
                    # Progress tracks processed inputs...
                    pbar.update(len(batch_inputs))
                else:
                    # ...or successes when aiming for a target count, in which
                    # case the run stops early once the target is reached.
                    pbar.update(success_count)
                    current += success_count
                    if (current >= self._target_num):
                        break
        return (succ_results, remaining_dataset, failed_results)

    def run(self, func: Callable, output_file: str, dataset: List[Any]=None, result_parse_func: Callable=None) -> Tuple[(List[Any], List[Any], List[Any])]:
        # Thin validated wrapper around `_run_on_dataset`.
        if (dataset is None):
            raise ValueError('dataset cannot be None.')
        return self._run_on_dataset(func, output_file, dataset, result_parse_func=result_parse_func)

    @classmethod
    def register_args(cls, parser: ArgumentParser, prefix: str=None, shortprefix: str=None, default_batch_size: int=5, default_timeout: int=3000):
        'Register arguments for the function executor.'
        # `prefix` namespaces the flags (e.g. 'gen' -> '--gen-batch-size');
        # `shortprefix` defaults to the prefix's first character.
        if (prefix is None):
            prefix = ''
            shortprefix = ''
        else:
            prefix += '-'
            shortprefix = (shortprefix or prefix[0])
        parser.add_argument(f'--{prefix}batch-size', f'-{shortprefix}bs', type=int, default=default_batch_size, help='The number of the function executions in parallel')
        parser.add_argument(f'--{prefix}threading-request-timeout', f'-{shortprefix}trt', type=int, default=default_timeout, help='Timeout for a single function execution request')
        parser.add_argument(f'--{prefix}sequential-generation', f'-{shortprefix}sg', action='store_true', help='Run function sequentially over the dataset instead of in parallel')
        parser.add_argument(f'--{prefix}disable-batch-progress', f'-{shortprefix}xprog', action='store_true', help='Disable progress bar for multi-threaded execution')
        parser.add_argument(f'--{prefix}disable-timer', f'-{shortprefix}xtimer', action='store_true', help='Disable timer for multi-threaded execution')

    @classmethod
    def from_args(cls, args: Namespace, prefix: str=None, **kwargs):
        'Create an executor from arguments.'
        # argparse converts dashes to underscores, hence the '_' separator.
        if (prefix is None):
            prefix = ''
        else:
            prefix += '_'
        args_dict = {}
        for name in cls.args_list:
            args_dict[name] = getattr(args, (prefix + name))
        args_dict.update(kwargs)
        return cls(**args_dict)
|
class FuncExecutorWithRetry(BaseFuncExecutor):
    """A function executor that retries failed data points up to
    ``num_retries`` extra rounds."""
    args_list = (BaseFuncExecutor.args_list + ['num_retries'])

    def __init__(self, num_retries: int=0, **kwargs):
        super().__init__(**kwargs)
        self._num_retries = num_retries

    def run(self, func: Callable, output_file: str, dataset: List[Any]=None, result_parse_func: Callable=None) -> Tuple[(List[Any], List[Any], List[Any])]:
        """Run the function on the dataset, retrying failed data points.

        Any data points still failing after the final round have their
        results appended to the end of ``output_file``.

        Returns:
            (successful results across all rounds, data points that still
            failed, failed results of the last round) — fix: the previous
            version returned None despite its annotation and the base-class
            contract; callers ignoring the return value are unaffected.
        """
        if (dataset is None):
            raise ValueError('dataset cannot be None.')
        succ_results = []
        failed_results = []
        for retries in range((self._num_retries + 1)):
            if self._verbose:
                msg = ('Executing the function' if (retries == 0) else 'Retrying')
                print(f'{msg} for {len(dataset)} data points.')
            (round_succ, dataset, failed_results) = self._run_on_dataset(func, output_file, dataset, result_parse_func)
            succ_results.extend(round_succ)
            if (len(dataset) == 0):
                break
            if self._verbose:
                print(f'Failed to execute the function for {len(dataset)} data points at round {retries}.')
                if ('idx' in dataset[0]):
                    print(f"Failed indexes: {[item['idx'] for item in dataset]}")
        # Persist the last round's failed results so they are not lost.
        if len(failed_results):
            for result in failed_results:
                self._write_to_file(output_file, result)
            print(f'Saved {len(failed_results)} failed results to the end of {output_file}.')
        return (succ_results, dataset, failed_results)

    @classmethod
    def register_args(cls, parser: ArgumentParser, prefix: str=None, shortprefix: str=None, default_num_retries: int=0, **kwargs):
        'Register arguments for the function executor.'
        super().register_args(parser, prefix, shortprefix, **kwargs)
        if (prefix is None):
            prefix = ''
            shortprefix = ''
        else:
            prefix += '-'
            shortprefix = (shortprefix or prefix[0])
        parser.add_argument(f'--{prefix}num-retries', f'-{shortprefix}nr', type=int, default=default_num_retries, help='The number of retries when the function execution fails. Do not set a too large number as it may take a long time to finish.')
|
class GenFuncExecutor(BaseFuncExecutor):
    """A function executor for generation tasks: keeps running ``func`` until
    ``target_num`` successful results are produced (inherited early-stop),
    optionally synthesizing an attempt-index dataset when none is given."""
    args_list = (BaseFuncExecutor.args_list + ['target_num', 'max_attempts'])

    def __init__(self, target_num: int, max_attempts: int=None, unordered: bool=True, **kwargs):
        # Unordered collection by default: generation results are independent.
        super().__init__(target_num=target_num, unordered=unordered, **kwargs)
        self._max_attempts = max_attempts

    def run(self, func: Callable, output_file: str, dataset: List[Any]=None, result_parse_func: Callable=None):
        """Run ``func``; with no dataset, the attempt indexes
        0..max_attempts-1 are used as inputs.

        Raises:
            ValueError: if both ``dataset`` and ``max_attempts`` are None.
        """
        if (dataset is None):
            if (self._max_attempts is None):
                raise ValueError('dataset and max_attempts cannot both be None.')
            # Idiom fix: build the attempt-index dataset directly instead of
            # an identity comprehension.
            dataset = list(range(self._max_attempts))
        return self._run_on_dataset(func, output_file, dataset, result_parse_func=result_parse_func)

    @classmethod
    def register_args(cls, parser: ArgumentParser, prefix: str=None, shortprefix: str=None, **kwargs):
        'Register arguments for the function executor.'
        super().register_args(parser, prefix, shortprefix, **kwargs)
        if (prefix is None):
            prefix = ''
            shortprefix = ''
        else:
            prefix += '-'
            shortprefix = (shortprefix or prefix[0])
        parser.add_argument(f'--{prefix}target-num', f'-{shortprefix}target', type=int, default=None, help='The target number of generated data points')
        parser.add_argument(f'--{prefix}max-attempts', f'-{shortprefix}ma', type=int, default=None, help='The maximum number of attempts to generate data points')
|
class BasePromptExecutor():
    """Base class for all prompt executors.

    To naturally support multithreading, the executor should be stateless.
    That is, the executor should not have any attributes that are not passed in
    through the constructor.

    args:
        llm: the language model to use
        sys_prompt_module: the prompt module for system message
        task_prompt_module: the prompt module for task message
        stop_at: stop at which stage, flexible for debugging. Can be "preprocess", "prompt", "llm"
    """
    # Expected keys of the input dict; None disables validation.
    _input_keys: List[str] = None
    _llm: BaseLanguageModel = None
    _sys_prompt_module: PromptModule = None
    _task_prompt_module: PromptModule = None
    # Names parallel to `prompt_modules`; e.g. 'system' prompts are
    # formatted without brackets in `_process_prompts`.
    _prompt_module_names: List[str] = ['system', 'task']

    def __init__(self, llm: BaseLanguageModel, stop_at: str=None):
        self._llm = llm
        # Chat models receive a message list; plain LLMs a single string.
        self._is_chat_model = isinstance(llm, BaseChatModel)
        self._stop_at = stop_at

    @property
    def prompt_modules(self):
        # Order must match `_prompt_module_names`.
        return [self._sys_prompt_module, self._task_prompt_module]

    def set_prompt_module(self, prompt_module: PromptModule, prompt_type: str='task'):
        'Set prompt modules.'
        if (prompt_type == 'system'):
            self._sys_prompt_module = prompt_module
        elif (prompt_type == 'task'):
            self._task_prompt_module = prompt_module
        else:
            raise ValueError(f'Unknown prompt type: {prompt_type}')

    def _validate_inputs(self, inputs: Dict[(str, Any)]):
        'Validate input dict.'
        # Require an exact key match with `_input_keys`: no missing, no extras.
        if (self._input_keys is None):
            return
        for k in self._input_keys:
            if (k not in inputs):
                raise ValueError(f'Missing input key: {k}')
        for k in inputs.keys():
            if (k not in self._input_keys):
                raise ValueError(f'Unexpected input key: {k}')

    @abstractmethod
    def _preprocess_inputs(self, inputs: Dict[(str, Any)]) -> Dict[(str, Any)]:
        'Preprocess inputs to the format that can be used by the prompt.'
        pass

    def _preprocess_prompts(self, prompts: List[PromptModule], prompt_inputs: Dict[(str, Any)]) -> List[PromptModule]:
        # Hook: adjust prompt modules before formatting (default: no-op).
        return prompts

    def _postprocess_prompts(self, prompts: List[str], raw_inputs: Dict[(str, Any)]) -> List[str]:
        # Hook: adjust formatted prompt strings (default: no-op).
        return prompts

    def _process_prompts(self, prompt_inputs: Dict[(str, Any)], raw_inputs: Dict[(str, Any)]) -> List[str]:
        # Pre-hook -> format all modules -> post-hook.
        prompt_modules = self._preprocess_prompts(self.prompt_modules, prompt_inputs)
        include_brackets = [(name != 'system') for name in self._prompt_module_names]
        prompts = format_multiple_prompts(prompt_modules, prompt_inputs, include_brackets=include_brackets)
        return self._postprocess_prompts(prompts, raw_inputs)

    def _convert_to_prompt(self, system_message: str, conversation: List[str], message_types: List[str]=None) -> Union[(List[BaseMessage], str)]:
        'Convert system message and conversation to prompt that can be used by LangChain LLM.'
        prompt = []
        if (system_message is not None):
            prompt.append(convert_to_message(system_message, 'system'))
        # Default to alternating human/ai turns.
        if (message_types is None):
            message_types = [('human' if ((i % 2) == 0) else 'ai') for i in range(len(conversation))]
        for (i, msg) in enumerate(conversation):
            prompt.append(convert_to_message(msg, message_types[i]))
        # Non-chat models get a single concatenated string instead.
        if (not self._is_chat_model):
            prompt = '\n\n'.join([t.content for t in prompt])
        return [prompt]

    def _build_conversation(self, task_prompt: str, raw_inputs: Dict[(str, Any)]) -> List[str]:
        'Build conversation using task message and raw inputs.'
        return [task_prompt]

    def _build_prompt(self, prompt_inputs: Dict[(str, Any)], raw_inputs: Dict[(str, Any)]) -> Dict[(str, Any)]:
        'Build prompt using input dict.'
        if (self._task_prompt_module is None):
            raise ValueError('task prompt is not defined')
        prompts = self._process_prompts(prompt_inputs, raw_inputs)
        (sys_prompt, task_prompt) = prompts[:2]
        conversation = self._build_conversation(task_prompt, raw_inputs)
        return {'prompts': prompts, 'conversation': conversation, 'merged_prompt': self._convert_to_prompt(sys_prompt, conversation)}

    @abstractmethod
    def _parse_output(self, output: ChatResult, raw_inputs: Dict[(str, Any)], **kwargs) -> Any:
        'Parse the output of LLM to the desired format.'
        pass

    def _run(self, inputs: Dict[(str, Any)], **kwargs) -> Any:
        'Execute the prompt with LLM given inputs, then parse the output.'
        if (not kwargs.get('disable_input_validation', False)):
            self._validate_inputs(inputs)
        prompt_inputs = self._preprocess_inputs(inputs)
        # `_stop_at` short-circuits the pipeline for debugging.
        if (self._stop_at == 'preprocess'):
            return prompt_inputs
        prompt = self._build_prompt(prompt_inputs, inputs)['merged_prompt']
        if (self._stop_at == 'prompt'):
            return prompt
        output = self._llm.generate(prompt)
        if (self._stop_at == 'llm'):
            return output
        return self._parse_output(output, inputs)

    def _call_impl(self, inputs: Dict[(str, Any)], **kwargs) -> Any:
        'Call the executor with inputs.'
        try:
            return self._run(inputs, **kwargs)
        except Exception as e:
            # Surface the traceback before re-raising (useful in thread pools).
            traceback.print_exc()
            raise e
    __call__: Callable[(..., Any)] = _call_impl

    @classmethod
    def register_args(cls, parser: ArgumentParser):
        'Register arguments for the executor.'
        parser.add_argument('--stop-at', '-sa', type=str, default=None, choices=['preprocess', 'prompt', 'llm'])

    @classmethod
    def from_args(cls, args: Namespace, llm: BaseLanguageModel):
        'Create an executor from arguments.'
        return cls(llm, stop_at=args.stop_at)
|
class BasePromptExecutorWithCritique(BasePromptExecutor):
    """Base class for all prompt executors with critique.

    After the initial LLM response, up to ``critique_rounds`` extra rounds
    append the previous response plus the critique prompt to the
    conversation and re-query the (possibly separate) critique LLM.
    """
    _critique_prompt_module: PromptModule = None
    _prompt_module_names: List[str] = ['system', 'task', 'critique']

    def __init__(self, llm: BaseLanguageModel, critique_llm: BaseLanguageModel=None, stop_at: str=None, critique_rounds=0):
        super().__init__(llm, stop_at)
        # Fall back to the main LLM when no dedicated critique LLM is given.
        self._critique_llm = (critique_llm or llm)
        self._critique_rounds = critique_rounds

    def set_prompt_modules(self, prompt_module: PromptModule, prompt_type: str='task'):
        'Set prompt modules.'
        if (prompt_type == 'critique'):
            self._critique_prompt_module = prompt_module
        else:
            # Fix: the parent method is `set_prompt_module` (singular); the
            # previous `super().set_prompt_modules(...)` raised
            # AttributeError for 'system'/'task' prompt types.
            super().set_prompt_module(prompt_module, prompt_type)

    @property
    def prompt_modules(self):
        # Order must match `_prompt_module_names`.
        return [self._sys_prompt_module, self._task_prompt_module, self._critique_prompt_module]

    def _run(self, inputs: Dict[(str, Any)], **kwargs) -> List[Any]:
        """Run the base prompt, then the critique rounds; return the parsed
        result of every round (initial response first)."""
        if (not kwargs.get('disable_input_validation', False)):
            self._validate_inputs(inputs)
        prompt_inputs = self._preprocess_inputs(inputs)
        if (self._stop_at == 'preprocess'):
            return prompt_inputs
        prompt_dict = self._build_prompt(prompt_inputs, inputs)
        (sys_prompt, task_prompt, critique_prompt) = prompt_dict['prompts']
        conversation = prompt_dict['conversation']
        prompt = prompt_dict['merged_prompt']
        if (self._stop_at == 'prompt'):
            return prompt
        raw_output = self._llm.generate(prompt)
        if (self._stop_at == 'llm'):
            return raw_output
        results = [self._parse_output(raw_output, inputs, critique_round=0)]
        for i in range(self._critique_rounds):
            try:
                if (critique_prompt is None):
                    raise ValueError('critique prompt is not defined')
                conversation += [parse_llm_response(raw_output), critique_prompt]
                # Fix: build the merged prompt into a separate local so the
                # critique prompt text is not clobbered; previously rounds
                # >= 2 appended the merged prompt object instead of the
                # critique text to the conversation.
                merged_prompt = self._convert_to_prompt(sys_prompt, conversation)
                raw_output = self._critique_llm.generate(merged_prompt)
                results.append(self._parse_output(raw_output, inputs, critique_round=(i + 1)))
            except Exception as e:
                # Best-effort critique: keep the results gathered so far.
                print(f'Error encountered in critique round {(i + 1)}: {e}')
                break
        return results

    @classmethod
    def register_args(cls, parser: ArgumentParser):
        'Register arguments for the executor.'
        super().register_args(parser)
        parser.add_argument('--critique-rounds', '-cr', type=int, default=0, help='The number of critique rounds')

    @classmethod
    def from_args(cls, args: Namespace, llm: BaseLanguageModel, critique_llm: BaseLanguageModel=None):
        'Create an executor from arguments.'
        return cls(llm, critique_llm, stop_at=args.stop_at, critique_rounds=args.critique_rounds)
|
class CaseGenerator(BasePromptExecutor):
    'Generate cases using primary and auxiliary toolkits.'
    _input_keys = ['prim_toolkits', 'aux_toolkits', 'example_cases', 'risks']

    def __init__(self, llm: BaseLanguageModel, stop_at: str=None, redteam: bool=True, num_gen_per_prompt: int=1, num_sample_risks: int=1, use_simple_tool_desc: bool=False):
        # redteam: use the red-teaming prompt variants (with risk annotations).
        # num_sample_risks: risks sampled per primary toolkit when no
        #   explicit risks are supplied in the inputs.
        super().__init__(llm, stop_at)
        self._redteam = redteam
        self._num_gen_per_prompt = num_gen_per_prompt
        self._num_sample_risks = num_sample_risks
        self._use_simple_tool_desc = use_simple_tool_desc
        self._set_prompts()

    def _set_prompts(self):
        # Choose red-team vs standard prompt modules.
        if self._redteam:
            sys_prompt = REDTEAM_CASE_GEN_SYSTEM_MESSAGE
            task_prompt = REDTEAM_CASE_GEN_PROMPT
        else:
            sys_prompt = STANDARD_CASE_GEN_SYSTEM_MESSAGE
            task_prompt = STANDARD_CASE_GEN_PROMPT
        self.set_prompt_module(sys_prompt, 'system')
        self.set_prompt_module(task_prompt, 'task')

    def _get_toolkits_name_desc(self, toolkits: Dict[(str, Any)], namekey: str, add_risks: bool) -> Tuple[(List[str], str)]:
        # Return the toolkit names and a newline-joined formatted description.
        toolkits_names = [toolkit[namekey] for toolkit in toolkits]
        toolkits_descs = [format_toolkit_dict(toolkit, namekey, add_risks=add_risks, indent=indent, use_simple_tool_desc=self._use_simple_tool_desc) for toolkit in toolkits]
        return (toolkits_names, '\n'.join(toolkits_descs))

    def _remove_redteam_only_fields(self, case):
        # Strip red-team-only fields and collapse the 'Standard' variants.
        # NOTE(review): mutates `case` in place — callers share the change.
        redteam_only_fields = ['Potential Risky Outcomes', 'Potential Risky Actions']
        for field in redteam_only_fields:
            case.pop(field, None)
        case['User Instruction'] = case['User Instruction']['Standard']
        case['Underspecifications'] = case['Underspecifications']['Standard']
        return case

    def _preprocess_inputs(self, inputs: Dict[(str, Any)]):
        # Build the full prompt-input dict: toolkit names/descriptions,
        # formatted example cases, and (for red-teaming) risk listings.
        namekey = 'name_for_model'
        (prim_toolkits_names, prim_toolkits_desc) = self._get_toolkits_name_desc(inputs['prim_toolkits'], namekey, add_risks=self._redteam)
        (aux_toolkits_names, aux_toolkits_desc) = self._get_toolkits_name_desc(inputs['aux_toolkits'], namekey, add_risks=False)
        example_cases_texts = []
        for case in inputs['example_cases']:
            if (not self._redteam):
                case = self._remove_redteam_only_fields(case)
            example_head = f'''#### Example {(len(example_cases_texts) + 1)}
'''
            example_cases_texts.append((((example_head + '```\n') + json.dumps(case, indent=4)) + '\n```'))
        example_cases_str = '\n\n'.join(example_cases_texts)
        risks = inputs.get('risks', None)
        prim_toolkits_risks = ''
        if self._redteam:
            if risks:
                # Use the explicitly provided risks...
                prim_toolkits_risks = '\n'
                for risk in risks:
                    prim_toolkits_risks += f'''{indent}- {risk}
'''
            else:
                # ...or sample risks from each primary toolkit.
                prim_toolkits_risks = '\n'
                for prim_toolkit in inputs['prim_toolkits']:
                    prim_toolkits_risks += f'''* {prim_toolkit[namekey]}:
'''
                    sampled_risks = random.sample(prim_toolkit['risks'], self._num_sample_risks)
                    for risk in sampled_risks:
                        prim_toolkits_risks += f'''{indent}- {risk}
'''
        return dict(prim_toolkits_names=prim_toolkits_names, aux_toolkits_names=aux_toolkits_names, prim_toolkits_desc=prim_toolkits_desc, aux_toolkits_desc=aux_toolkits_desc, example_cases_str=example_cases_str, num_gen_per_prompt=self._num_gen_per_prompt, prim_toolkits_risks=prim_toolkits_risks)

    def _parse_output(self, output: ChatResult, raw_inputs: Dict[(str, Any)], **kwargs) -> Any:
        # Extract the JSON case from the fenced code block in the response.
        res = parse_llm_response(output)
        pattern = re.compile('```(?:json\\n)?(.*?)```', re.DOTALL)
        result = re.search(pattern, res)
        if result:
            case = result.group(1).strip()
        else:
            raise ValueError(f'Discard a response due to no proper backticks: {res}')
        try:
            case = json.loads(case)
        except json.JSONDecodeError:
            raise ValueError(f'Discard a response due to JSON decoding error: {res}')
        # Keep the full response as the generation thought for traceability.
        case['Thoughts'] = res
        return case

    @classmethod
    def register_args(cls, parser: ArgumentParser):
        super().register_args(parser)
        parser.add_argument('--prompt-only', '-po', action='store_true', help='Only return the prompt')
        parser.add_argument('--num-gen-per-prompt', '-ngen', type=int, default=1, help='Number of generated cases per prompt')
        parser.add_argument('--num-sample-risks', '-nrisk', type=int, default=1, help='Number of sampled risks per toolkit')
        parser.add_argument('--use-simple-tool-desc', '-simple', action='store_true', help='Use simple tool description')
        parser.add_argument('--standard', '-std', action='store_true', help='Use standard case generator prompt')

    @classmethod
    def from_args(cls, args: Namespace, llm: BaseLanguageModel):
        # NOTE(review): multi-case generation per prompt is not yet supported.
        assert (args.num_gen_per_prompt == 1), 'Only support 1 case per prompt for now'
        return cls(llm, stop_at=args.stop_at, redteam=(not args.standard), num_gen_per_prompt=args.num_gen_per_prompt, num_sample_risks=args.num_sample_risks, use_simple_tool_desc=args.use_simple_tool_desc)
|
class CaseGeneratorWithInstruction(CaseGenerator):
    """Case generator that additionally conditions on a provided instruction."""
    _input_keys = (CaseGenerator._input_keys + ['input_instruction'])

    def _set_prompts(self):
        # Select the with-instruction prompt variants (red-team or standard).
        if self._redteam:
            sys_prompt, task_prompt = (REDTEAM_CASE_GEN_SYSTEM_MESSAGE, REDTEAM_CASE_GEN_PROMPT_WITH_INSTRUCTION)
        else:
            sys_prompt, task_prompt = (STANDARD_CASE_GEN_SYSTEM_MESSAGE, STANDARD_CASE_GEN_PROMPT_WITH_INSTRUCTION)
        self.set_prompt_module(sys_prompt, 'system')
        self.set_prompt_module(task_prompt, 'task')

    def _preprocess_inputs(self, inputs: Dict[(str, Any)]):
        # Reuse the parent preprocessing and pass the instruction through.
        prompt_inputs = super()._preprocess_inputs(inputs)
        prompt_inputs['input_instruction'] = inputs['input_instruction']
        return prompt_inputs
|
class ToolNamesGenerator(BasePromptExecutor):
    """Generate tool names with short descriptions for a given category."""
    _input_keys = ['num_gen', 'category', 'description']
    _sys_prompt_module = GEN_NAMES_SYSTEM_MESSAGE
    _task_prompt_module = GEN_NAMES_PROMPT

    def _preprocess_inputs(self, inputs: Dict[(str, Any)]) -> Dict[(str, Any)]:
        # Inputs already match the prompt placeholders; pass through.
        return inputs

    def _parse_output(self, output: ChatResult, raw_inputs: Dict[(str, Any)], **kwargs) -> Any:
        text = parse_llm_response(output)
        # Match numbered "N. Name: description" entries in the response.
        entries = re.findall(r'\d+\.\s(.*?):\s(.*?)(?=\n\d+\.|$)', text, re.DOTALL)
        category = raw_inputs['category']
        return [{'category': category, 'name': name, 'desc': desc.strip()} for (name, desc) in entries]
|
class ToolThoughtGenerator(BasePromptExecutor):
    """Generate a toolkit development "thought" (name, description, risks)."""
    _input_keys = ['existing_tools', 'toolkit', 'domain_blacklist']
    _sys_prompt_module = TOOL_GEN_SYSTEM_MESSAGE

    def __init__(self, llm: BaseLanguageModel, stop_at: str=None, gen_risky_tool: bool=True, brainstorm: bool=False):
        super().__init__(llm, stop_at)
        # Task prompt depends on the risky/brainstorm configuration.
        self.set_prompt_module(get_tool_gen_prompt(gen_risky_tool, brainstorm), 'task')
        self.gen_risky_tool = gen_risky_tool
        self.brainstorm = brainstorm

    def _preprocess_inputs(self, inputs: Dict[(str, Any)]) -> Dict[(str, Any)]:
        toolkit = inputs['toolkit']
        return dict(
            json_types=JSON_TYPES_STR,
            existing_tools=inputs['existing_tools'],
            existing_domains=inputs['domain_blacklist'],
            toolkit_name=toolkit.get('name', None),
            toolkit_desc=toolkit.get('desc', None),
        )

    def _parse_content(self, text, prefix='', sep=':', delim='```'):
        # Grab the text between the delimiter pair that follows `prefix`.
        matched = re.search(re.compile(f'{prefix}(.*?){sep}(.*?){delim}(.*?){delim}', re.DOTALL), text)
        if matched is None:
            raise ValueError(f'Discard a response due to parsing error: {prefix}')
        return matched.group(3).strip()

    def _parse_output(self, output: ChatResult, raw_inputs: Dict[(str, Any)], **kwargs) -> Any:
        res = parse_llm_response(output)
        extract_dict = dict(
            name=self._parse_content(res, prefix='Toolkit Name', delim='"'),
            desc=self._parse_content(res, prefix='Toolkit Description', delim='"'),
        )
        if self.gen_risky_tool:
            extract_dict['risks'] = self._parse_content(res, prefix='Potential Risks', delim='```')
        # Carry over the category when the input toolkit specified one.
        if ('category' in raw_inputs['toolkit']):
            extract_dict['category'] = raw_inputs['toolkit']['category']
        return dict(**extract_dict, thought=res)

    @classmethod
    def register_args(cls, parser: ArgumentParser):
        super().register_args(parser)
        parser.add_argument('--standard', '-std', action='store_true', help='Use standard tool generator prompt, not biased to risky tools')
        # NOTE(review): '--brainstrom' is misspelled but preserved for CLI
        # backward compatibility (from_args reads args.brainstrom).
        parser.add_argument('--brainstrom', '-brain', action='store_true', help='Brainstorm the tool, not fixed to the given tool')

    @classmethod
    def from_args(cls, args: Namespace, llm: BaseLanguageModel):
        return cls(llm, stop_at=args.stop_at, gen_risky_tool=(not args.standard), brainstorm=args.brainstrom)
|
class ToolSpecGenerator(BasePromptExecutor):
    """Prompt executor that turns a toolkit development thought into a JSON spec."""

    _input_keys = ['example_tools', 'toolkit']
    _sys_prompt_module = GEN_TOOL_SPEC_SYSTEM_MESSAGE
    _task_prompt_module = GEN_TOOL_SPEC_PROMPT

    def __init__(self, llm: BaseLanguageModel, stop_at: str = None,
                 use_full_prompt: bool = False, gen_risky_tool: bool = True,
                 brainstorm: bool = False):
        super().__init__(llm, stop_at)
        if use_full_prompt:
            # The full prompt embeds the thought-generation instructions too.
            self.set_prompt_module(get_tool_gen_prompt(gen_risky_tool, brainstorm, thoughts_only=False), 'task')
        self.use_full_prompt = use_full_prompt
        self.gen_risky_tool = gen_risky_tool
        self.brainstorm = brainstorm

    def _preprocess_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Render the few-shot examples and assemble the prompt inputs."""
        example_texts = []
        for tool in inputs['example_tools']:
            if not self.gen_risky_tool:
                # Default of None tolerates examples without a 'risks' key;
                # the original bare pop('risks') raised KeyError on them.
                tool.pop('risks', None)
            example_head = f'''#### Example {(len(example_texts) + 1)}: {tool['toolkit']}
'''
            example_texts.append(example_head + '```\n' + json.dumps(tool) + '\n```')
        prompt_inputs = dict(
            examples='\n\n'.join(example_texts),
            json_types=JSON_TYPES_STR,
            existing_tools=[],
            existing_domains=[],
            toolkit_name=None,
            toolkit_desc=None,
        )
        if not self.use_full_prompt:
            prompt_inputs['development_thought'] = inputs['toolkit']['thought']
        return prompt_inputs

    def _build_conversation(self, task_prompt: str, raw_inputs: Dict[str, Any]) -> List[str]:
        """Build the message list; the full prompt gets the thought as a follow-up."""
        if self.use_full_prompt:
            return [task_prompt, raw_inputs['toolkit']['thought']]
        return [task_prompt]

    def _parse_output(self, output: ChatResult, raw_inputs: Dict[str, Any], **kwargs) -> Any:
        """Extract and decode the JSON tool spec from the LLM response.

        Raises:
            ValueError: when no fenced code block is found or its JSON is invalid.
        """
        res = parse_llm_response(output)
        # Strip any echoed example headers before looking for the spec.
        res = re.sub('[\\s]*## Example.*:.*\\n', '', res)
        pattern = re.compile('```(?:json\\n)?(.*?)```', re.DOTALL)
        result = re.search(pattern, res)
        if result:
            tool_spec = result.group(1).strip()
        else:
            raise ValueError(f'Discard a response due to no proper backticks: {res}')
        try:
            tool_spec = json.loads(tool_spec)
        except json.JSONDecodeError:
            raise ValueError(f'Discard a response due to JSON decoding error: {res}')
        if 'category' in raw_inputs['toolkit']:
            tool_spec['category'] = raw_inputs['toolkit']['category']
        return tool_spec

    @classmethod
    def register_args(cls, parser: ArgumentParser):
        super().register_args(parser)
        parser.add_argument('--use-full-prompt', '-full', action='store_true',
                            help='Use full prompt to generate spec, not with a short format prompt')
        parser.add_argument('--standard', '-std', action='store_true',
                            help='Use standard tool generator prompt, not biased to risky tools')
        # '--brainstorm' alias covers the original misspelling; the dest stays
        # 'brainstrom' (taken from the first long option) for from_args.
        parser.add_argument('--brainstrom', '--brainstorm', '-brain', action='store_true',
                            help='Brainstorm the tool, not fixed to the given tool')

    @classmethod
    def from_args(cls, args: Namespace, llm: BaseLanguageModel):
        return cls(llm, stop_at=args.stop_at, use_full_prompt=args.use_full_prompt,
                   gen_risky_tool=(not args.standard), brainstorm=args.brainstrom)
|
def replace_agent_ref_with_pronoun(prompt):
    """Rewrite '{agent}' references in the prompt to second-person pronouns."""
    # Default-bound lambda args avoid the late-binding closure pitfall.
    for old, new in (('The {agent}', 'You'), ('the {agent}', 'you')):
        prompt = replace_prompt(prompt, lambda s, _old=old, _new=new: s.replace(_old, _new))
    return prompt
|
def remove_risky_req(prompt):
    """Strip all risk-related submodules from the prompt and retarget the
    instruction placeholders to the plain '{instruction}' variable.

    NOTE(review): another function named `remove_risky_req` is defined later
    in this module and shadows this one at import time — confirm which is
    intended to be public.
    """
    risk_modules = [
        'risky_outcome', 'risky_actions', 'real_req_risky_outcome',
        'potential_risk_requirement', 'benign_requirement',
        'diversity_risky_outcome', 'feasible_underspec_task_info',
        'toolkits_risks', 'brainstorm_case_scenarios_risks',
        'brainstorm_task_risks', 'identify_underspec', 'introduce_risks',
        'infer_risks', 'specify_instruction_redteam',
        'reason_about_risky_actions', 'primary_toolkits_risks',
        'exp_achv_underspec',
    ]
    prompt = removed_submodules(prompt, risk_modules)
    prompt = replaced_submodule(prompt, 'concrete_underspec_task_info', Single('In particular, for the information that is necessary for the {agent} to accomplish the task, it should be clearly provided in the {instruction}.'))
    for module_name in ('specify_instruction', 'list_exp_achv', 'reason_about_ideal_actions'):
        module = find_submodule(prompt, module_name)
        # Collapse both redteam/std placeholder variants to '{instruction}'.
        for placeholder in ('{redteam_instruction}', '{std_instruction}'):
            replace_prompt(module.content, lambda x, _p=placeholder: x.replace(_p, '{instruction}'), inplace=True)
    return prompt
|
def get_tool_gen_prompt(gen_risky_tool: bool, brainstorm: bool, thoughts_only: bool = True) -> PromptModule:
    """Look up the tool-generation prompt variant keyed by the given flags."""
    parts = [
        'risky' if gen_risky_tool else 'std',
        'brainstorm' if brainstorm else 'fixed',
    ]
    if thoughts_only:
        parts.append('thoughts')
    key = '_'.join(parts)
    return TOOL_GEN_PROMPTS[key]
|
def apply_gen_fixed_toolkit_prompt(prompt):
    """Specialize the prompt for a fixed (pre-specified) toolkit by removing
    the domain blacklist and the brainstorming step."""
    return removed_submodules(prompt, ['tool_gen_blacklist', 'brainstorm_toolkit_step'])
|
def apply_gen_brainstormed_toolkit_prompt(prompt):
    """Specialize the prompt for brainstormed toolkits by dropping the
    fixed-toolkit specification step."""
    return removed_submodules(prompt, ['specify_toolkit_step'])
|
def remove_spec_format_instruction(prompt):
    """Drop the JSON format example submodules and replace the format header
    with a short generic instruction."""
    stripped = removed_submodules(prompt, ['format_example', 'output_json'])
    replacement = Single('Format the output toolkit specifications following the requirements below.')
    return replaced_submodule(stripped, 'format_instruction_head', replacement)
|
def remove_risky_req(prompt):
    """Strip risk-related submodules from a toolkit-generation prompt and
    scrub the '{risky_requirement}' placeholder from the brainstorm step.

    NOTE(review): this redefines (and shadows) the `remove_risky_req`
    declared earlier in this module; only this version is visible after
    import — confirm the duplication is intentional.
    """
    # Grab the brainstorm step up front, since it is edited and re-inserted below.
    brainstorm_toolkit_step = find_submodule(prompt, 'brainstorm_toolkit_step')
    prompt = removed_submodules(prompt, ['risky_requirement', 'toolkit_risks', 'potential_risks', 'brainstorm_tool_risks', 'assess_risks', 'no_unauthorized_access_risk'])
    brainstorm_toolkit_step = replace_prompt(brainstorm_toolkit_step, lambda x: x.replace(', {risky_requirement}', ''))
    return replaced_submodule(prompt, 'brainstorm_toolkit_step', brainstorm_toolkit_step)
|
def get_toolkits_by_names(names: List[str]) -> List[FunctionToolkit]:
    """Instantiate toolkits by name, printing a warning for unknown names."""
    instantiated = []
    for name in names:
        factory = toolkits_factory(name)
        if not factory:
            print(f'Warning: toolkit {name} not found')
        else:
            instantiated.append(factory())
    return instantiated
|
def get_tool_class_by_name(toolkits: List[FunctionToolkit], name: str) -> BaseTool:
    """Return the tool called `name` from the first toolkit that has it.

    Raises:
        ValueError: if no toolkit in the list contains the tool.
    """
    for candidate in toolkits:
        try:
            return candidate[name]
        except ValueError:
            # Not in this toolkit; keep searching the rest.
            continue
    raise ValueError(f'Tool {name} does not exist in these toolkits')
|
class MyBashProcess(BashProcess):
    """BashProcess variant whose `_run` also returns the command's exit status."""

    def _run(self, command: str) -> Tuple[str, int]:
        """Run `command` in a subshell and return (output, returncode).

        On failure, returns the captured combined output (when
        `return_err_output` is set) or the stringified error, together with
        the failing return code, instead of raising.
        """
        # SECURITY: shell=True executes `command` through the shell; callers
        # must not pass untrusted input here.
        try:
            completed = subprocess.run(
                command,
                shell=True,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,  # interleave stderr into stdout
            )
            output = completed.stdout.decode()
        except subprocess.CalledProcessError as error:
            if self.return_err_output:
                return (error.stdout.decode().strip(), error.returncode)
            return (str(error).strip(), error.returncode)
        if self.strip_newlines:
            # Honor the flag: the original decoded with an unconditional
            # .strip(), which made this branch a no-op.
            output = output.strip()
        return (output, 0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.