text
stringlengths
1
93.6k
if iters[i] in relationship_to_enti:
possible_entities_set += relationship_to_enti[iters[i]]
if not possible_entities_set:
continue
enti_replace_dict = {}
for j, seg in enumerate(expression_segment):
processed_seg = seg.strip(')')
if '.' in seg and not seg.startswith('m.') and not seg.startswith('g.') and (
expression_segment[j - 1].endswith("AND") or expression_segment[j - 1].endswith("COUNT") or
expression_segment[j - 1].endswith("MAX") or expression_segment[j - 1].endswith("MIN")) and (
not any(ele.isupper() for ele in seg)):
tokenized_enti = [re.split('\.|_', doc) for doc in possible_entities_set]
tokenized_query = re.split('\.|_', processed_seg)
bm25 = BM25Okapi(tokenized_enti)
top3_ques = bm25.get_top_n(tokenized_query, possible_entities_set, n=3)
enti_replace_dict[j] = list(set(top3_ques))
combinations_enti = list(enti_replace_dict.values())
all_iters_enti = list(itertools.product(*combinations_enti))
enti_index = list(enti_replace_dict.keys())
for iter_ent in all_iters_enti:
for k in range(len(iter_ent)):
suffix = ""
for h in range(len(expression_segment[enti_index[k]].split(')')) - 1):
suffix = suffix + ')'
expression_segment_copy[enti_index[k]] = iter_ent[k] + suffix
final = " ".join(expression_segment_copy)
added = add_reverse(final)
for exp in added:
try:
answer = generate_answer([exp])
except:
answer = None
if answer is not None:
return answer, updating_two_hop_rela_dict, exp
return None, updating_two_hop_rela_dict, None
def generate_answer(list_exp):
    """Execute each candidate s-expression and return the first usable answer.

    Each expression is converted to SPARQL with ``lisp_to_sparql`` and run
    through ``execute_query``; expressions that fail to convert or execute
    are skipped.  A result whose first element is the numeric string '0' is
    treated as "no answer" (presumably a COUNT query that matched nothing —
    confirm against callers) and skipped as well.

    Args:
        list_exp: iterable of s-expression strings to try in order.

    Returns:
        The (non-empty) result list of the first successful query, or None
        if no expression produced a usable answer.
    """
    for exp in list_exp:
        # Conversion and execution are both best-effort: a failure on one
        # expression must not abort the remaining candidates.  Use
        # `except Exception` (not bare `except:`) so SystemExit and
        # KeyboardInterrupt still propagate.
        try:
            sparql = lisp_to_sparql(exp)
        except Exception:
            continue
        try:
            results = execute_query(sparql)
        except Exception:
            continue
        if not results:
            continue
        # Numeric '0' means the query ran but matched nothing — keep trying.
        if results[0].isnumeric() and results[0] == '0':
            continue
        return results
    return None
def number_of_join(exp):
    """Return how many whitespace-separated tokens of *exp* contain "JOIN"."""
    return sum(1 for token in exp.split() if "JOIN" in token)
def process_file_codex_output(filename_before, filename_after):
    """Merge two codex-output JSON files into a single dict.

    Entries from *filename_after* overwrite entries with the same key from
    *filename_before*.  Both files are parsed with ``strict=False`` so that
    control characters inside strings are tolerated (codex output may
    contain raw newlines/tabs).

    Args:
        filename_before: path to the base JSON file (a JSON object).
        filename_after: path to the overriding JSON file (a JSON object).

    Returns:
        dict: the merged mapping.
    """
    # Use context managers so the file handles are closed deterministically
    # (the original `json.load(open(...))` form leaked them).
    with open(filename_before, 'r') as f:
        merged = json.load(f, strict=False)
    with open(filename_after, 'r') as f:
        merged.update(json.load(f, strict=False))
    return merged
def all_combiner_evaluation(data_batch, selected_quest_compare, selected_quest_compose, selected_quest,
prompt_type, hsearcher, rela_corpus, relationships, temp, que_to_s_dict_train,
question_to_mid_dict, api_key, LLM_engine, name_to_id_dict, bm25_all_fns, all_fns,
relationship_to_enti, retrieval=False, corpus=None, nlp_model=None, bm25_train_full=None,
retrieve_number=100):
correct = [0] * 6
total = [0] * 6
no_ans = [0] * 6
for data in data_batch:
logger.info("==========")
logger.info("data[id]: {}".format(data["id"]))
logger.info("data[question]: {}".format(data["question"]))
logger.info("data[exp]: {}".format(data["s_expression"]))
label = []
for ans in data["answer"]:
label.append(ans["answer_argument"])
if not retrieval:
gene_type = type_generator(data["question"], prompt_type, api_key, LLM_engine)
logger.info("gene_type: {}".format(gene_type))
else:
gene_type = None
if gene_type == "Comparison":
gene_exps = ep_generator(data["question"],
list(set(selected_quest_compare) | set(selected_quest)),
temp, que_to_s_dict_train, question_to_mid_dict, api_key, LLM_engine,