text
stringlengths
1
93.6k
SCG.set_lam_kld(lam_kld_q2p=0.95, lam_kld_p2q=0.05, \
lam_kld_amu=0.0, lam_kld_alv=0.1)
# perform a minibatch update and record the cost for this batch
Xb = sample_batch(Xtr, bs=batch_size)
Mb = sample_data_masks(Xb, drop_prob=drop_prob, occ_dim=occ_dim)
result = SCG.train_joint(Xb, Mb)
costs = [(costs[j] + result[j]) for j in range(len(result))]
# output diagnostic information and checkpoint parameters, etc.
if ((i % 250) == 0):
costs = [(v / 250.0) for v in costs]
str1 = "-- batch {0:d} --".format(i)
str2 = " total_cost: {0:.4f}".format(costs[0])
str3 = " nll_term : {0:.4f}".format(costs[1])
str4 = " kld_q2p : {0:.4f}".format(costs[2])
str5 = " kld_p2q : {0:.4f}".format(costs[3])
str6 = " kld_amu : {0:.4f}".format(costs[4])
str7 = " kld_alv : {0:.4f}".format(costs[5])
str8 = " reg_term : {0:.4f}".format(costs[6])
joint_str = "\n".join([str1, str2, str3, str4, str5, str6, str7, str8])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
costs = [0.0 for v in costs]
if ((i % 500) == 0):
SCG.save_model_params("{}_params.pkl".format(result_tag))
#############################################
# check model performance on validation set #
#############################################
Xb = sample_batch(Xva, bs=500)
Mb = sample_data_masks(Xb, drop_prob=drop_prob, occ_dim=occ_dim)
result = SCG.compute_nll_bound(Xb, Mb)
str2 = " va_total_cost: {0:.4f}".format(float(result[0]))
str3 = " va_nll_term : {0:.4f}".format(float(result[1]))
str4 = " va_kld_q2p : {0:.4f}".format(float(result[2]))
str5 = " va_kld_p2q : {0:.4f}".format(float(result[3]))
str6 = " va_kld_amu : {0:.4f}".format(float(result[4]))
str7 = " va_kld_alv : {0:.4f}".format(float(result[5]))
str8 = " va_reg_term : {0:.4f}".format(float(result[6]))
joint_str = "\n".join([str2, str3, str4, str5, str6, str7, str8])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
###########################################
# sample and draw attention trajectories. #
###########################################
Xb = sample_batch(Xva, bs=32)
Mb = sample_data_masks(Xb, drop_prob=drop_prob, occ_dim=occ_dim)
result = SCG.sample_attention(Xb, Mb)
post_tag = "b{0:d}".format(i)
visualize_attention(result, pre_tag=result_tag, post_tag=post_tag)
# Script entry point: run the imputation variant of the sequential
# conditional generation experiment. The copy-task variant is kept here
# commented out as an alternative run configuration.
if __name__=="__main__":
    #test_seq_cond_gen_copy(step_type='add', res_tag="CPY")
    test_seq_cond_gen_impute(step_type='add', res_tag="IMP")
# <FILESEP>
import openai
import json
import spacy
from sparql_exe import execute_query, get_types, get_2hop_relations, lisp_to_sparql
from utils import process_file, process_file_node, process_file_rela, process_file_test
from rank_bm25 import BM25Okapi
from time import sleep
import re
import logging
from collections import Counter
import argparse
from pyserini.search import FaissSearcher, LuceneSearcher
from pyserini.search.hybrid import HybridSearcher
from pyserini.search.faiss import AutoQueryEncoder
import random
import itertools
# Configure root logging at INFO with timestamped output, then fetch a
# named logger used for timing diagnostics.
logging.getLogger().setLevel(logging.INFO)
# NOTE(review): basicConfig() only installs its handler/format if the root
# logger has no handlers yet; if another module configured logging first,
# the format/datefmt below are silently ignored — confirm import order.
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
                    level=logging.INFO,
                    datefmt='%Y-%m-%d %H:%M:%S')
# NOTE(review): "time recoder" looks like a typo for "time recorder", but the
# string is a getLogger() lookup key — verify no other call site uses this
# exact name before renaming.
logger = logging.getLogger("time recoder")
def select_shot_prompt_train(train_data_in, shot_number):
random.shuffle(train_data_in)
compare_list = ["le", "ge", "gt", "lt", "ARGMIN", "ARGMAX"]
if shot_number == 1:
selected_quest_compose = [train_data_in[0]["question"]]
selected_quest_compare = [train_data_in[0]["question"]]
selected_quest = [train_data_in[0]["question"]]
else:
selected_quest_compose = []
selected_quest_compare = []
each_type_num = shot_number // 2
for data in train_data_in:
if any([x in data['s_expression'] for x in compare_list]):
selected_quest_compare.append(data["question"])
if len(selected_quest_compare) == each_type_num:
break
for data in train_data_in:
if not any([x in data['s_expression'] for x in compare_list]):
selected_quest_compose.append(data["question"])
if len(selected_quest_compose) == each_type_num: