import os
import sys
import argparse
import logging
import time
import json
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from peft import PeftModel,LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, BitsAndBytesConfig
from datasets import load_dataset, load_from_disk, Dataset
import torch
from utils.warp import Warp,WarpLJP
from utils.dataset import DataCollatorForReward
from utils.trainer import PRGTrainer
from tree.base import Tree,Node,I_policy
from configs.hyperparametric import Reward_config,Tree_config
from model.logitsprocessor import OutputControlLogitsProcessor,RewardControlLogitsProcessor
from tree.asts import AST
from utils.model_generate import generate_string,generate_score
tree_config = Tree_config().to_dict()
#reward_config = Reward_config().to_dict()
#torch.cuda.set_device(0)
#TASKS = ['ecthr_a','ecthr_b']
TASKS = ['ljp',]
def get_args():
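    """Parse command-line arguments: data/model paths, QLoRA training hyperparameters, and pipeline switches."""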
parser = argparse.ArgumentParser()
## ___datasets___
#parser.add_argument('--data_path',default='lex_glue',type=str, help='Path containing dataset')
parser.add_argument('--train_path',default='',type=str, help='Path containing dataset')
parser.add_argument('--eval_path',default='',type=str, help='Path containing dataset')
parser.add_argument('--test_path',default='',type=str, help='Path containing dataset')
parser.add_argument('--dataset',default='ljp',type=str, help='Dataset of choice in data_path')
parser.add_argument('--save_data_path',default='',type=str, help='The path used to save dataset')
parser.add_argument('--output_path',default='',type=str, help='The path used to save outputs')
    parser.add_argument('--sample_path',default='',type=str, help='Path containing previously generated samples')
    parser.add_argument('--control_file',default='./codekey_proofread.txt',type=str, help='Path to the keyword file used for output control')
## ___model___
    parser.add_argument('--generate_model_path',default='',type=str, help='Path containing the generator model')
    parser.add_argument('--reward_model_path',default='',type=str, help='Path containing the reward model')
    parser.add_argument('--reward_save_path',default='./output/reward',type=str, help='Path used to save the trained reward model')
parser.add_argument('--reward_lora_path',default='',type=str,)
parser.add_argument('--per_device_train_batch_size',default=2,type=int)
parser.add_argument('--gradient_accumulation_steps',default=2,type=int)
parser.add_argument('--learning_rate',default=1e-3,type=float)
parser.add_argument('--num_train_epochs',default=10,type=int)
parser.add_argument('--logging_steps',default=200,type=int)
parser.add_argument('--save_strategy',default='epoch',type=str,)
    parser.add_argument('--fp16',action='store_true',default=True,)  # default True, so fp16 stays enabled even without the flag
parser.add_argument('--optim',default='paged_adamw_8bit',type=str,)
parser.add_argument('--lora_rank',default=64,type=int)
parser.add_argument('--lora_alpha',default=16,type=int)
parser.add_argument('--lora_dropout',default=0.1,type=float)
    ## ___pipeline___
parser.add_argument('--do_train',action='store_true',default=False, help='Training or not')
    parser.add_argument('--do_test',action='store_true',default=True, help='Evaluate or not')  # default True, so testing runs unless the default is changed
## ___parameter___
    parser.add_argument('--budget',default=20,type=int, help='number of search iterations (MCTS budget)')
    parser.add_argument('--reward_funcation',default='leaf',type=str,choices=['random','reward','leaf'], help='reward function used during tree search')  # (sic) spelling kept to match the Tree.monte_carlo_tree_search keyword
    parser.add_argument('--iteration',default=3,type=int, help='number of sampling iterations')
## ___special___
parser.add_argument('--ljp_mode',default='p',type=str,choices=['p','pd','pdf'])
    parser.add_argument('--logits_control',action='store_true',default=False, help='Constrain generation with a logits processor or not')
parser.add_argument('--add_reward',action='store_true',default=False,)
parser.add_argument('--inference_mode',default='zeroshot',type=str,choices=['zeroshot','fewshot','cot'])
return parser.parse_args()
def get_logger(path='./'):
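    """Build a logger that writes to stdout and to a timestamped log file under `path`."""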
log_path = os.path.join(path,"log_%s.txt"%(time.strftime("%Y-%m-%d-%H-%M-%S",time.localtime())))
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout),
logging.FileHandler(log_path)],
)
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.DEBUG)
return logger
def load_data(args):
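    """Load the dataset either from a dataset directory (cached to disk on first use) or from local JSON files."""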
if os.path.isdir(args.train_path):
save_path = os.path.join(args.save_data_path,args.dataset)
if not os.path.exists(save_path):
dataset = load_dataset(path=args.train_path,name=args.dataset)
dataset.save_to_disk(save_path)
else:
            dataset = load_from_disk(save_path)  # datasets written with save_to_disk must be reloaded with load_from_disk
    elif os.path.isfile(args.train_path):
data_files = {mode:path for mode,path
in zip(['train','validation','test'],[args.train_path,args.eval_path,args.test_path])
if path}
dataset = load_dataset('json',data_files=data_files)
    else:
        raise ValueError('train_path must be an existing file or directory: %s' % args.train_path)
    return dataset
def load_samples(sample_path,):
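    """Read every JSON-lines file under `sample_path` and return the parsed records as a list."""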
path_list = os.listdir(sample_path)
samples = []
for path in path_list:
path = os.path.join(sample_path,path)
        with open(path,'r',encoding='utf-8') as f:
for l in f.readlines():
sample = json.loads(l)
samples.append(sample)
return samples
def train(args,warp,dataset,):
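    """Fine-tune the reward model with QLoRA on the MCTS-sampled data, one training pass per sampling iteration."""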
# init TrainingArgument
training_args = TrainingArguments(
output_dir=os.path.join(args.reward_save_path,'reward_%s'%(time.strftime("%Y-%m-%d-%H-%M-%S",time.localtime()))),
per_device_train_batch_size=args.per_device_train_batch_size,
gradient_accumulation_steps=args.gradient_accumulation_steps,
learning_rate=args.learning_rate,
num_train_epochs=args.num_train_epochs,
logging_steps=args.logging_steps,
save_strategy=args.save_strategy,
fp16=args.fp16,
optim=args.optim,
remove_unused_columns=False
)
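    # 4-bit NF4 quantization config (QLoRA) used when loading the reward model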
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
)
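    # LoRA adapters over the attention projection matrices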
peft_config = LoraConfig(
r=args.lora_rank,
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout,
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
bias="none",
task_type="CAUSAL_LM"
)
# init model
if not warp.reward_model:
warp.load_reward_model(bnb_config=bnb_config)
model = warp.reward_model
tokenizer = warp.reward_tokenizer
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, peft_config)
# init collator
collator = DataCollatorForReward(tokenizer=tokenizer)
logits_processor = RewardControlLogitsProcessor(tokenizer=tokenizer)
    # init dataset: iteration-0 samples seed the trainer; later iterations are swapped in below
    trainset = dataset['0']
# init trainer
trainer = PRGTrainer(
tokenizer=tokenizer,
model=model,
args=training_args,
train_dataset=trainset,
data_collator=collator,
logits_processor=logits_processor
)
# clean memory
warp.generate_model = None
torch.cuda.empty_cache()
# training
logger.info('start training..')
for i,trainset in dataset.items():
if i != '0':
trainer.train_dataset = trainset
trainer.train()
logger.info('training end..')
warp.reward_model = model
warp.reward_tokenizer = tokenizer
def evaluate(args,warp,dataset,):
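    """Run step-by-step inference on the test set; optionally score each candidate step with the reward model and resample rejected ones."""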
    if warp.generate_model is None:
warp.load_generate_model()
if args.add_reward:
if args.reward_lora_path != '':
bnb_config = BitsAndBytesConfig(load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
)
warp.load_reward_model(bnb_config=bnb_config)
warp.reward_model = PeftModel.from_pretrained(warp.reward_model, args.reward_lora_path)
else:
warp.load_reward_model()
rewarder = {'model':warp.reward_model,'tokenizer':warp.reward_tokenizer,}
if args.logits_control:
rewarder['reward_processor'] = RewardControlLogitsProcessor(tokenizer=rewarder['tokenizer'])
else:
rewarder = {}
model = warp.generate_model
tokenizer = warp.generate_tokenizer
def get_response(x,a,tokenizer,model,rewarder={}):
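        """Generate one reasoning step; with a rewarder, sample up to `branch` candidates and keep the first one the reward model does not reject."""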
if rewarder == {}:
inputs = warp.prompt_to_crime(x,a,bos=tokenizer.bos_token,eos=tokenizer.eos_token)
if args.logits_control:
outputs = generate_string(inputs,tokenizer=tokenizer,model=model,logits_processor=warp.logits_processor)
else:
outputs = generate_string(inputs,tokenizer=tokenizer,model=model,)
response = warp.step_from_response(outputs)
return response
if 'reward_processor' not in rewarder.keys():
rewarder['reward_processor'] = None
for i in range(tree_config['branch']):
inputs = warp.prompt_to_crime(x,a,bos=tokenizer.bos_token,eos=tokenizer.eos_token)
if args.logits_control:
outputs = generate_string(inputs,tokenizer=tokenizer,model=model,logits_processor=warp.logits_processor)
else:
outputs = generate_string(inputs,tokenizer=tokenizer,model=model,)
response = warp.step_from_response(outputs)
thought = warp.prompt_to_value(x,a+response,bos=rewarder['tokenizer'].bos_token,eos=rewarder['tokenizer'].eos_token)
reward = generate_score(thought,
tokenizer=rewarder['tokenizer'],
model=rewarder['model'],
reward_processor=rewarder['reward_processor']
)
r = warp.value_from_response(reward)
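            # '拒绝' means 'reject': the reward model rejected this step, so try another candidate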
if '拒绝' in r:
continue
else:
break
return response
logger.info('start eval..')
time_start = time.time()
reward_control = 'rewardcontrol' if args.add_reward else 'un-reward'
task_name = args.test_path.split('/')[-2]
save_path = os.path.join(args.output_path,'eval',"%s_%s_%s_%s.json"%(task_name,args.inference_mode,reward_control,time.strftime("%Y-%m-%d-%H-%M-%S",time.localtime())))
preds = []
for i,data in enumerate(dataset):
x,a,y = warp.processing_single(data)
if args.inference_mode == 'fewshot':
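            # Chinese few-shot prefix: one worked example instructing the model to emit exactly one
            # reasoning step, wrapped in <p></p> for intermediate steps or <e></e> for the final verdict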
x = '这是一个例子:根据案情描述和已有步骤仅给出一个推理。如果是结论则直接输出<e></e>,例如<e>盗窃罪</e>。如果是步骤则直接输出<p></p>,例如<p>步骤1:…</p>\n案情描述:2013年下半年至2015年10月26日,被告人张和菊利用担任山东泰开电力建设工程有限公司、山东泰开国际工程技术有限公司现金出纳的职务便利,多次将公司的资金共计4472572.91元挪出,用于其在深圳石油化工交易所、天津渤海商品交易所的投资交易,已全部亏损。2015年10月26日,张和菊从公司提取现金26万元后,携款潜逃至济南市长清区租房处藏匿。2015年11月6日,张和菊被公安机关抓获。\n已有推理步骤:\n<e>挪用资金罪</e>\n这是问题:\n根据案情描述和已有步骤仅给出一个推理。如果是结论则直接输出<e></e>,例如<e>盗窃罪</e>。如果是步骤则直接输出<p></p>,例如<p>步骤1:…</p>\n案情描述:'+x
elif args.inference_mode == 'cot':
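            # Chinese zero-shot CoT prefix: 'think step by step and answer,'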
x = '一步步思考并回答,' + x
y = y['crime']
a = ''
for d in range(tree_config['max_depth']):
try:
response = get_response(x,a,tokenizer,model,rewarder=rewarder)
            except Exception as E:
                logger.warning('generation failed at depth {d}: {x}'.format(d=d,x=E))
                response = ''
if '<e>' in response:
break
if '<p>' in response:
a += response
y_ = response
preds.append({'x':x,'y':y,'pred':y_})
if i % args.logging_steps == 0:
logger.info('{x}'.format(x=str({'x':x,'y':y,'pred':y_})))
logger.info('eval: save...')
    with open(save_path,'w',encoding='utf-8') as file:
for l in preds:
line = json.dumps(l,ensure_ascii=False)
file.write(line)
file.write('\n')
time_end = time.time()
logger.info('reward_eval : {x} '.format(x=args.reward_model_path))
logger.info('save_eval : {x} '.format(x=save_path))
logger.info('running time: {x}'.format(x=time_end-time_start))
logger.info('eval: fin')
def sample(args,warp):
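    """Collect training samples by running Monte Carlo tree search over the training set, reusing cached samples when available."""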
    # load dataset
    logger.info('load dataset..')
dataset = load_data(args)
logger.info('start sampling..')
samples = {}
for iter in range(args.iteration):
logger.info('iter_{x}'.format(x=iter))
train_samples = []
sample_path = 'branch{b}_deep{d}_budget{g}_iter{i}'.format(b=tree_config['branch'],d=tree_config['max_depth'],g=args.budget,i=iter)
if args.sample_path != '' and sample_path in os.listdir(args.sample_path):
sample_path = os.path.join(args.sample_path,sample_path)
train_samples += load_samples(sample_path)
else:
save_path = os.path.join(args.output_path,'data',
'branch{b}_deep{d}_budget{g}_iter{i}'.format(b=tree_config['branch'],d=tree_config['max_depth'],g=args.budget,i=iter),
)
            os.makedirs(save_path, exist_ok=True)
for i,sample in enumerate(dataset['train']):
time_start = time.time()
tree_of_sample = Tree(sample=sample,warp=warp)
tree_of_sample.monte_carlo_tree_search(budget=args.budget,reward_funcation=args.reward_funcation)
#train_samples += tree_of_sample.sample(attribute='positive')
save_path = os.path.join(args.output_path,'data',
'branch{b}_deep{d}_budget{g}_iter{i}'.format(b=tree_config['branch'],d=tree_config['max_depth'],g=args.budget,i=iter),
'samples' + time.strftime("-%Y-%m-%d-%H:%M:%S", time.localtime()) + '.json')
train_samples += tree_of_sample.save(path=save_path)
time_end = time.time()
logger.info('\nrunning time: {x}'.format(x=time_end-time_start))
                _example = tree_of_sample.root.x[:50]  # truncate long case texts for logging
logger.info('{i}-th sample: {x}'.format(i=i,x=_example))
train_samples = Dataset.from_list(train_samples)
samples[str(iter)] = train_samples
return samples
def run(args):
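    """Entry point: build the task wrapper, optionally sample data and train the reward model, then evaluate on the test split."""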
# create framework
    if args.dataset in ['ljp']:
        warp = WarpLJP(args=args)
    else:
        raise ValueError('unsupported dataset: %s' % args.dataset)
warp.load_generate_model()
    # data collection and reward-model training
if args.do_train:
trainsets = sample(args=args,warp=warp)
train(args,warp,trainsets)
# test
if args.do_test:
datasets = load_data(args)
evaluate(args,warp,datasets['test'])
if __name__ == "__main__":
args = get_args()
logger = get_logger(args.output_path)
loginfo = '\n'.join(['{k}: {v}'.format(k=k,v=v) for k,v in vars(args).items()])
logger.info(loginfo)
try:
run(args)
except Exception as E:
logger.exception('{x}'.format(x=E)) |