# PCJD/code/utils/model_generate.py
import logging

import torch
import torch.nn.functional as F
from transformers.generation import GenerateDecoderOnlyOutput

from configs.hyperparametric import Generate_config, Reward_config
from utils.warp import Warp

logger = logging.getLogger(__name__)

assistant_from_template_in_response = Warp.assistant_from_template_in_response
config = Generate_config().to_dict()
reward_config = Reward_config().to_dict()
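
# The generation defaults referenced below (from the original inline comments)
# are: max_length=2048, truncation=True, max_new_tokens=1024, temperature=0.7,
# do_sample=False. A hypothetical Generate_config().to_dict() consistent with
# the lookups in this file would look like:
#
#   {
#       'max_length': 2048, 'truncation': True, 'max_new_tokens': 1024,
#       'temperature': 0.7, 'do_sample': False,
#       'output_hidden_states': False, 'return_dict_in_generate': True,
#       'output_logits': True,
#   }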

def extract_span_from_tokens(tokens, sign='e'):
    """Locate the token span wrapped in <sign>...</sign> tags.

    Because a tag such as '<e>' may be split across sub-word tokens, the
    search joins sliding windows of three consecutive tokens before testing
    for the tag. Returns (start, end) as a half-open index range into
    `tokens`, or (-1, -1) if no well-formed span is found.
    """
    start, end = -1, -1
    # range(len(tokens) - 2) so the final 3-token window is also checked
    # (the original range(len(tokens) - 3) skipped it).
    for i in range(len(tokens) - 2):
        window = ''.join(tokens[i:i + 3])
        if '<{x}>'.format(x=sign) in window:
            start = i
            break
    for i in range(len(tokens) - 2):
        window = ''.join(tokens[i:i + 3])
        if '</{x}>'.format(x=sign) in window:
            end = i + 3  # include the window holding the closing tag
            break
    if 0 <= start < end <= len(tokens):
        return start, end
    return -1, -1
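
# A quick sanity check with hypothetical sub-word pieces in which the <e> tags
# are split across tokens (exactly the case the 3-token windows handle):
#
#   tokens = ['<', 'e', '>', 'some', 'evidence', '<', '/e', '>']
#   extract_span_from_tokens(tokens, sign='e')  # -> (0, 8), the full span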

def generate_response(inputs, model, tokenizer, logits_processor=None):
    """Generate a response plus a confidence score for the tagged span.

    Defaults (per Generate_config): max_length=2048, truncation=True,
    max_new_tokens=1024, temperature=0.7, do_sample=False.
    """
    data = tokenizer.encode_plus(inputs,
                                 max_length=config['max_length'],
                                 truncation=config['truncation'],
                                 return_tensors='pt')
    # Use the model's device rather than hard-coding 'cuda'.
    input_ids = data['input_ids'].to(model.device)
    attention_mask = data['attention_mask'].to(model.device)
    gen_kwargs = dict(
        do_sample=config['do_sample'],
        max_new_tokens=config['max_new_tokens'],
        temperature=config['temperature'],
        output_hidden_states=config['output_hidden_states'],
        return_dict_in_generate=config['return_dict_in_generate'],
        output_logits=config['output_logits'],
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id,  # decoder-only LMs pad with EOS
    )
    if logits_processor:
        gen_kwargs['logits_processor'] = [logits_processor]
    output = model.generate(input_ids, attention_mask=attention_mask, **gen_kwargs)
    if isinstance(output, GenerateDecoderOnlyOutput):
        # output.logits is a tuple with one (batch, vocab) tensor per generated
        # step; with do_sample=False the argmax per step recovers the greedy ids.
        logits = output.logits
        generated_ids = torch.stack([torch.argmax(logit, dim=-1) for logit in logits], dim=1)
        generated_tokens = [tokenizer.decode(t, skip_special_tokens=True) for t in generated_ids[0]]
        # Extract the logits of the target span: prefer <p>...</p>, fall back
        # to <e>...</e>, and use the full generation if neither tag is found.
        # Note: the check is start >= 0, since a valid span can begin at index 0.
        start, end = extract_span_from_tokens(generated_tokens, 'p')
        if start >= 0:
            logits_thought = [logits[i] for i in range(start, end)]
        else:
            start, end = extract_span_from_tokens(generated_tokens, 'e')
            if start >= 0:
                logits_thought = [logits[i] for i in range(start, end)]
            else:
                logits_thought = list(logits)
        # Mean over the span of each step's maximum token probability.
        prob_thought = [F.softmax(logit, dim=-1) for logit in logits_thought]
        prob_thought = [torch.amax(prob, dim=-1) for prob in prob_thought]
        prob_thought = torch.stack(prob_thought).mean(0)
    else:
        logger.info('GenerateDecoderOnlyOutput error')
        raise TypeError('model.generate did not return a GenerateDecoderOnlyOutput; '
                        'check return_dict_in_generate/output_logits in Generate_config')
    return {'response': ''.join(generated_tokens), 'prob': prob_thought}
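
# A minimal usage sketch; the checkpoint name and prompt are illustrative only:
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-Instruct")
#   lm = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-7B-Instruct").to("cuda")
#   out = generate_response("...prompt asking for <p>/<e> tagged output...", lm, tok)
#   out['response']  # decoded generation
#   out['prob']      # mean max-probability over the tagged span (confidence proxy)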

def generate_string(inputs, model, tokenizer, logits_processor=None):
    """Generate text and return only the assistant turn of the response."""
    data = tokenizer.encode_plus(inputs,
                                 max_length=config['max_length'],
                                 truncation=config['truncation'],
                                 return_tensors='pt')
    input_ids = data['input_ids'].to(model.device)
    attention_mask = data['attention_mask'].to(model.device)
    bos_token, eos_token = tokenizer.bos_token, tokenizer.eos_token
    gen_kwargs = dict(
        do_sample=config['do_sample'],
        max_new_tokens=config['max_new_tokens'],
        temperature=config['temperature'],
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id,
    )
    if logits_processor:
        gen_kwargs['logits_processor'] = [logits_processor]
    output = model.generate(input_ids, attention_mask=attention_mask, **gen_kwargs)
    # Without return_dict_in_generate, generate() returns the id sequences.
    ori_string = tokenizer.decode(output[0], skip_special_tokens=False)
    # Keep only the assistant's reply from the chat-templated string.
    response = assistant_from_template_in_response(x=ori_string, bos=bos_token, eos=eos_token)
    return response
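
# Example call, assuming `lm`/`tok` as in the sketch above. The input is
# expected to already carry the chat template, since
# Warp.assistant_from_template_in_response strips everything but the
# assistant turn:
#
#   text = generate_string(templated_prompt, lm, tok)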

def generate_score(inputs, model, tokenizer, reward_processor=None):
    """Generate with the reward settings and return the raw decoded string."""
    data = tokenizer.encode_plus(inputs,
                                 max_length=config['max_length'],
                                 truncation=config['truncation'],
                                 return_tensors='pt')
    input_ids = data['input_ids'].to(model.device)
    attention_mask = data['attention_mask'].to(model.device)
    gen_kwargs = dict(
        do_sample=reward_config['do_sample'],
        max_new_tokens=reward_config['max_new_tokens'],
        temperature=reward_config['temperature'],
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id,
    )
    if reward_processor:
        gen_kwargs['logits_processor'] = [reward_processor]
    output = model.generate(input_ids, attention_mask=attention_mask, **gen_kwargs)
    # Return the raw templated string; score parsing happens downstream
    # (the template-stripping step is intentionally not applied here).
    return tokenizer.decode(output[0], skip_special_tokens=False)
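
# A minimal usage sketch (assumes a judge/reward-style prompt whose completion
# encodes a score; both the tag format and the parsing are hypothetical and
# happen downstream of this file):
#
#   raw = generate_score(judge_prompt, lm, tok)
#   # e.g. parse a numeric score out of `raw` with a regex matching the
#   # judge's output format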