import torch
# Qwen2.5-VL checkpoints load via the Qwen2_5_VL* classes (recent transformers);
# the Qwen2VL* classes only match the older Qwen2-VL checkpoints.
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from peft import PeftModel
from PIL import Image
import requests
import argparse


@torch.no_grad()
def prt_greedy_generate(
    ref_model,
    reward_model,
    input_ids,
    attention_mask=None,
    pixel_values=None,
    image_grid_thw=None,
    video_grid_thw=None,
    max_new_tokens=64,
    eos_token_id=None,
):
    """
    Greedy generation with PRT (Portable Reward Tuning).

    Implements Algorithm 2 (Inference/Generation) from the paper: at each
    token-generation step, both the Ref model and the Reward model are run,
    and their outputs are fused to choose the next token.

    Scoring rule: v_theta = log_softmax(ref_logits) + reward_logits
    """
    ref_model.eval()
    reward_model.eval()

    # Separate KV caches for the two models.
    past_ref = None
    past_reward = None

    # Ensure a batch dimension.
    if input_ids.dim() == 1:
        input_ids = input_ids.unsqueeze(0)

    current_input_ids = input_ids
    generated_ids = input_ids.clone()

    first_step = True

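    # Note: after the first step no attention_mask is passed. With a KV cache,
    # HF models then attend to all cached positions, which is fine for the
    # unpadded, batch-size-1 usage in main() but would be wrong for
    # left-padded batches.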
    for _ in range(max_new_tokens):
        # --- Reward model forward ---
        reward_kwargs = {
            "use_cache": True,
            "past_key_values": past_reward,
        }
        # Multimodal inputs are only needed on the first step; after that the
        # visual tokens are already in the KV cache.
        if first_step:
            reward_kwargs.update({
                "attention_mask": attention_mask,
                "pixel_values": pixel_values,
                "image_grid_thw": image_grid_thw,
                "video_grid_thw": video_grid_thw,
            })

        out_r = reward_model(input_ids=current_input_ids, **reward_kwargs)
        past_reward = out_r.past_key_values
        reward_logits = out_r.logits[:, -1, :]

        # --- Reference model forward ---
        ref_kwargs = {
            "use_cache": True,
            "past_key_values": past_ref,
        }
        if first_step:
            ref_kwargs.update({
                "attention_mask": attention_mask,
                "pixel_values": pixel_values,
                "image_grid_thw": image_grid_thw,
                "video_grid_thw": video_grid_thw,
            })

        out_ref = ref_model(input_ids=current_input_ids, **ref_kwargs)
        past_ref = out_ref.past_key_values
        ref_logits = out_ref.logits[:, -1, :]

        # PRT fusion: v_theta = log_softmax(ref_logits) + reward_logits,
        # then pick the next token greedily.
        scores = torch.log_softmax(ref_logits, dim=-1) + reward_logits
        next_token = torch.argmax(scores, dim=-1, keepdim=True)

        generated_ids = torch.cat([generated_ids, next_token], dim=-1)

        # From now on, feed only the newly generated token; the KV caches
        # hold the rest of the context.
        current_input_ids = next_token
        first_step = False

        # Stop once every sequence in the batch has produced EOS.
        if eos_token_id is not None and (next_token == eos_token_id).all():
            break

    return generated_ids
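

# A minimal sanity-check sketch of the fusion rule above on dummy tensors
# (illustrative only; `vocab_size` and the random logits are placeholders):
#
#     vocab_size = 8
#     ref_logits = torch.randn(1, vocab_size)     # last-position ref logits
#     reward_logits = torch.randn(1, vocab_size)  # last-position reward logits
#     scores = torch.log_softmax(ref_logits, dim=-1) + reward_logits
#     next_token = scores.argmax(dim=-1, keepdim=True)  # shape (1, 1)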


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_id", type=str, default="Qwen/Qwen2.5-VL-3B-Instruct")
    parser.add_argument("--adapter_path", type=str, default=None, help="Path to the trained adapter")
    parser.add_argument("--image_path", type=str, default="http://images.cocodataset.org/val2017/000000039769.jpg")
    parser.add_argument("--prompt", type=str, default="Describe this image.")
    args = parser.parse_args()

    device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
    dtype = torch.bfloat16 if device != "cpu" else torch.float32

    print(f"Using device: {device}")
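    # Note: bfloat16 on MPS depends on the PyTorch version; if you hit dtype
    # errors on Apple Silicon, falling back to torch.float16 is a reasonable
    # alternative.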

    # Frozen reference model.
    print(f"Loading Ref Model: {args.model_id}...")
    ref_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        args.model_id,
        torch_dtype=dtype,
        device_map=device,
        trust_remote_code=True,
    ).eval()

    # Reward model: same base checkpoint, optionally with a PEFT adapter on top.
    print(f"Loading Reward Model Base: {args.model_id}...")
    reward_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        args.model_id,
        torch_dtype=dtype,
        device_map=device,
        trust_remote_code=True,
    )

    if args.adapter_path:
        print(f"Loading Adapter from {args.adapter_path}...")
        reward_model = PeftModel.from_pretrained(reward_model, args.adapter_path)
    else:
        print("No adapter path given; running with the base model only (no reward).")

    reward_model.to(device).eval()

    processor = AutoProcessor.from_pretrained(args.model_id, trust_remote_code=True)

    # Load the image from a URL or a local path.
    if args.image_path.startswith("http"):
        image = Image.open(requests.get(args.image_path, stream=True).raw).convert("RGB")
    else:
        image = Image.open(args.image_path).convert("RGB")

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": args.prompt},
            ],
        }
    ]
    text_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
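    # For reference, the rendered prompt looks roughly like the following
    # (exact tokens depend on the checkpoint's chat template):
    #   <|im_start|>user
    #   <|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>
    #   <|im_start|>assistant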

    inputs = processor(
        text=[text_prompt],
        images=[image],
        padding=True,
        return_tensors="pt",
    ).to(device)

    print("Generating...")
    gen_ids = prt_greedy_generate(
        ref_model=ref_model,
        reward_model=reward_model,
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        pixel_values=inputs.pixel_values,
        image_grid_thw=inputs.image_grid_thw,
        max_new_tokens=100,
        eos_token_id=processor.tokenizer.eos_token_id,
    )

    # Strip the prompt tokens so that only newly generated text is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, gen_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    print("\n[Output]:")
    print(output_text[0])


if __name__ == "__main__":
    main()
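
# Example invocation (script name and adapter path are illustrative):
#   python prt_generate.py \
#       --model_id Qwen/Qwen2.5-VL-3B-Instruct \
#       --adapter_path ./prt_reward_adapter \
#       --prompt "Describe this image."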