rosssso's picture
Upload folder using huggingface_hub
09c07bd verified
import sys,os,gc
import torch
import torch.nn.functional as F
import json
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from peft import PeftModel
from PIL import Image
import requests
import math
import hydra
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from omegaconf import DictConfig, OmegaConf
import logging
import importlib
# Module-wide logging setup; every class/function below reports through `logger`.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class EvalDataset(Dataset):
    """JSONL-backed evaluation dataset.

    Each non-empty line of ``data_path`` is parsed as one JSON record.
    Relative image paths are resolved against the directory containing
    ``data_path``; unreadable or absent images yield ``None``.
    """

    def __init__(self, data_path):
        with open(data_path, 'r') as f:
            self.data = [json.loads(line) for line in f if line.strip()]
        # Assuming images are relative to the data_path's directory.
        self.image_dir = os.path.dirname(data_path)

    def __len__(self):
        return len(self.data)

    def _load_image(self, image_filename):
        """Return an RGB PIL image for *image_filename*, or None on failure."""
        if not image_filename:
            return None
        image_path = image_filename if os.path.isabs(image_filename) else os.path.join(self.image_dir, image_filename)
        try:
            return Image.open(image_path).convert("RGB")
        except Exception as e:
            # Handle missing/corrupt images gracefully; caller skips None images.
            logger.warning(f"Could not load image {image_path}: {e}")
            return None

    def __getitem__(self, idx):
        record = self.data[idx]
        return {
            'id': record.get('id', idx),  # fall back to the index when 'id' is absent
            'image': self._load_image(record.get('image', None)),
            'question': record.get('question', ''),
            'answer': record.get('answer', ''),
        }
def eval_collate_fn(batch):
    """Collate eval samples: transpose a list of sample dicts into a dict of lists.

    Keeps PIL images (or None placeholders) as plain Python lists instead of
    letting the default collate try to tensorize them.
    """
    return {
        'ids': [sample['id'] for sample in batch],
        'images': [sample['image'] for sample in batch],
        'questions': [sample['question'] for sample in batch],
        'answers': [sample['answer'] for sample in batch],
    }
class PRTGenerator:
    """
    Inference-only PRT generator (Design A + Hydra).

    Holds a reference (Ref) model and a Reward model and performs PRT
    generation by composing their logits at every decoding step.
    """

    def __init__(self, base_model_id, adapter_path=None, device="cuda", dtype=torch.bfloat16):
        """
        Load the reference and reward models.

        Args:
            base_model_id: HF model id or local path of the Qwen2-VL base model.
            adapter_path: Optional directory holding a PEFT adapter for the
                reward model. When None or when no adapter weight file is found,
                the reward model stays identical to the base (identity PRT).
            device: Device string used for both `device_map` and `.to()`.
            dtype: Torch dtype for both models.
        """
        self.device = device
        self.dtype = dtype
        logger.info(f"Loading Base Models from {base_model_id}...")
        # 1. Reference model (frozen base).
        self.ref_model = Qwen2VLForConditionalGeneration.from_pretrained(
            base_model_id,
            torch_dtype=dtype,
            device_map=device,
            trust_remote_code=True,
        ).eval()
        # 2. Reward model (base + optional adapter); initially identical to the base.
        self.reward_model = Qwen2VLForConditionalGeneration.from_pretrained(
            base_model_id,
            torch_dtype=dtype,
            device_map=device,
            trust_remote_code=True,
        )
        if adapter_path:
            logger.info(f"Loading Adapter from {adapter_path}...")
            # Attach the adapter only if one of the known weight files exists.
            # (The redundant local `import os` from the original is dropped;
            # `os` is already imported at module level.)
            has_adapter_weights = any(
                os.path.exists(os.path.join(adapter_path, fname))
                for fname in ("adapter_model.bin", "adapter_model.safetensors")
            )
            if has_adapter_weights:
                self.reward_model = PeftModel.from_pretrained(self.reward_model, adapter_path)
            else:
                logger.warning(f"Adapter file not found at {adapter_path}. Proceeding with raw base model (Identity PRT).")
        else:
            logger.info("Adapter not specified. Reward model is same as Ref model (Identity PRT).")
        self.reward_model.to(device).eval()
        # Shared processor/tokenizer for prompting and decoding.
        self.processor = AutoProcessor.from_pretrained(base_model_id, trust_remote_code=True)

    @torch.no_grad()
    def generate(self, image_input, prompt, max_new_tokens=100, prt_lambda=1.0, temperature=0.7, top_p=0.9):
        """
        PRT generation (greedy or sampling).

        Per Algorithm 2, the per-step decoding distribution is built from
        ``log_softmax(ref_logits) + prt_lambda * reward_logits``.

        Args:
            image_input: A PIL image, a local file path, or an http(s) URL.
            prompt: The user question text.
            max_new_tokens: Maximum number of tokens to generate.
            prt_lambda: Weight applied to the reward model's logits.
            temperature: Sampling temperature; values <= 0 select greedy decoding.
            top_p: Nucleus-sampling threshold (only used when sampling).

        Returns:
            The decoded answer string, or the literal string
            "Error: Image load failed." when the image cannot be loaded.
        """
        # --- Preprocessing: normalize image_input to a PIL RGB image ---
        try:
            if isinstance(image_input, Image.Image):
                image = image_input
            elif isinstance(image_input, str):
                if image_input.startswith("http"):
                    image = Image.open(requests.get(image_input, stream=True).raw).convert("RGB")
                else:
                    image = Image.open(image_input).convert("RGB")
            else:
                raise ValueError("image_input must be PIL.Image or str path")
        except Exception as e:
            logger.error(f"Failed to load image: {e}")
            return "Error: Image load failed."
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image},
                    {"type": "text", "text": prompt},
                ],
            }
        ]
        text_prompt = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = self.processor(
            text=[text_prompt],
            images=[image],
            padding=True,
            return_tensors="pt",
        ).to(self.device)
        input_ids = inputs.input_ids
        attention_mask = inputs.attention_mask
        pixel_values = inputs.pixel_values
        image_grid_thw = inputs.image_grid_thw
        # --- Generation loop ---
        # Independent KV caches for the Ref and Reward models.
        past_ref = None
        past_reward = None
        generated_ids = input_ids.clone()
        current_input_ids = input_ids
        first_step = True
        for _ in range(max_new_tokens):
            # Common forward kwargs (cache enabled).
            fwd_kwargs = {"use_cache": True}
            # Only the first step needs the full multimodal inputs; afterwards
            # the KV caches carry the context and we feed one token at a time.
            if first_step:
                fwd_kwargs.update({
                    "attention_mask": attention_mask,
                    "pixel_values": pixel_values,
                    "image_grid_thw": image_grid_thw,
                })
            # 1. Reference model forward.
            ref_out = self.ref_model(
                input_ids=current_input_ids,
                past_key_values=past_ref,
                **fwd_kwargs
            )
            past_ref = ref_out.past_key_values
            ref_logits = ref_out.logits[:, -1, :]
            # 2. Reward model forward.
            rew_out = self.reward_model(
                input_ids=current_input_ids,
                past_key_values=past_reward,
                **fwd_kwargs
            )
            past_reward = rew_out.past_key_values
            reward_logits = rew_out.logits[:, -1, :]
            # 3. Logits composition (Algorithm 2):
            #    v = log_softmax(ref) + lambda * reward
            ref_logp = F.log_softmax(ref_logits, dim=-1)
            combined_logits = ref_logp + (prt_lambda * reward_logits)
            # 4. Token selection (sampling or greedy).
            if temperature > 0:
                # Apply temperature.
                logits = combined_logits / temperature
                probs = F.softmax(logits, dim=-1)
                # Apply top-p (nucleus sampling).
                if top_p < 1.0:
                    sorted_probs, sorted_indices = torch.sort(probs, descending=True)
                    cumulative_probs = torch.cumsum(sorted_probs, dim=-1)
                    # Remove tokens with cumulative probability above the threshold.
                    sorted_indices_to_remove = cumulative_probs > top_p
                    # Shift right so the first token crossing the threshold is kept.
                    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                    sorted_indices_to_remove[..., 0] = 0
                    indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                    probs[indices_to_remove] = 0.0
                    probs = probs / probs.sum(dim=-1, keepdim=True)  # re-normalize
                next_token = torch.multinomial(probs, num_samples=1)
            else:
                # Greedy decoding.
                next_token = torch.argmax(combined_logits, dim=-1, keepdim=True)
            # Append the new token and feed only it on the next step.
            generated_ids = torch.cat([generated_ids, next_token], dim=-1)
            current_input_ids = next_token
            first_step = False
            # Stop on EOS (assumes batch size 1, as built above).
            if next_token.item() == self.processor.tokenizer.eos_token_id:
                break
        # Decode only the newly generated suffix.
        generated_ids_trimmed = generated_ids[:, inputs.input_ids.shape[1]:]
        output_text = self.processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True)[0]
        return output_text
@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig):
    """Run PRT evaluation over the configured dataset, printing each prediction."""
    logger.info(f"Configuration:\n{OmegaConf.to_yaml(cfg)}")
    # Hydra switches the working directory to outputs/... by default, so
    # training artifacts and local data paths must be resolved against the
    # original cwd via to_absolute_path.
    adapter_path = hydra.utils.to_absolute_path(cfg.training.output_dir)
    device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
    dtype = torch.float32 if device == "cpu" else torch.bfloat16
    generator = PRTGenerator(
        base_model_id=cfg.model.model_id,
        adapter_path=adapter_path,  # the training output_dir doubles as the adapter path
        device=device,
        dtype=dtype,
    )
    # DataLoader setup.
    data_path = hydra.utils.to_absolute_path(cfg.data.path)
    logger.info(f"Loading dataset from {data_path}")
    dataloader = DataLoader(
        EvalDataset(data_path),
        batch_size=cfg.training.batch_size,
        shuffle=False,
        # NOTE: Datasets that yield PIL.Image objects can be unstable under
        # multi-process loading in some environments, so stay at 0 workers.
        num_workers=0,
        collate_fn=eval_collate_fn,
    )
    logger.info("--- Starting Evaluation ---")
    for batch in tqdm(dataloader):
        try:
            rows = zip(batch['ids'], batch['images'], batch['questions'], batch['answers'])
            for idx, img, q, ans in rows:
                if img is None:
                    logger.warning(f"Skipping ID {idx} due to image loading error.")
                    continue
                output = generator.generate(
                    image_input=img,
                    prompt=q,
                    max_new_tokens=cfg.generation.max_new_tokens,
                    prt_lambda=cfg.training.prt_lambda,
                    # Default to greedy decoding when temperature is unspecified.
                    temperature=cfg.generation.get("temperature", 0.0),
                    top_p=cfg.generation.get("top_p", 1.0),
                )
                print(f"\n[ID: {idx}] Q: {q}\nA (GT): {ans}\nA (Pred): {output}")
        except Exception as e:
            logger.error(f"Error in batch: {e}")
    logger.info("Evaluation Completed.")
# Script entry point (Hydra parses CLI overrides inside main()).
if __name__ == "__main__":
    main()