Upload folder using huggingface_hub
- README.md +660 -0
- examples/prt12_qwen25vl/README.md +74 -0
- examples/prt12_qwen25vl/__pycache__/modeling_prt.cpython-314.pyc +0 -0
- examples/prt12_qwen25vl/dummy_data.jsonl +2 -0
- examples/prt12_qwen25vl/inference_prt12.py +213 -0
- examples/prt12_qwen25vl/modeling_prt.py +136 -0
- examples/prt12_qwen25vl/requirements.txt +7 -0
- examples/prt12_qwen25vl/test_image.jpg +3 -0
- examples/prt12_qwen25vl/train_prt12.py +251 -0
- examples/prt14_qwen25vl/README.md +138 -0
- examples/prt14_qwen25vl/__pycache__/eval_prt14.cpython-314.pyc +0 -0
- examples/prt14_qwen25vl/configs/config.yaml +39 -0
- examples/prt14_qwen25vl/eval_prt14.py +305 -0
- examples/prt14_qwen25vl/requirements.txt +12 -0
- examples/prt14_qwen25vl/test_image.jpg +3 -0
- examples/prt14_qwen25vl/train_prt14.py +265 -0
README.md
ADDED
@@ -0,0 +1,660 @@
# MLX

[**Quickstart**](#quickstart) | [**Installation**](#installation) |
[**Documentation**](https://ml-explore.github.io/mlx/build/html/index.html) |
[**Examples**](#examples)

[CircleCI](https://circleci.com/gh/ml-explore/mlx)

MLX is an array framework for machine learning on Apple silicon,
brought to you by Apple machine learning research.

Some key features of MLX include:

- **Familiar APIs**: MLX has a Python API that closely follows NumPy. MLX
  also has fully featured C++, [C](https://github.com/ml-explore/mlx-c), and
  [Swift](https://github.com/ml-explore/mlx-swift/) APIs, which closely mirror
  the Python API. MLX has higher-level packages like `mlx.nn` and
  `mlx.optimizers` with APIs that closely follow PyTorch to simplify building
  more complex models.

- **Composable function transformations**: MLX supports composable function
  transformations for automatic differentiation, automatic vectorization,
  and computation graph optimization.

- **Lazy computation**: Computations in MLX are lazy. Arrays are only
  materialized when needed.

- **Dynamic graph construction**: Computation graphs in MLX are constructed
  dynamically. Changing the shapes of function arguments does not trigger
  slow compilations, and debugging is simple and intuitive.

- **Multi-device**: Operations can run on any of the supported devices
  (currently the CPU and the GPU).

- **Unified memory**: A notable difference between MLX and other frameworks
  is the *unified memory model*. Arrays in MLX live in shared memory.
  Operations on MLX arrays can be performed on any of the supported
  device types without transferring data, as the sketch below shows.
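A minimal sketch of the unified memory point, following the pattern in the MLX documentation; the same arrays feed operations on both devices with no explicit transfer:

```python
import mlx.core as mx

a = mx.random.normal((256, 256))
b = mx.random.normal((256, 256))

# The same arrays can be consumed by either device;
# no .to(device) call or copy is ever needed.
c_gpu = mx.add(a, b, stream=mx.gpu)
c_cpu = mx.add(a, b, stream=mx.cpu)
```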
MLX is designed by machine learning researchers for machine learning
researchers. The framework is intended to be user-friendly, but still efficient
to train and deploy models. The design of the framework itself is also
conceptually simple. We intend to make it easy for researchers to extend and
improve MLX with the goal of quickly exploring new ideas.

The design of MLX is inspired by frameworks like
[NumPy](https://numpy.org/doc/stable/index.html),
[PyTorch](https://pytorch.org/), [Jax](https://github.com/google/jax), and
[ArrayFire](https://arrayfire.org/).

## Examples

The [MLX examples repo](https://github.com/ml-explore/mlx-examples) has a
variety of examples, including:

- [Transformer language model](https://github.com/ml-explore/mlx-examples/tree/main/transformer_lm) training.
- Large-scale text generation with
  [LLaMA](https://github.com/ml-explore/mlx-examples/tree/main/llms/llama) and
  finetuning with [LoRA](https://github.com/ml-explore/mlx-examples/tree/main/lora).
- Generating images with [Stable Diffusion](https://github.com/ml-explore/mlx-examples/tree/main/stable_diffusion).
- Speech recognition with [OpenAI's Whisper](https://github.com/ml-explore/mlx-examples/tree/main/whisper).

## Quickstart

See the [quick start
guide](https://ml-explore.github.io/mlx/build/html/usage/quick_start.html)
in the documentation.

## Installation

MLX is available on [PyPI](https://pypi.org/project/mlx/). To install MLX on
macOS, run:

```bash
pip install mlx
```

To install the CUDA backend on Linux, run:

```bash
pip install mlx[cuda]
```

To install a CPU-only Linux package, run:

```bash
pip install mlx[cpu]
```

Check out the
[documentation](https://ml-explore.github.io/mlx/build/html/install.html#)
for more information on building the C++ and Python APIs from source.

## Fine-Tuning Methods: LoRA and PRT

This repository implements and ships two methods for efficiently fine-tuning LLMs on Apple Silicon.

### LoRA (Low-Rank Adaptation)

#### Overview

LoRA fine-tunes a model by adding trainable low-rank matrices while leaving the original weights unchanged. It is memory efficient, and adapters trained for different tasks can be swapped in and out.

#### Architecture

```
Original:   output = W × input
With LoRA:  output = (W + B × A) × input

W:    original weight matrix (frozen, never updated)
A, B: low-rank matrices (trained)
rank: dimensionality of the low-rank factors (typically 4-16)
```
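As a rough illustration of the `W + B × A` idea, here is a minimal PyTorch sketch; the actual `mlx_lm` implementation differs in details such as scaling and initialization:

```python
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """Frozen linear layer W plus a trainable low-rank update B @ A."""

    def __init__(self, base: nn.Linear, rank: int = 8, scale: float = 20.0):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad_(False)  # W stays frozen
        self.A = nn.Linear(base.in_features, rank, bias=False)   # down-projection
        self.B = nn.Linear(rank, base.out_features, bias=False)  # up-projection
        nn.init.zeros_(self.B.weight)  # B @ A = 0 at init, so training starts from W
        self.scale = scale

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.scale * self.B(self.A(x))
```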
**Characteristics**:

- **Trainable parameters**: 0.1-1% of all parameters (the low-rank matrices only)
- **Memory usage**: small (1.2-1.5× the model size)
- **Storage**: very small (0.1-1% of the base model)
- **Training time**: short
- **Quality**: close to full fine-tuning

#### Usage

**Basic usage**:

```bash
# Activate the virtual environment
source .venv/bin/activate

# LoRA fine-tuning
python -m mlx_lm lora \
    --model ./models/tinyllama-1.1b \
    --train --data ./finetune_data \
    --num-layers 8 \
    --lora-r 8 \
    --learning-rate 1e-4 \
    --iters 30
```

**Recommended parameters**:

- `--lora-r`: 8 (rank of the low-rank factors)
- `--num-layers`: 8 (number of layers to adapt; 8 of TinyLlama's 22 layers)
- `--learning-rate`: 1e-4
- `--iters`: 30

**Inference with a trained adapter**:

```bash
python -m mlx_lm chat \
    --model ./models/tinyllama-1.1b \
    --adapter-path ./adapters \
    --prompt "### Instruction: What is the capital of Japan?\n### Response:"
```

#### Architecture details

**Example: TinyLlama-1.1B**:

- **Model**: TinyLlama-1.1B-Chat-v1.0
- **LoRA layers**: 8 (of 22 total)
- **LoRA rank**: 8
- **LoRA scale**: 20.0
- **Trainable parameters**: about 0.209% (2.294M / 1100.048M)
- **Adapter size**: about 8.8 MB (0.44% of the base model)

**Performance**:

- Training speed: about 5-6 it/sec
- Token throughput: about 250-300 tokens/sec
- Peak memory: about 2.4-2.5 GB

See [`examples/prt_llm/README.md`](examples/prt_llm/README.md) and [`knowledge/SLM_選定とファインチューニング手法と性能変化.md`](knowledge/SLM_選定とファインチューニング手法と性能変化.md) for details.

---

### PRT (Portable Reward Tuning)

#### Overview

PRT reformulates fine-tuning as a reward-maximization problem and explicitly learns a reward model, yielding a fine-tuning method that is **reusable across different base models**. It is based on the paper "Portable Reward Tuning: Towards Reusable Fine-Tuning across Different Pretrained Models" (arXiv:2502.12776v1).

#### Naming in this repository (PRT1/2/3)

To keep the implementations apart, this README uses the following names (**in every variant, the trained component is the reward model**):

- **PRT1**: linear reward head (base hidden states → vocab-sized reward logits): `examples/prt_llm/prt_llm.py`
- **PRT2**: tiny causal Transformer reward model (base hidden → small Transformer → vocab): `examples/prt_llm/prt_llm2.py`
- **PRT3**: reward model trained as a **full copy (separate instance)** of the base model: `examples/prt_llm/prt_llm_v3.py`

#### Architecture

**PRT1 (linear reward head), basic structure**:

```
Base model (frozen)
├── Transformer layers (22)
└── LM head (Linear)

Reward model (trainable)
└── Reward head (Linear)
    ├── Input:  base-model hidden states (2048 dims)
    └── Output: reward logits (32000 dims)

PRT composition:
  v_θ = log softmax(base_logits) + reward_logits / λ
  π_PRT = softmax(v_θ)
```

**PRT2 (extended): tiny causal Transformer reward model**:

```
RewardTransformerRewardModel
├─ proj_in: Linear(2048 → 512)
├─ blocks: CausalTransformerBlock × L (L=2 by default)
│   └─ block
│       ├─ norm1: RMSNorm(512)
│       ├─ attn: CausalSelfAttention(d=512, heads=8) + ALiBi
│       ├─ norm2: RMSNorm(512)
│       └─ mlp: SwiGLU FFN(512 → 2048 → 512)
├─ norm_out: RMSNorm(512)
└─ vocab_head: Linear(512 → 32000)
```
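For orientation, a rough PyTorch skeleton of this shape follows; the layer internals (ALiBi attention, SwiGLU FFN, RMSNorm) are simplified, so treat it as a sketch rather than the MLX implementation in `prt_llm2.py`:

```python
import torch
import torch.nn as nn

class TinyRewardTransformer(nn.Module):
    """Hidden states (2048) -> small causal Transformer (512) -> vocab logits."""

    def __init__(self, hidden=2048, d_model=512, n_layers=2, n_heads=8, vocab=32000):
        super().__init__()
        self.proj_in = nn.Linear(hidden, d_model)
        layer = nn.TransformerEncoderLayer(
            d_model, n_heads, dim_feedforward=2048, batch_first=True, norm_first=True
        )
        self.blocks = nn.TransformerEncoder(layer, n_layers)
        self.vocab_head = nn.Linear(d_model, vocab)

    def forward(self, h: torch.Tensor) -> torch.Tensor:
        # h: [batch, seq, hidden] hidden states from the frozen base model
        x = self.proj_in(h)
        seq_len = x.size(1)
        # Causal mask: position i may only attend to positions <= i
        mask = torch.full((seq_len, seq_len), float("-inf"), device=x.device).triu(1)
        x = self.blocks(x, mask=mask)
        return self.vocab_head(x)  # [batch, seq, vocab] reward logits
```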
**Characteristics**:

- **Base-model parameters**: completely frozen (never updated)
- **Reward model**: independent parameters (trained)
- **Portability**: the reward model can be reused across base models that share the same tokenizer
- **Inference cost (PRT1/2)**: a single forward pass through the base model (efficient)
- **Parameter counts**: about 65M for PRT1, about 26M for PRT2 (with L=2)

#### Usage

**PRT1 (linear reward head)**:

```bash
# Activate the virtual environment
source .venv/bin/activate

# Train the reward model
python3 examples/prt_llm/prt_llm.py train \
    --model ./models/tinyllama-1.1b \
    --data ./finetune_data \
    --output-dir ./prt_reward \
    --learning-rate 1e-4 \
    --iters 30 \
    --lambda 1.0 \
    --entropy-coeff 0.05

# Generate with PRT
python3 examples/prt_llm/prt_llm.py generate \
    --model ./models/tinyllama-1.1b \
    --reward ./prt_reward \
    --prompt "### Instruction: What is the capital of Japan?\n### Response:" \
    --max-tokens 100 \
    --temp 0.7
```

**PRT2 (tiny Transformer reward model)**:

```bash
# Train the reward model
python3 examples/prt_llm/prt_llm2.py train \
    --model ./models/tinyllama-1.1b \
    --data ./finetune_data \
    --output-dir ./prt2_reward \
    --lambda 1.0 \
    --entropy-coeff 0.05 \
    --reward-d-model 512 \
    --reward-layers 2 \
    --reward-heads 8 \
    --reward-ffn-dim 2048

# Generate with PRT2
python3 examples/prt_llm/prt_llm2.py generate \
    --model ./models/tinyllama-1.1b \
    --reward ./prt2_reward \
    --prompt "### Instruction: What is the capital of Japan?\n### Response:" \
    --max-tokens 100 \
    --temp 0.7
```

#### Loss function

The PRT loss is:

```python
# Algorithm 1 (as in the paper)
v_θ = log softmax(base_logits) + reward_logits / λ
π_PRT = softmax(v_θ)
loss = CE(π_PRT, y) + (-entropy_coeff * H(ρ_θ))

# ρ_θ = softmax(reward_logits)  (reward-only distribution)
# H(ρ_θ): entropy regularization (prevents collapse)
```
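The same loss as a runnable PyTorch sketch; tensor shapes and the entropy term are one reading of the pseudocode above, so double-check against `prt_llm.py`:

```python
import torch
import torch.nn.functional as F

def prt_loss(base_logits, reward_logits, targets, lam=1.0, entropy_coeff=0.05):
    """base_logits, reward_logits: [batch, seq, vocab]; targets: [batch, seq]."""
    # v_theta = log softmax(base_logits) + reward_logits / lambda
    v = F.log_softmax(base_logits, dim=-1) + reward_logits / lam
    # CE(pi_PRT, y): cross_entropy normalizes v internally, i.e. softmax(v_theta)
    ce = F.cross_entropy(v.view(-1, v.size(-1)), targets.view(-1), ignore_index=-100)
    # H(rho_theta): entropy of the reward-only distribution, used as a regularizer
    log_rho = F.log_softmax(reward_logits, dim=-1)
    entropy = -(log_rho.exp() * log_rho).sum(dim=-1).mean()
    return ce - entropy_coeff * entropy
```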
**Parameters**:

- `--lambda`: KL-regularization coefficient (default: 1.0)
- `--entropy-coeff`: entropy-regularization coefficient (default: 0.05)
- `--reward-d-model`: Transformer width for PRT2 (default: 512)
- `--reward-layers`: number of layers for PRT2 (default: 2)

#### PRT vs LoRA

| Aspect | LoRA | PRT1 | PRT2 | PRT3 |
|------|------|------|------|------|
| **Trainable parameters** | 0.1-1% | ~6% | ~2.4% | ≈100% (reward = full copy) |
| **Memory usage** | small | moderate | moderate | large (two models) |
| **Training time** | short | moderate | somewhat long | long |
| **Storage** | very small | small | small | large |
| **Portability** | none (model-specific) | **yes** (same tokenizer required) | **yes** | **yes** |
| **Inference speed** | fast | fast | medium | low-medium (two forward passes) |
| **Expressiveness** | medium | low (linear) | high (Transformer) | high (full model) |

See [`examples/prt_llm/README.md`](examples/prt_llm/README.md) and [`knowledge/PRT_ネットワーク構造解説.md`](knowledge/PRT_ネットワーク構造解説.md) for details.

---

### PRT Implementation History & Architectures

As development progressed, PRT expanded from LLMs to VLMs, and from experimental setups to HPC clusters and real applications. The table below lists where each version lives and what distinguishes it.

**Version summary:**

| Ver | Environment | Model | LoRA | Task | Path | Notes |
| :--- | :--- | :--- | :--- | :--- | :--- | :--- |
| **PRT1** | Mac (MLX) | TinyLlama-1.1B | × | LLM | `examples/prt_llm/prt_llm.py` | lightweight reward head |
| **PRT2** | Mac (MLX) | TinyLlama-1.1B | × | LLM | `examples/prt_llm/prt_llm2.py` | expressive reward model |
| **PRT3** | Mac (MLX) | TinyLlama-1.1B | ◯ | LLM | `examples/prt_llm/prt_llm_v3.py` | **LLM standard (twin)** |
| **PRT4** | Mac (MLX) | DeepSeek-VL2 | ? | VLM | `examples/prt_flickr30k` | frozen (MoE too heavy) |
| **PRT5** | Mac (MLX) | Qwen2-VL-2B | ◯ | VLM | `examples/prt5_qwen2vl` | **VLM standard (efficient)** |
| **PRT6** | Linux (PT) | Qwen2-VL-2B | ◯ | VLM | `examples/prt6_pytorch` | PyTorch port |
| **PRT7** | HPC (Sing) | Qwen2-VL-2B | ◯ | VLM | `examples/prt7_singularity` | containerized |
| **PRT8** | Mac (MLX) | Qwen2-VL-2B | ◯ | VLM | `examples/prt8_vlm` | generic framework |
| **PRT9** | Mac (MLX) | Qwen2-VL-2B | ◯ | DriveLM | `examples/prt9_drivelm` | autonomous-driving QA |
| **PRT10** | Linux (PT) | Eagle2-1B | ◯ | DriveLM | `examples/prt10_pytorch_drivelm` | (conceptual/WIP) |
| **PRT11** | **Mac** | **Eagle2-1B** | **◯** | **DriveLM** | `examples/prt11_drivelm` | **practical Mac build (active)** |
| **PRT12** | **Any** | **Qwen2.5-VL** | **◯** | **General** | `examples/prt12_qwen25vl` | **minimal Transformers setup (wrapper)** |
| **PRT13** | **Any** | **Qwen2.5-VL** | **◯** | **General** | `examples/prt13_qwen25vl` | **standard Transformers design (Design A)** |

Implementation details for each phase follow.

#### Phase 1: LLM Foundations (PRT1 - PRT3)

Early PRT targeted LLMs (TinyLlama and similar) and explored the structure of the reward model.

**PRT1: Linear Reward Head**

- **Structure**: a single linear layer computes rewards directly from the base model's hidden states.
- **Notes**: the lightest variant, but limited in expressiveness.
- **Implementation**: `examples/prt_llm/prt_llm.py`

**PRT2: Transformer Reward Model**

- **Structure**: a small Transformer block stacked on top of the base model's hidden states.
- **Notes**: a good balance of expressiveness and compute.
- **Implementation**: `examples/prt_llm/prt_llm2.py`

**PRT3: Twin Model Composition (Current Standard)**

- **Structure**: the reward model is a copy of the base model's architecture (a separate instance).
- **Notes**: the most expressive variant; existing techniques such as LoRA apply unchanged. This is the current standard form of PRT.
- **Implementation**: `examples/prt_llm/prt_llm_v3.py`

```mermaid
graph TD
    subgraph PRT1 [PRT1: Linear]
        B1[Base Model] --> H1[Hidden]
        H1 --> L1[Linear Head]
        L1 --> R1[Reward Logits]
    end

    subgraph PRT2 [PRT2: Tiny Transformer]
        B2[Base Model] --> H2[Hidden]
        H2 --> T2[Tiny Transformer]
        T2 --> L2[Linear Head]
        L2 --> R2[Reward Logits]
    end

    subgraph PRT3 [PRT3: Twin Model]
        B3[Base Model] --> Log3[Base Logits]
        RM3["Reward Model (Copy)"] --> R3[Reward Logits]

        Log3 -- "+" --> Sum
        R3 -- "/ lambda" --> Sum
    end
```

#### Phase 2: VLM Expansion on MLX (PRT4 - PRT5)

This phase extended PRT to vision-language models (VLMs).

**PRT4: MoE VLM Experiment (Archived)**

- **Target**: DeepSeek-VL2 (Mixture-of-Experts)
- **Outcome**: development frozen due to the MoE compute load on the Mac M1 and MLX limitations.
- **Directory**: `examples/prt_flickr30k`

**PRT5: Efficient VLM Tuning (Qwen2-VL)**

- **Target**: Qwen2-VL-2B-Instruct
- **Structure**: vision-encoder features are fed to both the base and reward LLMs. The base is 4-bit quantized for memory efficiency.
- **Implementation**: `examples/prt5_qwen2vl`

```mermaid
graph LR
    Img[Image] --> V[Vision Encoder]
    Txt[Text Prompt] --> Emb[Embedding]

    V --> Concat
    Emb --> Concat

    Concat --> Base["Base LLM (4bit Frozen)"]
    Concat --> Reward["Reward LLM (LoRA Trainable)"]

    Base --> BL[Base Logits]
    Reward --> RL[Reward Logits]

    BL & RL --> PRT_Loss
```

#### Phase 3: Scaling to HPC (PRT6 - PRT7)

This phase prepared PRT for training on university and institutional HPC (supercomputer) clusters.

**PRT6: PyTorch & Accelerate Port**

- **What**: the MLX implementation ported to PyTorch + Hugging Face Accelerate.
- **Why**: multi-GPU distributed training.
- **Implementation**: `examples/prt6_pytorch`

**PRT7: Singularity Containerization**

- **What**: PRT6 packaged into a Singularity (Apptainer) container.
- **Why**: fully reproducible dependencies (CUDA, libraries) and execution on shared clusters without root access.
- **Implementation**: `examples/prt7_singularity`

```mermaid
graph LR
    subgraph HPC_Node [HPC Compute Node]
        subgraph Singularity ["Singularity Container (PRT7)"]
            Script[run_singularity.sh]
            Env[Python venv / Dependencies]
            Code[PRT6 PyTorch Code]

            Script --> Env
            Env --> Code
        end
        GPU[NVIDIA A100/H100 GPUs]
        Code -.-> GPU
    end
```

#### Phase 4: Generalization & Application (PRT8 - PRT9)

Refactoring toward practical use, and application to more complex tasks.

**PRT8: VLM Generalization Framework**

- **What**: the lessons of PRT5-7 consolidated and refactored into a general-purpose VLM training library.
- **Structure**: model abstraction via a `PRTForCausalLM` class plus shared data handlers.
- **Implementation**: `examples/prt8_vlm`

**PRT9: Autonomous Driving (DriveLM)**

- **What**: application to the autonomous-driving QA dataset DriveLM.
- **Notes**: a preprocessing step converts DriveLM's peculiar JSON format, and the pipeline handles high-resolution images.
- **Implementation**: `examples/prt9_drivelm`

```mermaid
graph TD
    Raw[DriveLM Raw Data] --> Prep[preprocess_drivelm.py]
    Prep --> JSONL[Formatted QA JSONL]

    JSONL --> Load[DataLoader]
    Img[High-Res Images] --> Load

    subgraph PRT9_Training
        Load --> PRT8[PRT8 Codebase]
        Mod[Qwen2-VL-2B] -.-> PRT8
    end

    PRT8 --> ModelFile[Saved Model]
```

#### Phase 5: Real-World Adaptation & Challenges (PRT10 - PRT11)

This phase adapted PRT to real-world constraints, particularly local Mac machines and limited GPU resources. Three barriers came up along the way.

**Barriers:**

1. **PyTorch environment**: provisioning GPUs and managing complex dependencies.
2. **Memory limits**: full fine-tuning is out of reach, and even with LoRA a VLM (vision model + LLM) is memory-tight.
3. **Task shift**: moving from LLMs to VLMs (DriveLM). The paper's reference implementation has no ViT (Vision Transformer) component, so one had to be built from scratch.

To get past these, the following two approaches were implemented:

| Version | Environment | Model | LoRA | Task | Status |
| --- | --- | --- | --- | --- | --- |
| **Paper impl.** | PyTorch | LLM/(ViT) | QLoRA | Llama | (reference) |
| **PRT10** | PyTorch (GPU) | VLM | LoRA | DriveLM | **implemented** |
| **PRT11** | **Mac (Metal)** | **VLM** | **LoRA** | **DriveLM** | **running (code exists)** |
| **(Future)** | ? | VLM | Full FT | DriveLM | not yet written |

**PRT10: PyTorch VLM (LoRA)**

- **Environment**: Linux / PyTorch / CUDA
- **Notes**: the PRT9 concept ported to a native PyTorch environment, targeting standard GPU servers.
- **Implementation**: `examples/prt10_pytorch_drivelm` (conceptual/WIP)

**PRT11: Mac Metal VLM (LoRA)**

- **Environment**: Mac M1/M2/M3 (Metal/MPS)
- **Notes**:
    - Uses the `nvidia/Eagle2-1B` model, optimized so training runs on local machines such as a MacBook Pro.
    - **Dynamic patching**: patches the `transformers` library and the model's remote code at runtime to force operation on machines without FlashAttention (Macs).
    - **Memory efficient**: a 1B-class model plus LoRA fits in unified memory.
- **Implementation**: `examples/prt11_drivelm/train_prt11.py`

**PRT12: Transformers Minimal Implementation**

- **Environment**: any (PyTorch + Transformers)
- **Notes**:
    - A minimal implementation built from `transformers` and `peft` alone, with no `mlx` or other heavy dependencies.
    - Implements Algorithm 1 (training) and Algorithm 2 (inference) for Qwen2.5-VL with no black boxes.
    - **Pedagogical value**: the best reference for understanding how PRT works (logits composition).
- **Implementation**: `examples/prt12_qwen25vl`

**PRT13: Transformers Standard Implementation (Design A)**

- **Environment**: any (PyTorch + Transformers)
- **Notes**:
    - The "canonical" PRT design pattern (Design A).
    - **Structure**:
        - **Trainer subclass**: `PRTTrainer` encapsulates the training logic and stays compatible with the standard ecosystem (SFT Trainer and friends).
        - **Separate generator**: `PRTGenerator` isolates the inference logic, which makes **model-swapping experiments (replace the base model to test adapter portability)** easy.
    - **Artifacts**: a pure LoRA adapter only (no custom model code needed).
- **Implementation**: `examples/prt13_qwen25vl`

---

### PRT Architecture Details (Mechanism of Portability)

This section details the architecture behind PRT's biggest advantage: portability, i.e. using different model sizes at training time and at inference time.

#### 1. Core Concept: Logits Composition

PRT composes models not at the hidden-state level but at the level of the final output, the **vocabulary space (logits)**. As long as the two models share the same "dictionary" (tokenizer), the composition works even when the model sizes differ (7B vs 2B).

$$ v_\theta(y|x) = \log P_{\text{Base}}(y|x) + \frac{R_\phi(y|x)}{\lambda} $$
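In code, one decoding step of this composition might look like the following sketch (tensor names are illustrative; the only hard requirement is that both logits vectors live in the same tokenizer's vocabulary):

```python
import torch
import torch.nn.functional as F

@torch.no_grad()
def composed_next_token(base_logits, reward_logits, lam=1.0):
    """Greedy step over composed scores; both inputs are [batch, vocab]
    last-token logits, e.g. from a 7B base and a 2B reward model that
    share a tokenizer."""
    v = F.log_softmax(base_logits, dim=-1) + reward_logits / lam
    return torch.argmax(v, dim=-1)
```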
#### 2. Architecture: Training vs Inference

**Training phase**:
To keep compute cheap, training pairs **two small models** (2B + 2B).

- **Base model (reference)**: Qwen2-VL-**2B** (frozen)
- **Reward model**: Qwen2-VL-**2B** (frozen) + **LoRA adapter** (trainable)

```mermaid
graph LR
    Dataset --> Input
    Input --> Base["Reference (2B)"]
    Input --> Reward["Reward (2B)"]

    subgraph RewardModel [Reward Model Composition]
        Reward --> LoRA["LoRA Adapter (Trainable)"]
        LoRA -.-> RewardOut[Logits]
    end

    Base -.-> Loss
    RewardOut --> Loss
    Loss --> Update["Update LoRA Params"]

    style LoRA fill:#f96,stroke:#333,stroke-width:2px
    style Base fill:#eee,stroke:#333,stroke-dasharray: 5 5
```

**Inference phase**:
The trained reward model (2B + LoRA) is plugged into the decoding loop of a **larger model (7B)**.
Crucially, **the reward model is not grafted onto the 7B model**. It keeps running independently at 2B, and its output is simply added to the 7B model's output to guide generation.

- **Base model**: Qwen2-VL-**7B** (frozen) <-- **the only component that is swapped**
- **Reward model**: Qwen2-VL-**2B** (frozen) + **LoRA adapter** (frozen) <-- **runs alongside, still 2B**
- **Operation**: **logits-level addition (simple composition of the outputs)**

```mermaid
graph LR
    Prompt --> Input
    Input --> Base["Base (swapped to 7B)"]
    Input --> Reward["Reward (stays 2B)"]

    subgraph RewardComponents [Reward + LoRA]
        Reward --> LoRA_Inf["LoRA Adapter (Frozen)"]
        LoRA_Inf --> RewardLogits[Reward Logits]
    end

    Base --> BaseLogits[Base Logits]

    BaseLogits -- "Log Probability" --> Add
    RewardLogits -- "Reward (R/λ)" --> Add
    Add["Compose (Logits Addition) v = log P + R/λ"] --> Output

    style LoRA_Inf fill:#f96,stroke:#333,stroke-width:2px
```

This architecture delivers an efficient trade-off: **training costs scale with the 2B model, while inference quality scales with the 7B model**.

#### 3. Full Fine-Tuning (Original PRT) vs LoRA

The current implementation (`train_prt5.py`) uses **LoRA** for memory efficiency, but **full fine-tuning** (all parameters), as in the paper, is also possible.
Only the following part of the code changes; the logic and the loss function stay the same.

```python
# Sketch of the change in train_prt5.py

# Current (LoRA)
reward_model.freeze()      # 1. freeze everything
apply_lora(reward_model)   # 2. add and train only the LoRA layers

# Target (full fine-tuning)
# reward_model.freeze()    # 1. do not freeze (or unfreeze)
# apply_lora(reward_model) # 2. skip LoRA entirely
```

With that change the optimizer updates every parameter of the model.

---

## Contributing

Check out the [contribution guidelines](https://github.com/ml-explore/mlx/tree/main/CONTRIBUTING.md) for more information
on contributing to MLX. See the
[docs](https://ml-explore.github.io/mlx/build/html/install.html) for more
information on building from source, and running tests.

We are grateful for all of [our
contributors](https://github.com/ml-explore/mlx/tree/main/ACKNOWLEDGMENTS.md#Individual-Contributors). If you contribute
to MLX and wish to be acknowledged, please add your name to the list in your
pull request.

## Citing MLX

The MLX software suite was initially developed with equal contribution by Awni
Hannun, Jagrit Digani, Angelos Katharopoulos, and Ronan Collobert. If you find
MLX useful in your research and wish to cite it, please use the following
BibTeX entry:

```text
@software{mlx2023,
  author = {Awni Hannun and Jagrit Digani and Angelos Katharopoulos and Ronan Collobert},
  title = {{MLX}: Efficient and flexible machine learning on Apple silicon},
  url = {https://github.com/ml-explore},
  version = {0.0},
  year = {2023},
}
```
examples/prt12_qwen25vl/README.md
ADDED
@@ -0,0 +1,74 @@
# PRT12: Qwen2.5-VL with Portable Reward Tuning (Minimal)

A minimal example of PRT (Portable Reward Tuning) for Qwen2.5-VL using only Transformers + PEFT.
It strips away complex dependencies so the core PRT logic (Algorithms 1 & 2) can be understood and run in a simple setup.

## Overview

PRT (Portable Reward Tuning) combines a frozen pretrained base model (reference) with a reward model trained via LoRA.
This implementation (`prt12`) achieves that with nothing but the standard Hugging Face `Trainer` and a model wrapper; no custom trainer is needed.

### Directory layout

* **`modeling_prt.py`**: model definition
    * The `PRTQwenVL` class holds the two models (ref, reward) and computes the Algorithm 1 loss in its `forward` method.
* **`train_prt12.py`**: training script
    * Training loop built on the Hugging Face `Trainer`.
    * Dataset loading and preprocessing for Qwen2.5-VL.
* **`inference_prt12.py`**: inference script
    * Greedy-search generation following Algorithm 2.
    * Composes the ref and reward outputs at every step.

## How it works (Algorithms 1 & 2)

### Training (Algorithm 1)

Implemented in the `forward` method of `modeling_prt.py`:

1. **Ref model (frozen)**: computes the log-probabilities $\log \pi_{\text{ref}}(y|x)$ for the input $x$.
2. **Reward model (LoRA-trained)**: computes the per-token reward logits $r_\phi(x, y)$ for the input $x$.
3. **Composition**: $v_\theta = \log \pi_{\text{ref}} + r_\phi$
4. **Loss**: cross-entropy between the composed distribution $v_\theta$ and the target labels $y$.

### Inference (Algorithm 2)

Implemented in the `prt_greedy_generate` function of `inference_prt12.py`:

1. At each token-generation step, run both the ref model and the reward model.
2. Compose the logits exactly as during training and pick the token with the highest score.

## Running it

### Requirements

```bash
pip install -r requirements.txt
```

### Example training command

```bash
python train_prt12.py \
    --model_id "Qwen/Qwen2.5-VL-3B-Instruct" \
    --data_path "dummy_data.jsonl" \
    --output_dir "output_prt12" \
    --epochs 3 \
    --bf16
```

Note on GPU memory: two copies of the 3B model are loaded, so a GPU with at least ~24 GB of VRAM (A10G, A100, RTX 3090/4090, etc.) is recommended. A Mac may run out of memory.

### Example inference command

```bash
python inference_prt12.py \
    --model_id "Qwen/Qwen2.5-VL-3B-Instruct" \
    --image_path "test_image.jpg" \
    --prompt "Describe this image." \
    --adapter_path "output_prt12"  # if a trained adapter exists
```

## Reading the code

All Python scripts carry detailed comments.
The `forward` method in `modeling_prt.py` and the generation loop in `inference_prt12.py` are the places to look for PRT's actual computation.
examples/prt12_qwen25vl/__pycache__/modeling_prt.cpython-314.pyc
ADDED
Binary file (5.51 kB)
examples/prt12_qwen25vl/dummy_data.jsonl
ADDED
@@ -0,0 +1,2 @@
{"image": "test_image.jpg", "question": "Describe this image.", "answer": "A woman and her dog on the beach."}
{"image": "test_image.jpg", "question": "What is the dog doing?", "answer": "The dog is sitting next to the woman."}
examples/prt12_qwen25vl/inference_prt12.py
ADDED
@@ -0,0 +1,213 @@
```python
import torch
import torch.nn.functional as F
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from peft import PeftModel
from PIL import Image
import requests
import argparse

@torch.no_grad()
def prt_greedy_generate(
    ref_model,
    reward_model,
    input_ids,
    attention_mask=None,
    pixel_values=None,
    image_grid_thw=None,
    video_grid_thw=None,
    max_new_tokens=64,
    eos_token_id=None,
):
    """
    Greedy generation with PRT (Portable Reward Tuning).

    Implements Algorithm 2 (inference/generation) from the paper.
    At every token-generation step, both the ref model and the reward model
    run a forward pass, and their outputs are composed to pick the next token.

    Formula: v_theta = log_softmax(ref_logits) + reward_logits
    """
    # Switch to eval mode
    ref_model.eval()
    reward_model.eval()

    # Initialize the KV caches (past computations).
    # Caching avoids recomputing past tokens and speeds up decoding.
    past_ref = None
    past_reward = None

    # Ensure the input has shape (Batch, Seq)
    if input_ids.dim() == 1:
        input_ids = input_ids.unsqueeze(0)

    current_input_ids = input_ids
    generated_ids = input_ids.clone()

    first_step = True  # pass the image inputs only on the first step

    # Token-generation loop
    for i in range(max_new_tokens):

        # --- 1. Reward model forward pass ---
        reward_kwargs = {
            "use_cache": True,
            "past_key_values": past_reward
        }
        # Image-related inputs are only needed on the first step
        # (afterwards they are captured in past_key_values).
        if first_step:
            reward_kwargs.update({
                "attention_mask": attention_mask,
                "pixel_values": pixel_values,
                "image_grid_thw": image_grid_thw,
                "video_grid_thw": video_grid_thw,
            })

        out_r = reward_model(
            input_ids=current_input_ids,
            **reward_kwargs
        )
        past_reward = out_r.past_key_values
        # Keep only the logits of the last token: [Batch, Vocab]
        reward_logits = out_r.logits[:, -1, :]

        # --- 2. Reference model forward pass ---
        ref_kwargs = {
            "use_cache": True,
            "past_key_values": past_ref
        }
        if first_step:
            ref_kwargs.update({
                "attention_mask": attention_mask,
                "pixel_values": pixel_values,
                "image_grid_thw": image_grid_thw,
                "video_grid_thw": video_grid_thw,
            })

        out_ref = ref_model(
            input_ids=current_input_ids,
            **ref_kwargs
        )
        past_ref = out_ref.past_key_values
        # Keep only the logits of the last token: [Batch, Vocab]
        ref_logits = out_ref.logits[:, -1, :]

        # --- 3. PRT composition ---
        # Combine the two model outputs:
        # the ref model contributes log-probabilities (log_softmax),
        # the reward model contributes its raw logits, added as-is.
        scores = torch.log_softmax(ref_logits, dim=-1) + reward_logits

        # Greedy search: pick the token with the highest score
        next_token = torch.argmax(scores, dim=-1, keepdim=True)

        # Append to the generated sequence
        generated_ids = torch.cat([generated_ids, next_token], dim=-1)

        # The next step only needs the token we just generated
        current_input_ids = next_token
        first_step = False

        # Stop once every sequence has emitted the EOS token
        if eos_token_id is not None and (next_token == eos_token_id).all():
            break

    return generated_ids

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_id", type=str, default="Qwen/Qwen2.5-VL-3B-Instruct")
    parser.add_argument("--adapter_path", type=str, default=None, help="path to a trained adapter")
    parser.add_argument("--image_path", type=str, default="http://images.cocodataset.org/val2017/000000039769.jpg")
    parser.add_argument("--prompt", type=str, default="Describe this image.")
    args = parser.parse_args()

    # Device selection (CUDA > MPS > CPU)
    device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
    dtype = torch.bfloat16 if device != "cpu" else torch.float32  # some CPUs lack BFloat16 support

    print(f"Using device: {device}")

    # --- Model loading ---

    # 1. Reference model (frozen)
    print(f"Loading Ref Model: {args.model_id}...")
    ref_model = Qwen2VLForConditionalGeneration.from_pretrained(
        args.model_id,
        torch_dtype=dtype,
        device_map=device,
        trust_remote_code=True
    ).eval()

    # 2. Reward model (base + adapter)
    print(f"Loading Reward Model Base: {args.model_id}...")
    reward_model = Qwen2VLForConditionalGeneration.from_pretrained(
        args.model_id,
        torch_dtype=dtype,
        device_map=device,
        trust_remote_code=True
    )

    if args.adapter_path:
        print(f"Loading Adapter from {args.adapter_path}...")
        # Load the trained LoRA adapter on top of the base model
        reward_model = PeftModel.from_pretrained(reward_model, args.adapter_path)
    else:
        print("No adapter path given; running with the base model only (no reward).")

    reward_model.to(device).eval()

    # --- Input processing ---

    processor = AutoProcessor.from_pretrained(args.model_id, trust_remote_code=True)

    # Load the image
    if args.image_path.startswith("http"):
        image = Image.open(requests.get(args.image_path, stream=True).raw).convert("RGB")
    else:
        image = Image.open(args.image_path).convert("RGB")

    # Build the prompt
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": args.prompt},
            ],
        }
    ]
    text_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # Build the input tensors
    inputs = processor(
        text=[text_prompt],
        images=[image],
        padding=True,
        return_tensors="pt",
    ).to(device)

    # --- Generation ---
    print("Generating...")
    gen_ids = prt_greedy_generate(
        ref_model=ref_model,
        reward_model=reward_model,
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        pixel_values=inputs.pixel_values,
        image_grid_thw=inputs.image_grid_thw,
        max_new_tokens=100,
        eos_token_id=processor.tokenizer.eos_token_id
    )

    # Decode and print.
    # Strip the prompt tokens so only the newly generated text remains.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, gen_ids)
    ]
    output_text = processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)

    print("\n[Output]:")
    print(output_text[0])

if __name__ == "__main__":
    main()
```
examples/prt12_qwen25vl/modeling_prt.py
ADDED
@@ -0,0 +1,136 @@
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModelForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast

class PRTQwenVL(nn.Module):
    """
    PRT (Portable Reward Tuning) wrapper for Qwen2.5-VL.

    Implements Algorithm 1 (training) from the paper.
    The training loss is computed on the composition of the base model's
    output and the reward model's output.

    Mechanism:
        combined_logits (v_theta) = log_softmax(ref_logits) + reward_logits
        Loss = CrossEntropy(v_theta, labels)
    """
    def __init__(self, ref_model: nn.Module, reward_model: nn.Module):
        super().__init__()
        # --- Reference model ---
        # Weights are frozen; no gradients are computed.
        # It acts as an anchor, preserving the base model's knowledge distribution.
        self.ref_model = ref_model.eval()
        for p in self.ref_model.parameters():
            p.requires_grad_(False)

        # --- Reward model ---
        # The trainable part; the LoRA adapter is applied here.
        # It learns the deviation from the base model (which tokens are good or bad).
        self.reward_model = reward_model

        # Share the model config (the Trainer reads it)
        self.config = self.reward_model.config

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        labels=None,
        pixel_values=None,
        image_grid_thw=None,
        video_grid_thw=None,  # Qwen2.5-VL video input support
        **kwargs
    ):
        """
        Forward pass.
        Runs both models, composes their outputs, and computes the loss.
        """

        # 1. Reward model forward (trainable).
        # Run as an ordinary causal language model to obtain logits.
        # All kwargs except use_cache are forwarded unchanged so that
        # gradient checkpointing and similar features keep working.
        reward_out = self.reward_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            use_cache=kwargs.get("use_cache", False),
            output_attentions=False,
            output_hidden_states=False,
            return_dict=True,
            **{k: v for k, v in kwargs.items() if k not in ["use_cache", "output_attentions", "output_hidden_states", "return_dict"]}
        )
        reward_logits = reward_out.logits  # shape: [Batch, SeqLen, Vocab]

        # 2. Reference model forward (not trained).
        # Gradients are disabled (no_grad) to save memory.
        with torch.no_grad():
            ref_out = self.ref_model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                pixel_values=pixel_values,
                image_grid_thw=image_grid_thw,
                video_grid_thw=video_grid_thw,
                use_cache=kwargs.get("use_cache", False),
                output_attentions=False,
                output_hidden_states=False,
                return_dict=True,
                **{k: v for k, v in kwargs.items() if k not in ["use_cache", "output_attentions", "output_hidden_states", "return_dict"]}
            )
            ref_logits = ref_out.logits  # shape: [Batch, SeqLen, Vocab]

        # 3. PRT composition.
        # Formula: v_theta = log(p_ref(y|x)) + r_phi(x, y)
        # i.e. the reference model's log-probabilities plus the reward
        # model's logits (scores).

        # Convert to log-probabilities via log_softmax
        ref_logp = F.log_softmax(ref_logits, dim=-1)

        # Simple addition (the core PRT operation)
        combined_logits = ref_logp + reward_logits

        loss = None
        if labels is not None:
            # Shift: a language model predicts the *next* token,
            # so labels are shifted by one position.
            # inputs: [A, B, C] -> labels: [B, C, END]
            shift_logits = combined_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            # Cross-entropy loss: maximize the probability of the target
            # labels under the composed distribution v_theta.
            loss = F.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
                ignore_index=-100,
            )

        # Return in the format the Trainer expects (CausalLMOutputWithPast).
        # Returning combined_logits means eval metrics are also computed
        # on this distribution.
        return CausalLMOutputWithPast(
            loss=loss,
            logits=combined_logits,
            past_key_values=reward_out.past_key_values,
            hidden_states=None,
            attentions=None,
        )

    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
        """
        Enable gradient checkpointing.
        When the Trainer calls this, delegate to the trainable reward_model.
        """
        self.reward_model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs)

    def save_pretrained(self, output_dir, **kwargs):
        """
        Save hook.
        Instead of saving the whole wrapper, save only the trained
        LoRA adapter.
        """
        # Call save_pretrained on the reward model (a PeftModel)
        if hasattr(self.reward_model, "save_pretrained"):
            self.reward_model.save_pretrained(output_dir, **kwargs)
        else:
            # Fallback in case the reward model is not a PeftModel
            torch.save(self.reward_model.state_dict(), f"{output_dir}/pytorch_model.bin")
```
examples/prt12_qwen25vl/requirements.txt
ADDED
@@ -0,0 +1,7 @@
transformers
peft
accelerate
datasets
torch
torchvision
qwen_vl_utils
examples/prt12_qwen25vl/test_image.jpg
ADDED
Git LFS file
examples/prt12_qwen25vl/train_prt12.py
ADDED
@@ -0,0 +1,251 @@
```python
import argparse
import json
import logging
import os
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

import torch
from datasets import Dataset
from peft import LoraConfig, TaskType, get_peft_model
from PIL import Image
from transformers import (
    Qwen2VLForConditionalGeneration,
    AutoProcessor,
    Trainer,
    TrainingArguments,
)

from modeling_prt import PRTQwenVL

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def build_models(model_id: str, torch_dtype, device_map=None, lora_rank=16, lora_alpha=32):
    """
    Build the models for PRT training.

    1. Reference model (frozen): the original model used as the anchor.
    2. Reward model (trained): the model that gets LoRA applied.

    Both are wrapped in PRTQwenVL and returned.
    """
    logger.info(f"Loading Reference Model from {model_id}...")
    # Reference model: loaded in eval mode.
    # device_map="auto" can clash with the Trainer and trigger "meta tensor"
    # errors, so for a ~2B-class model it is safer to pass None (or "cpu").
    ref_model = Qwen2VLForConditionalGeneration.from_pretrained(
        model_id,
        torch_dtype=torch_dtype,
        device_map=device_map,  # leave as None during training; the Trainer handles placement
        trust_remote_code=True,
    ).eval()

    logger.info(f"Loading Reward Model Base from {model_id}...")
    # Reward model: loaded from the same checkpoint
    reward_model = Qwen2VLForConditionalGeneration.from_pretrained(
        model_id,
        torch_dtype=torch_dtype,
        device_map=device_map,
        trust_remote_code=True,
    )

    # Apply LoRA (Low-Rank Adaptation).
    # Target the main Qwen2.5-VL layers (attention projections, MLP gates).
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
    logger.info(f"Applying LoRA to Reward Model with rank={lora_rank}, alpha={lora_alpha}, targets={target_modules}")

    lora_cfg = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        r=lora_rank,
        lora_alpha=lora_alpha,
        lora_dropout=0.05,
        target_modules=target_modules,
        bias="none",
    )
    reward_model = get_peft_model(reward_model, lora_cfg)

    # Report the number of trainable parameters
    reward_model.print_trainable_parameters()

    # Wrap in the PRT wrapper so that forward computes ref + reward.
    prt_model = PRTQwenVL(ref_model=ref_model, reward_model=reward_model)
    return prt_model

@dataclass
class DataCollatorForPRT:
    """
    Data collator for Qwen2.5-VL.
    Turns a list of raw samples into batched tensors.
    """
    processor: AutoProcessor

    def __call__(self, features: List[Dict[str, Union[List, str]]]) -> Dict[str, torch.Tensor]:
        # features is a list of raw dicts from the dataset:
        # {"image": PIL.Image, "text": "User: <image>..."}

        texts = []
        images = []

        for feature in features:
            texts.append(feature["text"])
            images.append(feature["image"])

        # Batch conversion via the processor (padding, resizing, etc.)
        inputs = self.processor(
            text=texts,
            images=images,
            return_tensors="pt",
            padding=True,
        )

        # Build the labels.
        # As in supervised fine-tuning (SFT), input_ids are used as targets.
        # Padding positions (attention mask == 0) are excluded with -100.
        labels = inputs["input_ids"].clone()

        if "attention_mask" in inputs:
            labels[inputs["attention_mask"] == 0] = -100

        inputs["labels"] = labels
        return inputs


class PRTDataset(torch.utils.data.Dataset):
    """
    Dataset that reads JSONL records and returns image/text pairs.
    """
    def __init__(self, jsonl_path, processor):
        self.data = []
        self.processor = processor
        self.root_dir = os.path.dirname(jsonl_path)

        # Read the JSONL file
        with open(jsonl_path, "r") as f:
            for line in f:
                if line.strip():
                    self.data.append(json.loads(line))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]

        # Load the image
        image_path = item.get("image")
        if not os.path.isabs(image_path):
            image_path = os.path.join(self.root_dir, image_path)

        try:
            image = Image.open(image_path).convert("RGB")
        except Exception as e:
            logger.error(f"Failed to load image {image_path}: {e}")
            raise e

        # Build the text prompt.
        # Construct the message format the processor expects.
        # Qwen2.5-VL supports content lists mixing "image" and "text" items.
        question = item.get("question", "Describe this image.")
        answer = item.get("answer", "")

        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image_path},
                    {"type": "text", "text": question},
                ]
            },
            {
                "role": "assistant",
                "content": [{"type": "text", "text": answer}]
            }
        ]

        # apply_chat_template yields the formatted text
        # (e.g. <|im_start|>user\n<|vision_start|><|image_pad|>...<|vision_end|>Question<|im_end|>\n<|im_start|>assistant\nAnswer<|im_end|>)
        text_prompt = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)

        return {
            "image": image,
            "text": text_prompt
        }

def train(args):
    # Data type (BFloat16 recommended)
    dtype = torch.bfloat16 if args.bf16 else torch.float16

    # Processor
    processor = AutoProcessor.from_pretrained(args.model_id, trust_remote_code=True)

    # Models (PRT configuration)
    prt_model = build_models(
        args.model_id,
        torch_dtype=dtype,
        device_map=None,  # let the Trainer manage devices
        lora_rank=args.lora_rank,
        lora_alpha=args.lora_alpha
    )

    # Dataset and collator
    train_dataset = PRTDataset(args.data_path, processor)
    collator = DataCollatorForPRT(processor)

    # Trainer configuration
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        per_device_train_batch_size=args.batch_size,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        learning_rate=args.learning_rate,
        num_train_epochs=args.epochs,
        eval_strategy="no",
        save_strategy="steps",
        save_steps=args.save_steps,
        logging_steps=10,
        bf16=args.bf16,
        fp16=not args.bf16,
        remove_unused_columns=False,  # required for the PRT wrapper and multimodal inputs
        report_to="none",
        ddp_find_unused_parameters=False,  # recommended False when using the wrapper
    )

    # Initialize the Trainer
    trainer = Trainer(
        model=prt_model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=collator,
```
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
logger.info("Starting PRT Training...")
|
| 224 |
+
trainer.train()
|
| 225 |
+
|
| 226 |
+
logger.info(f"Saving Reward Model Adapter to {args.output_dir}")
|
| 227 |
+
# save_model -> PRTQwenVL.save_pretrained -> RewardModel.save_pretrained
|
| 228 |
+
# これにより、アダプタのみが保存されます。
|
| 229 |
+
trainer.save_model()
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
if __name__ == "__main__":
|
| 233 |
+
parser = argparse.ArgumentParser()
|
| 234 |
+
parser.add_argument("--model_id", type=str, default="Qwen/Qwen2.5-VL-3B-Instruct")
|
| 235 |
+
parser.add_argument("--data_path", type=str, required=True, help="Path to training jsonl")
|
| 236 |
+
parser.add_argument("--output_dir", type=str, default="output_prt12")
|
| 237 |
+
parser.add_argument("--batch_size", type=int, default=1)
|
| 238 |
+
parser.add_argument("--gradient_accumulation_steps", type=int, default=8)
|
| 239 |
+
parser.add_argument("--learning_rate", type=float, default=2e-4)
|
| 240 |
+
parser.add_argument("--epochs", type=int, default=1)
|
| 241 |
+
parser.add_argument("--save_steps", type=int, default=100)
|
| 242 |
+
parser.add_argument("--lora_rank", type=int, default=16)
|
| 243 |
+
parser.add_argument("--lora_alpha", type=int, default=32)
|
| 244 |
+
parser.add_argument("--bf16", action="store_true", help="Use bfloat16")
|
| 245 |
+
|
| 246 |
+
args = parser.parse_args()
|
| 247 |
+
|
| 248 |
+
if not torch.cuda.is_available() and not torch.backends.mps.is_available():
|
| 249 |
+
logger.warning("No GPU detected. Training might be extremely slow.")
|
| 250 |
+
|
| 251 |
+
train(args)
|
examples/prt14_qwen25vl/README.md
ADDED
@@ -0,0 +1,138 @@
# PRT14: Portable Reward Tuning (Design A + Hydra + Batch Eval)

This directory contains the 14th iteration of the **PRT (Portable Reward Tuning)** implementation.
PRT14 is a practical implementation set that adopts the **Design A (Trainer/Generator Separation)** pattern established in the earlier experiments, introduces **Hydra** for configuration management, and supports **batched inference** during evaluation.

## Overview

PRT is a technique for learning the generation style of a large (vision-)language model, or a specific reward, as a small, transferable **Reward Model (LoRA Adapter)**.
This implementation works as follows:

* **Train**: two copies of the base model are prepared (Ref: frozen, Reward: trainable) and KL-constrained reward maximization is performed (in practice, an imitation-learning-style approach via CrossEntropy).
* **Eval**: the trained Reward Adapter is loaded, and at each step the logits of the Ref model and the Reward model are composed to generate tokens.

## Architecture

### Training Phase (Design A-Train)

Training uses `PRTTrainer`, which updates the Reward Model against the outputs of the Reference Model.

```mermaid
graph LR
    Input["Input Image/Text"] --> Ref["Reference Model (Frozen)"]
    Input --> Rew["Reward Model (LoRA Trainable)"]

    Ref -->|Logits| Loss
    Rew -->|Logits| Loss

    Loss["PRT Loss"] -->|Backprop| Rew

    subgraph Loss Calculation
        L["Logits Composition"]
        L -->|"ref_logits + lambda * reward_logits"| CE["CrossEntropy w/ Labels"]
    end
```

* **Reference Model**: the base model as-is (frozen). It provides the reference distribution.
* **Reward Model**: base model + LoRA Adapter. It learns the reward signal (steering toward the target text).
* **PRT Loss**: `ref_logits + prt_lambda * reward_logits` is composed and trained to match the target labels.
* *Note*: `ref_logits` serves as the reference probability distribution, so `log_softmax` is applied to it, whereas `reward_logits` is treated as a raw "reward value" and is added without `log_softmax` (see the sketch below).
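
A minimal sketch of this composition, mirroring `PRTTrainer.compute_loss` in `train_prt14.py` (tensor shapes and variable names are illustrative: `ref_logits`/`reward_logits` are `(batch, seq, vocab)`, `labels` uses `-100` for masked positions):

```python
import torch.nn.functional as F

def prt_loss(ref_logits, reward_logits, labels, prt_lambda=1.0):
    # Reference distribution in log space; reward logits are added raw.
    combined = F.log_softmax(ref_logits, dim=-1) + prt_lambda * reward_logits
    # Standard causal-LM shift: position t predicts token t+1.
    shift_logits = combined[..., :-1, :].contiguous()
    shift_labels = labels[..., 1:].contiguous()
    return F.cross_entropy(
        shift_logits.view(-1, shift_logits.size(-1)),
        shift_labels.view(-1),
        ignore_index=-100,  # padding positions are masked out by the collator
    )
```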
### Inference Phase (Design A-Gen)

Inference uses `PRTGenerator`, which runs the two models in parallel and composes their logits at each step to select the next token.

```mermaid
graph TD
    User["User Input"] --> P[Processor]
    P --> Ref["Reference Model (Base)"]
    P --> Rew["Reward Model (Base + Adapter)"]

    Ref -->|Logits| Mix(("Composition"))
    Rew -->|Logits| Mix

    Mix -->|"v = ref + lambda * rew"| Samp["Sampler / Greedy"]
    Samp -->|"Next Token"| Out["Output Text"]
    Out -->|Autoregressive| P
```

* **Portable**: the adapter can potentially be applied to other models that share the same tokenizer / embedding space, e.g. different sizes (experimental feature).
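
The per-step composition inside `PRTGenerator.generate` can be sketched as follows (a simplified greedy step assuming both models already hold their own KV caches; the actual implementation in `eval_prt14.py` also handles temperature and top-p sampling):

```python
import torch
import torch.nn.functional as F

@torch.no_grad()
def prt_decode_step(ref_model, reward_model, token_ids, past_ref, past_rew, prt_lambda=1.0):
    # Each model keeps an independent KV cache; after the first step only
    # the newly generated token is fed in.
    ref_out = ref_model(input_ids=token_ids, past_key_values=past_ref, use_cache=True)
    rew_out = reward_model(input_ids=token_ids, past_key_values=past_rew, use_cache=True)
    # v = log_softmax(ref) + lambda * reward (reward logits stay raw)
    v = F.log_softmax(ref_out.logits[:, -1, :], dim=-1) + prt_lambda * rew_out.logits[:, -1, :]
    next_token = torch.argmax(v, dim=-1, keepdim=True)  # greedy selection
    return next_token, ref_out.past_key_values, rew_out.past_key_values
```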
## File Layout

* `train_prt14.py`: training script. Loads its configuration via Hydra and trains the LoRA adapter with `PRTTrainer`.
* `eval_prt14.py`: evaluation script. Runs batched inference over the test data with `PRTGenerator`.
* `configs/`: Hydra configuration directory.
  * `config.yaml`: main configuration file (model ID, data paths, hyperparameters, etc.).

## Usage

### 1. Environment Setup

```bash
# (Recommended) Activate the project-root virtual environment
source .venv/bin/activate

# Install the dependencies for this example
pip install -r examples/prt14_qwen25vl/requirements.txt
```

### 2. Dataset Preparation

A JSONL dataset is required.
Each line has the following format (a helper for writing such a file is sketched after the example):

```json
{"image": "path/to/image.jpg", "question": "Describe...", "answer": "Target answer..."}
```
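
For quick experiments, a file in this format can be written with a few lines of Python (the record below is a placeholder, not shipped data):

```python
import json

records = [
    {"image": "test_image.jpg", "question": "Describe this image.", "answer": "A sample answer."},
]
with open("my_dataset.jsonl", "w") as f:
    for rec in records:
        f.write(json.dumps(rec) + "\n")
```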
### 3. Training

Edit `config.yaml` or override settings on the command line:

```bash
# Basic run (uses the settings in config.yaml)
python examples/prt14_qwen25vl/train_prt14.py

# Run with overridden parameters
python examples/prt14_qwen25vl/train_prt14.py \
    model.model_id="Qwen/Qwen2-VL-2B-Instruct" \
    data.path="data/my_dataset.jsonl" \
    training.batch_size=4 \
    training.prt_lambda=1.5
```

### 4. Inference / Evaluation

Point the script at a trained adapter (e.g. `output_prt14`) to run inference.
PRT14 supports batched processing via DataLoader, so large evaluation sets can be handled efficiently.

```bash
# Basic run (when the training output_dir is configured to double as adapter_path)
python examples/prt14_qwen25vl/eval_prt14.py

# Run with an explicit dataset and parameters
python examples/prt14_qwen25vl/eval_prt14.py \
    data.path="data/test_data.jsonl" \
    training.batch_size=8 \
    training.output_dir="output_prt14" \
    generation.max_new_tokens=100
```

## Main Classes

### `PRTTrainer` (`train_prt14.py`)

Subclasses `transformers.Trainer` and overrides `compute_loss` to implement the PRT loss computation based on Algorithm 1.

### `PRTGenerator` (`eval_prt14.py`)

Inference-only class. Holds the Ref and Reward models and performs step-by-step logits composition (Algorithm 2) inside its `generate` method; a programmatic usage sketch follows.
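
A minimal programmatic usage sketch (the model ID and paths are placeholders; assumes a CUDA device, pass `device=` otherwise):

```python
from eval_prt14 import PRTGenerator

generator = PRTGenerator(
    base_model_id="Qwen/Qwen2-VL-2B-Instruct",
    adapter_path="output_prt14",  # None -> Identity PRT (reward model == base)
)
text = generator.generate(
    image_input="test_image.jpg",  # PIL.Image, local path, or http(s) URL
    prompt="Describe this image.",
    max_new_tokens=100,
    prt_lambda=1.0,
)
print(text)
```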
### `EvalDataset` (`eval_prt14.py`)

Evaluation data loader. Resolves and loads image paths and preprocesses samples for batching.

## Changelog

* **PRT14**: introduced Hydra-based configuration management, added batched evaluation via DataLoader, and reorganized the code structure.
examples/prt14_qwen25vl/__pycache__/eval_prt14.cpython-314.pyc
ADDED
Binary file (14 kB).
examples/prt14_qwen25vl/configs/config.yaml
ADDED
@@ -0,0 +1,39 @@
defaults:
  - _self_

model:
  model_id: "Qwen/Qwen2-VL-2B-Instruct"
  bf16: true
  use_lora: true

data:
  path: "examples/prt12_qwen25vl/dummy_data.jsonl"
  test_image_path: "examples/prt12_qwen25vl/test_image.jpg"
  test_prompt: "Describe this image."

generation:
  max_new_tokens: 100
  temperature: 0.7
  top_p: 0.9

training:
  output_dir: "output_prt14"
  batch_size: 1
  gradient_accumulation_steps: 8
  learning_rate: 2.0e-4
  epochs: 1
  save_steps: 100
  prt_lambda: 1.0

lora:
  rank: 16
  alpha: 32
  target_modules:
    - "q_proj"
    - "k_proj"
    - "v_proj"
    - "o_proj"
    - "gate_proj"
    - "up_proj"
    - "down_proj"
examples/prt14_qwen25vl/eval_prt14.py
ADDED
@@ -0,0 +1,305 @@
import sys, os, gc
import torch
import torch.nn.functional as F
import json
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from peft import PeftModel
from PIL import Image
import requests
import math
import hydra
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from omegaconf import DictConfig, OmegaConf
import logging
import importlib

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class EvalDataset(Dataset):
    def __init__(self, data_path):
        self.data = []
        with open(data_path, 'r') as f:
            for line in f:
                if line.strip():
                    self.data.append(json.loads(line))
        self.image_dir = os.path.dirname(data_path)  # Assuming images are relative to the data_path's directory

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        image_filename = item.get('image', None)
        image = None
        if image_filename:
            image_path = image_filename if os.path.isabs(image_filename) else os.path.join(self.image_dir, image_filename)
            try:
                image = Image.open(image_path).convert("RGB")
            except Exception as e:
                logger.warning(f"Could not load image {image_path}: {e}")
                image = None  # Handle missing/corrupt images

        return {
            'id': item.get('id', idx),  # Use index if 'id' is not present
            'image': image,
            'question': item.get('question', ''),
            'answer': item.get('answer', '')
        }

def eval_collate_fn(batch):
    ids = [item['id'] for item in batch]
    images = [item['image'] for item in batch]
    questions = [item['question'] for item in batch]
    answers = [item['answer'] for item in batch]
    return {
        'ids': ids,
        'images': images,
        'questions': questions,
        'answers': answers
    }

class PRTGenerator:
    """
    Inference-only PRT generator class (Design A + Hydra).
    Holds the Ref model and the Reward model and manages PRT-based generation.
    """
    def __init__(self, base_model_id, adapter_path=None, device="cuda", dtype=torch.bfloat16):
        self.device = device
        self.dtype = dtype

        logger.info(f"Loading Base Models from {base_model_id}...")
        # 1. Reference Model (Base)
        self.ref_model = Qwen2VLForConditionalGeneration.from_pretrained(
            base_model_id,
            torch_dtype=dtype,
            device_map=device,
            trust_remote_code=True,
        ).eval()

        # 2. Reward Model (Base + Adapter)
        # Initially identical to the base model
        self.reward_model = Qwen2VLForConditionalGeneration.from_pretrained(
            base_model_id,
            torch_dtype=dtype,
            device_map=device,
            trust_remote_code=True,
        )

        if adapter_path:
            logger.info(f"Loading Adapter from {adapter_path}...")
            # Check whether adapter weights exist (simple check)
            if os.path.exists(os.path.join(adapter_path, "adapter_model.bin")) or os.path.exists(os.path.join(adapter_path, "adapter_model.safetensors")):
                self.reward_model = PeftModel.from_pretrained(self.reward_model, adapter_path)
            else:
                logger.warning(f"Adapter file not found at {adapter_path}. Proceeding with raw base model (Identity PRT).")
        else:
            logger.info("Adapter not specified. Reward model is same as Ref model (Identity PRT).")

        self.reward_model.to(device).eval()

        # Processor
        self.processor = AutoProcessor.from_pretrained(base_model_id, trust_remote_code=True)

    @torch.no_grad()
    def generate(self, image_input, prompt, max_new_tokens=100, prt_lambda=1.0, temperature=0.7, top_p=0.9):
        """
        PRT Generation (Greedy or Sampling)
        Following Algorithm 2, logits are composed step by step during generation.
        """

        # --- Preprocessing ---
        try:
            if isinstance(image_input, Image.Image):
                image = image_input
            elif isinstance(image_input, str):
                if image_input.startswith("http"):
                    image = Image.open(requests.get(image_input, stream=True).raw).convert("RGB")
                else:
                    image = Image.open(image_input).convert("RGB")
            else:
                raise ValueError("image_input must be PIL.Image or str path")
        except Exception as e:
            logger.error(f"Failed to load image: {e}")
            return "Error: Image load failed."

        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image},
                    {"type": "text", "text": prompt},
                ],
            }
        ]
        text_prompt = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = self.processor(
            text=[text_prompt],
            images=[image],
            padding=True,
            return_tensors="pt",
        ).to(self.device)

        input_ids = inputs.input_ids
        attention_mask = inputs.attention_mask
        pixel_values = inputs.pixel_values
        image_grid_thw = inputs.image_grid_thw

        # --- Generation Loop ---

        # KV caches (managed independently for Ref/Reward)
        past_ref = None
        past_reward = None

        generated_ids = input_ids.clone()
        current_input_ids = input_ids

        first_step = True

        for _ in range(max_new_tokens):

            # Shared kwargs (use the KV cache)
            fwd_kwargs = {"use_cache": True}

            # Pass the full inputs (image etc.) only on the first step
            if first_step:
                fwd_kwargs.update({
                    "attention_mask": attention_mask,
                    "pixel_values": pixel_values,
                    "image_grid_thw": image_grid_thw,
                })

            # 1. Ref Model Forward
            ref_out = self.ref_model(
                input_ids=current_input_ids,
                past_key_values=past_ref,
                **fwd_kwargs
            )
            past_ref = ref_out.past_key_values
            ref_logits = ref_out.logits[:, -1, :]

            # 2. Reward Model Forward
            rew_out = self.reward_model(
                input_ids=current_input_ids,
                past_key_values=past_reward,
                **fwd_kwargs
            )
            past_reward = rew_out.past_key_values
            reward_logits = rew_out.logits[:, -1, :]

            # 3. Logits Composition (Algorithm 2)
            # v = log_softmax(ref) + lambda * reward
            ref_logp = F.log_softmax(ref_logits, dim=-1)
            combined_logits = ref_logp + (prt_lambda * reward_logits)

            # 4. Token Selection (Sampling or Greedy)
            if temperature > 0:
                # Apply Temperature
                logits = combined_logits / temperature
                probs = F.softmax(logits, dim=-1)

                # Apply Top-P (Nucleus Sampling)
                if top_p < 1.0:
                    sorted_probs, sorted_indices = torch.sort(probs, descending=True)
                    cumulative_probs = torch.cumsum(sorted_probs, dim=-1)

                    # Remove tokens with cumulative probability above the threshold
                    sorted_indices_to_remove = cumulative_probs > top_p
                    # Shift the indices to the right to keep also the first token above the threshold
                    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                    sorted_indices_to_remove[..., 0] = 0

                    indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                    probs[indices_to_remove] = 0.0
                    probs = probs / probs.sum(dim=-1, keepdim=True)  # Re-normalize

                next_token = torch.multinomial(probs, num_samples=1)
            else:
                # Greedy
                next_token = torch.argmax(combined_logits, dim=-1, keepdim=True)

            # Append & Update
            generated_ids = torch.cat([generated_ids, next_token], dim=-1)
            current_input_ids = next_token
            first_step = False

            # Stop Check
            if next_token.item() == self.processor.tokenizer.eos_token_id:
                break

        # Decode
        generated_ids_trimmed = generated_ids[:, inputs.input_ids.shape[1]:]
        output_text = self.processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True)[0]

        return output_text


@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig):
    logger.info(f"Configuration:\n{OmegaConf.to_yaml(cfg)}")
    # Hydra changes the working directory to outputs/... by default, so training
    # artifacts and local image paths are resolved against the original cwd
    # via to_absolute_path.
    adapter_path = hydra.utils.to_absolute_path(cfg.training.output_dir)

    device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
    dtype = torch.float32 if device == "cpu" else torch.bfloat16

    generator = PRTGenerator(
        base_model_id=cfg.model.model_id,
        adapter_path=adapter_path,  # Treat the training output_dir as adapter_path
        device=device,
        dtype=dtype
    )

    # Setup DataLoader
    data_path = hydra.utils.to_absolute_path(cfg.data.path)
    logger.info(f"Loading dataset from {data_path}")
    dataset = EvalDataset(data_path)
    dataloader = DataLoader(
        dataset,
        batch_size=cfg.training.batch_size,
        shuffle=False,
        # NOTE: a Dataset that returns PIL.Image objects can be unstable with a
        # multiprocess DataLoader in some environments, so stay on the safe side with 0.
        num_workers=0,
        collate_fn=eval_collate_fn
    )

    logger.info("--- Starting Evaluation ---")

    for batch in tqdm(dataloader):
        ids = batch['ids']
        images = batch['images']
        questions = batch['questions']
        answers = batch['answers']

        try:
            for i, idx in enumerate(ids):
                img = images[i]
                q = questions[i]

                if img is None:
                    logger.warning(f"Skipping ID {idx} due to image loading error.")
                    continue

                output = generator.generate(
                    image_input=img,
                    prompt=q,
                    max_new_tokens=cfg.generation.max_new_tokens,
                    prt_lambda=cfg.training.prt_lambda,
                    temperature=cfg.generation.get("temperature", 0.0),  # Default to Greedy if not specified
                    top_p=cfg.generation.get("top_p", 1.0)
                )

                print(f"\n[ID: {idx}] Q: {q}\nA (GT): {answers[i]}\nA (Pred): {output}")
        except Exception as e:
            logger.error(f"Error in batch: {e}")

    logger.info("Evaluation Completed.")

if __name__ == "__main__":
    main()
examples/prt14_qwen25vl/requirements.txt
ADDED
@@ -0,0 +1,12 @@
transformers
peft
accelerate
datasets
torch
torchvision
pillow
requests
tqdm
qwen_vl_utils
hydra-core
omegaconf
examples/prt14_qwen25vl/test_image.jpg
ADDED
Git LFS Details
examples/prt14_qwen25vl/train_prt14.py
ADDED
@@ -0,0 +1,265 @@
import logging
import os
import json
import hydra
from omegaconf import DictConfig, OmegaConf
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from peft import LoraConfig, TaskType, get_peft_model
from PIL import Image
from transformers import (
    Qwen2VLForConditionalGeneration,
    AutoProcessor,
    Trainer,
    TrainingArguments,
    get_scheduler,
)

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@dataclass
class PRTTrainingArguments(TrainingArguments):
    """TrainingArguments extended with PRT-specific parameters."""
    prt_lambda: float = 1.0  # Strength of the reward term

class PRTTrainer(Trainer):
    """
    Custom trainer for PRT (Portable Reward Tuning).
    Subclasses the standard Trainer and overrides compute_loss to compute the PRT loss.
    Architecture:
    - self.model: the Reward Model being trained (with LoRA applied)
    - self.ref_model: the frozen Reference Model (passed to the constructor)
    """
    def __init__(self, ref_model, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.ref_model = ref_model
        # The Reference Model is always in eval mode with gradients disabled
        self.ref_model.eval()
        for p in self.ref_model.parameters():
            p.requires_grad_(False)

        # Read prt_lambda from args (defaults to 1.0)
        self.prt_lambda = getattr(self.args, "prt_lambda", 1.0)
        logger.info(f"Initialized PRTTrainer with lambda={self.prt_lambda}")

    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        """
        PRT loss computation (Algorithm 1):
        Loss = CrossEntropy( log_softmax(ref_logits) + lambda * reward_logits, labels )
        """
        device = model.device

        # Make sure ref_model is on the same device (move it if not)
        if self.ref_model.device != device:
            self.ref_model.to(device)

        with torch.no_grad():
            ref_outputs = self.ref_model(**inputs)
            ref_logits = ref_outputs.logits

        # Reward Model Forward
        outputs = model(**inputs)
        reward_logits = outputs.logits

        # Logits composition
        ref_logp = F.log_softmax(ref_logits, dim=-1)
        # The reward model's logits are treated as raw reward values, so no log_softmax is applied
        combined_logits = ref_logp + (self.prt_lambda * reward_logits)

        # Cross Entropy loss
        labels = inputs.get("labels")
        if labels is None:
            raise ValueError("Labels are required for PRT training")

        shift_logits = combined_logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()

        loss_fct = torch.nn.CrossEntropyLoss()
        loss = loss_fct(
            shift_logits.view(-1, shift_logits.size(-1)),
            shift_labels.view(-1)
        )

        return (loss, outputs) if return_outputs else loss


@dataclass
class DataCollatorForPRT:
    """Data collator for Qwen2.5-VL."""
    processor: AutoProcessor

    def __call__(self, features: List[Dict[str, Union[List, str]]]) -> Dict[str, torch.Tensor]:
        texts = [f["text"] for f in features]
        images = [f["image"] for f in features]

        inputs = self.processor(
            text=texts,
            images=images,
            return_tensors="pt",
            padding=True,
        )

        labels = inputs["input_ids"].clone()
        if "attention_mask" in inputs:
            labels[inputs["attention_mask"] == 0] = -100
        inputs["labels"] = labels
        return inputs

class PRTDataset(Dataset):
    """JSONL dataset."""
    def __init__(self, jsonl_path, processor):
        self.data = []
        self.processor = processor
        self.root_dir = os.path.dirname(jsonl_path)
        with open(jsonl_path, "r") as f:
            for line in f:
                if line.strip():
                    self.data.append(json.loads(line))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        image_path = item.get("image")
        if not os.path.isabs(image_path):
            image_path = os.path.join(self.root_dir, image_path)
        try:
            image = Image.open(image_path).convert("RGB")
        except Exception:
            image = Image.new("RGB", (224, 224))  # Fallback dummy

        question = item.get("question", "Describe this image.")
        answer = item.get("answer", "")

        messages = [
            {"role": "user", "content": [{"type": "image", "image": image_path}, {"type": "text", "text": question}]},
            {"role": "assistant", "content": [{"type": "text", "text": answer}]}
        ]
        text_prompt = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
        return {"image": image, "text": text_prompt}


@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig):
    logger.info(f"Configuration:\n{OmegaConf.to_yaml(cfg)}")
    # Hydra changes the working directory to outputs/... by default, so
    # relative paths are resolved against the original cwd via to_absolute_path.
    data_path = hydra.utils.to_absolute_path(cfg.data.path)
    output_dir = hydra.utils.to_absolute_path(cfg.training.output_dir)

    # 0. Load processor (lightweight, needed for the dataset)
    logger.info(f"Loading Processor: {cfg.model.model_id}")
    processor = AutoProcessor.from_pretrained(cfg.model.model_id, trust_remote_code=True)
    dataset_cls = PRTDataset

    # 1. Load dataset (fail fast)
    logger.info(f"Loading Dataset from {data_path}")
    train_dataset = dataset_cls(data_path, processor)
    collator = DataCollatorForPRT(processor)

    # 2. Load models (heavy)
    logger.info(f"Loading Models: {cfg.model.model_id}")

    # Reference Model (frozen)
    ref_model = Qwen2VLForConditionalGeneration.from_pretrained(
        cfg.model.model_id,
        torch_dtype=torch.bfloat16 if cfg.model.bf16 else torch.float16,
        device_map=None,
        trust_remote_code=True,
    )

    # Reward Model (trainable base)
    reward_model = Qwen2VLForConditionalGeneration.from_pretrained(
        cfg.model.model_id,
        torch_dtype=torch.bfloat16 if cfg.model.bf16 else torch.float16,
        device_map=None,
        trust_remote_code=True,
    )

    # Configure LoRA
    if cfg.model.use_lora:
        logger.info("Applying LoRA to Reward Model...")
        lora_config = LoraConfig(
            r=cfg.lora.rank,
            lora_alpha=cfg.lora.alpha,
            target_modules=list(cfg.lora.target_modules),
            bias="none",
            task_type="CAUSAL_LM",
        )
        reward_model = get_peft_model(reward_model, lora_config)
        reward_model.print_trainable_parameters()
    else:
        logger.info("Full Fine-Tuning Mode")
        reward_model.gradient_checkpointing_enable()

    # Define training arguments
    training_args = PRTTrainingArguments(
        output_dir=output_dir,
        per_device_train_batch_size=cfg.training.batch_size,
        gradient_accumulation_steps=cfg.training.gradient_accumulation_steps,
        learning_rate=cfg.training.learning_rate,
        num_train_epochs=cfg.training.epochs,
        save_strategy="steps",
        save_steps=cfg.training.save_steps,
        logging_steps=10,
        bf16=cfg.model.bf16,
        fp16=not cfg.model.bf16,
        report_to="none",
        remove_unused_columns=False,
        ddp_find_unused_parameters=False,
        prt_lambda=cfg.training.prt_lambda,
    )

    # Prepare optimizer + scheduler
    # The Trainer defaults would also work, but they are built here for explicit control.
    # Full FT and LoRA expose different parameter sets, so filter on requires_grad.
    optimizer = torch.optim.AdamW(
        filter(lambda p: p.requires_grad, reward_model.parameters()),
        lr=cfg.training.learning_rate
    )

    # Step counts are needed for the scheduler
    num_update_steps_per_epoch = max(
        1,
        len(train_dataset) // (cfg.training.batch_size * cfg.training.gradient_accumulation_steps),
    )
    max_train_steps = max(1, cfg.training.epochs * num_update_steps_per_epoch)

    lr_scheduler = get_scheduler(
        name="linear",
        optimizer=optimizer,
        num_warmup_steps=0,  # Make configurable if needed
        num_training_steps=max_train_steps,
    )

    # Initialize PRT Trainer
    logger.info("Initializing PRTTrainer")
    trainer = PRTTrainer(
        model=reward_model,
        ref_model=ref_model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=collator,
        optimizers=(optimizer, lr_scheduler),  # Passed explicitly
    )

    # Train
    logger.info("Starting PRT14 Training...")
    trainer.train()

    # 3. Save the fine-tuned model and processor
    trainer.save_model()
    # trainer.save_model may not save the processor; for a PeftModel, save_pretrained
    # is called for the adapter, so the processor is saved explicitly as well.
    processor.save_pretrained(output_dir)
    logger.info(f"Saved model and processor to {output_dir}")

if __name__ == "__main__":
    main()